-rw-r--r--  Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt | 17
-rw-r--r--  Documentation/devicetree/bindings/usb/msm-ssusb.txt | 2
-rw-r--r--  arch/arm/Kconfig | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm-audio.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msm-smb138x.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-sde.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-v3.dtsi | 65
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998.dtsi | 8
-rw-r--r--  arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-qrd.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts | 19
-rw-r--r--  arch/arm64/configs/msm-auto-perf_defconfig | 4
-rw-r--r--  arch/arm64/configs/msm-auto_defconfig | 24
-rw-r--r--  drivers/android/binder.c | 1
-rw-r--r--  drivers/char/diag/diag_masks.c | 128
-rw-r--r--  drivers/clk/msm/mdss/mdss-pll.h | 1
-rw-r--r--  drivers/crypto/msm/ice.c | 14
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c | 1
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 13
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c | 8
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.h | 17
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_crtc.c | 25
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c | 22
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 25
-rw-r--r--  drivers/gpu/msm/kgsl.c | 18
-rw-r--r--  drivers/iio/adc/qcom-rradc.c | 53
-rw-r--r--  drivers/iommu/arm-smmu.c | 15
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c | 4
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 9
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc.c | 39
-rw-r--r--  drivers/mmc/card/block.c | 10
-rw-r--r--  drivers/mmc/core/core.c | 24
-rw-r--r--  drivers/mmc/core/sd.c | 5
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 3
-rw-r--r--  drivers/net/ethernet/msm/msm_rmnet_mhi.c | 8
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.c | 355
-rw-r--r--  drivers/net/wireless/ath/ath10k/snoc.h | 35
-rw-r--r--  drivers/net/wireless/ath/wil6210/ftm.c | 47
-rw-r--r--  drivers/net/wireless/ath/wil6210/ftm.h | 2
-rw-r--r--  drivers/net/wireless/cnss/Kconfig | 2
-rw-r--r--  drivers/net/wireless/cnss2/Makefile | 1
-rw-r--r--  drivers/net/wireless/cnss2/main.c | 28
-rw-r--r--  drivers/net/wireless/cnss2/main.h | 11
-rw-r--r--  drivers/net/wireless/cnss2/utils.c | 129
-rw-r--r--  drivers/net/wireless/cnss_genl/cnss_nl.c | 2
-rw-r--r--  drivers/platform/msm/gpio-usbdetect.c | 10
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 18
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 26
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 46
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 11
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_flt.c | 7
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 152
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h | 184
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_rt.c | 87
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 526
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c | 153
-rw-r--r--  drivers/power/supply/qcom/qpnp-fg-gen3.c | 1
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 10
-rw-r--r--  drivers/power/supply/qcom/step-chg-jeita.c | 14
-rw-r--r--  drivers/pwm/pwm-qpnp.c | 2
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 57
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 4
-rw-r--r--  drivers/soc/qcom/glink.c | 4
-rw-r--r--  drivers/soc/qcom/pil-msa.c | 7
-rw-r--r--  drivers/soc/qcom/spcom.c | 16
-rw-r--r--  drivers/soc/qcom/wcd-dsp-glink.c | 19
-rw-r--r--  drivers/usb/dwc3/dwc3-msm.c | 74
-rw-r--r--  drivers/usb/gadget/function/f_ccid.c | 106
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c | 5
-rw-r--r--  drivers/usb/host/xhci-plat.c | 30
-rw-r--r--  drivers/usb/host/xhci.c | 6
-rw-r--r--  drivers/usb/host/xhci.h | 1
-rw-r--r--  drivers/usb/pd/policy_engine.c | 603
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 48
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.h | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 9
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_host.c | 54
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.c | 144
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_tx.h | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.c | 49
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.h | 10
-rw-r--r--  include/linux/mmc/core.h | 1
-rw-r--r--  include/linux/mmc/host.h | 1
-rw-r--r--  include/linux/rcutree.h | 2
-rw-r--r--  include/net/cnss2.h | 6
-rw-r--r--  include/net/cnss_nl.h | 4
-rw-r--r--  include/uapi/linux/msm_ipa.h | 111
-rw-r--r--  include/uapi/linux/msm_mdp_ext.h | 22
-rw-r--r--  include/uapi/linux/rmnet_ipa_fd_ioctl.h | 80
-rw-r--r--  include/uapi/linux/videodev2.h | 7
-rw-r--r--  kernel/cgroup.c | 5
-rw-r--r--  kernel/locking/osq_lock.c | 26
-rw-r--r--  kernel/rcu/tree.c | 44
-rw-r--r--  kernel/rcu/tree_plugin.h | 14
-rw-r--r--  kernel/sched/core.c | 9
-rw-r--r--  net/netfilter/xt_socket.c | 4
-rw-r--r--  net/wireless/db.txt | 34
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c | 4
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c | 32
-rw-r--r--  sound/soc/codecs/wcd-dsp-mgr.c | 3
-rw-r--r--  sound/soc/codecs/wcd9335.c | 26
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x.c | 28
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c | 9
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-lsm-client.c | 2
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c | 329
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 8
112 files changed, 3741 insertions(+), 792 deletions(-)
diff --git a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
index acc850773210..c1a8d1bd697d 100644
--- a/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
+++ b/Documentation/devicetree/bindings/net/wireless/qcom,wcn3990-wifi.txt
@@ -11,13 +11,24 @@ Required properties:
- compatible: "qcom,wcn3990-wifi";
- reg: Memory regions defined as starting address and size
- reg-names: Names of the memory regions defined in reg entry
+ - clocks: List of clock phandles
+ - clock-names: List of clock names corresponding to the "clocks" property
- interrupts: Copy engine interrupt table
+Optional properties:
+ - <supply-name>-supply: phandle to the regulator device tree node
+ optional "supply-name" is "vdd-0.8-cx-mx".
+ - qcom,<supply>-config: Specifies voltage levels for supply. Should be
+ specified in pairs (min, max), units uV. There can
+ be optional load in uA and Regulator settle delay in
+ uS.
Example:
msm_ath10k_wlan: qcom,msm_ath10k_wlan@18800000 {
compatible = "qcom,wcn3990-wifi";
reg = <0x18800000 0x800000>;
reg-names = "membase";
+ clocks = <&clock_gcc clk_aggre2_noc_clk>;
+ clock-names = "smmu_aggre2_noc_clk";
interrupts =
<0 130 0 /* CE0 */ >,
<0 131 0 /* CE1 */ >,
@@ -31,4 +42,10 @@ Example:
<0 139 0 /* CE9 */ >,
<0 140 0 /* CE10 */ >,
<0 141 0 /* CE11 */ >;
+ vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+ qcom,vdd-3.3-ch0-config = <3104000 3312000>;
};
diff --git a/Documentation/devicetree/bindings/usb/msm-ssusb.txt b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
index 47fad8aa4a1a..54792335e67e 100644
--- a/Documentation/devicetree/bindings/usb/msm-ssusb.txt
+++ b/Documentation/devicetree/bindings/usb/msm-ssusb.txt
@@ -64,6 +64,8 @@ Optional properties :
device provides both "USB" and "USB-HOST" events.
- qcom,pm-qos-latency: This represents max tolerable CPU latency in microsecs,
which is used as a vote by driver to get max performance in perf mode.
+- qcom,no-wakeup-src-in-hostmode: If present then driver doesn't use wakeup_source APIs
+ in host mode. This allows PM suspend to happen irrespective of runtimePM state of host.
Sub nodes:
- Sub node for "DWC3- USB3 controller".
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 588393412271..22b546e0f845 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1776,7 +1776,7 @@ source "mm/Kconfig"
choice
prompt "Virtual Memory Reclaim"
- default NO_VM_RECLAIM
+ default ENABLE_VMALLOC_SAVING
help
Select the method of reclaiming virtual memory
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index 3a7514397139..75aea7280e6c 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -383,6 +383,7 @@
qcom,msm-cpudai-auxpcm-data = <0>, <0>;
qcom,msm-cpudai-auxpcm-pcm-clk-rate = <2048000>, <2048000>;
qcom,msm-auxpcm-interface = "primary";
+ qcom,msm-cpudai-afe-clk-ver = <2>;
};
dai_sec_auxpcm: qcom,msm-sec-auxpcm {
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
index c156e91dfcf9..fa21dd7995eb 100644
--- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -88,7 +88,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
@@ -129,7 +129,7 @@
};
};
-&smb138x_parallel_slave {
+&smb1381_charger {
smb138x_vbus: qcom,smb138x-vbus {
status = "disabled";
regulator-name = "smb138x-vbus";
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index ac6afd999fd0..c1728da49d5e 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -1381,6 +1381,7 @@
&usb2s {
status = "ok";
+ qcom,no-wakeup-src-in-hostmode;
};
&usb3 {
@@ -1388,6 +1389,7 @@
vbus_dwc3-supply = <&usb_otg_switch>;
vdda33-supply = <&pm8994_l24>;
vdda18-supply = <&pm8994_l12>;
+ qcom,no-wakeup-src-in-hostmode;
};
&blsp1_uart2 {
diff --git a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
index b0688668e667..11c45606f6c2 100644
--- a/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-sde.dtsi
@@ -183,7 +183,7 @@
};
smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
- compatible = "qcom,smmu_kms_unsec";
+ compatible = "qcom,smmu_sde_unsec";
iommus = <&mdp_smmu 0>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-v3.dtsi b/arch/arm/boot/dts/qcom/msm8996-v3.dtsi
index 7e5fa8a495c9..8e46ce5277b3 100644
--- a/arch/arm/boot/dts/qcom/msm8996-v3.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-v3.dtsi
@@ -259,6 +259,71 @@
};
};
+ qcom,gpu-pwrlevels-2 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,speed-bin = <2>;
+
+ qcom,initial-pwrlevel = <4>;
+
+ qcom,gpu-pwrlevel@0 {
+ reg = <0>;
+ qcom,gpu-freq = <560000000>;
+ qcom,bus-freq = <11>;
+ qcom,bus-min = <11>;
+ qcom,bus-max = <11>;
+ };
+
+ qcom,gpu-pwrlevel@1 {
+ reg = <1>;
+ qcom,gpu-freq = <510000000>;
+ qcom,bus-freq = <9>;
+ qcom,bus-min = <8>;
+ qcom,bus-max = <10>;
+ };
+
+ qcom,gpu-pwrlevel@2 {
+ reg = <2>;
+ qcom,gpu-freq = <401800000>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <7>;
+ qcom,bus-max = <9>;
+ };
+
+ qcom,gpu-pwrlevel@3 {
+ reg = <3>;
+ qcom,gpu-freq = <315000000>;
+ qcom,bus-freq = <6>;
+ qcom,bus-min = <5>;
+ qcom,bus-max = <7>;
+ };
+
+ qcom,gpu-pwrlevel@4 {
+ reg = <4>;
+ qcom,gpu-freq = <214000000>;
+ qcom,bus-freq = <4>;
+ qcom,bus-min = <3>;
+ qcom,bus-max = <5>;
+ };
+
+ qcom,gpu-pwrlevel@5 {
+ reg = <5>;
+ qcom,gpu-freq = <133000000>;
+ qcom,bus-freq = <3>;
+ qcom,bus-min = <2>;
+ qcom,bus-max = <4>;
+ };
+
+ qcom,gpu-pwrlevel@6 {
+ reg = <6>;
+ qcom,gpu-freq = <27000000>;
+ qcom,bus-freq = <0>;
+ qcom,bus-min = <0>;
+ qcom,bus-max = <0>;
+
+ };
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index eafa6b841c17..76e3282d327e 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -3106,6 +3106,8 @@
compatible = "qcom,wcn3990-wifi";
reg = <0x18800000 0x800000>;
reg-names = "membase";
+ clocks = <&clock_gcc clk_rf_clk2_pin>;
+ clock-names = "cxo_ref_clk_pin";
interrupts =
<0 413 0 /* CE0 */ >,
<0 414 0 /* CE1 */ >,
@@ -3119,6 +3121,12 @@
<0 423 0 /* CE9 */ >,
<0 424 0 /* CE10 */ >,
<0 425 0 /* CE11 */ >;
+ vdd-0.8-cx-mx-supply = <&pm8998_l5>;
+ vdd-1.8-xo-supply = <&pm8998_l7_pin_ctrl>;
+ vdd-1.3-rfa-supply = <&pm8998_l17_pin_ctrl>;
+ vdd-3.3-ch0-supply = <&pm8998_l25_pin_ctrl>;
+ qcom,vdd-0.8-cx-mx-config = <800000 800000>;
+ qcom,vdd-3.3-ch0-config = <3104000 3312000>;
};
qcom,icnss@18800000 {
diff --git a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
index f4a9592bf4ff..ccc1be75f39b 100644
--- a/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda636-pm660a-qrd-hdk.dts
@@ -98,7 +98,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
index 5f44b4c32c98..0d7b6c0341b5 100644
--- a/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
+++ b/arch/arm/boot/dts/qcom/sda660-pm660a-qrd-hdk.dts
@@ -98,7 +98,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
index af3c5d1b51da..384e24d221c4 100644
--- a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
@@ -92,7 +92,7 @@
};
};
- smb138x_parallel_slave: qcom,smb138x-parallel-slave@1000 {
+ smb1381_charger: qcom,smb1381-charger@1000 {
compatible = "qcom,smb138x-parallel-slave";
qcom,pmic-revid = <&smb138x_revid>;
reg = <0x1000 0x700>;
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index c436ce643091..c626698ffd51 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -1606,6 +1606,7 @@
qcom,msm_fastrpc {
compatible = "qcom,msm-fastrpc-adsp";
qcom,fastrpc-glink;
+ qcom,fastrpc-vmid-heap-shared;
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
diff --git a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
index e6d9f7b7d2f2..8e0cb4e4efaf 100644
--- a/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
+++ b/arch/arm/boot/dts/qcom/vplatform-lfv-msm8996.dts
@@ -91,6 +91,25 @@
ranges = <0 0 0 0xffffffff>;
compatible = "simple-bus";
+ qcom,mpm2-sleep-counter@4a3000 {
+ compatible = "qcom,mpm2-sleep-counter";
+ reg = <0x004a3000 0x1000>;
+ clock-frequency = <32768>;
+ };
+
+ qcom,msm-imem@66bf000 {
+ compatible = "qcom,msm-imem";
+ reg = <0x66bf000 0x1000>; /* Address and size of IMEM */
+ ranges = <0x0 0x66bf000 0x1000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ boot_stats@6b0 {
+ compatible = "qcom,msm-imem-boot_stats";
+ reg = <0x6b0 32>;
+ };
+ };
+
sound-adp-agave {
compatible = "qcom,apq8096-asoc-snd-adp-agave";
qcom,model = "apq8096-adp-agave-snd-card";
diff --git a/arch/arm64/configs/msm-auto-perf_defconfig b/arch/arm64/configs/msm-auto-perf_defconfig
index 7e3bf18b06f7..e55ebfc79ddb 100644
--- a/arch/arm64/configs/msm-auto-perf_defconfig
+++ b/arch/arm64/configs/msm-auto-perf_defconfig
@@ -277,10 +277,10 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_CRYPTO=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
-CONFIG_CNSS=y
-CONFIG_CNSS_ASYNC=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
+CONFIG_CNSS2=y
+CONFIG_CNSS2_DEBUG=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_KEYBOARD_GPIO=y
diff --git a/arch/arm64/configs/msm-auto_defconfig b/arch/arm64/configs/msm-auto_defconfig
index 92fc522c11ed..8f8e696f8866 100644
--- a/arch/arm64/configs/msm-auto_defconfig
+++ b/arch/arm64/configs/msm-auto_defconfig
@@ -1,4 +1,5 @@
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_FHANDLE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
@@ -232,6 +233,8 @@ CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
+CONFIG_DEVTMPFS=y
+CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
@@ -278,10 +281,10 @@ CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_CRYPTO=y
CONFIG_ATH_CARDS=y
CONFIG_WIL6210=m
-CONFIG_CNSS=y
-CONFIG_CNSS_ASYNC=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
+CONFIG_CNSS2=y
+CONFIG_CNSS2_DEBUG=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_KEYBOARD_GPIO=y
@@ -311,7 +314,6 @@ CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_MSM_RDBG=m
-CONFIG_I2C=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MUX=y
CONFIG_I2C_QUP=y
@@ -348,7 +350,6 @@ CONFIG_THERMAL_TSENS8974=y
CONFIG_THERMAL_QPNP_ADC_TM=y
CONFIG_MFD_SPMI_PMIC=y
CONFIG_WCD9335_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_FAN53555=y
CONFIG_REGULATOR_MAX20010=y
@@ -380,15 +381,11 @@ CONFIG_MSM_AIS_CAMERA_SENSOR=y
# CONFIG_MEDIA_SUBDRV_AUTOSELECT is not set
CONFIG_VIDEO_ADV7481=y
CONFIG_QCOM_KGSL=y
+CONFIG_DRM=y
CONFIG_MSM_BA_V4L2=y
-CONFIG_FB=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_MSM_DBA=y
+CONFIG_MSM_DBA_ADV7533=y
CONFIG_BACKLIGHT_LCD_SUPPORT=y
-CONFIG_BACKLIGHT_CLASS_DEVICE=y
CONFIG_BACKLIGHT_GENERIC=m
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
@@ -472,7 +469,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
-CONFIG_SW_SYNC_USER=y
+CONFIG_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -521,7 +518,6 @@ CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
CONFIG_MSM_L2_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_SCM_XPU=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -576,7 +572,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
@@ -593,6 +588,7 @@ CONFIG_DEBUG_OBJECTS_TIMERS=y
CONFIG_DEBUG_OBJECTS_WORK=y
CONFIG_DEBUG_OBJECTS_RCU_HEAD=y
CONFIG_DEBUG_OBJECTS_PERCPU_COUNTER=y
+CONFIG_SLUB_DEBUG_ON=y
CONFIG_DEBUG_KMEMLEAK=y
CONFIG_DEBUG_KMEMLEAK_DEFAULT_OFF=y
CONFIG_DEBUG_STACK_USAGE=y
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index 13598d807de0..34f45abe0181 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -3253,6 +3253,7 @@ static void binder_transaction(struct binder_proc *proc,
err_dead_proc_or_thread:
return_error = BR_DEAD_REPLY;
return_error_line = __LINE__;
+ binder_dequeue_work(proc, tcomplete);
err_translate_failed:
err_bad_object_type:
err_bad_offset:
diff --git a/drivers/char/diag/diag_masks.c b/drivers/char/diag/diag_masks.c
index e206d9db4d7d..e1e86f6e74dc 100644
--- a/drivers/char/diag/diag_masks.c
+++ b/drivers/char/diag/diag_masks.c
@@ -555,6 +555,11 @@ static int diag_cmd_get_ssid_range(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -656,7 +661,11 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -669,6 +678,12 @@ static int diag_cmd_get_msg_mask(unsigned char *src_buf, int src_len,
rsp.status = MSG_STATUS_FAIL;
rsp.padding = 0;
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if ((req->ssid_first < mask->ssid_first) ||
(req->ssid_first > mask->ssid_last_tools)) {
@@ -714,11 +729,23 @@ static int diag_cmd_set_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_build_mask_t *)src_buf;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
if (i < (driver->msg_mask_tbl_count - 1)) {
mask_next = mask;
@@ -831,6 +858,11 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_msg_config_rsp_t *)src_buf;
@@ -838,6 +870,13 @@ static int diag_cmd_set_all_msg_mask(unsigned char *src_buf, int src_len,
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
mask_info->status = (req->rt_mask) ? DIAG_CTRL_MASK_ALL_ENABLED :
DIAG_CTRL_MASK_ALL_DISABLED;
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
@@ -931,7 +970,11 @@ static int diag_cmd_update_event_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_event_mask_config_t *)src_buf;
mask_len = EVENT_COUNT_TO_BYTES(req->num_bits);
if (mask_len <= 0 || mask_len > event_mask.mask_len) {
@@ -989,6 +1032,11 @@ static int diag_cmd_toggle_events(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
toggle = *(src_buf + 1);
mutex_lock(&mask_info->lock);
@@ -1046,6 +1094,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
if (!diag_apps_responds())
return 0;
@@ -1065,6 +1118,11 @@ static int diag_cmd_get_log_mask(unsigned char *src_buf, int src_len,
write_len += rsp_header_len;
log_item = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!log_item->ptr) {
+ pr_err("diag: Invalid input in %s, mask: %pK\n",
+ __func__, log_item);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, log_item++) {
if (log_item->equip_id != req->equip_id)
continue;
@@ -1172,11 +1230,20 @@ static int diag_cmd_set_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
req = (struct diag_log_config_req_t *)src_buf;
read_len += req_header_len;
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
if (req->equip_id >= MAX_EQUIP_ID) {
pr_err("diag: In %s, Invalid logging mask request, equip_id: %d\n",
__func__, req->equip_id);
@@ -1294,9 +1361,17 @@ static int diag_cmd_disable_log_mask(unsigned char *src_buf, int src_len,
mask_info);
return -EINVAL;
}
-
+ if (!mask_info->ptr) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK\n",
+ __func__, mask_info->ptr);
+ return -EINVAL;
+ }
mask = (struct diag_log_mask_t *)mask_info->ptr;
-
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
mutex_lock(&mask->lock);
memset(mask->ptr, 0, mask->range);
@@ -1562,7 +1637,7 @@ static int __diag_mask_init(struct diag_mask_info *mask_info, int mask_len,
static void __diag_mask_exit(struct diag_mask_info *mask_info)
{
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
@@ -1619,11 +1694,17 @@ void diag_log_mask_free(struct diag_mask_info *mask_info)
int i;
struct diag_log_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1698,11 +1779,18 @@ void diag_msg_mask_free(struct diag_mask_info *mask_info)
int i;
struct diag_msg_mask_t *mask = NULL;
- if (!mask_info)
+ if (!mask_info || !mask_info->ptr)
return;
mutex_lock(&mask_info->lock);
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)mask_info->ptr;
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
kfree(mask->ptr);
mask->ptr = NULL;
@@ -1869,6 +1957,11 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
mutex_lock(&driver->diag_maskclear_mutex);
if (driver->mask_clear) {
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -1881,6 +1974,13 @@ int diag_copy_to_user_msg_mask(char __user *buf, size_t count,
mutex_lock(&driver->msg_mask_lock);
mask = (struct diag_msg_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&driver->msg_mask_lock);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < driver->msg_mask_tbl_count; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
@@ -1941,8 +2041,20 @@ int diag_copy_to_user_log_mask(char __user *buf, size_t count,
if (!mask_info)
return -EIO;
+ if (!mask_info->ptr || !mask_info->update_buf) {
+ pr_err("diag: In %s, invalid input mask_info->ptr: %pK, mask_info->update_buf: %pK\n",
+ __func__, mask_info->ptr, mask_info->update_buf);
+ return -EINVAL;
+ }
+
mutex_lock(&mask_info->lock);
mask = (struct diag_log_mask_t *)(mask_info->ptr);
+ if (!mask->ptr) {
+ pr_err("diag: Invalid input in %s, mask->ptr: %pK\n",
+ __func__, mask->ptr);
+ mutex_unlock(&mask_info->lock);
+ return -EINVAL;
+ }
for (i = 0; i < MAX_EQUIP_ID; i++, mask++) {
ptr = mask_info->update_buf;
len = 0;
diff --git a/drivers/clk/msm/mdss/mdss-pll.h b/drivers/clk/msm/mdss/mdss-pll.h
index 0120d71f0daf..7aa8b0d6c051 100644
--- a/drivers/clk/msm/mdss/mdss-pll.h
+++ b/drivers/clk/msm/mdss/mdss-pll.h
@@ -70,6 +70,7 @@ struct dfps_info {
struct dfps_panel_info panel_dfps;
struct dfps_codes_info codes_dfps[DFPS_MAX_NUM_OF_FRAME_RATES];
void *dfps_fb_base;
+ uint32_t chip_serial;
};
struct mdss_pll_resources {
diff --git a/drivers/crypto/msm/ice.c b/drivers/crypto/msm/ice.c
index 490f8d9ddb9f..68b6a26f00b8 100644
--- a/drivers/crypto/msm/ice.c
+++ b/drivers/crypto/msm/ice.c
@@ -869,7 +869,7 @@ static int qcom_ice_restore_key_config(struct ice_device *ice_dev)
static int qcom_ice_init_clocks(struct ice_device *ice)
{
int ret = -EINVAL;
- struct ice_clk_info *clki;
+ struct ice_clk_info *clki = NULL;
struct device *dev = ice->pdev;
struct list_head *head = &ice->clk_list_head;
@@ -913,7 +913,7 @@ out:
static int qcom_ice_enable_clocks(struct ice_device *ice, bool enable)
{
int ret = 0;
- struct ice_clk_info *clki;
+ struct ice_clk_info *clki = NULL;
struct device *dev = ice->pdev;
struct list_head *head = &ice->clk_list_head;
@@ -1590,12 +1590,14 @@ struct platform_device *qcom_ice_get_pdevice(struct device_node *node)
if (ice_dev->pdev->of_node == node) {
pr_info("%s: found ice device %pK\n", __func__,
ice_dev);
+ ice_pdev = to_platform_device(ice_dev->pdev);
break;
}
}
- ice_pdev = to_platform_device(ice_dev->pdev);
- pr_info("%s: matching platform device %pK\n", __func__, ice_pdev);
+ if (ice_pdev)
+ pr_info("%s: matching platform device %pK\n", __func__,
+ ice_pdev);
out:
return ice_pdev;
}
@@ -1615,11 +1617,11 @@ static struct ice_device *get_ice_device_from_storage_type
if (!strcmp(ice_dev->ice_instance_type, storage_type)) {
pr_debug("%s: found ice device %pK\n",
__func__, ice_dev);
- break;
+ return ice_dev;
}
}
out:
- return ice_dev;
+ return NULL;
}
static int enable_ice_setup(struct ice_device *ice_dev)
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
index 3c470caec571..0d93edb9201f 100644
--- a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -549,6 +549,7 @@ static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
display->pll_update_enable = false;
display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE;
+ display->sink_hdcp22_support = false;
mutex_unlock(&display->display_lock);
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 7d660ba56594..9dbd86eff816 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -424,7 +424,7 @@ static struct hdmi_platform_config hdmi_tx_8994_config = {
static struct hdmi_platform_config hdmi_tx_8996_config = {
.phy_init = NULL,
HDMI_CFG(pwr_reg, none),
- HDMI_CFG(hpd_reg, none),
+ HDMI_CFG(hpd_reg, 8x74),
HDMI_CFG(pwr_clk, 8x74),
HDMI_CFG(hpd_clk, 8x74),
.hpd_freq = hpd_clk_freq_8x74,
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 83b34a071ced..c61753311771 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -185,9 +185,14 @@ static void vblank_ctrl_worker(struct kthread_work *work)
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
+ struct kthread_worker *worker = work->worker;
+ struct msm_drm_commit *commit = container_of(worker,
+ struct msm_drm_commit, worker);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
+ if (vbl_ev->crtc_id != commit->crtc_id)
+ continue;
list_del(&vbl_ev->node);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
@@ -673,10 +678,10 @@ static int msm_open(struct drm_device *dev, struct drm_file *file)
if (IS_ERR(ctx))
return PTR_ERR(ctx);
- if (ctx)
+ if (ctx) {
INIT_LIST_HEAD(&ctx->counters);
-
- msm_submitqueue_init(ctx);
+ msm_submitqueue_init(ctx);
+ }
file->driver_priv = ctx;
@@ -2146,7 +2151,9 @@ static int msm_pdev_probe(struct platform_device *pdev)
#ifdef CONFIG_OF
add_components(&pdev->dev, &match, "connectors");
+#ifndef CONFIG_QCOM_KGSL
add_components(&pdev->dev, &match, "gpus");
+#endif
#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index 90a6b19ccf40..6a741a7ce0f6 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -534,13 +534,7 @@ static int sde_connector_atomic_get_property(struct drm_connector *connector,
idx = msm_property_index(&c_conn->property_info, property);
if (idx == CONNECTOR_PROP_RETIRE_FENCE)
- /*
- * Set a fence offset if not a virtual connector, so that the
- * fence signals after one additional commit rather than at the
- * end of the current one.
- */
- rc = sde_fence_create(&c_conn->retire_fence, val,
- c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+ rc = sde_fence_create(&c_conn->retire_fence, val, 0);
else
/* get cached property value */
rc = msm_property_atomic_get(&c_conn->property_info,
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index f9b8c3966d74..0f563ac25da8 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -390,5 +390,22 @@ enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn);
*/
int sde_connector_get_dpms(struct drm_connector *connector);
+/**
+ * sde_connector_needs_offset - adjust the output fence offset based on
+ * display type
+ * @connector: Pointer to drm connector object
+ * Returns: true if offset is required, false for all other cases.
+ */
+static inline bool sde_connector_needs_offset(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+
+ if (!connector)
+ return false;
+
+ c_conn = to_sde_connector(connector);
+ return (c_conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL);
+}
+
#endif /* _SDE_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index a0417a0dd12e..e99eba0dadb7 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -204,10 +204,15 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
idx = left_crtc_zpos_cnt[pstate->stage]++;
}
+ /* stage plane on right LM if it crosses the boundary */
+ lm_right = (lm_idx == LEFT_MIXER) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
+ crtc_split_width);
+
/*
* program each mixer with two hw pipes in dual mixer mode,
*/
- if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS && lm_right) {
stage_cfg->stage[LEFT_MIXER][pstate->stage][1] =
sde_plane_pipe(plane, 1);
@@ -218,10 +223,7 @@ static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
flush_mask |= ctl->ops.get_bitmask_sspp(ctl,
sde_plane_pipe(plane, lm_idx ? 1 : 0));
- /* stage plane on right LM if it crosses the boundary */
- lm_right = (lm_idx == LEFT_MIXER) &&
- (plane->state->crtc_x + plane->state->crtc_w >
- crtc_split_width);
+
stage_cfg->stage[lm_idx][pstate->stage][idx] =
sde_plane_pipe(plane, lm_idx ? 1 : 0);
@@ -1674,19 +1676,28 @@ static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
struct sde_crtc *sde_crtc;
struct sde_crtc_state *cstate;
int i, ret = -EINVAL;
+ bool conn_offset = 0;
if (!crtc || !state) {
SDE_ERROR("invalid argument(s)\n");
} else {
sde_crtc = to_sde_crtc(crtc);
cstate = to_sde_crtc_state(state);
+
+ for (i = 0; i < cstate->num_connectors; ++i) {
+ conn_offset = sde_connector_needs_offset(
+ cstate->connectors[i]);
+ if (conn_offset)
+ break;
+ }
+
i = msm_property_index(&sde_crtc->property_info, property);
if (i == CRTC_PROP_OUTPUT_FENCE) {
int offset = sde_crtc_get_property(cstate,
CRTC_PROP_OUTPUT_FENCE_OFFSET);
- ret = sde_fence_create(
- &sde_crtc->output_fence, val, offset);
+ ret = sde_fence_create(&sde_crtc->output_fence, val,
+ offset + conn_offset);
if (ret)
SDE_ERROR("fence create failed\n");
} else {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index 6e2ccfa8e428..9cbee5243e6d 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -1237,8 +1237,10 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
bool q16_data = true;
int idx;
struct sde_phy_plane *pp;
- uint32_t num_of_phy_planes = 0, maxlinewidth = 0xFFFF;
+ uint32_t num_of_phy_planes = 0;
int mode = 0;
+ uint32_t crtc_split_width;
+ bool is_across_mixer_boundary = false;
if (!plane) {
SDE_ERROR("invalid plane\n");
@@ -1252,6 +1254,7 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
pstate = to_sde_plane_state(plane->state);
crtc = state->crtc;
+ crtc_split_width = get_crtc_split_width(crtc);
fb = state->fb;
if (!crtc || !fb) {
SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
@@ -1348,17 +1351,17 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
}
}
- list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
- if (maxlinewidth > pp->pipe_sblk->maxlinewidth)
- maxlinewidth = pp->pipe_sblk->maxlinewidth;
+ list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
num_of_phy_planes++;
- }
/*
* Only need to use one physical plane if plane width is still within
* the limitation.
*/
- if (maxlinewidth >= (src.x + src.w))
+ is_across_mixer_boundary = (plane->state->crtc_x < crtc_split_width) &&
+ (plane->state->crtc_x + plane->state->crtc_w >
+ crtc_split_width);
+ if (crtc_split_width >= (src.x + src.w) && !is_across_mixer_boundary)
num_of_phy_planes = 1;
if (num_of_phy_planes > 1) {
@@ -1369,9 +1372,10 @@ static int _sde_plane_mode_set(struct drm_plane *plane,
list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
/* Adjust offset for multi-pipe */
- src.x += src.w * pp->index;
- dst.x += dst.w * pp->index;
-
+ if (num_of_phy_planes > 1) {
+ src.x += src.w * pp->index;
+ dst.x += dst.w * pp->index;
+ }
pp->pipe_cfg.src_rect = src;
pp->pipe_cfg.dst_rect = dst;
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 3fb13c7a0814..78f74b883877 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -65,8 +65,8 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
};
static void a5xx_irq_storm_worker(struct work_struct *work);
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
- uint32_t major, uint32_t minor);
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+ uint32_t id, uint32_t major, uint32_t minor);
static void a5xx_gpmu_reset(struct work_struct *work);
static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
@@ -709,6 +709,7 @@ static int _load_gpmu_firmware(struct adreno_device *adreno_dev)
if (data[1] != GPMU_FIRMWARE_ID)
goto err;
ret = _read_fw2_block_header(&data[2],
+ data[0] - 2,
GPMU_FIRMWARE_ID,
adreno_dev->gpucore->gpmu_major,
adreno_dev->gpucore->gpmu_minor);
@@ -1231,8 +1232,8 @@ void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on)
kgsl_regwrite(device, A5XX_RBBM_ISDB_CNT, on ? 0x00000182 : 0x00000180);
}
-static int _read_fw2_block_header(uint32_t *header, uint32_t id,
- uint32_t major, uint32_t minor)
+static int _read_fw2_block_header(uint32_t *header, uint32_t remain,
+ uint32_t id, uint32_t major, uint32_t minor)
{
uint32_t header_size;
int i = 1;
@@ -1242,7 +1243,8 @@ static int _read_fw2_block_header(uint32_t *header, uint32_t id,
header_size = header[0];
/* Headers have limited size and always occur as pairs of words */
- if (header_size > MAX_HEADER_SIZE || header_size % 2)
+ if (header_size > MAX_HEADER_SIZE || header_size >= remain ||
+ header_size % 2 || header_size == 0)
return -EINVAL;
/* Sequences must have an identifying id first thing in their header */
if (id == GPMU_SEQUENCE_ID) {
@@ -1306,8 +1308,8 @@ static void _load_regfile(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
const struct firmware *fw;
- uint32_t block_size = 0, block_total = 0, fw_size;
- uint32_t *block;
+ uint64_t block_size = 0, block_total = 0;
+ uint32_t fw_size, *block;
int ret = -EINVAL;
if (!adreno_dev->gpucore->regfw_name)
@@ -1329,7 +1331,8 @@ static void _load_regfile(struct adreno_device *adreno_dev)
/* All offset numbers calculated from file description */
while (block_total < fw_size) {
block_size = block[0];
- if (block_size >= fw_size || block_size < 2)
+ if (((block_total + block_size) >= fw_size)
+ || block_size < 5)
goto err;
if (block[1] != GPMU_SEQUENCE_ID)
goto err;
@@ -1337,6 +1340,7 @@ static void _load_regfile(struct adreno_device *adreno_dev)
/* For now ignore blocks other than the LM sequence */
if (block[4] == LM_SEQUENCE_ID) {
ret = _read_fw2_block_header(&block[2],
+ block_size - 2,
GPMU_SEQUENCE_ID,
adreno_dev->gpucore->lm_major,
adreno_dev->gpucore->lm_minor);
@@ -1344,6 +1348,9 @@ static void _load_regfile(struct adreno_device *adreno_dev)
goto err;
adreno_dev->lm_fw = fw;
+
+ if (block[2] > (block_size - 2))
+ goto err;
adreno_dev->lm_sequence = block + block[2] + 3;
adreno_dev->lm_size = block_size - block[2] - 2;
}
@@ -1356,7 +1363,7 @@ static void _load_regfile(struct adreno_device *adreno_dev)
err:
release_firmware(fw);
KGSL_PWR_ERR(device,
- "Register file failed to load sz=%d bsz=%d header=%d\n",
+ "Register file failed to load sz=%d bsz=%llu header=%d\n",
fw_size, block_size, ret);
return;
}
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index c46d5ee3c468..de4ba83903f9 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -2742,6 +2742,10 @@ static int _kgsl_gpumem_sync_cache(struct kgsl_mem_entry *entry,
int cacheop;
int mode;
+ /* Cache ops are not allowed on secure memory */
+ if (entry->memdesc.flags & KGSL_MEMFLAGS_SECURE)
+ return 0;
+
/*
* Flush is defined as (clean | invalidate). If both bits are set, then
* do a flush, otherwise check for the individual bits and clean or inv
@@ -3439,6 +3443,7 @@ long kgsl_ioctl_sparse_virt_free(struct kgsl_device_private *dev_priv,
return 0;
}
+/* entry->bind_lock must be held by the caller */
static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
uint64_t v_offset,
struct kgsl_memdesc *memdesc,
@@ -3467,10 +3472,16 @@ static int _sparse_add_to_bind_tree(struct kgsl_mem_entry *entry,
parent = *node;
this = rb_entry(parent, struct sparse_bind_object, node);
- if (new->v_off < this->v_off)
+ if ((new->v_off < this->v_off) &&
+ ((new->v_off + new->size) <= this->v_off))
node = &parent->rb_left;
- else if (new->v_off > this->v_off)
+ else if ((new->v_off > this->v_off) &&
+ (new->v_off >= (this->v_off + this->size)))
node = &parent->rb_right;
+ else {
+ kfree(new);
+ return -EADDRINUSE;
+ }
}
rb_link_node(&new->node, parent, node);
@@ -3691,8 +3702,11 @@ static int _sparse_bind(struct kgsl_process_private *process,
return ret;
}
+ spin_lock(&virt_entry->bind_lock);
ret = _sparse_add_to_bind_tree(virt_entry, v_offset, memdesc,
p_offset, size, flags);
+ spin_unlock(&virt_entry->bind_lock);
+
if (ret == 0)
memdesc->cur_bindings += size / PAGE_SIZE;
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index 28ab4e52dab5..b3aa73f1a5a1 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -22,6 +22,7 @@
#include <linux/regmap.h>
#include <linux/delay.h>
#include <linux/qpnp/qpnp-revid.h>
+#include <linux/power_supply.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -192,8 +193,7 @@
#define FG_RR_ADC_STS_CHANNEL_READING_MASK 0x3
#define FG_RR_ADC_STS_CHANNEL_STS 0x2
-#define FG_RR_CONV_CONTINUOUS_TIME_MIN_US 50000
-#define FG_RR_CONV_CONTINUOUS_TIME_MAX_US 51000
+#define FG_RR_CONV_CONTINUOUS_TIME_MIN_MS 50
#define FG_RR_CONV_MAX_RETRY_CNT 50
#define FG_RR_TP_REV_VERSION1 21
#define FG_RR_TP_REV_VERSION2 29
@@ -235,6 +235,7 @@ struct rradc_chip {
struct device_node *revid_dev_node;
struct pmic_revid_data *pmic_fab_id;
int volt;
+ struct power_supply *usb_trig;
};
struct rradc_channels {
@@ -726,6 +727,24 @@ static int rradc_disable_continuous_mode(struct rradc_chip *chip)
return rc;
}
+static bool rradc_is_usb_present(struct rradc_chip *chip)
+{
+ union power_supply_propval pval;
+ int rc;
+ bool usb_present = false;
+
+ if (!chip->usb_trig) {
+ pr_debug("USB property not present\n");
+ return usb_present;
+ }
+
+ rc = power_supply_get_property(chip->usb_trig,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ usb_present = (rc < 0) ? 0 : pval.intval;
+
+ return usb_present;
+}
+
static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u8 *buf, u16 status)
{
@@ -745,8 +764,18 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
(retry_cnt < FG_RR_CONV_MAX_RETRY_CNT)) {
pr_debug("%s is not ready; nothing to read:0x%x\n",
rradc_chans[prop->channel].datasheet_name, buf[0]);
- usleep_range(FG_RR_CONV_CONTINUOUS_TIME_MIN_US,
- FG_RR_CONV_CONTINUOUS_TIME_MAX_US);
+
+ if (((prop->channel == RR_ADC_CHG_TEMP) ||
+ (prop->channel == RR_ADC_SKIN_TEMP) ||
+ (prop->channel == RR_ADC_USBIN_I) ||
+ (prop->channel == RR_ADC_DIE_TEMP)) &&
+ ((!rradc_is_usb_present(chip)))) {
+ pr_debug("USB not present for %d\n", prop->channel);
+ rc = -ENODATA;
+ break;
+ }
+
+ msleep(FG_RR_CONV_CONTINUOUS_TIME_MIN_MS);
retry_cnt++;
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
@@ -764,7 +793,7 @@ static int rradc_check_status_ready_with_retry(struct rradc_chip *chip,
static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u8 *buf)
{
- int rc = 0;
+ int rc = 0, ret = 0;
u16 status = 0;
rc = rradc_enable_continuous_mode(chip);
@@ -777,23 +806,25 @@ static int rradc_read_channel_with_continuous_mode(struct rradc_chip *chip,
rc = rradc_read(chip, status, buf, 1);
if (rc < 0) {
pr_err("status read failed:%d\n", rc);
- return rc;
+ ret = rc;
+ goto disable;
}
rc = rradc_check_status_ready_with_retry(chip, prop,
buf, status);
if (rc < 0) {
pr_err("Status read failed:%d\n", rc);
- return rc;
+ ret = rc;
}
+disable:
rc = rradc_disable_continuous_mode(chip);
if (rc < 0) {
pr_err("Failed to switch to non continuous mode\n");
- return rc;
+ ret = rc;
}
- return rc;
+ return ret;
}
static int rradc_enable_batt_id_channel(struct rradc_chip *chip, bool enable)
@@ -1149,6 +1180,10 @@ static int rradc_probe(struct platform_device *pdev)
indio_dev->channels = chip->iio_chans;
indio_dev->num_channels = chip->nchannels;
+ chip->usb_trig = power_supply_get_by_name("usb");
+ if (!chip->usb_trig)
+ pr_debug("Error obtaining usb power supply\n");
+
return devm_iio_device_register(dev, indio_dev);
}
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index b30739de79e7..6317478916ef 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1243,6 +1243,7 @@ static void arm_smmu_secure_pool_destroy(struct arm_smmu_domain *smmu_domain)
list_for_each_entry_safe(it, i, &smmu_domain->secure_pool_list, list) {
arm_smmu_unprepare_pgtable(smmu_domain, it->addr, it->size);
/* pages will be freed later (after being unassigned) */
+ list_del(&it->list);
kfree(it);
}
}
@@ -1956,10 +1957,20 @@ static void arm_smmu_destroy_domain_context(struct iommu_domain *domain)
cb_base = ARM_SMMU_CB_BASE(smmu) + ARM_SMMU_CB(smmu, cfg->cbndx);
writel_relaxed(0, cb_base + ARM_SMMU_CB_SCTLR);
- arm_smmu_tlb_inv_context(smmu_domain);
-
arm_smmu_disable_clocks(smmu_domain->smmu);
+ if (smmu_domain->pgtbl_ops) {
+ free_io_pgtable_ops(smmu_domain->pgtbl_ops);
+ /* unassign any freed page table memory */
+ if (arm_smmu_is_master_side_secure(smmu_domain)) {
+ arm_smmu_secure_domain_lock(smmu_domain);
+ arm_smmu_secure_pool_destroy(smmu_domain);
+ arm_smmu_unassign_table(smmu_domain);
+ arm_smmu_secure_domain_unlock(smmu_domain);
+ }
+ smmu_domain->pgtbl_ops = NULL;
+ }
+
free_irqs:
if (cfg->irptndx != INVALID_IRPTNDX) {
irq = smmu->irqs[smmu->num_global_irqs + cfg->irptndx];
diff --git a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
index f6d7f5fb8d32..8a49c7cf9f4a 100644
--- a/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
+++ b/drivers/media/platform/msm/camera_v2/common/msm_camera_io_util.c
@@ -424,7 +424,7 @@ int msm_camera_config_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
curr_vreg = &cam_vreg[j];
reg_ptr[j] = regulator_get(dev,
curr_vreg->reg_name);
- if (IS_ERR(reg_ptr[j])) {
+ if (IS_ERR_OR_NULL(reg_ptr[j])) {
pr_err("%s: %s get failed\n",
__func__,
curr_vreg->reg_name);
@@ -531,7 +531,7 @@ int msm_camera_enable_vreg(struct device *dev, struct camera_vreg_t *cam_vreg,
continue;
} else
j = i;
- if (IS_ERR(reg_ptr[j])) {
+ if (IS_ERR_OR_NULL(reg_ptr[j])) {
pr_err("%s: %s null regulator\n",
__func__, cam_vreg[j].reg_name);
goto disable_vreg;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 63f5497e63b8..66c5ce11ea3d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -674,6 +674,7 @@ void msm_isp_process_reg_upd_epoch_irq(struct vfe_device *vfe_dev,
void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
+ uint32_t framedrop_period = 0;
stream_info->runtime_num_burst_capture = stream_info->num_burst_capture;
/**
@@ -682,9 +683,15 @@ void msm_isp_reset_framedrop(struct vfe_device *vfe_dev,
* by the request frame api
*/
if (!stream_info->controllable_output) {
- stream_info->current_framedrop_period =
+ framedrop_period =
msm_isp_get_framedrop_period(
stream_info->frame_skip_pattern);
+ if (stream_info->frame_skip_pattern == SKIP_ALL)
+ stream_info->current_framedrop_period =
+ MSM_VFE_STREAM_STOP_PERIOD;
+ else
+ stream_info->current_framedrop_period =
+ framedrop_period;
}
msm_isp_cfg_framedrop_reg(stream_info);
diff --git a/drivers/media/platform/msm/vidc/msm_vidc.c b/drivers/media/platform/msm/vidc/msm_vidc.c
index 2eaae18bc2e9..733aa4769941 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc.c
@@ -1198,6 +1198,8 @@ int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
{
struct msm_vidc_inst *inst = instance;
struct msm_vidc_capability *capability = NULL;
+ enum hal_video_codec codec;
+ int i;
if (!inst || !fsize) {
dprintk(VIDC_ERR, "%s: invalid parameter: %pK %pK\n",
@@ -1206,15 +1208,36 @@ int msm_vidc_enum_framesizes(void *instance, struct v4l2_frmsizeenum *fsize)
}
if (!inst->core)
return -EINVAL;
+ if (fsize->index != 0)
+ return -EINVAL;
+
+ codec = get_hal_codec(fsize->pixel_format);
+ if (codec == HAL_UNUSED_CODEC)
+ return -EINVAL;
- capability = &inst->capability;
- fsize->type = V4L2_FRMSIZE_TYPE_STEPWISE;
- fsize->stepwise.min_width = capability->width.min;
- fsize->stepwise.max_width = capability->width.max;
- fsize->stepwise.step_width = capability->width.step_size;
- fsize->stepwise.min_height = capability->height.min;
- fsize->stepwise.max_height = capability->height.max;
- fsize->stepwise.step_height = capability->height.step_size;
+ for (i = 0; i < VIDC_MAX_SESSIONS; i++) {
+ if (inst->core->capabilities[i].codec == codec) {
+ capability = &inst->core->capabilities[i];
+ break;
+ }
+ }
+
+ if (capability) {
+ fsize->type = capability->width.step_size == 1 &&
+ capability->height.step_size == 1 ?
+ V4L2_FRMSIZE_TYPE_CONTINUOUS :
+ V4L2_FRMSIZE_TYPE_STEPWISE;
+ fsize->stepwise.min_width = capability->width.min;
+ fsize->stepwise.max_width = capability->width.max;
+ fsize->stepwise.step_width = capability->width.step_size;
+ fsize->stepwise.min_height = capability->height.min;
+ fsize->stepwise.max_height = capability->height.max;
+ fsize->stepwise.step_height = capability->height.step_size;
+ } else {
+ dprintk(VIDC_ERR, "%s: Invalid Pixel Fmt %#x\n",
+ __func__, fsize->pixel_format);
+ return -EINVAL;
+ }
return 0;
}
EXPORT_SYMBOL(msm_vidc_enum_framesizes);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index c002fa5ff602..063e00517660 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -1224,16 +1224,16 @@ static int mmc_blk_ioctl_cmd(struct block_device *bdev,
ioc_err = __mmc_blk_ioctl_cmd(card, md, idata);
- mmc_put_card(card);
-
- err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
-
if (mmc_card_cmdq(card)) {
if (mmc_cmdq_halt(card->host, false))
pr_err("%s: %s: cmdq unhalt failed\n",
mmc_hostname(card->host), __func__);
}
+ mmc_put_card(card);
+
+ err = mmc_blk_ioctl_copy_to_user(ic_ptr, idata);
+
cmd_done:
mmc_blk_put(md);
cmd_err:
@@ -4000,7 +4000,7 @@ static int mmc_blk_cmdq_issue_rq(struct mmc_queue *mq, struct request *req)
} else {
pr_err("%s: %s: partition switch failed err = %d\n",
md->disk->disk_name, __func__, err);
- ret = 0;
+ ret = err;
goto out;
}
}
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 0da9c5caea13..372f1fbbde4c 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3301,6 +3301,13 @@ static void _mmc_detect_change(struct mmc_host *host, unsigned long delay,
pm_wakeup_event(mmc_dev(host), 5000);
host->detect_change = 1;
+ /*
+ * Change in cd_gpio state, so make sure detection part is
+ * not overided because of manual resume.
+ */
+ if (cd_irq && mmc_bus_manual_resume(host))
+ host->ignore_bus_resume_flags = true;
+
mmc_schedule_delayed_work(&host->detect, delay);
}
@@ -4165,6 +4172,18 @@ int mmc_detect_card_removed(struct mmc_host *host)
}
EXPORT_SYMBOL(mmc_detect_card_removed);
+/*
+ * This should be called to make sure that detect work(mmc_rescan)
+ * is completed.Drivers may use this function from async schedule/probe
+ * contexts to make sure that the bootdevice detection is completed on
+ * completion of async_schedule.
+ */
+void mmc_flush_detect_work(struct mmc_host *host)
+{
+ flush_delayed_work(&host->detect);
+}
+EXPORT_SYMBOL(mmc_flush_detect_work);
+
void mmc_rescan(struct work_struct *work)
{
unsigned long flags;
@@ -4199,6 +4218,8 @@ void mmc_rescan(struct work_struct *work)
host->bus_ops->detect(host);
host->detect_change = 0;
+ if (host->ignore_bus_resume_flags)
+ host->ignore_bus_resume_flags = false;
/*
* Let mmc_bus_put() free the bus/bus_ops if we've found that
@@ -4456,7 +4477,8 @@ int mmc_pm_notify(struct notifier_block *notify_block,
spin_lock_irqsave(&host->lock, flags);
host->rescan_disable = 0;
- if (mmc_bus_manual_resume(host)) {
+ if (mmc_bus_manual_resume(host) &&
+ !host->ignore_bus_resume_flags) {
spin_unlock_irqrestore(&host->lock, flags);
break;
}
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 9bef77ba29fd..21836eac001e 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1237,7 +1237,10 @@ static int mmc_sd_suspend(struct mmc_host *host)
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
- }
+ /* if suspend fails, force mmc_detect_change during resume */
+ } else if (mmc_bus_manual_resume(host))
+ host->ignore_bus_resume_flags = true;
+
MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index df3fce93b6d1..45d2f69f5f1a 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -4747,6 +4747,9 @@ static int sdhci_msm_probe(struct platform_device *pdev)
mmc_hostname(host->mmc), __func__, ret);
device_remove_file(&pdev->dev, &msm_host->auto_cmd21_attr);
}
+ if (sdhci_msm_is_bootdevice(&pdev->dev))
+ mmc_flush_detect_work(host->mmc);
+
/* Successful initialization */
goto out;
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index de14dcc6f4ed..a342e39b9f43 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -958,6 +958,7 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)
{
struct rmnet_mhi_private *rmnet_mhi_ptr;
struct mhi_result *result;
+ char ifalias[IFALIASZ];
int r = 0;
if (!cb_info || !cb_info->result) {
@@ -979,9 +980,16 @@ static void rmnet_mhi_cb(struct mhi_cb_info *cb_info)
* as we set mhi_enabled = 0, we gurantee rest of
* driver will not touch any critical data.
*/
+ snprintf(ifalias, sizeof(ifalias), "%s", "unidentified_netdev");
write_lock_irq(&rmnet_mhi_ptr->pm_lock);
rmnet_mhi_ptr->mhi_enabled = 0;
write_unlock_irq(&rmnet_mhi_ptr->pm_lock);
+ /* Set unidentified_net_dev string to ifalias
+ * on error notification
+ */
+ rtnl_lock();
+ dev_set_alias(rmnet_mhi_ptr->dev, ifalias, strlen(ifalias));
+ rtnl_unlock();
if (cb_info->chan == rmnet_mhi_ptr->rx_channel) {
rmnet_log(rmnet_mhi_ptr, MSG_INFO,
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index f5aa88a76f17..c42d7eebf465 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -27,6 +27,8 @@
#include "qmi.h"
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
#define WCN3990_MAX_IRQ 12
@@ -48,6 +50,7 @@ const char *ce_name[WCN3990_MAX_IRQ] = {
#define ATH10K_SNOC_TARGET_WAIT 3000
#define ATH10K_SNOC_NUM_WARM_RESET_ATTEMPTS 3
#define SNOC_HIF_POWER_DOWN_DELAY 30
+#define ATH10K_MAX_PROP_SIZE 32
static void ath10k_snoc_buffer_cleanup(struct ath10k *ar);
static int ath10k_snoc_request_irq(struct ath10k *ar);
@@ -1248,6 +1251,326 @@ int ath10k_snoc_pm_notifier(struct notifier_block *nb,
return NOTIFY_DONE;
}
+static int ath10k_get_vreg_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_vreg_info *vreg_info)
+{
+ int ret = 0;
+ char prop_name[ATH10K_MAX_PROP_SIZE];
+ struct regulator *reg;
+ const __be32 *prop;
+ int len = 0;
+ int i;
+
+ reg = devm_regulator_get_optional(dev, vreg_info->name);
+ if (PTR_ERR(reg) == -EPROBE_DEFER) {
+ ath10k_err(ar, "EPROBE_DEFER for regulator: %s\n",
+ vreg_info->name);
+ ret = PTR_ERR(reg);
+ goto out;
+ }
+
+ if (IS_ERR(reg)) {
+ ret = PTR_ERR(reg);
+
+ if (vreg_info->required) {
+ ath10k_err(ar, "Regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto out;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "Optional regulator %s doesn't exist: %d\n",
+ vreg_info->name, ret);
+ goto done;
+ }
+ }
+
+ vreg_info->reg = reg;
+
+ snprintf(prop_name, ATH10K_MAX_PROP_SIZE,
+ "qcom,%s-config", vreg_info->name);
+
+ prop = of_get_property(dev->of_node, prop_name, &len);
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Got regulator cfg,prop: %s, len: %d\n",
+ prop_name, len);
+
+ if (!prop || len < (2 * sizeof(__be32))) {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Property %s %s\n", prop_name,
+ prop ? "invalid format" : "doesn't exist");
+ goto done;
+ }
+
+ for (i = 0; (i * sizeof(__be32)) < len; i++) {
+ switch (i) {
+ case 0:
+ vreg_info->min_v = be32_to_cpup(&prop[0]);
+ break;
+ case 1:
+ vreg_info->max_v = be32_to_cpup(&prop[1]);
+ break;
+ case 2:
+ vreg_info->load_ua = be32_to_cpup(&prop[2]);
+ break;
+ case 3:
+ vreg_info->settle_delay = be32_to_cpup(&prop[3]);
+ break;
+ default:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "%s, ignoring val %d\n",
+ prop_name, i);
+ break;
+ }
+ }
+
+done:
+ ath10k_dbg(ar, ATH10K_DBG_SNOC,
+ "vreg: %s, min_v: %u, max_v: %u, load: %u, delay: %lu\n",
+ vreg_info->name, vreg_info->min_v, vreg_info->max_v,
+ vreg_info->load_ua, vreg_info->settle_delay);
+
+ return 0;
+
+out:
+ return ret;
+}
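
ath10k_get_vreg_info() reads the optional qcom,<name>-config property as an array of big-endian cells in the order min_uV, max_uV, load_uA, settle_delay, and silently keeps the defaults when the property is missing or too short. A hedged sketch of just that parsing step (the node pointer and property name are assumptions of the example):

#include <linux/of.h>

struct vreg_cfg {
	u32 min_v, max_v, load_ua;
	unsigned long settle_delay;
};

static void parse_vreg_cfg(const struct device_node *np,
			   const char *prop_name, struct vreg_cfg *cfg)
{
	const __be32 *prop;
	int len = 0;

	prop = of_get_property(np, prop_name, &len);
	if (!prop || len < 2 * (int)sizeof(__be32))
		return;                         /* keep the defaults */

	cfg->min_v = be32_to_cpup(&prop[0]);
	cfg->max_v = be32_to_cpup(&prop[1]);
	if (len >= 3 * (int)sizeof(__be32))
		cfg->load_ua = be32_to_cpup(&prop[2]);
	if (len >= 4 * (int)sizeof(__be32))
		cfg->settle_delay = be32_to_cpup(&prop[3]);
}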
+
+static int ath10k_get_clk_info(struct ath10k *ar, struct device *dev,
+ struct ath10k_wcn3990_clk_info *clk_info)
+{
+ struct clk *handle;
+ int ret = 0;
+
+ handle = devm_clk_get(dev, clk_info->name);
+ if (IS_ERR(handle)) {
+ ret = PTR_ERR(handle);
+ if (clk_info->required) {
+ ath10k_err(ar, "Clock %s isn't available: %d\n",
+ clk_info->name, ret);
+ goto out;
+ } else {
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Ignoring clk %s: %d\n",
+ clk_info->name,
+ ret);
+ ret = 0;
+ goto out;
+ }
+ }
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock: %s, freq: %u\n",
+ clk_info->name, clk_info->freq);
+
+ clk_info->handle = handle;
+out:
+ return ret;
+}
+
+static int ath10k_wcn3990_vreg_on(struct ath10k *ar)
+{
+ int ret = 0;
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being enabled\n",
+ vreg_info->name);
+
+ ret = regulator_set_voltage(vreg_info->reg, vreg_info->min_v,
+ vreg_info->max_v);
+ if (ret) {
+ ath10k_err(ar,
+ "vreg %s, set failed:min:%u,max:%u,ret: %d\n",
+ vreg_info->name, vreg_info->min_v,
+ vreg_info->max_v, ret);
+ break;
+ }
+
+ if (vreg_info->load_ua) {
+ ret = regulator_set_load(vreg_info->reg,
+ vreg_info->load_ua);
+ if (ret < 0) {
+ ath10k_err(ar,
+ "Reg %s, can't set load:%u,ret: %d\n",
+ vreg_info->name,
+ vreg_info->load_ua, ret);
+ break;
+ }
+ }
+
+ ret = regulator_enable(vreg_info->reg);
+ if (ret) {
+ ath10k_err(ar, "Regulator %s, can't enable: %d\n",
+ vreg_info->name, ret);
+ break;
+ }
+
+ if (vreg_info->settle_delay)
+ udelay(vreg_info->settle_delay);
+ }
+
+ if (!ret)
+ return 0;
+
+ for (; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ regulator_disable(vreg_info->reg);
+ regulator_set_load(vreg_info->reg, 0);
+ regulator_set_voltage(vreg_info->reg, 0, vreg_info->max_v);
+ }
+
+ return ret;
+}
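
The enable loop above stops at the first failing supply and then walks back down the array undoing what it already switched on. The core of that enable-with-rollback idiom, reduced to regulator_enable()/regulator_disable() only (the array and count are the sketch's own assumptions):

#include <linux/regulator/consumer.h>

static int enable_all(struct regulator **regs, int count)
{
	int i, ret = 0;

	for (i = 0; i < count; i++) {
		ret = regulator_enable(regs[i]);
		if (ret)
			break;                  /* stop at the first failure */
	}
	if (!ret)
		return 0;

	/* undo only the supplies that were actually enabled, in reverse */
	while (--i >= 0)
		regulator_disable(regs[i]);
	return ret;
}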
+
+static int ath10k_wcn3990_vreg_off(struct ath10k *ar)
+{
+ int ret = 0;
+ struct ath10k_wcn3990_vreg_info *vreg_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = ATH10K_WCN3990_VREG_INFO_SIZE - 1; i >= 0; i--) {
+ vreg_info = &ar_snoc->vreg[i];
+
+ if (!vreg_info->reg)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Regulator %s being disabled\n",
+ vreg_info->name);
+
+ ret = regulator_disable(vreg_info->reg);
+ if (ret)
+ ath10k_err(ar, "Regulator %s, can't disable: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_load(vreg_info->reg, 0);
+ if (ret < 0)
+ ath10k_err(ar, "Regulator %s, can't set load: %d\n",
+ vreg_info->name, ret);
+
+ ret = regulator_set_voltage(vreg_info->reg, 0,
+ vreg_info->max_v);
+ if (ret)
+ ath10k_err(ar, "Regulator %s, can't set voltage: %d\n",
+ vreg_info->name, ret);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_init(struct ath10k *ar)
+{
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+ int ret = 0;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being enabled\n",
+ clk_info->name);
+
+ if (clk_info->freq) {
+ ret = clk_set_rate(clk_info->handle, clk_info->freq);
+
+ if (ret) {
+ ath10k_err(ar, "Clk %s,set err: %u,ret: %d\n",
+ clk_info->name, clk_info->freq,
+ ret);
+ break;
+ }
+ }
+
+ ret = clk_prepare_enable(clk_info->handle);
+ if (ret) {
+ ath10k_err(ar, "Clock %s, can't enable: %d\n",
+ clk_info->name, ret);
+ break;
+ }
+ }
+
+ if (ret == 0)
+ return 0;
+
+ for (; i >= 0; i--) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return ret;
+}
+
+static int ath10k_wcn3990_clk_deinit(struct ath10k *ar)
+{
+ struct ath10k_wcn3990_clk_info *clk_info;
+ int i;
+ struct ath10k_snoc *ar_snoc = ath10k_snoc_priv(ar);
+
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ clk_info = &ar_snoc->clk[i];
+
+ if (!clk_info->handle)
+ continue;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "Clock %s being disabled\n",
+ clk_info->name);
+
+ clk_disable_unprepare(clk_info->handle);
+ }
+
+ return 0;
+}
+
+static int ath10k_hw_power_on(struct ath10k *ar)
+{
+ int ret = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power on\n");
+
+ ret = ath10k_wcn3990_vreg_on(ar);
+ if (ret)
+ goto out;
+
+ ret = ath10k_wcn3990_clk_init(ar);
+ if (ret)
+ goto vreg_off;
+
+ return ret;
+
+vreg_off:
+ ath10k_wcn3990_vreg_off(ar);
+out:
+ return ret;
+}
+
+static int ath10k_hw_power_off(struct ath10k *ar)
+{
+ int ret = 0;
+
+ ath10k_dbg(ar, ATH10K_DBG_SNOC, "HW Power off\n");
+
+ ath10k_wcn3990_clk_deinit(ar);
+
+ ret = ath10k_wcn3990_vreg_off(ar);
+
+ return ret;
+}
+
static const struct ath10k_hif_ops ath10k_snoc_hif_ops = {
.tx_sg = ath10k_snoc_hif_tx_sg,
.start = ath10k_snoc_hif_start,
@@ -1275,6 +1598,7 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
enum ath10k_hw_rev hw_rev;
struct device *dev;
u32 chip_id;
+ u32 i;
dev = &pdev->dev;
hw_rev = ATH10K_HW_WCN3990;
@@ -1308,22 +1632,43 @@ static int ath10k_snoc_probe(struct platform_device *pdev)
setup_timer(&ar_snoc->rx_post_retry, ath10k_snoc_rx_replenish_retry,
(unsigned long)ar);
+ memcpy(ar_snoc->vreg, vreg_cfg, sizeof(vreg_cfg));
+ for (i = 0; i < ATH10K_WCN3990_VREG_INFO_SIZE; i++) {
+ ret = ath10k_get_vreg_info(ar, dev, &ar_snoc->vreg[i]);
+ if (ret)
+ goto err_core_destroy;
+ }
+
+ memcpy(ar_snoc->clk, clk_cfg, sizeof(clk_cfg));
+ for (i = 0; i < ATH10K_WCN3990_CLK_INFO_SIZE; i++) {
+ ret = ath10k_get_clk_info(ar, dev, &ar_snoc->clk[i]);
+ if (ret)
+ goto err_core_destroy;
+ }
+
+ ret = ath10k_hw_power_on(ar);
+ if (ret) {
+ ath10k_err(ar, "failed to power on device: %d\n", ret);
+ goto err_stop_qmi_service;
+ }
+
ret = ath10k_snoc_claim(ar);
if (ret) {
ath10k_err(ar, "failed to claim device: %d\n", ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
+
ret = ath10k_snoc_bus_configure(ar);
if (ret) {
ath10k_err(ar, "failed to configure bus: %d\n", ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
ret = ath10k_snoc_alloc_pipes(ar);
if (ret) {
ath10k_err(ar, "failed to allocate copy engine pipes: %d\n",
ret);
- goto err_stop_qmi_service;
+ goto err_hw_power_off;
}
netif_napi_add(&ar->napi_dev, &ar->napi, ath10k_snoc_napi_poll,
@@ -1359,6 +1704,9 @@ err_free_irq:
err_free_pipes:
ath10k_snoc_free_pipes(ar);
+err_hw_power_off:
+ ath10k_hw_power_off(ar);
+
err_stop_qmi_service:
ath10k_snoc_stop_qmi_service(ar);
@@ -1389,6 +1737,7 @@ static int ath10k_snoc_remove(struct platform_device *pdev)
ath10k_snoc_release_resource(ar);
ath10k_snoc_free_pipes(ar);
ath10k_snoc_stop_qmi_service(ar);
+ ath10k_hw_power_off(ar);
ath10k_core_destroy(ar);
return 0;
diff --git a/drivers/net/wireless/ath/ath10k/snoc.h b/drivers/net/wireless/ath/ath10k/snoc.h
index d6e05ba18cb8..a02cb2ad928e 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.h
+++ b/drivers/net/wireless/ath/ath10k/snoc.h
@@ -17,6 +17,7 @@
#include "ce.h"
#include "pci.h"
#include "qmi.h"
+#include <linux/kernel.h>
#include <soc/qcom/service-locator.h>
#define ATH10K_SNOC_RX_POST_RETRY_MS 50
#define CE_POLL_PIPE 4
@@ -112,6 +113,38 @@ struct ath10k_snoc_ce_irq {
u32 irq_line;
};
+struct ath10k_wcn3990_vreg_info {
+ struct regulator *reg;
+ const char *name;
+ u32 min_v;
+ u32 max_v;
+ u32 load_ua;
+ unsigned long settle_delay;
+ bool required;
+};
+
+struct ath10k_wcn3990_clk_info {
+ struct clk *handle;
+ const char *name;
+ u32 freq;
+ bool required;
+};
+
+static struct ath10k_wcn3990_vreg_info vreg_cfg[] = {
+ {NULL, "vdd-0.8-cx-mx", 800000, 800000, 0, 0, false},
+ {NULL, "vdd-1.8-xo", 1800000, 1800000, 0, 0, false},
+ {NULL, "vdd-1.3-rfa", 1304000, 1304000, 0, 0, false},
+ {NULL, "vdd-3.3-ch0", 3312000, 3312000, 0, 0, false},
+};
+
+#define ATH10K_WCN3990_VREG_INFO_SIZE ARRAY_SIZE(vreg_cfg)
+
+static struct ath10k_wcn3990_clk_info clk_cfg[] = {
+ {NULL, "cxo_ref_clk_pin", 0, false},
+};
+
+#define ATH10K_WCN3990_CLK_INFO_SIZE ARRAY_SIZE(clk_cfg)
+
/* struct ath10k_snoc: SNOC info struct
* @dev: device structure
* @ar:ath10k base structure
@@ -157,6 +190,8 @@ struct ath10k_snoc {
atomic_t fw_crashed;
atomic_t pm_ops_inprogress;
struct ath10k_snoc_qmi_config qmi_cfg;
+ struct ath10k_wcn3990_vreg_info vreg[ATH10K_WCN3990_VREG_INFO_SIZE];
+ struct ath10k_wcn3990_clk_info clk[ATH10K_WCN3990_CLK_INFO_SIZE];
};
struct ath10k_event_pd_down_data {
diff --git a/drivers/net/wireless/ath/wil6210/ftm.c b/drivers/net/wireless/ath/wil6210/ftm.c
index 6891a38d7a59..5906b90b337d 100644
--- a/drivers/net/wireless/ath/wil6210/ftm.c
+++ b/drivers/net/wireless/ath/wil6210/ftm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -38,6 +38,9 @@
/* initial token to use on non-secure FTM measurement */
#define WIL_TOF_FTM_DEFAULT_INITIAL_TOKEN 2
+/* maximum AOA burst period, limited by FW */
+#define WIL_AOA_MAX_BURST_PERIOD 255
+
#define WIL_TOF_FTM_MAX_LCI_LENGTH (240)
#define WIL_TOF_FTM_MAX_LCR_LENGTH (240)
@@ -62,6 +65,7 @@ nla_policy wil_nl80211_ftm_peer_policy[
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_FLAGS] = { .type = NLA_U32 },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS] = { .type = NLA_NESTED },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID] = { .type = NLA_U8 },
+ [QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD] = { .type = NLA_U16 },
[QCA_WLAN_VENDOR_ATTR_FTM_PEER_FREQ] = { .type = NLA_U32 },
};
@@ -311,8 +315,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
struct wmi_tof_session_start_cmd *cmd;
mutex_lock(&wil->ftm.lock);
- if (wil->ftm.session_started) {
- wil_err(wil, "FTM session already running\n");
+ if (wil->ftm.session_started || wil->ftm.aoa_started) {
+ wil_err(wil, "FTM or AOA session already running\n");
rc = -EAGAIN;
goto out;
}
@@ -356,6 +360,7 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
}
cmd->session_id = cpu_to_le32(WIL_FTM_FW_SESSION_ID);
+ cmd->aoa_type = request->aoa_type;
cmd->num_of_dest = cpu_to_le16(request->n_peers);
for (i = 0; i < request->n_peers; i++) {
ether_addr_copy(cmd->ftm_dest_info[i].dst_mac,
@@ -398,6 +403,8 @@ wil_ftm_cfg80211_start_session(struct wil6210_priv *wil,
request->peers[i].params.burst_duration;
cmd->ftm_dest_info[i].burst_period =
cpu_to_le16(request->peers[i].params.burst_period);
+ cmd->ftm_dest_info[i].num_burst_per_aoa_meas =
+ request->peers[i].aoa_burst_period;
}
rc = wmi_send(wil, WMI_TOF_SESSION_START_CMDID, cmd, cmd_len);
@@ -482,8 +489,8 @@ wil_aoa_cfg80211_start_measurement(struct wil6210_priv *wil,
mutex_lock(&wil->ftm.lock);
- if (wil->ftm.aoa_started) {
- wil_err(wil, "AOA measurement already running\n");
+ if (wil->ftm.aoa_started || wil->ftm.session_started) {
+ wil_err(wil, "AOA or FTM measurement already running\n");
rc = -EAGAIN;
goto out;
}
@@ -524,8 +531,8 @@ void wil_aoa_cfg80211_meas_result(struct wil6210_priv *wil,
mutex_lock(&wil->ftm.lock);
- if (!wil->ftm.aoa_started) {
- wil_info(wil, "AOA not started, not sending result\n");
+ if (!wil->ftm.aoa_started && !wil->ftm.session_started) {
+ wil_info(wil, "AOA/FTM not started, not sending result\n");
goto out;
}
@@ -678,6 +685,10 @@ void wil_aoa_evt_meas(struct wil6210_priv *wil,
int data_len = len - offsetof(struct wmi_aoa_meas_event, meas_data);
struct wil_aoa_meas_result *res;
+ if (data_len < 0) {
+ wil_err(wil, "AOA event too short (%d)\n", len);
+ return;
+ }
data_len = min_t(int, le16_to_cpu(evt->length), data_len);
res = kmalloc(sizeof(*res) + data_len, GFP_KERNEL);
@@ -749,6 +760,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
struct nlattr *tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MAX + 1];
struct nlattr *peer;
int rc, n_peers = 0, index = 0, tmp;
+ u32 aoa_type = 0;
if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
return -ENOTSUPP;
@@ -770,6 +782,14 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
return -EINVAL;
}
+ if (tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]) {
+ aoa_type = nla_get_u32(tb[QCA_WLAN_VENDOR_ATTR_AOA_TYPE]);
+ if (aoa_type >= QCA_WLAN_VENDOR_ATTR_AOA_TYPE_MAX) {
+ wil_err(wil, "invalid AOA type: %d\n", aoa_type);
+ return -EINVAL;
+ }
+ }
+
nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
tmp)
n_peers++;
@@ -793,6 +813,7 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
request->session_cookie =
nla_get_u64(tb[QCA_WLAN_VENDOR_ATTR_FTM_SESSION_COOKIE]);
+ request->aoa_type = aoa_type;
request->n_peers = n_peers;
nla_for_each_nested(peer, tb[QCA_WLAN_VENDOR_ATTR_FTM_MEAS_PEERS],
tmp) {
@@ -821,6 +842,18 @@ int wil_ftm_start_session(struct wiphy *wiphy, struct wireless_dev *wdev,
if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID])
request->peers[index].secure_token_id = nla_get_u8(
tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_SECURE_TOKEN_ID]);
+ if (tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]) {
+ request->peers[index].aoa_burst_period = nla_get_u16(
+ tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_AOA_BURST_PERIOD]);
+ if (request->peers[index].aoa_burst_period >
+ WIL_AOA_MAX_BURST_PERIOD) {
+ wil_err(wil, "Invalid AOA burst period at index: %d\n",
+ index);
+ rc = -EINVAL;
+ goto out;
+ }
+ }
+
rc = wil_ftm_parse_meas_params(
wil,
tb2[QCA_WLAN_VENDOR_ATTR_FTM_PEER_MEAS_PARAMS],
diff --git a/drivers/net/wireless/ath/wil6210/ftm.h b/drivers/net/wireless/ath/wil6210/ftm.h
index 8efa292d5ff4..21923c27ec06 100644
--- a/drivers/net/wireless/ath/wil6210/ftm.h
+++ b/drivers/net/wireless/ath/wil6210/ftm.h
@@ -437,12 +437,14 @@ struct wil_ftm_meas_peer_info {
u32 flags; /* enum qca_wlan_vendor_attr_ftm_peer_meas_flags */
struct wil_ftm_meas_params params;
u8 secure_token_id;
u16 aoa_burst_period; /* 0 = no AOA; otherwise measure AOA every <value> bursts */
};
/* session request, passed to wil_ftm_cfg80211_start_session */
struct wil_ftm_session_request {
u64 session_cookie;
u32 n_peers;
+ u32 aoa_type; /* enum qca_wlan_vendor_attr_aoa_type */
/* keep last, variable size according to n_peers */
struct wil_ftm_meas_peer_info peers[0];
};
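
Because peers[0] is a flexible array at the end of the struct, a session request is sized as one allocation covering the header plus n_peers entries. A sketch of how such a request would typically be allocated (the helper itself is illustrative, not code from this driver):

#include <linux/slab.h>

static struct wil_ftm_session_request *alloc_session_req(u32 n_peers)
{
	struct wil_ftm_session_request *req;

	/* one allocation: fixed header followed by n_peers peer entries */
	req = kzalloc(sizeof(*req) + n_peers * sizeof(req->peers[0]),
		      GFP_KERNEL);
	if (!req)
		return NULL;

	req->n_peers = n_peers;
	return req;
}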
diff --git a/drivers/net/wireless/cnss/Kconfig b/drivers/net/wireless/cnss/Kconfig
index 863f766bccdb..051b709f53f0 100644
--- a/drivers/net/wireless/cnss/Kconfig
+++ b/drivers/net/wireless/cnss/Kconfig
@@ -12,7 +12,7 @@ config CNSS
config CNSS_ASYNC
bool "Enable/disable cnss pci platform driver asynchronous probe"
- depends on CNSS
+ depends on CNSS || CNSS2
---help---
If enabled, CNSS PCI platform driver would do asynchronous probe.
Using asynchronous probe will allow CNSS PCI platform driver to
diff --git a/drivers/net/wireless/cnss2/Makefile b/drivers/net/wireless/cnss2/Makefile
index 9d383c8daa43..b49d0898178b 100644
--- a/drivers/net/wireless/cnss2/Makefile
+++ b/drivers/net/wireless/cnss2/Makefile
@@ -5,5 +5,4 @@ cnss2-y += debug.o
cnss2-y += pci.o
cnss2-y += power.o
cnss2-y += qmi.o
-cnss2-y += utils.o
cnss2-y += wlan_firmware_service_v01.o
diff --git a/drivers/net/wireless/cnss2/main.c b/drivers/net/wireless/cnss2/main.c
index c033d843949f..ef46a0aadcc5 100644
--- a/drivers/net/wireless/cnss2/main.c
+++ b/drivers/net/wireless/cnss2/main.c
@@ -322,31 +322,6 @@ void cnss_remove_pm_qos(void)
}
EXPORT_SYMBOL(cnss_remove_pm_qos);
-u8 *cnss_common_get_wlan_mac_address(struct device *dev, u32 *num)
-{
- struct cnss_plat_data *plat_priv = cnss_bus_dev_to_plat_priv(dev);
- struct cnss_wlan_mac_info *wlan_mac_info;
- struct cnss_wlan_mac_addr *addr;
-
- if (!plat_priv)
- goto out;
-
- wlan_mac_info = &plat_priv->wlan_mac_info;
- if (!wlan_mac_info->is_wlan_mac_set) {
- cnss_pr_info("Platform driver doesn't have any MAC address!\n");
- goto out;
- }
-
- addr = &wlan_mac_info->wlan_mac_addr;
- *num = addr->no_of_mac_addr_set;
-
- return &addr->mac_addr[0][0];
-out:
- *num = 0;
- return NULL;
-}
-EXPORT_SYMBOL(cnss_common_get_wlan_mac_address);
-
int cnss_wlan_enable(struct device *dev,
struct cnss_wlan_enable_cfg *config,
enum cnss_driver_mode mode,
@@ -2343,6 +2318,9 @@ static struct platform_driver cnss_platform_driver = {
.name = "cnss2",
.owner = THIS_MODULE,
.of_match_table = cnss_of_match_table,
+#ifdef CONFIG_CNSS_ASYNC
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+#endif
},
};
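
probe_type lives in the embedded struct device_driver, so the new field sits inside .driver and is compiled in only when CONFIG_CNSS_ASYNC is set. The same arrangement on a throwaway driver (names and callbacks are placeholders):

#include <linux/module.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)  { return 0; }
static int example_remove(struct platform_device *pdev) { return 0; }

static struct platform_driver example_driver = {
	.probe  = example_probe,
	.remove = example_remove,
	.driver = {
		.name = "example",
#ifdef CONFIG_CNSS_ASYNC
		/* let the driver core probe this device asynchronously */
		.probe_type = PROBE_PREFER_ASYNCHRONOUS,
#endif
	},
};
module_platform_driver(example_driver);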
diff --git a/drivers/net/wireless/cnss2/main.h b/drivers/net/wireless/cnss2/main.h
index a5f9ce37b0ea..4bf1c27d99de 100644
--- a/drivers/net/wireless/cnss2/main.h
+++ b/drivers/net/wireless/cnss2/main.h
@@ -97,16 +97,6 @@ struct cnss_bus_bw_info {
int current_bw_vote;
};
-struct cnss_wlan_mac_addr {
- u8 mac_addr[MAX_NO_OF_MAC_ADDR][ETH_ALEN];
- u32 no_of_mac_addr_set;
-};
-
-struct cnss_wlan_mac_info {
- struct cnss_wlan_mac_addr wlan_mac_addr;
- bool is_wlan_mac_set;
-};
-
struct cnss_fw_mem {
size_t size;
void *va;
@@ -185,7 +175,6 @@ struct cnss_plat_data {
struct cnss_wlan_driver *driver_ops;
enum cnss_driver_status driver_status;
u32 recovery_count;
- struct cnss_wlan_mac_info wlan_mac_info;
unsigned long driver_state;
struct list_head event_list;
spinlock_t event_lock; /* spinlock for driver work event handling */
diff --git a/drivers/net/wireless/cnss2/utils.c b/drivers/net/wireless/cnss2/utils.c
deleted file mode 100644
index 9ffe386e3677..000000000000
--- a/drivers/net/wireless/cnss2/utils.c
+++ /dev/null
@@ -1,129 +0,0 @@
-/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#define CNSS_MAX_CH_NUM 45
-
-#include <linux/module.h>
-#include <linux/slab.h>
-
-static DEFINE_MUTEX(unsafe_channel_list_lock);
-static DEFINE_MUTEX(dfs_nol_info_lock);
-
-static struct cnss_unsafe_channel_list {
- u16 unsafe_ch_count;
- u16 unsafe_ch_list[CNSS_MAX_CH_NUM];
-} unsafe_channel_list;
-
-static struct cnss_dfs_nol_info {
- void *dfs_nol_info;
- u16 dfs_nol_info_len;
-} dfs_nol_info;
-
-int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count)
-{
- mutex_lock(&unsafe_channel_list_lock);
- if ((!unsafe_ch_list) || (ch_count > CNSS_MAX_CH_NUM)) {
- mutex_unlock(&unsafe_channel_list_lock);
- return -EINVAL;
- }
-
- unsafe_channel_list.unsafe_ch_count = ch_count;
-
- if (ch_count != 0) {
- memcpy((char *)unsafe_channel_list.unsafe_ch_list,
- (char *)unsafe_ch_list, ch_count * sizeof(u16));
- }
- mutex_unlock(&unsafe_channel_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(cnss_set_wlan_unsafe_channel);
-
-int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list,
- u16 *ch_count, u16 buf_len)
-{
- mutex_lock(&unsafe_channel_list_lock);
- if (!unsafe_ch_list || !ch_count) {
- mutex_unlock(&unsafe_channel_list_lock);
- return -EINVAL;
- }
-
- if (buf_len < (unsafe_channel_list.unsafe_ch_count * sizeof(u16))) {
- mutex_unlock(&unsafe_channel_list_lock);
- return -ENOMEM;
- }
-
- *ch_count = unsafe_channel_list.unsafe_ch_count;
- memcpy((char *)unsafe_ch_list,
- (char *)unsafe_channel_list.unsafe_ch_list,
- unsafe_channel_list.unsafe_ch_count * sizeof(u16));
- mutex_unlock(&unsafe_channel_list_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(cnss_get_wlan_unsafe_channel);
-
-int cnss_wlan_set_dfs_nol(const void *info, u16 info_len)
-{
- void *temp;
- struct cnss_dfs_nol_info *dfs_info;
-
- mutex_lock(&dfs_nol_info_lock);
- if (!info || !info_len) {
- mutex_unlock(&dfs_nol_info_lock);
- return -EINVAL;
- }
-
- temp = kmalloc(info_len, GFP_KERNEL);
- if (!temp) {
- mutex_unlock(&dfs_nol_info_lock);
- return -ENOMEM;
- }
-
- memcpy(temp, info, info_len);
- dfs_info = &dfs_nol_info;
- kfree(dfs_info->dfs_nol_info);
-
- dfs_info->dfs_nol_info = temp;
- dfs_info->dfs_nol_info_len = info_len;
- mutex_unlock(&dfs_nol_info_lock);
-
- return 0;
-}
-EXPORT_SYMBOL(cnss_wlan_set_dfs_nol);
-
-int cnss_wlan_get_dfs_nol(void *info, u16 info_len)
-{
- int len;
- struct cnss_dfs_nol_info *dfs_info;
-
- mutex_lock(&dfs_nol_info_lock);
- if (!info || !info_len) {
- mutex_unlock(&dfs_nol_info_lock);
- return -EINVAL;
- }
-
- dfs_info = &dfs_nol_info;
-
- if (!dfs_info->dfs_nol_info || dfs_info->dfs_nol_info_len == 0) {
- mutex_unlock(&dfs_nol_info_lock);
- return -ENOENT;
- }
-
- len = min(info_len, dfs_info->dfs_nol_info_len);
-
- memcpy(info, dfs_info->dfs_nol_info, len);
- mutex_unlock(&dfs_nol_info_lock);
-
- return len;
-}
-EXPORT_SYMBOL(cnss_wlan_get_dfs_nol);
diff --git a/drivers/net/wireless/cnss_genl/cnss_nl.c b/drivers/net/wireless/cnss_genl/cnss_nl.c
index fafd9ce4b4c4..29dd4c999f2d 100644
--- a/drivers/net/wireless/cnss_genl/cnss_nl.c
+++ b/drivers/net/wireless/cnss_genl/cnss_nl.c
@@ -64,6 +64,8 @@ static const struct nla_policy cld80211_policy[CLD80211_ATTR_MAX + 1] = {
[CLD80211_ATTR_VENDOR_DATA] = { .type = NLA_NESTED },
[CLD80211_ATTR_DATA] = { .type = NLA_BINARY,
.len = CLD80211_MAX_NL_DATA },
+ [CLD80211_ATTR_META_DATA] = { .type = NLA_BINARY,
+ .len = CLD80211_MAX_NL_DATA },
};
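
Declaring CLD80211_ATTR_META_DATA as NLA_BINARY with .len = CLD80211_MAX_NL_DATA makes nla_parse() reject oversized payloads before any handler sees them. A small sketch of consuming the attribute once validation has passed (the handler is hypothetical; the attribute id is the one above):

#include <net/netlink.h>

/* Assumes tb[] was filled by nla_parse() against cld80211_policy. */
static int handle_meta_data(struct nlattr **tb)
{
	const void *meta;
	int meta_len;

	if (!tb[CLD80211_ATTR_META_DATA])
		return 0;                       /* attribute is optional */

	meta     = nla_data(tb[CLD80211_ATTR_META_DATA]);
	meta_len = nla_len(tb[CLD80211_ATTR_META_DATA]);

	/* meta_len is already bounded by the policy's .len field */
	pr_debug("meta data: %d bytes\n", meta_len);
	(void)meta;
	return 0;
}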
static int cld80211_pre_doit(const struct genl_ops *ops, struct sk_buff *skb,
diff --git a/drivers/platform/msm/gpio-usbdetect.c b/drivers/platform/msm/gpio-usbdetect.c
index dc05d7108135..adf47fc32548 100644
--- a/drivers/platform/msm/gpio-usbdetect.c
+++ b/drivers/platform/msm/gpio-usbdetect.c
@@ -50,6 +50,7 @@ static irqreturn_t gpio_usbdetect_vbus_irq(int irq, void *data)
if (usb->vbus_state) {
dev_dbg(&usb->pdev->dev, "setting vbus notification\n");
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 1);
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1);
} else {
dev_dbg(&usb->pdev->dev, "setting vbus removed notification\n");
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB, 0);
@@ -85,6 +86,7 @@ static irqreturn_t gpio_usbdetect_id_irq_thread(int irq, void *data)
dev_dbg(&usb->pdev->dev, "starting usb HOST\n");
disable_irq(usb->vbus_det_irq);
extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_HOST, 1);
+ extcon_set_cable_state_(usb->extcon_dev, EXTCON_USB_SPEED, 1);
}
return IRQ_HANDLED;
}
@@ -186,6 +188,14 @@ static int gpio_usbdetect_probe(struct platform_device *pdev)
enable_irq_wake(usb->id_det_irq);
dev_set_drvdata(&pdev->dev, usb);
+ if (usb->id_det_irq) {
+ gpio_usbdetect_id_irq(usb->id_det_irq, usb);
+ if (!usb->id_state) {
+ gpio_usbdetect_id_irq_thread(usb->id_det_irq, usb);
+ return 0;
+ }
+ }
+
/* Read and report initial VBUS state */
gpio_usbdetect_vbus_irq(usb->vbus_det_irq, usb);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 4275e3d26157..ecbbe516266e 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -86,7 +86,9 @@ const char *ipa_event_name[] = {
__stringify(ADD_VLAN_IFACE),
__stringify(DEL_VLAN_IFACE),
__stringify(ADD_L2TP_VLAN_MAPPING),
- __stringify(DEL_L2TP_VLAN_MAPPING)
+ __stringify(DEL_L2TP_VLAN_MAPPING),
+ __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+ __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
};
const char *ipa_hdr_l2_type_name[] = {
@@ -812,10 +814,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+ rt_tbl_idx = ~0;
else
- rt_tbl_idx = ~0;
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
@@ -842,10 +845,11 @@ static ssize_t ipa_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
- else
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
rt_tbl_idx = ~0;
+ else
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index 5dbd43b44540..12b43882ed5b 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -411,12 +411,15 @@ int copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
{
int i, j;
+ /* prevent multiple threads from accessing num_q6_rule concurrently */
+ mutex_lock(&add_mux_channel_lock);
if (rule_req->filter_spec_list_valid == true) {
num_q6_rule = rule_req->filter_spec_list_len;
IPAWANDBG("Received (%d) install_flt_req\n", num_q6_rule);
} else {
num_q6_rule = 0;
IPAWANERR("got no UL rules from modem\n");
+ mutex_unlock(&add_mux_channel_lock);
return -EINVAL;
}
@@ -610,9 +613,11 @@ failure:
num_q6_rule = 0;
memset(ipa_qmi_ctx->q6_ul_filter_rule, 0,
sizeof(ipa_qmi_ctx->q6_ul_filter_rule));
+ mutex_unlock(&add_mux_channel_lock);
return -EINVAL;
success:
+ mutex_unlock(&add_mux_channel_lock);
return 0;
}
@@ -1622,9 +1627,12 @@ static int ipa_wwan_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
/* already got Q6 UL filter rules*/
if (ipa_qmi_ctx &&
ipa_qmi_ctx->modem_cfg_emb_pipe_flt
- == false)
+ == false) {
+ /* protect num_q6_rule */
+ mutex_lock(&add_mux_channel_lock);
rc = wwan_add_ul_flt_rule_to_ipa();
- else
+ mutex_unlock(&add_mux_channel_lock);
+ } else
rc = 0;
egress_set = true;
if (rc)
@@ -2687,6 +2695,9 @@ int rmnet_ipa_set_data_quota(struct wan_ioctl_set_data_quota *data)
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->interface_name);
@@ -2978,6 +2989,10 @@ int rmnet_ipa_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+ data->tetherIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3012,6 +3027,10 @@ int rmnet_ipa_query_tethering_stats_all(
int rc = 0;
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3055,6 +3074,9 @@ int rmnet_ipa_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
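
The interface-name fields in these ioctl structures arrive from user space as fixed IFNAMSIZ arrays with no guarantee of a trailing NUL, so every handler now terminates them before doing any string handling. The guard as a tiny standalone sketch (the struct stands in for the wan_ioctl_* types):

#include <string.h>

#define IFNAMSIZ 16     /* matches the kernel definition */

struct ioctl_args {
	char interface_name[IFNAMSIZ];  /* copied verbatim from user space */
};

static void sanitize_ifname(struct ioctl_args *args)
{
	/* force NUL termination before any strlen()/strcmp()-style use */
	args->interface_name[IFNAMSIZ - 1] = '\0';
}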
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index fd503f48f17c..73321df80ada 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -992,8 +992,52 @@ static long ipa3_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
break;
}
break;
+
+ case IPA_IOC_ADD_RT_RULE_EXT:
+ if (copy_from_user(header,
+ (const void __user *)arg,
+ sizeof(struct ipa_ioc_add_rt_rule_ext))) {
+ retval = -EFAULT;
+ break;
+ }
+ pre_entry =
+ ((struct ipa_ioc_add_rt_rule_ext *)header)->num_rules;
+ pyld_sz =
+ sizeof(struct ipa_ioc_add_rt_rule_ext) +
+ pre_entry * sizeof(struct ipa_rt_rule_add_ext);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ /* add check in case user-space module compromised */
+ if (unlikely(
+ ((struct ipa_ioc_add_rt_rule_ext *)param)->num_rules
+ != pre_entry)) {
+ IPAERR(" prevent memory corruption(%d not match %d)\n",
+ ((struct ipa_ioc_add_rt_rule_ext *)param)->
+ num_rules,
+ pre_entry);
+ retval = -EINVAL;
+ break;
+ }
+ if (ipa3_add_rt_rule_ext(
+ (struct ipa_ioc_add_rt_rule_ext *)param)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
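
The new case follows the driver's usual double-fetch guard: copy the fixed header to learn num_rules, allocate and copy the full payload, then verify that num_rules in the payload still matches the header, since user space could rewrite the field between the two copy_from_user() calls. The pattern condensed (struct and field names are illustrative, not the IPA definitions, and a real handler would also put an upper bound on the count):

#include <linux/slab.h>
#include <linux/uaccess.h>

struct rule_req {
	u32 num_rules;
	/* followed by num_rules variable-size entries in the real ioctl */
};

static long read_rule_req(unsigned long arg, size_t entry_size)
{
	struct rule_req hdr, *req;
	size_t pyld_sz;
	long ret = 0;

	if (copy_from_user(&hdr, (const void __user *)arg, sizeof(hdr)))
		return -EFAULT;

	pyld_sz = sizeof(*req) + (size_t)hdr.num_rules * entry_size;
	req = kzalloc(pyld_sz, GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	if (copy_from_user(req, (const void __user *)arg, pyld_sz)) {
		ret = -EFAULT;
		goto out;
	}

	/* the count must not have changed between the two copies */
	if (req->num_rules != hdr.num_rules) {
		ret = -EINVAL;
		goto out;
	}
	/* ... process req->num_rules entries ... */
out:
	kfree(req);
	return ret;
}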
case IPA_IOC_ADD_RT_RULE_AFTER:
- if (copy_from_user(header, (u8 *)arg,
+ if (copy_from_user(header, (const void __user *)arg,
sizeof(struct ipa_ioc_add_rt_rule_after))) {
retval = -EFAULT;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index c7ab616cb5b8..71da7d28a451 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -67,7 +67,9 @@ const char *ipa3_event_name[] = {
__stringify(ADD_VLAN_IFACE),
__stringify(DEL_VLAN_IFACE),
__stringify(ADD_L2TP_VLAN_MAPPING),
- __stringify(DEL_L2TP_VLAN_MAPPING)
+ __stringify(DEL_L2TP_VLAN_MAPPING),
+ __stringify(IPA_PER_CLIENT_STATS_CONNECT_EVENT),
+ __stringify(IPA_PER_CLIENT_STATS_DISCONNECT_EVENT),
};
const char *ipa3_hdr_l2_type_name[] = {
@@ -867,10 +869,11 @@ static ssize_t ipa3_read_flt(struct file *file, char __user *ubuf, size_t count,
eq = true;
} else {
rt_tbl = ipa3_id_find(entry->rule.rt_tbl_hdl);
- if (rt_tbl)
- rt_tbl_idx = rt_tbl->idx;
+ if (rt_tbl == NULL ||
+ rt_tbl->cookie != IPA_RT_TBL_COOKIE)
+ rt_tbl_idx = ~0;
else
- rt_tbl_idx = ~0;
+ rt_tbl_idx = rt_tbl->idx;
bitmap = entry->rule.attrib.attrib_mask;
eq = false;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index c2fb87ab757b..a03d8978c6c2 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -1157,6 +1157,13 @@ int ipa3_add_flt_rule_after(struct ipa_ioc_add_flt_rule_after *rules)
goto bail;
}
+ if (entry->cookie != IPA_FLT_COOKIE) {
+ IPAERR_RL("Invalid cookie value = %u flt hdl id = %d\n",
+ entry->cookie, rules->add_after_hdl);
+ result = -EINVAL;
+ goto bail;
+ }
+
if (entry->tbl != tbl) {
IPAERR_RL("given entry does not match the table\n");
result = -EINVAL;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 5ff926a60129..89c7b66b98d6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -435,6 +435,7 @@ struct ipa3_rt_entry {
int id;
u16 prio;
u16 rule_id;
+ u16 rule_id_valid;
};
/**
@@ -1615,6 +1616,8 @@ int ipa3_del_hdr_proc_ctx_by_user(struct ipa_ioc_del_hdr_proc_ctx *hdls,
*/
int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules);
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules);
+
int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules);
int ipa3_del_rt_rule(struct ipa_ioc_del_rt_rule *hdls);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index 571852c076ea..4897c4dccf59 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -695,6 +695,57 @@ int ipa3_qmi_filter_request_ex_send(
resp.resp.error, "ipa_install_filter");
}
+/* sending ul-filter-install-request to modem*/
+int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+ struct ipa_configure_ul_firewall_rules_resp_msg_v01 resp;
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ IPAWANDBG("IPACM pass %u rules to Q6\n",
+ req->firewall_rules_list_len);
+
+ mutex_lock(&ipa3_qmi_lock);
+ if (ipa3_qmi_ctx != NULL) {
+ /* cache the qmi_filter_request */
+ memcpy(
+ &(ipa3_qmi_ctx->ipa_configure_ul_firewall_rules_req_msg_cache[
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg]),
+ req,
+ sizeof(struct
+ ipa_configure_ul_firewall_rules_req_msg_v01));
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg++;
+ ipa3_qmi_ctx->num_ipa_configure_ul_firewall_rules_req_msg %=
+ MAX_NUM_QMI_RULE_CACHE;
+ }
+ mutex_unlock(&ipa3_qmi_lock);
+
+ req_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01;
+ req_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei;
+
+ memset(&resp, 0,
+ sizeof(struct ipa_configure_ul_firewall_rules_resp_msg_v01));
+ resp_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei;
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
+ req,
+ sizeof(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01),
+ &resp_desc, &resp, sizeof(resp),
+ QMI_SEND_REQ_TIMEOUT_MS);
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_REQ_V01, resp.resp.result,
+ resp.resp.error, "ipa_received_ul_firewall_filter");
+}
+
int ipa3_qmi_enable_force_clear_datapath_send(
struct ipa_enable_force_clear_datapath_req_msg_v01 *req)
{
@@ -880,6 +931,7 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
void *ind_cb_priv)
{
struct ipa_data_usage_quota_reached_ind_msg_v01 qmi_ind;
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01 qmi_ul_firewall_ind;
struct msg_desc qmi_ind_desc;
int rc = 0;
@@ -888,7 +940,7 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
return;
}
- if (QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01 == msg_id) {
+ if (msg_id == QMI_IPA_DATA_USAGE_QUOTA_REACHED_IND_V01) {
memset(&qmi_ind, 0, sizeof(
struct ipa_data_usage_quota_reached_ind_msg_v01));
qmi_ind_desc.max_msg_len =
@@ -908,6 +960,36 @@ static void ipa3_q6_clnt_ind_cb(struct qmi_handle *handle, unsigned int msg_id,
ipa3_broadcast_quota_reach_ind(qmi_ind.apn.mux_id,
IPA_UPSTEAM_MODEM);
}
+
+ if (msg_id == QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01) {
+ memset(&qmi_ul_firewall_ind, 0, sizeof(
+ struct ipa_configure_ul_firewall_rules_ind_msg_v01));
+ qmi_ind_desc.max_msg_len =
+ QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_MAX_MSG_LEN_V01;
+ qmi_ind_desc.msg_id = QMI_IPA_INSTALL_UL_FIREWALL_RULES_IND_V01;
+ qmi_ind_desc.ei_array =
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei;
+
+ rc = qmi_kernel_decode(
+ &qmi_ind_desc, &qmi_ul_firewall_ind, msg, msg_len);
+ if (rc < 0) {
+ IPAWANERR("Error decoding msg_id %d\n", msg_id);
+ return;
+ }
+
+ IPAWANDBG("UL firewall rules install indication on Q6");
+ if (qmi_ul_firewall_ind.result.is_success ==
+ QMI_IPA_UL_FIREWALL_STATUS_SUCCESS_V01) {
+ IPAWANDBG(" : Success\n");
+ IPAWANDBG
+ ("Mux ID : %d\n", qmi_ul_firewall_ind.result.mux_id);
+ } else if (qmi_ul_firewall_ind.result.is_success ==
+ QMI_IPA_UL_FIREWALL_STATUS_FAILURE_V01){
+ IPAWANERR(": Failure\n");
+ } else {
+ IPAWANERR(": Unexpected Result");
+ }
+ }
}
static void ipa3_q6_clnt_svc_arrive(struct work_struct *work)
@@ -1363,6 +1445,74 @@ int ipa3_qmi_stop_data_qouta(void)
resp.resp.error, "ipa_stop_data_usage_quota_req_msg_v01");
}
+int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01;
+ req_desc.ei_array =
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id =
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01;
+ resp_desc.ei_array =
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_ENABLE_PER_CLIENT_STATS_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_ENABLE_PER_CLIENT_STATS_REQ_V01, resp->resp.result,
+ resp->resp.error, "ipa3_qmi_enable_per_client_stats");
+}
+
+int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+ struct msg_desc req_desc, resp_desc;
+ int rc;
+
+ req_desc.max_msg_len = QMI_IPA_GET_STATS_PER_CLIENT_REQ_MAX_MSG_LEN_V01;
+ req_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01;
+ req_desc.ei_array = ipa3_get_stats_per_client_req_msg_data_v01_ei;
+
+ resp_desc.max_msg_len =
+ QMI_IPA_GET_STATS_PER_CLIENT_RESP_MAX_MSG_LEN_V01;
+ resp_desc.msg_id = QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01;
+ resp_desc.ei_array = ipa3_get_stats_per_client_resp_msg_data_v01_ei;
+
+ IPAWANDBG("Sending QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01\n");
+
+ rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
+ sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+ &resp_desc, resp,
+ sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+ QMI_SEND_STATS_REQ_TIMEOUT_MS);
+
+ IPAWANDBG("QMI_IPA_GET_STATS_PER_CLIENT_RESP_V01 received\n");
+
+ return ipa3_check_qmi_response(rc,
+ QMI_IPA_GET_STATS_PER_CLIENT_REQ_V01, resp->resp.result,
+ resp->resp.error,
+ "struct ipa_get_stats_per_client_req_msg_v01");
+}
+
void ipa3_qmi_init(void)
{
mutex_init(&ipa3_qmi_lock);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
index e6f1e2ce0b75..297dca6b88cf 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.h
@@ -32,54 +32,62 @@
#define IPAWANDBG(fmt, args...) \
do { \
- pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANDBG_LOW(fmt, args...) \
do { \
- pr_debug(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_debug(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANERR(fmt, args...) \
do { \
- pr_err(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_err(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
#define IPAWANINFO(fmt, args...) \
do { \
- pr_info(DEV_NAME " %s:%d " fmt, __func__, __LINE__, ## args); \
+ pr_info(DEV_NAME " %s:%d " fmt, __func__,\
+ __LINE__, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
IPA_IPC_LOGGING(ipa_get_ipc_logbuf_low(), \
- DEV_NAME " %s:%d " fmt, ## args); \
+ DEV_NAME " %s:%d " fmt, ## args); \
} while (0)
extern struct ipa3_qmi_context *ipa3_qmi_ctx;
struct ipa3_qmi_context {
-struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
-u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
-int num_ipa_install_fltr_rule_req_msg;
-struct ipa_install_fltr_rule_req_msg_v01
+ struct ipa_ioc_ext_intf_prop q6_ul_filter_rule[MAX_NUM_Q6_RULE];
+ u32 q6_ul_filter_rule_hdl[MAX_NUM_Q6_RULE];
+ int num_ipa_install_fltr_rule_req_msg;
+ struct ipa_install_fltr_rule_req_msg_v01
ipa_install_fltr_rule_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_install_fltr_rule_req_ex_msg;
-struct ipa_install_fltr_rule_req_ex_msg_v01
+ int num_ipa_install_fltr_rule_req_ex_msg;
+ struct ipa_install_fltr_rule_req_ex_msg_v01
ipa_install_fltr_rule_req_ex_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-int num_ipa_fltr_installed_notif_req_msg;
-struct ipa_fltr_installed_notif_req_msg_v01
+ int num_ipa_fltr_installed_notif_req_msg;
+ struct ipa_fltr_installed_notif_req_msg_v01
ipa_fltr_installed_notif_req_msg_cache[MAX_NUM_QMI_RULE_CACHE];
-bool modem_cfg_emb_pipe_flt;
+ int num_ipa_configure_ul_firewall_rules_req_msg;
+ struct ipa_configure_ul_firewall_rules_req_msg_v01
+ ipa_configure_ul_firewall_rules_req_msg_cache
+ [MAX_NUM_QMI_RULE_CACHE];
+ bool modem_cfg_emb_pipe_flt;
};
struct ipa3_rmnet_mux_val {
@@ -95,56 +103,69 @@ extern struct elem_info ipa3_init_modem_driver_req_msg_data_v01_ei[];
extern struct elem_info ipa3_init_modem_driver_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_indication_reg_req_msg_data_v01_ei[];
extern struct elem_info ipa3_indication_reg_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_master_driver_init_complt_ind_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_req_msg_data_v01_ei[];
extern struct elem_info ipa3_install_fltr_rule_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_fltr_installed_notif_req_msg_data_v01_ei[];
extern struct elem_info ipa3_fltr_installed_notif_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_enable_force_clear_datapath_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_disable_force_clear_datapath_req_msg_data_v01_ei[];
extern struct elem_info
ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_config_req_msg_data_v01_ei[];
extern struct elem_info ipa3_config_resp_msg_data_v01_ei[];
extern struct elem_info ipa3_get_data_stats_req_msg_data_v01_ei[];
extern struct elem_info ipa3_get_data_stats_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
-extern struct elem_info ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
-extern struct elem_info ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
-
- extern struct elem_info
- ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_ul_firewall_rule_type_data_v01_ei[];
- extern struct elem_info
- ipa3_ul_firewall_config_result_type_data_v01_ei[];
- extern struct elem_info
- ipa3_per_client_stats_info_type_data_v01_ei[];
- extern struct elem_info
- ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_get_stats_per_client_req_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
- extern struct elem_info
- ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
+
+extern struct elem_info
+ ipa3_get_apn_data_stats_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_get_apn_data_stats_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_set_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_set_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_data_usage_quota_reached_ind_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_stop_data_usage_quota_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_stop_data_usage_quota_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_init_modem_driver_cmplt_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_init_modem_driver_cmplt_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_install_fltr_rule_req_ex_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_install_fltr_rule_resp_ex_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_ul_firewall_rule_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_ul_firewall_config_result_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_per_client_stats_info_type_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_per_client_stats_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_enable_per_client_stats_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_get_stats_per_client_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_get_stats_per_client_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_req_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_resp_msg_data_v01_ei[];
+extern struct elem_info
+ ipa3_configure_ul_firewall_rules_ind_msg_data_v01_ei[];
/**
* struct ipa3_rmnet_context - IPA rmnet context
@@ -173,6 +194,9 @@ int ipa3_qmi_filter_request_send(
int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req);
+int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req);
+
/* sending filter-installed-notify-request to modem*/
int ipa3_qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01
*req);
@@ -219,6 +243,16 @@ int rmnet_ipa3_query_tethering_stats_all(
struct wan_ioctl_query_tether_stats_all *data);
int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data);
+int rmnet_ipa3_set_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_clear_lan_client_info(struct wan_ioctl_lan_client_info *data);
+
+int rmnet_ipa3_send_lan_client_msg(struct wan_ioctl_send_lan_client_msg *data);
+
+int rmnet_ipa3_enable_per_client_stats(bool *data);
+
+int rmnet_ipa3_query_per_client_stats(
+ struct wan_ioctl_query_per_client_stats *data);
int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
struct ipa_get_data_stats_resp_msg_v01 *resp);
@@ -232,6 +266,14 @@ int ipa3_qmi_stop_data_qouta(void);
void ipa3_q6_handshake_complete(bool ssr_bootup);
+int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp);
+
+int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp);
+
void ipa3_qmi_init(void);
void ipa3_qmi_cleanup(void);
@@ -252,6 +294,12 @@ static inline int ipa3_qmi_filter_request_send(
return -EPERM;
}
+static inline int ipa3_qmi_ul_filter_request_send(
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *req)
+{
+ return -EPERM;
+}
+
static inline int ipa3_qmi_filter_request_ex_send(
struct ipa_install_fltr_rule_req_ex_msg_v01 *req)
{
@@ -348,12 +396,28 @@ static inline int ipa3_qmi_stop_data_qouta(void)
static inline void ipa3_q6_handshake_complete(bool ssr_bootup) { }
+static inline int ipa3_qmi_enable_per_client_stats(
+ struct ipa_enable_per_client_stats_req_msg_v01 *req,
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
+static inline int ipa3_qmi_get_per_client_packet_stats(
+ struct ipa_get_stats_per_client_req_msg_v01 *req,
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp)
+{
+ return -EPERM;
+}
+
static inline void ipa3_qmi_init(void)
{
+
}
static inline void ipa3_qmi_cleanup(void)
{
+
}
#endif /* CONFIG_RMNET_IPA3 */
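
When CONFIG_RMNET_IPA3 is off, the header keeps every function name but swaps the prototypes for static inline stubs returning -EPERM, so callers build unchanged and simply see an error at run time. The shape of that pattern on a single function (names are placeholders):

#include <linux/errno.h>

struct example_req;

#ifdef CONFIG_EXAMPLE_FEATURE
int example_send_request(struct example_req *req);
#else
static inline int example_send_request(struct example_req *req)
{
	/* feature compiled out: clean error instead of a link failure */
	return -EPERM;
}
#endif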
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
index ff57e3bd48f0..b9af782b4f6e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_rt.c
@@ -918,7 +918,8 @@ static int __ipa_rt_validate_hndls(const struct ipa_rt_rule *rule,
static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
const struct ipa_rt_rule *rule,
struct ipa3_rt_tbl *tbl, struct ipa3_hdr_entry *hdr,
- struct ipa3_hdr_proc_ctx_entry *proc_ctx)
+ struct ipa3_hdr_proc_ctx_entry *proc_ctx,
+ u16 rule_id)
{
int id;
@@ -933,11 +934,16 @@ static int __ipa_create_rt_entry(struct ipa3_rt_entry **entry,
(*(entry))->tbl = tbl;
(*(entry))->hdr = hdr;
(*(entry))->proc_ctx = proc_ctx;
- id = ipa3_alloc_rule_id(&tbl->rule_ids);
- if (id < 0) {
- IPAERR("failed to allocate rule id\n");
- WARN_ON(1);
- goto alloc_rule_id_fail;
+ if (rule_id) {
+ id = rule_id;
+ (*(entry))->rule_id_valid = 1;
+ } else {
+ id = ipa3_alloc_rule_id(&tbl->rule_ids);
+ if (id < 0) {
+ IPAERR("failed to allocate rule id\n");
+ WARN_ON(1);
+ goto alloc_rule_id_fail;
+ }
}
(*(entry))->rule_id = id;
@@ -984,7 +990,8 @@ ipa_insert_failed:
}
static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
- const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl)
+ const struct ipa_rt_rule *rule, u8 at_rear, u32 *rule_hdl,
+ u16 rule_id)
{
struct ipa3_rt_tbl *tbl;
struct ipa3_rt_entry *entry;
@@ -1012,7 +1019,8 @@ static int __ipa_add_rt_rule(enum ipa_ip_type ip, const char *name,
goto error;
}
- if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx,
+ rule_id))
goto error;
if (at_rear)
@@ -1043,7 +1051,7 @@ static int __ipa_add_rt_rule_after(struct ipa3_rt_tbl *tbl,
if (__ipa_rt_validate_hndls(rule, &hdr, &proc_ctx))
goto error;
- if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx))
+ if (__ipa_create_rt_entry(&entry, rule, tbl, hdr, proc_ctx, 0))
goto error;
list_add(&entry->link, &((*add_after_entry)->link));
@@ -1087,8 +1095,54 @@ int ipa3_add_rt_rule(struct ipa_ioc_add_rt_rule *rules)
if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
&rules->rules[i].rule,
rules->rules[i].at_rear,
- &rules->rules[i].rt_rule_hdl)) {
- IPAERR_RL("failed to add rt rule %d\n", i);
+ &rules->rules[i].rt_rule_hdl,
+ 0)) {
+ IPAERR("failed to add rt rule %d\n", i);
+ rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
+ } else {
+ rules->rules[i].status = 0;
+ }
+ }
+
+ if (rules->commit)
+ if (ipa3_ctx->ctrl->ipa3_commit_rt(rules->ip)) {
+ ret = -EPERM;
+ goto bail;
+ }
+
+ ret = 0;
+bail:
+ mutex_unlock(&ipa3_ctx->lock);
+ return ret;
+}
+
+/**
+ * ipa3_add_rt_rule_ext() - Add the specified routing rules to SW with rule id
+ * and optionally commit to IPA HW
+ * @rules: [inout] set of routing rules to add
+ *
+ * Returns: 0 on success, negative on failure
+ *
+ * Note: Should not be called from atomic context
+ */
+int ipa3_add_rt_rule_ext(struct ipa_ioc_add_rt_rule_ext *rules)
+{
+ int i;
+ int ret;
+
+ if (rules == NULL || rules->num_rules == 0 || rules->ip >= IPA_IP_MAX) {
+ IPAERR("bad parm\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&ipa3_ctx->lock);
+ for (i = 0; i < rules->num_rules; i++) {
+ if (__ipa_add_rt_rule(rules->ip, rules->rt_tbl_name,
+ &rules->rules[i].rule,
+ rules->rules[i].at_rear,
+ &rules->rules[i].rt_rule_hdl,
+ rules->rules[i].rule_id)) {
+ IPAERR("failed to add rt rule %d\n", i);
rules->rules[i].status = IPA_RT_STATUS_OF_ADD_FAILED;
} else {
rules->rules[i].status = 0;
@@ -1152,6 +1206,13 @@ int ipa3_add_rt_rule_after(struct ipa_ioc_add_rt_rule_after *rules)
goto bail;
}
+ if (entry->cookie != IPA_RT_RULE_COOKIE) {
+ IPAERR_RL("Invalid cookie value = %u rule %d in rt tbls\n",
+ entry->cookie, rules->add_after_hdl);
+ ret = -EINVAL;
+ goto bail;
+ }
+
if (entry->tbl != tbl) {
IPAERR_RL("given rt rule does not match the table\n");
ret = -EINVAL;
@@ -1229,7 +1290,9 @@ int __ipa3_del_rt_rule(u32 rule_hdl)
IPADBG("del rt rule tbl_idx=%d rule_cnt=%d rule_id=%d\n ref_cnt=%u",
entry->tbl->idx, entry->tbl->rule_cnt,
entry->rule_id, entry->tbl->ref_cnt);
- idr_remove(&entry->tbl->rule_ids, entry->rule_id);
+ /* if rule id was allocated from idr, remove it */
+ if (!entry->rule_id_valid)
+ idr_remove(&entry->tbl->rule_ids, entry->rule_id);
if (entry->tbl->rule_cnt == 0 && entry->tbl->ref_cnt == 0) {
if (__ipa_del_rt_tbl(entry->tbl))
IPAERR_RL("fail to del RT tbl\n");
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index 8fbde6675070..c810adc466b3 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -142,6 +142,10 @@ struct rmnet_ipa3_context {
u32 ipa3_to_apps_hdl;
struct mutex pipe_handle_guard;
struct mutex add_mux_channel_lock;
+ struct mutex per_client_stats_guard;
+ struct ipa_tether_device_info
+ tether_device
+ [IPACM_MAX_CLIENT_DEVICE_TYPES];
};
static struct rmnet_ipa3_context *rmnet_ipa3_ctx;
@@ -423,6 +427,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
{
int i, j;
+ /* prevent multiple threads from accessing rmnet_ipa3_ctx->num_q6_rules */
+ mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
if (rule_req->filter_spec_ex_list_valid == true) {
rmnet_ipa3_ctx->num_q6_rules =
rule_req->filter_spec_ex_list_len;
@@ -431,6 +437,8 @@ int ipa3_copy_ul_filter_rule_to_ipa(struct ipa_install_fltr_rule_req_msg_v01
} else {
rmnet_ipa3_ctx->num_q6_rules = 0;
IPAWANERR("got no UL rules from modem\n");
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return -EINVAL;
}
@@ -633,9 +641,13 @@ failure:
rmnet_ipa3_ctx->num_q6_rules = 0;
memset(ipa3_qmi_ctx->q6_ul_filter_rule, 0,
sizeof(ipa3_qmi_ctx->q6_ul_filter_rule));
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return -EINVAL;
success:
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
return 0;
}
@@ -1437,8 +1449,13 @@ static int handle3_egress_format(struct net_device *dev,
if (rmnet_ipa3_ctx->num_q6_rules != 0) {
/* already got Q6 UL filter rules*/
- if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false)
+ if (ipa3_qmi_ctx->modem_cfg_emb_pipe_flt == false) {
+ /* prevent multiple threads from accessing num_q6_rules */
+ mutex_lock(&rmnet_ipa3_ctx->add_mux_channel_lock);
rc = ipa3_wwan_add_ul_flt_rule_to_ipa();
+ mutex_unlock(&rmnet_ipa3_ctx->
+ add_mux_channel_lock);
+ }
if (rc)
IPAWANERR("install UL rules failed\n");
else
@@ -2571,7 +2588,9 @@ static void rmnet_ipa_free_msg(void *buff, u32 len, u32 type)
}
if (type != IPA_TETHERING_STATS_UPDATE_STATS &&
- type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS) {
+ type != IPA_TETHERING_STATS_UPDATE_NETWORK_STATS &&
+ type != IPA_PER_CLIENT_STATS_CONNECT_EVENT &&
+ type != IPA_PER_CLIENT_STATS_DISCONNECT_EVENT) {
IPAWANERR("Wrong type given. buff %p type %d\n",
buff, type);
}
@@ -2819,6 +2838,9 @@ int rmnet_ipa3_set_data_quota(struct wan_ioctl_set_data_quota *data)
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->interface_name[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->interface_name);
@@ -3111,6 +3133,10 @@ int rmnet_ipa3_query_tethering_stats(struct wan_ioctl_query_tether_stats *data,
enum ipa_upstream_type upstream_type;
int rc = 0;
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+ data->tetherIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3145,6 +3171,10 @@ int rmnet_ipa3_query_tethering_stats_all(
int rc = 0;
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3188,6 +3218,9 @@ int rmnet_ipa3_reset_tethering_stats(struct wan_ioctl_reset_tether_stats *data)
memset(&tether_stats, 0, sizeof(struct wan_ioctl_query_tether_stats));
+ /* prevent string buffer overflows */
+ data->upstreamIface[IFNAMSIZ-1] = '\0';
+
/* get IPA backhaul type */
upstream_type = find_upstream_type(data->upstreamIface);
@@ -3317,8 +3350,488 @@ void ipa3_q6_handshake_complete(bool ssr_bootup)
}
}
+static inline bool rmnet_ipa3_check_any_client_inited
+(
+ enum ipacm_per_client_device_type device_type
+)
+{
+ int i = 0;
+
+ for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ if (rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].client_idx != -1 &&
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].inited) {
+ IPAWANERR("Found client index: %d which is inited\n",
+ i);
+ return true;
+ }
+ }
+
+ return false;
+}
+
+static inline int rmnet_ipa3_get_lan_client_info
+(
+ enum ipacm_per_client_device_type device_type,
+ uint8_t mac[]
+)
+{
+ int i = 0;
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ mac[0], mac[1], mac[2],
+ mac[3], mac[4], mac[5]);
+
+ for (; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ if (memcmp(
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].mac,
+ mac,
+ IPA_MAC_ADDR_SIZE) == 0) {
+ IPAWANDBG("Matched client index: %d\n", i);
+ return i;
+ }
+ }
+
+ return -EINVAL;
+}
+
+static inline int rmnet_ipa3_delete_lan_client_info
+(
+ enum ipacm_per_client_device_type device_type,
+ int lan_clnt_idx
+)
+{
+ struct ipa_lan_client *lan_client = NULL;
+ int i;
+
+ /* Check if the request is to clean up all clients. */
+ if (lan_clnt_idx == 0xffffffff) {
+ /* Reset the complete device info. */
+ memset(&rmnet_ipa3_ctx->tether_device[device_type], 0,
+ sizeof(struct ipa_tether_device_info));
+ rmnet_ipa3_ctx->tether_device[device_type].ul_src_pipe = -1;
+ for (i = 0; i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++)
+ rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[i].client_idx = -1;
+ } else {
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[device_type].
+ lan_client[lan_clnt_idx];
+ /* Reset the client info before sending the message. */
+ memset(lan_client, 0, sizeof(struct ipa_lan_client));
+ lan_client->client_idx = -1;
+
+ }
+ return 0;
+}
+
+/* rmnet_ipa3_set_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SET_LAN_CLIENT_INFO.
+ * It is used to store LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_set_lan_client_info(
+ struct wan_ioctl_lan_client_info *data)
+{
+
+ struct ipa_lan_client *lan_client = NULL;
+
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->mac[0], data->mac[1], data->mac[2],
+ data->mac[3], data->mac[4], data->mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if Client index is valid. */
+ if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+ data->client_idx < 0) {
+ IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ if (data->client_init) {
+ /* check if the client is already inited. */
+ if (rmnet_ipa3_ctx->tether_device[data->device_type]
+ .lan_client[data->client_idx].inited) {
+ IPAWANERR("Client already inited: %d:%d\n",
+ data->device_type, data->client_idx);
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ }
+
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[data->client_idx];
+
+ memcpy(lan_client->mac, data->mac, IPA_MAC_ADDR_SIZE);
+
+ lan_client->client_idx = data->client_idx;
+
+ /* Update the Source pipe. */
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe =
+ ipa3_get_ep_mapping(data->ul_src_pipe);
+
+ /* Update the header length if not set. */
+ if (!rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len)
+ rmnet_ipa3_ctx->tether_device[data->device_type].hdr_len =
+ data->hdr_len;
+
+ lan_client->inited = true;
+
+ rmnet_ipa3_ctx->tether_device[data->device_type].num_clients++;
+
+ IPAWANDBG("Set the lan client info: %d, %d, %d\n",
+ lan_client->client_idx,
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe,
+ rmnet_ipa3_ctx->tether_device[data->device_type].num_clients);
+
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ return 0;
+}
+
+/* rmnet_ipa3_clear_lan_client_info() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_CLEAR_LAN_CLIENT_INFO.
+ * It is used to delete LAN client information which
+ * is used to fetch the packet stats for a client.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_clear_lan_client_info(
+ struct wan_ioctl_lan_client_info *data)
+{
+
+ struct ipa_lan_client *lan_client = NULL;
+
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->mac[0], data->mac[1], data->mac[2],
+ data->mac[3], data->mac[4], data->mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if Client index is valid. */
+ if (data->client_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS ||
+ data->client_idx < 0) {
+ IPAWANERR("Invalid Client Index: %d\n", data->client_idx);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[data->client_idx];
+
+ if (!data->client_init) {
+ /* check if the client is already de-inited. */
+ if (!lan_client->inited) {
+ IPAWANERR("Client already de-inited: %d:%d\n",
+ data->device_type, data->client_idx);
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ }
+
+ lan_client->inited = false;
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ return 0;
+}
+
+
+/* rmnet_ipa3_send_lan_client_msg() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_SEND_LAN_CLIENT_MSG.
+ * It is used to send LAN client information to IPACM.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_send_lan_client_msg(
+ struct wan_ioctl_send_lan_client_msg *data)
+{
+ struct ipa_msg_meta msg_meta;
+ int rc;
+ struct ipa_lan_client_msg *lan_client;
+
+ /* Allocate a message to notify IPACM of the client event. */
+ lan_client = kzalloc(sizeof(struct ipa_lan_client_msg),
+ GFP_KERNEL);
+ if (!lan_client) {
+ IPAWANERR("Can't allocate memory for tether_info\n");
+ return -ENOMEM;
+ }
+ memset(&msg_meta, 0, sizeof(struct ipa_msg_meta));
+ memcpy(lan_client, &data->lan_client,
+ sizeof(struct ipa_lan_client_msg));
+ msg_meta.msg_type = data->client_event;
+ msg_meta.msg_len = sizeof(struct ipa_lan_client_msg);
+
+ rc = ipa_send_msg(&msg_meta, lan_client, rmnet_ipa_free_msg);
+ if (rc) {
+ IPAWANERR("ipa_send_msg failed: %d\n", rc);
+ kfree(lan_client);
+ return rc;
+ }
+ return 0;
+}
+
+/* rmnet_ipa3_enable_per_client_stats() -
+ * @data - IOCTL data
+ *
+ * This function handles WAN_IOC_ENABLE_PER_CLIENT_STATS.
+ * It is used to indicate Q6 to start capturing per client stats.
+ *
+ * Return codes:
+ * 0: Success
+ * -EINVAL: Invalid args provided
+ */
+int rmnet_ipa3_enable_per_client_stats(
+ bool *data)
+{
+ struct ipa_enable_per_client_stats_req_msg_v01 *req;
+ struct ipa_enable_per_client_stats_resp_msg_v01 *resp;
+ int rc;
+
+ req =
+ kzalloc(sizeof(struct ipa_enable_per_client_stats_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ return -ENOMEM;
+ }
+ resp =
+ kzalloc(sizeof(struct ipa_enable_per_client_stats_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ kfree(req);
+ return -ENOMEM;
+ }
+ memset(req, 0,
+ sizeof(struct ipa_enable_per_client_stats_req_msg_v01));
+ memset(resp, 0,
+ sizeof(struct ipa_enable_per_client_stats_resp_msg_v01));
+
+ if (*data)
+ req->enable_per_client_stats = 1;
+ else
+ req->enable_per_client_stats = 0;
+
+ rc = ipa3_qmi_enable_per_client_stats(req, resp);
+ if (rc) {
+ IPAWANERR("can't enable per client stats\n");
+ kfree(req);
+ kfree(resp);
+ return rc;
+ }
+
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
+
+int rmnet_ipa3_query_per_client_stats(
+ struct wan_ioctl_query_per_client_stats *data)
+{
+ struct ipa_get_stats_per_client_req_msg_v01 *req;
+ struct ipa_get_stats_per_client_resp_msg_v01 *resp;
+ int rc, lan_clnt_idx, lan_clnt_idx1, i;
+ struct ipa_lan_client *lan_client = NULL;
+
+
+ IPAWANDBG("Client MAC %02x:%02x:%02x:%02x:%02x:%02x\n",
+ data->client_info[0].mac[0],
+ data->client_info[0].mac[1],
+ data->client_info[0].mac[2],
+ data->client_info[0].mac[3],
+ data->client_info[0].mac[4],
+ data->client_info[0].mac[5]);
+
+ /* Check if Device type is valid. */
+ if (data->device_type >= IPACM_MAX_CLIENT_DEVICE_TYPES ||
+ data->device_type < 0) {
+ IPAWANERR("Invalid Device type: %d\n", data->device_type);
+ return -EINVAL;
+ }
+
+ /* Check if num_clients is valid. */
+ if (data->num_clients != IPA_MAX_NUM_HW_PATH_CLIENTS &&
+ data->num_clients != 1) {
+ IPAWANERR("Invalid number of clients: %d\n", data->num_clients);
+ return -EINVAL;
+ }
+
+ mutex_lock(&rmnet_ipa3_ctx->per_client_stats_guard);
+
+ if (data->num_clients == 1) {
+ /* Check if the client info is valid.*/
+ lan_clnt_idx1 = rmnet_ipa3_get_lan_client_info(
+ data->device_type,
+ data->client_info[0].mac);
+ if (lan_clnt_idx1 < 0) {
+ IPAWANERR("Client info not available, return.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EINVAL;
+ }
+ lan_client =
+ &rmnet_ipa3_ctx->tether_device[data->device_type].
+ lan_client[lan_clnt_idx1];
+ /*
+ * Check if disconnect flag is set and
+ * see if all the clients info are cleared.
+ */
+ if (data->disconnect_clnt &&
+ lan_client->inited) {
+ IPAWANERR("Client still inited. Try again.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EAGAIN;
+ }
+
+ } else {
+ /* Max number of clients. */
+ /* Check if disconnect flag is set and
+ * see if all the clients info are cleared.
+ */
+ if (data->disconnect_clnt &&
+ rmnet_ipa3_check_any_client_inited(data->device_type)) {
+ IPAWANERR("Client still inited. Try again.\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -EAGAIN;
+ }
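+ /* 0xffffffff selects all clients of this device type in the stats query */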
+ lan_clnt_idx1 = 0xffffffff;
+ }
+
+ req = kzalloc(sizeof(struct ipa_get_stats_per_client_req_msg_v01),
+ GFP_KERNEL);
+ if (!req) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ return -ENOMEM;
+ }
+ resp = kzalloc(sizeof(struct ipa_get_stats_per_client_resp_msg_v01),
+ GFP_KERNEL);
+ if (!resp) {
+ IPAWANERR("Can't allocate memory for stats message\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ return -ENOMEM;
+ }
+ memset(req, 0, sizeof(struct ipa_get_stats_per_client_req_msg_v01));
+ memset(resp, 0, sizeof(struct ipa_get_stats_per_client_resp_msg_v01));
+
+ if (data->reset_stats) {
+ req->reset_stats_valid = true;
+ req->reset_stats = true;
+ IPAWANDBG("fetch and reset the client stats\n");
+ }
+
+ req->client_id = lan_clnt_idx1;
+ req->src_pipe_id =
+ rmnet_ipa3_ctx->tether_device[data->device_type].ul_src_pipe;
+
+ IPAWANDBG("fetch the client stats for %d, %d\n", req->client_id,
+ req->src_pipe_id);
+
+ rc = ipa3_qmi_get_per_client_packet_stats(req, resp);
+ if (rc) {
+ IPAWANERR("can't get per client stats\n");
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ return rc;
+ }
+
+ if (resp->per_client_stats_list_valid) {
+ for (i = 0; i < resp->per_client_stats_list_len
+ && i < IPA_MAX_NUM_HW_PATH_CLIENTS; i++) {
+ /* Subtract the header bytes from the DL bytes. */
+ data->client_info[i].ipv4_rx_bytes =
+ (resp->per_client_stats_list[i].num_dl_ipv4_bytes) -
+ (rmnet_ipa3_ctx->
+ tether_device[data->device_type].hdr_len *
+ resp->per_client_stats_list[i].num_dl_ipv4_pkts);
+ /* UL header bytes are subtracted by Q6. */
+ data->client_info[i].ipv4_tx_bytes =
+ resp->per_client_stats_list[i].num_ul_ipv4_bytes;
+ /* Subtract the header bytes from the DL bytes. */
+ data->client_info[i].ipv6_rx_bytes =
+ (resp->per_client_stats_list[i].num_dl_ipv6_bytes) -
+ (rmnet_ipa3_ctx->
+ tether_device[data->device_type].hdr_len *
+ resp->per_client_stats_list[i].num_dl_ipv6_pkts);
+ /* UL header bytes are subtracted by Q6. */
+ data->client_info[i].ipv6_tx_bytes =
+ resp->per_client_stats_list[i].num_ul_ipv6_bytes;
+
+ IPAWANDBG("tx_b_v4(%lu)v6(%lu)rx_b_v4(%lu) v6(%lu)\n",
+ (unsigned long int) data->client_info[i].ipv4_tx_bytes,
+ (unsigned long int) data->client_info[i].ipv6_tx_bytes,
+ (unsigned long int) data->client_info[i].ipv4_rx_bytes,
+ (unsigned long int) data->client_info[i].ipv6_rx_bytes);
+
+ /* Get the lan client index. */
+ lan_clnt_idx = resp->per_client_stats_list[i].client_id;
+ /* Check if lan_clnt_idx is valid. */
+ if (lan_clnt_idx < 0 ||
+ lan_clnt_idx >= IPA_MAX_NUM_HW_PATH_CLIENTS) {
+ IPAWANERR("Lan client index not valid.\n");
+ mutex_unlock(
+ &rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ ipa_assert();
+ return -EINVAL;
+ }
+ memcpy(data->client_info[i].mac,
+ rmnet_ipa3_ctx->
+ tether_device[data->device_type].
+ lan_client[lan_clnt_idx].mac,
+ IPA_MAC_ADDR_SIZE);
+ }
+ }
+
+ if (data->disconnect_clnt) {
+ rmnet_ipa3_delete_lan_client_info(data->device_type,
+ lan_clnt_idx1);
+ }
+
+ mutex_unlock(&rmnet_ipa3_ctx->per_client_stats_guard);
+ kfree(req);
+ kfree(resp);
+ return 0;
+}
+
static int __init ipa3_wwan_init(void)
{
+ int i, j;
rmnet_ipa3_ctx = kzalloc(sizeof(*rmnet_ipa3_ctx), GFP_KERNEL);
if (!rmnet_ipa3_ctx) {
IPAWANERR("no memory\n");
@@ -3330,6 +3843,14 @@ static int __init ipa3_wwan_init(void)
mutex_init(&rmnet_ipa3_ctx->pipe_handle_guard);
mutex_init(&rmnet_ipa3_ctx->add_mux_channel_lock);
+ mutex_init(&rmnet_ipa3_ctx->per_client_stats_guard);
+ /* Reset the Lan Stats. */
+ for (i = 0; i < IPACM_MAX_CLIENT_DEVICE_TYPES; i++) {
+ rmnet_ipa3_ctx->tether_device[i].ul_src_pipe = -1;
+ for (j = 0; j < IPA_MAX_NUM_HW_PATH_CLIENTS; j++)
+ rmnet_ipa3_ctx->tether_device[i].
+ lan_client[j].client_idx = -1;
+ }
rmnet_ipa3_ctx->ipa3_to_apps_hdl = -1;
rmnet_ipa3_ctx->apps_to_ipa3_hdl = -1;
@@ -3352,6 +3873,7 @@ static void __exit ipa3_wwan_cleanup(void)
ipa3_qmi_cleanup();
mutex_destroy(&rmnet_ipa3_ctx->pipe_handle_guard);
mutex_destroy(&rmnet_ipa3_ctx->add_mux_channel_lock);
+ mutex_destroy(&rmnet_ipa3_ctx->per_client_stats_guard);
ret = subsys_notif_unregister_notifier(
rmnet_ipa3_ctx->subsys_notify_handle, &ipa3_ssr_notifier);
if (ret)
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
index 51bbec464e4d..dc1e5ce511a6 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa_fd_ioctl.c
@@ -50,6 +50,15 @@
#define WAN_IOC_QUERY_TETHER_STATS_ALL32 _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
compat_uptr_t)
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+ compat_uptr_t)
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+ compat_uptr_t)
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO32 _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+ compat_uptr_t)
#endif
static unsigned int dev_num = 1;
@@ -125,6 +134,34 @@ static long ipa3_wan_ioctl(struct file *filp,
}
break;
+ case WAN_IOC_ADD_UL_FLT_RULE:
+ IPAWANDBG("device %s got WAN_IOC_ADD_UL_FLT_RULE :>>>\n",
+ DRIVER_NAME);
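+ /* copy the UL firewall rule set from IPACM and forward it to Q6 over QMI */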
+ pyld_sz =
+ sizeof(struct ipa_configure_ul_firewall_rules_req_msg_v01);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (ipa3_qmi_ul_filter_request_send(
+ (struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+ param)) {
+ IPAWANDBG("IPACM->Q6 add ul filter rule failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
case WAN_IOC_ADD_FLT_RULE_INDEX:
IPAWANDBG("device %s got WAN_IOC_ADD_FLT_RULE_INDEX :>>>\n",
DRIVER_NAME);
@@ -316,6 +353,122 @@ static long ipa3_wan_ioctl(struct file *filp,
}
break;
+ case WAN_IOC_ENABLE_PER_CLIENT_STATS:
+ IPAWANDBG_LOW("got WAN_IOC_ENABLE_PER_CLIENT_STATS :>>>\n");
+ pyld_sz = sizeof(bool);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ if (rmnet_ipa3_enable_per_client_stats(
+ (bool *)param)) {
+ IPAWANERR("WAN_IOC_ENABLE_PER_CLIENT_STATS failed\n");
+ retval = -EFAULT;
+ break;
+ }
+
+ break;
+
+ case WAN_IOC_QUERY_PER_CLIENT_STATS:
+ IPAWANDBG_LOW("got WAN_IOC_QUERY_PER_CLIENT_STATS :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_query_per_client_stats);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+
+ retval = rmnet_ipa3_query_per_client_stats(
+ (struct wan_ioctl_query_per_client_stats *)param);
+ if (retval) {
+ IPAWANERR("WAN_IOC_QUERY_PER_CLIENT_STATS failed\n");
+ break;
+ }
+
+ if (copy_to_user((void __user *)arg, param, pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_SET_LAN_CLIENT_INFO:
+ IPAWANDBG_LOW("got WAN_IOC_SET_LAN_CLIENT_INFO :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_set_lan_client_info(
+ (struct wan_ioctl_lan_client_info *)param)) {
+ IPAWANERR("WAN_IOC_SET_LAN_CLIENT_INFO failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+ case WAN_IOC_CLEAR_LAN_CLIENT_INFO:
+ IPAWANDBG_LOW("got WAN_IOC_CLEAR_LAN_CLIENT_INFO :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_lan_client_info);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_clear_lan_client_info(
+ (struct wan_ioctl_lan_client_info *)param)) {
+ IPAWANERR("WAN_IOC_CLEAR_LAN_CLIENT_INFO failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+
+ case WAN_IOC_SEND_LAN_CLIENT_MSG:
+ IPAWANDBG_LOW("got WAN_IOC_SEND_LAN_CLIENT_MSG :>>>\n");
+ pyld_sz = sizeof(struct wan_ioctl_send_lan_client_msg);
+ param = kzalloc(pyld_sz, GFP_KERNEL);
+ if (!param) {
+ retval = -ENOMEM;
+ break;
+ }
+ if (copy_from_user(param, (const void __user *)arg,
+ pyld_sz)) {
+ retval = -EFAULT;
+ break;
+ }
+ if (rmnet_ipa3_send_lan_client_msg(
+ (struct wan_ioctl_send_lan_client_msg *)
+ param)) {
+ IPAWANERR("WAN_IOC_SEND_LAN_CLIENT_MSG failed\n");
+ retval = -EFAULT;
+ break;
+ }
+ break;
+
+
default:
retval = -ENOTTY;
}
diff --git a/drivers/power/supply/qcom/qpnp-fg-gen3.c b/drivers/power/supply/qcom/qpnp-fg-gen3.c
index ea01a38015f3..79a80b6e6c7c 100644
--- a/drivers/power/supply/qcom/qpnp-fg-gen3.c
+++ b/drivers/power/supply/qcom/qpnp-fg-gen3.c
@@ -3727,6 +3727,7 @@ static int fg_notifier_cb(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "parallel") == 0)
|| (strcmp(psy->desc->name, "usb") == 0)) {
/*
* We cannot vote for awake votable here as that takes
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index b1e9ad72a07e..df7aabfd7e2e 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -4057,6 +4057,14 @@ irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
struct smb_charger *chg = irq_data->parent_data;
chg->is_hdc = true;
+ /*
+ * Disable usb IRQs after the flag set and re-enable IRQs after
+ * the flag cleared in the delayed work queue, to avoid any IRQ
+ * storming during the delays
+ */
+ if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ disable_irq_nosync(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
+
schedule_delayed_work(&chg->clear_hdc_work, msecs_to_jiffies(60));
return IRQ_HANDLED;
@@ -4234,6 +4242,8 @@ static void clear_hdc_work(struct work_struct *work)
clear_hdc_work.work);
chg->is_hdc = 0;
+ if (chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq)
+ enable_irq(chg->irq_info[HIGH_DUTY_CYCLE_IRQ].irq);
}
static void rdstd_cc2_detach_work(struct work_struct *work)
diff --git a/drivers/power/supply/qcom/step-chg-jeita.c b/drivers/power/supply/qcom/step-chg-jeita.c
index 06ecc7ea6e8a..acc0d772d44d 100644
--- a/drivers/power/supply/qcom/step-chg-jeita.c
+++ b/drivers/power/supply/qcom/step-chg-jeita.c
@@ -356,11 +356,21 @@ static void status_change_work(struct work_struct *work)
int reschedule_us;
int reschedule_jeita_work_us = 0;
int reschedule_step_work_us = 0;
+ union power_supply_propval pval = {0, };
+
+ if (!is_batt_available(chip)) {
+ __pm_relax(chip->step_chg_ws);
+ return;
+ }
- if (!is_batt_available(chip))
+ /* skip jeita and step if not charging */
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_STATUS, &pval);
+ if (pval.intval != POWER_SUPPLY_STATUS_CHARGING) {
+ __pm_relax(chip->step_chg_ws);
return;
+ }
- /* skip elapsed_us debounce for handling battery temperature */
rc = handle_jeita(chip);
if (rc > 0)
reschedule_jeita_work_us = rc;
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index 2531b74b4588..5e808150a3dd 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1475,7 +1475,7 @@ static void qpnp_pwm_disable(struct pwm_chip *pwm_chip,
*/
int pwm_change_mode(struct pwm_device *pwm, enum pm_pwm_mode mode)
{
- int rc;
+ int rc = 0;
unsigned long flags;
struct qpnp_pwm_chip *chip;
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index ec28f6214e7b..35575c071760 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -173,6 +173,9 @@ void ufshcd_update_query_stats(struct ufs_hba *hba,
}
#endif
+#define PWR_INFO_MASK 0xF
+#define PWR_RX_OFFSET 4
+
#define UFSHCD_REQ_SENSE_SIZE 18
#define UFSHCD_ENABLE_INTRS (UTP_TRANSFER_REQ_COMPL |\
@@ -4653,8 +4656,9 @@ int ufshcd_change_power_mode(struct ufs_hba *hba,
int ret = 0;
/* if already configured to the requested pwr_mode */
- if (pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
- pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
+ if (!hba->restore_needed &&
+ pwr_mode->gear_rx == hba->pwr_info.gear_rx &&
+ pwr_mode->gear_tx == hba->pwr_info.gear_tx &&
pwr_mode->lane_rx == hba->pwr_info.lane_rx &&
pwr_mode->lane_tx == hba->pwr_info.lane_tx &&
pwr_mode->pwr_rx == hba->pwr_info.pwr_rx &&
@@ -6275,6 +6279,52 @@ static void ufshcd_update_uic_reg_hist(struct ufs_uic_err_reg_hist *reg_hist,
reg_hist->pos = (reg_hist->pos + 1) % UIC_ERR_REG_HIST_LENGTH;
}
+static void ufshcd_rls_handler(struct work_struct *work)
+{
+ struct ufs_hba *hba;
+ int ret = 0;
+ u32 mode;
+
+ hba = container_of(work, struct ufs_hba, rls_work);
+ ufshcd_scsi_block_requests(hba);
+ pm_runtime_get_sync(hba->dev);
+ ret = ufshcd_wait_for_doorbell_clr(hba, U64_MAX);
+ if (ret) {
+ dev_err(hba->dev,
+ "Timed out (%d) waiting for DB to clear\n",
+ ret);
+ goto out;
+ }
+
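+ /* read back the negotiated power mode and gears; if any differ from the cached pwr_info, reprogram the link */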
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_PWRMODE), &mode);
+ if (hba->pwr_info.pwr_rx != ((mode >> PWR_RX_OFFSET) & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ if (hba->pwr_info.pwr_tx != (mode & PWR_INFO_MASK))
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_RXGEAR), &mode);
+ if (hba->pwr_info.gear_rx != mode)
+ hba->restore_needed = true;
+
+ ufshcd_dme_get(hba, UIC_ARG_MIB(PA_TXGEAR), &mode);
+ if (hba->pwr_info.gear_tx != mode)
+ hba->restore_needed = true;
+
+ if (hba->restore_needed)
+ ret = ufshcd_config_pwr_mode(hba, &(hba->pwr_info));
+
+ if (ret)
+ dev_err(hba->dev, "%s: Failed setting power mode, err = %d\n",
+ __func__, ret);
+ else
+ hba->restore_needed = false;
+
+out:
+ ufshcd_scsi_unblock_requests(hba);
+ pm_runtime_put_sync(hba->dev);
+}
+
/**
* ufshcd_update_uic_error - check and set fatal UIC error flags.
* @hba: per-adapter instance
@@ -6314,6 +6364,8 @@ static irqreturn_t ufshcd_update_uic_error(struct ufs_hba *hba)
hba->full_init_linereset = true;
}
}
+ if (!hba->full_init_linereset)
+ schedule_work(&hba->rls_work);
}
retval |= IRQ_HANDLED;
}
@@ -9922,6 +9974,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
/* Initialize work queues */
INIT_WORK(&hba->eh_work, ufshcd_err_handler);
INIT_WORK(&hba->eeh_work, ufshcd_exception_event_handler);
+ INIT_WORK(&hba->rls_work, ufshcd_rls_handler);
/* Initialize UIC command mutex */
mutex_init(&hba->uic_cmd_mutex);
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index da3ad78d3405..dbc80848ed8b 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -854,6 +854,7 @@ struct ufs_hba {
/* Work Queues */
struct work_struct eh_work;
struct work_struct eeh_work;
+ struct work_struct rls_work;
/* HBA Errors */
u32 errors;
@@ -950,9 +951,10 @@ struct ufs_hba {
bool full_init_linereset;
struct pinctrl *pctrl;
-
+
int latency_hist_enabled;
struct io_latency_state io_lat_s;
+ bool restore_needed;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index f3debd14c27b..ad9bf3a2232d 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -1667,6 +1667,8 @@ void ch_purge_intent_lists(struct channel_ctx *ctx)
&ctx->local_rx_intent_list, list) {
ctx->notify_rx_abort(ctx, ctx->user_priv,
ptr_intent->pkt_priv);
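+ /* hand the rx intent buffer back to the transport before dropping the local bookkeeping */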
+ ctx->transport_ptr->ops->deallocate_rx_intent(
+ ctx->transport_ptr->ops, ptr_intent);
list_del(&ptr_intent->list);
kfree(ptr_intent);
}
@@ -3765,6 +3767,8 @@ static void glink_dummy_xprt_ctx_release(struct rwref_lock *xprt_st_lock)
GLINK_INFO("%s: freeing transport [%s->%s]context\n", __func__,
xprt_ctx->name,
xprt_ctx->edge);
+ kfree(xprt_ctx->ops);
+ xprt_ctx->ops = NULL;
kfree(xprt_ctx);
}
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index 60d0f2a37026..7ede3e29dcf9 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -559,7 +559,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
char *fw_name_p;
void *mba_dp_virt;
dma_addr_t mba_dp_phys, mba_dp_phys_end;
- int ret, count;
+ int ret;
const u8 *data;
struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
@@ -624,10 +624,9 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
&mba_dp_phys, &mba_dp_phys_end, drv->mba_dp_size);
/* Load the MBA image into memory */
- count = fw->size;
- if (count <= SZ_1M) {
+ if (fw->size <= SZ_1M) {
/* Ensures memcpy is done for max 1MB fw size */
- memcpy(mba_dp_virt, data, count);
+ memcpy(mba_dp_virt, data, fw->size);
} else {
dev_err(pil->dev, "%s fw image loading into memory is failed due to fw size overflow\n",
__func__);
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index a49848808078..68199d9adb02 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -493,13 +493,10 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
ch->glink_state = event;
- /*
- * if spcom_notify_state() is called within glink_open()
- * then ch->glink_handle is not updated yet.
- */
- if (!ch->glink_handle) {
- pr_debug("update glink_handle, ch [%s].\n", ch->name);
- ch->glink_handle = handle;
+ if (!handle) {
+ pr_err("invalid glink_handle, ch [%s].\n", ch->name);
+ mutex_unlock(&ch->lock);
+ return;
}
/* signal before unlock mutex & before calling glink */
@@ -512,8 +509,7 @@ static void spcom_notify_state(void *handle, const void *priv, unsigned event)
*/
pr_debug("call glink_queue_rx_intent() ch [%s].\n", ch->name);
- ret = glink_queue_rx_intent(ch->glink_handle,
- ch, ch->rx_buf_size);
+ ret = glink_queue_rx_intent(handle, ch, ch->rx_buf_size);
if (ret) {
pr_err("glink_queue_rx_intent() err [%d]\n", ret);
} else {
@@ -1028,10 +1024,12 @@ static int spcom_get_next_request_size(struct spcom_channel *ch)
ch->name, ch->actual_rx_size);
goto exit_ready;
}
+ mutex_unlock(&ch->lock); /* unlock while waiting */
pr_debug("Wait for Rx Done, ch [%s].\n", ch->name);
wait_for_completion(&ch->rx_done);
+ mutex_lock(&ch->lock); /* re-lock after waiting */
/* Check Rx Abort on SP reset */
if (ch->rx_abort) {
pr_err("rx aborted.\n");
diff --git a/drivers/soc/qcom/wcd-dsp-glink.c b/drivers/soc/qcom/wcd-dsp-glink.c
index 85c2b92f5474..ee88a8aaf850 100644
--- a/drivers/soc/qcom/wcd-dsp-glink.c
+++ b/drivers/soc/qcom/wcd-dsp-glink.c
@@ -634,6 +634,21 @@ static int wdsp_glink_ch_info_init(struct wdsp_glink_priv *wpriv,
memcpy(&ch[i]->ch_cfg, payload, ch_cfg_size);
payload += ch_cfg_size;
+ /* check ch name is valid string or not */
+ for (j = 0; j < WDSP_CH_NAME_MAX_LEN; j++) {
+ if (ch[i]->ch_cfg.name[j] == '\0')
+ break;
+ }
+
+ if (j == WDSP_CH_NAME_MAX_LEN) {
+ dev_err_ratelimited(wpriv->dev, "%s: Wrong channel name\n",
+ __func__);
+ kfree(ch[i]);
+ ch[i] = NULL;
+ ret = -EINVAL;
+ goto err_ch_mem;
+ }
+
mutex_init(&ch[i]->mutex);
ch[i]->wpriv = wpriv;
INIT_WORK(&ch[i]->lcl_ch_open_wrk, wdsp_glink_lcl_ch_open_wrk);
@@ -906,8 +921,6 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
- dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
- __func__, cpkt->ch_name, pkt_max_size);
for (i = 0; i < wpriv->no_of_channels; i++) {
if (wpriv->ch && wpriv->ch[i] &&
(!strcmp(cpkt->ch_name,
@@ -922,6 +935,8 @@ static ssize_t wdsp_glink_write(struct file *file, const char __user *buf,
ret = -EINVAL;
goto free_buf;
}
+ dev_dbg(wpriv->dev, "%s: requested ch_name: %s, pkt_size: %zd\n",
+ __func__, cpkt->ch_name, pkt_max_size);
ret = wait_event_timeout(tx_buf->ch->ch_connect_wait,
(tx_buf->ch->channel_state ==
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 3c0f68deee34..d92a33097461 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -58,7 +58,7 @@
/* time out to wait for USB cable status notification (in ms)*/
#define SM_INIT_TIMEOUT 30000
-
+#define DWC3_WAKEUP_SRC_TIMEOUT 5000
/* AHB2PHY register offsets */
#define PERIPH_SS_AHB2PHY_TOP_CFG 0x10
@@ -216,6 +216,7 @@ struct dwc3_msm {
struct notifier_block usbdev_nb;
bool hc_died;
bool xhci_ss_compliance_enable;
+ bool no_wakeup_src_in_hostmode;
struct extcon_dev *extcon_vbus;
struct extcon_dev *extcon_id;
@@ -2350,6 +2351,7 @@ static void dwc3_ext_event_notify(struct dwc3_msm *mdwc)
clear_bit(B_SUSPEND, &mdwc->inputs);
}
+ pm_stay_awake(mdwc->dev);
schedule_delayed_work(&mdwc->sm_work, 0);
}
@@ -2638,6 +2640,7 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
if (mdwc->id_state != id) {
mdwc->id_state = id;
dbg_event(0xFF, "id_state", mdwc->id_state);
+ pm_stay_awake(mdwc->dev);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
@@ -2700,6 +2703,7 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
mdwc->vbus_active = event;
if (dwc->is_drd && !mdwc->in_restart) {
dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active);
+ pm_stay_awake(mdwc->dev);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
done:
@@ -3099,6 +3103,11 @@ static int dwc3_msm_probe(struct platform_device *pdev)
mdwc->disable_host_mode_pm = of_property_read_bool(node,
"qcom,disable-host-mode-pm");
+ mdwc->no_wakeup_src_in_hostmode = of_property_read_bool(node,
+ "qcom,no-wakeup-src-in-hostmode");
+ if (mdwc->no_wakeup_src_in_hostmode)
+ dev_dbg(&pdev->dev, "dwc3 host not using wakeup source\n");
+
dwc3_set_notifier(&dwc3_msm_notify_event);
/* Assumes dwc3 is the first DT child of dwc3-msm */
@@ -3705,20 +3714,20 @@ static int dwc3_msm_gadget_vbus_draw(struct dwc3_msm *mdwc, unsigned mA)
union power_supply_propval pval = {0};
int ret, psy_type;
- if (mdwc->max_power == mA)
- return 0;
-
psy_type = get_psy_type(mdwc);
- if (psy_type == POWER_SUPPLY_TYPE_USB) {
- dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
- /* Set max current limit in uA */
- pval.intval = 1000 * mA;
- } else if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
+ if (psy_type == POWER_SUPPLY_TYPE_USB_FLOAT) {
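+ /* a floating charger is reported to the power supply as -ETIMEDOUT rather than a current limit */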
pval.intval = -ETIMEDOUT;
- } else {
- return 0;
+ goto set_prop;
}
+ if (mdwc->max_power == mA || psy_type != POWER_SUPPLY_TYPE_USB)
+ return 0;
+
+ dev_info(mdwc->dev, "Avail curr from USB = %u\n", mA);
+ /* Set max current limit in uA */
+ pval.intval = 1000 * mA;
+
+set_prop:
ret = power_supply_set_property(mdwc->usb_psy,
POWER_SUPPLY_PROP_SDP_CURRENT_MAX, &pval);
if (ret) {
@@ -3892,12 +3901,14 @@ static void dwc3_otg_sm_work(struct work_struct *w)
mdwc->otg_state = OTG_STATE_A_IDLE;
goto ret;
}
+ pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT);
}
break;
case OTG_STATE_A_HOST:
if (test_bit(ID, &mdwc->inputs) || mdwc->hc_died) {
- dev_dbg(mdwc->dev, "id || hc_died\n");
+ dbg_event(0xFF, "id || hc_died", 0);
+ dev_dbg(mdwc->dev, "%s state id || hc_died\n", state);
dwc3_otg_start_host(mdwc, 0);
mdwc->otg_state = OTG_STATE_B_IDLE;
mdwc->vbus_retry_count = 0;
@@ -3908,6 +3919,7 @@ static void dwc3_otg_sm_work(struct work_struct *w)
dbg_event(0xFF, "XHCIResume", 0);
if (dwc)
pm_runtime_resume(&dwc->xhci->dev);
+ pm_wakeup_event(mdwc->dev, DWC3_WAKEUP_SRC_TIMEOUT);
}
break;
@@ -3923,6 +3935,34 @@ ret:
return;
}
+static int dwc3_msm_pm_prepare(struct device *dev)
+{
+ struct dwc3_msm *mdwc = dev_get_drvdata(dev);
+ struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct usb_hcd *hcd;
+ struct xhci_hcd *xhci;
+
+ dev_dbg(dev, "dwc3-msm PM prepare, lpm:%u\n", atomic_read(&dwc->in_lpm));
+ dbg_event(0xFF, "PM Prep", 0);
+ if (!mdwc->in_host_mode || !mdwc->no_wakeup_src_in_hostmode)
+ return 0;
+
+ hcd = dev_get_drvdata(&dwc->xhci->dev);
+ xhci = hcd_to_xhci(hcd);
+ flush_delayed_work(&mdwc->sm_work);
+
+ /* If in lpm then prevent usb core to runtime_resume from pm_suspend */
+ if (atomic_read(&dwc->in_lpm)) {
+ hcd_to_bus(hcd)->skip_resume = true;
+ hcd_to_bus(xhci->shared_hcd)->skip_resume = true;
+ } else {
+ hcd_to_bus(hcd)->skip_resume = false;
+ hcd_to_bus(xhci->shared_hcd)->skip_resume = false;
+ }
+
+ return 0;
+}
+
#ifdef CONFIG_PM_SLEEP
static int dwc3_msm_pm_suspend(struct device *dev)
{
@@ -3934,7 +3974,7 @@ static int dwc3_msm_pm_suspend(struct device *dev)
dbg_event(0xFF, "PM Sus", 0);
flush_workqueue(mdwc->dwc3_wq);
- if (!atomic_read(&dwc->in_lpm)) {
+ if (!atomic_read(&dwc->in_lpm) && !mdwc->no_wakeup_src_in_hostmode) {
dev_err(mdwc->dev, "Abort PM suspend!! (USB is outside LPM)\n");
return -EBUSY;
}
@@ -3958,8 +3998,13 @@ static int dwc3_msm_pm_resume(struct device *dev)
flush_workqueue(mdwc->dwc3_wq);
atomic_set(&mdwc->pm_suspended, 0);
+ /* Resume h/w in host mode as it may not be runtime suspended */
+ if (mdwc->no_wakeup_src_in_hostmode && !test_bit(ID, &mdwc->inputs))
+ dwc3_msm_resume(mdwc);
+
/* kick in otg state machine */
- queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
+ if (mdwc->vbus_active || !mdwc->id_state)
+ queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
return 0;
}
@@ -3996,6 +4041,7 @@ static int dwc3_msm_runtime_resume(struct device *dev)
#endif
static const struct dev_pm_ops dwc3_msm_dev_pm_ops = {
+ .prepare = dwc3_msm_pm_prepare,
SET_SYSTEM_SLEEP_PM_OPS(dwc3_msm_pm_suspend, dwc3_msm_pm_resume)
SET_RUNTIME_PM_OPS(dwc3_msm_runtime_suspend, dwc3_msm_runtime_resume,
dwc3_msm_runtime_idle)
diff --git a/drivers/usb/gadget/function/f_ccid.c b/drivers/usb/gadget/function/f_ccid.c
index 1a281833eadd..9523d67dfb15 100644
--- a/drivers/usb/gadget/function/f_ccid.c
+++ b/drivers/usb/gadget/function/f_ccid.c
@@ -206,6 +206,71 @@ static struct usb_descriptor_header *ccid_hs_descs[] = {
NULL,
};
+/* Super speed support: */
+static struct usb_endpoint_descriptor ccid_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(CCID_NOTIFY_MAXPACKET),
+ .bInterval = CCID_NOTIFY_INTERVAL + 4,
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_notify_comp_desc = {
+ .bLength = sizeof(ccid_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_in_comp_desc = {
+ .bLength = sizeof(ccid_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ccid_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ccid_ss_out_comp_desc = {
+ .bLength = sizeof(ccid_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ccid_ss_descs[] = {
+ (struct usb_descriptor_header *) &ccid_interface_desc,
+ (struct usb_descriptor_header *) &ccid_class_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_desc,
+ (struct usb_descriptor_header *) &ccid_ss_notify_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_desc,
+ (struct usb_descriptor_header *) &ccid_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_desc,
+ (struct usb_descriptor_header *) &ccid_ss_out_comp_desc,
+ NULL,
+};
+
static inline struct f_ccid *func_to_ccid(struct usb_function *f)
{
return container_of(f, struct f_ccid, function);
@@ -503,10 +568,7 @@ free_notify:
static void ccid_function_unbind(struct usb_configuration *c,
struct usb_function *f)
{
- if (gadget_is_dualspeed(c->cdev->gadget))
- usb_free_descriptors(f->hs_descriptors);
- usb_free_descriptors(f->fs_descriptors);
-
+ usb_free_all_descriptors(f);
}
static int ccid_function_bind(struct usb_configuration *c,
@@ -551,23 +613,26 @@ static int ccid_function_bind(struct usb_configuration *c,
ccid_dev->out = ep;
ep->driver_data = cdev;
- f->fs_descriptors = usb_copy_descriptors(ccid_fs_descs);
- if (!f->fs_descriptors)
- goto ep_auto_out_fail;
+ /*
+ * support all relevant hardware speeds... we expect that when
+ * hardware is dual speed, all bulk-capable endpoints work at
+ * both speeds
+ */
+ ccid_hs_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+ ccid_hs_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+ ccid_hs_notify_desc.bEndpointAddress =
+ ccid_fs_notify_desc.bEndpointAddress;
- if (gadget_is_dualspeed(cdev->gadget)) {
- ccid_hs_in_desc.bEndpointAddress =
- ccid_fs_in_desc.bEndpointAddress;
- ccid_hs_out_desc.bEndpointAddress =
- ccid_fs_out_desc.bEndpointAddress;
- ccid_hs_notify_desc.bEndpointAddress =
- ccid_fs_notify_desc.bEndpointAddress;
-
- /* copy descriptors, and track endpoint copies */
- f->hs_descriptors = usb_copy_descriptors(ccid_hs_descs);
- if (!f->hs_descriptors)
- goto ep_auto_out_fail;
- }
+
+ ccid_ss_in_desc.bEndpointAddress = ccid_fs_in_desc.bEndpointAddress;
+ ccid_ss_out_desc.bEndpointAddress = ccid_fs_out_desc.bEndpointAddress;
+ ccid_ss_notify_desc.bEndpointAddress =
+ ccid_fs_notify_desc.bEndpointAddress;
+
+ ret = usb_assign_descriptors(f, ccid_fs_descs, ccid_hs_descs,
+ ccid_ss_descs);
+ if (ret)
+ goto ep_auto_out_fail;
pr_debug("%s: CCID %s Speed, IN:%s OUT:%s\n", __func__,
gadget_is_dualspeed(cdev->gadget) ? "dual" : "full",
@@ -972,6 +1037,7 @@ static int ccid_bind_config(struct f_ccid *ccid_dev)
ccid_dev->function.name = FUNCTION_NAME;
ccid_dev->function.fs_descriptors = ccid_fs_descs;
ccid_dev->function.hs_descriptors = ccid_hs_descs;
+ ccid_dev->function.ss_descriptors = ccid_ss_descs;
ccid_dev->function.bind = ccid_function_bind;
ccid_dev->function.unbind = ccid_function_unbind;
ccid_dev->function.set_alt = ccid_function_set_alt;
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 1900870eee39..3b925d9b000e 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -885,8 +885,9 @@ static void gsi_ctrl_clear_cpkt_queues(struct f_gsi *gsi, bool skip_req_q)
{
struct gsi_ctrl_pkt *cpkt = NULL;
struct list_head *act, *tmp;
+ unsigned long flags;
- spin_lock(&gsi->c_port.lock);
+ spin_lock_irqsave(&gsi->c_port.lock, flags);
if (skip_req_q)
goto clean_resp_q;
@@ -901,7 +902,7 @@ clean_resp_q:
list_del(&cpkt->list);
gsi_ctrl_pkt_free(cpkt);
}
- spin_unlock(&gsi->c_port.lock);
+ spin_unlock_irqrestore(&gsi->c_port.lock, flags);
}
static int gsi_ctrl_send_cpkt_tomodem(struct f_gsi *gsi, void *buf, size_t len)
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 56a9cd62f2c4..c6998f086e12 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -325,6 +325,34 @@ static int xhci_plat_remove(struct platform_device *dev)
return 0;
}
+#ifdef CONFIG_PM_SLEEP
+static int xhci_plat_suspend(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat PM suspend\n");
+
+ return xhci_suspend(xhci, true);
+}
+
+static int xhci_plat_resume(struct device *dev)
+{
+ struct usb_hcd *hcd = dev_get_drvdata(dev);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+
+ if (!xhci)
+ return 0;
+
+ dev_dbg(dev, "xhci-plat PM resume\n");
+
+ return xhci_resume(xhci, false);
+}
+#endif
+
#ifdef CONFIG_PM
static int xhci_plat_runtime_idle(struct device *dev)
{
@@ -373,7 +401,7 @@ static int xhci_plat_runtime_resume(struct device *dev)
}
static const struct dev_pm_ops xhci_plat_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(NULL, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(xhci_plat_suspend, xhci_plat_resume)
SET_RUNTIME_PM_OPS(xhci_plat_runtime_suspend, xhci_plat_runtime_resume,
xhci_plat_runtime_idle)
};
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index aab1c7903288..641e0280ad5a 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -918,7 +918,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
struct usb_hcd *hcd = xhci_to_hcd(xhci);
u32 command;
- if (!hcd->state)
+ if (!hcd->state || xhci->suspended)
return 0;
if (hcd->state != HC_STATE_SUSPENDED ||
@@ -988,6 +988,7 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup)
/* step 5: remove core well power */
/* synchronize irq when using MSI-X */
xhci_msix_sync_irqs(xhci);
+ xhci->suspended = true;
return rc;
}
@@ -1007,7 +1008,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
int retval = 0;
bool comp_timer_running = false;
- if (!hcd->state)
+ if (!hcd->state || !xhci->suspended)
return 0;
/* Wait a bit if either of the roothubs need to settle from the
@@ -1141,6 +1142,7 @@ int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
/* Re-enable port polling. */
xhci_dbg(xhci, "%s: starting port polling.\n", __func__);
+ xhci->suspended = false;
set_bit(HCD_FLAG_POLL_RH, &xhci->shared_hcd->flags);
usb_hcd_poll_rh_status(xhci->shared_hcd);
set_bit(HCD_FLAG_POLL_RH, &hcd->flags);
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 8fcec1be6b1a..7fc97d930657 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1667,6 +1667,7 @@ struct xhci_hcd {
/* Compliance Mode Recovery Data */
struct timer_list comp_mode_recovery_timer;
u32 port_status_u0;
+ bool suspended;
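+ /* set by xhci_suspend() and cleared by xhci_resume() to avoid repeating either path */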
/* Compliance Mode Timer Triggered every 2 seconds */
#define COMP_MODE_RCVRY_MSECS 2000
};
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index f9f47da8a88b..3c0386ee5875 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -125,6 +125,12 @@ enum usbpd_control_msg_type {
MSG_VCONN_SWAP,
MSG_WAIT,
MSG_SOFT_RESET,
+ MSG_NOT_SUPPORTED = 0x10,
+ MSG_GET_SOURCE_CAP_EXTENDED,
+ MSG_GET_STATUS,
+ MSG_FR_SWAP,
+ MSG_GET_PPS_STATUS,
+ MSG_GET_COUNTRY_CODES,
};
enum usbpd_data_msg_type {
@@ -132,9 +138,29 @@ enum usbpd_data_msg_type {
MSG_REQUEST,
MSG_BIST,
MSG_SINK_CAPABILITIES,
+ MSG_BATTERY_STATUS,
+ MSG_ALERT,
+ MSG_GET_COUNTRY_INFO,
MSG_VDM = 0xF,
};
+enum usbpd_ext_msg_type {
+ MSG_SOURCE_CAPABILITIES_EXTENDED = 1,
+ MSG_STATUS,
+ MSG_GET_BATTERY_CAP,
+ MSG_GET_BATTERY_STATUS,
+ MSG_BATTERY_CAPABILITIES,
+ MSG_GET_MANUFACTURER_INFO,
+ MSG_MANUFACTURER_INFO,
+ MSG_SECURITY_REQUEST,
+ MSG_SECURITY_RESPONSE,
+ MSG_FIRMWARE_UPDATE_REQUEST,
+ MSG_FIRMWARE_UPDATE_RESPONSE,
+ MSG_PPS_STATUS,
+ MSG_COUNTRY_INFO,
+ MSG_COUNTRY_CODES,
+};
+
enum vdm_state {
VDM_NONE,
DISCOVERED_ID,
@@ -198,13 +224,30 @@ static void *usbpd_ipc_log;
#define PD_MAX_DATA_OBJ 7
+#define PD_SRC_CAP_EXT_DB_LEN 24
+#define PD_STATUS_DB_LEN 5
+#define PD_BATTERY_CAP_DB_LEN 9
+
+#define PD_MAX_EXT_MSG_LEN 260
+#define PD_MAX_EXT_MSG_LEGACY_LEN 26
+
#define PD_MSG_HDR(type, dr, pr, id, cnt, rev) \
- (((type) & 0xF) | ((dr) << 5) | (rev << 6) | \
+ (((type) & 0x1F) | ((dr) << 5) | (rev << 6) | \
((pr) << 8) | ((id) << 9) | ((cnt) << 12))
-#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
-#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0xF)
-#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
-#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+#define PD_MSG_HDR_COUNT(hdr) (((hdr) >> 12) & 7)
+#define PD_MSG_HDR_TYPE(hdr) ((hdr) & 0x1F)
+#define PD_MSG_HDR_ID(hdr) (((hdr) >> 9) & 7)
+#define PD_MSG_HDR_REV(hdr) (((hdr) >> 6) & 3)
+#define PD_MSG_HDR_EXTENDED BIT(15)
+#define PD_MSG_HDR_IS_EXTENDED(hdr) ((hdr) & PD_MSG_HDR_EXTENDED)
+
+#define PD_MSG_EXT_HDR(chunked, num, req, size) \
+ (((chunked) << 15) | (((num) & 0xF) << 11) | \
+ ((req) << 10) | ((size) & 0x1FF))
+#define PD_MSG_EXT_HDR_IS_CHUNKED(ehdr) ((ehdr) & 0x8000)
+#define PD_MSG_EXT_HDR_CHUNK_NUM(ehdr) (((ehdr) >> 11) & 0xF)
+#define PD_MSG_EXT_HDR_REQ_CHUNK(ehdr) ((ehdr) & 0x400)
+#define PD_MSG_EXT_HDR_DATA_SIZE(ehdr) ((ehdr) & 0x1FF)
#define PD_RDO_FIXED(obj, gb, mismatch, usb_comm, no_usb_susp, curr1, curr2) \
(((obj) << 28) | ((gb) << 27) | ((mismatch) << 26) | \
@@ -291,19 +334,24 @@ static const u32 default_src_caps[] = { 0x36019096 }; /* VSafe5V @ 1.5A */
static const u32 default_snk_caps[] = { 0x2601912C }; /* VSafe5V @ 3A */
struct vdm_tx {
- u32 data[7];
+ u32 data[PD_MAX_DATA_OBJ];
int size;
};
struct rx_msg {
- u8 type;
- u8 len;
- u32 payload[7];
+ u16 hdr;
+ u16 data_len; /* size of payload in bytes */
struct list_head entry;
+ u8 payload[];
};
-#define IS_DATA(m, t) ((m) && ((m)->len) && ((m)->type == (t)))
-#define IS_CTRL(m, t) ((m) && !((m)->len) && ((m)->type == (t)))
+#define IS_DATA(m, t) ((m) && !PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_CTRL(m, t) ((m) && !PD_MSG_HDR_COUNT((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
+#define IS_EXT(m, t) ((m) && PD_MSG_HDR_IS_EXTENDED((m)->hdr) && \
+ (PD_MSG_HDR_TYPE((m)->hdr) == (t)))
struct usbpd {
struct device dev;
@@ -318,8 +366,10 @@ struct usbpd {
bool hard_reset_recvd;
struct list_head rx_q;
spinlock_t rx_lock;
+ struct rx_msg *rx_ext_msg;
u32 received_pdos[PD_MAX_DATA_OBJ];
+ u32 received_ado;
u16 src_cap_id;
u8 selected_pdo;
u8 requested_pdo;
@@ -351,6 +401,8 @@ struct usbpd {
bool pd_phy_opened;
bool send_request;
struct completion is_ready;
+ struct completion tx_chunk_request;
+ u8 next_tx_chunk;
struct mutex swap_lock;
struct dual_role_phy_instance *dual_role;
@@ -377,6 +429,19 @@ struct usbpd {
struct list_head svid_handlers;
struct list_head instance;
+
+ /* ext msg support */
+ bool send_get_src_cap_ext;
+ u8 src_cap_ext_db[PD_SRC_CAP_EXT_DB_LEN];
+ bool send_get_pps_status;
+ u32 pps_status_db;
+ u8 status_db[PD_STATUS_DB_LEN];
+ bool send_get_battery_cap;
+ u8 get_battery_cap_db;
+ u8 battery_cap_db[PD_BATTERY_CAP_DB_LEN];
+ u8 get_battery_status_db;
+ bool send_get_battery_status;
+ u32 battery_sts_dobj;
};
static LIST_HEAD(_usbpd); /* useful for debugging */
@@ -498,6 +563,57 @@ static int pd_send_msg(struct usbpd *pd, u8 msg_type, const u32 *data,
return 0;
}
+static int pd_send_ext_msg(struct usbpd *pd, u8 msg_type,
+ const u8 *data, size_t data_len, enum pd_sop_type sop)
+{
+ int ret;
+ size_t len_remain, chunk_len;
+ u8 chunked_payload[PD_MAX_DATA_OBJ * sizeof(u32)] = {0};
+ u16 hdr;
+ u16 ext_hdr;
+ u8 num_objs;
+
+ if (data_len > PD_MAX_EXT_MSG_LEN) {
+ usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+ data_len = PD_MAX_EXT_MSG_LEN;
+ }
+
+ pd->next_tx_chunk = 0;
+ len_remain = data_len;
+ do {
+ ext_hdr = PD_MSG_EXT_HDR(1, pd->next_tx_chunk++, 0, data_len);
+ memcpy(chunked_payload, &ext_hdr, sizeof(ext_hdr));
+
+ chunk_len = min_t(size_t, len_remain,
+ PD_MAX_EXT_MSG_LEGACY_LEN);
+ memcpy(chunked_payload + sizeof(ext_hdr), data, chunk_len);
+
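+ /* each chunk carries the 2-byte extended header plus up to 26 data bytes, padded to whole 4-byte objects */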
+ num_objs = DIV_ROUND_UP(chunk_len + sizeof(u16), sizeof(u32));
+ len_remain -= chunk_len;
+
+ reinit_completion(&pd->tx_chunk_request);
+ hdr = PD_MSG_HDR(msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, num_objs, pd->spec_rev) |
+ PD_MSG_HDR_EXTENDED;
+ ret = pd_phy_write(hdr, chunked_payload,
+ num_objs * sizeof(u32), sop);
+ if (ret)
+ return ret;
+
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+
+ /* Wait for request chunk */
+ if (len_remain &&
+ !wait_for_completion_timeout(&pd->tx_chunk_request,
+ msecs_to_jiffies(SENDER_RESPONSE_TIME))) {
+ usbpd_err(&pd->dev, "Timed out waiting for chunk request\n");
+ return -EPROTO;
+ }
+ } while (len_remain);
+
+ return 0;
+}
+
static int pd_select_pdo(struct usbpd *pd, int pdo_pos, int uv, int ua)
{
int curr;
@@ -629,6 +745,150 @@ static void phy_sig_received(struct usbpd *pd, enum pd_sig_type sig)
kick_sm(pd, 0);
}
+struct pd_request_chunk {
+ struct work_struct w;
+ struct usbpd *pd;
+ u8 msg_type;
+ u8 chunk_num;
+ enum pd_sop_type sop;
+};
+
+static void pd_request_chunk_work(struct work_struct *w)
+{
+ struct pd_request_chunk *req =
+ container_of(w, struct pd_request_chunk, w);
+ struct usbpd *pd = req->pd;
+ unsigned long flags;
+ int ret;
+ u8 payload[4] = {0}; /* ext_hdr + padding */
+ u16 hdr = PD_MSG_HDR(req->msg_type, pd->current_dr, pd->current_pr,
+ pd->tx_msgid, 1, pd->spec_rev) | PD_MSG_HDR_EXTENDED;
+
+ *(u16 *)payload = PD_MSG_EXT_HDR(1, req->chunk_num, 1, 0);
+
+ ret = pd_phy_write(hdr, payload, sizeof(payload), req->sop);
+ if (!ret) {
+ pd->tx_msgid = (pd->tx_msgid + 1) & PD_MAX_MSG_ID;
+ } else {
+ usbpd_err(&pd->dev, "could not send chunk request\n");
+
+ /* queue what we have anyway */
+ spin_lock_irqsave(&pd->rx_lock, flags);
+ list_add_tail(&pd->rx_ext_msg->entry, &pd->rx_q);
+ spin_unlock_irqrestore(&pd->rx_lock, flags);
+
+ pd->rx_ext_msg = NULL;
+ }
+
+ kfree(req);
+}
+
+static struct rx_msg *pd_ext_msg_received(struct usbpd *pd, u16 header, u8 *buf,
+ size_t len, enum pd_sop_type sop)
+{
+ struct rx_msg *rx_msg;
+ u16 bytes_to_copy;
+ u16 ext_hdr = *(u16 *)buf;
+ u8 chunk_num;
+
+ if (!PD_MSG_EXT_HDR_IS_CHUNKED(ext_hdr)) {
+ usbpd_err(&pd->dev, "unchunked extended messages unsupported\n");
+ return NULL;
+ }
+
+ /* request for next Tx chunk */
+ if (PD_MSG_EXT_HDR_REQ_CHUNK(ext_hdr)) {
+ if (PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr) ||
+ PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr) !=
+ pd->next_tx_chunk) {
+ usbpd_err(&pd->dev, "invalid request chunk ext header 0x%02x\n",
+ ext_hdr);
+ return NULL;
+ }
+
+ if (!completion_done(&pd->tx_chunk_request))
+ complete(&pd->tx_chunk_request);
+
+ return NULL;
+ }
+
+ chunk_num = PD_MSG_EXT_HDR_CHUNK_NUM(ext_hdr);
+ if (!chunk_num) {
+ /* allocate new message if first chunk */
+ rx_msg = kzalloc(sizeof(*rx_msg) +
+ PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr),
+ GFP_KERNEL);
+ if (!rx_msg)
+ return NULL;
+
+ rx_msg->hdr = header;
+ rx_msg->data_len = PD_MSG_EXT_HDR_DATA_SIZE(ext_hdr);
+
+ if (rx_msg->data_len > PD_MAX_EXT_MSG_LEN) {
+ usbpd_warn(&pd->dev, "Extended message length exceeds max, truncating...\n");
+ rx_msg->data_len = PD_MAX_EXT_MSG_LEN;
+ }
+ } else {
+ if (!pd->rx_ext_msg) {
+ usbpd_err(&pd->dev, "missing first rx_ext_msg chunk\n");
+ return NULL;
+ }
+
+ rx_msg = pd->rx_ext_msg;
+ }
+
+ /*
+ * The amount to copy is derived as follows:
+ *
+ * - if extended data_len < 26, then copy data_len bytes
+ * - for chunks 0..N-2, copy 26 bytes
+ * - for the last chunk (N-1), copy the remainder
+ */
+ bytes_to_copy =
+ min((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN),
+ PD_MAX_EXT_MSG_LEGACY_LEN);
+
+ /* check against received length to avoid overrun */
+ if (bytes_to_copy > len - sizeof(ext_hdr)) {
+ usbpd_warn(&pd->dev, "not enough bytes in chunk, expected:%u received:%zu\n",
+ bytes_to_copy, len - sizeof(ext_hdr));
+ bytes_to_copy = len - sizeof(ext_hdr);
+ }
+
+ memcpy(rx_msg->payload + chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN, buf + 2,
+ bytes_to_copy);
+
+ /* request next chunk? */
+ if ((rx_msg->data_len - chunk_num * PD_MAX_EXT_MSG_LEGACY_LEN) >
+ PD_MAX_EXT_MSG_LEGACY_LEN) {
+ struct pd_request_chunk *req;
+
+ if (pd->rx_ext_msg && pd->rx_ext_msg != rx_msg) {
+ usbpd_dbg(&pd->dev, "stale previous rx_ext_msg?\n");
+ kfree(pd->rx_ext_msg);
+ }
+
+ pd->rx_ext_msg = rx_msg;
+
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ goto queue_rx; /* return what we have anyway */
+
+ INIT_WORK(&req->w, pd_request_chunk_work);
+ req->pd = pd;
+ req->msg_type = PD_MSG_HDR_TYPE(header);
+ req->chunk_num = chunk_num + 1;
+ req->sop = sop;
+ queue_work(pd->wq, &req->w);
+
+ return NULL;
+ }
+
+queue_rx:
+ pd->rx_ext_msg = NULL;
+ return rx_msg; /* queue it for usbpd_sm */
+}
+
static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
u8 *buf, size_t len)
{
@@ -676,21 +936,31 @@ static void phy_msg_received(struct usbpd *pd, enum pd_sop_type sop,
return;
}
- rx_msg = kzalloc(sizeof(*rx_msg), GFP_KERNEL);
- if (!rx_msg)
- return;
+ /* if spec rev differs (i.e. is older), update PHY */
+ if (PD_MSG_HDR_REV(header) < pd->spec_rev)
+ pd->spec_rev = PD_MSG_HDR_REV(header);
+
+ usbpd_dbg(&pd->dev, "received message: type(%d) num_objs(%d)\n",
+ PD_MSG_HDR_TYPE(header), PD_MSG_HDR_COUNT(header));
+
+ if (!PD_MSG_HDR_IS_EXTENDED(header)) {
+ rx_msg = kzalloc(sizeof(*rx_msg) + len, GFP_KERNEL);
+ if (!rx_msg)
+ return;
- rx_msg->type = PD_MSG_HDR_TYPE(header);
- rx_msg->len = PD_MSG_HDR_COUNT(header);
- memcpy(&rx_msg->payload, buf, min(len, sizeof(rx_msg->payload)));
+ rx_msg->hdr = header;
+ rx_msg->data_len = len;
+ memcpy(rx_msg->payload, buf, len);
+ } else {
+ rx_msg = pd_ext_msg_received(pd, header, buf, len, sop);
+ if (!rx_msg)
+ return;
+ }
spin_lock_irqsave(&pd->rx_lock, flags);
list_add_tail(&rx_msg->entry, &pd->rx_q);
spin_unlock_irqrestore(&pd->rx_lock, flags);
- usbpd_dbg(&pd->dev, "received message: type(%d) len(%d)\n",
- rx_msg->type, rx_msg->len);
-
kick_sm(pd, 0);
}
@@ -1140,11 +1410,13 @@ EXPORT_SYMBOL(usbpd_send_svdm);
static void handle_vdm_rx(struct usbpd *pd, struct rx_msg *rx_msg)
{
- u32 vdm_hdr = rx_msg->payload[0];
- u32 *vdos = &rx_msg->payload[1];
+ u32 vdm_hdr =
+ rx_msg->data_len >= sizeof(u32) ? ((u32 *)rx_msg->payload)[0] : 0;
+
+ u32 *vdos = (u32 *)&rx_msg->payload[sizeof(u32)];
u16 svid = VDM_HDR_SVID(vdm_hdr);
u16 *psvid;
- u8 i, num_vdos = rx_msg->len - 1; /* num objects minus header */
+ u8 i, num_vdos = PD_MSG_HDR_COUNT(rx_msg->hdr) - 1;
u8 cmd = SVDM_HDR_CMD(vdm_hdr);
u8 cmd_type = SVDM_HDR_CMD_TYPE(vdm_hdr);
bool has_dp = false;
@@ -1757,7 +2029,7 @@ static void usbpd_sm(struct work_struct *w)
case PE_SRC_SEND_CAPABILITIES_WAIT:
if (IS_DATA(rx_msg, MSG_REQUEST)) {
- pd->rdo = rx_msg->payload[0];
+ pd->rdo = *(u32 *)rx_msg->payload;
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
} else if (rx_msg) {
usbpd_err(&pd->dev, "Unexpected message received\n");
@@ -1780,7 +2052,7 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
}
} else if (IS_DATA(rx_msg, MSG_REQUEST)) {
- pd->rdo = rx_msg->payload[0];
+ pd->rdo = *(u32 *)rx_msg->payload;
usbpd_set_state(pd, PE_SRC_NEGOTIATE_CAPABILITY);
} else if (IS_CTRL(rx_msg, MSG_DR_SWAP)) {
if (pd->vdm_state == MODE_ENTERED) {
@@ -1822,6 +2094,15 @@ static void usbpd_sm(struct work_struct *w)
vconn_swap(pd);
} else if (IS_DATA(rx_msg, MSG_VDM)) {
handle_vdm_rx(pd, rx_msg);
+ } else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+ /* unhandled messages */
+ ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+ SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Not supported\n");
+ usbpd_set_state(pd, PE_SRC_SEND_SOFT_RESET);
+ }
+ break;
} else if (pd->send_pr_swap) {
pd->send_pr_swap = false;
ret = pd_send_msg(pd, MSG_PR_SWAP, NULL, 0, SOP_MSG);
@@ -2062,7 +2343,8 @@ static void usbpd_sm(struct work_struct *w)
usbpd_err(&pd->dev, "Error sending Sink Caps\n");
usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
}
- } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_GET_SOURCE_CAP) &&
+ pd->spec_rev == USBPD_REV_20) {
ret = pd_send_msg(pd, MSG_SOURCE_CAPABILITIES,
default_src_caps,
ARRAY_SIZE(default_src_caps), SOP_MSG);
@@ -2085,7 +2367,8 @@ static void usbpd_sm(struct work_struct *w)
}
dr_swap(pd);
- } else if (IS_CTRL(rx_msg, MSG_PR_SWAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_PR_SWAP) &&
+ pd->spec_rev == USBPD_REV_20) {
/* lock in current mode */
set_power_role(pd, pd->current_pr);
@@ -2103,7 +2386,8 @@ static void usbpd_sm(struct work_struct *w)
POWER_SUPPLY_PROP_PR_SWAP, &val);
usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
- } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP)) {
+ } else if (IS_CTRL(rx_msg, MSG_VCONN_SWAP) &&
+ pd->spec_rev == USBPD_REV_20) {
/*
* if VCONN is connected to VBUS, make sure we are
* not in high voltage contract, otherwise reject.
@@ -2131,6 +2415,120 @@ static void usbpd_sm(struct work_struct *w)
vconn_swap(pd);
} else if (IS_DATA(rx_msg, MSG_VDM)) {
handle_vdm_rx(pd, rx_msg);
+ } else if (pd->send_get_src_cap_ext && is_sink_tx_ok(pd)) {
+ pd->send_get_src_cap_ext = false;
+ ret = pd_send_msg(pd, MSG_GET_SOURCE_CAP_EXTENDED, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_src_cap_ext\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_SOURCE_CAPABILITIES_EXTENDED)) {
+ if (rx_msg->data_len != PD_SRC_CAP_EXT_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid src cap ext db\n");
+ break;
+ }
+ memcpy(&pd->src_cap_ext_db, rx_msg->payload,
+ sizeof(pd->src_cap_ext_db));
+ complete(&pd->is_ready);
+ } else if (pd->send_get_pps_status && is_sink_tx_ok(pd)) {
+ pd->send_get_pps_status = false;
+ ret = pd_send_msg(pd, MSG_GET_PPS_STATUS, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_pps_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_PPS_STATUS)) {
+ if (rx_msg->data_len != sizeof(pd->pps_status_db)) {
+ usbpd_err(&pd->dev, "Invalid pps status db\n");
+ break;
+ }
+ memcpy(&pd->pps_status_db, rx_msg->payload,
+ sizeof(pd->pps_status_db));
+ complete(&pd->is_ready);
+ } else if (IS_DATA(rx_msg, MSG_ALERT)) {
+ if (rx_msg->data_len != sizeof(pd->received_ado)) {
+ usbpd_err(&pd->dev, "Invalid ado\n");
+ break;
+ }
+ memcpy(&pd->received_ado, rx_msg->payload,
+ sizeof(pd->received_ado));
+ ret = pd_send_msg(pd, MSG_GET_STATUS, NULL,
+ 0, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_STATUS)) {
+ if (rx_msg->data_len != PD_STATUS_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid status db\n");
+ break;
+ }
+ memcpy(&pd->status_db, rx_msg->payload,
+ sizeof(pd->status_db));
+ kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
+ } else if (pd->send_get_battery_cap && is_sink_tx_ok(pd)) {
+ pd->send_get_battery_cap = false;
+ ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_CAP,
+ &pd->get_battery_cap_db, 1, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_battery_cap\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_BATTERY_CAPABILITIES)) {
+ if (rx_msg->data_len != PD_BATTERY_CAP_DB_LEN) {
+ usbpd_err(&pd->dev, "Invalid battery cap db\n");
+ break;
+ }
+ memcpy(&pd->battery_cap_db, rx_msg->payload,
+ sizeof(pd->battery_cap_db));
+ complete(&pd->is_ready);
+ } else if (pd->send_get_battery_status && is_sink_tx_ok(pd)) {
+ pd->send_get_battery_status = false;
+ ret = pd_send_ext_msg(pd, MSG_GET_BATTERY_STATUS,
+ &pd->get_battery_status_db, 1, SOP_MSG);
+ if (ret) {
+ dev_err(&pd->dev,
+ "Error sending get_battery_status\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ break;
+ }
+ kick_sm(pd, SENDER_RESPONSE_TIME);
+ } else if (rx_msg &&
+ IS_EXT(rx_msg, MSG_BATTERY_STATUS)) {
+ if (rx_msg->data_len != sizeof(pd->battery_sts_dobj)) {
+ usbpd_err(&pd->dev, "Invalid bat sts dobj\n");
+ break;
+ }
+ memcpy(&pd->battery_sts_dobj, rx_msg->payload,
+ sizeof(pd->battery_sts_dobj));
+ complete(&pd->is_ready);
+ } else if (rx_msg && pd->spec_rev == USBPD_REV_30) {
+ /* unhandled messages */
+ ret = pd_send_msg(pd, MSG_NOT_SUPPORTED, NULL, 0,
+ SOP_MSG);
+ if (ret) {
+ usbpd_err(&pd->dev, "Error sending Not supported\n");
+ usbpd_set_state(pd, PE_SNK_SEND_SOFT_RESET);
+ }
+ break;
} else if (pd->send_request) {
pd->send_request = false;
usbpd_set_state(pd, PE_SNK_SELECT_CAPABILITY);
@@ -2779,6 +3177,10 @@ static int usbpd_uevent(struct device *dev, struct kobj_uevent_env *env)
"explicit" : "implicit");
add_uevent_var(env, "ALT_MODE=%d", pd->vdm_state == MODE_ENTERED);
+ add_uevent_var(env, "ADO=%08x", pd->received_ado);
+ for (i = 0; i < PD_STATUS_DB_LEN; i++)
+ add_uevent_var(env, "SDB%d=%08x", i, pd->status_db[i]);
+
return 0;
}
@@ -3126,6 +3528,145 @@ static ssize_t hard_reset_store(struct device *dev,
}
static DEVICE_ATTR_WO(hard_reset);
+static int trigger_tx_msg(struct usbpd *pd, bool *msg_tx_flag)
+{
+ int ret = 0;
+
+ /* Only allowed if we are already in explicit sink contract */
+ if (pd->current_state != PE_SNK_READY || !is_sink_tx_ok(pd)) {
+ usbpd_err(&pd->dev, "%s: Cannot send msg\n", __func__);
+ ret = -EBUSY;
+ goto out;
+ }
+
+ reinit_completion(&pd->is_ready);
+ *msg_tx_flag = true;
+ kick_sm(pd, 0);
+
+ /* wait for operation to complete */
+ if (!wait_for_completion_timeout(&pd->is_ready,
+ msecs_to_jiffies(1000))) {
+ usbpd_err(&pd->dev, "%s: request timed out\n", __func__);
+ ret = -ETIMEDOUT;
+ }
+
+out:
+ *msg_tx_flag = false;
+ return ret;
+
+}
+
+static ssize_t get_src_cap_ext_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, ret, len = 0;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->spec_rev == USBPD_REV_20)
+ return -EINVAL;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_src_cap_ext);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < PD_SRC_CAP_EXT_DB_LEN; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+ pd->src_cap_ext_db[i]);
+ return len;
+}
+static DEVICE_ATTR_RO(get_src_cap_ext);
+
+static ssize_t get_pps_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int ret;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->spec_rev == USBPD_REV_20)
+ return -EINVAL;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_pps_status);
+ if (ret)
+ return ret;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pd->pps_status_db);
+}
+static DEVICE_ATTR_RO(get_pps_status);
+
+static ssize_t rx_ado_show(struct device *dev, struct device_attribute *attr,
+ char *buf)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ /* dump the ADO as a hex string */
+ return snprintf(buf, PAGE_SIZE, "%08x\n", pd->received_ado);
+}
+static DEVICE_ATTR_RO(rx_ado);
+
+static ssize_t get_battery_cap_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+ int val, ret;
+
+ if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+ pd->get_battery_cap_db = -EINVAL;
+ return -EINVAL;
+ }
+
+ pd->get_battery_cap_db = val;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_battery_cap);
+
+ return ret ? ret : size;
+}
+
+static ssize_t get_battery_cap_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ int i, len = 0;
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->get_battery_cap_db == -EINVAL)
+ return -EINVAL;
+
+ for (i = 0; i < PD_BATTERY_CAP_DB_LEN; i++)
+ len += snprintf(buf + len, PAGE_SIZE - len, "%d\n",
+ pd->battery_cap_db[i]);
+ return len;
+}
+static DEVICE_ATTR_RW(get_battery_cap);
+
+static ssize_t get_battery_status_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+ int val, ret;
+
+ if (pd->spec_rev == USBPD_REV_20 || sscanf(buf, "%d\n", &val) != 1) {
+ pd->get_battery_status_db = -EINVAL;
+ return -EINVAL;
+ }
+
+ pd->get_battery_status_db = val;
+
+ ret = trigger_tx_msg(pd, &pd->send_get_battery_status);
+
+ return ret ? ret : size;
+}
+
+static ssize_t get_battery_status_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ struct usbpd *pd = dev_get_drvdata(dev);
+
+ if (pd->get_battery_status_db == -EINVAL)
+ return -EINVAL;
+
+ return snprintf(buf, PAGE_SIZE, "%d\n", pd->battery_sts_dobj);
+}
+static DEVICE_ATTR_RW(get_battery_status);
+
static struct attribute *usbpd_attrs[] = {
&dev_attr_contract.attr,
&dev_attr_initial_pr.attr,
@@ -3145,6 +3686,11 @@ static struct attribute *usbpd_attrs[] = {
&dev_attr_rdo.attr,
&dev_attr_rdo_h.attr,
&dev_attr_hard_reset.attr,
+ &dev_attr_get_src_cap_ext.attr,
+ &dev_attr_get_pps_status.attr,
+ &dev_attr_rx_ado.attr,
+ &dev_attr_get_battery_cap.attr,
+ &dev_attr_get_battery_status.attr,
NULL,
};
ATTRIBUTE_GROUPS(usbpd);
@@ -3375,6 +3921,7 @@ struct usbpd *usbpd_create(struct device *parent)
INIT_LIST_HEAD(&pd->rx_q);
INIT_LIST_HEAD(&pd->svid_handlers);
init_completion(&pd->is_ready);
+ init_completion(&pd->tx_chunk_request);
pd->psy_nb.notifier_call = psy_changed;
ret = power_supply_reg_notifier(&pd->psy_nb);
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 4c6a5e73406b..bc325a91a9bf 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -68,6 +68,7 @@ static int mdss_dp_process_phy_test_pattern_request(
struct mdss_dp_drv_pdata *dp);
static int mdss_dp_send_audio_notification(
struct mdss_dp_drv_pdata *dp, int val);
+static void mdss_dp_reset_sw_state(struct mdss_dp_drv_pdata *dp);
static inline void mdss_dp_reset_sink_count(struct mdss_dp_drv_pdata *dp)
{
@@ -1489,7 +1490,12 @@ static int mdss_dp_setup_main_link(struct mdss_dp_drv_pdata *dp, bool train)
pr_debug("enter\n");
mdss_dp_mainlink_ctrl(&dp->ctrl_io, true);
- mdss_dp_aux_set_sink_power_state(dp, SINK_POWER_ON);
+ ret = mdss_dp_aux_send_psm_request(dp, false);
+ if (ret) {
+ pr_err("Failed to exit low power mode, rc=%d\n", ret);
+ goto end;
+ }
+
reinit_completion(&dp->video_comp);
if (mdss_dp_is_phy_test_pattern_requested(dp))
@@ -1576,15 +1582,6 @@ static int mdss_dp_on_irq(struct mdss_dp_drv_pdata *dp_drv, bool lt_needed)
dp_drv->power_on = true;
- if (dp_drv->psm_enabled) {
- ret = mdss_dp_aux_send_psm_request(dp_drv, false);
- if (ret) {
- pr_err("Failed to exit low power mode, rc=%d\n",
- ret);
- goto exit_loop;
- }
- }
-
ret = mdss_dp_setup_main_link(dp_drv, lt_needed);
exit_loop:
@@ -1653,15 +1650,6 @@ int mdss_dp_on_hpd(struct mdss_dp_drv_pdata *dp_drv)
mdss_dp_configure_source_params(dp_drv, ln_map);
- if (dp_drv->psm_enabled) {
- ret = mdss_dp_aux_send_psm_request(dp_drv, false);
- if (ret) {
- pr_err("Failed to exit low power mode, rc=%d\n", ret);
- goto exit;
- }
- }
-
-
link_training:
dp_drv->power_on = true;
@@ -2989,6 +2977,7 @@ static int mdss_dp_sysfs_create(struct mdss_dp_drv_pdata *dp,
static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
{
+ bool cable_connected;
struct mdss_dp_drv_pdata *dp_drv = NULL;
const int idle_pattern_completion_timeout_ms = 3 * HZ / 100;
@@ -3009,6 +2998,14 @@ static void mdss_dp_mainlink_push_idle(struct mdss_panel_data *pdata)
return;
}
+ /* power down the sink if cable is still connected */
+ mutex_lock(&dp_drv->attention_lock);
+ cable_connected = dp_drv->cable_connected;
+ mutex_unlock(&dp_drv->attention_lock);
+ if (cable_connected && dp_drv->alt_mode.dp_status.hpd_high) {
+ if (mdss_dp_aux_send_psm_request(dp_drv, true))
+ pr_err("Failed to enter low power mode\n");
+ }
reinit_completion(&dp_drv->idle_comp);
mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
if (!wait_for_completion_timeout(&dp_drv->idle_comp,
@@ -3129,6 +3126,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
pr_err("DP Controller not powered on\n");
break;
}
+ if (!atomic_read(&dp->notification_pending)) {
+ pr_debug("blank when cable is connected\n");
+ kthread_park(dp->ev_thread);
+ }
if (dp_is_hdcp_enabled(dp)) {
dp->hdcp_status = HDCP_STATE_INACTIVE;
@@ -3168,8 +3169,10 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
* when you connect DP sink while the
* device is in suspend state.
*/
- if ((!dp->power_on) && (dp->dp_initialized))
+ if ((!dp->power_on) && (dp->dp_initialized)) {
rc = mdss_dp_host_deinit(dp);
+ kthread_park(dp->ev_thread);
+ }
/*
* For DP suspend/resume use case, CHECK_PARAMS is
@@ -3181,8 +3184,11 @@ static int mdss_dp_event_handler(struct mdss_panel_data *pdata,
dp->suspend_vic = dp->vic;
break;
case MDSS_EVENT_RESUME:
- if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN)
+ if (dp->suspend_vic != HDMI_VFRMT_UNKNOWN) {
dp_init_panel_info(dp, dp->suspend_vic);
+ mdss_dp_reset_sw_state(dp);
+ kthread_unpark(dp->ev_thread);
+ }
break;
default:
pr_debug("unhandled event=%d\n", event);
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index afa8e3db590f..983f5e34a515 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -218,10 +218,6 @@ struct dp_alt_mode {
#define ST_SEND_VIDEO BIT(7)
#define ST_PUSH_IDLE BIT(8)
-/* sink power state */
-#define SINK_POWER_ON 1
-#define SINK_POWER_OFF 2
-
#define DP_LINK_RATE_162 6 /* 1.62G = 270M * 6 */
#define DP_LINK_RATE_270 10 /* 2.70G = 270M * 10 */
#define DP_LINK_RATE_540 20 /* 5.40G = 270M * 20 */
@@ -1181,11 +1177,9 @@ void dp_aux_native_handler(struct mdss_dp_drv_pdata *dp, u32 isr);
void mdss_dp_aux_init(struct mdss_dp_drv_pdata *ep);
void mdss_dp_fill_link_cfg(struct mdss_dp_drv_pdata *ep);
-void mdss_dp_sink_power_down(struct mdss_dp_drv_pdata *ep);
void mdss_dp_lane_power_ctrl(struct mdss_dp_drv_pdata *ep, int up);
void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *ep);
char mdss_dp_gen_link_clk(struct mdss_dp_drv_pdata *dp);
-int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state);
int mdss_dp_aux_send_psm_request(struct mdss_dp_drv_pdata *dp, bool enable);
void mdss_dp_aux_send_test_response(struct mdss_dp_drv_pdata *ep);
void *mdss_dp_get_hdcp_data(struct device *dev);
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index c0632e8241a0..86946adfeeb0 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -2556,15 +2556,6 @@ static int dp_link_rate_down_shift(struct mdss_dp_drv_pdata *ep)
return ret;
}
-int mdss_dp_aux_set_sink_power_state(struct mdss_dp_drv_pdata *ep, char state)
-{
- int ret;
-
- ret = dp_aux_write_buf(ep, 0x600, &state, 1, 0);
- pr_debug("state=%d ret=%d\n", state, ret);
- return ret;
-}
-
static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep)
{
int usleep_time;
diff --git a/drivers/video/fbdev/msm/mdss_dsi.c b/drivers/video/fbdev/msm/mdss_dsi.c
index 5f7e7c6bcde0..7b6153503af5 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.c
+++ b/drivers/video/fbdev/msm/mdss_dsi.c
@@ -968,7 +968,7 @@ static int mdss_dsi_cmd_flush(struct file *file, fl_owner_t id)
while (len >= sizeof(*dchdr)) {
dchdr = (struct dsi_ctrl_hdr *)bp;
dchdr->dlen = ntohs(dchdr->dlen);
- if (dchdr->dlen > len) {
+ if (dchdr->dlen > len || dchdr->dlen < 0) {
pr_err("%s: dtsi cmd=%x error, len=%d\n",
__func__, dchdr->dtype, dchdr->dlen);
kfree(buf);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_host.c b/drivers/video/fbdev/msm/mdss_dsi_host.c
index c766ff983045..fca1d37b40bb 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_host.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_host.c
@@ -1512,6 +1512,34 @@ static int mdss_dsi_wait4video_eng_busy(struct mdss_dsi_ctrl_pdata *ctrl)
return ret;
}
+static void mdss_dsi_wait4active_region(struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ int in_blanking = 0;
+ int retry_count = 0;
+
+ if (ctrl->panel_mode != DSI_VIDEO_MODE)
+ return;
+
+ while (retry_count != MAX_BTA_WAIT_RETRY) {
+ mdss_dsi_wait4video_eng_busy(ctrl);
+ in_blanking = ctrl->mdp_callback->fxn(
+ ctrl->mdp_callback->data,
+ MDP_INTF_CALLBACK_CHECK_LINE_COUNT);
+
+ if (in_blanking) {
+ pr_debug("%s: not in active region\n", __func__);
+ retry_count++;
+ } else
+ break;
+	}
+
+ if (retry_count == MAX_BTA_WAIT_RETRY)
+ MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
+ "dsi0_phy", "dsi1_ctrl", "dsi1_phy",
+ "vbif", "vbif_nrt", "dbg_bus",
+ "vbif_dbg_bus", "dsi_dbg_bus", "panic");
+}
+
/**
* mdss_dsi_bta_status_check() - Check dsi panel status through bta check
* @ctrl_pdata: pointer to the dsi controller structure
@@ -1527,8 +1555,6 @@ int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
int ret = 0;
unsigned long flag;
int ignore_underflow = 0;
- int retry_count = 0;
- int in_blanking = 0;
if (ctrl_pdata == NULL) {
pr_err("%s: Invalid input data\n", __func__);
@@ -1554,24 +1580,8 @@ int mdss_dsi_bta_status_check(struct mdss_dsi_ctrl_pdata *ctrl_pdata)
reinit_completion(&ctrl_pdata->bta_comp);
mdss_dsi_enable_irq(ctrl_pdata, DSI_BTA_TERM);
spin_unlock_irqrestore(&ctrl_pdata->mdp_lock, flag);
-wait:
- mdss_dsi_wait4video_eng_busy(ctrl_pdata);
- if (ctrl_pdata->panel_mode == DSI_VIDEO_MODE) {
- in_blanking = ctrl_pdata->mdp_callback->fxn(
- ctrl_pdata->mdp_callback->data,
- MDP_INTF_CALLBACK_CHECK_LINE_COUNT);
- /* Try for maximum of 5 attempts */
- if (in_blanking && (retry_count < MAX_BTA_WAIT_RETRY)) {
- pr_debug("%s: not in active region\n", __func__);
- retry_count++;
- goto wait;
- }
- }
- if (retry_count == MAX_BTA_WAIT_RETRY)
- MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
- "dsi0_phy", "dsi1_ctrl", "dsi1_phy",
- "vbif", "vbif_nrt", "dbg_bus",
- "vbif_dbg_bus", "dsi_dbg_bus", "panic");
+
+ mdss_dsi_wait4active_region(ctrl_pdata);
/* mask out overflow errors */
if (ignore_underflow)
@@ -1991,7 +2001,7 @@ do_send:
goto end;
}
- mdss_dsi_wait4video_eng_busy(ctrl);
+ mdss_dsi_wait4active_region(ctrl);
mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
if (use_dma_tpg)
@@ -2029,7 +2039,7 @@ skip_max_pkt_size:
wmb(); /* make sure the RDBK registers are cleared */
}
- mdss_dsi_wait4video_eng_busy(ctrl); /* video mode only */
+ mdss_dsi_wait4active_region(ctrl);
mdss_dsi_enable_irq(ctrl, DSI_CMD_TERM);
	/* transmit read command to client */
if (use_dma_tpg)
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.c b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
index af95a4a6dccd..a5a407708334 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.c
@@ -113,6 +113,7 @@ static void hdmi_tx_fps_work(struct work_struct *work);
static int hdmi_tx_pinctrl_set_state(struct hdmi_tx_ctrl *hdmi_ctrl,
enum hdmi_tx_power_module_type module, bool active);
static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl);
+static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *hdmi_ctrl);
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params);
static int hdmi_tx_get_audio_edid_blk(struct platform_device *pdev,
@@ -1276,6 +1277,7 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev,
{
int ret = 0;
struct hdmi_tx_ctrl *ctrl = NULL;
+ u8 hdr_op;
ctrl = hdmi_tx_get_drvdata_from_sysfs_dev(dev);
if (!ctrl) {
@@ -1296,36 +1298,43 @@ static ssize_t hdmi_tx_sysfs_wta_hdr_stream(struct device *dev,
goto end;
}
- memcpy(&ctrl->hdr_data, buf, sizeof(struct mdp_hdr_stream));
+ memcpy(&ctrl->hdr_ctrl, buf, sizeof(struct mdp_hdr_stream_ctrl));
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.eotf,
- ctrl->hdr_data.display_primaries_x[0],
- ctrl->hdr_data.display_primaries_y[0],
- ctrl->hdr_data.display_primaries_x[1],
- ctrl->hdr_data.display_primaries_y[1],
- ctrl->hdr_data.display_primaries_x[2],
- ctrl->hdr_data.display_primaries_y[2]);
+ ctrl->hdr_ctrl.hdr_stream.eotf,
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[0],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[1],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2],
+ ctrl->hdr_ctrl.hdr_stream.display_primaries_y[2]);
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.white_point_x,
- ctrl->hdr_data.white_point_y,
- ctrl->hdr_data.max_luminance,
- ctrl->hdr_data.min_luminance,
- ctrl->hdr_data.max_content_light_level,
- ctrl->hdr_data.max_average_light_level);
+ ctrl->hdr_ctrl.hdr_stream.white_point_x,
+ ctrl->hdr_ctrl.hdr_stream.white_point_y,
+ ctrl->hdr_ctrl.hdr_stream.max_luminance,
+ ctrl->hdr_ctrl.hdr_stream.min_luminance,
+ ctrl->hdr_ctrl.hdr_stream.max_content_light_level,
+ ctrl->hdr_ctrl.hdr_stream.max_average_light_level);
pr_debug("%s: 0x%04x 0x%04x 0x%04x 0x%04x 0x%04x\n",
__func__,
- ctrl->hdr_data.pixel_encoding,
- ctrl->hdr_data.colorimetry,
- ctrl->hdr_data.range,
- ctrl->hdr_data.bits_per_component,
- ctrl->hdr_data.content_type);
+ ctrl->hdr_ctrl.hdr_stream.pixel_encoding,
+ ctrl->hdr_ctrl.hdr_stream.colorimetry,
+ ctrl->hdr_ctrl.hdr_stream.range,
+ ctrl->hdr_ctrl.hdr_stream.bits_per_component,
+ ctrl->hdr_ctrl.hdr_stream.content_type);
+ hdr_op = hdmi_hdr_get_ops(ctrl->curr_hdr_state,
+ ctrl->hdr_ctrl.hdr_state);
- hdmi_panel_set_hdr_infoframe(ctrl);
+ if (hdr_op == HDR_SEND_INFO)
+ hdmi_panel_set_hdr_infoframe(ctrl);
+ else if (hdr_op == HDR_CLEAR_INFO)
+ hdmi_panel_clear_hdr_infoframe(ctrl);
+
+ ctrl->curr_hdr_state = ctrl->hdr_ctrl.hdr_state;
ret = strnlen(buf, PAGE_SIZE);
end:
@@ -2113,6 +2122,8 @@ static int hdmi_tx_init_features(struct hdmi_tx_ctrl *hdmi_ctrl,
goto err;
}
+ /* reset HDR state */
+ hdmi_ctrl->curr_hdr_state = HDR_DISABLE;
return 0;
err:
hdmi_tx_deinit_features(hdmi_ctrl, deinit_features);
@@ -2878,11 +2889,12 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
packet_header = type_code | (version << 8) | (length << 16);
DSS_REG_W(io, HDMI_GENERIC0_HDR, packet_header);
- packet_payload = (ctrl->hdr_data.eotf << 8);
+ packet_payload = (ctrl->hdr_ctrl.hdr_stream.eotf << 8);
if (hdmi_tx_metadata_type_one(ctrl)) {
- packet_payload |= (descriptor_id << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[0])
- << 24);
+ packet_payload |=
+ (descriptor_id << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[0]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_0, packet_payload);
} else {
pr_debug("%s: Metadata Type 1 not supported\n", __func__);
@@ -2891,44 +2903,56 @@ static void hdmi_panel_set_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
}
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[0]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[0]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[0]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[1]) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[0]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[0]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[0]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[1]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_1, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[1]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[1]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[1]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_x[2]) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[1]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[1]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[1]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_x[2]) << 24);
DSS_REG_W(io, HDMI_GENERIC0_2, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_x[2]))
- | (HDMI_GET_LSB(ctrl->hdr_data.display_primaries_y[2]) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.display_primaries_y[2]) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.white_point_x) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.display_primaries_x[2]))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[2]) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ display_primaries_y[2]) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_x) << 24);
DSS_REG_W(io, HDMI_GENERIC0_3, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.white_point_x))
- | (HDMI_GET_LSB(ctrl->hdr_data.white_point_y) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.white_point_y) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.max_luminance) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_x))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.white_point_y) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.max_luminance) << 24);
DSS_REG_W(io, HDMI_GENERIC0_4, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.max_luminance))
- | (HDMI_GET_LSB(ctrl->hdr_data.min_luminance) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.min_luminance) << 16)
- | (HDMI_GET_LSB(ctrl->hdr_data.max_content_light_level) << 24);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.max_luminance))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.min_luminance) << 16)
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ max_content_light_level) << 24);
DSS_REG_W(io, HDMI_GENERIC0_5, packet_payload);
packet_payload =
- (HDMI_GET_MSB(ctrl->hdr_data.max_content_light_level))
- | (HDMI_GET_LSB(ctrl->hdr_data.max_average_light_level) << 8)
- | (HDMI_GET_MSB(ctrl->hdr_data.max_average_light_level) << 16);
+ (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ max_content_light_level))
+ | (HDMI_GET_LSB(ctrl->hdr_ctrl.hdr_stream.
+ max_average_light_level) << 8)
+ | (HDMI_GET_MSB(ctrl->hdr_ctrl.hdr_stream.
+ max_average_light_level) << 16);
DSS_REG_W(io, HDMI_GENERIC0_6, packet_payload);
enable_packet_control:
@@ -2943,6 +2967,32 @@ enable_packet_control:
DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
}
+static void hdmi_panel_clear_hdr_infoframe(struct hdmi_tx_ctrl *ctrl)
+{
+ u32 packet_control = 0;
+ struct dss_io_data *io = NULL;
+
+ if (!ctrl) {
+ pr_err("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (!hdmi_tx_is_hdr_supported(ctrl)) {
+ pr_err("%s: Sink does not support HDR\n", __func__);
+ return;
+ }
+
+ io = &ctrl->pdata.io[HDMI_TX_CORE_IO];
+ if (!io->base) {
+		pr_err("%s: core io not initialized\n", __func__);
+ return;
+ }
+
+ packet_control = DSS_REG_R_ND(io, HDMI_GEN_PKT_CTRL);
+ packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK;
+ DSS_REG_W(io, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
static int hdmi_tx_audio_info_setup(struct platform_device *pdev,
struct msm_ext_disp_audio_setup_params *params)
{
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_tx.h b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
index 3469b8a5819f..ad02003631f6 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_tx.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_tx.h
@@ -21,6 +21,7 @@
#include "mdss_hdmi_audio.h"
#define MAX_SWITCH_NAME_SIZE 5
+#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x7
enum hdmi_tx_io_type {
HDMI_TX_CORE_IO,
@@ -90,7 +91,7 @@ struct hdmi_tx_ctrl {
struct msm_ext_disp_audio_setup_params audio_params;
struct msm_ext_disp_init_data ext_audio_data;
struct work_struct fps_work;
- struct mdp_hdr_stream hdr_data;
+ struct mdp_hdr_stream_ctrl hdr_ctrl;
spinlock_t hpd_state_lock;
@@ -116,6 +117,7 @@ struct hdmi_tx_ctrl {
u8 hdcp_status;
u8 spd_vendor_name[9];
u8 spd_product_description[17];
+ u8 curr_hdr_state;
bool hdcp_feature_on;
bool hpd_disabled;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 827013d06412..5bc46d8c8f92 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -16,6 +16,7 @@
#include <linux/io.h>
#include <linux/delay.h>
#include <linux/msm_mdp.h>
+#include <linux/msm_mdp_ext.h>
#include "mdss_hdmi_util.h"
#define RESOLUTION_NAME_STR_LEN 30
@@ -1811,3 +1812,51 @@ int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl)
return rc;
}
+
+u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state)
+{
+	/*
+	 * There are four valid state transitions:
+ * 1. HDR_DISABLE -> HDR_ENABLE
+ *
+ * In this transition, we shall start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 2. HDR_ENABLE -> HDR_RESET
+ *
+ * In this transition, we will keep sending
+ * HDR metadata but with EOTF and metadata as 0
+ *
+ * 3. HDR_RESET -> HDR_ENABLE
+ *
+ * In this transition, we will start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 4. HDR_RESET -> HDR_DISABLE
+ *
+ * In this transition, we will stop sending
+ * metadata to the sink and clear PKT_CTRL register
+ * bits.
+ */
+
+ if ((curr_state == HDR_DISABLE)
+ && (new_state == HDR_ENABLE)) {
+ pr_debug("State changed HDR_DISABLE ---> HDR_ENABLE\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_ENABLE)
+ && (new_state == HDR_RESET)) {
+ pr_debug("State changed HDR_ENABLE ---> HDR_RESET\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_ENABLE)) {
+ pr_debug("State changed HDR_RESET ---> HDR_ENABLE\n");
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_DISABLE)) {
+ pr_debug("State changed HDR_RESET ---> HDR_DISABLE\n");
+ return HDR_CLEAR_INFO;
+ }
+
+ pr_debug("Unsupported OR no state change\n");
+ return HDR_UNSUPPORTED_OP;
+}
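The mapping above can be exercised directly; a minimal illustrative check (hdr_ops_example is hypothetical, the HDR_* state values come from msm_mdp_ext.h and the return values from enum hdmi_hdr_op in mdss_hdmi_util.h):

/* Illustrative only, not part of the patch. */
static void hdr_ops_example(void)
{
	u8 op;

	op = hdmi_hdr_get_ops(HDR_DISABLE, HDR_ENABLE);	/* HDR_SEND_INFO */
	op = hdmi_hdr_get_ops(HDR_ENABLE, HDR_RESET);	/* HDR_SEND_INFO, metadata zeroed by userspace */
	op = hdmi_hdr_get_ops(HDR_RESET, HDR_DISABLE);	/* HDR_CLEAR_INFO */
	op = hdmi_hdr_get_ops(HDR_ENABLE, HDR_DISABLE);	/* HDR_UNSUPPORTED_OP */
	(void)op;
}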
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
index 4fd659616bcc..fe554f8e9e67 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -425,6 +425,12 @@ enum hdmi_tx_hdcp2p2_rxstatus_intr_mask {
RXSTATUS_REAUTH_REQ = BIT(14),
};
+enum hdmi_hdr_op {
+ HDR_UNSUPPORTED_OP,
+ HDR_SEND_INFO,
+ HDR_CLEAR_INFO
+};
+
struct hdmi_tx_hdcp2p2_ddc_data {
enum hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask;
u32 timeout_ms;
@@ -518,5 +524,5 @@ void hdmi_hdcp2p2_ddc_disable(struct hdmi_tx_ddc_ctrl *ctrl);
int hdmi_hdcp2p2_ddc_read_rxstatus(struct hdmi_tx_ddc_ctrl *ctrl);
int hdmi_utils_get_timeout_in_hysnc(struct msm_hdmi_mode_timing_info *timing,
u32 timeout_ms);
-
+u8 hdmi_hdr_get_ops(u8 curr_state, u8 new_state);
#endif /* __HDMI_UTIL_H__ */
diff --git a/include/linux/mmc/core.h b/include/linux/mmc/core.h
index 0065ffc9322b..08b3b8348fd7 100644
--- a/include/linux/mmc/core.h
+++ b/include/linux/mmc/core.h
@@ -170,6 +170,7 @@ extern int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
extern int mmc_get_ext_csd(struct mmc_card *card, u8 **new_ext_csd);
extern int mmc_set_auto_bkops(struct mmc_card *card, bool enable);
extern int mmc_suspend_clk_scaling(struct mmc_host *host);
+extern void mmc_flush_detect_work(struct mmc_host *);
#define MMC_ERASE_ARG 0x00000000
#define MMC_SECURE_ERASE_ARG 0x80000000
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index aea4c0f2ef5f..65a188eeeeb6 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -519,6 +519,7 @@ struct mmc_host {
unsigned int bus_resume_flags;
#define MMC_BUSRESUME_MANUAL_RESUME (1 << 0)
#define MMC_BUSRESUME_NEEDS_RESUME (1 << 1)
+ bool ignore_bus_resume_flags;
unsigned int sdio_irqs;
struct task_struct *sdio_irq_thread;
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 60d15a080d7c..9d3eda39bcd2 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,7 +37,7 @@ void rcu_cpu_stall_reset(void);
/*
* Note a virtualization-based context switch. This is simply a
* wrapper around rcu_note_context_switch(), which allows TINY_RCU
- * to save a few bytes.
+ * to save a few bytes. The caller must have disabled interrupts.
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
diff --git a/include/net/cnss2.h b/include/net/cnss2.h
index 5409e1b15a25..f1d321299492 100644
--- a/include/net/cnss2.h
+++ b/include/net/cnss2.h
@@ -153,14 +153,8 @@ extern int cnss_get_platform_cap(struct cnss_platform_cap *cap);
extern int cnss_get_soc_info(struct device *dev, struct cnss_soc_info *info);
extern void cnss_set_driver_status(enum cnss_driver_status driver_status);
extern int cnss_request_bus_bandwidth(int bandwidth);
-extern int cnss_set_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 ch_count);
-extern int cnss_get_wlan_unsafe_channel(u16 *unsafe_ch_list, u16 *ch_count,
- u16 buf_len);
-extern int cnss_wlan_set_dfs_nol(const void *info, u16 info_len);
-extern int cnss_wlan_get_dfs_nol(void *info, u16 info_len);
extern int cnss_power_up(struct device *dev);
extern int cnss_power_down(struct device *dev);
-extern u8 *cnss_common_get_wlan_mac_address(struct device *dev, uint32_t *num);
extern void cnss_request_pm_qos(u32 qos_val);
extern void cnss_remove_pm_qos(void);
extern void cnss_lock_pm_sem(void);
diff --git a/include/net/cnss_nl.h b/include/net/cnss_nl.h
index 86c2fccc930e..b8a7cfdb7966 100644
--- a/include/net/cnss_nl.h
+++ b/include/net/cnss_nl.h
@@ -23,12 +23,16 @@
* @CLD80211_ATTR_VENDOR_DATA: Embed all other attributes in this nested
* attribute.
* @CLD80211_ATTR_DATA: Embed complete data in this attribute
+ * @CLD80211_ATTR_META_DATA: Embed metadata for the above data. This helps the
+ * wlan driver peek into the request message packet without opening up the
+ * definition of the complete request message.
*
* Any new message in future can be added as another attribute
*/
enum cld80211_attr {
CLD80211_ATTR_VENDOR_DATA = 1,
CLD80211_ATTR_DATA,
+ CLD80211_ATTR_META_DATA,
/* add new attributes above here */
__CLD80211_ATTR_AFTER_LAST,
diff --git a/include/uapi/linux/msm_ipa.h b/include/uapi/linux/msm_ipa.h
index 322fb09b8614..0bdfc9741d19 100644
--- a/include/uapi/linux/msm_ipa.h
+++ b/include/uapi/linux/msm_ipa.h
@@ -128,6 +128,17 @@
#define IPA_WAN_MSG_IPv6_ADDR_GW_LEN 4
/**
+ * max number of lan clients supported per device type
+ * for LAN stats via HW.
+ */
+#define IPA_MAX_NUM_HW_PATH_CLIENTS 16
+
+/**
+ * max number of destination pipes possible for a client.
+ */
+#define QMI_IPA_MAX_CLIENT_DST_PIPES 4
+
+/**
* the attributes of the rule (routing or filtering)
*/
#define IPA_FLT_TOS (1ul << 0)
@@ -447,7 +458,14 @@ enum ipa_vlan_l2tp_event {
IPA_VLAN_L2TP_EVENT_MAX,
};
-#define IPA_EVENT_MAX_NUM (IPA_VLAN_L2TP_EVENT_MAX)
+enum ipa_per_client_stats_event {
+ IPA_PER_CLIENT_STATS_CONNECT_EVENT = IPA_VLAN_L2TP_EVENT_MAX,
+ IPA_PER_CLIENT_STATS_DISCONNECT_EVENT,
+ IPA_PER_CLIENT_STATS_EVENT_MAX,
+ IPA_EVENT_MAX_NUM = IPA_PER_CLIENT_STATS_EVENT_MAX,
+};
+
+#define IPA_EVENT_MAX_NUM ((int)IPA_PER_CLIENT_STATS_EVENT_MAX)
#define IPA_EVENT_MAX ((int)IPA_EVENT_MAX_NUM)
/**
@@ -1061,6 +1079,48 @@ struct ipa_rt_rule_del {
};
/**
+ * struct ipa_rt_rule_add_ext - routing rule descriptor includes in
+ * and out parameters
+ * @rule: actual rule to be added
+ * @at_rear: add at back of routing table, it is NOT possible to add rules at
+ * the rear of the "default" routing tables
+ * @rt_rule_hdl: output parameter, handle to rule, valid when status is 0
+ * @status: output parameter, status of routing rule add operation,
+ * 0 for success,
+ * -1 for failure
+ * @rule_id: rule_id to be assigned to the routing rule. In case the client
+ * specifies rule_id as 0, the driver will assign a new rule_id
+ */
+struct ipa_rt_rule_add_ext {
+ struct ipa_rt_rule rule;
+ uint8_t at_rear;
+ uint32_t rt_rule_hdl;
+ int status;
+ uint16_t rule_id;
+};
+
+/**
+ * struct ipa_ioc_add_rt_rule_ext - routing rule addition parameters (supports
+ * multiple rules and commit with rule_id);
+ *
+ * all rules MUST be added to the same table
+ * @commit: should rules be written to IPA HW also?
+ * @ip: IP family of rule
+ * @rt_tbl_name: name of routing table resource
+ * @num_rules: number of routing rules that follow
+ * @rules: all rules (struct ipa_rt_rule_add_ext) need to go back to back here,
+ * no pointers
+ */
+struct ipa_ioc_add_rt_rule_ext {
+ uint8_t commit;
+ enum ipa_ip_type ip;
+ char rt_tbl_name[IPA_RESOURCE_NAME_MAX];
+ uint8_t num_rules;
+ struct ipa_rt_rule_add_ext rules[0];
+};
+
+
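A minimal userspace-style sketch of sizing the flexible-array request defined above (illustrative only; alloc_add_rt_rule_ext is hypothetical, and the <linux/msm_ipa.h> install path plus the IPA_IP_v4 value of the pre-existing enum ipa_ip_type are assumptions):

#include <stdint.h>
#include <stdlib.h>
#include <linux/msm_ipa.h>

/* Hypothetical helper: allocate a request large enough for num_rules entries. */
struct ipa_ioc_add_rt_rule_ext *alloc_add_rt_rule_ext(uint8_t num_rules)
{
	size_t sz = sizeof(struct ipa_ioc_add_rt_rule_ext) +
		    (size_t)num_rules * sizeof(struct ipa_rt_rule_add_ext);
	struct ipa_ioc_add_rt_rule_ext *req = calloc(1, sz);

	if (!req)
		return NULL;
	req->commit = 1;		/* also write the rules to IPA HW */
	req->ip = IPA_IP_v4;		/* or IPA_IP_v6 */
	req->num_rules = num_rules;	/* rules[] and rt_tbl_name filled in by the caller */
	return req;
}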
+/**
* struct ipa_ioc_del_rt_rule - routing rule deletion parameters (supports
* multiple headers and commit)
* @commit: should rules be removed from IPA HW also?
@@ -1619,6 +1679,52 @@ enum ipacm_client_enum {
IPACM_CLIENT_WLAN,
IPACM_CLIENT_MAX
};
+
+enum ipacm_per_client_device_type {
+ IPACM_CLIENT_DEVICE_TYPE_USB = 0,
+ IPACM_CLIENT_DEVICE_TYPE_WLAN = 1,
+ IPACM_CLIENT_DEVICE_TYPE_ETH = 2
+};
+
+/**
+ * max number of device types supported.
+ */
+#define IPACM_MAX_CLIENT_DEVICE_TYPES 3
+
+/**
+ * @lanIface: Name of the lan interface
+ * @mac: MAC address of the client.
+ */
+struct ipa_lan_client_msg {
+ char lanIface[IPA_RESOURCE_NAME_MAX];
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+};
+
+/**
+ * struct ipa_lan_client - lan client data
+ * @mac: MAC Address of the client.
+ * @client_idx: Client Index.
+ * @inited: Bool to indicate whether client info is set.
+ */
+struct ipa_lan_client {
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ int8_t client_idx;
+ uint8_t inited;
+};
+
+/**
+ * struct ipa_tether_device_info - tether device info indicated from IPACM
+ * @ul_src_pipe: Source pipe of the lan client.
+ * @hdr_len: Header length of the client.
+ * @num_clients: Number of clients connected.
+ */
+struct ipa_tether_device_info {
+ int32_t ul_src_pipe;
+ uint8_t hdr_len;
+ uint32_t num_clients;
+ struct ipa_lan_client lan_client[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
/**
* actual IOCTLs supported by IPA driver
*/
@@ -1631,6 +1737,9 @@ enum ipacm_client_enum {
#define IPA_IOC_ADD_RT_RULE _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ADD_RT_RULE, \
struct ipa_ioc_add_rt_rule *)
+#define IPA_IOC_ADD_RT_RULE_EXT _IOWR(IPA_IOC_MAGIC, \
+ IPA_IOCTL_ADD_RT_RULE_EXT, \
+ struct ipa_ioc_add_rt_rule_ext *)
#define IPA_IOC_ADD_RT_RULE_AFTER _IOWR(IPA_IOC_MAGIC, \
IPA_IOCTL_ADD_RT_RULE_AFTER, \
struct ipa_ioc_add_rt_rule_after *)
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
index da9ee3bcc525..61b5f8eaa7f9 100644
--- a/include/uapi/linux/msm_mdp_ext.h
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -821,4 +821,26 @@ struct mdp_hdr_stream {
uint32_t content_type;
uint32_t reserved[5];
};
+
+/* HDR HDMI state: HDR_ENABLE, HDR_DISABLE and HDR_RESET take the values 1, 2 and 4 respectively */
+#define HDR_ENABLE (1 << 0)
+#define HDR_DISABLE (1 << 1)
+#define HDR_RESET (1 << 2)
+
+/*
+ * HDR Control
+ * This encapsulates the HDR metadata as well as a state control
+ * for the HDR metadata as required by the HDMI spec to send the
+ * relevant metadata depending on the state of the HDR playback.
+ * hdr_state: Controls HDR state, takes values HDR_ENABLE, HDR_DISABLE
+ * and HDR_RESET.
+ * hdr_meta: Metadata sent by the userspace for the HDR clip.
+ */
+
+#define DRM_MSM_EXT_PANEL_HDR_CTRL
+struct mdp_hdr_stream_ctrl {
+ __u8 hdr_state; /* HDR state */
+ struct mdp_hdr_stream hdr_stream; /* HDR metadata */
+};
+
#endif
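For context, the sysfs handler in mdss_hdmi_tx.c above copies sizeof(struct mdp_hdr_stream_ctrl) straight from the written buffer, so userspace hands over the state and the metadata in one block. A hedged sketch of assembling it (fill_hdr_ctrl is hypothetical, and the <linux/msm_mdp_ext.h> install path is assumed):

#include <string.h>
#include <linux/msm_mdp_ext.h>

/* Hypothetical helper: build the HDR control block for the start of playback. */
static void fill_hdr_ctrl(struct mdp_hdr_stream_ctrl *hc,
			  const struct mdp_hdr_stream *meta)
{
	memset(hc, 0, sizeof(*hc));
	hc->hdr_state = HDR_ENABLE;	/* HDR_RESET / HDR_DISABLE for the other states */
	hc->hdr_stream = *meta;		/* metadata parsed from the HDR clip */
}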
diff --git a/include/uapi/linux/rmnet_ipa_fd_ioctl.h b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
index f04ac495a5c0..13dac9a1526d 100644
--- a/include/uapi/linux/rmnet_ipa_fd_ioctl.h
+++ b/include/uapi/linux/rmnet_ipa_fd_ioctl.h
@@ -33,6 +33,12 @@
#define WAN_IOCTL_QUERY_DL_FILTER_STATS 8
#define WAN_IOCTL_ADD_FLT_RULE_EX 9
#define WAN_IOCTL_QUERY_TETHER_STATS_ALL 10
+#define WAN_IOCTL_ADD_UL_FLT_RULE 11
+#define WAN_IOCTL_ENABLE_PER_CLIENT_STATS 12
+#define WAN_IOCTL_QUERY_PER_CLIENT_STATS 13
+#define WAN_IOCTL_SET_LAN_CLIENT_INFO 14
+#define WAN_IOCTL_CLEAR_LAN_CLIENT_INFO 15
+#define WAN_IOCTL_SEND_LAN_CLIENT_MSG 16
/* User space may not have this defined. */
#ifndef IFNAMSIZ
@@ -126,6 +132,57 @@ struct wan_ioctl_query_dl_filter_stats {
uint32_t index;
};
+struct wan_ioctl_send_lan_client_msg {
+ /* Lan client info. */
+ struct ipa_lan_client_msg lan_client;
+ /* Event to indicate whether client is
+ * connected or disconnected.
+ */
+ enum ipa_per_client_stats_event client_event;
+};
+
+struct wan_ioctl_lan_client_info {
+ /* Device type of the client. */
+ enum ipacm_per_client_device_type device_type;
+ /* MAC Address of the client. */
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ /* Init client. */
+ uint8_t client_init;
+ /* Client Index */
+ int8_t client_idx;
+ /* Header length of the client. */
+ uint8_t hdr_len;
+ /* Source pipe of the lan client. */
+ enum ipa_client_type ul_src_pipe;
+};
+
+struct wan_ioctl_per_client_info {
+ /* MAC Address of the client. */
+ uint8_t mac[IPA_MAC_ADDR_SIZE];
+ /* Ipv4 UL traffic bytes. */
+ uint64_t ipv4_tx_bytes;
+ /* Ipv4 DL traffic bytes. */
+ uint64_t ipv4_rx_bytes;
+ /* Ipv6 UL traffic bytes. */
+ uint64_t ipv6_tx_bytes;
+ /* Ipv6 DL traffic bytes. */
+ uint64_t ipv6_rx_bytes;
+};
+
+struct wan_ioctl_query_per_client_stats {
+ /* Device type of the client. */
+ enum ipacm_per_client_device_type device_type;
+ /* Indicate whether to reset the stats (use 1) or not */
+ uint8_t reset_stats;
+ /* Indicates whether client is disconnected. */
+ uint8_t disconnect_clnt;
+ /* Number of clients. */
+ uint8_t num_clients;
+ /* Client information. */
+ struct wan_ioctl_per_client_info
+ client_info[IPA_MAX_NUM_HW_PATH_CLIENTS];
+};
+
#define WAN_IOC_ADD_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
WAN_IOCTL_ADD_FLT_RULE, \
struct ipa_install_fltr_rule_req_msg_v01 *)
@@ -170,4 +227,27 @@ struct wan_ioctl_query_dl_filter_stats {
WAN_IOCTL_QUERY_TETHER_STATS_ALL, \
struct wan_ioctl_query_tether_stats_all *)
+#define WAN_IOC_ADD_UL_FLT_RULE _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ADD_UL_FLT_RULE, \
+ struct ipa_configure_ul_firewall_rules_req_msg_v01 *)
+
+#define WAN_IOC_ENABLE_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_ENABLE_PER_CLIENT_STATS, \
+ bool *)
+
+#define WAN_IOC_QUERY_PER_CLIENT_STATS _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_QUERY_PER_CLIENT_STATS, \
+ struct wan_ioctl_query_per_client_stats *)
+
+#define WAN_IOC_SET_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SET_LAN_CLIENT_INFO, \
+ struct wan_ioctl_lan_client_info *)
+
+#define WAN_IOC_SEND_LAN_CLIENT_MSG _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_SEND_LAN_CLIENT_MSG, \
+ struct wan_ioctl_send_lan_client_msg *)
+
+#define WAN_IOC_CLEAR_LAN_CLIENT_INFO _IOWR(WAN_IOC_MAGIC, \
+ WAN_IOCTL_CLEAR_LAN_CLIENT_INFO, \
+ struct wan_ioctl_lan_client_info *)
#endif /* _RMNET_IPA_FD_IOCTL_H */
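A hypothetical userspace call for the new per-client stats ioctl (query_wlan_client_stats and the already-open fd are assumptions; only the struct, the enum and the ioctl number come from the definitions above):

#include <string.h>
#include <sys/ioctl.h>
#include <linux/rmnet_ipa_fd_ioctl.h>

/* Hypothetical helper: fetch WLAN per-client counters without resetting them. */
static int query_wlan_client_stats(int fd,
				   struct wan_ioctl_query_per_client_stats *stats)
{
	memset(stats, 0, sizeof(*stats));
	stats->device_type = IPACM_CLIENT_DEVICE_TYPE_WLAN;
	stats->reset_stats = 0;		/* keep the counters running */
	stats->disconnect_clnt = 0;	/* clients are still connected */
	return ioctl(fd, WAN_IOC_QUERY_PER_CLIENT_STATS, stats);
}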
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index fa930a91b4aa..36e94588d1d9 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -2,7 +2,7 @@
* Video for Linux Two header file
*
* Copyright (C) 1999-2012 the contributors
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
@@ -590,6 +590,11 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_SGBRG10DPCM6 v4l2_fourcc('b', 'G', 'A', '6')
#define V4L2_PIX_FMT_SGRBG10DPCM6 v4l2_fourcc('B', 'D', '1', '6')
#define V4L2_PIX_FMT_SRGGB10DPCM6 v4l2_fourcc('b', 'R', 'A', '6')
+ /* 10bit raw bayer, plain16 packed */
+#define V4L2_PIX_FMT_SBGGRPLAIN16 v4l2_fourcc('B', 'G', '1', '6')
+#define V4L2_PIX_FMT_SGBRGPLAIN16 v4l2_fourcc('G', 'B', '1', '6')
+#define V4L2_PIX_FMT_SGRBGPLAIN16 v4l2_fourcc('G', 'R', '1', '6')
+#define V4L2_PIX_FMT_SRGGBPLAIN16 v4l2_fourcc('R', 'G', '1', '6')
/* compressed formats */
#define V4L2_PIX_FMT_MJPEG v4l2_fourcc('M', 'J', 'P', 'G') /* Motion-JPEG */
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index 077bb52e2d47..3fdb7545852e 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -2799,6 +2799,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
int retval = 0;
mutex_lock(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
for_each_root(root) {
struct cgroup *from_cgrp;
@@ -2813,6 +2814,7 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
if (retval)
break;
}
+ percpu_up_write(&cgroup_threadgroup_rwsem);
mutex_unlock(&cgroup_mutex);
return retval;
@@ -4072,6 +4074,8 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
mutex_lock(&cgroup_mutex);
+ percpu_down_write(&cgroup_threadgroup_rwsem);
+
/* all tasks in @from are being moved, all csets are source */
spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &from->cset_links, cset_link)
@@ -4100,6 +4104,7 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
} while (task && !ret);
out_err:
cgroup_migrate_finish(&preloaded_csets);
+ percpu_up_write(&cgroup_threadgroup_rwsem);
mutex_unlock(&cgroup_mutex);
return ret;
}
diff --git a/kernel/locking/osq_lock.c b/kernel/locking/osq_lock.c
index 1e6a51cc25c4..99b8d991126f 100644
--- a/kernel/locking/osq_lock.c
+++ b/kernel/locking/osq_lock.c
@@ -106,32 +106,6 @@ bool osq_lock(struct optimistic_spin_queue *lock)
prev = decode_cpu(old);
node->prev = prev;
-
- /*
- * We need to avoid reordering of link updation sequence of osq.
- * A case in which the status of optimistic spin queue is
- * CPU6->CPU2 in which CPU6 has acquired the lock. At this point
- * if CPU0 comes in to acquire osq_lock, it will update the tail
- * count. After tail count update if CPU2 starts to unqueue itself
- * from optimistic spin queue, it will find updated tail count with
- * CPU0 and update CPU2 node->next to NULL in osq_wait_next(). If
- * reordering of following stores happen then prev->next where prev
- * being CPU2 would be updated to point to CPU0 node:
- * node->prev = prev;
- * WRITE_ONCE(prev->next, node);
- *
- * At this point if next instruction
- * WRITE_ONCE(next->prev, prev);
- * in CPU2 path is committed before the update of CPU0 node->prev =
- * prev then CPU0 node->prev will point to CPU6 node. At this point
- * if CPU0 path's node->prev = prev is committed resulting in change
- * of CPU0 prev back to CPU2 node. CPU2 node->next is NULL, so if
- * CPU0 gets into unqueue path of osq_lock it will keep spinning
- * in infinite loop as condition prev->next == node will never be
- * true.
- */
- smp_mb();
-
WRITE_ONCE(prev->next, node);
/*
diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c
index 2cb46d51d715..1ba183e7987c 100644
--- a/kernel/rcu/tree.c
+++ b/kernel/rcu/tree.c
@@ -248,24 +248,17 @@ static int rcu_gp_in_progress(struct rcu_state *rsp)
*/
void rcu_sched_qs(void)
{
- unsigned long flags;
-
- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.s)) {
- trace_rcu_grace_period(TPS("rcu_sched"),
- __this_cpu_read(rcu_sched_data.gpnum),
- TPS("cpuqs"));
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
- if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
- return;
- local_irq_save(flags);
- if (__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp)) {
- __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
- rcu_report_exp_rdp(&rcu_sched_state,
- this_cpu_ptr(&rcu_sched_data),
- true);
- }
- local_irq_restore(flags);
- }
+ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.s))
+ return;
+ trace_rcu_grace_period(TPS("rcu_sched"),
+ __this_cpu_read(rcu_sched_data.gpnum),
+ TPS("cpuqs"));
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.norm, false);
+ if (!__this_cpu_read(rcu_sched_data.cpu_no_qs.b.exp))
+ return;
+ __this_cpu_write(rcu_sched_data.cpu_no_qs.b.exp, false);
+ rcu_report_exp_rdp(&rcu_sched_state,
+ this_cpu_ptr(&rcu_sched_data), true);
}
void rcu_bh_qs(void)
@@ -302,17 +295,16 @@ EXPORT_PER_CPU_SYMBOL_GPL(rcu_qs_ctr);
* We inform the RCU core by emulating a zero-duration dyntick-idle
* period, which we in turn do by incrementing the ->dynticks counter
* by two.
+ *
+ * The caller must have disabled interrupts.
*/
static void rcu_momentary_dyntick_idle(void)
{
- unsigned long flags;
struct rcu_data *rdp;
struct rcu_dynticks *rdtp;
int resched_mask;
struct rcu_state *rsp;
- local_irq_save(flags);
-
/*
* Yes, we can lose flag-setting operations. This is OK, because
* the flag will be set again after some delay.
@@ -342,13 +334,12 @@ static void rcu_momentary_dyntick_idle(void)
smp_mb__after_atomic(); /* Later stuff after QS. */
break;
}
- local_irq_restore(flags);
}
/*
* Note a context switch. This is a quiescent state for RCU-sched,
* and requires special handling for preemptible RCU.
- * The caller must have disabled preemption.
+ * The caller must have disabled interrupts.
*/
void rcu_note_context_switch(void)
{
@@ -378,9 +369,14 @@ EXPORT_SYMBOL_GPL(rcu_note_context_switch);
*/
void rcu_all_qs(void)
{
+ unsigned long flags;
+
barrier(); /* Avoid RCU read-side critical sections leaking down. */
- if (unlikely(raw_cpu_read(rcu_sched_qs_mask)))
+ if (unlikely(raw_cpu_read(rcu_sched_qs_mask))) {
+ local_irq_save(flags);
rcu_momentary_dyntick_idle();
+ local_irq_restore(flags);
+ }
this_cpu_inc(rcu_qs_ctr);
barrier(); /* Avoid RCU read-side critical sections leaking up. */
}
diff --git a/kernel/rcu/tree_plugin.h b/kernel/rcu/tree_plugin.h
index 32cbe72bf545..c6fc11d626f8 100644
--- a/kernel/rcu/tree_plugin.h
+++ b/kernel/rcu/tree_plugin.h
@@ -147,8 +147,8 @@ static void __init rcu_bootup_announce(void)
* the corresponding expedited grace period will also be the end of the
* normal grace period.
*/
-static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
- unsigned long flags) __releases(rnp->lock)
+static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp)
+ __releases(rnp->lock) /* But leaves rrupts disabled. */
{
int blkd_state = (rnp->gp_tasks ? RCU_GP_TASKS : 0) +
(rnp->exp_tasks ? RCU_EXP_TASKS : 0) +
@@ -236,7 +236,7 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
rnp->gp_tasks = &t->rcu_node_entry;
if (!rnp->exp_tasks && (blkd_state & RCU_EXP_BLKD))
rnp->exp_tasks = &t->rcu_node_entry;
- raw_spin_unlock(&rnp->lock);
+ raw_spin_unlock(&rnp->lock); /* rrupts remain disabled. */
/*
* Report the quiescent state for the expedited GP. This expedited
@@ -251,7 +251,6 @@ static void rcu_preempt_ctxt_queue(struct rcu_node *rnp, struct rcu_data *rdp,
} else {
WARN_ON_ONCE(t->rcu_read_unlock_special.b.exp_need_qs);
}
- local_irq_restore(flags);
}
/*
@@ -286,12 +285,11 @@ static void rcu_preempt_qs(void)
* predating the current grace period drain, in other words, until
* rnp->gp_tasks becomes NULL.
*
- * Caller must disable preemption.
+ * Caller must disable interrupts.
*/
static void rcu_preempt_note_context_switch(void)
{
struct task_struct *t = current;
- unsigned long flags;
struct rcu_data *rdp;
struct rcu_node *rnp;
@@ -301,7 +299,7 @@ static void rcu_preempt_note_context_switch(void)
/* Possibly blocking in an RCU read-side critical section. */
rdp = this_cpu_ptr(rcu_state_p->rda);
rnp = rdp->mynode;
- raw_spin_lock_irqsave(&rnp->lock, flags);
+ raw_spin_lock(&rnp->lock);
smp_mb__after_unlock_lock();
t->rcu_read_unlock_special.b.blocked = true;
t->rcu_blocked_node = rnp;
@@ -318,7 +316,7 @@ static void rcu_preempt_note_context_switch(void)
(rnp->qsmask & rdp->grpmask)
? rnp->gpnum
: rnp->gpnum + 1);
- rcu_preempt_ctxt_queue(rnp, rdp, flags);
+ rcu_preempt_ctxt_queue(rnp, rdp);
} else if (t->rcu_read_lock_nesting < 0 &&
t->rcu_read_unlock_special.s) {
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 4ecca604e64b..2dbe599d34d5 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -621,8 +621,7 @@ void resched_cpu(int cpu)
struct rq *rq = cpu_rq(cpu);
unsigned long flags;
- if (!raw_spin_trylock_irqsave(&rq->lock, flags))
- return;
+ raw_spin_lock_irqsave(&rq->lock, flags);
resched_curr(rq);
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
@@ -3512,7 +3511,6 @@ static void __sched notrace __schedule(bool preempt)
cpu = smp_processor_id();
rq = cpu_rq(cpu);
- rcu_note_context_switch();
prev = rq->curr;
/*
@@ -3531,13 +3529,16 @@ static void __sched notrace __schedule(bool preempt)
if (sched_feat(HRTICK))
hrtick_clear(rq);
+ local_irq_disable();
+ rcu_note_context_switch();
+
/*
* Make sure that signal_pending_state()->signal_pending() below
* can't be reordered with __set_current_state(TASK_INTERRUPTIBLE)
* done by the caller to avoid the race with signal_wake_up().
*/
smp_mb__before_spinlock();
- raw_spin_lock_irq(&rq->lock);
+ raw_spin_lock(&rq->lock);
lockdep_pin_lock(&rq->lock);
rq->clock_skip_update <<= 1; /* promote REQ to ACT */
diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c
index 8a2a489b2cd3..ede54061c554 100644
--- a/net/netfilter/xt_socket.c
+++ b/net/netfilter/xt_socket.c
@@ -237,7 +237,7 @@ socket_match(const struct sk_buff *skb, struct xt_action_param *par,
transparent = xt_socket_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
sock_gen_put(sk);
@@ -419,7 +419,7 @@ socket_mt6_v1_v2_v3(const struct sk_buff *skb, struct xt_action_param *par)
transparent = xt_socket_sk_is_transparent(sk);
if (info->flags & XT_SOCKET_RESTORESKMARK && !wildcard &&
- transparent)
+ transparent && sk_fullsock(sk))
pskb->mark = sk->sk_mark;
if (sk != skb->sk)
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 0727a6e9f780..86005410a22f 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -224,17 +224,16 @@ country BY: DFS-ETSI
(5490 - 5710 @ 160), (30), DFS
country BZ:
- (2402 - 2482 @ 40), (36)
- (5170 - 5330 @ 160), (27)
- (5490 - 5730 @ 160), (36)
- (5735 - 5835 @ 80), (36)
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5330 @ 160), (23)
+ (5490 - 5730 @ 160), (30)
+ (5735 - 5835 @ 80), (30)
country CA: DFS-FCC
(2402 - 2472 @ 40), (30)
(5170 - 5250 @ 80), (24), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
- (5490 - 5590 @ 80), (24), DFS
- (5650 - 5730 @ 80), (24), DFS
+ (5490 - 5730 @ 160), (24), DFS
(5735 - 5835 @ 80), (30)
# 60 gHz band channels 1-3
(57240 - 63720 @ 2160), (40)
@@ -683,7 +682,13 @@ country IL: DFS-ETSI
country IN:
(2402 - 2482 @ 40), (20)
(5170 - 5330 @ 160), (23)
- (5735 - 5835 @ 80), (30)
+ (5735 - 5835 @ 80), (33)
+
+country IQ: DFS-ETSI
+ (2402 - 2482 @ 40), (20)
+ (5170 - 5250 @ 80), (23), AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5490 - 5710 @ 160), (30), DFS
country IS: DFS-ETSI
(2402 - 2482 @ 40), (20)
@@ -737,7 +742,6 @@ country JO:
country JP: DFS-JP
(2402 - 2482 @ 40), (20)
- (2474 - 2494 @ 20), (20), NO-OFDM
(5170 - 5250 @ 80), (20), AUTO-BW, NO-OUTDOOR
(5250 - 5330 @ 80), (20), DFS, AUTO-BW, NO-OUTDOOR
(5490 - 5710 @ 160), (20), DFS
@@ -759,7 +763,7 @@ country KH: DFS-ETSI
country KN: DFS-FCC
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
- (5250 - 5330 @ 80), (23), DFS, AUTO-BW
+ (5250 - 5330 @ 80), (30), DFS, AUTO-BW
(5490 - 5710 @ 160), (30), DFS
(5735 - 5815 @ 80), (30)
@@ -1010,7 +1014,7 @@ country MY: DFS-FCC
(5170 - 5250 @ 80), (24), AUTO-BW
(5250 - 5330 @ 80), (24), DFS, AUTO-BW
(5490 - 5650 @ 160), (24), DFS
- (5735 - 5815 @ 80), (24)
+ (5735 - 5835 @ 80), (24)
# 60 gHz band channels 1-3
(57240 - 63720 @ 2160), (40)
@@ -1090,7 +1094,7 @@ country OM: DFS-ETSI
(5490 - 5710 @ 160), (30), DFS
country PA:
- (2402 - 2472 @ 40), (30)
+ (2402 - 2472 @ 40), (36)
	(5170 - 5250 @ 80), (23), AUTO-BW
(5250 - 5330 @ 80), (30), AUTO-BW
(5735 - 5835 @ 80), (36)
@@ -1375,9 +1379,9 @@ country TR: DFS-ETSI
country TT:
(2402 - 2482 @ 40), (20)
- (5170 - 5330 @ 160), (27)
- (5490 - 5730 @ 160), (36)
- (5735 - 5835 @ 80), (36)
+ (5170 - 5330 @ 160), (24)
+ (5490 - 5730 @ 160), (24)
+ (5735 - 5835 @ 80), (30)
# 60 gHz band channels 1-3, FCC
(57240 - 63720 @ 2160), (40)
@@ -1451,7 +1455,7 @@ country UY: DFS-FCC
country UZ: DFS-ETSI
(2402 - 2482 @ 40), (20)
(5170 - 5250 @ 80), (23), AUTO-BW
- (5250 - 5330 @ 80), (20), DFS, AUTO-BW
+ (5250 - 5330 @ 80), (23), DFS, AUTO-BW
country VC: DFS-ETSI
(2402 - 2482 @ 40), (20)
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index a01c781acdf1..55eef61a01de 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -49,10 +49,10 @@
#define BUS_DOWN 1
/*
- * 50 Milliseconds sufficient for DSP bring up in the lpass
+ * 200 Milliseconds sufficient for DSP bring up in the lpass
* after Sub System Restart
*/
-#define ADSP_STATE_READY_TIMEOUT_MS 50
+#define ADSP_STATE_READY_TIMEOUT_MS 200
#define EAR_PMD 0
#define EAR_PMU 1
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 25c318c6c4e1..5f9dc9c0c392 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -215,6 +215,7 @@ static int msm_dig_cdc_codec_config_compander(struct snd_soc_codec *codec,
{
struct msm_dig_priv *dig_cdc = snd_soc_codec_get_drvdata(codec);
int comp_ch_bits_set = 0x03;
+ int comp_ch_value;
dev_dbg(codec->dev, "%s: event %d shift %d, enabled %d\n",
__func__, event, interp_n,
@@ -234,15 +235,40 @@ static int msm_dig_cdc_codec_config_compander(struct snd_soc_codec *codec,
dig_cdc->set_compander_mode(dig_cdc->handle, 0x00);
return 0;
};
+ comp_ch_value = snd_soc_read(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL);
+ if (interp_n == 0) {
+ if ((comp_ch_value & 0x02) == 0x02) {
+ dev_dbg(codec->dev,
+ "%s comp ch already enabled\n",
+ __func__);
+ return 0;
+ }
+ }
+ if (interp_n == 1) {
+ if ((comp_ch_value & 0x01) == 0x01) {
+ dev_dbg(codec->dev,
+ "%s comp ch already enabled\n",
+ __func__);
+ return 0;
+ }
+ }
dig_cdc->set_compander_mode(dig_cdc->handle, 0x08);
/* Enable Compander Clock */
snd_soc_update_bits(codec,
MSM89XX_CDC_CORE_COMP0_B2_CTL, 0x0F, 0x09);
snd_soc_update_bits(codec,
MSM89XX_CDC_CORE_CLK_RX_B2_CTL, 0x01, 0x01);
- snd_soc_update_bits(codec,
- MSM89XX_CDC_CORE_COMP0_B1_CTL,
- 1 << interp_n, 1 << interp_n);
+ if (dig_cdc->comp_enabled[MSM89XX_RX1]) {
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL,
+ 0x02, 0x02);
+ }
+ if (dig_cdc->comp_enabled[MSM89XX_RX2]) {
+ snd_soc_update_bits(codec,
+ MSM89XX_CDC_CORE_COMP0_B1_CTL,
+ 0x01, 0x01);
+ }
snd_soc_update_bits(codec,
MSM89XX_CDC_CORE_COMP0_B3_CTL, 0xFF, 0x01);
snd_soc_update_bits(codec,
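
Note on the compander hunk above: before enabling, the handler now reads COMP0_B1_CTL and returns early if the bit for the requested interpolator is already set, and it then enables RX1/RX2 individually from comp_enabled[] instead of blindly writing 1 << interp_n. A hedged sketch of the early-return check (register and bit values as in the hunk, helper name hypothetical):

/* Interp 0 maps to bit 0x02 (RX1) and interp 1 to bit 0x01 (RX2) in
 * COMP0_B1_CTL; skip re-enabling a channel that is already on.
 */
static bool comp_ch_already_enabled(struct snd_soc_codec *codec, int interp_n)
{
	int val = snd_soc_read(codec, MSM89XX_CDC_CORE_COMP0_B1_CTL);
	int mask = (interp_n == 0) ? 0x02 : 0x01;

	return (val & mask) == mask;
}
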
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index 1613c5baa9c7..f995bf22c1c3 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -25,7 +25,8 @@
static char *wdsp_get_cmpnt_type_string(enum wdsp_cmpnt_type);
/* Component related macros */
-#define WDSP_GET_COMPONENT(wdsp, x) (&(wdsp->cmpnts[x]))
+#define WDSP_GET_COMPONENT(wdsp, x) ((x >= WDSP_CMPNT_TYPE_MAX || x < 0) ? \
+ NULL : (&(wdsp->cmpnts[x])))
#define WDSP_GET_CMPNT_TYPE_STR(x) wdsp_get_cmpnt_type_string(x)
/*
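
Note on the wcd-dsp-mgr hunk above: WDSP_GET_COMPONENT() now returns NULL for an out-of-range component type instead of indexing past wdsp->cmpnts[], so callers have to check the result. A short usage sketch under that assumption (surrounding caller code is illustrative):

/* Callers must handle the NULL return introduced by the bounds check. */
struct wdsp_cmpnt *cmpnt = WDSP_GET_COMPONENT(wdsp, type);

if (!cmpnt) {
	pr_err("%s: invalid component type %d\n", __func__, type);
	return -EINVAL;
}
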
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 10883b0939d6..2bc911e63e12 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -4073,6 +4073,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
}
set_bit(HPH_PA_DELAY, &tasha->status_mask);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x40);
break;
case SND_SOC_DAPM_POST_PMU:
if (!(strcmp(w->name, "ANC HPHR PA"))) {
@@ -4127,6 +4129,8 @@ static int tasha_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
if (!(strcmp(w->name, "ANC HPHR PA")))
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x40, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/* 5ms sleep is required after PA is disabled as per
@@ -4166,6 +4170,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
(test_bit(HPH_PA_DELAY, &tasha->status_mask))) {
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0xC0, 0xC0);
}
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x80);
set_bit(HPH_PA_DELAY, &tasha->status_mask);
break;
case SND_SOC_DAPM_POST_PMU:
@@ -4222,6 +4228,8 @@ static int tasha_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
tasha_codec_hph_post_pa_config(tasha, hph_mode, event);
if (!(strcmp(w->name, "ANC HPHL PA")))
snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x80, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/* 5ms sleep is required after PA is disabled as per
@@ -4544,6 +4552,10 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "RX INT2 DAC"))) {
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x20, 0x20);
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x10, 0x10);
+ }
if (tasha->anc_func) {
ret = tasha_codec_enable_anc(w, kcontrol, event);
/* 40 msec delay is needed to avoid click and pop */
@@ -4582,6 +4594,8 @@ static int tasha_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
}
break;
case SND_SOC_DAPM_PRE_PMD:
+ if (!(strcmp(w->name, "RX INT2 DAC")))
+ snd_soc_update_bits(codec, WCD9335_ANA_HPH, 0x30, 0x00);
if ((hph_mode == CLS_H_LP) &&
(TASHA_IS_1_1(wcd9xxx))) {
snd_soc_update_bits(codec, WCD9335_HPH_L_DAC_CTL,
@@ -11094,12 +11108,12 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
0, 0, tasha_codec_ear_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD9335_ANA_HPH,
- 5, 0, tasha_codec_hphl_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tasha_codec_hphl_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD9335_ANA_HPH,
- 4, 0, tasha_codec_hphr_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tasha_codec_hphr_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
@@ -11114,11 +11128,11 @@ static const struct snd_soc_dapm_widget tasha_dapm_widgets[] = {
SND_SOC_DAPM_DAC_E("RX INT6 DAC", NULL, SND_SOC_NOPM,
0, 0, tasha_codec_lineout_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHL PA", WCD9335_ANA_HPH, 7, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tasha_codec_enable_hphl_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHR PA", WCD9335_ANA_HPH, 6, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tasha_codec_enable_hphr_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 26320fd01a5a..bfe471e73503 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -2014,6 +2014,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_ANA_RX_SUPPLIES,
0x02, 0x02);
}
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x40);
break;
case SND_SOC_DAPM_POST_PMU:
if ((!(strcmp(w->name, "ANC HPHR PA")))) {
@@ -2112,6 +2114,8 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
0x10, 0x10);
if (!(strcmp(w->name, "ANC HPHR PA")))
snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
+ if (!(strcmp(w->name, "HPHR PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -2161,6 +2165,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
(test_bit(HPH_PA_DELAY, &tavil->status_mask)))
snd_soc_update_bits(codec, WCD934X_ANA_HPH,
0xC0, 0xC0);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x80);
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
@@ -2266,6 +2272,8 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
if (!(strcmp(w->name, "ANC HPHL PA")))
snd_soc_update_bits(codec, WCD934X_ANA_HPH,
0x80, 0x00);
+ if (!(strcmp(w->name, "HPHL PA")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x80, 0x00);
break;
case SND_SOC_DAPM_POST_PMD:
/*
@@ -2418,6 +2426,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
+ if (!(strcmp(w->name, "RX INT2 DAC"))) {
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x20, 0x20);
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x10, 0x10);
+ }
if (tavil->anc_func) {
ret = tavil_codec_enable_anc(w, kcontrol, event);
/* 40 msec delay is needed to avoid click and pop */
@@ -2458,6 +2470,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
WCD934X_CDC_RX2_RX_PATH_CFG0,
0x10, 0x10);
break;
+ case SND_SOC_DAPM_PRE_PMD:
+ if (!(strcmp(w->name, "RX INT2 DAC")))
+ snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x30, 0x00);
+ break;
case SND_SOC_DAPM_POST_PMD:
/* 1000us required as per HW requirement */
usleep_range(1000, 1100);
@@ -7365,12 +7381,12 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
0, 0, tavil_codec_ear_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, WCD934X_ANA_HPH,
- 5, 0, tavil_codec_hphl_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT1 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tavil_codec_hphl_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, WCD934X_ANA_HPH,
- 4, 0, tavil_codec_hphr_dac_event,
+ SND_SOC_DAPM_DAC_E("RX INT2 DAC", NULL, SND_SOC_NOPM,
+ 0, 0, tavil_codec_hphr_dac_event,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
SND_SOC_DAPM_DAC_E("RX INT3 DAC", NULL, SND_SOC_NOPM,
@@ -7383,11 +7399,11 @@ static const struct snd_soc_dapm_widget tavil_dapm_widgets[] = {
SND_SOC_DAPM_PGA_E("EAR PA", WCD934X_ANA_EAR, 7, 0, NULL, 0,
tavil_codec_enable_ear_pa,
SND_SOC_DAPM_POST_PMU | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHL PA", WCD934X_ANA_HPH, 7, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHL PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tavil_codec_enable_hphl_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
- SND_SOC_DAPM_PGA_E("HPHR PA", WCD934X_ANA_HPH, 6, 0, NULL, 0,
+ SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
tavil_codec_enable_hphr_pa,
SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
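
Note on the wcd9335 and wcd934x hunks above: both codecs move the HPH DAC and PA widgets off the ANA_HPH register bits (now SND_SOC_NOPM) and instead toggle those bits explicitly from the DAPM event callbacks, so the register writes can be sequenced against the rest of the event handling. A condensed, hedged sketch of the pattern for the HPHR PA path (event placement and exact bit masks follow the hunks; this is not the full handler):

/* Enable bit 0x40 (HPHR) is driven by hand now that the widget uses
 * SND_SOC_NOPM; set it before power-up, clear it before power-down.
 */
static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
				      struct snd_kcontrol *kcontrol, int event)
{
	struct snd_soc_codec *codec = snd_soc_dapm_to_codec(w->dapm);

	switch (event) {
	case SND_SOC_DAPM_PRE_PMU:
		snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x40);
		break;
	case SND_SOC_DAPM_PRE_PMD:
		snd_soc_update_bits(codec, WCD934X_ANA_HPH, 0x40, 0x00);
		break;
	}
	return 0;
}

static const struct snd_soc_dapm_widget hphr_widget[] = {
	SND_SOC_DAPM_PGA_E("HPHR PA", SND_SOC_NOPM, 0, 0, NULL, 0,
			   tavil_codec_enable_hphr_pa,
			   SND_SOC_DAPM_PRE_PMU | SND_SOC_DAPM_POST_PMU |
			   SND_SOC_DAPM_PRE_PMD | SND_SOC_DAPM_POST_PMD),
};
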
diff --git a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
index c462f682e160..471be3294881 100644
--- a/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-compress-q6-v2.c
@@ -397,12 +397,9 @@ static int msm_compr_set_volume(struct snd_compr_stream *cstream,
} else {
gain_list[0] = volume_l;
gain_list[1] = volume_r;
- /* force sending FR/FL/FC volume for mono */
- if (prtd->num_channels == 1) {
- gain_list[2] = volume_l;
- num_channels = 3;
- use_default = true;
- }
+ gain_list[2] = volume_l;
+ num_channels = 3;
+ use_default = true;
rc = q6asm_set_multich_gain(prtd->audio_client, num_channels,
gain_list, chmap, use_default);
}
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 35270e3340ec..ae6767d26921 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -1167,7 +1167,7 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
case SNDRV_LSM_SET_FWK_MODE_CONFIG: {
u32 mode;
- if (copy_from_user(&mode, arg, sizeof(mode))) {
+ if (copy_from_user(&mode, (void __user *) arg, sizeof(mode))) {
dev_err(rtd->dev, "%s: %s: copy_frm_user failed\n",
__func__, "LSM_SET_FWK_MODE_CONFIG");
return -EFAULT;
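
Note on the msm-lsm-client hunk above: the cast only makes the address-space annotation explicit, since arg reaches the shared ioctl path as a plain pointer while copy_from_user() expects a void __user * source. Minimal sketch of the pattern:

u32 mode;

/* arg originates from userspace; annotate it for copy_from_user()/sparse */
if (copy_from_user(&mode, (void __user *) arg, sizeof(mode)))
	return -EFAULT;
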
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
index b94eb6fbfeea..0d01803e634d 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-q6-v2.c
@@ -1448,12 +1448,13 @@ static int msm_pcm_add_compress_control(struct snd_soc_pcm_runtime *rtd)
if (pdata) {
if (!pdata->pcm) {
pdata->pcm = rtd->pcm;
- snd_soc_add_platform_controls(rtd->platform,
- pcm_compress_control,
- ARRAY_SIZE
- (pcm_compress_control));
- pr_debug("%s: add control success plt = %pK\n",
- __func__, rtd->platform);
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ pcm_compress_control,
+ ARRAY_SIZE
+ (pcm_compress_control));
+ if (ret < 0)
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
}
} else {
pr_err("%s: NULL pdata\n", __func__);
@@ -1603,24 +1604,47 @@ done:
return ret;
}
+static int msm_pcm_playback_pan_scale_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = sizeof(struct asm_stream_pan_ctrl_params);
+ return 0;
+}
+
static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int ret = 0;
int len = 0;
int i = 0;
- struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_platform *platform;
+ struct msm_plat_data *pdata;
struct snd_pcm_substream *substream;
struct msm_audio *prtd;
struct asm_stream_pan_ctrl_params pan_param;
-
+ char *usr_value = NULL;
+ uint32_t *gain_ptr = NULL;
if (!usr_info) {
pr_err("%s: usr_info is null\n", __func__);
ret = -EINVAL;
goto done;
}
- substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ platform = snd_soc_component_to_platform(usr_info);
+ if (!platform) {
+ pr_err("%s: platform is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ pdata = dev_get_drvdata(platform->dev);
+ if (!pdata) {
+ pr_err("%s: pdata is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (!substream) {
pr_err("%s substream not found\n", __func__);
ret = -EINVAL;
@@ -1637,54 +1661,71 @@ static int msm_pcm_playback_pan_scale_ctl_put(struct snd_kcontrol *kcontrol,
ret = -EINVAL;
goto done;
}
- pan_param.num_output_channels =
- ucontrol->value.integer.value[len++];
+ usr_value = (char *) ucontrol->value.bytes.data;
+ if (!usr_value) {
+ pr_err("%s ucontrol data is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ memcpy(&pan_param.num_output_channels, &usr_value[len],
+ sizeof(pan_param.num_output_channels));
+ len += sizeof(pan_param.num_output_channels);
if (pan_param.num_output_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
- pan_param.num_input_channels =
- ucontrol->value.integer.value[len++];
+ memcpy(&pan_param.num_input_channels, &usr_value[len],
+ sizeof(pan_param.num_input_channels));
+ len += sizeof(pan_param.num_input_channels);
if (pan_param.num_input_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < pan_param.num_output_channels; i++) {
- pan_param.output_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
+ if (usr_value[len++]) {
+ memcpy(pan_param.output_channel_map, &usr_value[len],
+ (pan_param.num_output_channels *
+ sizeof(pan_param.output_channel_map[0])));
+ len += (pan_param.num_output_channels *
+ sizeof(pan_param.output_channel_map[0]));
}
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < pan_param.num_input_channels; i++) {
- pan_param.input_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
+ if (usr_value[len++]) {
+ memcpy(pan_param.input_channel_map, &usr_value[len],
+ (pan_param.num_input_channels *
+ sizeof(pan_param.input_channel_map[0])));
+ len += (pan_param.num_input_channels *
+ sizeof(pan_param.input_channel_map[0]));
}
- if (ucontrol->value.integer.value[len++]) {
+ if (usr_value[len++]) {
+ gain_ptr = (uint32_t *) &usr_value[len];
for (i = 0; i < pan_param.num_output_channels *
pan_param.num_input_channels; i++) {
pan_param.gain[i] =
- !(ucontrol->value.integer.value[len++] > 0) ?
+ !(gain_ptr[i] > 0) ?
0 : 2 << 13;
+ len += sizeof(pan_param.gain[i]);
}
+ len += (pan_param.num_input_channels *
+ pan_param.num_output_channels * sizeof(pan_param.gain[0]));
}
ret = q6asm_set_mfc_panning_params(prtd->audio_client,
&pan_param);
len -= pan_param.num_output_channels *
- pan_param.num_input_channels;
- for (i = 0; i < pan_param.num_output_channels *
- pan_param.num_input_channels; i++) {
- /*
- * The data userspace passes is already in Q14 format.
- * For volume gain is in Q28.
- */
- pan_param.gain[i] =
- ucontrol->value.integer.value[len++] << 14;
+ pan_param.num_input_channels * sizeof(pan_param.gain[0]);
+ if (gain_ptr) {
+ for (i = 0; i < pan_param.num_output_channels *
+ pan_param.num_input_channels; i++) {
+ /*
+ * The data userspace passes is already in Q14 format.
+ * For volume gain is in Q28.
+ */
+ pan_param.gain[i] =
+ (gain_ptr[i]) << 14;
+ len += sizeof(pan_param.gain[i]);
+ }
}
ret = q6asm_set_vol_ctrl_gain_pair(prtd->audio_client,
&pan_param);
@@ -1701,40 +1742,60 @@ static int msm_pcm_playback_pan_scale_ctl_get(struct snd_kcontrol *kcontrol,
static int msm_add_stream_pan_scale_controls(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_pcm *pcm;
- struct snd_pcm_usr *pan_ctl_info;
- struct snd_kcontrol *kctl;
const char *playback_mixer_ctl_name = "Audio Stream";
const char *deviceNo = "NN";
const char *suffix = "Pan Scale Control";
- int ctl_len, ret = 0;
+ char *mixer_str = NULL;
+ int ctl_len;
+ int ret = 0;
+ struct msm_plat_data *pdata;
+ struct snd_kcontrol_new pan_scale_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_pcm_playback_pan_scale_ctl_info,
+ .get = msm_pcm_playback_pan_scale_ctl_get,
+ .put = msm_pcm_playback_pan_scale_ctl_put,
+ .private_value = 0,
+ }
+ };
if (!rtd) {
- pr_err("%s: rtd is NULL\n", __func__);
- ret = -EINVAL;
- goto done;
+ pr_err("%s: NULL rtd\n", __func__);
+ return -EINVAL;
}
- pcm = rtd->pcm;
- ctl_len = strlen(playback_mixer_ctl_name) + 1 + strlen(deviceNo) + 1 +
- strlen(suffix) + 1;
-
- ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- NULL, 1, ctl_len, rtd->dai_link->be_id,
- &pan_ctl_info);
-
- if (ret < 0) {
- pr_err("%s: failed add ctl %s. err = %d\n",
- __func__, suffix, ret);
+ ctl_len = strlen(playback_mixer_ctl_name) + 1 +
+ strlen(deviceNo) + 1 + strlen(suffix) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
goto done;
}
- kctl = pan_ctl_info->kctl;
- snprintf(kctl->id.name, ctl_len, "%s %d %s", playback_mixer_ctl_name,
- rtd->pcm->device, suffix);
- kctl->put = msm_pcm_playback_pan_scale_ctl_put;
- kctl->get = msm_pcm_playback_pan_scale_ctl_get;
- pr_debug("%s: Registering new mixer ctl = %s\n", __func__,
- kctl->id.name);
+
+ snprintf(mixer_str, ctl_len, "%s %d %s",
+ playback_mixer_ctl_name, rtd->pcm->device, suffix);
+ pan_scale_control[0].name = mixer_str;
+ pan_scale_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ pdata = dev_get_drvdata(rtd->platform->dev);
+ if (pdata) {
+ if (!pdata->pcm)
+ pdata->pcm = rtd->pcm;
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ pan_scale_control,
+ ARRAY_SIZE
+ (pan_scale_control));
+ if (ret < 0)
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+ } else {
+ pr_err("%s: NULL pdata\n", __func__);
+ ret = -EINVAL;
+ }
+
+ kfree(mixer_str);
done:
return ret;
@@ -1746,18 +1807,28 @@ static int msm_pcm_playback_dnmix_ctl_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int msm_pcm_playback_dnmix_ctl_info(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_info *uinfo)
+{
+ uinfo->type = SNDRV_CTL_ELEM_TYPE_BYTES;
+ uinfo->count = sizeof(struct asm_stream_pan_ctrl_params);
+ return 0;
+}
+
static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
int ret = 0;
int len = 0;
- int i = 0;
- struct snd_pcm_usr *usr_info = snd_kcontrol_chip(kcontrol);
+
+ struct snd_soc_component *usr_info = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_platform *platform;
+ struct msm_plat_data *pdata;
struct snd_pcm_substream *substream;
struct msm_audio *prtd;
struct asm_stream_pan_ctrl_params dnmix_param;
-
- int be_id = ucontrol->value.integer.value[len++];
+ char *usr_value;
+ int be_id = 0;
int stream_id = 0;
if (!usr_info) {
@@ -1765,7 +1836,19 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
ret = -EINVAL;
goto done;
}
- substream = usr_info->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
+ platform = snd_soc_component_to_platform(usr_info);
+ if (!platform) {
+ pr_err("%s platform is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ pdata = dev_get_drvdata(platform->dev);
+ if (!pdata) {
+ pr_err("%s pdata is null\n", __func__);
+ ret = -EINVAL;
+ goto done;
+ }
+ substream = pdata->pcm->streams[SNDRV_PCM_STREAM_PLAYBACK].substream;
if (!substream) {
pr_err("%s substream not found\n", __func__);
ret = -EINVAL;
@@ -1781,40 +1864,51 @@ static int msm_pcm_playback_dnmix_ctl_put(struct snd_kcontrol *kcontrol,
ret = -EINVAL;
goto done;
}
+ usr_value = (char *) ucontrol->value.bytes.data;
+ if (!usr_value) {
+ pr_err("%s usrvalue is null\n", __func__);
+ goto done;
+ }
+ memcpy(&be_id, usr_value, sizeof(be_id));
+ len += sizeof(be_id);
stream_id = prtd->audio_client->session;
- dnmix_param.num_output_channels =
- ucontrol->value.integer.value[len++];
+ memcpy(&dnmix_param.num_output_channels, &usr_value[len],
+ sizeof(dnmix_param.num_output_channels));
+ len += sizeof(dnmix_param.num_output_channels);
if (dnmix_param.num_output_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
- dnmix_param.num_input_channels =
- ucontrol->value.integer.value[len++];
+ memcpy(&dnmix_param.num_input_channels, &usr_value[len],
+ sizeof(dnmix_param.num_input_channels));
+ len += sizeof(dnmix_param.num_input_channels);
if (dnmix_param.num_input_channels >
PCM_FORMAT_MAX_NUM_CHANNEL) {
ret = -EINVAL;
goto done;
}
-
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < dnmix_param.num_output_channels; i++) {
- dnmix_param.output_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
- }
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < dnmix_param.num_input_channels; i++) {
- dnmix_param.input_channel_map[i] =
- ucontrol->value.integer.value[len++];
- }
- }
- if (ucontrol->value.integer.value[len++]) {
- for (i = 0; i < dnmix_param.num_output_channels *
- dnmix_param.num_input_channels; i++) {
- dnmix_param.gain[i] =
- ucontrol->value.integer.value[len++];
- }
+ if (usr_value[len++]) {
+ memcpy(dnmix_param.output_channel_map, &usr_value[len],
+ (dnmix_param.num_output_channels *
+ sizeof(dnmix_param.output_channel_map[0])));
+ len += (dnmix_param.num_output_channels *
+ sizeof(dnmix_param.output_channel_map[0]));
+ }
+ if (usr_value[len++]) {
+ memcpy(dnmix_param.input_channel_map, &usr_value[len],
+ (dnmix_param.num_input_channels *
+ sizeof(dnmix_param.input_channel_map[0])));
+ len += (dnmix_param.num_input_channels *
+ sizeof(dnmix_param.input_channel_map[0]));
+ }
+ if (usr_value[len++]) {
+ memcpy(dnmix_param.gain, (uint32_t *) &usr_value[len],
+ (dnmix_param.num_input_channels *
+ dnmix_param.num_output_channels *
+ sizeof(dnmix_param.gain[0])));
+ len += (dnmix_param.num_input_channels *
+ dnmix_param.num_output_channels * sizeof(dnmix_param.gain[0]));
}
msm_routing_set_downmix_control_data(be_id,
stream_id,
@@ -1826,39 +1920,58 @@ done:
static int msm_add_device_down_mix_controls(struct snd_soc_pcm_runtime *rtd)
{
- struct snd_pcm *pcm;
- struct snd_pcm_usr *usr_info;
- struct snd_kcontrol *kctl;
const char *playback_mixer_ctl_name = "Audio Device";
const char *deviceNo = "NN";
const char *suffix = "Downmix Control";
- int ctl_len, ret = 0;
+ char *mixer_str = NULL;
+ int ctl_len = 0, ret = 0;
+ struct msm_plat_data *pdata;
+ struct snd_kcontrol_new device_downmix_control[1] = {
+ {
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "?",
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .info = msm_pcm_playback_dnmix_ctl_info,
+ .get = msm_pcm_playback_dnmix_ctl_get,
+ .put = msm_pcm_playback_dnmix_ctl_put,
+ .private_value = 0,
+ }
+ };
if (!rtd) {
- pr_err("%s: rtd is NULL\n", __func__);
+ pr_err("%s NULL rtd\n", __func__);
ret = -EINVAL;
goto done;
}
-
- pcm = rtd->pcm;
ctl_len = strlen(playback_mixer_ctl_name) + 1 +
- strlen(deviceNo) + 1 + strlen(suffix) + 1;
- ret = snd_pcm_add_usr_ctls(pcm, SNDRV_PCM_STREAM_PLAYBACK,
- NULL, 1, ctl_len, rtd->dai_link->be_id,
- &usr_info);
- if (ret < 0) {
- pr_err("%s: downmix control add failed: %d\n",
- __func__, ret);
+ strlen(deviceNo) + 1 + strlen(suffix) + 1;
+ mixer_str = kzalloc(ctl_len, GFP_KERNEL);
+ if (!mixer_str) {
+ ret = -ENOMEM;
goto done;
}
- kctl = usr_info->kctl;
- snprintf(kctl->id.name, ctl_len, "%s %d %s",
- playback_mixer_ctl_name, rtd->pcm->device, suffix);
- kctl->put = msm_pcm_playback_dnmix_ctl_put;
- kctl->get = msm_pcm_playback_dnmix_ctl_get;
- pr_debug("%s: downmix control name = %s\n",
- __func__, playback_mixer_ctl_name);
+ snprintf(mixer_str, ctl_len, "%s %d %s",
+ playback_mixer_ctl_name, rtd->pcm->device, suffix);
+ device_downmix_control[0].name = mixer_str;
+ device_downmix_control[0].private_value = rtd->dai_link->be_id;
+ pr_debug("%s: Registering new mixer ctl %s\n", __func__, mixer_str);
+ pdata = dev_get_drvdata(rtd->platform->dev);
+ if (pdata) {
+ if (!pdata->pcm)
+ pdata->pcm = rtd->pcm;
+ ret = snd_soc_add_platform_controls(rtd->platform,
+ device_downmix_control,
+ ARRAY_SIZE
+ (device_downmix_control));
+ if (ret < 0)
+ pr_err("%s: failed add ctl %s. err = %d\n",
+ __func__, mixer_str, ret);
+ } else {
+ pr_err("%s: NULL pdata\n", __func__);
+ ret = -EINVAL;
+ }
+ kfree(mixer_str);
done:
return ret;
}
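
Note on the msm-pcm-q6-v2 refactor above: the user controls are re-registered through snd_soc_add_platform_controls() as SNDRV_CTL_ELEM_TYPE_BYTES kcontrols, so the .put handlers now parse a packed byte buffer (ucontrol->value.bytes.data) with memcpy() and bounds-check the channel counts, instead of walking value.integer.value[]. A hedged, abbreviated sketch of that parse pattern (helper name hypothetical, field names from the diff, optional-map handling shortened):

/* Copy fixed-size fields out of the packed user buffer, validate them,
 * then copy the variable-length channel map. Offsets are illustrative.
 */
static int parse_pan_params(struct snd_ctl_elem_value *ucontrol,
			    struct asm_stream_pan_ctrl_params *pan)
{
	char *usr_value = (char *) ucontrol->value.bytes.data;
	int len = 0;

	memcpy(&pan->num_output_channels, &usr_value[len],
	       sizeof(pan->num_output_channels));
	len += sizeof(pan->num_output_channels);
	if (pan->num_output_channels > PCM_FORMAT_MAX_NUM_CHANNEL)
		return -EINVAL;

	memcpy(&pan->num_input_channels, &usr_value[len],
	       sizeof(pan->num_input_channels));
	len += sizeof(pan->num_input_channels);
	if (pan->num_input_channels > PCM_FORMAT_MAX_NUM_CHANNEL)
		return -EINVAL;

	if (usr_value[len++])	/* optional output channel map present? */
		memcpy(pan->output_channel_map, &usr_value[len],
		       pan->num_output_channels *
		       sizeof(pan->output_channel_map[0]));
	return 0;
}
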
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 909461258179..c3d86e6cced2 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -44,7 +44,7 @@
#define TRUE 0x01
#define FALSE 0x00
-#define SESSION_MAX 8
+#define SESSION_MAX 9
#define ASM_MAX_CHANNELS 8
enum {
ASM_TOPOLOGY_CAL = 0,
@@ -1338,7 +1338,7 @@ int q6asm_audio_client_buf_alloc(unsigned int dir,
pr_debug("%s: session[%d]bufsz[%d]bufcnt[%d]\n", __func__, ac->session,
bufsz, bufcnt);
- if (ac->session <= 0 || ac->session > 8) {
+ if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: Session ID is invalid, session = %d\n", __func__,
ac->session);
goto fail;
@@ -1429,7 +1429,7 @@ int q6asm_audio_client_buf_alloc_contiguous(unsigned int dir,
__func__, ac->session,
bufsz, bufcnt);
- if (ac->session <= 0 || ac->session > 8) {
+ if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: Session ID is invalid, session = %d\n", __func__,
ac->session);
goto fail;
@@ -1738,7 +1738,7 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
return -EINVAL;
}
- if (ac->session <= 0 || ac->session > 8) {
+ if (ac->session <= 0 || ac->session > ASM_ACTIVE_STREAMS_ALLOWED) {
pr_err("%s: Session ID is invalid, session = %d\n", __func__,
ac->session);
return -EINVAL;
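
Note on the q6asm hunks above: SESSION_MAX is bumped to 9 and the hardcoded "> 8" session checks are replaced with ASM_ACTIVE_STREAMS_ALLOWED, so the limit is defined in one place. A hedged sketch of the shared bound check (helper name hypothetical):

/* Single place for the session-ID sanity check instead of a magic 8. */
static inline bool q6asm_session_valid(const struct audio_client *ac)
{
	return ac->session > 0 && ac->session <= ASM_ACTIVE_STREAMS_ALLOWED;
}
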