-rw-r--r--  Documentation/devicetree/bindings/arm/msm/qcom,osm.txt | 47
-rw-r--r--  Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt | 9
-rw-r--r--  Documentation/devicetree/bindings/gpu/adreno.txt | 51
-rw-r--r--  Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt | 9
-rw-r--r--  arch/arm/boot/dts/qcom/Makefile | 3
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi | 5
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi | 4
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-cdp.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-fluid.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/msm8996-mtp.dtsi | 6
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-skuk.dtsi | 327
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dts | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi | 17
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dts | 23
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dtsi | 20
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi | 113
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt.dtsi | 2
-rw-r--r--  arch/arm/configs/msmfalcon_defconfig | 2
-rw-r--r--  arch/arm/mach-qcom/Kconfig | 32
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 6
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig | 6
-rw-r--r--  arch/arm64/configs/msmfalcon-perf_defconfig | 25
-rw-r--r--  arch/arm64/configs/msmfalcon_defconfig | 24
-rw-r--r--  drivers/android/binder.c | 31
-rw-r--r--  drivers/char/adsprpc.c | 2
-rw-r--r--  drivers/clk/msm/clock-osm.c | 451
-rw-r--r--  drivers/clk/qcom/gcc-msmfalcon.c | 15
-rw-r--r--  drivers/clk/qcom/vdd-level-falcon.h | 42
-rw-r--r--  drivers/crypto/Kconfig | 10
-rw-r--r--  drivers/crypto/msm/qcedev.c | 68
-rw-r--r--  drivers/gpio/qpnp-pin.c | 12
-rw-r--r--  drivers/gpu/msm/adreno-gpulist.h | 2
-rw-r--r--  drivers/gpu/msm/adreno_a3xx.c | 40
-rw-r--r--  drivers/gpu/msm/kgsl.c | 6
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.c | 20
-rw-r--r--  drivers/gpu/msm/kgsl_pool.c | 132
-rw-r--r--  drivers/gpu/msm/kgsl_pool.h | 2
-rw-r--r--  drivers/hwtracing/coresight/coresight-etm4x.c | 4
-rw-r--r--  drivers/hwtracing/coresight/coresight-tmc.c | 6
-rw-r--r--  drivers/hwtracing/coresight/coresight-tpiu.c | 3
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/cam_soc_api.c | 7
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 42
-rw-r--r--  drivers/media/platform/msm/vidc/msm_venc.c | 5
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_common.c | 14
-rw-r--r--  drivers/mfd/wcd9xxx-utils.c | 1
-rw-r--r--  drivers/misc/qseecom.c | 322
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 14
-rw-r--r--  drivers/power/qcom-charger/fg-core.h | 6
-rw-r--r--  drivers/power/qcom-charger/qpnp-fg-gen3.c | 126
-rw-r--r--  drivers/power/qcom-charger/qpnp-smb2.c | 58
-rw-r--r--  drivers/power/qcom-charger/smb-lib.c | 107
-rw-r--r--  drivers/power/qcom-charger/smb-lib.h | 11
-rw-r--r--  drivers/power/qcom-charger/smb-reg.h | 4
-rw-r--r--  drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c | 4
-rw-r--r--  drivers/soc/qcom/pil-q6v5-mss.c | 3
-rw-r--r--  drivers/soc/qcom/qdsp6v2/apr_tal_glink.c | 51
-rw-r--r--  drivers/soc/qcom/service-locator.c | 2
-rw-r--r--  drivers/soc/qcom/service-notifier.c | 5
-rw-r--r--  drivers/usb/host/xhci-mem.c | 30
-rw-r--r--  drivers/video/fbdev/msm/mdss.h | 13
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_panel.c | 121
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.c | 6
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 119
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h | 34
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 56
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_debug.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c | 18
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 114
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c | 309
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pipe.c | 17
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_util.c | 31
-rw-r--r--  drivers/video/fbdev/msm/mdss_panel.h | 89
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.c | 37
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.h | 34
-rw-r--r--  include/linux/sched.h | 12
-rw-r--r--  include/soc/qcom/qseecomi.h | 19
-rw-r--r--  include/sound/soc-dai.h | 2
-rw-r--r--  include/uapi/linux/msm_mdp_ext.h | 7
-rw-r--r--  include/uapi/media/msm_sde_rotator.h | 6
-rw-r--r--  kernel/cgroup.c | 124
-rw-r--r--  kernel/fork.c | 1
-rw-r--r--  kernel/sched/core.c | 60
-rw-r--r--  kernel/sched/fair.c | 27
-rw-r--r--  kernel/sched/hmp.c | 60
-rw-r--r--  kernel/watchdog.c | 28
-rw-r--r--  sound/core/pcm_native.c | 2
-rw-r--r--  sound/soc/codecs/wcd9335.c | 35
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x.c | 132
-rw-r--r--  sound/soc/msm/qdsp6v2/q6asm.c | 2
-rw-r--r--  sound/usb/usb_audio_qmi_svc.c | 211
96 files changed, 3351 insertions, 792 deletions
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index c4d651e36d02..782fb6c4124d 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -18,13 +18,15 @@ Properties:
Definition: Addresses and sizes for the memory of the OSM controller,
cluster PLL management, and APCS common register regions.
Optionally, the address of the efuse registers used to
- determine the pwrcl or perfcl speed-bins.
+ determine the pwrcl or perfcl speed-bins and/or the ACD
+ register space to initialize prior to enabling OSM.
+
- reg-names
Usage: required
Value type: <stringlist>
Definition: Address names. Must be "osm", "pwrcl_pll", "perfcl_pll",
- "apcs_common" and "debug". Optionally, "pwrcl_efuse" or
- "perfcl_efuse".
+ "apcs_common", and "debug". Optionally, "pwrcl_efuse",
+ "perfcl_efuse", "pwrcl_acd", or "perfcl_acd".
Must be specified in the same order as the corresponding
addresses are specified in the reg property.
@@ -216,6 +218,45 @@ Properties:
override values to write to the OSM controller for each
of the two clusters. Each tuple must contain three elements.
+- qcom,acdtd-val
+ Usage: required if pwrcl_acd or perfcl_acd registers are specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values to program to the ACD
+ Tunable-Length Delay register for the power and performance
+ clusters.
+
+- qcom,acdcr-val
+ Usage: required if pwrcl_acd or perfcl_acd registers are specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD control register
+ for the power and performance clusters.
+
+- qcom,acdsscr-val
+ Usage: required if pwrcl_acd or perfcl_acd registers are specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD Soft Start Control
+ register for the power and performance clusters.
+
+- qcom,acdextint0-val
+ Usage: required if pwrcl_acd or perfcl_acd registers are specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the initial values for the ACD
+ external interface configuration register for the power
+ and performance clusters.
+
+- qcom,acdextint1-val
+ Usage: required if pwrcl_acd or perfcl_acd registers are specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the final values for the ACD
+ external interface configuration register for the power
+ and performance clusters.
+
+- qcom,acdautoxfer-val
+ Usage: required if pwrcl_acd or perfcl_acd registers are specified
+ Value type: <prop-encoded-array>
+ Definition: Array which defines the values for the ACD auto transfer
+ control register for the power and performance clusters.
+
- qcom,pwrcl-apcs-mem-acc-cfg
Usage: required if qcom,osm-no-tz is specified
Value type: <prop-encoded-array>
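
Each of the six ACD value properties carries one entry per cluster, power cluster first and performance cluster second. A minimal sketch of a consumer node is shown below; the values are copied from the msmcobalt-v2.dtsi hunk later in this patch and are illustrative only, not a recommendation for other targets:

	&clock_cpu {
		reg-names = "osm", "pwrcl_pll", "perfcl_pll", "apcs_common",
			"pwrcl_acd", "perfcl_acd", "perfcl_efuse", "debug";

		qcom,acdtd-val = <0x00009611 0x00009611>;
		qcom,acdcr-val = <0x002b5ffd 0x002b5ffd>;
		qcom,acdsscr-val = <0x00000501 0x00000501>;
		qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>;
		qcom,acdextint1-val = <0x2cf9afc 0x2cf9afc>;
		qcom,acdautoxfer-val = <0x00000015 0x00000015>;
	};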
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 68b8f09238e0..56ad8c361219 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -350,8 +350,13 @@ the fps window.
as below:
--> Reset GPIO value
--> Sleep value (in ms)
-- qcom,partial-update-enabled: Boolean used to enable partial
+- qcom,partial-update-enabled: String used to enable partial
panel update for command mode panels.
+    "none": Partial update is disabled.
+    "single_roi": Default enable mode; only a single ROI is sent to the panel.
+    "dual_roi": Two ROIs are merged into one larger ROI. The panel DDIC must be
+    able to process two ROIs along with the DCS command used to send them.
+    Partial update is disabled if this property is not specified.
- qcom,mdss-dsi-horizontal-line-idle: List of width ranges (EC - SC) in pixels indicating
additional idle time in dsi clock cycles that is needed
to compensate for smaller line width.
@@ -632,7 +637,7 @@ Example:
qcom,mdss-tear-check-rd-ptr-trigger-intr = <1281>;
qcom,mdss-tear-check-frame-rate = <6000>;
qcom,mdss-dsi-reset-sequence = <1 2>, <0 10>, <1 10>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,dcs-cmd-by-left;
qcom,mdss-dsi-lp11-init;
qcom,mdss-dsi-init-delay-us = <100>;
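
A panel whose DDIC can take two regions would select the merged dual-ROI path instead. A hypothetical command-mode panel node (the property pairing below is assumed for illustration, not taken from a shipping target) might carry:

	qcom,partial-update-enabled = "dual_roi";
	qcom,panel-roi-alignment = <4 4 2 2 20 20>;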
diff --git a/Documentation/devicetree/bindings/gpu/adreno.txt b/Documentation/devicetree/bindings/gpu/adreno.txt
index ca58f0da07ef..44c874a7a080 100644
--- a/Documentation/devicetree/bindings/gpu/adreno.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno.txt
@@ -155,6 +155,23 @@ GPU Quirks:
- qcom,gpu-quirk-dp2clockgating-disable:
Disable RB sampler data path clock gating optimization
+KGSL Memory Pools:
+- qcom,gpu-mempools: Container for sets of GPU mempools. Multiple sets
+			(pools) can be defined within qcom,gpu-mempools.
+			Each mempool defines a pool order, the number of reserved
+			pages, and whether allocation beyond the reserve is allowed.
+Properties:
+- compatible: Must be qcom,gpu-mempools.
+- qcom,mempool-max-pages: Maximum number of pages across all mempools. If not defined, there is no limit.
+- qcom,gpu-mempool: Defines a set of mempools.
+
+Properties:
+- reg: Index of the pool (0 = lowest pool order).
+- qcom,mempool-page-size: Size, in bytes, of the pages in this pool.
+- qcom,mempool-reserved: Number of pages reserved at init time for a pool.
+- qcom,mempool-allocate: Allocate memory from system memory when the
+			  reserved pool is exhausted.
+
The following properties are optional as collecting data via coresight might
not be supported for every chipset. The documentation for coresight
properties can be found in:
@@ -222,6 +239,40 @@ Example of A330 GPU in MSM8916:
coresight-child-list = <&funnel_in0>;
coresight-child-ports = <5>;
+ /* GPU Mempools */
+ qcom,gpu-mempools {
+ #address-cells= <1>;
+ #size-cells = <0>;
+ compatible = "qcom,gpu-mempools";
+
+ /* 4K Page Pool configuration */
+ qcom,gpu-mempool@0 {
+ reg = <0>;
+ qcom,mempool-page-size = <4096>;
+ qcom,mempool-reserved = <2048>;
+ qcom,mempool-allocate;
+ };
+ /* 8K Page Pool configuration */
+ qcom,gpu-mempool@1 {
+ reg = <1>;
+ qcom,mempool-page-size = <8192>;
+ qcom,mempool-reserved = <1024>;
+ qcom,mempool-allocate;
+ };
+ /* 64K Page Pool configuration */
+ qcom,gpu-mempool@2 {
+ reg = <2>;
+ qcom,mempool-page-size = <65536>;
+ qcom,mempool-reserved = <256>;
+ };
+ /* 1M Page Pool configuration */
+ qcom,gpu-mempool@3 {
+ reg = <3>;
+ qcom,mempool-page-size = <1048576>;
+ qcom,mempool-reserved = <32>;
+ };
+ };
+
/* Power levels */
qcom,gpu-pwrlevels-bins {
#address-cells = <1>;
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
index caabcd347a72..4f5e0a117b2d 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
@@ -55,7 +55,7 @@ First Level Node - FG Gen3 device
be notified via the power supply framework. The userspace
will read 0% soc and immediately shutdown. If this property
is not specified, then the default value used will be
- 3100mV.
+ 2800mV.
- qcom,fg-vbatt-low-thr
Usage: optional
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
index 82386ba9b082..7090426c68f8 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
@@ -129,6 +129,15 @@ Charger specific properties:
happen but the adapter won't be asked to switch to a higher
voltage point.
+- qcom,chg-inhibit-threshold-mv
+ Usage: optional
+ Value type: <u32>
+ Definition: Charge inhibit threshold in milli-volts. Charging will be
+ inhibited when the battery voltage is within this threshold
+ from Vfloat at charger insertion. If this is not specified
+ then charge inhibit will be disabled by default.
+ Allowed values are: 50, 100, 200, 300.
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
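
As a worked example, a board that wants charging inhibited whenever the battery is within 100 mV of Vfloat at charger insertion would add one of the allowed values to its charger node:

	qcom,chg-inhibit-threshold-mv = <100>;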
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index 7ab87629378a..7810842100cd 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -128,7 +128,8 @@ dtb-$(CONFIG_ARCH_MSMCOBALT) += msmcobalt-sim.dtb \
apqcobalt-v2.1-cdp.dtb \
apqcobalt-v2.1-qrd.dtb \
msmcobalt-v2.1-interposer-msmfalcon-cdp.dtb \
- msmcobalt-v2.1-interposer-msmfalcon-mtp.dtb
+ msmcobalt-v2.1-interposer-msmfalcon-mtp.dtb \
+ msmcobalt-v2.1-interposer-msmfalcon-qrd.dtb
dtb-$(CONFIG_ARCH_MSMHAMSTER) += msmhamster-rumi.dtb
diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
index a5243aff4282..f4ca25668814 100644
--- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
@@ -321,7 +321,7 @@
qcom,pmic-revid = <&pmicobalt_revid>;
io-channels = <&pmicobalt_rradc 0>;
io-channel-names = "rradc_batt_id";
- qcom,fg-esr-timer-awake = <64>;
+ qcom,fg-esr-timer-awake = <96>;
qcom,fg-esr-timer-asleep = <256>;
qcom,cycle-counter-en;
status = "okay";
@@ -334,7 +334,8 @@
<0x2 0x40 0x2 IRQ_TYPE_EDGE_BOTH>,
<0x2 0x40 0x3 IRQ_TYPE_EDGE_BOTH>,
<0x2 0x40 0x4 IRQ_TYPE_EDGE_BOTH>,
- <0x2 0x40 0x5 IRQ_TYPE_EDGE_BOTH>,
+ <0x2 0x40 0x5
+ IRQ_TYPE_EDGE_RISING>,
<0x2 0x40 0x6 IRQ_TYPE_EDGE_BOTH>,
<0x2 0x40 0x7 IRQ_TYPE_EDGE_BOTH>;
interrupt-names = "soc-update",
diff --git a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
index 40df8b7ff4de..48cf099b84a8 100644
--- a/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-agave-adp.dtsi
@@ -388,7 +388,7 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,cont-splash-enabled;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 64 720 64>;
};
@@ -432,7 +432,7 @@
qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
qcom,cont-splash-enabled;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <4 4 2 2 20 20>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
index 30646dba1cd9..34e41c2bf28f 100644
--- a/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-auto-cdp.dtsi
@@ -376,7 +376,7 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,cont-splash-enabled;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 64 720 64>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
index 4855da387e21..1c85e13aa0f8 100644
--- a/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-cdp.dtsi
@@ -409,7 +409,7 @@
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 64 720 64>;
};
@@ -432,7 +432,7 @@
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
/* panel supports slice height of 8/16/32/48/3840 */
qcom,panel-roi-alignment = <1080 8 1080 8 1080 8>;
};
@@ -480,7 +480,7 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <4 2 4 2 20 20>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi b/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi
index 824d31afb7d8..550da56520f8 100644
--- a/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-fluid.dtsi
@@ -629,7 +629,7 @@
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
/* panel supports slice height of 8/16/32/48/3840 */
qcom,panel-roi-alignment = <1080 8 1080 8 1080 8>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
index 3d534ff1550e..bd8aa7fe02f7 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mmxf-adp.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -343,7 +343,7 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,cont-splash-enabled;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 64 720 64>;
};
@@ -387,7 +387,7 @@
qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
qcom,cont-splash-enabled;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <4 4 2 2 20 20>;
};
diff --git a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
index 96279288d336..07cb98860498 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mtp.dtsi
@@ -397,7 +397,7 @@
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 64 720 64>;
};
@@ -420,7 +420,7 @@
qcom,mdss-dsi-bl-min-level = <1>;
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
/* panel supports slice height of 8/16/32/48/3840 */
qcom,panel-roi-alignment = <1080 8 1080 8 1080 8>;
};
@@ -468,7 +468,7 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,5v-boost-gpio = <&pmi8994_gpios 8 0>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <4 4 2 2 20 20>;
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-skuk.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-skuk.dtsi
new file mode 100644
index 000000000000..a432f0710fe2
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmcobalt-camera-sensor-skuk.dtsi
@@ -0,0 +1,327 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash0: qcom,camera-flash@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmicobalt_flash0 &pmicobalt_flash1>;
+ qcom,switch-source = <&pmicobalt_switch0>;
+ status = "ok";
+ };
+
+ led_flash1: qcom,camera-flash@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pmicobalt_flash2>;
+ qcom,switch-source = <&pmicobalt_switch1>;
+ status = "ok";
+ };
+};
+
+&cci {
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 29 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ };
+
+ actuator1: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 29 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ };
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>,
+ <&pmcobalt_gpios 20 0>,
+ <&tlmm 29 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom1: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vdig-supply = <&pmcobalt_lvs1>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
+ qcom,cam-vreg-op-mode = <0 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom2: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmcobalt_l22>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-max-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 14 0>,
+ <&tlmm 28 0>,
+ <&pmcobalt_gpios 9 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk1_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk1_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x0>;
+ qcom,special-support-sensors = "imx362_gt24c64a";
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <270>;
+ qcom,eeprom-src = <&eeprom0>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <0 3312000 1352000>;
+ qcom,cam-vreg-max-voltage = <0 3600000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 13 0>,
+ <&tlmm 30 0>,
+ <&pmcobalt_gpios 20 0>,
+ <&tlmm 29 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk0_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk0_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <1>;
+ qcom,mount-angle = <90>;
+ qcom,eeprom-src = <&eeprom1>;
+ cam_vdig-supply = <&pmcobalt_lvs1>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmicobalt_bob>;
+ qcom,cam-vreg-name = "cam_vdig", "cam_vio", "cam_vana";
+ qcom,cam-vreg-min-voltage = <0 0 3312000>;
+ qcom,cam-vreg-max-voltage = <0 0 3600000>;
+ qcom,cam-vreg-op-mode = <0 0 80000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&tlmm 8 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vana = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
+ "CAM_RESET1",
+ "CAM_VANA1";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@2 {
+ cell-index = <2>;
+ compatible = "qcom,camera";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <90>;
+ cam_vio-supply = <&pmcobalt_lvs1>;
+ cam_vana-supply = <&pmcobalt_l22>;
+ cam_vdig-supply = <&pmcobalt_s3>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-max-voltage =
+ <0 2864000 1352000>;
+ qcom,cam-vreg-op-mode = <0 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 15 0>,
+ <&tlmm 9 0>,
+ <&pmcobalt_gpios 9 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 2>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss clk_mclk2_clk_src>,
+ <&clock_mmss clk_mmss_camss_mclk2_clk>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+};
+&pmcobalt_gpios {
+ gpio@c800 { /* GPIO 9 - CAMERA SENSOR 2 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+
+ gpio@d300 { /* GPIO 20 - CAMERA SENSOR 0 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <1>; /* VIN1 GPIO_MV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index 4822823aa63f..9b9b863c1847 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -382,7 +382,7 @@
qcom,mdss-dsi-bl-max-level = <4095>;
qcom,5v-boost-gpio = <&tlmm 51 0>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
- qcom,partial-update-enabled;
+ qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <4 2 4 2 20 20>;
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dts b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dts
index 88a5e945436c..b5a94de9aab7 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dts
+++ b/arch/arm/boot/dts/qcom/msmcobalt-qrd-skuk.dts
@@ -15,6 +15,7 @@
#include "msmcobalt.dtsi"
#include "msmcobalt-qrd-skuk.dtsi"
+#include "msmcobalt-camera-sensor-skuk.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM COBALT SKUK";
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index bb72cf3a0d2c..1ba5905bcc36 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -590,8 +590,9 @@
qcom,cpr-idle-cycles = <15>;
qcom,cpr-up-down-delay-time = <3000>;
qcom,cpr-step-quot-init-min = <11>;
- qcom,cpr-step-quot-init-max = <13>;
+ qcom,cpr-step-quot-init-max = <12>;
qcom,cpr-count-mode = <0>; /* All at once */
+ qcom,cpr-count-repeat = <1>;
qcom,cpr-down-error-step-limit = <1>;
qcom,cpr-up-error-step-limit = <1>;
qcom,cpr-corner-switch-delay-time = <209>;
@@ -618,7 +619,7 @@
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
- qcom,cpr-consecutive-down = <0>;
+ qcom,cpr-consecutive-down = <2>;
qcom,cpr-up-threshold = <2>;
qcom,cpr-down-threshold = <2>;
@@ -740,7 +741,7 @@
qcom,cpr-aging-max-voltage-adjustment = <15000>;
qcom,cpr-aging-ref-corner = <22>;
- qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,cpr-aging-ro-scaling-factor = <1620>;
qcom,allow-aging-voltage-adjustment = <0>;
qcom,allow-aging-open-loop-voltage-adjustment =
<1>;
@@ -761,9 +762,10 @@
qcom,cpr-loop-time = <5000000>;
qcom,cpr-idle-cycles = <15>;
qcom,cpr-up-down-delay-time = <3000>;
- qcom,cpr-step-quot-init-min = <11>;
- qcom,cpr-step-quot-init-max = <13>;
+ qcom,cpr-step-quot-init-min = <9>;
+ qcom,cpr-step-quot-init-max = <14>;
qcom,cpr-count-mode = <0>; /* All at once */
+ qcom,cpr-count-repeat = <1>;
qcom,cpr-down-error-step-limit = <1>;
qcom,cpr-up-error-step-limit = <1>;
qcom,cpr-corner-switch-delay-time = <209>;
@@ -790,7 +792,7 @@
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
- qcom,cpr-consecutive-down = <0>;
+ qcom,cpr-consecutive-down = <2>;
qcom,cpr-up-threshold = <2>;
qcom,cpr-down-threshold = <2>;
@@ -932,7 +934,7 @@
qcom,cpr-aging-max-voltage-adjustment = <15000>;
qcom,cpr-aging-ref-corner = <25>;
- qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,cpr-aging-ro-scaling-factor = <1700>;
qcom,allow-aging-voltage-adjustment = <0>;
qcom,allow-aging-open-loop-voltage-adjustment =
<1>;
@@ -959,6 +961,7 @@
qcom,cpr-step-quot-init-min = <8>;
qcom,cpr-step-quot-init-max = <12>;
qcom,cpr-count-mode = <0>; /* All-at-once min */
+ qcom,cpr-count-repeat = <1>;
vdd-supply = <&pm8005_s1>;
qcom,voltage-step = <4000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dts b/arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dts
new file mode 100644
index 000000000000..69a5419503ac
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dts
@@ -0,0 +1,23 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "msmcobalt-v2.1-interposer-msmfalcon-qrd.dtsi"
+
+/ {
+ model =
+ "Qualcomm Technologies, Inc. MSM COBALT v2.1 MSM FALCON Interposer QRD";
+ compatible = "qcom,msmcobalt-qrd", "qcom,msmcobalt", "qcom,qrd";
+ qcom,board-id = <0x03000b 0x80>;
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dtsi
new file mode 100644
index 000000000000..6e639ff9e0bc
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.1-interposer-msmfalcon-qrd.dtsi
@@ -0,0 +1,20 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <dt-bindings/interrupt-controller/irq.h>
+#include "msmcobalt-v2.1-interposer-msmfalcon.dtsi"
+
+&uartblsp2dm1 {
+ status = "ok";
+ pinctrl-names = "default";
+ pinctrl-0 = <&uart_console_active>;
+};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index beecee843778..a0d1f1fa11af 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -26,6 +26,25 @@
&clock_cpu {
compatible = "qcom,cpu-clock-osm-msmcobalt-v2";
+ reg = <0x179c0000 0x4000>,
+ <0x17916000 0x1000>,
+ <0x17816000 0x1000>,
+ <0x179d1000 0x1000>,
+ <0x17914800 0x800>,
+ <0x17814800 0x800>,
+ <0x00784130 0x8>,
+ <0x1791101c 0x8>;
+ reg-names = "osm", "pwrcl_pll", "perfcl_pll",
+ "apcs_common", "pwrcl_acd", "perfcl_acd",
+ "perfcl_efuse", "debug";
+
+ qcom,acdtd-val = <0x00009611 0x00009611>;
+ qcom,acdcr-val = <0x002b5ffd 0x002b5ffd>;
+ qcom,acdsscr-val = <0x00000501 0x00000501>;
+ qcom,acdextint0-val = <0x2cf9ae8 0x2cf9ae8>;
+ qcom,acdextint1-val = <0x2cf9afc 0x2cf9afc>;
+ qcom,acdautoxfer-val = <0x00000015 0x00000015>;
+
/delete-property/ qcom,llm-sw-overr;
qcom,pwrcl-speedbin0-v0 =
< 300000000 0x0004000f 0x01200020 0x1 1 >,
@@ -422,48 +441,50 @@
<40000 24000 0 30000>,
<40000 24000 0 30000>,
<40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>,
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>,
/* Speed bin 1 */
<40000 24000 0 30000>,
<40000 24000 0 30000>,
<40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>,
- <40000 24000 0 30000>;
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>,
+ <25000 9000 (-15000) 15000>;
qcom,cpr-closed-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
<20000 26000 0 30000>,
<20000 26000 0 30000>,
<20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>,
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>,
/* Speed bin 1 */
<20000 26000 0 30000>,
<20000 26000 0 30000>,
<20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>,
- <20000 26000 0 30000>;
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>,
+ <5000 11000 (-15000) 15000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
qcom,cpr-aging-ref-corner = <22 22>;
- qcom,cpr-aging-ro-scaling-factor = <2950>;
- qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,cpr-aging-ro-scaling-factor = <1620>;
+ qcom,allow-aging-voltage-adjustment =
+ <0 0 0 1 1 1 1 1>,
+ <0 0 0 1 1 1 1 1>;
};
&apc1_cpr {
@@ -583,48 +604,50 @@
<8000 0 0 52000>,
<8000 0 0 52000>,
<8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>,
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>,
/* Speed bin 1 */
<8000 0 0 52000>,
<8000 0 0 52000>,
<8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>,
- <8000 0 0 52000>;
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>,
+ <(-7000) (-15000) (-15000) 37000>;
qcom,cpr-closed-loop-voltage-fuse-adjustment =
/* Speed bin 0 */
<0 0 0 50000>,
<0 0 0 50000>,
<0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>,
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>,
/* Speed bin 1 */
<0 0 0 50000>,
<0 0 0 50000>,
<0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>,
- <0 0 0 50000>;
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>,
+ <(-15000) (-15000) (-15000) 35000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
qcom,cpr-aging-ref-corner = <30 26>;
- qcom,cpr-aging-ro-scaling-factor = <2950>;
- qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,cpr-aging-ro-scaling-factor = <1700>;
+ qcom,allow-aging-voltage-adjustment =
+ <0 0 0 1 1 1 1 1>,
+ <0 0 0 1 1 1 1 1>;
};
&pm8005_s1 {
@@ -741,7 +764,7 @@
qcom,cpr-aging-max-voltage-adjustment = <15000>;
qcom,cpr-aging-ref-corner = <8>;
- qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,cpr-aging-ro-scaling-factor = <1620>;
qcom,allow-aging-voltage-adjustment = <0 0 1 1 1 1 1 1>;
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index 3ad4b6b5622d..cbddad0bbcb0 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -1887,6 +1887,7 @@
usb-phy = <&qusb_phy0>, <&ssphy>;
tx-fifo-resize;
snps,nominal-elastic-buffer;
+ snps,disable-clk-gating;
snps,hird_thresh = <0x10>;
snps,num-gsi-evt-buffs = <0x3>;
};
@@ -3117,5 +3118,6 @@
#include "msm-audio-lpass.dtsi"
#include "msmcobalt-mdss.dtsi"
#include "msmcobalt-mdss-pll.dtsi"
+#include "msm-rdbg.dtsi"
#include "msmcobalt-blsp.dtsi"
#include "msmcobalt-audio.dtsi"
diff --git a/arch/arm/configs/msmfalcon_defconfig b/arch/arm/configs/msmfalcon_defconfig
index 0788a03ed219..085f6242b616 100644
--- a/arch/arm/configs/msmfalcon_defconfig
+++ b/arch/arm/configs/msmfalcon_defconfig
@@ -38,6 +38,7 @@ CONFIG_PARTITION_ADVANCED=y
# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSMFALCON=y
+CONFIG_ARCH_MSMTRITON=y
CONFIG_SMP=y
CONFIG_SCHED_MC=y
CONFIG_NR_CPUS=8
@@ -420,6 +421,7 @@ CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_USB_BAM=y
CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_MSM_GCC_FALCON=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_ARM_SMMU=y
CONFIG_IOMMU_DEBUG=y
diff --git a/arch/arm/mach-qcom/Kconfig b/arch/arm/mach-qcom/Kconfig
index d4d355531169..cbccbeb483c9 100644
--- a/arch/arm/mach-qcom/Kconfig
+++ b/arch/arm/mach-qcom/Kconfig
@@ -39,6 +39,38 @@ config ARCH_MSMFALCON
This enables support for the MSMFALCON chipset. If you do not
wish to build a kernel that runs on this chipset, say 'N' here.
+config ARCH_MSMTRITON
+ bool "Enable Support for Qualcomm MSMTRITON"
+ select CLKDEV_LOOKUP
+ select HAVE_CLK
+ select HAVE_CLK_PREPARE
+ select PM_OPP
+ select SOC_BUS
+ select MSM_IRQ
+ select THERMAL_WRITABLE_TRIPS
+ select ARM_GIC_V3
+ select ARM_AMBA
+ select SPARSE_IRQ
+ select MULTI_IRQ_HANDLER
+ select HAVE_ARM_ARCH_TIMER
+ select MAY_HAVE_SPARSE_IRQ
+ select MSM_PM if PM
+ select QMI_ENCDEC
+ select CPU_FREQ
+ select CPU_FREQ_MSM
+ select PM_DEVFREQ
+ select MSM_DEVFREQ_DEVBW
+ select DEVFREQ_SIMPLE_DEV
+ select DEVFREQ_GOV_MSM_BW_HWMON
+ select MSM_BIMC_BWMON
+ select MSM_QDSP6V2_CODECS
+ select MSM_AUDIO_QDSP6V2 if SND_SOC
+ select MSM_RPM_SMD
+ select MSM_JTAGV8 if CORESIGHT_ETMV4
+ help
+ This enables support for the MSMTRITON chipset. If you do not
+ wish to build a kernel that runs on this chipset, say 'N' here.
+
config ARCH_MSM8X60
bool "Enable support for MSM8X60"
select ARCH_SUPPORTS_BIG_ENDIAN
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 0bda100dfb5a..bf63a360eb06 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -380,9 +380,9 @@ CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=m
-CONFIG_MSM_VIDC_VMEM=m
-CONFIG_MSM_VIDC_GOVERNORS=m
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
CONFIG_QCOM_KGSL=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 3568fe4ed29f..93dbc4f7e52f 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -383,9 +383,9 @@ CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=m
-CONFIG_MSM_VIDC_VMEM=m
-CONFIG_MSM_VIDC_GOVERNORS=m
+CONFIG_MSM_VIDC_V4L2=y
+CONFIG_MSM_VIDC_VMEM=y
+CONFIG_MSM_VIDC_GOVERNORS=y
CONFIG_MSM_SDE_ROTATOR=y
CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
CONFIG_QCOM_KGSL=y
diff --git a/arch/arm64/configs/msmfalcon-perf_defconfig b/arch/arm64/configs/msmfalcon-perf_defconfig
index eeb12ff4cc6d..df695f993ed9 100644
--- a/arch/arm64/configs/msmfalcon-perf_defconfig
+++ b/arch/arm64/configs/msmfalcon-perf_defconfig
@@ -13,13 +13,16 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -90,6 +93,7 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -277,6 +281,7 @@ CONFIG_KEYBOARD_GPIO=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_UINPUT=y
@@ -320,7 +325,9 @@ CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_BATTERY_BCL=y
CONFIG_QPNP_SMB2=y
CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
CONFIG_LIMITS_MONITOR=y
CONFIG_LIMITS_LITE_HW=y
CONFIG_THERMAL_MONITOR=y
@@ -379,10 +386,11 @@ CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
-CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_VIDC_V4L2=m
+CONFIG_MSM_VIDC_VMEM=m
+CONFIG_MSM_VIDC_GOVERNORS=m
CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
@@ -436,6 +444,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
@@ -480,6 +489,7 @@ CONFIG_GPIO_USB_DETECT=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_MSM_GCC_FALCON=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_ARM_SMMU=y
@@ -495,6 +505,7 @@ CONFIG_MSM_GLINK_SMD_XPRT=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_GLINK_SPI_XPRT=y
CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
CONFIG_MSM_SMEM_LOGGING=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_SMP2P_TEST=y
@@ -531,6 +542,8 @@ CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
@@ -542,16 +555,21 @@ CONFIG_DEVFREQ_SPDM=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_ARM_GIC_V3_ACL=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
+CONFIG_MSM_TZ_LOG=y
CONFIG_SENSORS_SSC=y
CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -574,6 +592,7 @@ CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_LINKS_AND_SINKS=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
diff --git a/arch/arm64/configs/msmfalcon_defconfig b/arch/arm64/configs/msmfalcon_defconfig
index f86282078b65..8719eb7cb92e 100644
--- a/arch/arm64/configs/msmfalcon_defconfig
+++ b/arch/arm64/configs/msmfalcon_defconfig
@@ -13,13 +13,16 @@ CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
+CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -89,6 +92,7 @@ CONFIG_IP_PNP_DHCP=y
CONFIG_INET_AH=y
CONFIG_INET_ESP=y
CONFIG_INET_IPCOMP=y
+CONFIG_INET_DIAG_DESTROY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
@@ -278,6 +282,7 @@ CONFIG_INPUT_JOYSTICK=y
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
+CONFIG_SECURE_TOUCH=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_KEYCHORD=y
@@ -323,7 +328,9 @@ CONFIG_MSM_BCL_PERIPHERAL_CTL=y
CONFIG_BATTERY_BCL=y
CONFIG_QPNP_SMB2=y
CONFIG_SMB138X_CHARGER=y
+CONFIG_QPNP_QNOVO=y
CONFIG_SENSORS_QPNP_ADC_VOLTAGE=y
+CONFIG_CPU_THERMAL=y
CONFIG_LIMITS_MONITOR=y
CONFIG_LIMITS_LITE_HW=y
CONFIG_THERMAL_MONITOR=y
@@ -382,10 +389,11 @@ CONFIG_MSM_V4L2_VIDEO_OVERLAY_DEVICE=y
CONFIG_MSMB_JPEG=y
CONFIG_MSM_FD=y
CONFIG_MSM_JPEGDMA=y
-CONFIG_MSM_VIDC_V4L2=y
-CONFIG_MSM_VIDC_VMEM=y
-CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_VIDC_V4L2=m
+CONFIG_MSM_VIDC_VMEM=m
+CONFIG_MSM_VIDC_GOVERNORS=m
CONFIG_MSM_SDE_ROTATOR=y
+CONFIG_MSM_SDE_ROTATOR_EVTLOG_DEBUG=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_VIRTUAL=y
@@ -438,6 +446,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
@@ -490,6 +499,7 @@ CONFIG_GPIO_USB_DETECT=y
CONFIG_SEEMP_CORE=y
CONFIG_USB_BAM=y
CONFIG_QCOM_CLK_SMD_RPM=y
+CONFIG_MSM_GCC_FALCON=y
CONFIG_REMOTE_SPINLOCK_MSM=y
CONFIG_IOMMU_IO_PGTABLE_FAST=y
CONFIG_IOMMU_IO_PGTABLE_FAST_SELFTEST=y
@@ -508,6 +518,7 @@ CONFIG_MSM_GLINK_SMD_XPRT=y
CONFIG_MSM_GLINK_SMEM_NATIVE_XPRT=y
CONFIG_MSM_GLINK_SPI_XPRT=y
CONFIG_MSM_SPCOM=y
+CONFIG_MSM_SPSS_UTILS=y
CONFIG_MSM_SMEM_LOGGING=y
CONFIG_MSM_SMP2P=y
CONFIG_MSM_SMP2P_TEST=y
@@ -551,6 +562,8 @@ CONFIG_MSM_RPM_RBCPR_STATS_V2_LOG=y
CONFIG_MSM_RPM_LOG=y
CONFIG_MSM_RPM_STATS_LOG=y
CONFIG_QSEE_IPC_IRQ_BRIDGE=y
+CONFIG_QCOM_SMCINVOKE=y
+CONFIG_QCOM_EARLY_RANDOM=y
CONFIG_MEM_SHARE_QMI_SERVICE=y
CONFIG_QCOM_BIMC_BWMON=y
CONFIG_ARM_MEMLAT_MON=y
@@ -562,6 +575,7 @@ CONFIG_DEVFREQ_SPDM=y
CONFIG_EXTCON=y
CONFIG_IIO=y
CONFIG_QCOM_RRADC=y
+CONFIG_QCOM_TADC=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
CONFIG_ARM_GIC_V3_ACL=y
@@ -574,6 +588,9 @@ CONFIG_EXT2_FS=y
CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
+CONFIG_EXT4_ENCRYPTION=y
+CONFIG_EXT4_FS_ENCRYPTION=y
+CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
@@ -639,6 +656,7 @@ CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_FREE_PAGES_RDONLY=y
CONFIG_CORESIGHT=y
+CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_SOURCE_ETM4X=y
CONFIG_CORESIGHT_REMOTE_ETM=y
diff --git a/drivers/android/binder.c b/drivers/android/binder.c
index f80cfc36a354..a3f458fd2238 100644
--- a/drivers/android/binder.c
+++ b/drivers/android/binder.c
@@ -1074,7 +1074,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
static struct binder_ref *binder_get_ref(struct binder_proc *proc,
- uint32_t desc)
+ uint32_t desc, bool need_strong_ref)
{
struct rb_node *n = proc->refs_by_desc.rb_node;
struct binder_ref *ref;
@@ -1082,12 +1082,16 @@ static struct binder_ref *binder_get_ref(struct binder_proc *proc,
while (n) {
ref = rb_entry(n, struct binder_ref, rb_node_desc);
- if (desc < ref->desc)
+ if (desc < ref->desc) {
n = n->rb_left;
- else if (desc > ref->desc)
+ } else if (desc > ref->desc) {
n = n->rb_right;
- else
+ } else if (need_strong_ref && !ref->strong) {
+ binder_user_error("tried to use weak ref as strong ref\n");
+ return NULL;
+ } else {
return ref;
+ }
}
return NULL;
}
@@ -1357,7 +1361,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
pr_err("transaction release %d bad handle %d\n",
@@ -1452,7 +1457,7 @@ static void binder_transaction(struct binder_proc *proc,
if (tr->target.handle) {
struct binder_ref *ref;
- ref = binder_get_ref(proc, tr->target.handle);
+ ref = binder_get_ref(proc, tr->target.handle, true);
if (ref == NULL) {
binder_user_error("%d:%d got transaction to invalid handle\n",
proc->pid, thread->pid);
@@ -1649,7 +1654,9 @@ static void binder_transaction(struct binder_proc *proc,
fp->type = BINDER_TYPE_HANDLE;
else
fp->type = BINDER_TYPE_WEAK_HANDLE;
+ fp->binder = 0;
fp->handle = ref->desc;
+ fp->cookie = 0;
binder_inc_ref(ref, fp->type == BINDER_TYPE_HANDLE,
&thread->todo);
@@ -1661,7 +1668,8 @@ static void binder_transaction(struct binder_proc *proc,
} break;
case BINDER_TYPE_HANDLE:
case BINDER_TYPE_WEAK_HANDLE: {
- struct binder_ref *ref = binder_get_ref(proc, fp->handle);
+ struct binder_ref *ref = binder_get_ref(proc, fp->handle,
+ fp->type == BINDER_TYPE_HANDLE);
if (ref == NULL) {
binder_user_error("%d:%d got transaction with invalid handle, %d\n",
@@ -1696,7 +1704,9 @@ static void binder_transaction(struct binder_proc *proc,
return_error = BR_FAILED_REPLY;
goto err_binder_get_ref_for_node_failed;
}
+ fp->binder = 0;
fp->handle = new_ref->desc;
+ fp->cookie = 0;
binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
trace_binder_transaction_ref_to_ref(t, ref,
new_ref);
@@ -1750,6 +1760,7 @@ static void binder_transaction(struct binder_proc *proc,
binder_debug(BINDER_DEBUG_TRANSACTION,
" fd %d -> %d\n", fp->handle, target_fd);
/* TODO: fput? */
+ fp->binder = 0;
fp->handle = target_fd;
} break;
@@ -1880,7 +1891,9 @@ static int binder_thread_write(struct binder_proc *proc,
ref->desc);
}
} else
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target,
+ cmd == BC_ACQUIRE ||
+ cmd == BC_RELEASE);
if (ref == NULL) {
binder_user_error("%d:%d refcount change on invalid ref %d\n",
proc->pid, thread->pid, target);
@@ -2076,7 +2089,7 @@ static int binder_thread_write(struct binder_proc *proc,
if (get_user_preempt_disabled(cookie, (binder_uintptr_t __user *)ptr))
return -EFAULT;
ptr += sizeof(binder_uintptr_t);
- ref = binder_get_ref(proc, target);
+ ref = binder_get_ref(proc, target, false);
if (ref == NULL) {
binder_user_error("%d:%d %s invalid ref %d\n",
proc->pid, thread->pid,
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index ef8aaac6e0a2..7767086df849 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -490,7 +490,7 @@ static void fastrpc_mmap_free(struct fastrpc_mmap *map)
ion_free(fl->apps->client, map->handle);
if (sess->smmu.enabled) {
if (map->size || map->phys)
- msm_dma_unmap_sg(fl->sctx->dev,
+ msm_dma_unmap_sg(sess->dev,
map->table->sgl,
map->table->nents, DMA_BIDIRECTIONAL,
map->buf);
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 5391ef456aae..3e45aee1c0f7 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -49,6 +49,7 @@ enum clk_osm_bases {
OSM_BASE,
PLL_BASE,
EFUSE_BASE,
+ ACD_BASE,
NUM_BASES,
};
@@ -228,11 +229,43 @@ enum clk_osm_trace_packet_id {
#define MSMCOBALTV2_PWRCL_BOOT_RATE 1555200000
#define MSMCOBALTV2_PERFCL_BOOT_RATE 1728000000
+/* ACD registers */
+#define ACD_HW_VERSION 0x0
+#define ACDCR 0x4
+#define ACDTD 0x8
+#define ACDSSCR 0x28
+#define ACD_EXTINT_CFG 0x30
+#define ACD_DCVS_SW 0x34
+#define ACD_GFMUX_CFG 0x3c
+#define ACD_READOUT_CFG 0x48
+#define ACD_AUTOXFER_CFG 0x80
+#define ACD_AUTOXFER 0x84
+#define ACD_AUTOXFER_CTL 0x88
+#define ACD_AUTOXFER_STATUS 0x8c
+#define ACD_WRITE_CTL 0x90
+#define ACD_WRITE_STATUS 0x94
+#define ACD_READOUT 0x98
+
+#define ACD_MASTER_ONLY_REG_ADDR 0x80
+#define ACD_WRITE_CTL_UPDATE_EN BIT(0)
+#define ACD_WRITE_CTL_SELECT_SHIFT 1
+#define ACD_GFMUX_CFG_SELECT BIT(0)
+#define ACD_AUTOXFER_START_CLEAR 0
+#define ACD_AUTOXFER_START_SET BIT(0)
+#define AUTO_XFER_DONE_MASK BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_SET BIT(0)
+#define ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR 0
+#define ACD_LOCAL_TRANSFER_TIMEOUT_NS 500
+
static void __iomem *virt_base;
static void __iomem *debug_base;
#define lmh_lite_clk_src_source_val 1
+#define ACD_REG_RELATIVE_ADDR(addr) (addr / 4)
+#define ACD_REG_RELATIVE_ADDR_BITMASK(addr) \
+ (1 << (ACD_REG_RELATIVE_ADDR(addr)))
+
#define FIXDIV(div) (div ? (2 * (div) - 1) : (0))
#define F(f, s, div, m, n) \
@@ -341,6 +374,14 @@ struct clk_osm {
u32 apm_ctrl_status;
u32 osm_clk_rate;
u32 xo_clk_rate;
+ u32 acd_td;
+ u32 acd_cr;
+ u32 acd_sscr;
+ u32 acd_extint0_cfg;
+ u32 acd_extint1_cfg;
+ u32 acd_autoxfer_ctl;
+ u32 acd_debugfs_addr;
+ bool acd_init;
bool secure_init;
bool red_fsm_en;
bool boost_fsm_en;
@@ -394,6 +435,161 @@ static inline int clk_osm_mb(struct clk_osm *c, int base)
return readl_relaxed_no_log((char *)c->vbases[base] + VERSION_REG);
}
+static inline int clk_osm_acd_mb(struct clk_osm *c)
+{
+ return readl_relaxed_no_log((char *)c->vbases[ACD_BASE] +
+ ACD_HW_VERSION);
+}
+
+static inline void clk_osm_acd_master_write_reg(struct clk_osm *c,
+ u32 val, u32 offset)
+{
+ writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+}
+
+static int clk_osm_acd_local_read_reg(struct clk_osm *c, u32 offset)
+{
+ u32 reg = 0;
+ int timeout;
+
+ if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+ pr_err("ACD register at offset=0x%x not locally readable\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* Set select field in read control register */
+ writel_relaxed(ACD_REG_RELATIVE_ADDR(offset),
+ (char *)c->vbases[ACD_BASE] + ACD_READOUT_CFG);
+
+ /* Clear write control register */
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Set select and update_en fields in write control register */
+ reg = (ACD_REG_RELATIVE_ADDR(ACD_READOUT_CFG)
+ << ACD_WRITE_CTL_SELECT_SHIFT)
+ | ACD_WRITE_CTL_UPDATE_EN;
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll write status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+ timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_WRITE_STATUS);
+ if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(ACD_READOUT_CFG))))
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local read timed out, offset=0x%x status=0x%x\n",
+ offset, reg);
+ return -ETIMEDOUT;
+ }
+
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_READOUT);
+ return reg;
+}
+
+static int clk_osm_acd_local_write_reg(struct clk_osm *c, u32 val, u32 offset)
+{
+ u32 reg = 0;
+ int timeout;
+
+ if (offset >= ACD_MASTER_ONLY_REG_ADDR) {
+ pr_err("ACD register at offset=0x%x not transferrable\n",
+ offset);
+ return -EINVAL;
+ }
+
+ /* Clear write control register */
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Set select and update_en fields in write control register */
+ reg = (ACD_REG_RELATIVE_ADDR(offset) << ACD_WRITE_CTL_SELECT_SHIFT)
+ | ACD_WRITE_CTL_UPDATE_EN;
+ writel_relaxed(reg, (char *)c->vbases[ACD_BASE] + ACD_WRITE_CTL);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll write status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS; timeout > 0;
+ timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_WRITE_STATUS);
+ if ((reg & (ACD_REG_RELATIVE_ADDR_BITMASK(offset))))
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local write timed out, offset=0x%x val=0x%x status=0x%x\n",
+ offset, val, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+static int clk_osm_acd_master_write_through_reg(struct clk_osm *c,
+ u32 val, u32 offset)
+{
+ writel_relaxed(val, (char *)c->vbases[ACD_BASE] + offset);
+
+ /* Ensure writes complete before transfer to local copy */
+ clk_osm_acd_mb(c);
+
+ return clk_osm_acd_local_write_reg(c, val, offset);
+}
+
+static int clk_osm_acd_auto_local_write_reg(struct clk_osm *c, u32 mask)
+{
+ u32 numregs, bitmask = mask;
+ u32 reg = 0;
+ int timeout;
+
+ /* count number of bits set in register mask */
+ for (numregs = 0; bitmask; numregs++)
+ bitmask &= bitmask - 1;
+
+	/* Program auto-transfer mask */
+ writel_relaxed(mask, (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER_CFG);
+
+ /* Clear start field in auto-transfer register */
+ writel_relaxed(ACD_AUTOXFER_START_CLEAR,
+ (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+ /* Set start field in auto-transfer register */
+ writel_relaxed(ACD_AUTOXFER_START_SET,
+ (char *)c->vbases[ACD_BASE] + ACD_AUTOXFER);
+
+ /* Ensure writes complete before polling */
+ clk_osm_acd_mb(c);
+
+ /* Poll auto-transfer status register */
+ for (timeout = ACD_LOCAL_TRANSFER_TIMEOUT_NS * numregs;
+ timeout > 0; timeout -= 100) {
+ reg = readl_relaxed((char *)c->vbases[ACD_BASE]
+ + ACD_AUTOXFER_STATUS);
+ if (reg & AUTO_XFER_DONE_MASK)
+ break;
+ ndelay(100);
+ }
+
+ if (!timeout) {
+ pr_err("local register auto-transfer timed out, mask=0x%x registers=%d status=0x%x\n",
+ mask, numregs, reg);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
static inline int clk_osm_count_ns(struct clk_osm *c, u64 nsec)
{
u64 temp;
@@ -813,6 +1009,74 @@ static int clk_osm_parse_dt_configs(struct platform_device *pdev)
LLM_SW_OVERRIDE_CNT + i,
&perfcl_clk.llm_sw_overr[i]);
+ if (pwrcl_clk.acd_init || perfcl_clk.acd_init) {
+ rc = of_property_read_u32_array(of, "qcom,acdtd-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdtd-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_td = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_td = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdcr-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdcr-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_cr = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_cr = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdsscr-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdsscr-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_sscr = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_sscr = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdextint0-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdextint0-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_extint0_cfg = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_extint0_cfg = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdextint1-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdextint1-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_extint1_cfg = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_extint1_cfg = array[perfcl_clk.cluster_num];
+
+ rc = of_property_read_u32_array(of, "qcom,acdautoxfer-val",
+ array, MAX_CLUSTER_CNT);
+ if (rc) {
+ dev_err(&pdev->dev, "unable to find qcom,acdautoxfer-val property, rc=%d\n",
+ rc);
+ return -EINVAL;
+ }
+
+ pwrcl_clk.acd_autoxfer_ctl = array[pwrcl_clk.cluster_num];
+ perfcl_clk.acd_autoxfer_ctl = array[perfcl_clk.cluster_num];
+ }
+
rc = of_property_read_u32(of, "qcom,xo-clk-rate",
&pwrcl_clk.xo_clk_rate);
if (rc) {
@@ -1037,6 +1301,40 @@ static int clk_osm_resources_init(struct platform_device *pdev)
perfcl_clk.vbases[EFUSE_BASE] = vbase;
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "pwrcl_acd");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in pwrcl_acd base\n");
+ return -ENOMEM;
+ }
+ pwrcl_clk.pbases[ACD_BASE] = pbase;
+ pwrcl_clk.vbases[ACD_BASE] = vbase;
+ pwrcl_clk.acd_init = true;
+ } else {
+ pwrcl_clk.acd_init = false;
+ }
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "perfcl_acd");
+ if (res) {
+ pbase = (unsigned long)res->start;
+ vbase = devm_ioremap(&pdev->dev, res->start,
+ resource_size(res));
+ if (!vbase) {
+ dev_err(&pdev->dev, "Unable to map in perfcl_acd base\n");
+ return -ENOMEM;
+ }
+ perfcl_clk.pbases[ACD_BASE] = pbase;
+ perfcl_clk.vbases[ACD_BASE] = vbase;
+ perfcl_clk.acd_init = true;
+ } else {
+ perfcl_clk.acd_init = false;
+ }
+
vdd_pwrcl = devm_regulator_get(&pdev->dev, "vdd-pwrcl");
if (IS_ERR(vdd_pwrcl)) {
rc = PTR_ERR(vdd_pwrcl);
@@ -2402,6 +2700,55 @@ DEFINE_SIMPLE_ATTRIBUTE(debugfs_perf_state_deviation_corrected_irq_fops,
debugfs_set_perf_state_deviation_corrected_irq,
"%llu\n");
+static int debugfs_get_debug_reg(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+ *val = readl_relaxed((char *)c->vbases[ACD_BASE] +
+ c->acd_debugfs_addr);
+ else
+ *val = clk_osm_acd_local_read_reg(c, c->acd_debugfs_addr);
+ return 0;
+}
+
+static int debugfs_set_debug_reg(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+
+ if (c->acd_debugfs_addr >= ACD_MASTER_ONLY_REG_ADDR)
+ clk_osm_acd_master_write_reg(c, val, c->acd_debugfs_addr);
+ else
+ clk_osm_acd_master_write_through_reg(c, val,
+ c->acd_debugfs_addr);
+
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_fops,
+ debugfs_get_debug_reg,
+ debugfs_set_debug_reg,
+ "0x%llx\n");
+
+static int debugfs_get_debug_reg_addr(void *data, u64 *val)
+{
+ struct clk_osm *c = data;
+
+ *val = c->acd_debugfs_addr;
+ return 0;
+}
+
+static int debugfs_set_debug_reg_addr(void *data, u64 val)
+{
+ struct clk_osm *c = data;
+
+ c->acd_debugfs_addr = val;
+ return 0;
+}
+DEFINE_SIMPLE_ATTRIBUTE(debugfs_acd_debug_reg_addr_fops,
+ debugfs_get_debug_reg_addr,
+ debugfs_set_debug_reg_addr,
+ "%llu\n");
+
static void populate_debugfs_dir(struct clk_osm *c)
{
struct dentry *temp;
@@ -2493,6 +2840,24 @@ static void populate_debugfs_dir(struct clk_osm *c)
goto exit;
}
+ temp = debugfs_create_file("acd_debug_reg",
+ S_IRUGO | S_IWUSR,
+ c->debugfs, c,
+ &debugfs_acd_debug_reg_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_acd_debug_reg_fops debugfs file creation failed\n");
+ goto exit;
+ }
+
+ temp = debugfs_create_file("acd_debug_reg_addr",
+ S_IRUGO | S_IWUSR,
+ c->debugfs, c,
+ &debugfs_acd_debug_reg_addr_fops);
+ if (IS_ERR_OR_NULL(temp)) {
+ pr_err("debugfs_acd_debug_reg_addr_fops debugfs file creation failed\n");
+ goto exit;
+ }
+
exit:
if (IS_ERR_OR_NULL(temp))
debugfs_remove_recursive(c->debugfs);
@@ -2537,6 +2902,81 @@ static int clk_osm_panic_callback(struct notifier_block *nfb,
return NOTIFY_OK;
}
+static int clk_osm_acd_init(struct clk_osm *c)
+{
+
+ int rc = 0;
+ u32 auto_xfer_mask = 0;
+
+ if (!c->acd_init)
+ return 0;
+
+ c->acd_debugfs_addr = ACD_HW_VERSION;
+
+ /* Program ACD tunable-length delay register */
+ clk_osm_acd_master_write_reg(c, c->acd_td, ACDTD);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD);
+
+ /* Program ACD control register */
+ clk_osm_acd_master_write_reg(c, c->acd_cr, ACDCR);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR);
+
+ /* Program ACD soft start control register */
+ clk_osm_acd_master_write_reg(c, c->acd_sscr, ACDSSCR);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR);
+
+ /* Program initial ACD external interface configuration register */
+ clk_osm_acd_master_write_reg(c, c->acd_extint0_cfg, ACD_EXTINT_CFG);
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG);
+
+ /* Program ACD auto-register transfer control register */
+ clk_osm_acd_master_write_reg(c, c->acd_autoxfer_ctl, ACD_AUTOXFER_CTL);
+
+ /* Ensure writes complete before transfers to local copy */
+ clk_osm_acd_mb(c);
+
+ /* Transfer master copies */
+ rc = clk_osm_acd_auto_local_write_reg(c, auto_xfer_mask);
+ if (rc)
+ return rc;
+
+ /* Switch CPUSS clock source to ACD clock */
+ rc = clk_osm_acd_master_write_through_reg(c, ACD_GFMUX_CFG_SELECT,
+ ACD_GFMUX_CFG);
+ if (rc)
+ return rc;
+
+ /* Program ACD_DCVS_SW */
+ rc = clk_osm_acd_master_write_through_reg(c,
+ ACD_DCVS_SW_DCVS_IN_PRGR_SET,
+ ACD_DCVS_SW);
+ if (rc)
+ return rc;
+
+ rc = clk_osm_acd_master_write_through_reg(c,
+ ACD_DCVS_SW_DCVS_IN_PRGR_CLEAR,
+ ACD_DCVS_SW);
+ if (rc)
+ return rc;
+
+ udelay(1);
+
+ /* Program final ACD external interface configuration register */
+ rc = clk_osm_acd_master_write_through_reg(c, c->acd_extint1_cfg,
+ ACD_EXTINT_CFG);
+ if (rc)
+ return rc;
+
+ /*
+ * ACDCR, ACDTD, ACDSSCR, ACD_EXTINT_CFG, ACD_GFMUX_CFG
+ * must be copied from master to local copy on PC exit.
+ */
+ auto_xfer_mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG);
+ clk_osm_acd_master_write_reg(c, auto_xfer_mask, ACD_AUTOXFER_CFG);
+
+ return 0;
+}
+
static unsigned long init_rate = 300000000;
static unsigned long osm_clk_init_rate = 200000000;
@@ -2717,6 +3157,17 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
clk_osm_setup_cluster_pll(&perfcl_clk);
}
+ rc = clk_osm_acd_init(&pwrcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for pwrcl, rc=%d\n", rc);
+ return rc;
+ }
+ rc = clk_osm_acd_init(&perfcl_clk);
+ if (rc) {
+ pr_err("failed to initialize ACD for perfcl, rc=%d\n", rc);
+ return rc;
+ }
+
spin_lock_init(&pwrcl_clk.lock);
spin_lock_init(&perfcl_clk.lock);
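
As a quick illustration of the mask arithmetic above: ACD_REG_RELATIVE_ADDR_BITMASK() turns a register byte offset into a single bit of the auto-transfer mask, so clk_osm_acd_init() ends up setting one mask bit per ACD register it programs. A minimal user-space sketch, assuming only the offsets and macros shown in this patch:

#include <stdio.h>

#define ACDCR			0x4
#define ACDTD			0x8
#define ACDSSCR			0x28
#define ACD_EXTINT_CFG		0x30
#define ACD_GFMUX_CFG		0x3c

#define ACD_REG_RELATIVE_ADDR(addr)		((addr) / 4)
#define ACD_REG_RELATIVE_ADDR_BITMASK(addr)	(1 << ACD_REG_RELATIVE_ADDR(addr))

int main(void)
{
	/* Accumulate the same auto-transfer mask that clk_osm_acd_init() builds */
	unsigned int mask = 0;

	mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDTD);		/* offset 0x8  -> bit 2  */
	mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDCR);		/* offset 0x4  -> bit 1  */
	mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACDSSCR);		/* offset 0x28 -> bit 10 */
	mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_EXTINT_CFG);	/* offset 0x30 -> bit 12 */
	mask |= ACD_REG_RELATIVE_ADDR_BITMASK(ACD_GFMUX_CFG);	/* offset 0x3c -> bit 15 */

	printf("auto_xfer_mask = 0x%x\n", mask);	/* prints 0x9406 */
	return 0;
}
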
diff --git a/drivers/clk/qcom/gcc-msmfalcon.c b/drivers/clk/qcom/gcc-msmfalcon.c
index 42b91d70aa54..b5f7e18cf495 100644
--- a/drivers/clk/qcom/gcc-msmfalcon.c
+++ b/drivers/clk/qcom/gcc-msmfalcon.c
@@ -35,8 +35,8 @@
#define F(f, s, h, m, n) { (f), (s), (2 * (h) - 1), (m), (n) }
-static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner, NULL);
-static DEFINE_VDD_REGULATORS(vdd_dig_ao, VDD_DIG_NUM, 1, vdd_corner, NULL);
+static DEFINE_VDD_REGULATORS(vdd_dig, VDD_DIG_NUM, 1, vdd_corner);
+static DEFINE_VDD_REGULATORS(vdd_dig_ao, VDD_DIG_NUM, 1, vdd_corner);
enum {
P_CORE_BI_PLL_TEST_SE,
@@ -2201,7 +2201,7 @@ static struct clk_branch gcc_ufs_axi_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2249,7 +2249,7 @@ static struct clk_branch gcc_ufs_ice_core_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2284,7 +2284,7 @@ static struct clk_branch gcc_ufs_phy_aux_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2355,7 +2355,7 @@ static struct clk_branch gcc_ufs_unipro_core_hw_ctl_clk = {
},
.num_parents = 1,
.flags = CLK_SET_RATE_PARENT,
- .ops = &clk_branch2_hw_ctl_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2753,9 +2753,8 @@ MODULE_DEVICE_TABLE(of, gcc_falcon_match_table);
static int gcc_falcon_probe(struct platform_device *pdev)
{
- int ret = 0, i;
+ int ret = 0;
struct regmap *regmap;
- struct clk *clk;
regmap = qcom_cc_map(pdev, &gcc_falcon_desc);
if (IS_ERR(regmap))
diff --git a/drivers/clk/qcom/vdd-level-falcon.h b/drivers/clk/qcom/vdd-level-falcon.h
index e8699358cf91..d54e801ecc67 100644
--- a/drivers/clk/qcom/vdd-level-falcon.h
+++ b/drivers/clk/qcom/vdd-level-falcon.h
@@ -19,50 +19,52 @@
#define VDD_DIG_FMAX_MAP1(l1, f1) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
+
#define VDD_DIG_FMAX_MAP2(l1, f1, l2, f2) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP3(l1, f1, l2, f2, l3, f3) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
+
#define VDD_DIG_FMAX_MAP4(l1, f1, l2, f2, l3, f3, l4, f4) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
[VDD_DIG_##l4] = (f4), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP5(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
[VDD_DIG_##l4] = (f4), \
[VDD_DIG_##l5] = (f5), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP6(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
@@ -70,12 +72,12 @@
[VDD_DIG_##l5] = (f5), \
[VDD_DIG_##l6] = (f6), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP7(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6, \
l7, f7) \
.vdd_class = &vdd_dig, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
@@ -84,27 +86,27 @@
[VDD_DIG_##l6] = (f6), \
[VDD_DIG_##l7] = (f7), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP1_AO(l1, f1) \
.vdd_class = &vdd_dig_ao, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_DIG_FMAX_MAP3_AO(l1, f1, l2, f2, l3, f3) \
.vdd_class = &vdd_dig_ao, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
#define VDD_GPU_PLL_FMAX_MAP6(l1, f1, l2, f2, l3, f3, l4, f4, l5, f5, l6, f6) \
.vdd_class = &vdd_mx, \
- .fmax = (unsigned long[VDD_DIG_NUM]) { \
+ .rate_max = (unsigned long[VDD_DIG_NUM]) { \
[VDD_DIG_##l1] = (f1), \
[VDD_DIG_##l2] = (f2), \
[VDD_DIG_##l3] = (f3), \
@@ -112,7 +114,7 @@
[VDD_DIG_##l5] = (f5), \
[VDD_DIG_##l6] = (f6), \
}, \
- .num_fmax = VDD_DIG_NUM
+ .num_rate_max = VDD_DIG_NUM
enum vdd_dig_levels {
VDD_DIG_NONE,
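
For context, these macros are used in clock struct initializers, so after this rename a use of VDD_DIG_FMAX_MAP2() populates .rate_max/.num_rate_max instead of .fmax/.num_fmax. A sketch of the expansion (the level names and frequencies below are illustrative, not taken from this patch):

	/* VDD_DIG_FMAX_MAP2(LOWER, 200000000, NOMINAL, 400000000) expands to: */
	.vdd_class = &vdd_dig,
	.rate_max = (unsigned long[VDD_DIG_NUM]) {
		[VDD_DIG_LOWER]   = 200000000,
		[VDD_DIG_NOMINAL] = 400000000,
	},
	.num_rate_max = VDD_DIG_NUM
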
diff --git a/drivers/crypto/Kconfig b/drivers/crypto/Kconfig
index 386c85fc714b..77aea1f41714 100644
--- a/drivers/crypto/Kconfig
+++ b/drivers/crypto/Kconfig
@@ -372,18 +372,18 @@ config CRYPTO_DEV_QCRYPTO
config CRYPTO_DEV_QCOM_MSM_QCE
tristate "Qualcomm Crypto Engine (QCE) module"
- select CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM || ARCH_MSM8909 || ARCH_MSMCOBALT || ARCH_MSMFALCON
+ select CRYPTO_DEV_QCE50 if ARCH_APQ8084 || ARCH_MSM8916 || ARCH_MSM8994 || ARCH_MSM8996 || ARCH_MSM8992 || ARCH_MSMTITANIUM || ARCH_MSM8909 || ARCH_MSMCOBALT || ARCH_MSMFALCON || ARCH_MSMTRITON
default n
help
This driver supports Qualcomm Crypto Engine in MSM7x30, MSM8660
MSM8x55, MSM8960, MSM9615, MSM8916, MSM8994, MSM8996, FSM9900,
- MSMTITANINUM, APQ8084, MSMCOBALT and MSMFALCON.
+	  MSMTITANIUM, APQ8084, MSMCOBALT, MSMFALCON and MSMTRITON.
To compile this driver as a module, choose M here: the
For MSM7x30 MSM8660 and MSM8x55 the module is called qce
For MSM8960, APQ8064 and MSM9615 the module is called qce40
For MSM8974, MSM8916, MSM8994, MSM8996, MSM8992, MSMTITANIUM,
- APQ8084, MSMCOBALT and MSMFALCON the module is called qce50.
+ APQ8084, MSMCOBALT, MSMFALCON and MSMTRITON the module is called qce50.
config CRYPTO_DEV_QCEDEV
tristate "QCEDEV Interface to CE module"
@@ -391,8 +391,8 @@ config CRYPTO_DEV_QCEDEV
help
This driver supports Qualcomm QCEDEV Crypto in MSM7x30, MSM8660,
MSM8960, MSM9615, APQ8064, MSM8974, MSM8916, MSM8994, MSM8996,
- APQ8084, MSMCOBALT, MSMFALCON. This exposes the interface to the QCE hardware
- accelerator via IOCTLs.
+ APQ8084, MSMCOBALT, MSMFALCON, MSMTRITON. This exposes the
+ interface to the QCE hardware accelerator via IOCTLs.
To compile this driver as a module, choose M here: the
module will be called qcedev.
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index e63f061175ad..e2099c4e7877 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1234,44 +1234,6 @@ static int qcedev_vbuf_ablk_cipher(struct qcedev_async_req *areq,
struct qcedev_cipher_op_req *saved_req;
struct qcedev_cipher_op_req *creq = &areq->cipher_op_req;
- /* Verify Source Address's */
- for (i = 0; i < areq->cipher_op_req.entries; i++)
- if (!access_ok(VERIFY_READ,
- (void __user *)areq->cipher_op_req.vbuf.src[i].vaddr,
- areq->cipher_op_req.vbuf.src[i].len))
- return -EFAULT;
-
- /* Verify Destination Address's */
- if (creq->in_place_op != 1) {
- for (i = 0, total = 0; i < QCEDEV_MAX_BUFFERS; i++) {
- if ((areq->cipher_op_req.vbuf.dst[i].vaddr != 0) &&
- (total < creq->data_len)) {
- if (!access_ok(VERIFY_WRITE,
- (void __user *)creq->vbuf.dst[i].vaddr,
- creq->vbuf.dst[i].len)) {
- pr_err("%s:DST WR_VERIFY err %d=0x%lx\n",
- __func__, i, (uintptr_t)
- creq->vbuf.dst[i].vaddr);
- return -EFAULT;
- }
- total += creq->vbuf.dst[i].len;
- }
- }
- } else {
- for (i = 0, total = 0; i < creq->entries; i++) {
- if (total < creq->data_len) {
- if (!access_ok(VERIFY_WRITE,
- (void __user *)creq->vbuf.src[i].vaddr,
- creq->vbuf.src[i].len)) {
- pr_err("%s:SRC WR_VERIFY err %d=0x%lx\n",
- __func__, i, (uintptr_t)
- creq->vbuf.src[i].vaddr);
- return -EFAULT;
- }
- total += creq->vbuf.src[i].len;
- }
- }
- }
total = 0;
if (areq->cipher_op_req.mode == QCEDEV_AES_MODE_CTR)
@@ -1569,6 +1531,36 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
__func__, total, req->data_len);
goto error;
}
+	/* Verify Source Addresses */
+ for (i = 0, total = 0; i < req->entries; i++) {
+ if (total < req->data_len) {
+ if (!access_ok(VERIFY_READ,
+ (void __user *)req->vbuf.src[i].vaddr,
+ req->vbuf.src[i].len)) {
+ pr_err("%s:SRC RD_VERIFY err %d=0x%lx\n",
+ __func__, i, (uintptr_t)
+ req->vbuf.src[i].vaddr);
+ goto error;
+ }
+ total += req->vbuf.src[i].len;
+ }
+ }
+
+	/* Verify Destination Addresses */
+ for (i = 0, total = 0; i < QCEDEV_MAX_BUFFERS; i++) {
+ if ((req->vbuf.dst[i].vaddr != 0) &&
+ (total < req->data_len)) {
+ if (!access_ok(VERIFY_WRITE,
+ (void __user *)req->vbuf.dst[i].vaddr,
+ req->vbuf.dst[i].len)) {
+ pr_err("%s:DST WR_VERIFY err %d=0x%lx\n",
+ __func__, i, (uintptr_t)
+ req->vbuf.dst[i].vaddr);
+ goto error;
+ }
+ total += req->vbuf.dst[i].len;
+ }
+ }
return 0;
error:
return -EINVAL;
diff --git a/drivers/gpio/qpnp-pin.c b/drivers/gpio/qpnp-pin.c
index 182c6074985e..483bb9338ac3 100644
--- a/drivers/gpio/qpnp-pin.c
+++ b/drivers/gpio/qpnp-pin.c
@@ -827,9 +827,17 @@ static int qpnp_pin_get(struct gpio_chip *gpio_chip, unsigned offset)
if (WARN_ON(!q_spec))
return -ENODEV;
+ if (is_gpio_lv_mv(q_spec)) {
+ mask = Q_REG_LV_MV_MODE_SEL_MASK;
+ shift = Q_REG_LV_MV_MODE_SEL_SHIFT;
+ } else {
+ mask = Q_REG_MODE_SEL_MASK;
+ shift = Q_REG_MODE_SEL_SHIFT;
+ }
+
/* gpio val is from RT status iff input is enabled */
- if ((q_spec->regs[Q_REG_I_MODE_CTL] & Q_REG_MODE_SEL_MASK)
- == QPNP_PIN_MODE_DIG_IN) {
+ if (q_reg_get(&q_spec->regs[Q_REG_I_MODE_CTL], shift, mask)
+ == QPNP_PIN_MODE_DIG_IN) {
rc = regmap_read(q_chip->regmap,
Q_REG_ADDR(q_spec, Q_REG_STATUS1), &val);
buf[0] = (u8)val;
diff --git a/drivers/gpu/msm/adreno-gpulist.h b/drivers/gpu/msm/adreno-gpulist.h
index 3615be45b6d9..a02ed40ba9d5 100644
--- a/drivers/gpu/msm/adreno-gpulist.h
+++ b/drivers/gpu/msm/adreno-gpulist.h
@@ -269,7 +269,7 @@ static const struct adreno_gpu_core adreno_gpulist[] = {
.patchid = ANY_ID,
.features = ADRENO_PREEMPTION | ADRENO_64BIT |
ADRENO_CONTENT_PROTECTION |
- ADRENO_GPMU | ADRENO_SPTP_PC,
+ ADRENO_GPMU | ADRENO_SPTP_PC | ADRENO_LM,
.pm4fw_name = "a530_pm4.fw",
.pfpfw_name = "a530_pfp.fw",
.zap_name = "a540_zap",
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index 3f5a9c6318f6..423071811b43 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -151,6 +151,43 @@ static const unsigned int _a3xx_pwron_fixup_fs_instructions[] = {
0x00000000, 0x03000000, 0x00000000, 0x00000000,
};
+static void a3xx_efuse_speed_bin(struct adreno_device *adreno_dev)
+{
+ unsigned int val;
+ unsigned int speed_bin[3];
+ struct kgsl_device *device = &adreno_dev->dev;
+
+ if (of_property_read_u32_array(device->pdev->dev.of_node,
+ "qcom,gpu-speed-bin", speed_bin, 3))
+ return;
+
+ adreno_efuse_read_u32(adreno_dev, speed_bin[0], &val);
+
+ adreno_dev->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+static const struct {
+ int (*check)(struct adreno_device *adreno_dev);
+ void (*func)(struct adreno_device *adreno_dev);
+} a3xx_efuse_funcs[] = {
+ { adreno_is_a306a, a3xx_efuse_speed_bin },
+};
+
+static void a3xx_check_features(struct adreno_device *adreno_dev)
+{
+ unsigned int i;
+
+ if (adreno_efuse_map(adreno_dev))
+ return;
+
+ for (i = 0; i < ARRAY_SIZE(a3xx_efuse_funcs); i++) {
+ if (a3xx_efuse_funcs[i].check(adreno_dev))
+ a3xx_efuse_funcs[i].func(adreno_dev);
+ }
+
+ adreno_efuse_unmap(adreno_dev);
+}
+
/**
* _a3xx_pwron_fixup() - Initialize a special command buffer to run a
* post-power collapse shader workaround
@@ -604,6 +641,9 @@ static void a3xx_platform_setup(struct adreno_device *adreno_dev)
gpudev->vbif_xin_halt_ctrl0_mask =
A30X_VBIF_XIN_HALT_CTRL0_MASK;
}
+
+	/* Check efuse bits for various capabilities */
+ a3xx_check_features(adreno_dev);
}
static int a3xx_send_me_init(struct adreno_device *adreno_dev,
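
The "qcom,gpu-speed-bin" property read above is a three-cell tuple of an efuse register offset, a bit mask and a shift, and the speed bin is extracted as (val & mask) >> shift. A small self-contained sketch with hypothetical values (the real offset and mask are board specific and not part of this patch):

#include <stdio.h>

int main(void)
{
	/* Hypothetical: qcom,gpu-speed-bin = <0x138 0x00000070 4>; */
	unsigned int speed_bin[3] = { 0x138, 0x00000070, 4 };
	unsigned int efuse_val = 0x25;	/* pretend adreno_efuse_read_u32() result */

	unsigned int bin = (efuse_val & speed_bin[1]) >> speed_bin[2];

	printf("speed_bin = %u\n", bin);	/* (0x25 & 0x70) >> 4 = 2 */
	return 0;
}
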
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index add4590bbb90..fe0715629825 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4527,6 +4527,9 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
if (status)
goto error_close_mmu;
+ /* Initialize the memory pools */
+ kgsl_init_page_pools(device->pdev);
+
status = kgsl_allocate_global(device, &device->memstore,
KGSL_MEMSTORE_SIZE, 0, KGSL_MEMDESC_CONTIG, "memstore");
@@ -4581,9 +4584,6 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
/* Initialize common sysfs entries */
kgsl_pwrctrl_init_sysfs(device);
- /* Initialize the memory pools */
- kgsl_init_page_pools();
-
return 0;
error_free_memstore:
diff --git a/drivers/gpu/msm/kgsl_mmu.c b/drivers/gpu/msm/kgsl_mmu.c
index ba564b2851f9..f516b7cd245a 100644
--- a/drivers/gpu/msm/kgsl_mmu.c
+++ b/drivers/gpu/msm/kgsl_mmu.c
@@ -390,6 +390,13 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
if (!memdesc->gpuaddr)
return -EINVAL;
+ if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
+ KGSL_MEMFLAGS_SPARSE_PHYS))) {
+ /* Only global mappings should be mapped multiple times */
+ if (!kgsl_memdesc_is_global(memdesc) &&
+ (KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
+ }
size = kgsl_memdesc_footprint(memdesc);
@@ -403,6 +410,9 @@ kgsl_mmu_map(struct kgsl_pagetable *pagetable,
atomic_inc(&pagetable->stats.entries);
KGSL_STATS_ADD(size, &pagetable->stats.mapped,
&pagetable->stats.max_mapped);
+
+ /* This is needed for non-sparse mappings */
+ memdesc->priv |= KGSL_MEMDESC_MAPPED;
}
return 0;
@@ -455,6 +465,13 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
if (memdesc->size == 0)
return -EINVAL;
+ if (!(memdesc->flags & (KGSL_MEMFLAGS_SPARSE_VIRT |
+ KGSL_MEMFLAGS_SPARSE_PHYS))) {
+ /* Only global mappings should be mapped multiple times */
+ if (!(KGSL_MEMDESC_MAPPED & memdesc->priv))
+ return -EINVAL;
+ }
+
if (PT_OP_VALID(pagetable, mmu_unmap)) {
uint64_t size;
@@ -464,6 +481,9 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
atomic_dec(&pagetable->stats.entries);
atomic_long_sub(size, &pagetable->stats.mapped);
+
+ if (!kgsl_memdesc_is_global(memdesc))
+ memdesc->priv &= ~KGSL_MEMDESC_MAPPED;
}
return ret;
diff --git a/drivers/gpu/msm/kgsl_pool.c b/drivers/gpu/msm/kgsl_pool.c
index f5402fdc7e57..6ecbab466c7c 100644
--- a/drivers/gpu/msm/kgsl_pool.c
+++ b/drivers/gpu/msm/kgsl_pool.c
@@ -21,6 +21,10 @@
#include "kgsl_device.h"
#include "kgsl_pool.h"
+#define KGSL_MAX_POOLS 4
+#define KGSL_MAX_POOL_ORDER 8
+#define KGSL_MAX_RESERVED_PAGES 4096
+
/**
* struct kgsl_page_pool - Structure to hold information for the pool
* @pool_order: Page order describing the size of the page
@@ -40,41 +44,10 @@ struct kgsl_page_pool {
struct list_head page_list;
};
-static struct kgsl_page_pool kgsl_pools[] = {
- {
- .pool_order = 0,
- .reserved_pages = 2048,
- .allocation_allowed = true,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[0].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[0].page_list),
- },
-#ifndef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
- {
- .pool_order = 1,
- .reserved_pages = 1024,
- .allocation_allowed = true,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[1].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[1].page_list),
- },
- {
- .pool_order = 4,
- .reserved_pages = 256,
- .allocation_allowed = false,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[2].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[2].page_list),
- },
- {
- .pool_order = 8,
- .reserved_pages = 32,
- .allocation_allowed = false,
- .list_lock = __SPIN_LOCK_UNLOCKED(kgsl_pools[3].list_lock),
- .page_list = LIST_HEAD_INIT(kgsl_pools[3].page_list),
- },
+static struct kgsl_page_pool kgsl_pools[KGSL_MAX_POOLS];
+static int kgsl_num_pools;
+static int kgsl_pool_max_pages;
-#endif
-};
-
-#define KGSL_NUM_POOLS ARRAY_SIZE(kgsl_pools)
/* Returns KGSL pool corresponding to input page order*/
static struct kgsl_page_pool *
@@ -82,7 +55,7 @@ _kgsl_get_pool_from_order(unsigned int order)
{
int i;
- for (i = 0; i < KGSL_NUM_POOLS; i++) {
+ for (i = 0; i < kgsl_num_pools; i++) {
if (kgsl_pools[i].pool_order == order)
return &kgsl_pools[i];
}
@@ -154,7 +127,7 @@ static int kgsl_pool_size_total(void)
int i;
int total = 0;
- for (i = 0; i < KGSL_NUM_POOLS; i++)
+ for (i = 0; i < kgsl_num_pools; i++)
total += kgsl_pool_size(&kgsl_pools[i]);
return total;
}
@@ -207,7 +180,7 @@ kgsl_pool_reduce(unsigned int target_pages, bool exit)
total_pages = kgsl_pool_size_total();
- for (i = (KGSL_NUM_POOLS - 1); i >= 0; i--) {
+ for (i = (kgsl_num_pools - 1); i >= 0; i--) {
pool = &kgsl_pools[i];
/*
@@ -300,7 +273,7 @@ static int kgsl_pool_idx_lookup(unsigned int order)
{
int i;
- for (i = 0; i < KGSL_NUM_POOLS; i++)
+ for (i = 0; i < kgsl_num_pools; i++)
if (order == kgsl_pools[i].pool_order)
return i;
@@ -384,10 +357,13 @@ void kgsl_pool_free_page(struct page *page)
page_order = compound_order(page);
- pool = _kgsl_get_pool_from_order(page_order);
- if (pool != NULL) {
- _kgsl_pool_add_page(pool, page);
- return;
+ if (!kgsl_pool_max_pages ||
+ (kgsl_pool_size_total() < kgsl_pool_max_pages)) {
+ pool = _kgsl_get_pool_from_order(page_order);
+ if (pool != NULL) {
+ _kgsl_pool_add_page(pool, page);
+ return;
+ }
}
/* Give back to system as not added to pool */
@@ -398,7 +374,7 @@ static void kgsl_pool_reserve_pages(void)
{
int i, j;
- for (i = 0; i < KGSL_NUM_POOLS; i++) {
+ for (i = 0; i < kgsl_num_pools; i++) {
struct page *page;
for (j = 0; j < kgsl_pools[i].reserved_pages; j++) {
@@ -445,8 +421,76 @@ static struct shrinker kgsl_pool_shrinker = {
.batch = 0,
};
-void kgsl_init_page_pools(void)
+static void kgsl_pool_config(unsigned int order, unsigned int reserved_pages,
+ bool allocation_allowed)
{
+#ifdef CONFIG_ALLOC_BUFFERS_IN_4K_CHUNKS
+ if (order > 0) {
+		pr_info("%s: Pool order:%d not supported\n", __func__, order);
+ return;
+ }
+#endif
+ if ((order > KGSL_MAX_POOL_ORDER) ||
+ (reserved_pages > KGSL_MAX_RESERVED_PAGES))
+ return;
+
+ kgsl_pools[kgsl_num_pools].pool_order = order;
+ kgsl_pools[kgsl_num_pools].reserved_pages = reserved_pages;
+ kgsl_pools[kgsl_num_pools].allocation_allowed = allocation_allowed;
+ spin_lock_init(&kgsl_pools[kgsl_num_pools].list_lock);
+ INIT_LIST_HEAD(&kgsl_pools[kgsl_num_pools].page_list);
+ kgsl_num_pools++;
+}
+
+static void kgsl_of_parse_mempools(struct device_node *node)
+{
+ struct device_node *child;
+ unsigned int page_size, reserved_pages = 0;
+ bool allocation_allowed;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+
+ if (of_property_read_u32(child, "reg", &index))
+ return;
+
+ if (index >= KGSL_MAX_POOLS)
+ continue;
+
+ if (of_property_read_u32(child, "qcom,mempool-page-size",
+ &page_size))
+ return;
+
+ of_property_read_u32(child, "qcom,mempool-reserved",
+ &reserved_pages);
+
+ allocation_allowed = of_property_read_bool(child,
+ "qcom,mempool-allocate");
+
+ kgsl_pool_config(ilog2(page_size >> PAGE_SHIFT), reserved_pages,
+ allocation_allowed);
+ }
+}
+
+static void kgsl_of_get_mempools(struct device_node *parent)
+{
+ struct device_node *node;
+
+ node = of_find_compatible_node(parent, NULL, "qcom,gpu-mempools");
+ if (node != NULL) {
+ /* Get Max pages limit for mempool */
+ of_property_read_u32(node, "qcom,mempool-max-pages",
+ &kgsl_pool_max_pages);
+ kgsl_of_parse_mempools(node);
+ }
+}
+
+void kgsl_init_page_pools(struct platform_device *pdev)
+{
+
+ /* Get GPU mempools data and configure pools */
+ kgsl_of_get_mempools(pdev->dev.of_node);
+
/* Reserve the appropriate number of pages for each pool */
kgsl_pool_reserve_pages();
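
With this change the pool layout comes from devicetree instead of the static table removed above; kgsl_of_parse_mempools() converts each child node's page size into a pool order via ilog2(page_size >> PAGE_SHIFT). A small user-space sketch of that mapping, assuming 4K kernel pages (the printed orders correspond to the previous hard-coded 4K/8K/64K/1M pools):

#include <stdio.h>

#define PAGE_SHIFT 12	/* 4K pages assumed */

/* Same result as ilog2(page_size >> PAGE_SHIFT) for power-of-two sizes */
static unsigned int pool_order(unsigned int page_size)
{
	unsigned int order = 0, pages = page_size >> PAGE_SHIFT;

	while (pages > 1) {
		pages >>= 1;
		order++;
	}
	return order;
}

int main(void)
{
	printf("%u %u %u %u\n", pool_order(4096), pool_order(8192),
	       pool_order(65536), pool_order(1 << 20));	/* 0 1 4 8 */
	return 0;
}
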
diff --git a/drivers/gpu/msm/kgsl_pool.h b/drivers/gpu/msm/kgsl_pool.h
index efbfa96f1498..d55e1ada123b 100644
--- a/drivers/gpu/msm/kgsl_pool.h
+++ b/drivers/gpu/msm/kgsl_pool.h
@@ -35,7 +35,7 @@ kgsl_gfp_mask(unsigned int page_order)
void kgsl_pool_free_sgt(struct sg_table *sgt);
void kgsl_pool_free_pages(struct page **pages, unsigned int page_count);
-void kgsl_init_page_pools(void);
+void kgsl_init_page_pools(struct platform_device *pdev);
void kgsl_exit_page_pools(void);
int kgsl_pool_alloc_page(int *page_size, struct page **pages,
unsigned int pages_len, unsigned int *align);
diff --git a/drivers/hwtracing/coresight/coresight-etm4x.c b/drivers/hwtracing/coresight/coresight-etm4x.c
index 5697ad3b1d13..a37b5ce9a6b2 100644
--- a/drivers/hwtracing/coresight/coresight-etm4x.c
+++ b/drivers/hwtracing/coresight/coresight-etm4x.c
@@ -2801,6 +2801,10 @@ static int etm4_probe(struct amba_device *adev, const struct amba_id *id)
put_online_cpus();
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
pm_runtime_put(&adev->dev);
mutex_lock(&drvdata->mutex);
diff --git a/drivers/hwtracing/coresight/coresight-tmc.c b/drivers/hwtracing/coresight/coresight-tmc.c
index 294444d5f59e..cc8d957e0581 100644
--- a/drivers/hwtracing/coresight/coresight-tmc.c
+++ b/drivers/hwtracing/coresight/coresight-tmc.c
@@ -1545,8 +1545,8 @@ static ssize_t mem_size_store(struct device *dev,
mutex_lock(&drvdata->mem_lock);
if (kstrtoul(buf, 16, &val)) {
- return -EINVAL;
mutex_unlock(&drvdata->mem_lock);
+ return -EINVAL;
}
drvdata->mem_size = val;
@@ -1900,6 +1900,10 @@ static int tmc_probe(struct amba_device *adev, const struct amba_id *id)
drvdata->size = readl_relaxed(drvdata->base + TMC_RSZ) * 4;
}
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
+
pm_runtime_put(&adev->dev);
if (drvdata->config_type == TMC_CONFIG_TYPE_ETR) {
diff --git a/drivers/hwtracing/coresight/coresight-tpiu.c b/drivers/hwtracing/coresight/coresight-tpiu.c
index 7baa1e750a23..3fd080b94069 100644
--- a/drivers/hwtracing/coresight/coresight-tpiu.c
+++ b/drivers/hwtracing/coresight/coresight-tpiu.c
@@ -162,6 +162,9 @@ static int tpiu_probe(struct amba_device *adev, const struct amba_id *id)
/* Disable tpiu to support older devices */
tpiu_disable_hw(drvdata);
+ ret = clk_set_rate(adev->pclk, CORESIGHT_CLK_RATE_TRACE);
+ if (ret)
+ return ret;
pm_runtime_put(&adev->dev);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
index d6bb18522e0c..033ff7bcfca5 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_soc_api.c
@@ -1040,8 +1040,11 @@ uint32_t msm_camera_unregister_bus_client(enum cam_bus_client id)
mutex_destroy(&g_cv[id].lock);
msm_bus_scale_unregister_client(g_cv[id].bus_client);
- msm_bus_cl_clear_pdata(g_cv[id].pdata);
- memset(&g_cv[id], 0, sizeof(struct msm_cam_bus_pscale_data));
+ g_cv[id].bus_client = 0;
+ g_cv[id].num_usecases = 0;
+ g_cv[id].num_paths = 0;
+ g_cv[id].vector_index = 0;
+ g_cv[id].dyn_vote = 0;
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 106d76aae3bb..ab074ffbcdfb 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -1074,6 +1074,13 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
goto end;
}
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ CAM_AHB_NOMINAL_VOTE);
+ if (rc < 0) {
+ pr_err("%s:%d: failed to vote for AHB\n", __func__, __LINE__);
+ goto end;
+ }
+
msm_camera_io_w(0x1, cpp_dev->base + MSM_CPP_MICRO_CLKEN_CTL);
msm_camera_io_w(0x1, cpp_dev->base +
MSM_CPP_MICRO_BOOT_START);
@@ -1082,7 +1089,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_CMD, rc);
- goto end;
+ goto vote;
}
msm_camera_io_w(0xFFFFFFFF, cpp_dev->base +
@@ -1092,7 +1099,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
/*Start firmware loading*/
msm_cpp_write(MSM_CPP_CMD_FW_LOAD, cpp_dev->base);
@@ -1102,7 +1109,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
for (i = 0; i < cpp_dev->fw->size/4; i++) {
msm_cpp_write(*ptr_bin, cpp_dev->base);
@@ -1111,7 +1118,7 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
}
ptr_bin++;
@@ -1124,21 +1131,21 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_OK, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_CMD);
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_CMD, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll_rx_empty(cpp_dev->base);
if (rc) {
pr_err("%s:%d] poll rx empty failed %d",
__func__, __LINE__, rc);
- goto end;
+ goto vote;
}
/*Trigger MC to jump to start address*/
msm_cpp_write(MSM_CPP_CMD_EXEC_JUMP, cpp_dev->base);
@@ -1148,21 +1155,21 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_CMD, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, 0x1);
if (rc) {
pr_err("%s:%d] poll command 0x1 failed %d", __func__, __LINE__,
rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_JUMP_ACK);
if (rc) {
pr_err("%s:%d] poll command %x failed %d", __func__, __LINE__,
MSM_CPP_MSG_ID_JUMP_ACK, rc);
- goto end;
+ goto vote;
}
rc = msm_cpp_poll(cpp_dev->base, MSM_CPP_MSG_ID_TRAILER);
@@ -1171,6 +1178,11 @@ static int32_t cpp_load_fw(struct cpp_device *cpp_dev, char *fw_name_bin)
MSM_CPP_MSG_ID_JUMP_ACK, rc);
}
+vote:
+ rc = cam_config_ahb_clk(NULL, 0, CAM_AHB_CLIENT_CPP,
+ CAM_AHB_SVS_VOTE);
+ if (rc < 0)
+ pr_err("%s:%d: failed to vote for AHB\n", __func__, __LINE__);
end:
return rc;
}
@@ -4186,7 +4198,7 @@ static int cpp_probe(struct platform_device *pdev)
cpp_dev->state = CPP_STATE_BOOT;
rc = cpp_init_hardware(cpp_dev);
if (rc < 0)
- goto cpp_probe_init_error;
+ goto bus_de_init;
media_entity_init(&cpp_dev->msm_sd.sd.entity, 0, NULL, 0);
cpp_dev->msm_sd.sd.entity.type = MEDIA_ENT_T_V4L2_SUBDEV;
@@ -4225,7 +4237,7 @@ static int cpp_probe(struct platform_device *pdev)
if (!cpp_dev->work) {
pr_err("no enough memory\n");
rc = -ENOMEM;
- goto cpp_probe_init_error;
+ goto bus_de_init;
}
INIT_WORK((struct work_struct *)cpp_dev->work, msm_cpp_do_timeout_work);
@@ -4245,6 +4257,12 @@ static int cpp_probe(struct platform_device *pdev)
else
CPP_DBG("FAILED.");
return rc;
+
+bus_de_init:
+ if (cpp_dev->bus_master_flag)
+ msm_cpp_deinit_bandwidth_mgr(cpp_dev);
+ else
+ msm_isp_deinit_bandwidth_mgr(ISP_CPP);
cpp_probe_init_error:
media_entity_cleanup(&cpp_dev->msm_sd.sd.entity);
msm_sd_unregister(&cpp_dev->msm_sd);
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index f071aae3ccab..0931242a5ec4 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -2940,7 +2940,10 @@ static int try_set_ctrl(struct msm_vidc_inst *inst, struct v4l2_ctrl *ctrl)
case V4L2_CID_MPEG_VIDC_SET_PERF_LEVEL:
switch (ctrl->val) {
case V4L2_CID_MPEG_VIDC_PERF_LEVEL_NOMINAL:
- inst->flags &= ~VIDC_TURBO;
+ if (inst->flags & VIDC_TURBO) {
+ inst->flags &= ~VIDC_TURBO;
+ msm_dcvs_init_load(inst);
+ }
break;
case V4L2_CID_MPEG_VIDC_PERF_LEVEL_TURBO:
inst->flags |= VIDC_TURBO;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_common.c b/drivers/media/platform/msm/vidc/msm_vidc_common.c
index e612c6ed11c7..fa2ad1754e77 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_common.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_common.c
@@ -5286,24 +5286,28 @@ void msm_comm_print_inst_info(struct msm_vidc_inst *inst)
int i = 0;
bool is_decode = false;
enum vidc_ports port;
+ bool is_secure = false;
if (!inst) {
- dprintk(VIDC_ERR, "%s - invalid param %p\n",
+ dprintk(VIDC_ERR, "%s - invalid param %pK\n",
__func__, inst);
return;
}
is_decode = inst->session_type == MSM_VIDC_DECODER;
port = is_decode ? OUTPUT_PORT : CAPTURE_PORT;
+ is_secure = inst->flags & VIDC_SECURE;
dprintk(VIDC_ERR,
- "%s session, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
- is_decode ? "Decode" : "Encode", inst->fmts[port].name,
+ "%s session, %s, Codec type: %s HxW: %d x %d fps: %d bitrate: %d bit-depth: %s\n",
+ is_decode ? "Decode" : "Encode",
+ is_secure ? "Secure" : "Non-Secure",
+ inst->fmts[port].name,
inst->prop.height[port], inst->prop.width[port],
inst->prop.fps, inst->prop.bitrate,
!inst->bit_depth ? "8" : "10");
dprintk(VIDC_ERR,
- "---Buffer details for inst: %p of type: %d---\n",
+ "---Buffer details for inst: %pK of type: %d---\n",
inst, inst->session_type);
mutex_lock(&inst->registeredbufs.lock);
dprintk(VIDC_ERR, "registered buffer list:\n");
@@ -5347,7 +5351,7 @@ static void msm_comm_print_debug_info(struct msm_vidc_inst *inst)
struct msm_vidc_inst *temp = NULL;
if (!inst || !inst->core) {
- dprintk(VIDC_ERR, "%s - invalid param %p %p\n",
+ dprintk(VIDC_ERR, "%s - invalid param %pK %pK\n",
__func__, inst, core);
return;
}
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index fab594992df3..344a0d5330aa 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -18,6 +18,7 @@
#include <linux/slab.h>
#include <linux/regmap.h>
#include <linux/delay.h>
+#include <linux/sched.h>
#include <linux/mfd/core.h>
#include <linux/mfd/wcd9xxx/pdata.h>
#include <linux/mfd/wcd9xxx/core.h>
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index ff838ebefba6..0bed86d2ccc0 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -131,6 +131,35 @@ static DEFINE_MUTEX(qsee_bw_mutex);
static DEFINE_MUTEX(app_access_lock);
static DEFINE_MUTEX(clk_access_lock);
+struct sglist_info {
+ uint32_t indexAndFlags;
+ uint32_t sizeOrCount;
+};
+
+/*
+ * The 31st bit indicates whether one or multiple physical addresses are inside
+ * the request buffer. If it is set, the index locates a single physical addr
+ * inside the request buffer, and `sizeOrCount` is the size of the memory being
+ * shared at that physical address.
+ * Otherwise, the index locates an array of {start, len} pairs (a
+ * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
+ * that array.
+ *
+ * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
+ * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
+ *
+ * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
+ */
+#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
+ ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
+
+#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
+
+#define FEATURE_ID_WHITELIST 15 /*whitelist feature id*/
+
+#define MAKE_WHITELIST_VERSION(major, minor, patch) \
+ (((major & 0x3FF) << 22) | ((minor & 0x3FF) << 12) | (patch & 0xFFF))
+
struct qseecom_registered_listener_list {
struct list_head list;
struct qseecom_register_listener_req svc;
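
The indexAndFlags packing described in the comment above, and the whitelist version encoding, can be sanity-checked with a short user-space sketch (the offset and version numbers below are illustrative only):

#include <stdio.h>
#include <stdint.h>

/* Same bit layout as SGLISTINFO_SET_INDEX_FLAG() and MAKE_WHITELIST_VERSION() above */
#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
	((((uint32_t)(c) & 1) << 31) | (((uint32_t)(s) & 1) << 30) | ((i) & 0x3fffffff))
#define MAKE_WHITELIST_VERSION(major, minor, patch) \
	((((major) & 0x3FF) << 22) | (((minor) & 0x3FF) << 12) | ((patch) & 0xFFF))

int main(void)
{
	/* One contiguous buffer, 64-bit addressing, offset 0x40 into the request buffer */
	uint32_t v = SGLISTINFO_SET_INDEX_FLAG(1, 1, 0x40);

	printf("indexAndFlags = 0x%08x\n", v);			/* 0xc0000040 */
	printf("single=%u is64=%u offset=0x%x\n",
	       v >> 31, (v >> 30) & 1, v & 0x3fffffff);		/* 1 1 0x40 */
	printf("whitelist v1.0.0 = 0x%08x\n",
	       (uint32_t)MAKE_WHITELIST_VERSION(1, 0, 0));	/* 0x00400000 */
	return 0;
}
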
@@ -145,6 +174,8 @@ struct qseecom_registered_listener_list {
bool listener_in_use;
/* wq for thread blocked on this listener*/
wait_queue_head_t listener_block_app_wq;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
+ uint32_t sglist_cnt;
};
struct qseecom_registered_app_list {
@@ -268,30 +299,6 @@ struct qseecom_listener_handle {
static struct qseecom_control qseecom;
-struct sglist_info {
- uint32_t indexAndFlags;
- uint32_t sizeOrCount;
-};
-
-/*
- * The 31th bit indicates only one or multiple physical address inside
- * the request buffer. If it is set, the index locates a single physical addr
- * inside the request buffer, and `sizeOrCount` is the size of the memory being
- * shared at that physical address.
- * Otherwise, the index locates an array of {start, len} pairs (a
- * "scatter/gather list"), and `sizeOrCount` gives the number of entries in
- * that array.
- *
- * The 30th bit indicates 64 or 32bit address; when it is set, physical addr
- * and scatter gather entry sizes are 64-bit values. Otherwise, 32-bit values.
- *
- * The bits [0:29] of `indexAndFlags` hold an offset into the request buffer.
- */
-#define SGLISTINFO_SET_INDEX_FLAG(c, s, i) \
- ((uint32_t)(((c & 1) << 31) | ((s & 1) << 30) | (i & 0x3fffffff)))
-
-#define SGLISTINFO_TABLE_SIZE (sizeof(struct sglist_info) * MAX_ION_FD)
-
struct qseecom_dev_handle {
enum qseecom_client_handle_type type;
union {
@@ -305,8 +312,9 @@ struct qseecom_dev_handle {
bool perf_enabled;
bool fast_load_enabled;
enum qseecom_bandwidth_request_mode mode;
- struct sglist_info *sglistinfo_ptr;
+ struct sglist_info sglistinfo_ptr[MAX_ION_FD];
uint32_t sglist_cnt;
+ bool use_legacy_cmd;
};
struct qseecom_key_id_usage_desc {
@@ -584,6 +592,34 @@ static int qseecom_scm_call2(uint32_t svc_id, uint32_t tz_cmd_id,
ret = scm_call2(smc_id, &desc);
break;
}
+ case QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST: {
+ struct qseecom_client_listener_data_irsp *req;
+ struct qseecom_client_listener_data_64bit_irsp *req_64;
+
+ smc_id =
+ TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID;
+ desc.arginfo =
+ TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ req =
+ (struct qseecom_client_listener_data_irsp *)
+ req_buf;
+ desc.args[0] = req->listener_id;
+ desc.args[1] = req->status;
+ desc.args[2] = req->sglistinfo_ptr;
+ desc.args[3] = req->sglistinfo_len;
+ } else {
+ req_64 =
+ (struct qseecom_client_listener_data_64bit_irsp *)
+ req_buf;
+ desc.args[0] = req_64->listener_id;
+ desc.args[1] = req_64->status;
+ desc.args[2] = req_64->sglistinfo_ptr;
+ desc.args[3] = req_64->sglistinfo_len;
+ }
+ ret = scm_call2(smc_id, &desc);
+ break;
+ }
case QSEOS_LOAD_EXTERNAL_ELF_COMMAND: {
struct qseecom_load_app_ireq *req;
struct qseecom_load_app_64bit_ireq *req_64bit;
@@ -1128,7 +1164,7 @@ static int qseecom_register_listener(struct qseecom_dev_handle *data,
return -EBUSY;
}
- new_entry = kmalloc(sizeof(*new_entry), GFP_KERNEL);
+ new_entry = kzalloc(sizeof(*new_entry), GFP_KERNEL);
if (!new_entry) {
pr_err("kmalloc failed\n");
return -ENOMEM;
@@ -1593,6 +1629,16 @@ static int __qseecom_qseos_fail_return_resp_tz(struct qseecom_dev_handle *data,
return ret;
}
+static void __qseecom_clean_listener_sglistinfo(
+ struct qseecom_registered_listener_list *ptr_svc)
+{
+ if (ptr_svc->sglist_cnt) {
+ memset(ptr_svc->sglistinfo_ptr, 0,
+ SGLISTINFO_TABLE_SIZE);
+ ptr_svc->sglist_cnt = 0;
+ }
+}
+
static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
struct qseecom_command_scm_resp *resp)
{
@@ -1601,9 +1647,14 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
uint32_t lstnr;
unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp;
+ struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
struct qseecom_registered_listener_list *ptr_svc = NULL;
sigset_t new_sigset;
sigset_t old_sigset;
+ uint32_t status;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = NULL;
while (resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
@@ -1677,15 +1728,42 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
data->client.app_id, lstnr, ret);
rc = -ENODEV;
- send_data_rsp.status = QSEOS_RESULT_FAILURE;
+ status = QSEOS_RESULT_FAILURE;
} else {
- send_data_rsp.status = QSEOS_RESULT_SUCCESS;
+ status = QSEOS_RESULT_SUCCESS;
}
qseecom.send_resp_flag = 0;
ptr_svc->send_resp_flag = 0;
- send_data_rsp.qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
- send_data_rsp.listener_id = lstnr;
+ table = ptr_svc->sglistinfo_ptr;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_rsp.listener_id = lstnr;
+ send_data_rsp.status = status;
+ send_data_rsp.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_rsp.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp;
+ cmd_len = sizeof(send_data_rsp);
+ } else {
+ send_data_rsp_64bit.listener_id = lstnr;
+ send_data_rsp_64bit.status = status;
+ send_data_rsp_64bit.sglistinfo_ptr =
+ virt_to_phys(table);
+ send_data_rsp_64bit.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp_64bit;
+ cmd_len = sizeof(send_data_rsp_64bit);
+ }
+ if (qseecom.whitelist_support == false)
+ *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+ else
+ *(uint32_t *)cmd_buf =
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
if (ptr_svc)
msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
ptr_svc->sb_virt, ptr_svc->sb_length,
@@ -1695,10 +1773,9 @@ static int __qseecom_process_incomplete_cmd(struct qseecom_dev_handle *data,
__qseecom_enable_clk(CLK_QSEE);
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- (const void *)&send_data_rsp,
- sizeof(send_data_rsp), resp,
- sizeof(*resp));
+ cmd_buf, cmd_len, resp, sizeof(*resp));
ptr_svc->listener_in_use = false;
+ __qseecom_clean_listener_sglistinfo(ptr_svc);
if (ret) {
pr_err("scm_call() failed with err: %d (app_id = %d)\n",
ret, data->client.app_id);
@@ -1826,9 +1903,14 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
uint32_t lstnr;
unsigned long flags;
struct qseecom_client_listener_data_irsp send_data_rsp;
+ struct qseecom_client_listener_data_64bit_irsp send_data_rsp_64bit;
struct qseecom_registered_listener_list *ptr_svc = NULL;
sigset_t new_sigset;
sigset_t old_sigset;
+ uint32_t status;
+ void *cmd_buf = NULL;
+ size_t cmd_len;
+ struct sglist_info *table = NULL;
while (ret == 0 && rc == 0 && resp->result == QSEOS_RESULT_INCOMPLETE) {
lstnr = resp->data;
@@ -1891,13 +1973,38 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
pr_err("Abort clnt %d waiting on lstnr svc %d, ret %d",
data->client.app_id, lstnr, ret);
rc = -ENODEV;
- send_data_rsp.status = QSEOS_RESULT_FAILURE;
+ status = QSEOS_RESULT_FAILURE;
} else {
- send_data_rsp.status = QSEOS_RESULT_SUCCESS;
+ status = QSEOS_RESULT_SUCCESS;
}
-
- send_data_rsp.qsee_cmd_id = QSEOS_LISTENER_DATA_RSP_COMMAND;
- send_data_rsp.listener_id = lstnr;
+ table = ptr_svc->sglistinfo_ptr;
+ if (qseecom.qsee_version < QSEE_VERSION_40) {
+ send_data_rsp.listener_id = lstnr;
+ send_data_rsp.status = status;
+ send_data_rsp.sglistinfo_ptr =
+ (uint32_t)virt_to_phys(table);
+ send_data_rsp.sglistinfo_len = SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp;
+ cmd_len = sizeof(send_data_rsp);
+ } else {
+ send_data_rsp_64bit.listener_id = lstnr;
+ send_data_rsp_64bit.status = status;
+ send_data_rsp_64bit.sglistinfo_ptr =
+ virt_to_phys(table);
+ send_data_rsp_64bit.sglistinfo_len =
+ SGLISTINFO_TABLE_SIZE;
+ dmac_flush_range((void *)table,
+ (void *)table + SGLISTINFO_TABLE_SIZE);
+ cmd_buf = (void *)&send_data_rsp_64bit;
+ cmd_len = sizeof(send_data_rsp_64bit);
+ }
+ if (qseecom.whitelist_support == false)
+ *(uint32_t *)cmd_buf = QSEOS_LISTENER_DATA_RSP_COMMAND;
+ else
+ *(uint32_t *)cmd_buf =
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST;
if (ptr_svc)
msm_ion_do_cache_op(qseecom.ion_clnt, ptr_svc->ihandle,
ptr_svc->sb_virt, ptr_svc->sb_length,
@@ -1907,11 +2014,9 @@ static int __qseecom_reentrancy_process_incomplete_cmd(
__qseecom_enable_clk(CLK_QSEE);
ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- (const void *)&send_data_rsp,
- sizeof(send_data_rsp), resp,
- sizeof(*resp));
-
+ cmd_buf, cmd_len, resp, sizeof(*resp));
ptr_svc->listener_in_use = false;
+ __qseecom_clean_listener_sglistinfo(ptr_svc);
wake_up_interruptible(&ptr_svc->listener_block_app_wq);
if (ret) {
@@ -2910,7 +3015,7 @@ static int __qseecom_send_cmd(struct qseecom_dev_handle *data,
cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
}
- if (qseecom.whitelist_support == false)
+ if (qseecom.whitelist_support == false || data->use_legacy_cmd == true)
*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND;
else
*(uint32_t *)cmd_buf = QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
@@ -3015,6 +3120,8 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
struct qseecom_send_modfd_cmd_req *req = NULL;
struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
struct qseecom_registered_listener_list *this_lstnr = NULL;
+ uint32_t offset;
+ struct sg_table *sg_ptr;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(data->type != QSEECOM_CLIENT_APP))
@@ -3036,7 +3143,6 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
}
for (i = 0; i < MAX_ION_FD; i++) {
- struct sg_table *sg_ptr = NULL;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(req->ifd_data[i].fd > 0)) {
ihandle = ion_import_dma_buf(qseecom.ion_clnt,
@@ -3178,14 +3284,25 @@ static int __qseecom_update_cmd_buf(void *msg, bool cleanup,
ihandle, NULL, len,
ION_IOC_CLEAN_INV_CACHES);
if (data->type == QSEECOM_CLIENT_APP) {
+ offset = req->ifd_data[i].cmd_buf_offset;
data->sglistinfo_ptr[i].indexAndFlags =
SGLISTINFO_SET_INDEX_FLAG(
- (sg_ptr->nents == 1), 0,
- req->ifd_data[i].cmd_buf_offset);
+ (sg_ptr->nents == 1), 0, offset);
data->sglistinfo_ptr[i].sizeOrCount =
(sg_ptr->nents == 1) ?
sg->length : sg_ptr->nents;
data->sglist_cnt = i + 1;
+ } else {
+ offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+ + (uintptr_t)lstnr_resp->resp_buf_ptr -
+ (uintptr_t)this_lstnr->sb_virt);
+ this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 0, offset);
+ this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ this_lstnr->sglist_cnt = i + 1;
}
}
/* Deallocate the handle */
@@ -3258,6 +3375,8 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
struct qseecom_send_modfd_cmd_req *req = NULL;
struct qseecom_send_modfd_listener_resp *lstnr_resp = NULL;
struct qseecom_registered_listener_list *this_lstnr = NULL;
+ uint32_t offset;
+ struct sg_table *sg_ptr;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(data->type != QSEECOM_CLIENT_APP))
@@ -3279,7 +3398,6 @@ static int __qseecom_update_cmd_buf_64(void *msg, bool cleanup,
}
for (i = 0; i < MAX_ION_FD; i++) {
- struct sg_table *sg_ptr = NULL;
if ((data->type != QSEECOM_LISTENER_SERVICE) &&
(req->ifd_data[i].fd > 0)) {
ihandle = ion_import_dma_buf(qseecom.ion_clnt,
@@ -3396,14 +3514,25 @@ cleanup:
ihandle, NULL, len,
ION_IOC_CLEAN_INV_CACHES);
if (data->type == QSEECOM_CLIENT_APP) {
+ offset = req->ifd_data[i].cmd_buf_offset;
data->sglistinfo_ptr[i].indexAndFlags =
SGLISTINFO_SET_INDEX_FLAG(
- (sg_ptr->nents == 1), 1,
- req->ifd_data[i].cmd_buf_offset);
+ (sg_ptr->nents == 1), 1, offset);
data->sglistinfo_ptr[i].sizeOrCount =
(sg_ptr->nents == 1) ?
sg->length : sg_ptr->nents;
data->sglist_cnt = i + 1;
+ } else {
+ offset = (lstnr_resp->ifd_data[i].cmd_buf_offset
+ + (uintptr_t)lstnr_resp->resp_buf_ptr -
+ (uintptr_t)this_lstnr->sb_virt);
+ this_lstnr->sglistinfo_ptr[i].indexAndFlags =
+ SGLISTINFO_SET_INDEX_FLAG(
+ (sg_ptr->nents == 1), 1, offset);
+ this_lstnr->sglistinfo_ptr[i].sizeOrCount =
+ (sg_ptr->nents == 1) ?
+ sg->length : sg_ptr->nents;
+ this_lstnr->sglist_cnt = i + 1;
}
}
/* Deallocate the handle */
@@ -4122,21 +4251,12 @@ int qseecom_start_app(struct qseecom_handle **handle,
data->client.user_virt_sb_base = 0;
data->client.ihandle = NULL;
- /* Allocate sglistinfo buffer for kernel client */
- data->sglistinfo_ptr = kzalloc(SGLISTINFO_TABLE_SIZE, GFP_KERNEL);
- if (!(data->sglistinfo_ptr)) {
- kfree(data);
- kfree(*handle);
- *handle = NULL;
- return -ENOMEM;
- }
init_waitqueue_head(&data->abort_wq);
data->client.ihandle = ion_alloc(qseecom.ion_clnt, size, 4096,
ION_HEAP(ION_QSECOM_HEAP_ID), 0);
if (IS_ERR_OR_NULL(data->client.ihandle)) {
pr_err("Ion client could not retrieve the handle\n");
- kfree(data->sglistinfo_ptr);
kfree(data);
kfree(*handle);
*handle = NULL;
@@ -4239,7 +4359,6 @@ int qseecom_start_app(struct qseecom_handle **handle,
return 0;
err:
- kfree(data->sglistinfo_ptr);
kfree(data);
kfree(*handle);
*handle = NULL;
@@ -4287,7 +4406,6 @@ int qseecom_shutdown_app(struct qseecom_handle **handle)
mutex_unlock(&app_access_lock);
if (ret == 0) {
- kzfree(data->sglistinfo_ptr);
kzfree(data);
kzfree(*handle);
kzfree(kclient);
@@ -4353,8 +4471,11 @@ int qseecom_send_command(struct qseecom_handle *handle, void *send_buf,
}
perf_enabled = true;
}
+ if (!strcmp(data->client.app_name, "securemm"))
+ data->use_legacy_cmd = true;
ret = __qseecom_send_cmd(data, &req);
+ data->use_legacy_cmd = false;
if (qseecom.support_bus_scaling)
__qseecom_add_bw_scale_down_timer(
QSEECOM_SEND_CMD_CRYPTO_TIMEOUT);
@@ -7030,6 +7151,7 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
wake_up_all(&data->abort_wq);
if (ret)
pr_err("failed qseecom_send_mod_resp: %d\n", ret);
+ __qseecom_clean_data_sglistinfo(data);
break;
}
case QSEECOM_QTEEC_IOCTL_OPEN_SESSION_REQ: {
@@ -7179,12 +7301,6 @@ static int qseecom_open(struct inode *inode, struct file *file)
data->mode = INACTIVE;
init_waitqueue_head(&data->abort_wq);
atomic_set(&data->ioctl_count, 0);
-
- data->sglistinfo_ptr = kzalloc(SGLISTINFO_TABLE_SIZE, GFP_KERNEL);
- if (!(data->sglistinfo_ptr)) {
- kzfree(data);
- return -ENOMEM;
- }
return ret;
}
@@ -7239,7 +7355,6 @@ static int qseecom_release(struct inode *inode, struct file *file)
if (data->perf_enabled == true)
qsee_disable_clock_vote(data, CLK_DFAB);
}
- kfree(data->sglistinfo_ptr);
kfree(data);
return ret;
@@ -7988,73 +8103,14 @@ out:
}
/*
- * Check if whitelist feature is supported by making a test scm_call
- * to send a whitelist command to an invalid app ID 0
+ * Check for the whitelist feature: if the TZ feature version is
+ * below 1.0.0, the whitelist feature is not supported.
*/
static int qseecom_check_whitelist_feature(void)
{
- struct qseecom_client_send_data_ireq send_data_req = {0};
- struct qseecom_client_send_data_64bit_ireq send_data_req_64bit = {0};
- struct qseecom_command_scm_resp resp;
- uint32_t buf_size = 128;
- void *buf = NULL;
- void *cmd_buf = NULL;
- size_t cmd_len;
- int ret = 0;
- phys_addr_t pa;
+ int version = scm_get_feat_version(FEATURE_ID_WHITELIST);
- buf = kzalloc(buf_size, GFP_KERNEL);
- if (!buf)
- return -ENOMEM;
- pa = virt_to_phys(buf);
- if (qseecom.qsee_version < QSEE_VERSION_40) {
- send_data_req.qsee_cmd_id =
- QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
- send_data_req.app_id = 0;
- send_data_req.req_ptr = (uint32_t)pa;
- send_data_req.req_len = buf_size;
- send_data_req.rsp_ptr = (uint32_t)pa;
- send_data_req.rsp_len = buf_size;
- send_data_req.sglistinfo_ptr = (uint32_t)pa;
- send_data_req.sglistinfo_len = buf_size;
- cmd_buf = (void *)&send_data_req;
- cmd_len = sizeof(struct qseecom_client_send_data_ireq);
- } else {
- send_data_req_64bit.qsee_cmd_id =
- QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST;
- send_data_req_64bit.app_id = 0;
- send_data_req_64bit.req_ptr = (uint64_t)pa;
- send_data_req_64bit.req_len = buf_size;
- send_data_req_64bit.rsp_ptr = (uint64_t)pa;
- send_data_req_64bit.rsp_len = buf_size;
- send_data_req_64bit.sglistinfo_ptr = (uint64_t)pa;
- send_data_req_64bit.sglistinfo_len = buf_size;
- cmd_buf = (void *)&send_data_req_64bit;
- cmd_len = sizeof(struct qseecom_client_send_data_64bit_ireq);
- }
- ret = qseecom_scm_call(SCM_SVC_TZSCHEDULER, 1,
- cmd_buf, cmd_len,
- &resp, sizeof(resp));
-/*
- * If this cmd exists and whitelist is supported, scm_call return -2 (scm
- * driver remap it to -EINVAL) and resp.result 0xFFFFFFED(-19); Otherwise,
- * scm_call return -1 (remap to -EIO).
- */
- if (ret == -EIO) {
- qseecom.whitelist_support = false;
- ret = 0;
- } else if (ret == -EINVAL &&
- resp.result == QSEOS_RESULT_FAIL_SEND_CMD_NO_THREAD) {
- qseecom.whitelist_support = true;
- ret = 0;
- } else {
- pr_info("Check whitelist with ret = %d, result = 0x%x\n",
- ret, resp.result);
- qseecom.whitelist_support = false;
- ret = 0;
- }
- kfree(buf);
- return ret;
+ return version >= MAKE_WHITELIST_VERSION(1, 0, 0);
}
static int qseecom_probe(struct platform_device *pdev)
@@ -8305,11 +8361,7 @@ static int qseecom_probe(struct platform_device *pdev)
qseecom.qsee_perf_client = msm_bus_scale_register_client(
qseecom_platform_support);
- rc = qseecom_check_whitelist_feature();
- if (rc) {
- rc = -EINVAL;
- goto exit_destroy_ion_client;
- }
+ qseecom.whitelist_support = qseecom_check_whitelist_feature();
pr_warn("qseecom.whitelist_support = %d\n",
qseecom.whitelist_support);
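With the probe change above, whitelist support is now a simple version comparison instead of a probing scm_call. The sketch below models that comparison; the major/minor/patch packing is an assumption for illustration, since the real MAKE_WHITELIST_VERSION macro and scm_get_feat_version() are not shown in this diff:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Assumed packing: major in bits 16+, minor in bits 8-15, patch in bits 0-7. */
static uint32_t make_version(unsigned int major, unsigned int minor, unsigned int patch)
{
	return (major << 16) | (minor << 8) | patch;
}

static bool whitelist_supported(uint32_t tz_feature_version)
{
	return tz_feature_version >= make_version(1, 0, 0);
}

int main(void)
{
	printf("0.9.0 supported: %d\n", whitelist_supported(make_version(0, 9, 0))); /* 0 */
	printf("1.2.0 supported: %d\n", whitelist_supported(make_version(1, 2, 0))); /* 1 */
	return 0;
}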
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 3c2a6d4620ba..7e15bcfc84ff 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -1165,8 +1165,11 @@ void ipa_update_repl_threshold(enum ipa_client_type ipa_client)
* Determine how many buffers/descriptors remaining will
* cause to drop below the yellow WM bar.
*/
- ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
- / ep->sys->rx_buff_sz;
+ if (ep->sys->rx_buff_sz)
+ ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
+ / ep->sys->rx_buff_sz;
+ else
+ ep->rx_replenish_threshold = 0;
}
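Both hunks in this file add the same guard: when rx_buff_sz is zero, the replenish threshold falls back to 0 instead of dividing by zero. An illustrative one-line helper capturing the rule (not the driver code):

#include <stdint.h>

static inline uint32_t replenish_threshold(uint32_t yellow_wm, uint32_t rx_buff_sz)
{
	/* Fall back to 0 when the buffer size is zero/unknown. */
	return rx_buff_sz ? yellow_wm / rx_buff_sz : 0;
}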
/**
@@ -1361,8 +1364,11 @@ int ipa2_setup_sys_pipe(struct ipa_sys_connect_params *sys_in, u32 *clnt_hdl)
* Determine how many buffers/descriptors remaining will
* cause to drop below the yellow WM bar.
*/
- ep->rx_replenish_threshold = ipa_get_sys_yellow_wm(ep->sys)
- / ep->sys->rx_buff_sz;
+ if (ep->sys->rx_buff_sz)
+ ep->rx_replenish_threshold =
+ ipa_get_sys_yellow_wm(ep->sys) / ep->sys->rx_buff_sz;
+ else
+ ep->rx_replenish_threshold = 0;
/* Only when the WAN pipes are setup, actual threshold will
* be read from the register. So update LAN_CONS ep again with
* right value.
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index a703b208f6e4..3f8f66f1f7d8 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -222,7 +222,6 @@ struct fg_batt_props {
int float_volt_uv;
int vbatt_full_mv;
int fastchg_curr_ma;
- int batt_id_kohm;
};
struct fg_cyc_ctr_data {
@@ -260,6 +259,7 @@ struct fg_chip {
struct power_supply *batt_psy;
struct power_supply *usb_psy;
struct power_supply *dc_psy;
+ struct power_supply *parallel_psy;
struct iio_channel *batt_id_chan;
struct fg_memif *sram;
struct fg_irq_info *irqs;
@@ -278,7 +278,7 @@ struct fg_chip {
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
- int batt_id;
+ int batt_id_kohms;
int status;
int charge_done;
int last_soc;
@@ -289,8 +289,10 @@ struct fg_chip {
bool battery_missing;
bool fg_restarting;
bool charge_full;
+ bool charge_empty;
bool recharge_soc_adjusted;
bool ki_coeff_dischg_en;
+ bool esr_fcc_ctrl_en;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index 4ee94b990382..00a3f3ecdc20 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -655,12 +655,42 @@ static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
return 0;
}
+#define DEBUG_BATT_ID_KOHMS 7
+static bool is_debug_batt_id(struct fg_chip *chip)
+{
+ int batt_id_delta = 0;
+
+ if (!chip->batt_id_kohms)
+ return false;
+
+ batt_id_delta = abs(chip->batt_id_kohms - DEBUG_BATT_ID_KOHMS);
+ if (batt_id_delta <= 1) {
+ fg_dbg(chip, FG_POWER_SUPPLY, "Debug battery id: %dKohms\n",
+ chip->batt_id_kohms);
+ return true;
+ }
+
+ return false;
+}
+
#define FULL_CAPACITY 100
#define FULL_SOC_RAW 255
+#define DEBUG_BATT_SOC 67
+#define EMPTY_SOC 0
static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
{
int rc, msoc;
+ if (is_debug_batt_id(chip)) {
+ *val = DEBUG_BATT_SOC;
+ return 0;
+ }
+
+ if (chip->charge_empty) {
+ *val = EMPTY_SOC;
+ return 0;
+ }
+
if (chip->charge_full) {
*val = FULL_CAPACITY;
return 0;
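Taken together, fg_get_prop_capacity now resolves the reported SOC in a fixed order: a debug battery id (within 1 kohm of 7 kohm) pins it to 67, then the empty latch reports 0, then the full latch reports 100, and only then is the raw monotonic SOC scaled. An illustrative userspace model of that precedence (the msoc scaling is simplified here):

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

#define DEBUG_BATT_ID_KOHMS 7
#define DEBUG_BATT_SOC      67
#define EMPTY_SOC           0
#define FULL_CAPACITY       100
#define FULL_SOC_RAW        255

static int report_capacity(int batt_id_kohms, bool charge_empty, bool charge_full,
			   int msoc_raw)
{
	if (batt_id_kohms && abs(batt_id_kohms - DEBUG_BATT_ID_KOHMS) <= 1)
		return DEBUG_BATT_SOC;                 /* debug battery detected */
	if (charge_empty)
		return EMPTY_SOC;                      /* empty-soc irq latched */
	if (charge_full)
		return FULL_CAPACITY;                  /* charge termination latched */
	return msoc_raw * FULL_CAPACITY / FULL_SOC_RAW;
}

int main(void)
{
	printf("%d\n", report_capacity(8, false, false, 128));   /* 67: debug battery */
	printf("%d\n", report_capacity(100, true, false, 128));  /* 0:  empty latched */
	printf("%d\n", report_capacity(100, false, false, 128)); /* 50: scaled raw SOC */
	return 0;
}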
@@ -725,7 +755,7 @@ static int fg_get_batt_profile(struct fg_chip *chip)
}
batt_id /= 1000;
- chip->batt_id = batt_id;
+ chip->batt_id_kohms = batt_id;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_err("Batterydata not available\n");
@@ -879,6 +909,17 @@ static bool is_charger_available(struct fg_chip *chip)
return true;
}
+static bool is_parallel_charger_available(struct fg_chip *chip)
+{
+ if (!chip->parallel_psy)
+ chip->parallel_psy = power_supply_get_by_name("parallel");
+
+ if (!chip->parallel_psy)
+ return false;
+
+ return true;
+}
+
static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
{
int16_t cc_mah;
@@ -1351,6 +1392,72 @@ static int fg_adjust_recharge_soc(struct fg_chip *chip)
return 0;
}
+static int fg_esr_fcc_config(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0, };
+ int rc;
+ bool parallel_en = false;
+
+ if (is_parallel_charger_available(chip)) {
+ rc = power_supply_get_property(chip->parallel_psy,
+ POWER_SUPPLY_PROP_CHARGING_ENABLED, &prop);
+ if (rc < 0) {
+ pr_err("Error in reading charging_enabled from parallel_psy, rc=%d\n",
+ rc);
+ return rc;
+ }
+ parallel_en = prop.intval;
+ }
+
+ fg_dbg(chip, FG_POWER_SUPPLY, "status: %d parallel_en: %d esr_fcc_ctrl_en: %d\n",
+ chip->status, parallel_en, chip->esr_fcc_ctrl_en);
+
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING && parallel_en) {
+ if (chip->esr_fcc_ctrl_en)
+ return 0;
+
+ /*
+ * When parallel charging is enabled, configure ESR FCC to
+ * 300mA to trigger an ESR pulse. Without this, FG can ask
+ * the main charger to increase FCC when it is supposed to
+ * decrease it.
+ */
+ rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+ ESR_FAST_CRG_IVAL_MASK |
+ ESR_FAST_CRG_CTL_EN_BIT,
+ ESR_FCC_300MA | ESR_FAST_CRG_CTL_EN_BIT);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+ return rc;
+ }
+
+ chip->esr_fcc_ctrl_en = true;
+ } else {
+ if (!chip->esr_fcc_ctrl_en)
+ return 0;
+
+ /*
+ * If we're here, then it means either the device is not in
+ * charging state or parallel charging is disabled. Disable
+ * ESR fast charge current control in SW.
+ */
+ rc = fg_masked_write(chip, BATT_INFO_ESR_FAST_CRG_CFG(chip),
+ ESR_FAST_CRG_CTL_EN_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_INFO_ESR_FAST_CRG_CFG(chip), rc);
+ return rc;
+ }
+
+ chip->esr_fcc_ctrl_en = false;
+ }
+
+ fg_dbg(chip, FG_STATUS, "esr_fcc_ctrl_en set to %d\n",
+ chip->esr_fcc_ctrl_en);
+ return 0;
+}
+
static void status_change_work(struct work_struct *work)
{
struct fg_chip *chip = container_of(work,
@@ -1398,6 +1505,10 @@ static void status_change_work(struct work_struct *work)
rc = fg_adjust_ki_coeff_dischg(chip);
if (rc < 0)
pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
+ rc = fg_esr_fcc_config(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting FCC for ESR, rc=%d\n", rc);
out:
pm_relax(chip->dev);
}
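fg_esr_fcc_config, invoked from status_change_work above, only touches BATT_INFO_ESR_FAST_CRG_CFG on state transitions: the 300 mA override is enabled while the device is charging with the parallel charger active, and removed otherwise, with esr_fcc_ctrl_en caching the current state. A minimal decision sketch of that logic (illustrative only, no register access):

#include <stdbool.h>
#include <stdio.h>

enum esr_fcc_action { ESR_FCC_NO_CHANGE, ESR_FCC_ENABLE_300MA, ESR_FCC_DISABLE };

static enum esr_fcc_action esr_fcc_decide(bool charging, bool parallel_en,
					  bool ctrl_currently_en)
{
	bool want_en = charging && parallel_en;

	if (want_en == ctrl_currently_en)
		return ESR_FCC_NO_CHANGE;              /* skip redundant register writes */
	return want_en ? ESR_FCC_ENABLE_300MA : ESR_FCC_DISABLE;
}

int main(void)
{
	printf("%d\n", esr_fcc_decide(true, true, false));  /* 1: enable 300 mA override */
	printf("%d\n", esr_fcc_decide(true, true, true));   /* 0: already enabled */
	printf("%d\n", esr_fcc_decide(false, true, true));  /* 2: disable the override */
	return 0;
}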
@@ -1556,6 +1667,10 @@ static void dump_sram(u8 *buf, int len)
}
}
+#define PROFILE_LOAD_BIT BIT(0)
+#define BOOTLOADER_LOAD_BIT BIT(1)
+#define BOOTLOADER_RESTART_BIT BIT(2)
+#define HLOS_RESTART_BIT BIT(3)
static bool is_profile_load_required(struct fg_chip *chip)
{
u8 buf[PROFILE_COMP_LEN], val;
@@ -1570,7 +1685,7 @@ static bool is_profile_load_required(struct fg_chip *chip)
}
/* Check if integrity bit is set */
- if (val == 0x01) {
+ if (val & PROFILE_LOAD_BIT) {
fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
@@ -1728,7 +1843,7 @@ static void profile_load_work(struct work_struct *work)
fg_dbg(chip, FG_STATUS, "SOC is ready\n");
/* Set the profile integrity bit */
- val = 0x1;
+ val = HLOS_RESTART_BIT | PROFILE_LOAD_BIT;
rc = fg_sram_write(chip, PROFILE_INTEGRITY_WORD,
PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -2274,6 +2389,7 @@ static irqreturn_t fg_empty_soc_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ chip->charge_empty = true;
if (is_charger_available(chip))
power_supply_changed(chip->batt_psy);
@@ -2495,7 +2611,7 @@ static int fg_parse_ki_coefficients(struct fg_chip *chip)
}
#define DEFAULT_CUTOFF_VOLT_MV 3200
-#define DEFAULT_EMPTY_VOLT_MV 3100
+#define DEFAULT_EMPTY_VOLT_MV 2800
#define DEFAULT_CHG_TERM_CURR_MA 100
#define DEFAULT_SYS_TERM_CURR_MA -125
#define DEFAULT_DELTA_SOC_THR 1
@@ -2612,7 +2728,7 @@ static int fg_parse_dt(struct fg_chip *chip)
rc = fg_get_batt_profile(chip);
if (rc < 0)
pr_warn("profile for batt_id=%dKOhms not found..using OTP, rc:%d\n",
- chip->batt_id, rc);
+ chip->batt_id_kohms, rc);
/* Read all the optional properties below */
rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index f9d76c56aa2e..a00ad8343a88 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -225,6 +225,7 @@ struct smb_dt_props {
s32 step_cc_delta[STEP_CHARGING_MAX_STEPS];
struct device_node *revid_dev_node;
int float_option;
+ int chg_inhibit_thr_mv;
bool hvdcp_disable;
};
@@ -335,6 +336,14 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.hvdcp_disable = of_property_read_bool(node,
"qcom,hvdcp-disable");
+ of_property_read_u32(node, "qcom,chg-inhibit-threshold-mv",
+ &chip->dt.chg_inhibit_thr_mv);
+ if ((chip->dt.chg_inhibit_thr_mv < 0 ||
+ chip->dt.chg_inhibit_thr_mv > 300)) {
+ pr_err("qcom,chg-inhibit-threshold-mv is incorrect\n");
+ return -EINVAL;
+ }
+
return 0;
}
@@ -1213,6 +1222,40 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ switch (chip->dt.chg_inhibit_thr_mv) {
+ case 50:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_50MV);
+ break;
+ case 100:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_100MV);
+ break;
+ case 200:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_200MV);
+ break;
+ case 300:
+ rc = smblib_masked_write(chg, CHARGE_INHIBIT_THRESHOLD_CFG_REG,
+ CHARGE_INHIBIT_THRESHOLD_MASK,
+ CHARGE_INHIBIT_THRESHOLD_300MV);
+ break;
+ case 0:
+ rc = smblib_masked_write(chg, CHGR_CFG2_REG,
+ CHARGER_INHIBIT_BIT, 0);
+ default:
+ break;
+ }
+
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure charge inhibit threshold rc=%d\n",
+ rc);
+ return rc;
+ }
+
return rc;
}
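The switch above maps the devicetree value in mV onto the two-bit CHARGE_INHIBIT_THRESHOLD field (codes for 50/100/200/300 mV appear in the smb-reg.h hunk later in this patch), while 0 disables inhibit entirely through CHGR_CFG2. The same mapping, written table-driven as an illustrative alternative:

#include <stddef.h>
#include <stdio.h>

static int inhibit_threshold_code(int thr_mv)
{
	static const struct { int mv; int code; } map[] = {
		{ 50, 0 }, { 100, 1 }, { 200, 2 }, { 300, 3 },
	};

	for (size_t i = 0; i < sizeof(map) / sizeof(map[0]); i++)
		if (map[i].mv == thr_mv)
			return map[i].code;
	return -1; /* 0 mV means "disable inhibit" via CHGR_CFG2 instead */
}

int main(void)
{
	printf("200 mV -> field value %d\n", inhibit_threshold_code(200)); /* 2 */
	printf("  0 mV -> field value %d\n", inhibit_threshold_code(0));   /* -1 */
	return 0;
}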
@@ -1241,6 +1284,7 @@ static int smb2_setup_wa_flags(struct smb2 *chip)
switch (pmic_rev_id->pmic_subtype) {
case PMICOBALT_SUBTYPE:
+ chip->chg.wa_flags |= BOOST_BACK_WA;
if (pmic_rev_id->rev4 == PMICOBALT_V1P1_REV4) /* PMI rev 1.1 */
chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
break;
@@ -1451,7 +1495,8 @@ static struct smb2_irq_info smb2_irqs[] = {
},
{
.name = "switcher-power-ok",
- .handler = smblib_handle_debug,
+ .handler = smblib_handle_switcher_power_ok,
+ .storm_data = {true, 1000, 3},
},
};
@@ -1746,6 +1791,16 @@ static int smb2_remove(struct platform_device *pdev)
return 0;
}
+static void smb2_shutdown(struct platform_device *pdev)
+{
+ struct smb2 *chip = platform_get_drvdata(pdev);
+ struct smb_charger *chg = &chip->chg;
+
+ smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT, 0);
+ smblib_write(chg, CMD_HVDCP_2_REG, FORCE_5V_BIT);
+}
+
static const struct of_device_id match_table[] = {
{ .compatible = "qcom,qpnp-smb2", },
{ },
@@ -1759,6 +1814,7 @@ static struct platform_driver smb2_driver = {
},
.probe = smb2_probe,
.remove = smb2_remove,
+ .shutdown = smb2_shutdown,
};
module_platform_driver(smb2_driver);
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 198e77469bbe..9cbba5a34195 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -490,7 +490,7 @@ static int try_rerun_apsd_for_hvdcp(struct smb_charger *chg)
/* ensure hvdcp is enabled */
if (!get_effective_result(chg->hvdcp_disable_votable)) {
apsd_result = smblib_get_apsd_result(chg);
- if (apsd_result->pst == POWER_SUPPLY_TYPE_USB_HVDCP) {
+ if (apsd_result->bit & (QC_2P0_BIT | QC_3P0_BIT)) {
/* rerun APSD */
smblib_dbg(chg, PR_MISC, "rerun APSD\n");
smblib_masked_write(chg, CMD_APSD_REG,
@@ -596,7 +596,11 @@ static int smblib_usb_suspend_vote_callback(struct votable *votable, void *data,
{
struct smb_charger *chg = data;
- return smblib_set_usb_suspend(chg, suspend);
+ /* resume input if suspend is invalid */
+ if (suspend < 0)
+ suspend = 0;
+
+ return smblib_set_usb_suspend(chg, (bool)suspend);
}
static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
@@ -604,10 +608,11 @@ static int smblib_dc_suspend_vote_callback(struct votable *votable, void *data,
{
struct smb_charger *chg = data;
+ /* resume input if suspend is invalid */
if (suspend < 0)
- suspend = false;
+ suspend = 0;
- return smblib_set_dc_suspend(chg, suspend);
+ return smblib_set_dc_suspend(chg, (bool)suspend);
}
static int smblib_fcc_max_vote_callback(struct votable *votable, void *data,
@@ -2027,6 +2032,52 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
return rc;
}
+int smblib_reg_block_update(struct smb_charger *chg,
+ struct reg_info *entry)
+{
+ int rc = 0;
+
+ while (entry && entry->reg) {
+ rc = smblib_read(chg, entry->reg, &entry->bak);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in reading %s rc=%d\n",
+ entry->desc, rc);
+ break;
+ }
+ entry->bak &= entry->mask;
+
+ rc = smblib_masked_write(chg, entry->reg,
+ entry->mask, entry->val);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in writing %s rc=%d\n",
+ entry->desc, rc);
+ break;
+ }
+ entry++;
+ }
+
+ return rc;
+}
+
+int smblib_reg_block_restore(struct smb_charger *chg,
+ struct reg_info *entry)
+{
+ int rc = 0;
+
+ while (entry && entry->reg) {
+ rc = smblib_masked_write(chg, entry->reg,
+ entry->mask, entry->bak);
+ if (rc < 0) {
+ dev_err(chg->dev, "Error in writing %s rc=%d\n",
+ entry->desc, rc);
+ break;
+ }
+ entry++;
+ }
+
+ return rc;
+}
+
int smblib_set_prop_pd_in_hard_reset(struct smb_charger *chg,
const union power_supply_propval *val)
{
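smblib_reg_block_update walks a reg_info array terminated by a zero register, saving each masked original value into bak before applying val; smblib_reg_block_restore later writes those saved values back. An illustrative userspace model of the same save/modify/restore walk (register addresses and values below are hypothetical):

#include <stdint.h>
#include <stdio.h>

struct reg_info { uint16_t reg; uint8_t mask; uint8_t val; uint8_t bak; const char *desc; };

static uint8_t regs[0x2000];                           /* fake register file */

static void block_update(struct reg_info *e)
{
	for (; e && e->reg; e++) {
		e->bak = regs[e->reg] & e->mask;       /* save masked original */
		regs[e->reg] = (regs[e->reg] & ~e->mask) | (e->val & e->mask);
	}
}

static void block_restore(struct reg_info *e)
{
	for (; e && e->reg; e++)
		regs[e->reg] = (regs[e->reg] & ~e->mask) | (e->bak & e->mask);
}

int main(void)
{
	struct reg_info tbl[] = {
		{ 0x1370, 0x0f, 0x03, 0, "example reg A" },
		{ 0x1380, 0xf0, 0x20, 0, "example reg B" },
		{ 0 }                                  /* zero reg terminates the walk */
	};

	regs[0x1370] = 0x5a;
	block_update(tbl);
	printf("after update:  0x%02x\n", regs[0x1370]);   /* 0x53 */
	block_restore(tbl);
	printf("after restore: 0x%02x\n", regs[0x1370]);   /* 0x5a */
	return 0;
}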
@@ -2230,11 +2281,8 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
}
- if (!chg->dpdm_reg)
- goto skip_dpdm_float;
-
if (chg->vbus_present) {
- if (!regulator_is_enabled(chg->dpdm_reg)) {
+ if (chg->dpdm_reg && !regulator_is_enabled(chg->dpdm_reg)) {
smblib_dbg(chg, PR_MISC, "enabling DPDM regulator\n");
rc = regulator_enable(chg->dpdm_reg);
if (rc < 0)
@@ -2242,7 +2290,14 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
rc);
}
} else {
- if (regulator_is_enabled(chg->dpdm_reg)) {
+ if (chg->wa_flags & BOOST_BACK_WA) {
+ vote(chg->usb_suspend_votable,
+ BOOST_BACK_VOTER, false, 0);
+ vote(chg->dc_suspend_votable,
+ BOOST_BACK_VOTER, false, 0);
+ }
+
+ if (chg->dpdm_reg && regulator_is_enabled(chg->dpdm_reg)) {
smblib_dbg(chg, PR_MISC, "disabling DPDM regulator\n");
rc = regulator_disable(chg->dpdm_reg);
if (rc < 0)
@@ -2251,7 +2306,6 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
}
-skip_dpdm_float:
power_supply_changed(chg->usb_psy);
smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s %s\n",
irq_data->name, chg->vbus_present ? "attached" : "detached");
@@ -2683,6 +2737,39 @@ irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data)
return IRQ_HANDLED;
}
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data)
+{
+ struct smb_irq_data *irq_data = data;
+ struct smb_charger *chg = irq_data->parent_data;
+ int rc;
+ u8 stat;
+
+ if (!(chg->wa_flags & BOOST_BACK_WA))
+ return IRQ_HANDLED;
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ if ((stat & USE_USBIN_BIT) &&
+ get_effective_result(chg->usb_suspend_votable))
+ return IRQ_HANDLED;
+
+ if ((stat & USE_DCIN_BIT) &&
+ get_effective_result(chg->dc_suspend_votable))
+ return IRQ_HANDLED;
+
+ if (is_storming(&irq_data->storm_data)) {
+ smblib_dbg(chg, PR_MISC, "reverse boost detected; suspending input\n");
+ vote(chg->usb_suspend_votable, BOOST_BACK_VOTER, true, 0);
+ vote(chg->dc_suspend_votable, BOOST_BACK_VOTER, true, 0);
+ }
+
+ return IRQ_HANDLED;
+}
+
/***************
* Work Queues *
***************/
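The switcher-power-ok handler above only suspends the inputs when the interrupt is storming; the IRQ table earlier in this patch registers it with storm_data = {true, 1000, 3}, which presumably encodes detection enabled, a window in milliseconds, and a hit count (the is_storming() helper itself is not part of this diff). A generic count-within-window detector of that shape, purely for illustration:

#include <stdbool.h>
#include <stdint.h>

struct storm_state {
	uint64_t window_start_ms;
	unsigned int hits;
	unsigned int max_hits;                 /* e.g. 3 */
	uint64_t window_ms;                    /* e.g. 1000 */
};

static bool storm_hit(struct storm_state *s, uint64_t now_ms)
{
	if (now_ms - s->window_start_ms > s->window_ms) {
		s->window_start_ms = now_ms;   /* start a new window */
		s->hits = 0;
	}
	return ++s->hits > s->max_hits;        /* storming once the count is exceeded */
}

int main(void)
{
	struct storm_state s = { .max_hits = 3, .window_ms = 1000 };
	uint64_t t[] = { 0, 100, 200, 300 };   /* four hits inside one second */
	bool storming = false;

	for (unsigned int i = 0; i < 4; i++)
		storming = storm_hit(&s, t[i]);
	return storming ? 0 : 1;               /* storming by the fourth hit */
}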
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 4be06ffcfb25..2809ddadbd90 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -46,6 +46,7 @@ enum print_reason {
#define VBUS_CC_SHORT_VOTER "VBUS_CC_SHORT_VOTER"
#define LEGACY_CABLE_VOTER "LEGACY_CABLE_VOTER"
#define PD_INACTIVE_VOTER "PD_INACTIVE_VOTER"
+#define BOOST_BACK_VOTER "BOOST_BACK_VOTER"
enum smb_mode {
PARALLEL_MASTER = 0,
@@ -55,6 +56,7 @@ enum smb_mode {
enum {
QC_CHARGER_DETECTION_WA_BIT = BIT(0),
+ BOOST_BACK_WA = BIT(1),
};
struct smb_regulator {
@@ -116,6 +118,14 @@ struct smb_iio {
struct iio_channel *batt_i_chan;
};
+struct reg_info {
+ u16 reg;
+ u8 mask;
+ u8 val;
+ u8 bak;
+ const char *desc;
+};
+
struct smb_charger {
struct device *dev;
char *name;
@@ -240,6 +250,7 @@ irqreturn_t smblib_handle_icl_change(int irq, void *data);
irqreturn_t smblib_handle_usb_typec_change(int irq, void *data);
irqreturn_t smblib_handle_dc_plugin(int irq, void *data);
irqreturn_t smblib_handle_high_duty_cycle(int irq, void *data);
+irqreturn_t smblib_handle_switcher_power_ok(int irq, void *data);
int smblib_get_prop_input_suspend(struct smb_charger *chg,
union power_supply_propval *val);
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index a74fcf730a8c..2aed4cf294a2 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -184,6 +184,10 @@ enum {
#define CHARGE_INHIBIT_THRESHOLD_CFG_REG (CHGR_BASE + 0x72)
#define CHARGE_INHIBIT_THRESHOLD_MASK GENMASK(1, 0)
+#define CHARGE_INHIBIT_THRESHOLD_50MV 0
+#define CHARGE_INHIBIT_THRESHOLD_100MV 1
+#define CHARGE_INHIBIT_THRESHOLD_200MV 2
+#define CHARGE_INHIBIT_THRESHOLD_300MV 3
#define RECHARGE_THRESHOLD_CFG_REG (CHGR_BASE + 0x73)
#define RECHARGE_THRESHOLD_MASK GENMASK(1, 0)
diff --git a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
index e4c8f1f446df..a876484859eb 100644
--- a/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
+++ b/drivers/soc/qcom/msm_bus/msm_bus_dbg_voter.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -133,7 +133,7 @@ static ssize_t bus_floor_vote_store_api(struct device *dev,
return 0;
}
- if (sscanf(buf, "%s %llu", name, &vote_khz) != 2) {
+ if (sscanf(buf, "%9s %llu", name, &vote_khz) != 2) {
pr_err("%s:return error", __func__);
return -EINVAL;
}
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 31a5ae89174e..bf6b11194111 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -277,7 +277,8 @@ static int pil_mss_loadable_init(struct modem_data *drv,
q6->restart_reg_sec = true;
}
- q6->restart_reg = devm_ioremap_resource(&pdev->dev, res);
+ q6->restart_reg = devm_ioremap(&pdev->dev,
+ res->start, resource_size(res));
if (!q6->restart_reg)
return -ENOMEM;
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index e8969a5e533b..45ac48eb2241 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -35,6 +35,7 @@
struct apr_tx_buf {
struct list_head list;
+ struct apr_pkt_priv pkt_priv;
char buf[APR_MAX_BUF];
};
@@ -67,29 +68,28 @@ static char *svc_names[APR_DEST_MAX][APR_CLIENT_MAX] = {
static struct apr_svc_ch_dev
apr_svc_ch[APR_DL_MAX][APR_DEST_MAX][APR_CLIENT_MAX];
-static int apr_get_free_buf(int len, void **buf)
+static struct apr_tx_buf *apr_get_free_buf(int len)
{
struct apr_tx_buf *tx_buf;
unsigned long flags;
- if (!buf || len > APR_MAX_BUF) {
+ if (len > APR_MAX_BUF) {
pr_err("%s: buf too large [%d]\n", __func__, len);
- return -EINVAL;
+ return ERR_PTR(-EINVAL);
}
spin_lock_irqsave(&buf_list.lock, flags);
if (list_empty(&buf_list.list)) {
spin_unlock_irqrestore(&buf_list.lock, flags);
pr_err("%s: No buf available\n", __func__);
- return -ENOMEM;
+ return ERR_PTR(-ENOMEM);
}
tx_buf = list_first_entry(&buf_list.list, struct apr_tx_buf, list);
list_del(&tx_buf->list);
spin_unlock_irqrestore(&buf_list.lock, flags);
- *buf = tx_buf->buf;
- return 0;
+ return tx_buf;
}
static void apr_buf_add_tail(const void *buf)
@@ -130,16 +130,22 @@ int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
{
int rc = 0, retries = 0;
void *pkt_data = NULL;
+ struct apr_tx_buf *tx_buf;
+ struct apr_pkt_priv *pkt_priv_ptr = pkt_priv;
if (!apr_ch->handle || !pkt_priv)
return -EINVAL;
if (pkt_priv->pkt_owner == APR_PKT_OWNER_DRIVER) {
- rc = apr_get_free_buf(len, &pkt_data);
- if (rc)
+ tx_buf = apr_get_free_buf(len);
+ if (IS_ERR_OR_NULL(tx_buf)) {
+ rc = -EINVAL;
goto exit;
-
- memcpy(pkt_data, data, len);
+ }
+ memcpy(tx_buf->buf, data, len);
+ memcpy(&tx_buf->pkt_priv, pkt_priv, sizeof(tx_buf->pkt_priv));
+ pkt_priv_ptr = &tx_buf->pkt_priv;
+ pkt_data = tx_buf->buf;
} else {
pkt_data = data;
}
@@ -148,7 +154,7 @@ int apr_tal_write(struct apr_svc_ch_dev *apr_ch, void *data,
if (rc == -EAGAIN)
udelay(50);
- rc = __apr_tal_write(apr_ch, pkt_data, pkt_priv, len);
+ rc = __apr_tal_write(apr_ch, pkt_data, pkt_priv_ptr, len);
} while (rc == -EAGAIN && retries++ < APR_MAXIMUM_NUM_OF_RETRIES);
if (rc < 0) {
@@ -180,6 +186,28 @@ void apr_tal_notify_rx(void *handle, const void *priv, const void *pkt_priv,
glink_rx_done(apr_ch->handle, ptr, true);
}
+static void apr_tal_notify_tx_abort(void *handle, const void *priv,
+ const void *pkt_priv)
+{
+ struct apr_pkt_priv *apr_pkt_priv_ptr =
+ (struct apr_pkt_priv *)pkt_priv;
+ struct apr_tx_buf *list_node;
+
+ if (!apr_pkt_priv_ptr) {
+ pr_err("%s: Invalid pkt_priv\n", __func__);
+ return;
+ }
+
+ pr_debug("%s: tx_abort received for apr_pkt_priv_ptr:%pK\n",
+ __func__, apr_pkt_priv_ptr);
+
+ if (apr_pkt_priv_ptr->pkt_owner == APR_PKT_OWNER_DRIVER) {
+ list_node = container_of(apr_pkt_priv_ptr,
+ struct apr_tx_buf, pkt_priv);
+ apr_buf_add_tail(list_node->buf);
+ }
+}
+
void apr_tal_notify_tx_done(void *handle, const void *priv,
const void *pkt_priv, const void *ptr)
{
@@ -315,6 +343,7 @@ struct apr_svc_ch_dev *apr_tal_open(uint32_t clnt, uint32_t dest, uint32_t dl,
open_cfg.notify_state = apr_tal_notify_state;
open_cfg.notify_rx_intent_req = apr_tal_notify_rx_intent_req;
open_cfg.notify_remote_rx_intent = apr_tal_notify_remote_rx_intent;
+ open_cfg.notify_tx_abort = apr_tal_notify_tx_abort;
open_cfg.priv = apr_ch;
open_cfg.transport = "smem";
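Because apr_tal_write now copies pkt_priv into the apr_tx_buf it hands to glink, the tx-abort callback registered above can recover the whole buffer with container_of() and return it to the free list. A standalone userspace demonstration of that recovery pattern (the list bookkeeping and the real struct layout with its list_head are omitted):

#include <stddef.h>
#include <stdio.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct pkt_priv { int pkt_owner; };
struct tx_buf   { struct pkt_priv pkt_priv; char buf[32]; };

int main(void)
{
	struct tx_buf node = { .pkt_priv = { .pkt_owner = 1 }, .buf = "payload" };
	struct pkt_priv *priv = &node.pkt_priv;         /* what the callback receives */
	struct tx_buf *recovered = container_of(priv, struct tx_buf, pkt_priv);

	printf("%s\n", recovered->buf);                 /* prints "payload" */
	return 0;
}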
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 24018c544b06..2b708732760f 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -149,7 +149,7 @@ static void service_locator_recv_msg(struct work_struct *work)
do {
pr_debug("Notified about a Receive event\n");
ret = qmi_recv_msg(service_locator.clnt_handle);
- if (ret != -ENOMSG)
+ if (ret < 0)
pr_err("Error receiving message rc:%d. Retrying...\n",
ret);
} while (ret == 0);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index 8cba88742cb8..a244bc168136 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -162,7 +162,7 @@ static void root_service_clnt_recv_msg(struct work_struct *work)
data->instance_id);
} while ((ret = qmi_recv_msg(data->clnt_handle)) == 0);
- pr_info("Notified about a Receive event (instance-id: %d)\n",
+ pr_debug("Notified about a Receive event (instance-id: %d)\n",
data->instance_id);
}
@@ -227,7 +227,8 @@ static void root_service_service_ind_cb(struct qmi_handle *handle,
struct qmi_client_info *data = (struct qmi_client_info *)ind_cb_priv;
struct service_notif_info *service_notif;
struct msg_desc ind_desc;
- struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg;
+ struct qmi_servreg_notif_state_updated_ind_msg_v01 ind_msg = {
+ QMI_STATE_MIN_VAL, "", 0xFFFF };
int rc;
ind_desc.msg_id = SERVREG_NOTIF_STATE_UPDATED_IND_MSG;
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index ac298e632d73..29dc6ab252b1 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -1789,6 +1789,8 @@ void xhci_free_command(struct xhci_hcd *xhci,
int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
{
int size;
+ u32 iman_reg;
+ u64 erdp_reg;
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
struct device *dev = xhci_to_hcd(xhci)->self.controller;
@@ -1800,14 +1802,38 @@ int xhci_sec_event_ring_cleanup(struct usb_hcd *hcd, unsigned intr_num)
size =
sizeof(struct xhci_erst_entry)*(xhci->sec_erst[intr_num].num_entries);
- if (xhci->sec_erst[intr_num].entries)
+ if (xhci->sec_erst[intr_num].entries) {
+ /*
+ * disable the irq, ack any pending interrupt and clear EHB so the
+ * xHC can generate an interrupt again when a new event ring is set up
+ */
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg &= ~IMAN_IE;
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+ iman_reg =
+ readl_relaxed(&xhci->sec_ir_set[intr_num]->irq_pending);
+ if (iman_reg & IMAN_IP)
+ writel_relaxed(iman_reg,
+ &xhci->sec_ir_set[intr_num]->irq_pending);
+ /* make sure IP gets cleared before clearing EHB */
+ mb();
+
+ erdp_reg = xhci_read_64(xhci,
+ &xhci->sec_ir_set[intr_num]->erst_dequeue);
+ xhci_write_64(xhci, erdp_reg | ERST_EHB,
+ &xhci->sec_ir_set[intr_num]->erst_dequeue);
+
dma_free_coherent(dev, size, xhci->sec_erst[intr_num].entries,
xhci->sec_erst[intr_num].erst_dma_addr);
- xhci->sec_erst[intr_num].entries = NULL;
+ xhci->sec_erst[intr_num].entries = NULL;
+ }
xhci_dbg_trace(xhci, trace_xhci_dbg_init, "Freed SEC ERST#%d",
intr_num);
if (xhci->sec_event_ring[intr_num])
xhci_ring_free(xhci, xhci->sec_event_ring[intr_num]);
+
xhci->sec_event_ring[intr_num] = NULL;
xhci_dbg_trace(xhci, trace_xhci_dbg_init,
"Freed sec event ring");
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 1e93a5b2e9ba..5a24a1995af9 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -178,6 +178,7 @@ enum mdss_hw_capabilities {
MDSS_CAPS_CWB_SUPPORTED,
MDSS_CAPS_MDP_VOTE_CLK_NOT_SUPPORTED,
MDSS_CAPS_AVR_SUPPORTED,
+ MDSS_CAPS_SEC_DETACH_SMMU,
MDSS_CAPS_MAX,
};
@@ -221,6 +222,7 @@ struct mdss_smmu_client {
bool domain_attached;
bool handoff_pending;
void __iomem *mmu_base;
+ int domain;
};
struct mdss_mdp_qseed3_lut_tbl {
@@ -327,6 +329,7 @@ struct mdss_data_type {
u32 wfd_mode;
u32 has_no_lut_read;
atomic_t sd_client_count;
+ atomic_t sc_client_count;
u8 has_wb_ad;
u8 has_non_scalar_rgb;
bool has_src_split;
@@ -519,6 +522,8 @@ struct mdss_data_type {
u32 max_dest_scaler_input_width;
u32 max_dest_scaler_output_width;
struct mdss_mdp_destination_scaler *ds;
+ u32 sec_disp_en;
+ u32 sec_cam_en;
};
extern struct mdss_data_type *mdss_res;
@@ -579,6 +584,14 @@ static inline int mdss_get_sd_client_cnt(void)
return atomic_read(&mdss_res->sd_client_count);
}
+static inline int mdss_get_sc_client_cnt(void)
+{
+ if (!mdss_res)
+ return 0;
+ else
+ return atomic_read(&mdss_res->sc_client_count);
+}
+
static inline void mdss_set_quirk(struct mdss_data_type *mdata,
enum mdss_hw_quirk bit)
{
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index 9014e3a02d21..f9dba99a653d 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -527,8 +527,8 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt)
* Any changes in the section of code should
* consider this limitation.
*/
- min_link_rate = pinfo->clk_rate
- / (lane_cnt * encoding_factx10);
+ min_link_rate = (u32)div_u64(pinfo->clk_rate,
+ (lane_cnt * encoding_factx10));
min_link_rate /= ln_to_link_ratio;
min_link_rate = (min_link_rate * pinfo->bpp);
min_link_rate = (u32)div_u64(min_link_rate * 10,
diff --git a/drivers/video/fbdev/msm/mdss_dsi.h b/drivers/video/fbdev/msm/mdss_dsi.h
index 7091dc2f38b9..3536cb2d294d 100644
--- a/drivers/video/fbdev/msm/mdss_dsi.h
+++ b/drivers/video/fbdev/msm/mdss_dsi.h
@@ -468,6 +468,7 @@ struct mdss_dsi_ctrl_pdata {
bool cmd_sync_wait_trigger;
struct mdss_rect roi;
+ struct mdss_dsi_dual_pu_roi dual_roi;
struct pwm_device *pwm_bl;
u32 pclk_rate;
u32 byte_clk_rate;
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index bd0c2ad32c05..7c36bb627043 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -447,28 +447,82 @@ static int mdss_dsi_roi_merge(struct mdss_dsi_ctrl_pdata *ctrl,
static char caset[] = {0x2a, 0x00, 0x00, 0x03, 0x00}; /* DTYPE_DCS_LWRITE */
static char paset[] = {0x2b, 0x00, 0x00, 0x05, 0x00}; /* DTYPE_DCS_LWRITE */
+/*
+ * Some panels can support multiple ROIs as part of the below commands
+ */
+static char caset_dual[] = {0x2a, 0x00, 0x00, 0x03, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00};/* DTYPE_DCS_LWRITE */
+static char paset_dual[] = {0x2b, 0x00, 0x00, 0x05, 0x00, 0x03,
+ 0x00, 0x00, 0x00, 0x00};/* DTYPE_DCS_LWRITE */
+
/* pack into one frame before sent */
static struct dsi_cmd_desc set_col_page_addr_cmd[] = {
{{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset)}, caset}, /* packed */
{{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset)}, paset},
};
+/* pack into one frame before sent */
+static struct dsi_cmd_desc set_dual_col_page_addr_cmd[] = { /*packed*/
+ {{DTYPE_DCS_LWRITE, 0, 0, 0, 1, sizeof(caset_dual)}, caset_dual},
+ {{DTYPE_DCS_LWRITE, 1, 0, 0, 1, sizeof(paset_dual)}, paset_dual},
+};
+
+
+static void __mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
+ struct mdss_rect *roi, bool dual_roi)
+{
+ if (dual_roi) {
+ struct mdss_rect *first, *second;
+
+ first = &ctrl->panel_data.panel_info.dual_roi.first_roi;
+ second = &ctrl->panel_data.panel_info.dual_roi.second_roi;
+
+ caset_dual[1] = (((first->x) & 0xFF00) >> 8);
+ caset_dual[2] = (((first->x) & 0xFF));
+ caset_dual[3] = (((first->x - 1 + first->w) & 0xFF00) >> 8);
+ caset_dual[4] = (((first->x - 1 + first->w) & 0xFF));
+ /* skip the MPU setting byte */
+ caset_dual[6] = (((second->x) & 0xFF00) >> 8);
+ caset_dual[7] = (((second->x) & 0xFF));
+ caset_dual[8] = (((second->x - 1 + second->w) & 0xFF00) >> 8);
+ caset_dual[9] = (((second->x - 1 + second->w) & 0xFF));
+ set_dual_col_page_addr_cmd[0].payload = caset_dual;
+
+ paset_dual[1] = (((first->y) & 0xFF00) >> 8);
+ paset_dual[2] = (((first->y) & 0xFF));
+ paset_dual[3] = (((first->y - 1 + first->h) & 0xFF00) >> 8);
+ paset_dual[4] = (((first->y - 1 + first->h) & 0xFF));
+ /* skip the MPU setting byte */
+ paset_dual[6] = (((second->y) & 0xFF00) >> 8);
+ paset_dual[7] = (((second->y) & 0xFF));
+ paset_dual[8] = (((second->y - 1 + second->h) & 0xFF00) >> 8);
+ paset_dual[9] = (((second->y - 1 + second->h) & 0xFF));
+ set_dual_col_page_addr_cmd[1].payload = paset_dual;
+ } else {
+ caset[1] = (((roi->x) & 0xFF00) >> 8);
+ caset[2] = (((roi->x) & 0xFF));
+ caset[3] = (((roi->x - 1 + roi->w) & 0xFF00) >> 8);
+ caset[4] = (((roi->x - 1 + roi->w) & 0xFF));
+ set_col_page_addr_cmd[0].payload = caset;
+
+ paset[1] = (((roi->y) & 0xFF00) >> 8);
+ paset[2] = (((roi->y) & 0xFF));
+ paset[3] = (((roi->y - 1 + roi->h) & 0xFF00) >> 8);
+ paset[4] = (((roi->y - 1 + roi->h) & 0xFF));
+ set_col_page_addr_cmd[1].payload = paset;
+ }
+ pr_debug("%s Sending 2A 2B cmnd with dual_roi=%d\n", __func__,
+ dual_roi);
+
+}
static void mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
struct mdss_rect *roi, int unicast)
{
struct dcs_cmd_req cmdreq;
+ struct mdss_panel_info *pinfo = &ctrl->panel_data.panel_info;
+ bool dual_roi = pinfo->dual_roi.enabled;
- caset[1] = (((roi->x) & 0xFF00) >> 8);
- caset[2] = (((roi->x) & 0xFF));
- caset[3] = (((roi->x - 1 + roi->w) & 0xFF00) >> 8);
- caset[4] = (((roi->x - 1 + roi->w) & 0xFF));
- set_col_page_addr_cmd[0].payload = caset;
-
- paset[1] = (((roi->y) & 0xFF00) >> 8);
- paset[2] = (((roi->y) & 0xFF));
- paset[3] = (((roi->y - 1 + roi->h) & 0xFF00) >> 8);
- paset[4] = (((roi->y - 1 + roi->h) & 0xFF));
- set_col_page_addr_cmd[1].payload = paset;
+ __mdss_dsi_send_col_page_addr(ctrl, roi, dual_roi);
memset(&cmdreq, 0, sizeof(cmdreq));
cmdreq.cmds_cnt = 2;
@@ -478,7 +532,9 @@ static void mdss_dsi_send_col_page_addr(struct mdss_dsi_ctrl_pdata *ctrl,
cmdreq.rlen = 0;
cmdreq.cb = NULL;
- cmdreq.cmds = set_col_page_addr_cmd;
+ /* Send default or dual roi 2A/2B cmd */
+ cmdreq.cmds = dual_roi ? set_dual_col_page_addr_cmd :
+ set_col_page_addr_cmd;
mdss_dsi_cmdlist_put(ctrl, &cmdreq);
}
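The helper above packs 16-bit start and end coordinates big-endian into the DCS 2A/2B payloads; the dual-ROI variants simply append a second column/page range after the MPU setting byte. A worked single-ROI example of the CASET byte layout (values chosen only for illustration):

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint16_t x = 120, w = 480;                 /* example ROI: columns 120..599 */
	uint8_t caset[5] = { 0x2a };

	caset[1] = (x & 0xFF00) >> 8;
	caset[2] = x & 0xFF;
	caset[3] = ((x - 1 + w) & 0xFF00) >> 8;
	caset[4] = (x - 1 + w) & 0xFF;

	printf("CASET: %02x %02x%02x %02x%02x\n",
	       caset[0], caset[1], caset[2], caset[3], caset[4]);
	/* prints: CASET: 2a 0078 0257  (start 120, end 599) */
	return 0;
}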
@@ -1837,20 +1893,28 @@ error:
pinfo->esd_check_enabled = false;
}
-static int mdss_dsi_parse_panel_features(struct device_node *np,
- struct mdss_dsi_ctrl_pdata *ctrl)
+static void mdss_dsi_parse_partial_update_caps(struct device_node *np,
+ struct mdss_dsi_ctrl_pdata *ctrl)
{
struct mdss_panel_info *pinfo;
-
- if (!np || !ctrl) {
- pr_err("%s: Invalid arguments\n", __func__);
- return -ENODEV;
- }
+ const char *data;
pinfo = &ctrl->panel_data.panel_info;
- pinfo->partial_update_supported = of_property_read_bool(np,
- "qcom,partial-update-enabled");
+ data = of_get_property(np, "qcom,partial-update-enabled", NULL);
+ if (data && !strcmp(data, "single_roi"))
+ pinfo->partial_update_supported =
+ PU_SINGLE_ROI;
+ else if (data && !strcmp(data, "dual_roi"))
+ pinfo->partial_update_supported =
+ PU_DUAL_ROI;
+ else if (data && !strcmp(data, "none"))
+ pinfo->partial_update_supported =
+ PU_NOT_SUPPORTED;
+ else
+ pinfo->partial_update_supported =
+ PU_NOT_SUPPORTED;
+
if (pinfo->mipi.mode == DSI_CMD_MODE) {
pinfo->partial_update_enabled = pinfo->partial_update_supported;
pr_info("%s: partial_update_enabled=%d\n", __func__,
@@ -1862,6 +1926,21 @@ static int mdss_dsi_parse_panel_features(struct device_node *np,
"qcom,partial-update-roi-merge");
}
}
+}
+
+static int mdss_dsi_parse_panel_features(struct device_node *np,
+ struct mdss_dsi_ctrl_pdata *ctrl)
+{
+ struct mdss_panel_info *pinfo;
+
+ if (!np || !ctrl) {
+ pr_err("%s: Invalid arguments\n", __func__);
+ return -ENODEV;
+ }
+
+ pinfo = &ctrl->panel_data.panel_info;
+
+ mdss_dsi_parse_partial_update_caps(np, ctrl);
pinfo->dcs_cmd_by_left = of_property_read_bool(np,
"qcom,dcs-cmd-by-left");
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index bcd23d3c19f2..08e06c75522a 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -564,7 +564,7 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
"min_fps=%d\nmax_fps=%d\npanel_name=%s\n"
"primary_panel=%d\nis_pluggable=%d\ndisplay_id=%s\n"
"is_cec_supported=%d\nis_pingpong_split=%d\n"
- "dfps_porch_mode=%d\n",
+ "dfps_porch_mode=%d\npu_roi_cnt=%d\ndual_dsi=%d",
pinfo->partial_update_enabled,
pinfo->roi_alignment.xstart_pix_align,
pinfo->roi_alignment.width_pix_align,
@@ -577,7 +577,8 @@ static ssize_t mdss_fb_get_panel_info(struct device *dev,
pinfo->panel_name, pinfo->is_prim_panel,
pinfo->is_pluggable, pinfo->display_id,
pinfo->is_cec_supported, is_pingpong_split(mfd),
- dfps_porch_mode);
+ dfps_porch_mode, pinfo->partial_update_enabled,
+ is_panel_split(mfd));
return ret;
}
@@ -3282,6 +3283,7 @@ int mdss_fb_atomic_commit(struct fb_info *info,
mfd->msm_fb_backup.atomic_commit = true;
mfd->msm_fb_backup.disp_commit.l_roi = commit_v1->left_roi;
mfd->msm_fb_backup.disp_commit.r_roi = commit_v1->right_roi;
+ mfd->msm_fb_backup.disp_commit.flags = commit_v1->flags;
mutex_lock(&mfd->mdp_sync_pt_data.sync_mutex);
atomic_inc(&mfd->mdp_sync_pt_data.commit_cnt);
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 04e8fa4ba576..1dae41391795 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -47,6 +47,8 @@
#include <linux/msm-bus-board.h>
#include <soc/qcom/scm.h>
#include <soc/qcom/rpm-smd.h>
+#include "soc/qcom/secure_buffer.h"
+#include <asm/cacheflush.h>
#include "mdss.h"
#include "mdss_fb.h"
@@ -64,6 +66,8 @@
#define RES_1080p (1088*1920)
#define RES_UHD (3840*2160)
+#define MDP_DEVICE_ID 0x1A
+
struct mdss_data_type *mdss_res;
static u32 mem_protect_sd_ctrl_id;
@@ -87,6 +91,7 @@ struct msm_mdp_interface mdp5 = {
#define MEM_PROTECT_SD_CTRL 0xF
#define MEM_PROTECT_SD_CTRL_FLAT 0x14
+#define MEM_PROTECT_SD_CTRL_SWITCH 0x18
static DEFINE_SPINLOCK(mdp_lock);
static DEFINE_SPINLOCK(mdss_mdp_intr_lock);
@@ -1329,7 +1334,9 @@ int mdss_iommu_ctrl(int enable)
if (mdata->iommu_ref_cnt == 0) {
rc = mdss_smmu_detach(mdata);
if (mdss_has_quirk(mdata,
- MDSS_QUIRK_MIN_BUS_VOTE))
+ MDSS_QUIRK_MIN_BUS_VOTE) &&
+ (!mdata->sec_disp_en ||
+ !mdata->sec_cam_en))
mdss_bus_scale_set_quota(MDSS_HW_RT,
0, 0);
}
@@ -1985,6 +1992,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdata->pixel_ram_size = 50 * 1024;
mdata->rects_per_sspp[MDSS_MDP_PIPE_TYPE_DMA] = 2;
+ mem_protect_sd_ctrl_id = MEM_PROTECT_SD_CTRL_SWITCH;
set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
set_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map);
set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
@@ -2015,6 +2023,7 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
mdata->has_wb_ubwc = true;
set_bit(MDSS_CAPS_10_BIT_SUPPORTED, mdata->mdss_caps_map);
set_bit(MDSS_CAPS_AVR_SUPPORTED, mdata->mdss_caps_map);
+ set_bit(MDSS_CAPS_SEC_DETACH_SMMU, mdata->mdss_caps_map);
break;
default:
mdata->max_target_zorder = 4; /* excluding base layer */
@@ -4939,29 +4948,115 @@ static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
}
}
-int mdss_mdp_secure_display_ctrl(unsigned int enable)
+int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags)
{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
struct sd_ctrl_req {
unsigned int enable;
} __attribute__ ((__packed__)) request;
unsigned int resp = -1;
int ret = 0;
+ uint32_t sid_info;
struct scm_desc desc;
- desc.args[0] = request.enable = enable;
- desc.arginfo = SCM_ARGS(1);
+ if (test_bit(MDSS_CAPS_SEC_DETACH_SMMU, mdata->mdss_caps_map)) {
+ /*
+ * Prepare syscall to hypervisor to switch the secure_vmid
+ * between secure and non-secure contexts
+ */
+ /* MDP secure SID */
+ sid_info = 0x1;
+ desc.arginfo = SCM_ARGS(4, SCM_VAL, SCM_RW, SCM_VAL, SCM_VAL);
+ desc.args[0] = MDP_DEVICE_ID;
+ desc.args[1] = SCM_BUFFER_PHYS(&sid_info);
+ desc.args[2] = sizeof(uint32_t);
+
- if (!is_scm_armv8()) {
- ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
- &request, sizeof(request), &resp, sizeof(resp));
- } else {
- ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ pr_debug("Enable/Disable: %d, Flags %llx\n", enable, flags);
+ if (enable) {
+ if (flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
+ desc.args[3] = VMID_CP_SEC_DISPLAY;
+ mdata->sec_disp_en = 1;
+ } else if (flags & MDP_SECURE_CAMERA_OVERLAY_SESSION) {
+ desc.args[3] = VMID_CP_CAMERA_PREVIEW;
+ mdata->sec_cam_en = 1;
+ } else {
+ return 0;
+ }
+
+ /* detach smmu contexts */
+ ret = mdss_smmu_detach(mdata);
+ if (ret) {
+ pr_err("Error while detaching smmu contexts ret = %d\n",
+ ret);
+ return -EINVAL;
+ }
+
+ /* let the driver think smmu is still attached */
+ mdata->iommu_attached = true;
+
+ dmac_flush_range(&sid_info, &sid_info + 1);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
mem_protect_sd_ctrl_id), &desc);
- resp = desc.ret[0];
- }
+ if (ret) {
+ pr_err("Error scm_call MEM_PROTECT_SD_CTRL(%u) ret=%dm resp=%x\n",
+ enable, ret, resp);
+ return -EINVAL;
+ }
+ resp = desc.ret[0];
- pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x",
+ pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x\n",
+ enable, ret, resp);
+ } else {
+ desc.args[3] = VMID_CP_PIXEL;
+ if (flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
+ mdata->sec_disp_en = 0;
+ else if (flags & MDP_SECURE_CAMERA_OVERLAY_SESSION)
+ mdata->sec_cam_en = 0;
+
+ dmac_flush_range(&sid_info, &sid_info + 1);
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ mem_protect_sd_ctrl_id), &desc);
+ if (ret)
+ MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl",
+ "dsi0_phy", "dsi1_ctrl",
+ "dsi1_phy", "vbif", "vbif_nrt",
+ "dbg_bus", "vbif_dbg_bus",
+ "panic");
+ resp = desc.ret[0];
+
+ pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x\n",
+ enable, ret, resp);
+
+ /* re-attach smmu contexts */
+ mdata->iommu_attached = false;
+ ret = mdss_smmu_attach(mdata);
+ if (ret) {
+ pr_err("Error while attaching smmu contexts ret = %d\n",
+ ret);
+ return -EINVAL;
+ }
+ }
+ MDSS_XLOG(enable);
+ } else {
+ desc.args[0] = request.enable = enable;
+ desc.arginfo = SCM_ARGS(1);
+
+ if (!is_scm_armv8()) {
+ ret = scm_call(SCM_SVC_MP, MEM_PROTECT_SD_CTRL,
+ &request,
+ sizeof(request),
+ &resp,
+ sizeof(resp));
+ } else {
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_MP,
+ mem_protect_sd_ctrl_id), &desc);
+ resp = desc.ret[0];
+ }
+
+ pr_debug("scm_call MEM_PROTECT_SD_CTRL(%u): ret=%d, resp=%x\n",
enable, ret, resp);
+ }
if (ret)
return ret;
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 93f5f9a51a63..e1c3841c82de 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -92,6 +92,8 @@
*/
#define ENABLE_PIXEL_EXT_ONLY 0x80000000
+/* Pipe flag to indicate this pipe contains secure camera buffer */
+#define MDP_SECURE_CAMERA_OVERLAY_SESSION 0x100000000
/**
* Destination Scaler control flags setting
*
@@ -562,6 +564,8 @@ struct mdss_mdp_mixer {
bool valid_roi;
bool roi_changed;
struct mdss_rect roi;
+ bool dsc_enabled;
+ bool dsc_merge_enabled;
u8 cursor_enabled;
u16 cursor_hotx;
@@ -627,7 +631,7 @@ struct mdss_mdp_img_data {
dma_addr_t addr;
unsigned long len;
u32 offset;
- u32 flags;
+ u64 flags;
u32 dir;
u32 domain;
bool mapped;
@@ -811,7 +815,7 @@ struct mdss_mdp_pipe {
struct file *file;
bool is_handed_off;
- u32 flags;
+ u64 flags;
u32 bwc_mode;
/* valid only when pipe's output is crossing both layer mixers */
@@ -827,6 +831,9 @@ struct mdss_mdp_pipe {
struct mdss_mdp_format_params *src_fmt;
struct mdss_mdp_plane_sizes src_planes;
+ /* flag to restore the roi in case of a pu dual-roi validation error */
+ bool restore_roi;
+
/* compression ratio from the source format */
struct mult_factor comp_ratio;
@@ -916,6 +923,7 @@ struct mdss_overlay_private {
u32 splash_mem_addr;
u32 splash_mem_size;
u32 sd_enabled;
+ u32 sc_enabled;
struct sw_sync_timeline *vsync_timeline;
struct mdss_mdp_vsync_handler vsync_retire_handler;
@@ -1289,6 +1297,15 @@ static inline void mdss_update_sd_client(struct mdss_data_type *mdata,
atomic_add_unless(&mdss_res->sd_client_count, -1, 0);
}
+static inline void mdss_update_sc_client(struct mdss_data_type *mdata,
+ bool status)
+{
+ if (status)
+ atomic_inc(&mdata->sc_client_count);
+ else
+ atomic_add_unless(&mdss_res->sc_client_count, -1, 0);
+}
+
static inline int mdss_mdp_get_wb_ctl_support(struct mdss_data_type *mdata,
bool rotator_session)
{
@@ -1506,6 +1523,7 @@ static inline bool mdss_mdp_is_map_needed(struct mdss_data_type *mdata,
struct mdss_mdp_img_data *data)
{
u32 is_secure_ui = data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION;
+ u64 is_secure_camera = data->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION;
/*
* For ULT Targets we need SMMU Map, to issue map call for secure Display.
@@ -1513,6 +1531,10 @@ static inline bool mdss_mdp_is_map_needed(struct mdss_data_type *mdata,
if (is_secure_ui && !mdss_has_quirk(mdata, MDSS_QUIRK_NEED_SECURE_MAP))
return false;
+ if (is_secure_camera && test_bit(MDSS_CAPS_SEC_DETACH_SMMU,
+ mdata->mdss_caps_map))
+ return false;
+
return true;
}
@@ -1569,7 +1591,7 @@ unsigned long mdss_mdp_get_clk_rate(u32 clk_idx, bool locked);
int mdss_mdp_vsync_clk_enable(int enable, bool locked);
void mdss_mdp_clk_ctrl(int enable);
struct mdss_data_type *mdss_mdp_get_mdata(void);
-int mdss_mdp_secure_display_ctrl(unsigned int enable);
+int mdss_mdp_secure_session_ctrl(unsigned int enable, u64 flags);
int mdss_mdp_overlay_init(struct msm_fb_data_type *mfd);
int mdss_mdp_dfps_update_params(struct msm_fb_data_type *mfd,
@@ -1603,7 +1625,7 @@ int mdss_mdp_overlay_start(struct msm_fb_data_type *mfd);
void mdss_mdp_overlay_set_chroma_sample(
struct mdss_mdp_pipe *pipe);
int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
- u32 flags);
+ u64 flags);
int mdss_mdp_overlay_setup_scaling(struct mdss_mdp_pipe *pipe);
struct mdss_mdp_pipe *mdss_mdp_pipe_assign(struct mdss_data_type *mdata,
struct mdss_mdp_mixer *mixer, u32 ndx,
@@ -1836,7 +1858,7 @@ struct mult_factor *mdss_mdp_get_comp_factor(u32 format,
int mdss_mdp_data_map(struct mdss_mdp_data *data, bool rotator, int dir);
void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir);
int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
- struct msmfb_data *planes, int num_planes, u32 flags,
+ struct msmfb_data *planes, int num_planes, u64 flags,
struct device *dev, bool rotator, int dir,
struct mdp_layer_buffer *buffer);
u32 mdss_get_panel_framerate(struct msm_fb_data_type *mfd);
@@ -1847,7 +1869,7 @@ void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
const struct mdss_rect *sci_rect);
void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
struct mdss_rect *dst_rect,
- const struct mdss_rect *sci_rect);
+ const struct mdss_rect *sci_rect, bool normalize);
void rect_copy_mdss_to_mdp(struct mdp_rect *user, struct mdss_rect *kernel);
void rect_copy_mdp_to_mdss(struct mdp_rect *user, struct mdss_rect *kernel);
bool mdss_rect_overlap_check(struct mdss_rect *rect1, struct mdss_rect *rect2);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 1d61653b76bb..9ed44937efe6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -728,7 +728,7 @@ int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
/* crop rectangles */
if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
- mdss_mdp_crop_rect(&src, &dst, roi);
+ mdss_mdp_crop_rect(&src, &dst, roi, true);
/*
* when doing vertical decimation lines will be skipped, hence there is
@@ -1108,7 +1108,7 @@ int mdss_mdp_perf_calc_pipe(struct mdss_mdp_pipe *pipe,
/* crop rectangles */
if (roi && !mixer->ctl->is_video_mode && !pipe->src_split_req)
- mdss_mdp_crop_rect(&src, &dst, roi);
+ mdss_mdp_crop_rect(&src, &dst, roi, true);
pr_debug("v_total=%d, xres=%d fps=%d\n", v_total, xres, fps);
pr_debug("src(w,h)(%d,%d) dst(w,h)(%d,%d) dst_y=%d bpp=%d yuv=%d\n",
@@ -2743,6 +2743,7 @@ static inline void __dsc_enable(struct mdss_mdp_mixer *mixer)
{
mdss_mdp_pingpong_write(mixer->pingpong_base,
MDSS_MDP_REG_PP_DSC_MODE, 1);
+ mixer->dsc_enabled = true;
}
static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
@@ -2762,6 +2763,13 @@ static inline void __dsc_disable(struct mdss_mdp_mixer *mixer)
return;
}
writel_relaxed(0, offset + MDSS_MDP_REG_DSC_COMMON_MODE);
+ mixer->dsc_enabled = false;
+ mixer->dsc_merge_enabled = false;
+}
+
+static bool __is_dsc_merge_enabled(u32 common_mode)
+{
+ return common_mode & BIT(1);
}
static void __dsc_config(struct mdss_mdp_mixer *mixer,
@@ -2774,6 +2782,7 @@ static void __dsc_config(struct mdss_mdp_mixer *mixer,
u32 initial_lines = dsc->initial_lines;
bool is_cmd_mode = !(mode & BIT(2));
+ mixer->dsc_merge_enabled = __is_dsc_merge_enabled(mode);
data = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_DCE_DATA_OUT_SWAP);
data |= BIT(18); /* endian flip */
@@ -2928,11 +2937,6 @@ static void __dsc_config_thresh(struct mdss_mdp_mixer *mixer,
}
}
-static bool __is_dsc_merge_enabled(u32 common_mode)
-{
- return common_mode & BIT(1);
-}
-
static bool __dsc_is_3d_mux_enabled(struct mdss_mdp_ctl *ctl,
struct mdss_panel_info *pinfo)
{
@@ -3291,8 +3295,19 @@ void mdss_mdp_ctl_dsc_setup(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_ctl *sctl;
struct mdss_panel_info *spinfo;
- if (!is_dsc_compression(pinfo))
+ /*
+ * Check for dynamic resolution switch from DSC On to DSC Off
+ * and disable DSC
+ */
+ if ((ctl->pending_mode_switch == SWITCH_RESOLUTION) &&
+ ctl->is_master &&
+ (!is_dsc_compression(pinfo))) {
+ if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+ __dsc_disable(ctl->mixer_left);
+ if (ctl->mixer_right && ctl->mixer_right->dsc_enabled)
+ __dsc_disable(ctl->mixer_right);
return;
+ }
if (!ctl->is_master) {
pr_debug("skip slave ctl because master will program for both\n");
@@ -3703,6 +3718,30 @@ skip_intf_reconfig:
ctl->mixer_right->width = ctl->width / 2;
ctl->mixer_right->height = ctl->height;
}
+
+ /*
+ * If we are transitioning from DSC On + DSC Merge to DSC Off
+ * the 3D mux needs to be enabled
+ */
+ if (!is_dsc_compression(&pdata->panel_info) &&
+ ctl->mixer_left &&
+ ctl->mixer_left->dsc_enabled &&
+ ctl->mixer_left->dsc_merge_enabled) {
+ ctl->opmode |= MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+ MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT;
+ }
+
+ /*
+ * If we are transitioning from DSC Off to DSC On + DSC Merge
+ * the 3D mux needs to be disabled
+ */
+ if (is_dsc_compression(&pdata->panel_info) &&
+ ctl->mixer_left &&
+ !ctl->mixer_left->dsc_enabled &&
+ pdata->panel_info.dsc_enc_total != 1) {
+ ctl->opmode &= ~(MDSS_MDP_CTL_OP_PACK_3D_ENABLE |
+ MDSS_MDP_CTL_OP_PACK_3D_H_ROW_INT);
+ }
} else {
/*
* Handles MDP_SPLIT_MODE_NONE, MDP_DUAL_LM_DUAL_DISPLAY and
@@ -3717,7 +3756,6 @@ skip_intf_reconfig:
ctl->border_x_off = pdata->panel_info.lcdc.border_left;
ctl->border_y_off = pdata->panel_info.lcdc.border_top;
-
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_debug.c b/drivers/video/fbdev/msm/mdss_mdp_debug.c
index 4c4fa9ea98d0..711d2d222c7d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_debug.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_debug.c
@@ -1868,7 +1868,7 @@ static void __dump_pipe(struct seq_file *s, struct mdss_mdp_pipe *pipe)
int smps[4];
int i;
- seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%08x play_cnt=%u xin_id=%d\n",
+ seq_printf(s, "\nSSPP #%d type=%s ndx=%x flags=0x%16llx play_cnt=%u xin_id=%d\n",
pipe->num, mdss_mdp_pipetype2str(pipe->type),
pipe->ndx, pipe->flags, pipe->play_cnt, pipe->xin_id);
seq_printf(s, "\tstage=%d alpha=0x%x transp=0x%x blend_op=%d\n",
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 4eb121f01aca..f08af5d6edd3 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -2029,8 +2029,22 @@ static void mdss_mdp_cmd_dsc_reconfig(struct mdss_mdp_ctl *ctl)
return;
pinfo = &ctl->panel_data->panel_info;
- if (pinfo->compression_mode != COMPRESSION_DSC)
- return;
+ if (pinfo->compression_mode != COMPRESSION_DSC) {
+ /*
+ * Check for a dynamic resolution switch from DSC On to
+ * DSC Off and call mdss_mdp_ctl_dsc_setup to disable DSC
+ */
+ if (ctl->pending_mode_switch == SWITCH_RESOLUTION) {
+ if (ctl->mixer_left && ctl->mixer_left->dsc_enabled)
+ changed = true;
+ if (is_split_lm(ctl->mfd) &&
+ ctl->mixer_right &&
+ ctl->mixer_right->dsc_enabled)
+ changed = true;
+ } else {
+ return;
+ }
+ }
changed = ctl->mixer_left->roi_changed;
if (is_split_lm(ctl->mfd))
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 353d07ad64ac..20fcc26bb4bf 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -522,6 +522,56 @@ static void __update_avr_info(struct mdss_mdp_ctl *ctl,
}
/*
+ * __validate_dual_partial_update() - validation function for
+ * dual partial update ROIs
+ *
+ * - This function uses the commit structs "left_roi" and "right_roi"
+ * to pass the first and second ROI information for the multiple
+ * partial update feature.
+ * - Supports only SINGLE DSI with a max of 2 PU ROIs.
+ * - Not supported along with destination scalar.
+ * - Not supported when source-split is disabled.
+ * - Not supported with ping-pong split enabled.
+ */
+static int __validate_dual_partial_update(
+ struct mdss_mdp_ctl *ctl, struct mdp_layer_commit_v1 *commit)
+{
+ struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+ struct mdss_data_type *mdata = ctl->mdata;
+ struct mdss_rect first_roi, second_roi;
+ int ret = 0;
+ struct mdp_destination_scaler_data *ds_data = commit->dest_scaler;
+
+ if (!mdata->has_src_split
+ || (is_panel_split(ctl->mfd))
+ || (is_pingpong_split(ctl->mfd))
+ || (ds_data && commit->dest_scaler_cnt &&
+ ds_data->flags & MDP_DESTSCALER_ENABLE)) {
+ pr_err("Invalid mode multi pu src_split:%d, split_mode:%d, ds_cnt:%d\n",
+ mdata->has_src_split, ctl->mfd->split_mode,
+ commit->dest_scaler_cnt);
+ ret = -EINVAL;
+ goto end;
+ }
+
+ rect_copy_mdp_to_mdss(&commit->left_roi, &first_roi);
+ rect_copy_mdp_to_mdss(&commit->right_roi, &second_roi);
+
+ if (!is_valid_pu_dual_roi(pinfo, &first_roi, &second_roi))
+ ret = -EINVAL;
+
+ MDSS_XLOG(ctl->num, first_roi.x, first_roi.y, first_roi.w, first_roi.h,
+ second_roi.x, second_roi.y, second_roi.w, second_roi.h,
+ ret);
+ pr_debug("Multiple PU ROIs - roi0:{%d,%d,%d,%d}, roi1{%d,%d,%d,%d}, ret:%d\n",
+ first_roi.x, first_roi.y, first_roi.w, first_roi.h,
+ second_roi.x, second_roi.y, second_roi.w,
+ second_roi.h, ret);
+end:
+ return ret;
+}
+
+/*
* __layer_needs_src_split() - check needs source split configuration
* @layer: input layer
*
@@ -936,7 +986,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
{
int ret = 0;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
- u32 flags;
+ u64 flags;
struct mdss_mdp_mixer *mixer = NULL;
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
@@ -978,6 +1028,8 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
pipe->flags |= MDP_BWC_EN;
if (layer->flags & MDP_LAYER_PP)
pipe->flags |= MDP_OVERLAY_PP_CFG_EN;
+ if (layer->flags & MDP_LAYER_SECURE_CAMERA_SESSION)
+ pipe->flags |= MDP_SECURE_CAMERA_OVERLAY_SESSION;
pipe->scaler.enable = (layer->flags & SCALER_ENABLED);
pipe->is_fg = layer->flags & MDP_LAYER_FORGROUND;
@@ -1000,6 +1052,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
pipe->is_handed_off = false;
pipe->async_update = (layer->flags & MDP_LAYER_ASYNC) ? true : false;
pipe->csc_coeff_set = layer->color_space;
+ pipe->restore_roi = false;
if (mixer->ctl) {
pipe->dst.x += mixer->ctl->border_x_off;
@@ -1007,7 +1060,7 @@ static int __configure_pipe_params(struct msm_fb_data_type *mfd,
pr_debug("border{%d,%d}\n", mixer->ctl->border_x_off,
mixer->ctl->border_y_off);
}
- pr_debug("src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n",
+ pr_debug("pipe:%d src{%d,%d,%d,%d}, dst{%d,%d,%d,%d}\n", pipe->num,
pipe->src.x, pipe->src.y, pipe->src.w, pipe->src.h,
pipe->dst.x, pipe->dst.y, pipe->dst.w, pipe->dst.h);
@@ -1348,7 +1401,7 @@ static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
struct mdp_layer_buffer *buffer;
struct msmfb_data image;
int i, ret;
- u32 flags;
+ u64 flags;
struct mdss_mdp_validate_info_t *vitem;
for (i = 0; i < layer_count; i++) {
@@ -1374,7 +1427,8 @@ static struct mdss_mdp_data *__map_layer_buffer(struct msm_fb_data_type *mfd,
}
flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
- MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION));
if (buffer->planes[0].fd < 0) {
pr_err("invalid file descriptor for layer buffer\n");
@@ -1585,34 +1639,48 @@ end:
}
/*
- * __validate_secure_display() - validate secure display
+ * __validate_secure_session() - validate various secure sessions
*
* This function travers through used pipe list and checks if any pipe
- * is with secure display enabled flag. It fails if client tries to stage
- * unsecure content with secure display session.
+ * has the secure display, secure video or secure camera flag enabled.
+ * It fails if a client tries to stage non-secure content within a
+ * secure display session, or secure camera with secure video sessions.
*
*/
-static int __validate_secure_display(struct mdss_overlay_private *mdp5_data)
+static int __validate_secure_session(struct mdss_overlay_private *mdp5_data)
{
struct mdss_mdp_pipe *pipe, *tmp;
uint32_t sd_pipes = 0, nonsd_pipes = 0;
+ uint32_t secure_vid_pipes = 0, secure_cam_pipes = 0;
mutex_lock(&mdp5_data->list_lock);
list_for_each_entry_safe(pipe, tmp, &mdp5_data->pipes_used, list) {
if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
sd_pipes++;
+ else if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ secure_vid_pipes++;
+ else if (pipe->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION)
+ secure_cam_pipes++;
else
nonsd_pipes++;
}
mutex_unlock(&mdp5_data->list_lock);
- pr_debug("pipe count:: secure display:%d non-secure:%d\n",
- sd_pipes, nonsd_pipes);
+ pr_debug("pipe count:: secure display:%d non-secure:%d secure-vid:%d,secure-cam:%d\n",
+ sd_pipes, nonsd_pipes, secure_vid_pipes, secure_cam_pipes);
- if ((sd_pipes || mdss_get_sd_client_cnt()) && nonsd_pipes) {
+ if ((sd_pipes || mdss_get_sd_client_cnt()) &&
+ (nonsd_pipes || secure_vid_pipes ||
+ secure_cam_pipes)) {
pr_err("non-secure layer validation request during secure display session\n");
- pr_err(" secure client cnt:%d secure pipe cnt:%d non-secure pipe cnt:%d\n",
- mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes);
+ pr_err(" secure client cnt:%d secure pipe:%d non-secure pipe:%d, secure-vid:%d, secure-cam:%d\n",
+ mdss_get_sd_client_cnt(), sd_pipes, nonsd_pipes,
+ secure_vid_pipes, secure_cam_pipes);
+ return -EINVAL;
+ } else if (secure_cam_pipes && (secure_vid_pipes || sd_pipes)) {
+ pr_err(" incompatible layers during secure camera session\n");
+ pr_err("secure-camera cnt:%d secure video:%d secure display:%d\n",
+ secure_cam_pipes, secure_vid_pipes, sd_pipes);
return -EINVAL;
} else {
return 0;
@@ -2388,7 +2456,7 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
validate_skip:
__handle_free_list(mdp5_data, validate_info_list, layer_count);
- ret = __validate_secure_display(mdp5_data);
+ ret = __validate_secure_session(mdp5_data);
validate_exit:
pr_debug("err=%d total_layer:%d left:%d right:%d rec0_rel_ndx=0x%x rec1_rel_ndx=0x%x rec0_destroy_ndx=0x%x rec1_destroy_ndx=0x%x processed=%d\n",
@@ -2625,6 +2693,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
{
struct mdss_overlay_private *mdp5_data;
struct mdp_destination_scaler_data *ds_data;
+ struct mdss_panel_info *pinfo;
int rc = 0;
if (!mfd || !commit) {
@@ -2658,6 +2727,23 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
}
+ pinfo = mfd->panel_info;
+ if (pinfo->partial_update_enabled == PU_DUAL_ROI) {
+ if (commit->flags & MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI) {
+ rc = __validate_dual_partial_update(mdp5_data->ctl,
+ commit);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("Multiple pu pre-validate fail\n");
+ return rc;
+ }
+ }
+ } else {
+ if (commit->flags & MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI) {
+ pr_err("Multiple partial update not supported!\n");
+ return -EINVAL;
+ }
+ }
+
ds_data = commit->dest_scaler;
if (ds_data && commit->dest_scaler_cnt &&
(ds_data->flags & MDP_DESTSCALER_ENABLE)) {
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 9bdc66232dd5..664850a1a617 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -396,7 +396,7 @@ int mdss_mdp_overlay_req_check(struct msm_fb_data_type *mfd,
}
int mdp_pipe_tune_perf(struct mdss_mdp_pipe *pipe,
- u32 flags)
+ u64 flags)
{
struct mdss_data_type *mdata = pipe->mixer_left->ctl->mdata;
struct mdss_mdp_perf_params perf;
@@ -1188,11 +1188,10 @@ static void __overlay_pipe_cleanup(struct msm_fb_data_type *mfd,
list_move(&buf->buf_list, &mdp5_data->bufs_freelist);
/*
- * in case of secure UI, the buffer needs to be released as
- * soon as session is closed.
+ * free the buffers on the same cycle instead of waiting for the
+ * next kickoff
*/
- if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)
- mdss_mdp_overlay_buf_free(mfd, buf);
+ mdss_mdp_overlay_buf_free(mfd, buf);
}
mdss_mdp_pipe_destroy(pipe);
@@ -1477,7 +1476,7 @@ static int __overlay_queue_pipes(struct msm_fb_data_type *mfd)
*/
if (mdss_get_sd_client_cnt() &&
!(pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION)) {
- pr_warn("Non secure pipe during secure display: %u: %08X, skip\n",
+ pr_warn("Non secure pipe during secure display: %u: %16llx, skip\n",
pipe->num, pipe->flags);
continue;
}
@@ -1861,12 +1860,109 @@ int mdss_mode_switch_post(struct msm_fb_data_type *mfd, u32 mode)
return rc;
}
+static void __restore_pipe(struct mdss_mdp_pipe *pipe)
+{
+
+ if (!pipe->restore_roi)
+ return;
+
+ pr_debug("restoring pipe:%d dst from:{%d,%d,%d,%d} to:{%d,%d,%d,%d}\n",
+ pipe->num, pipe->dst.x, pipe->dst.y,
+ pipe->dst.w, pipe->dst.h, pipe->layer.dst_rect.x,
+ pipe->layer.dst_rect.y, pipe->layer.dst_rect.w,
+ pipe->layer.dst_rect.h);
+ pr_debug("restoring pipe:%d src from:{%d,%d,%d,%d} to:{%d,%d,%d,%d}\n",
+ pipe->num, pipe->src.x, pipe->src.y,
+ pipe->src.w, pipe->src.h, pipe->layer.src_rect.x,
+ pipe->layer.src_rect.y, pipe->layer.src_rect.w,
+ pipe->layer.src_rect.h);
+
+ pipe->src.x = pipe->layer.src_rect.x;
+ pipe->src.y = pipe->layer.src_rect.y;
+ pipe->src.w = pipe->layer.src_rect.w;
+ pipe->src.h = pipe->layer.src_rect.h;
+
+ pipe->dst.x = pipe->layer.dst_rect.x;
+ pipe->dst.y = pipe->layer.dst_rect.y;
+ pipe->dst.w = pipe->layer.dst_rect.w;
+ pipe->dst.h = pipe->layer.dst_rect.h;
+}
+
+ /**
+ * __crop_adjust_pipe_rect() - Adjust pipe roi for dual partial
+ * update feature.
+ * @pipe: pipe to check against.
+ * @dual_roi: roi's for the dual partial roi.
+ *
+ * For dual PU ROI case, the layer mixer is configured
+ * by merging the two width aligned ROIs (first_roi and
+ * second_roi) vertically. So, the y-offset of all the
+ * pipes belonging to the second_roi needs to be adjusted
+ * accordingly. Also the cropping of the pipe's src/dst
+ * rect has to be done with respect to the ROI the pipe
+ * is intersecting with, before the adjustment.
+ */
+static int __crop_adjust_pipe_rect(struct mdss_mdp_pipe *pipe,
+ struct mdss_dsi_dual_pu_roi *dual_roi)
+{
+ u32 adjust_h;
+ u32 roi_y_pos;
+ int ret = 0;
+
+ pipe->restore_roi = false;
+ if (mdss_rect_overlap_check(&pipe->dst, &dual_roi->first_roi)) {
+ mdss_mdp_crop_rect(&pipe->src, &pipe->dst,
+ &dual_roi->first_roi, false);
+ pipe->restore_roi = true;
+
+ } else if (mdss_rect_overlap_check(&pipe->dst, &dual_roi->second_roi)) {
+ mdss_mdp_crop_rect(&pipe->src, &pipe->dst,
+ &dual_roi->second_roi, false);
+ adjust_h = dual_roi->second_roi.y;
+ roi_y_pos = dual_roi->first_roi.y + dual_roi->first_roi.h;
+
+ if (adjust_h > roi_y_pos) {
+ adjust_h = adjust_h - roi_y_pos;
+ pipe->dst.y -= adjust_h;
+ } else {
+ pr_err("wrong y-pos adjust_y:%d roi_y_pos:%d\n",
+ adjust_h, roi_y_pos);
+ ret = -EINVAL;
+ }
+ pipe->restore_roi = true;
+
+ } else {
+ ret = -EINVAL;
+ }
+
+ pr_debug("crop/adjusted p:%d src:{%d,%d,%d,%d} dst:{%d,%d,%d,%d} r:%d\n",
+ pipe->num, pipe->src.x, pipe->src.y,
+ pipe->src.w, pipe->src.h, pipe->dst.x,
+ pipe->dst.y, pipe->dst.w, pipe->dst.h,
+ pipe->restore_roi);
+
+ if (ret) {
+ pr_err("dual roi error p%d dst{%d,%d,%d,%d}",
+ pipe->num, pipe->dst.x, pipe->dst.y, pipe->dst.w,
+ pipe->dst.h);
+ pr_err(" roi1{%d,%d,%d,%d} roi2{%d,%d,%d,%d}\n",
+ dual_roi->first_roi.x, dual_roi->first_roi.y,
+ dual_roi->first_roi.w, dual_roi->first_roi.h,
+ dual_roi->second_roi.x, dual_roi->second_roi.y,
+ dual_roi->second_roi.w, dual_roi->second_roi.h);
+ }
+
+ return ret;
+}
+
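For the merged layer-mixer case the two ROIs are stacked vertically, so a pipe landing in the second ROI is shifted up by the gap between the ROIs (second_roi.y minus the bottom of first_roi), which is what __crop_adjust_pipe_rect() does above. A minimal standalone sketch of that adjustment, with local types in place of the driver's structs:

    #include <stdio.h>

    struct rect { int x, y, w, h; };  /* stand-in for struct mdss_rect */

    /*
     * Shift a destination rect that falls inside the second ROI so that it
     * lines up with the merged (vertically stacked) layer-mixer output.
     */
    static int adjust_second_roi_dst(struct rect *dst,
                                     const struct rect *first_roi,
                                     const struct rect *second_roi)
    {
        int first_bottom = first_roi->y + first_roi->h;
        int gap = second_roi->y - first_bottom;

        if (gap < 0)
            return -1;           /* ROIs overlap; caller rejects this */

        dst->y -= gap;           /* collapse the gap between the ROIs */
        return 0;
    }

    int main(void)
    {
        struct rect first  = { 0, 100, 1080, 200 };  /* bottom at y=300 */
        struct rect second = { 0, 900, 1080, 300 };
        struct rect dst    = { 0, 950, 1080, 100 };  /* inside second ROI */

        adjust_second_roi_dst(&dst, &first, &second);
        printf("adjusted dst.y = %d\n", dst.y);      /* 950 - 600 = 350 */
        return 0;
    }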
static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
struct mdp_display_commit *commit)
{
struct mdss_mdp_pipe *pipe;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_panel_info *pinfo = &ctl->panel_data->panel_info;
+ struct mdss_dsi_dual_pu_roi *dual_roi = &pinfo->dual_roi;
struct mdss_rect l_roi = {0}, r_roi = {0};
struct mdp_rect tmp_roi = {0};
bool skip_partial_update = true;
@@ -1881,6 +1977,39 @@ static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
rect_copy_mdp_to_mdss(&commit->l_roi, &l_roi);
rect_copy_mdp_to_mdss(&commit->r_roi, &r_roi);
+ /*
+ * In case of a dual partial update ROI, copy the two ROIs into the
+ * dual_roi struct, then combine them and assign the merged ROI to
+ * l_roi, as MDP only needs the merged ROI information for all
+ * LM settings.
+ */
+ if (pinfo->partial_update_enabled == PU_DUAL_ROI) {
+ if (commit->flags & MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI) {
+
+ if (!is_valid_pu_dual_roi(pinfo, &l_roi, &r_roi)) {
+ pr_err("Invalid dual roi - fall back to full screen update\n");
+ goto set_roi;
+ }
+
+ dual_roi->first_roi = (struct mdss_rect)
+ {l_roi.x, l_roi.y, l_roi.w, l_roi.h};
+ dual_roi->second_roi = (struct mdss_rect)
+ {r_roi.x, r_roi.y, r_roi.w, r_roi.h};
+ dual_roi->enabled = true;
+
+ l_roi.h += r_roi.h;
+ memset(&r_roi, 0, sizeof(struct mdss_rect));
+
+ pr_debug("Dual ROI - first_roi:{%d,%d,%d,%d}, second_roi:{%d,%d,%d,%d}\n",
+ dual_roi->first_roi.x, dual_roi->first_roi.y,
+ dual_roi->first_roi.w, dual_roi->first_roi.h,
+ dual_roi->second_roi.x, dual_roi->second_roi.y,
+ dual_roi->second_roi.w, dual_roi->second_roi.h);
+ } else {
+ dual_roi->enabled = false;
+ }
+ }
+
pr_debug("input: l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
l_roi.x, l_roi.y, l_roi.w, l_roi.h,
r_roi.x, r_roi.y, r_roi.w, r_roi.h);
@@ -1926,12 +2055,24 @@ static void __validate_and_set_roi(struct msm_fb_data_type *mfd,
}
list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+ pr_debug("pipe:%d src:{%d,%d,%d,%d} dst:{%d,%d,%d,%d}\n",
+ pipe->num, pipe->src.x, pipe->src.y,
+ pipe->src.w, pipe->src.h, pipe->dst.x,
+ pipe->dst.y, pipe->dst.w, pipe->dst.h);
+
+ if (dual_roi->enabled) {
+ if (__crop_adjust_pipe_rect(pipe, dual_roi)) {
+ skip_partial_update = true;
+ break;
+ }
+ }
+
if (!__is_roi_valid(pipe, &l_roi, &r_roi)) {
skip_partial_update = true;
- pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d\n",
- pipe->num,
- pipe->dst.x, pipe->dst.y,
- pipe->dst.w, pipe->dst.h);
+ pr_err("error. invalid pu config for pipe%d: %d,%d,%d,%d, dual_pu_roi:%d\n",
+ pipe->num, pipe->dst.x, pipe->dst.y,
+ pipe->dst.w, pipe->dst.h,
+ dual_roi->enabled);
break;
}
}
@@ -1946,17 +2087,114 @@ set_roi:
ctl->mixer_right->width,
ctl->mixer_right->height};
}
+
+ if (pinfo->partial_update_enabled == PU_DUAL_ROI) {
+ if (dual_roi->enabled) {
+ /* we failed pu validation, restore pipes */
+ list_for_each_entry(pipe,
+ &mdp5_data->pipes_used, list)
+ __restore_pipe(pipe);
+ }
+ dual_roi->enabled = false;
+ }
}
- pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d\n",
+ pr_debug("after processing: %s l_roi:-> %d %d %d %d r_roi:-> %d %d %d %d, dual_pu_roi:%d\n",
(l_roi.w && l_roi.h && r_roi.w && r_roi.h) ? "left+right" :
((l_roi.w && l_roi.h) ? "left-only" : "right-only"),
l_roi.x, l_roi.y, l_roi.w, l_roi.h,
- r_roi.x, r_roi.y, r_roi.w, r_roi.h);
+ r_roi.x, r_roi.y, r_roi.w, r_roi.h,
+ dual_roi->enabled);
mdss_mdp_set_roi(ctl, &l_roi, &r_roi);
}
+/*
+ * Enables/disables secure (display or camera) sessions
+ */
+static int __overlay_secure_ctrl(struct msm_fb_data_type *mfd)
+{
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
+ struct mdss_mdp_pipe *pipe;
+ int ret = 0;
+ int sd_in_pipe = 0;
+ int sc_in_pipe = 0;
+
+ list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
+ if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
+ sd_in_pipe = 1;
+ pr_debug("Secure pipe: %u : %16llx\n",
+ pipe->num, pipe->flags);
+ } else if (pipe->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION) {
+ sc_in_pipe = 1;
+ pr_debug("Secure camera: %u: %16llx\n",
+ pipe->num, pipe->flags);
+ }
+ }
+
+ if ((!sd_in_pipe && !mdp5_data->sd_enabled) ||
+ (sd_in_pipe && mdp5_data->sd_enabled) ||
+ (!sc_in_pipe && !mdp5_data->sc_enabled) ||
+ (sc_in_pipe && mdp5_data->sc_enabled))
+ return ret;
+
+ /* Secure Display */
+ if (!mdp5_data->sd_enabled && sd_in_pipe) {
+ if (!mdss_get_sd_client_cnt()) {
+ /* wait for ping-pong done */
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(1,
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION);
+ if (ret)
+ return ret;
+ }
+ mdp5_data->sd_enabled = 1;
+ mdss_update_sd_client(mdp5_data->mdata, true);
+ } else if (mdp5_data->sd_enabled && !sd_in_pipe) {
+ /* disable the secure display on last client */
+ if (mdss_get_sd_client_cnt() == 1) {
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(0,
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION);
+ if (ret)
+ return ret;
+ }
+ mdss_update_sd_client(mdp5_data->mdata, false);
+ mdp5_data->sd_enabled = 0;
+ }
+
+ /* Secure Camera */
+ if (!mdp5_data->sc_enabled && sc_in_pipe) {
+ if (!mdss_get_sc_client_cnt()) {
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(1,
+ MDP_SECURE_CAMERA_OVERLAY_SESSION);
+ if (ret)
+ return ret;
+ }
+ mdp5_data->sc_enabled = 1;
+ mdss_update_sc_client(mdp5_data->mdata, true);
+ } else if (mdp5_data->sc_enabled && !sc_in_pipe) {
+ /* disable the secure camera on last client */
+ if (mdss_get_sc_client_cnt() == 1) {
+ if (ctl->ops.wait_pingpong)
+ mdss_mdp_display_wait4pingpong(ctl, true);
+ ret = mdss_mdp_secure_session_ctrl(0,
+ MDP_SECURE_CAMERA_OVERLAY_SESSION);
+ if (ret)
+ return ret;
+ }
+ mdss_update_sc_client(mdp5_data->mdata, false);
+ mdp5_data->sc_enabled = 0;
+ }
+
+ return ret;
+}
+
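__overlay_secure_ctrl() above enables a secure session only when the first client stages a pipe with the corresponding flag and tears it down only when the last client leaves. A compact standalone sketch of that first-in/last-out pattern; the names and the single global counter are illustrative, not the driver's API:

    #include <stdbool.h>
    #include <stdio.h>

    static int secure_clients;       /* global client count (illustrative) */
    static bool session_enabled;     /* per-display state (illustrative)   */

    static int secure_session_set(bool want_secure)
    {
        if (want_secure && !session_enabled) {
            if (secure_clients == 0)
                printf("HW: enter secure mode\n");   /* first client */
            secure_clients++;
            session_enabled = true;
        } else if (!want_secure && session_enabled) {
            if (secure_clients == 1)
                printf("HW: leave secure mode\n");   /* last client  */
            secure_clients--;
            session_enabled = false;
        }
        return 0;                    /* no state change otherwise */
    }

    int main(void)
    {
        secure_session_set(true);    /* enter secure mode     */
        secure_session_set(true);    /* no-op, already secure */
        secure_session_set(false);   /* leave secure mode     */
        return 0;
    }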
int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdp_display_commit *data)
{
@@ -1964,7 +2202,6 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
struct mdss_mdp_pipe *pipe, *tmp;
struct mdss_mdp_ctl *ctl = mfd_to_ctl(mfd);
int ret = 0;
- int sd_in_pipe = 0;
struct mdss_mdp_commit_cb commit_cb;
if (!ctl)
@@ -1995,30 +2232,12 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
mutex_unlock(ctl->shared_lock);
return ret;
}
- mutex_lock(&mdp5_data->list_lock);
-
- /*
- * check if there is a secure display session
- */
- list_for_each_entry(pipe, &mdp5_data->pipes_used, list) {
- if (pipe->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
- sd_in_pipe = 1;
- pr_debug("Secure pipe: %u : %08X\n",
- pipe->num, pipe->flags);
- }
- }
- /*
- * start secure display session if there is secure display session and
- * sd_enabled is not true.
- */
- if (!mdp5_data->sd_enabled && sd_in_pipe) {
- if (!mdss_get_sd_client_cnt())
- ret = mdss_mdp_secure_display_ctrl(1);
- if (!ret) {
- mdp5_data->sd_enabled = 1;
- mdss_update_sd_client(mdp5_data->mdata, true);
- }
+ mutex_lock(&mdp5_data->list_lock);
+ ret = __overlay_secure_ctrl(mfd);
+ if (IS_ERR_VALUE(ret)) {
+ pr_err("secure operation failed %d\n", ret);
+ goto commit_fail;
}
if (!ctl->shared_lock)
@@ -2108,19 +2327,6 @@ int mdss_mdp_overlay_kickoff(struct msm_fb_data_type *mfd,
}
mutex_lock(&mdp5_data->ov_lock);
- /*
- * If there is no secure display session and sd_enabled, disable the
- * secure display session
- */
- if (mdp5_data->sd_enabled && !sd_in_pipe && !ret) {
- /* disable the secure display on last client */
- if (mdss_get_sd_client_cnt() == 1)
- ret = mdss_mdp_secure_display_ctrl(0);
- if (!ret) {
- mdss_update_sd_client(mdp5_data->mdata, false);
- mdp5_data->sd_enabled = 0;
- }
- }
mdss_fb_update_notify_update(mfd);
commit_fail:
@@ -2272,7 +2478,7 @@ static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
struct mdss_mdp_data *src_data;
struct mdp_layer_buffer buffer;
int ret;
- u32 flags;
+ u64 flags;
pipe = __overlay_find_pipe(mfd, req->id);
if (!pipe) {
@@ -2298,7 +2504,8 @@ static int mdss_mdp_overlay_queue(struct msm_fb_data_type *mfd,
pr_warn("Unexpected buffer queue to a solid fill pipe\n");
flags = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
- MDP_SECURE_DISPLAY_OVERLAY_SESSION));
+ MDP_SECURE_DISPLAY_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION));
mutex_lock(&mdp5_data->list_lock);
src_data = mdss_mdp_overlay_buf_alloc(mfd, pipe);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 8f211a977aa4..e370a80ad998 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -1878,7 +1878,8 @@ static void mdss_mdp_pipe_stride_update(struct mdss_mdp_pipe *pipe)
if (pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_NONE) {
memcpy(&ystride, &pipe->src_planes.ystride,
sizeof(u32) * MAX_PLANES);
- if (pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ if (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION))
secure = 0xF;
} else {
if (pipe->multirect.num == MDSS_MDP_PIPE_RECT0) {
@@ -1891,12 +1892,14 @@ static void mdss_mdp_pipe_stride_update(struct mdss_mdp_pipe *pipe)
ystride[0] = rec0_pipe->src_planes.ystride[0];
ystride[2] = rec0_pipe->src_planes.ystride[2];
- if (rec0_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ if (rec0_pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION))
secure |= 0x5;
ystride[1] = rec1_pipe->src_planes.ystride[0];
ystride[3] = rec1_pipe->src_planes.ystride[2];
- if (rec1_pipe->flags & MDP_SECURE_OVERLAY_SESSION)
+ if (rec1_pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION))
secure |= 0xA;
}
@@ -1998,7 +2001,7 @@ static int mdss_mdp_image_setup(struct mdss_mdp_pipe *pipe,
dst.x -= left_lm_w_from_mfd(pipe->mfd);
}
- mdss_mdp_crop_rect(&src, &dst, &roi);
+ mdss_mdp_crop_rect(&src, &dst, &roi, true);
if (mdata->has_src_split && is_right_mixer) {
/*
@@ -2320,11 +2323,13 @@ static int mdss_mdp_pipe_solidfill_setup(struct mdss_mdp_pipe *pipe)
}
format = MDSS_MDP_FMT_SOLID_FILL;
- secure = (pipe->flags & MDP_SECURE_OVERLAY_SESSION ? 0xF : 0x0);
+ secure = (pipe->flags & (MDP_SECURE_OVERLAY_SESSION |
+ MDP_SECURE_CAMERA_OVERLAY_SESSION)
+ ? 0xF : 0x0);
/* support ARGB color format only */
unpack = (C3_ALPHA << 24) | (C2_R_Cr << 16) |
- (C1_B_Cb << 8) | (C0_G_Y << 0);
+ (C0_G_Y << 8) | (C1_B_Cb << 0);
if (pipe->scaler.enable)
opmode |= (1 << 31);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_util.c b/drivers/video/fbdev/msm/mdss_mdp_util.c
index 8b0ebc3fdf05..199c2b66d90e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_util.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_util.c
@@ -241,7 +241,7 @@ void mdss_mdp_intersect_rect(struct mdss_rect *res_rect,
void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
struct mdss_rect *dst_rect,
- const struct mdss_rect *sci_rect)
+ const struct mdss_rect *sci_rect, bool normalize)
{
struct mdss_rect res;
mdss_mdp_intersect_rect(&res, dst_rect, sci_rect);
@@ -253,9 +253,17 @@ void mdss_mdp_crop_rect(struct mdss_rect *src_rect,
src_rect->w = res.w;
src_rect->h = res.h;
}
- *dst_rect = (struct mdss_rect)
- {(res.x - sci_rect->x), (res.y - sci_rect->y),
- res.w, res.h};
+
+ /* adjust dest rect based on the sci_rect starting point */
+ if (normalize) {
+ *dst_rect = (struct mdss_rect) {(res.x - sci_rect->x),
+ (res.y - sci_rect->y), res.w, res.h};
+
+ /* return the actual cropped intersecting rect */
+ } else {
+ *dst_rect = (struct mdss_rect) {res.x, res.y,
+ res.w, res.h};
+ }
}
}
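mdss_mdp_crop_rect() now takes a "normalize" flag: when set, the destination is rebased onto the crop rectangle's origin (the old behaviour); when clear, the actual intersecting rectangle is returned so that __crop_adjust_pipe_rect() can apply its own offsetting. A small standalone sketch of the two modes with a local rect type; the src-rect cropping is left out and the rects are assumed to overlap:

    #include <stdio.h>

    struct rect { int x, y, w, h; };  /* stand-in for struct mdss_rect */

    static struct rect intersect(const struct rect *a, const struct rect *b)
    {
        struct rect r;
        int ax2 = a->x + a->w, ay2 = a->y + a->h;
        int bx2 = b->x + b->w, by2 = b->y + b->h;

        r.x = a->x > b->x ? a->x : b->x;
        r.y = a->y > b->y ? a->y : b->y;
        r.w = (ax2 < bx2 ? ax2 : bx2) - r.x;
        r.h = (ay2 < by2 ? ay2 : by2) - r.y;
        return r;
    }

    static void crop_rect(struct rect *dst, const struct rect *sci, int normalize)
    {
        struct rect res = intersect(dst, sci);

        if (normalize) {
            /* rebase dest onto the crop rect origin */
            dst->x = res.x - sci->x;
            dst->y = res.y - sci->y;
        } else {
            /* keep the absolute position of the intersection */
            dst->x = res.x;
            dst->y = res.y;
        }
        dst->w = res.w;
        dst->h = res.h;
    }

    int main(void)
    {
        struct rect roi = { 0, 100, 1080, 400 };
        struct rect a = { 0, 50, 1080, 200 }, b = a;

        crop_rect(&a, &roi, 1);
        crop_rect(&b, &roi, 0);
        printf("normalized: y=%d h=%d, raw: y=%d h=%d\n", a.y, a.h, b.y, b.h);
        return 0;
    }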
@@ -963,16 +971,17 @@ static int mdss_mdp_put_img(struct mdss_mdp_img_data *data, bool rotator,
data->srcp_dma_buf = NULL;
}
}
- } else if (data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) {
+ } else if ((data->flags & MDP_SECURE_DISPLAY_OVERLAY_SESSION) ||
+ (data->flags & MDP_SECURE_CAMERA_OVERLAY_SESSION)) {
/*
- * skip memory unmapping - secure display uses physical
- * address which does not require buffer unmapping
+ * skip memory unmapping - secure display and camera uses
+ * physical address which does not require buffer unmapping
*
* For LT targets in secure display usecase, srcp_dma_buf will
* be filled due to map call which will be unmapped above.
*
*/
- pr_debug("skip memory unmapping for secure display content\n");
+ pr_debug("skip memory unmapping for secure display/camera content\n");
} else {
return -ENOMEM;
}
@@ -1188,7 +1197,7 @@ err_unmap:
}
static int mdss_mdp_data_get(struct mdss_mdp_data *data,
- struct msmfb_data *planes, int num_planes, u32 flags,
+ struct msmfb_data *planes, int num_planes, u64 flags,
struct device *dev, bool rotator, int dir)
{
int i, rc = 0;
@@ -1201,7 +1210,7 @@ static int mdss_mdp_data_get(struct mdss_mdp_data *data,
rc = mdss_mdp_get_img(&planes[i], &data->p[i], dev, rotator,
dir);
if (rc) {
- pr_err("failed to get buf p=%d flags=%x\n", i, flags);
+ pr_err("failed to get buf p=%d flags=%llx\n", i, flags);
while (i > 0) {
i--;
mdss_mdp_put_img(&data->p[i], rotator, dir);
@@ -1251,7 +1260,7 @@ void mdss_mdp_data_free(struct mdss_mdp_data *data, bool rotator, int dir)
}
int mdss_mdp_data_get_and_validate_size(struct mdss_mdp_data *data,
- struct msmfb_data *planes, int num_planes, u32 flags,
+ struct msmfb_data *planes, int num_planes, u64 flags,
struct device *dev, bool rotator, int dir,
struct mdp_layer_buffer *buffer)
{
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index 18a93f9d3c3e..0483e3d42873 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -137,6 +137,25 @@ enum {
SIM_HW_TE_MODE,
};
+
+/*
+ * enum partial_update_mode - Different modes for partial update feature
+ *
+ * @PU_NOT_SUPPORTED: Feature is not supported on target.
+ * @PU_SINGLE_ROI: Default mode, only one ROI is sent to the
+ * panel (one on each DSI in case of split DSI).
+ * @PU_DUAL_ROI: Support for sending two ROIs that are clubbed
+ * together and sent as one single ROI. This is only
+ * supported on certain panels that have this
+ * capability in their DDIC.
+ *
+ */
+enum {
+ PU_NOT_SUPPORTED = 0,
+ PU_SINGLE_ROI,
+ PU_DUAL_ROI,
+};
+
struct mdss_rect {
u16 x;
u16 y;
@@ -664,6 +683,50 @@ struct mdss_panel_roi_alignment {
u32 min_height;
};
+
+/*
+ * Nomenclature used to represent the partial ROIs in the dual-ROI
+ * case when the panel supports it. The region marked (XXX) is the
+ * extended ROI added to align with the second ROI, since the LM
+ * output has to be a rectangle.
+ *
+ * For a single ROI, only the first ROI is used in the struct. The
+ * DSI driver will merge it based on the partial_update_roi_merge
+ * property.
+ *
+ * -------------------------------
+ * | DSI0 | DSI1 |
+ * -------------------------------
+ * | | |
+ * | | |
+ * | =========|=======----+ |
+ * | | | |XXXX| |
+ * | | First| Roi |XXXX| |
+ * | | | |XXXX| |
+ * | =========|=======----+ |
+ * | | |
+ * | | |
+ * | | |
+ * | +----================= |
+ * | |XXXX| | | |
+ * | |XXXX| Second Roi | |
+ * | |XXXX| | | |
+ * | +----====|============ |
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * | | |
+ * ------------------------------
+ *
+ */
+
+struct mdss_dsi_dual_pu_roi {
+ struct mdss_rect first_roi;
+ struct mdss_rect second_roi;
+ bool enabled;
+};
+
struct mdss_panel_info {
u32 xres;
u32 yres;
@@ -689,6 +752,7 @@ struct mdss_panel_info {
u32 vic; /* video identification code */
u32 deep_color;
struct mdss_rect roi;
+ struct mdss_dsi_dual_pu_roi dual_roi;
int pwm_pmic_gpio;
int pwm_lpg_chan;
int pwm_period;
@@ -723,8 +787,8 @@ struct mdss_panel_info {
u32 cont_splash_enabled;
bool esd_rdy;
- bool partial_update_supported; /* value from dts if pu is supported */
- bool partial_update_enabled; /* is pu currently allowed */
+ u32 partial_update_supported; /* value from dts if pu is supported */
+ u32 partial_update_enabled; /* is pu currently allowed */
u32 dcs_cmd_by_left;
u32 partial_update_roi_merge;
struct ion_handle *splash_ihdl;
@@ -1000,6 +1064,27 @@ static inline bool is_lm_configs_dsc_compatible(struct mdss_panel_info *pinfo,
return true;
}
+static inline bool is_valid_pu_dual_roi(struct mdss_panel_info *pinfo,
+ struct mdss_rect *first_roi, struct mdss_rect *second_roi)
+{
+ if ((first_roi->x != second_roi->x) || (first_roi->w != second_roi->w)
+ || (first_roi->y > second_roi->y)
+ || ((first_roi->y + first_roi->h) > second_roi->y)
+ || (is_dsc_compression(pinfo) &&
+ !is_lm_configs_dsc_compatible(pinfo,
+ first_roi->w, first_roi->h) &&
+ !is_lm_configs_dsc_compatible(pinfo,
+ second_roi->w, second_roi->h))) {
+ pr_err("Invalid multiple PU ROIs, roi0:{%d,%d,%d,%d}, roi1{%d,%d,%d,%d}\n",
+ first_roi->x, first_roi->y, first_roi->w,
+ first_roi->h, second_roi->x, second_roi->y,
+ second_roi->w, second_roi->h);
+ return false;
+ }
+
+ return true;
+}
+
int mdss_register_panel(struct platform_device *pdev,
struct mdss_panel_data *pdata);
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index eab7bcaaa156..2239791fdad0 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -162,12 +162,15 @@ end:
}
/*
- * mdss_smmu_v2_attach()
+ * mdss_smmu_attach_v2()
*
* Associates each configured VA range with the corresponding smmu context
* bank device. Enables the clks as smmu_v2 requires voting it before the usage.
* And iommu attach is done only once during the initial attach and it is never
* detached as smmu v2 uses a feature called 'retention'.
+ * Both the secure and non-secure contexts are detached only for the secure
+ * display use case, and only the secure context for secure camera use cases,
+ * on platforms that have the MDSS_CAPS_SEC_DETACH_SMMU capability enabled.
*/
static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
{
@@ -191,7 +194,9 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
}
mdss_smmu->handoff_pending = false;
- if (!mdss_smmu->domain_attached) {
+ if (!mdss_smmu->domain_attached &&
+ mdss_smmu_is_valid_domain_condition(mdata,
+ i, true)) {
rc = arm_iommu_attach_device(mdss_smmu->dev,
mdss_smmu->mmu_mapping);
if (rc) {
@@ -229,10 +234,11 @@ err:
}
/*
- * mdss_smmu_v2_detach()
+ * mdss_smmu_detach_v2()
*
- * Only disables the clks as it is not required to detach the iommu mapped
- * VA range from the device in smmu_v2 as explained in the mdss_smmu_v2_attach
+ * Disables the clks only when it is not required to detach the iommu mapped
+ * VA range (as long as not in secure display use case)
+ * from the device in smmu_v2 as explained in the mdss_smmu_v2_attach
*/
static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
{
@@ -245,8 +251,24 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
continue;
mdss_smmu = mdss_smmu_get_cb(i);
- if (mdss_smmu && mdss_smmu->dev && !mdss_smmu->handoff_pending)
- mdss_smmu_enable_power(mdss_smmu, false);
+ if (mdss_smmu && mdss_smmu->dev) {
+ if (!mdss_smmu->handoff_pending &&
+ mdss_smmu->domain_attached &&
+ mdss_smmu_is_valid_domain_condition(mdata,
+ i, false)) {
+ /*
+ * if entering a secure display or secure
+ * camera use case, leave the smmu clocks
+ * on and only detach the secured smmu
+ * contexts
+ */
+ arm_iommu_detach_device(mdss_smmu->dev);
+ mdss_smmu->domain_attached = false;
+ pr_debug("iommu v2 domain[%i] detached\n", i);
+ } else {
+ mdss_smmu_enable_power(mdss_smmu, false);
+ }
+ }
}
mutex_unlock(&mdp_iommu_lock);
@@ -609,6 +631,7 @@ int mdss_smmu_probe(struct platform_device *pdev)
}
mdss_smmu = &mdata->mdss_smmu[smmu_domain.domain];
+ mdss_smmu->domain = smmu_domain.domain;
mp = &mdss_smmu->mp;
memset(mp, 0, sizeof(struct dss_module_power));
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
index a987066cc773..f7e6e275c16a 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.h
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -73,6 +73,38 @@ static inline bool mdss_smmu_is_valid_domain_type(struct mdss_data_type *mdata,
return true;
}
+static inline bool mdss_smmu_is_valid_domain_condition(
+ struct mdss_data_type *mdata,
+ int domain_type,
+ bool is_attach)
+{
+ if (is_attach) {
+ if (test_bit(MDSS_CAPS_SEC_DETACH_SMMU,
+ mdata->mdss_caps_map) &&
+ (mdata->sec_disp_en ||
+ (mdata->sec_cam_en &&
+ domain_type == MDSS_IOMMU_DOMAIN_SECURE))) {
+ pr_debug("SMMU attach not attempted, sd:%d, sc:%d\n",
+ mdata->sec_disp_en, mdata->sec_cam_en);
+ return false;
+ } else {
+ return true;
+ }
+ } else {
+ if (test_bit(MDSS_CAPS_SEC_DETACH_SMMU,
+ mdata->mdss_caps_map) &&
+ (mdata->sec_disp_en ||
+ (mdata->sec_cam_en &&
+ domain_type == MDSS_IOMMU_DOMAIN_SECURE))) {
+ pr_debug("SMMU detach attempted, sd:%d, sc:%d\n",
+ mdata->sec_disp_en, mdata->sec_cam_en);
+ return true;
+ } else {
+ return false;
+ }
+ }
+}
+
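The attach and detach branches of mdss_smmu_is_valid_domain_condition() test the same predicate and simply invert the answer. Logically it reduces to the standalone sketch below (illustrative only, not a proposed rewrite of the driver; the struct is a local stand-in for the relevant mdss_data_type fields):

    #include <stdbool.h>
    #include <stdio.h>

    /* Illustrative stand-ins for the driver's state and domain id. */
    struct mdss_state {
        bool sec_detach_cap;  /* MDSS_CAPS_SEC_DETACH_SMMU          */
        bool sec_disp_en;     /* secure display session in progress */
        bool sec_cam_en;      /* secure camera session in progress  */
    };

    #define DOMAIN_SECURE 1

    static bool valid_domain_condition(const struct mdss_state *s,
                                       int domain, bool is_attach)
    {
        bool secure_session = s->sec_disp_en ||
                              (s->sec_cam_en && domain == DOMAIN_SECURE);
        bool must_detach = s->sec_detach_cap && secure_session;

        /* attach is valid unless we must stay detached; detach only then */
        return is_attach ? !must_detach : must_detach;
    }

    int main(void)
    {
        struct mdss_state s = { true, false, true };

        printf("attach secure domain ok: %d\n",
               valid_domain_condition(&s, DOMAIN_SECURE, true));  /* 0 */
        printf("detach secure domain ok: %d\n",
               valid_domain_condition(&s, DOMAIN_SECURE, false)); /* 1 */
        return 0;
    }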
static inline struct mdss_smmu_client *mdss_smmu_get_cb(u32 domain)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -96,7 +128,7 @@ static inline int is_mdss_iommu_attached(void)
return mdata ? mdata->iommu_attached : false;
}
-static inline int mdss_smmu_get_domain_type(u32 flags, bool rotator)
+static inline int mdss_smmu_get_domain_type(u64 flags, bool rotator)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int type;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index b1351226b102..4f6711f31939 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -446,6 +446,7 @@ extern unsigned int hardlockup_panic;
void lockup_detector_init(void);
extern void watchdog_enable(unsigned int cpu);
extern void watchdog_disable(unsigned int cpu);
+extern bool watchdog_configured(unsigned int cpu);
#else
static inline void touch_softlockup_watchdog_sched(void)
{
@@ -468,6 +469,14 @@ static inline void watchdog_enable(unsigned int cpu)
static inline void watchdog_disable(unsigned int cpu)
{
}
+static inline bool watchdog_configured(unsigned int cpu)
+{
+ /*
+ * Pretend the watchdog is always configured.
+ * Core isolation will otherwise wait for the watchdog to be enabled.
+ */
+ return true;
+}
#endif
#ifdef CONFIG_DETECT_HUNG_TASK
@@ -2393,6 +2402,7 @@ struct cpu_cycle_counter_cb {
};
#ifdef CONFIG_SCHED_HMP
+extern void free_task_load_ptrs(struct task_struct *p);
extern int sched_set_window(u64 window_start, unsigned int window_size);
extern unsigned long sched_get_busy(int cpu);
extern void sched_get_cpus_busy(struct sched_load *busy,
@@ -2418,6 +2428,8 @@ extern int sched_set_group_id(struct task_struct *p, unsigned int group_id);
extern unsigned int sched_get_group_id(struct task_struct *p);
#else /* CONFIG_SCHED_HMP */
+static inline void free_task_load_ptrs(struct task_struct *p) { }
+
static inline u64 sched_ktime_clock(void)
{
return 0;
diff --git a/include/soc/qcom/qseecomi.h b/include/soc/qcom/qseecomi.h
index b0a8d67f50fa..e33fd9fc1841 100644
--- a/include/soc/qcom/qseecomi.h
+++ b/include/soc/qcom/qseecomi.h
@@ -68,6 +68,7 @@ enum qseecom_qceos_cmd_id {
QSEOS_CLIENT_SEND_DATA_COMMAND_WHITELIST = 0x1C,
QSEOS_TEE_OPEN_SESSION_WHITELIST = 0x1D,
QSEOS_TEE_INVOKE_COMMAND_WHITELIST = 0x1E,
+ QSEOS_LISTENER_DATA_RSP_COMMAND_WHITELIST = 0x1F,
QSEOS_FSM_LTEOTA_REQ_CMD = 0x109,
QSEOS_FSM_LTEOTA_REQ_RSP_CMD = 0x110,
QSEOS_FSM_IKE_REQ_CMD = 0x203,
@@ -217,6 +218,16 @@ __packed struct qseecom_client_listener_data_irsp {
uint32_t qsee_cmd_id;
uint32_t listener_id;
uint32_t status;
+ uint32_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
+};
+
+__packed struct qseecom_client_listener_data_64bit_irsp {
+ uint32_t qsee_cmd_id;
+ uint32_t listener_id;
+ uint32_t status;
+ uint64_t sglistinfo_ptr;
+ uint32_t sglistinfo_len;
};
/*
@@ -703,4 +714,12 @@ __packed struct qseecom_continue_blocked_request_ireq {
TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_BUF_RW, \
TZ_SYSCALL_PARAM_TYPE_VAL)
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_ID \
+ TZ_SYSCALL_CREATE_SMC_ID(TZ_OWNER_QSEE_OS, TZ_SVC_LISTENER, 0x05)
+
+#define TZ_OS_LISTENER_RESPONSE_HANDLER_WITH_WHITELIST_PARAM_ID \
+ TZ_SYSCALL_CREATE_PARAM_ID_4( \
+ TZ_SYSCALL_PARAM_TYPE_VAL, TZ_SYSCALL_PARAM_TYPE_VAL, \
+ TZ_SYSCALL_PARAM_TYPE_BUF_RW, TZ_SYSCALL_PARAM_TYPE_VAL)
+
#endif /* __QSEECOMI_H_ */
diff --git a/include/sound/soc-dai.h b/include/sound/soc-dai.h
index 80bfd1e8f813..4cbe6a37d121 100644
--- a/include/sound/soc-dai.h
+++ b/include/sound/soc-dai.h
@@ -269,7 +269,7 @@ struct snd_soc_dai {
struct snd_soc_dai_driver *driver;
/* DAI runtime info */
- unsigned int capture_active:1; /* stream is in use */
+ unsigned int capture_active; /* stream is in use */
unsigned int playback_active; /* stream is in use */
unsigned int symmetric_rates:1;
unsigned int symmetric_channels:1;
diff --git a/include/uapi/linux/msm_mdp_ext.h b/include/uapi/linux/msm_mdp_ext.h
index 811d8b4e1994..1a71e860ba48 100644
--- a/include/uapi/linux/msm_mdp_ext.h
+++ b/include/uapi/linux/msm_mdp_ext.h
@@ -96,6 +96,10 @@ LAYER FLAG CONFIGURATION
*/
#define MDP_LAYER_MULTIRECT_PARALLEL_MODE 0x2000
+
+/* Flag indicates that the layer is associated with a secure camera session */
+#define MDP_LAYER_SECURE_CAMERA_SESSION 0x4000
+
/**********************************************************************
DESTINATION SCALER FLAG CONFIGURATION
**********************************************************************/
@@ -147,6 +151,9 @@ VALIDATE/COMMIT FLAG CONFIGURATION
*/
#define MDP_COMMIT_AVR_ONE_SHOT_MODE 0x10
+/* Flag to indicate dual partial ROI update */
+#define MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI 0x20
+
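With MDP_COMMIT_PARTIAL_UPDATE_DUAL_ROI a client passes two vertically separated ROIs in a single validate/commit. The sketch below only illustrates the shape of such a request using local, made-up types; it mirrors the flag value and the left_roi/right_roi names this patch references, but it is not the real struct mdp_layer_commit_v1 layout and the ioctl path is outside this excerpt:

    #include <stdint.h>
    #include <stdio.h>

    #define COMMIT_PARTIAL_UPDATE_DUAL_ROI  0x20  /* mirrors the new flag */

    struct pu_rect { uint32_t x, y, w, h; };      /* illustrative only */

    struct pu_commit {                            /* illustrative only */
        uint32_t flags;
        struct pu_rect left_roi;   /* first (upper) ROI  */
        struct pu_rect right_roi;  /* second (lower) ROI */
    };

    int main(void)
    {
        struct pu_commit commit = {
            .flags     = COMMIT_PARTIAL_UPDATE_DUAL_ROI,
            .left_roi  = { 0, 100, 1080, 200 },
            .right_roi = { 0, 900, 1080, 300 },
        };

        /* both ROIs must be width-aligned and non-overlapping (see driver) */
        printf("dual ROI commit: flags=0x%x\n", commit.flags);
        return 0;
    }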
/* Flag to enable concurrent writeback for the frame */
#define MDP_COMMIT_CWB_EN 0x800
diff --git a/include/uapi/media/msm_sde_rotator.h b/include/uapi/media/msm_sde_rotator.h
index 4487edf0c854..12976e3f14d7 100644
--- a/include/uapi/media/msm_sde_rotator.h
+++ b/include/uapi/media/msm_sde_rotator.h
@@ -86,4 +86,10 @@ struct msm_sde_rotator_fence {
/* SDE Rotator private control ID's */
#define V4L2_CID_SDE_ROTATOR_SECURE (V4L2_CID_USER_BASE + 0x1000)
+/*
+ * This control ID indicates that this context is associated with the
+ * secure camera.
+ */
+#define V4L2_CID_SDE_ROTATOR_SECURE_CAMERA (V4L2_CID_USER_BASE + 0x2000)
+
#endif /* __UAPI_MSM_SDE_ROTATOR_H__ */
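V4L2_CID_SDE_ROTATOR_SECURE_CAMERA is a private V4L2 control, so a client would mark a rotator context as secure-camera with an ordinary VIDIOC_S_CTRL call. A hedged usage sketch: the device node path is an assumption, and the control id is re-derived locally to mirror the definition above rather than taken from an installed header:

    #include <fcntl.h>
    #include <stdio.h>
    #include <sys/ioctl.h>
    #include <linux/videodev2.h>

    /* Mirrors the definition added above; normally from msm_sde_rotator.h */
    #define SDE_ROTATOR_SECURE_CAMERA_CID  (V4L2_CID_USER_BASE + 0x2000)

    int main(void)
    {
        /* assumed node for the SDE rotator; the real index is board-specific */
        int fd = open("/dev/video2", O_RDWR);
        struct v4l2_control ctrl = {
            .id    = SDE_ROTATOR_SECURE_CAMERA_CID,
            .value = 1,               /* mark context as secure camera */
        };

        if (fd < 0) {
            perror("open");
            return 1;
        }
        if (ioctl(fd, VIDIOC_S_CTRL, &ctrl))
            perror("VIDIOC_S_CTRL");
        return 0;
    }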
diff --git a/kernel/cgroup.c b/kernel/cgroup.c
index e2e784ad9e0f..ae83d9602aa0 100644
--- a/kernel/cgroup.c
+++ b/kernel/cgroup.c
@@ -781,6 +781,8 @@ static void put_css_set_locked(struct css_set *cset)
static void put_css_set(struct css_set *cset)
{
+ unsigned long flags;
+
/*
* Ensure that the refcount doesn't hit zero while any readers
* can see it. Similar to atomic_dec_and_lock(), but for an
@@ -789,9 +791,9 @@ static void put_css_set(struct css_set *cset)
if (atomic_add_unless(&cset->refcount, -1, 1))
return;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irqsave(&css_set_lock, flags);
put_css_set_locked(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irqrestore(&css_set_lock, flags);
}
/*
@@ -1014,11 +1016,11 @@ static struct css_set *find_css_set(struct css_set *old_cset,
/* First see if we already have a cgroup group that matches
* the desired set */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = find_existing_css_set(old_cset, cgrp, template);
if (cset)
get_css_set(cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (cset)
return cset;
@@ -1046,7 +1048,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
* find_existing_css_set() */
memcpy(cset->subsys, template, sizeof(cset->subsys));
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
/* Add reference counts and links from the new css_set. */
list_for_each_entry(link, &old_cset->cgrp_links, cgrp_link) {
struct cgroup *c = link->cgrp;
@@ -1072,7 +1074,7 @@ static struct css_set *find_css_set(struct css_set *old_cset,
css_get(css);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return cset;
}
@@ -1136,7 +1138,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
* Release all the links from cset_links to this hierarchy's
* root cgroup
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(link, tmp_link, &cgrp->cset_links, cset_link) {
list_del(&link->cset_link);
@@ -1144,7 +1146,7 @@ static void cgroup_destroy_root(struct cgroup_root *root)
kfree(link);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
if (!list_empty(&root->root_list)) {
list_del(&root->root_list);
@@ -1548,11 +1550,11 @@ static int rebind_subsystems(struct cgroup_root *dst_root,
ss->root = dst_root;
css->cgroup = dcgrp;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist)
list_move_tail(&cset->e_cset_node[ss->id],
&dcgrp->e_csets[ss->id]);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
src_root->subsys_mask &= ~(1 << ssid);
scgrp->subtree_control &= ~(1 << ssid);
@@ -1829,7 +1831,7 @@ static void cgroup_enable_task_cg_lists(void)
{
struct task_struct *p, *g;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
if (use_task_css_set_links)
goto out_unlock;
@@ -1854,8 +1856,12 @@ static void cgroup_enable_task_cg_lists(void)
* entry won't be deleted though the process has exited.
* Do it while holding siglock so that we don't end up
* racing against cgroup_exit().
+ *
+ * Interrupts were already disabled while acquiring
+ * the css_set_lock, so we do not need to disable them
+ * again when acquiring the sighand->siglock here.
*/
- spin_lock_irq(&p->sighand->siglock);
+ spin_lock(&p->sighand->siglock);
if (!(p->flags & PF_EXITING)) {
struct css_set *cset = task_css_set(p);
@@ -1864,11 +1870,11 @@ static void cgroup_enable_task_cg_lists(void)
list_add_tail(&p->cg_list, &cset->tasks);
get_css_set(cset);
}
- spin_unlock_irq(&p->sighand->siglock);
+ spin_unlock(&p->sighand->siglock);
} while_each_thread(g, p);
read_unlock(&tasklist_lock);
out_unlock:
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
static void init_cgroup_housekeeping(struct cgroup *cgrp)
@@ -1973,13 +1979,13 @@ static int cgroup_setup_root(struct cgroup_root *root, unsigned long ss_mask)
* Link the root cgroup in this hierarchy into all the css_set
* objects.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
hash_for_each(css_set_table, i, cset, hlist) {
link_css_set(&tmp_links, cset, root_cgrp);
if (css_set_populated(cset))
cgroup_update_populated(root_cgrp, true);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
BUG_ON(!list_empty(&root_cgrp->self.children));
BUG_ON(atomic_read(&root->nr_cgrps) != 1);
@@ -2212,7 +2218,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
char *path = NULL;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
root = idr_get_next(&cgroup_hierarchy_idr, &hierarchy_id);
@@ -2225,7 +2231,7 @@ char *task_cgroup_path(struct task_struct *task, char *buf, size_t buflen)
path = buf;
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
return path;
}
@@ -2400,7 +2406,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
* the new cgroup. There are no failure cases after here, so this
* is the commit point.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(cset, &tset->src_csets, mg_node) {
list_for_each_entry_safe(task, tmp_task, &cset->mg_tasks, cg_list) {
struct css_set *from_cset = task_css_set(task);
@@ -2411,7 +2417,7 @@ static int cgroup_taskset_migrate(struct cgroup_taskset *tset,
put_css_set_locked(from_cset);
}
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/*
* Migration is committed, all target tasks are now on dst_csets.
@@ -2440,13 +2446,13 @@ out_cancel_attach:
}
}
out_release_tset:
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_splice_init(&tset->dst_csets, &tset->src_csets);
list_for_each_entry_safe(cset, tmp_cset, &tset->src_csets, mg_node) {
list_splice_tail_init(&cset->mg_tasks, &cset->tasks);
list_del_init(&cset->mg_node);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return ret;
}
@@ -2463,14 +2469,14 @@ static void cgroup_migrate_finish(struct list_head *preloaded_csets)
lockdep_assert_held(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry_safe(cset, tmp_cset, preloaded_csets, mg_preload_node) {
cset->mg_src_cgrp = NULL;
cset->mg_dst_cset = NULL;
list_del_init(&cset->mg_preload_node);
put_css_set_locked(cset);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/**
@@ -2620,7 +2626,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
* already PF_EXITING could be freed from underneath us unless we
* take an rcu_read_lock.
*/
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
task = leader;
do {
@@ -2629,7 +2635,7 @@ static int cgroup_migrate(struct task_struct *leader, bool threadgroup,
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return cgroup_taskset_migrate(&tset, cgrp);
}
@@ -2650,7 +2656,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
int ret;
/* look up all src csets */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
task = leader;
do {
@@ -2660,7 +2666,7 @@ static int cgroup_attach_task(struct cgroup *dst_cgrp,
break;
} while_each_thread(leader, task);
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* prepare dst csets and commit */
ret = cgroup_migrate_prepare_dst(dst_cgrp, &preloaded_csets);
@@ -2748,9 +2754,9 @@ static int cgroup_procs_write_permission(struct task_struct *task,
struct cgroup *cgrp;
struct inode *inode;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cgrp = task_cgroup_from_root(task, &cgrp_dfl_root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
while (!cgroup_is_descendant(dst_cgrp, cgrp))
cgrp = cgroup_parent(cgrp);
@@ -2851,9 +2857,9 @@ int cgroup_attach_task_all(struct task_struct *from, struct task_struct *tsk)
if (root == &cgrp_dfl_root)
continue;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
from_cgrp = task_cgroup_from_root(from, root);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
retval = cgroup_attach_task(from_cgrp, tsk, false);
if (retval)
@@ -2978,7 +2984,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
percpu_down_write(&cgroup_threadgroup_rwsem);
/* look up all csses currently attached to @cgrp's subtree */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
css_for_each_descendant_pre(css, cgroup_css(cgrp, NULL)) {
struct cgrp_cset_link *link;
@@ -2990,14 +2996,14 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
cgroup_migrate_add_src(link->cset, cgrp,
&preloaded_csets);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* NULL dst indicates self on default hierarchy */
ret = cgroup_migrate_prepare_dst(NULL, &preloaded_csets);
if (ret)
goto out_finish;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(src_cset, &preloaded_csets, mg_preload_node) {
struct task_struct *task, *ntask;
@@ -3009,7 +3015,7 @@ static int cgroup_update_dfl_csses(struct cgroup *cgrp)
list_for_each_entry_safe(task, ntask, &src_cset->tasks, cg_list)
cgroup_taskset_add(task, &tset);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
ret = cgroup_taskset_migrate(&tset, cgrp);
out_finish:
@@ -3692,10 +3698,10 @@ static int cgroup_task_count(const struct cgroup *cgrp)
int count = 0;
struct cgrp_cset_link *link;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
count += atomic_read(&link->cset->refcount);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return count;
}
@@ -4033,7 +4039,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
memset(it, 0, sizeof(*it));
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
it->ss = css->ss;
@@ -4046,7 +4052,7 @@ void css_task_iter_start(struct cgroup_subsys_state *css,
css_task_iter_advance_css_set(it);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/**
@@ -4064,7 +4070,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
it->cur_task = NULL;
}
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
if (it->task_pos) {
it->cur_task = list_entry(it->task_pos, struct task_struct,
@@ -4073,7 +4079,7 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
css_task_iter_advance(it);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return it->cur_task;
}
@@ -4087,10 +4093,10 @@ struct task_struct *css_task_iter_next(struct css_task_iter *it)
void css_task_iter_end(struct css_task_iter *it)
{
if (it->cur_cset) {
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_del(&it->iters_node);
put_css_set_locked(it->cur_cset);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
if (it->cur_task)
@@ -4119,10 +4125,10 @@ int cgroup_transfer_tasks(struct cgroup *to, struct cgroup *from)
mutex_lock(&cgroup_mutex);
/* all tasks in @from are being moved, all csets are source */
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &from->cset_links, cset_link)
cgroup_migrate_add_src(link->cset, to, &preloaded_csets);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
ret = cgroup_migrate_prepare_dst(to, &preloaded_csets);
if (ret)
@@ -5226,10 +5232,10 @@ static int cgroup_destroy_locked(struct cgroup *cgrp)
*/
cgrp->self.flags &= ~CSS_ONLINE;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &cgrp->cset_links, cset_link)
link->cset->dead = true;
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
/* initiate massacre of all css's */
for_each_css(css, ssid, cgrp)
@@ -5488,7 +5494,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
goto out;
mutex_lock(&cgroup_mutex);
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
for_each_root(root) {
struct cgroup_subsys *ss;
@@ -5540,7 +5546,7 @@ int proc_cgroup_show(struct seq_file *m, struct pid_namespace *ns,
retval = 0;
out_unlock:
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
mutex_unlock(&cgroup_mutex);
kfree(buf);
out:
@@ -5701,13 +5707,13 @@ void cgroup_post_fork(struct task_struct *child,
if (use_task_css_set_links) {
struct css_set *cset;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
cset = task_css_set(current);
if (list_empty(&child->cg_list)) {
get_css_set(cset);
css_set_move_task(child, NULL, cset, false);
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
}
/*
@@ -5751,9 +5757,9 @@ void cgroup_exit(struct task_struct *tsk)
cset = task_css_set(tsk);
if (!list_empty(&tsk->cg_list)) {
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
css_set_move_task(tsk, cset, NULL, false);
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
} else {
get_css_set(cset);
}
@@ -5819,7 +5825,9 @@ static void cgroup_release_agent(struct work_struct *work)
if (!pathbuf || !agentbuf)
goto out;
+ spin_lock_irq(&css_set_lock);
path = cgroup_path(cgrp, pathbuf, PATH_MAX);
+ spin_unlock_irq(&css_set_lock);
if (!path)
goto out;
@@ -5966,7 +5974,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
if (!name_buf)
return -ENOMEM;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
rcu_read_lock();
cset = rcu_dereference(current->cgroups);
list_for_each_entry(link, &cset->cgrp_links, cgrp_link) {
@@ -5977,7 +5985,7 @@ static int current_css_set_cg_links_read(struct seq_file *seq, void *v)
c->root->hierarchy_id, name_buf);
}
rcu_read_unlock();
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
kfree(name_buf);
return 0;
}
@@ -5988,7 +5996,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
struct cgroup_subsys_state *css = seq_css(seq);
struct cgrp_cset_link *link;
- spin_lock_bh(&css_set_lock);
+ spin_lock_irq(&css_set_lock);
list_for_each_entry(link, &css->cgroup->cset_links, cset_link) {
struct css_set *cset = link->cset;
struct task_struct *task;
@@ -6011,7 +6019,7 @@ static int cgroup_css_links_read(struct seq_file *seq, void *v)
overflow:
seq_puts(seq, " ...\n");
}
- spin_unlock_bh(&css_set_lock);
+ spin_unlock_irq(&css_set_lock);
return 0;
}
diff --git a/kernel/fork.c b/kernel/fork.c
index 8a5962276788..a46ce4505066 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -1654,6 +1654,7 @@ bad_fork_cleanup_audit:
bad_fork_cleanup_perf:
perf_event_free_task(p);
bad_fork_cleanup_policy:
+ free_task_load_ptrs(p);
#ifdef CONFIG_NUMA
mpol_put(p->mempolicy);
bad_fork_cleanup_threadgroup_lock:
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f3b1688b3be7..e7196c3a3457 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1912,7 +1912,7 @@ void scheduler_ipi(void)
/*
* Check if someone kicked us for doing the nohz idle load balance.
*/
- if (unlikely(got_nohz_idle_kick())) {
+ if (unlikely(got_nohz_idle_kick()) && !cpu_isolated(cpu)) {
this_rq()->idle_balance = 1;
raise_softirq_irqoff(SCHED_SOFTIRQ);
}
@@ -2269,17 +2269,7 @@ void sched_exit(struct task_struct *p)
reset_task_stats(p);
p->ravg.mark_start = wallclock;
p->ravg.sum_history[0] = EXITING_TASK_MARKER;
-
- kfree(p->ravg.curr_window_cpu);
- kfree(p->ravg.prev_window_cpu);
-
- /*
- * update_task_ravg() can be called for exiting tasks. While the
- * function itself ensures correct behavior, the corresponding
- * trace event requires that these pointers be NULL.
- */
- p->ravg.curr_window_cpu = NULL;
- p->ravg.prev_window_cpu = NULL;
+ free_task_load_ptrs(p);
enqueue_task(rq, p, 0);
clear_ed_task(p, rq);
@@ -2384,10 +2374,12 @@ int sysctl_numa_balancing(struct ctl_table *table, int write,
int sched_fork(unsigned long clone_flags, struct task_struct *p)
{
unsigned long flags;
- int cpu = get_cpu();
+ int cpu;
- __sched_fork(clone_flags, p);
init_new_task_load(p, false);
+ cpu = get_cpu();
+
+ __sched_fork(clone_flags, p);
/*
* We mark the process as running here. This guarantees that
* nobody will actually run it, and a signal or other external
@@ -5570,7 +5562,6 @@ static void set_rq_offline(struct rq *rq);
int do_isolation_work_cpu_stop(void *data)
{
- unsigned long flags;
unsigned int cpu = smp_processor_id();
struct rq *rq = cpu_rq(cpu);
@@ -5578,23 +5569,35 @@ int do_isolation_work_cpu_stop(void *data)
irq_migrate_all_off_this_cpu();
+ local_irq_disable();
+
sched_ttwu_pending();
- /* Update our root-domain */
- raw_spin_lock_irqsave(&rq->lock, flags);
+ raw_spin_lock(&rq->lock);
+
+ /*
+ * Temporarily mark the rq as offline. This will allow us to
+ * move tasks off the CPU.
+ */
if (rq->rd) {
BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
set_rq_offline(rq);
}
migrate_tasks(rq, false);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+
+ if (rq->rd)
+ set_rq_online(rq);
+ raw_spin_unlock(&rq->lock);
/*
* We might have been in tickless state. Clear NOHZ flags to avoid
* us being kicked for helping out with balancing
*/
nohz_balance_clear_nohz_mask(cpu);
+
+ clear_hmp_request(cpu);
+ local_irq_enable();
return 0;
}
@@ -5693,6 +5696,22 @@ int sched_isolate_cpu(int cpu)
if (++cpu_isolation_vote[cpu] > 1)
goto out;
+ /*
+ * There is a race between watchdog being enabled by hotplug and
+ * core isolation disabling the watchdog. When a CPU is hotplugged in
+ * and the hotplug lock has been released the watchdog thread might
+ * not have run yet to enable the watchdog.
+ * We have to wait for the watchdog to be enabled before proceeding.
+ */
+ if (!watchdog_configured(cpu)) {
+ msleep(20);
+ if (!watchdog_configured(cpu)) {
+ --cpu_isolation_vote[cpu];
+ ret_code = -EBUSY;
+ goto out;
+ }
+ }
+
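The isolation path above refuses to proceed until the watchdog thread has configured the hotplugged CPU, polling once more after a short sleep and bailing out with -EBUSY otherwise. The general bounded-wait pattern, as a standalone sketch with a fake readiness check in place of watchdog_configured():

    #include <stdbool.h>
    #include <stdio.h>
    #include <unistd.h>

    #define EBUSY_ERR (-16)

    static bool resource_ready(void)  /* stand-in for watchdog_configured() */
    {
        static int calls;
        return ++calls > 1;           /* becomes ready on the second poll */
    }

    static int claim_resource(void)
    {
        if (!resource_ready()) {
            usleep(20000);            /* give the other thread 20ms */
            if (!resource_ready())
                return EBUSY_ERR;     /* still not ready: back off */
        }
        printf("resource claimed\n");
        return 0;
    }

    int main(void)
    {
        return claim_resource() ? 1 : 0;
    }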
set_cpu_isolated(cpu, true);
cpumask_clear_cpu(cpu, &avail_cpus);
@@ -5703,7 +5722,6 @@ int sched_isolate_cpu(int cpu)
migrate_sync_cpu(cpu, cpumask_first(&avail_cpus));
stop_cpus(cpumask_of(cpu), do_isolation_work_cpu_stop, 0);
- clear_hmp_request(cpu);
calc_load_migrate(rq);
update_max_interval();
sched_update_group_capacities(cpu);
@@ -5745,10 +5763,6 @@ int sched_unisolate_cpu_unlocked(int cpu)
raw_spin_lock_irqsave(&rq->lock, flags);
rq->age_stamp = sched_clock_cpu(cpu);
- if (rq->rd) {
- BUG_ON(!cpumask_test_cpu(cpu, rq->rd->span));
- set_rq_online(rq);
- }
raw_spin_unlock_irqrestore(&rq->lock, flags);
}
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 21a60beb8288..1674b1054f83 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8121,8 +8121,11 @@ static struct rq *find_busiest_queue_hmp(struct lb_env *env,
int max_nr_big = 0, nr_big;
bool find_big = !!(env->flags & LBF_BIG_TASK_ACTIVE_BALANCE);
int i;
+ cpumask_t cpus;
- for_each_cpu(i, sched_group_cpus(group)) {
+ cpumask_andnot(&cpus, sched_group_cpus(group), cpu_isolated_mask);
+
+ for_each_cpu(i, &cpus) {
struct rq *rq = cpu_rq(i);
u64 cumulative_runnable_avg =
rq->hmp_stats.cumulative_runnable_avg;
@@ -8285,6 +8288,15 @@ static int need_active_balance(struct lb_env *env)
sd->cache_nice_tries + NEED_ACTIVE_BALANCE_THRESHOLD);
}
+static int group_balance_cpu_not_isolated(struct sched_group *sg)
+{
+ cpumask_t cpus;
+
+ cpumask_and(&cpus, sched_group_cpus(sg), sched_group_mask(sg));
+ cpumask_andnot(&cpus, &cpus, cpu_isolated_mask);
+ return cpumask_first(&cpus);
+}
+
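group_balance_cpu_not_isolated() above is simply "first CPU in (group cpus AND group mask) that is not isolated". A standalone sketch using plain bitmasks in place of the kernel's cpumask helpers (the kernel returns nr_cpu_ids for an empty mask; this sketch returns -1):

    #include <stdint.h>
    #include <stdio.h>

    /* Find the lowest set bit in (group & mask) & ~isolated, or -1 if none. */
    static int first_balance_cpu(uint64_t group, uint64_t mask, uint64_t isolated)
    {
        uint64_t candidates = (group & mask) & ~isolated;
        int cpu;

        for (cpu = 0; cpu < 64; cpu++)
            if (candidates & (1ULL << cpu))
                return cpu;
        return -1;
    }

    int main(void)
    {
        uint64_t group    = 0x0F;  /* CPUs 0-3 in the sched group */
        uint64_t mask     = 0x0F;
        uint64_t isolated = 0x03;  /* CPUs 0 and 1 are isolated   */

        printf("balance cpu: %d\n", first_balance_cpu(group, mask, isolated));
        return 0;
    }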
static int should_we_balance(struct lb_env *env)
{
struct sched_group *sg = env->sd->groups;
@@ -8302,7 +8314,8 @@ static int should_we_balance(struct lb_env *env)
sg_mask = sched_group_mask(sg);
/* Try to find first idle cpu */
for_each_cpu_and(cpu, sg_cpus, env->cpus) {
- if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu))
+ if (!cpumask_test_cpu(cpu, sg_mask) || !idle_cpu(cpu) ||
+ cpu_isolated(cpu))
continue;
balance_cpu = cpu;
@@ -8310,7 +8323,7 @@ static int should_we_balance(struct lb_env *env)
}
if (balance_cpu == -1)
- balance_cpu = group_balance_cpu(sg);
+ balance_cpu = group_balance_cpu_not_isolated(sg);
/*
* First idle cpu or the first cpu(busiest) in this sched group
@@ -8530,7 +8543,8 @@ no_move:
* ->active_balance_work. Once set, it's cleared
* only after active load balance is finished.
*/
- if (!busiest->active_balance) {
+ if (!busiest->active_balance &&
+ !cpu_isolated(cpu_of(busiest))) {
busiest->active_balance = 1;
busiest->push_cpu = this_cpu;
active_balance = 1;
@@ -9198,12 +9212,15 @@ static void nohz_idle_balance(struct rq *this_rq, enum cpu_idle_type idle)
/* Earliest time when we have to do rebalance again */
unsigned long next_balance = jiffies + 60*HZ;
int update_next_balance = 0;
+ cpumask_t cpus;
if (idle != CPU_IDLE ||
!test_bit(NOHZ_BALANCE_KICK, nohz_flags(this_cpu)))
goto end;
- for_each_cpu(balance_cpu, nohz.idle_cpus_mask) {
+ cpumask_andnot(&cpus, nohz.idle_cpus_mask, cpu_isolated_mask);
+
+ for_each_cpu(balance_cpu, &cpus) {
if (balance_cpu == this_cpu || !idle_cpu(balance_cpu))
continue;
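
Each of these fair.c hunks follows the same recipe: build a temporary mask with cpumask_andnot() (or add a cpu_isolated() check) so that isolated CPUs are never chosen as a busiest queue, balance CPU or nohz balance target. A userspace analogue of that mask arithmetic with plain 64-bit bitmaps; candidate_cpus(), group_mask and isolated_mask are illustrative names, not the kernel types:

#include <stdint.h>
#include <stdio.h>

/* Keep only CPUs that are in the group but not isolated: group & ~isolated. */
static uint64_t candidate_cpus(uint64_t group_mask, uint64_t isolated_mask)
{
	return group_mask & ~isolated_mask;
}

int main(void)
{
	uint64_t group = 0x0F;     /* CPUs 0-3 in this sched group */
	uint64_t isolated = 0x06;  /* CPUs 1 and 2 are isolated */
	uint64_t cpus = candidate_cpus(group, isolated);

	/* Equivalent of for_each_cpu(i, &cpus): walk the remaining bits. */
	for (int i = 0; i < 64; i++)
		if (cpus & (1ULL << i))
			printf("consider cpu %d\n", i);  /* prints 0 and 3 */
	return 0;
}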
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 627da2346337..30391aae0822 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -1624,6 +1624,20 @@ unsigned int cpu_temp(int cpu)
return 0;
}
+void free_task_load_ptrs(struct task_struct *p)
+{
+ kfree(p->ravg.curr_window_cpu);
+ kfree(p->ravg.prev_window_cpu);
+
+ /*
+ * update_task_ravg() can be called for exiting tasks. While the
+ * function itself ensures correct behavior, the corresponding
+ * trace event requires that these pointers be NULL.
+ */
+ p->ravg.curr_window_cpu = NULL;
+ p->ravg.prev_window_cpu = NULL;
+}
+
void init_new_task_load(struct task_struct *p, bool idle_task)
{
int i;
@@ -1636,8 +1650,8 @@ void init_new_task_load(struct task_struct *p, bool idle_task)
memset(&p->ravg, 0, sizeof(struct ravg));
p->cpu_cycles = 0;
- p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
- p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_ATOMIC);
+ p->ravg.curr_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
+ p->ravg.prev_window_cpu = kcalloc(nr_cpu_ids, sizeof(u32), GFP_KERNEL);
/* Don't have much choice. CPU frequency would be bogus */
BUG_ON(!p->ravg.curr_window_cpu || !p->ravg.prev_window_cpu);
@@ -1814,6 +1828,7 @@ static void group_load_in_freq_domain(struct cpumask *cpus,
}
}
+static inline u64 freq_policy_load(struct rq *rq, u64 load);
/*
* Should scheduler alert governor for changing frequency?
*
@@ -1864,6 +1879,7 @@ static int send_notification(struct rq *rq, int check_pred, int check_groups)
_group_load_in_cpu(cpu_of(rq), &group_load, NULL);
new_load = rq->prev_runnable_sum + group_load;
+ new_load = freq_policy_load(rq, new_load);
raw_spin_unlock_irqrestore(&rq->lock, flags);
read_unlock(&related_thread_group_lock);
@@ -3296,7 +3312,7 @@ void sched_get_cpus_busy(struct sched_load *busy,
u64 load[cpus], group_load[cpus];
u64 nload[cpus], ngload[cpus];
u64 pload[cpus];
- unsigned int cur_freq[cpus], max_freq[cpus];
+ unsigned int max_freq[cpus];
int notifier_sent = 0;
int early_detection[cpus];
int cpu, i = 0;
@@ -3336,10 +3352,9 @@ void sched_get_cpus_busy(struct sched_load *busy,
update_task_ravg(rq->curr, rq, TASK_UPDATE, sched_ktime_clock(),
0);
- cur_freq[i] = cpu_cycles_to_freq(rq->cc.cycles, rq->cc.time);
account_load_subtractions(rq);
- load[i] = rq->old_busy_time = rq->prev_runnable_sum;
+ load[i] = rq->prev_runnable_sum;
nload[i] = rq->nt_prev_runnable_sum;
pload[i] = rq->hmp_stats.pred_demands_sum;
rq->old_estimated_time = pload[i];
@@ -3360,7 +3375,6 @@ void sched_get_cpus_busy(struct sched_load *busy,
rq->cluster->notifier_sent = 0;
}
early_detection[i] = (rq->ed_task != NULL);
- cur_freq[i] = cpu_cur_freq(cpu);
max_freq[i] = cpu_max_freq(cpu);
i++;
}
@@ -3403,6 +3417,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
nload[i] += ngload[i];
load[i] = freq_policy_load(rq, load[i]);
+ rq->old_busy_time = load[i];
+
/*
* Scale load in reference to cluster max_possible_freq.
*
@@ -3433,33 +3449,11 @@ skip_early:
goto exit_early;
}
- /*
- * When the load aggregation is controlled by
- * sched_freq_aggregate_threshold, allow reporting loads
- * greater than 100 @ Fcur to ramp up the frequency
- * faster.
- */
- if (notifier_sent || (aggregate_load &&
- sched_freq_aggregate_threshold)) {
- load[i] = scale_load_to_freq(load[i], max_freq[i],
- cpu_max_possible_freq(cpu));
- nload[i] = scale_load_to_freq(nload[i], max_freq[i],
- cpu_max_possible_freq(cpu));
- } else {
- load[i] = scale_load_to_freq(load[i], max_freq[i],
- cur_freq[i]);
- nload[i] = scale_load_to_freq(nload[i], max_freq[i],
- cur_freq[i]);
- if (load[i] > window_size)
- load[i] = window_size;
- if (nload[i] > window_size)
- nload[i] = window_size;
-
- load[i] = scale_load_to_freq(load[i], cur_freq[i],
- cpu_max_possible_freq(cpu));
- nload[i] = scale_load_to_freq(nload[i], cur_freq[i],
- cpu_max_possible_freq(cpu));
- }
+ load[i] = scale_load_to_freq(load[i], max_freq[i],
+ cpu_max_possible_freq(cpu));
+ nload[i] = scale_load_to_freq(nload[i], max_freq[i],
+ cpu_max_possible_freq(cpu));
+
pload[i] = scale_load_to_freq(pload[i], max_freq[i],
rq->cluster->max_possible_freq);
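
The deleted branch reported load relative to the current frequency (clamped at 100% of a window) unless aggregation or a pending notifier forced Fmax scaling; the remaining code always reports load relative to the cluster's max possible frequency. A worked sketch of that arithmetic, assuming scale_load_to_freq() is a simple linear rescale load * src_freq / dst_freq (consistent with the window_size clamp in the removed branch); the numbers are illustrative:

#include <stdint.h>
#include <stdio.h>

/* Assumed helper: re-express a busy time measured against src_freq
 * relative to dst_freq (a lower reference frequency inflates the value). */
static uint64_t scale_load_to_freq(uint64_t load, uint64_t src, uint64_t dst)
{
	return load * src / dst;
}

int main(void)
{
	uint64_t load = 12000;                 /* busy time @ max_freq, in us */
	uint64_t max_freq = 1800000;           /* policy max, kHz */
	uint64_t max_possible_freq = 2400000;  /* cluster max possible, kHz */

	/* New path: a single rescale to the max_possible_freq reference. */
	uint64_t reported = scale_load_to_freq(load, max_freq, max_possible_freq);

	printf("reported load: %llu\n", (unsigned long long)reported);  /* 9000 */
	return 0;
}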
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 7f21591c8ec5..f2813e137b23 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -588,17 +588,13 @@ static void watchdog_set_prio(unsigned int policy, unsigned int prio)
sched_setscheduler(current, policy, &param);
}
-/* Must be called with hotplug lock (lock_device_hotplug()) held. */
void watchdog_enable(unsigned int cpu)
{
struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
unsigned int *enabled = raw_cpu_ptr(&watchdog_en);
- lock_device_hotplug_assert();
-
if (*enabled)
return;
- *enabled = 1;
/* kick off the timer for the hardlockup detector */
hrtimer_init(hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
@@ -614,24 +610,40 @@ void watchdog_enable(unsigned int cpu)
/* initialize timestamp */
watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
__touch_watchdog();
+
+ /*
+ * Ensure the above operations are observed by other CPUs before
+ * indicating that the timer is enabled. This synchronizes core
+ * isolation with hotplug: core isolation waits for this flag to be
+ * set.
+ */
+ mb();
+ *enabled = 1;
}
-/* Must be called with hotplug lock (lock_device_hotplug()) held. */
void watchdog_disable(unsigned int cpu)
{
struct hrtimer *hrtimer = raw_cpu_ptr(&watchdog_hrtimer);
unsigned int *enabled = raw_cpu_ptr(&watchdog_en);
- lock_device_hotplug_assert();
-
if (!*enabled)
return;
- *enabled = 0;
watchdog_set_prio(SCHED_NORMAL, 0);
hrtimer_cancel(hrtimer);
/* disable the perf event */
watchdog_nmi_disable(cpu);
+
+ /*
+ * No barrier is needed here since disabling the watchdog is
+ * synchronized with the hotplug lock.
+ */
+ *enabled = 0;
+}
+
+bool watchdog_configured(unsigned int cpu)
+{
+ return *per_cpu_ptr(&watchdog_en, cpu);
}
static void watchdog_cleanup(unsigned int cpu, bool online)
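
watchdog_enable() now finishes all of its setup first and only then publishes *enabled = 1 behind a full barrier, so sched_isolate_cpu() polling watchdog_configured() can never observe the flag before the timer and perf event are live. A minimal sketch of that publish/observe pairing using C11 release/acquire atomics in place of mb(); the struct, field names and payload are illustrative:

#include <stdatomic.h>
#include <stdio.h>

struct watchdog_state {
	int timer_armed;           /* stands in for hrtimer + perf setup */
	atomic_int enabled;        /* stands in for per-cpu watchdog_en */
};

static void wd_enable(struct watchdog_state *wd)
{
	wd->timer_armed = 1;
	/* Release: everything above is visible before the flag reads 1. */
	atomic_store_explicit(&wd->enabled, 1, memory_order_release);
}

static int wd_configured(struct watchdog_state *wd)
{
	/* Acquire pairs with the release store in wd_enable(). */
	return atomic_load_explicit(&wd->enabled, memory_order_acquire);
}

int main(void)
{
	static struct watchdog_state wd;    /* zero-initialized: not enabled */

	printf("configured before: %d\n", wd_configured(&wd));  /* 0 */
	wd_enable(&wd);
	printf("configured after: %d\n", wd_configured(&wd));   /* 1 */
	return 0;
}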
diff --git a/sound/core/pcm_native.c b/sound/core/pcm_native.c
index 3f26597d8d46..1f062aaa5414 100644
--- a/sound/core/pcm_native.c
+++ b/sound/core/pcm_native.c
@@ -2017,7 +2017,7 @@ static int snd_pcm_hw_rule_sample_bits(struct snd_pcm_hw_params *params,
static unsigned int rates[] = { 5512, 8000, 11025, 16000, 22050, 32000, 44100,
48000, 64000, 88200, 96000, 176400, 192000,
- 384000 };
+ 352800, 384000 };
const struct snd_pcm_hw_constraint_list snd_pcm_known_rates = {
.count = ARRAY_SIZE(rates),
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 281db1d07f57..393b8b4b8084 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -714,6 +714,16 @@ struct hpf_work {
struct delayed_work dwork;
};
+#define WCD9335_SPK_ANC_EN_DELAY_MS 350
+static int spk_anc_en_delay = WCD9335_SPK_ANC_EN_DELAY_MS;
+module_param(spk_anc_en_delay, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(spk_anc_en_delay, "delay to enable anc in speaker path");
+
+struct spk_anc_work {
+ struct tasha_priv *tasha;
+ struct delayed_work dwork;
+};
+
struct tx_mute_work {
struct tasha_priv *tasha;
u8 decimator;
@@ -836,6 +846,7 @@ struct tasha_priv {
int ear_spkr_gain;
struct hpf_work tx_hpf_work[TASHA_NUM_DECIMATORS];
struct tx_mute_work tx_mute_dwork[TASHA_NUM_DECIMATORS];
+ struct spk_anc_work spk_anc_dwork;
struct mutex codec_mutex;
int hph_l_gain;
int hph_r_gain;
@@ -4278,6 +4289,21 @@ static int tasha_codec_enable_lineout_pa(struct snd_soc_dapm_widget *w,
return ret;
}
+static void tasha_spk_anc_update_callback(struct work_struct *work)
+{
+ struct spk_anc_work *spk_anc_dwork;
+ struct tasha_priv *tasha;
+ struct delayed_work *delayed_work;
+ struct snd_soc_codec *codec;
+
+ delayed_work = to_delayed_work(work);
+ spk_anc_dwork = container_of(delayed_work, struct spk_anc_work, dwork);
+ tasha = spk_anc_dwork->tasha;
+ codec = tasha->codec;
+
+ snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_CFG0, 0x10, 0x10);
+}
+
static int tasha_codec_enable_spk_anc(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -4295,10 +4321,11 @@ static int tasha_codec_enable_spk_anc(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
ret = tasha_codec_enable_anc(w, kcontrol, event);
- snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_CFG0,
- 0x10, 0x10);
+ schedule_delayed_work(&tasha->spk_anc_dwork.dwork,
+ msecs_to_jiffies(spk_anc_en_delay));
break;
case SND_SOC_DAPM_POST_PMD:
+ cancel_delayed_work_sync(&tasha->spk_anc_dwork.dwork);
snd_soc_update_bits(codec, WCD9335_CDC_RX7_RX_PATH_CFG0,
0x10, 0x00);
ret = tasha_codec_enable_anc(w, kcontrol, event);
@@ -13687,6 +13714,10 @@ static int tasha_codec_probe(struct snd_soc_codec *codec)
tasha_tx_mute_update_callback);
}
+ tasha->spk_anc_dwork.tasha = tasha;
+ INIT_DELAYED_WORK(&tasha->spk_anc_dwork.dwork,
+ tasha_spk_anc_update_callback);
+
mutex_lock(&tasha->codec_mutex);
snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT1");
snd_soc_dapm_disable_pin(dapm, "ANC LINEOUT2");
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 6745aec41388..d989ae3bed6c 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -546,6 +546,16 @@ struct tx_mute_work {
struct delayed_work dwork;
};
+#define WCD934X_SPK_ANC_EN_DELAY_MS 350
+static int spk_anc_en_delay = WCD934X_SPK_ANC_EN_DELAY_MS;
+module_param(spk_anc_en_delay, int, S_IRUGO | S_IWUSR | S_IWGRP);
+MODULE_PARM_DESC(spk_anc_en_delay, "delay to enable anc in speaker path");
+
+struct spk_anc_work {
+ struct tavil_priv *tavil;
+ struct delayed_work dwork;
+};
+
struct hpf_work {
struct tavil_priv *tavil;
u8 decimator;
@@ -610,6 +620,7 @@ struct tavil_priv {
struct work_struct tavil_add_child_devices_work;
struct hpf_work tx_hpf_work[WCD934X_NUM_DECIMATORS];
struct tx_mute_work tx_mute_dwork[WCD934X_NUM_DECIMATORS];
+ struct spk_anc_work spk_anc_dwork;
unsigned int vi_feed_value;
@@ -1775,6 +1786,21 @@ static int tavil_codec_enable_rx_bias(struct snd_soc_dapm_widget *w,
return 0;
}
+static void tavil_spk_anc_update_callback(struct work_struct *work)
+{
+ struct spk_anc_work *spk_anc_dwork;
+ struct tavil_priv *tavil;
+ struct delayed_work *delayed_work;
+ struct snd_soc_codec *codec;
+
+ delayed_work = to_delayed_work(work);
+ spk_anc_dwork = container_of(delayed_work, struct spk_anc_work, dwork);
+ tavil = spk_anc_dwork->tavil;
+ codec = tavil->codec;
+
+ snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_CFG0, 0x10, 0x10);
+}
+
static int tavil_codec_enable_spkr_anc(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol,
int event)
@@ -1792,10 +1818,11 @@ static int tavil_codec_enable_spkr_anc(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
ret = tavil_codec_enable_anc(w, kcontrol, event);
- snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_CFG0,
- 0x10, 0x10);
+ schedule_delayed_work(&tavil->spk_anc_dwork.dwork,
+ msecs_to_jiffies(spk_anc_en_delay));
break;
case SND_SOC_DAPM_POST_PMD:
+ cancel_delayed_work_sync(&tavil->spk_anc_dwork.dwork);
snd_soc_update_bits(codec, WCD934X_CDC_RX7_RX_PATH_CFG0,
0x10, 0x00);
ret = tavil_codec_enable_anc(w, kcontrol, event);
@@ -4182,7 +4209,7 @@ int tavil_micbias_control(struct snd_soc_codec *codec,
post_dapm_on, &tavil->mbhc->wcd_mbhc);
break;
case MICB_DISABLE:
- if (tavil->pullup_ref[micb_index] > 0)
+ if (tavil->micb_ref[micb_index] > 0)
tavil->micb_ref[micb_index]--;
if ((tavil->micb_ref[micb_index] == 0) &&
(tavil->pullup_ref[micb_index] > 0))
@@ -4362,6 +4389,9 @@ static const struct reg_sequence tavil_hph_reset_tbl[] = {
{ WCD934X_HPH_RDAC_LDO_CTL, 0x33 },
{ WCD934X_HPH_RDAC_CHOP_CLK_LP_CTL, 0x00 },
{ WCD934X_HPH_REFBUFF_UHQA_CTL, 0xA8 },
+};
+
+static const struct reg_sequence tavil_hph_reset_tbl_1_0[] = {
{ WCD934X_HPH_REFBUFF_LP_CTL, 0x0A },
{ WCD934X_HPH_L_DAC_CTL, 0x00 },
{ WCD934X_HPH_R_DAC_CTL, 0x00 },
@@ -4383,6 +4413,28 @@ static const struct reg_sequence tavil_hph_reset_tbl[] = {
{ WCD934X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00 },
};
+static const struct reg_sequence tavil_hph_reset_tbl_1_1[] = {
+ { WCD934X_HPH_REFBUFF_LP_CTL, 0x0E },
+ { WCD934X_HPH_L_DAC_CTL, 0x00 },
+ { WCD934X_HPH_R_DAC_CTL, 0x00 },
+ { WCD934X_HPH_NEW_ANA_HPH2, 0x00 },
+ { WCD934X_HPH_NEW_ANA_HPH3, 0x00 },
+ { WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x40 },
+ { WCD934X_HPH_NEW_INT_RDAC_HD2_CTL, 0x81 },
+ { WCD934X_HPH_NEW_INT_RDAC_VREF_CTL, 0x10 },
+ { WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL, 0x00 },
+ { WCD934X_HPH_NEW_INT_RDAC_MISC1, 0x81 },
+ { WCD934X_HPH_NEW_INT_PA_MISC1, 0x22 },
+ { WCD934X_HPH_NEW_INT_PA_MISC2, 0x00 },
+ { WCD934X_HPH_NEW_INT_PA_RDAC_MISC, 0x00 },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER1, 0xFE },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER2, 0x02 },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER3, 0x4E },
+ { WCD934X_HPH_NEW_INT_HPH_TIMER4, 0x54 },
+ { WCD934X_HPH_NEW_INT_PA_RDAC_MISC2, 0x00 },
+ { WCD934X_HPH_NEW_INT_PA_RDAC_MISC3, 0x00 },
+};
+
static const struct tavil_reg_mask_val tavil_pa_disable[] = {
{ WCD934X_CDC_RX1_RX_PATH_CTL, 0x30, 0x10 }, /* RX1 mute enable */
{ WCD934X_CDC_RX2_RX_PATH_CTL, 0x30, 0x10 }, /* RX2 mute enable */
@@ -4408,6 +4460,7 @@ static const struct tavil_reg_mask_val tavil_ocp_en_seq_1[] = {
/* LO-HIFI */
static const struct tavil_reg_mask_val tavil_pre_pa_en_lohifi[] = {
{ WCD934X_HPH_NEW_INT_HPH_TIMER1, 0x02, 0x00 },
+ { WCD934X_FLYBACK_VNEG_CTRL_4, 0xf0, 0x80 },
{ WCD934X_HPH_NEW_INT_PA_MISC2, 0x20, 0x20 },
{ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0xf0, 0x40 },
{ WCD934X_HPH_CNP_WG_CTL, 0x80, 0x00 },
@@ -4449,6 +4502,7 @@ static void tavil_codec_hph_reg_recover(struct tavil_priv *tavil,
struct regmap *map, int pa_status)
{
int i;
+ unsigned int reg;
blocking_notifier_call_chain(&tavil->mbhc->notifier,
WCD_EVENT_OCP_OFF,
@@ -4470,6 +4524,12 @@ static void tavil_codec_hph_reg_recover(struct tavil_priv *tavil,
/* Restore to HW defaults */
regmap_multi_reg_write(map, tavil_hph_reset_tbl,
ARRAY_SIZE(tavil_hph_reset_tbl));
+ if (TAVIL_IS_1_1(tavil->wcd9xxx))
+ regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_1,
+ ARRAY_SIZE(tavil_hph_reset_tbl_1_1));
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_0,
+ ARRAY_SIZE(tavil_hph_reset_tbl_1_0));
for (i = 0; i < ARRAY_SIZE(tavil_ocp_en_seq); i++)
regmap_write_bits(map, tavil_ocp_en_seq[i].reg,
@@ -4483,13 +4543,23 @@ pa_en_restore:
__func__, pa_status);
/* Disable PA and other registers before restoring */
- for (i = 0; i < ARRAY_SIZE(tavil_pa_disable); i++)
+ for (i = 0; i < ARRAY_SIZE(tavil_pa_disable); i++) {
+ if (TAVIL_IS_1_1(tavil->wcd9xxx) &&
+ (tavil_pa_disable[i].reg == WCD934X_HPH_CNP_WG_CTL))
+ continue;
regmap_write_bits(map, tavil_pa_disable[i].reg,
tavil_pa_disable[i].mask,
tavil_pa_disable[i].val);
+ }
regmap_multi_reg_write(map, tavil_hph_reset_tbl,
ARRAY_SIZE(tavil_hph_reset_tbl));
+ if (TAVIL_IS_1_1(tavil->wcd9xxx))
+ regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_1,
+ ARRAY_SIZE(tavil_hph_reset_tbl_1_1));
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ regmap_multi_reg_write(map, tavil_hph_reset_tbl_1_0,
+ ARRAY_SIZE(tavil_hph_reset_tbl_1_0));
for (i = 0; i < ARRAY_SIZE(tavil_ocp_en_seq_1); i++)
regmap_write_bits(map, tavil_ocp_en_seq_1[i].reg,
@@ -4497,17 +4567,37 @@ pa_en_restore:
tavil_ocp_en_seq_1[i].val);
if (tavil->hph_mode == CLS_H_LOHIFI) {
- for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en_lohifi); i++)
+ for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en_lohifi); i++) {
+ reg = tavil_pre_pa_en_lohifi[i].reg;
+ if ((TAVIL_IS_1_1(tavil->wcd9xxx)) &&
+ ((reg == WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL) ||
+ (reg == WCD934X_HPH_CNP_WG_CTL) ||
+ (reg == WCD934X_HPH_REFBUFF_LP_CTL)))
+ continue;
regmap_write_bits(map,
tavil_pre_pa_en_lohifi[i].reg,
tavil_pre_pa_en_lohifi[i].mask,
tavil_pre_pa_en_lohifi[i].val);
+ }
} else {
- for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en); i++)
+ for (i = 0; i < ARRAY_SIZE(tavil_pre_pa_en); i++) {
+ reg = tavil_pre_pa_en[i].reg;
+ if ((TAVIL_IS_1_1(tavil->wcd9xxx)) &&
+ ((reg == WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL) ||
+ (reg == WCD934X_HPH_CNP_WG_CTL) ||
+ (reg == WCD934X_HPH_REFBUFF_LP_CTL)))
+ continue;
regmap_write_bits(map, tavil_pre_pa_en[i].reg,
tavil_pre_pa_en[i].mask,
tavil_pre_pa_en[i].val);
+ }
}
+
+ if (TAVIL_IS_1_1(tavil->wcd9xxx)) {
+ regmap_write(map, WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x84);
+ regmap_write(map, WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x84);
+ }
+
regmap_write_bits(map, WCD934X_ANA_HPH, 0x0C, pa_status & 0x0C);
regmap_write_bits(map, WCD934X_ANA_HPH, 0x30, 0x30);
/* wait for 100usec after HPH DAC is enabled */
@@ -4516,10 +4606,14 @@ pa_en_restore:
/* Sleep for 7msec after PA is enabled */
usleep_range(7000, 7100);
- for (i = 0; i < ARRAY_SIZE(tavil_post_pa_en); i++)
+ for (i = 0; i < ARRAY_SIZE(tavil_post_pa_en); i++) {
+ if ((TAVIL_IS_1_1(tavil->wcd9xxx)) &&
+ (tavil_post_pa_en[i].reg == WCD934X_HPH_CNP_WG_CTL))
+ continue;
regmap_write_bits(map, tavil_post_pa_en[i].reg,
tavil_post_pa_en[i].mask,
tavil_post_pa_en[i].val);
+ }
end:
tavil->mbhc->is_hph_recover = true;
@@ -5049,14 +5143,18 @@ static int tavil_mad_input_put(struct snd_kcontrol *kcontrol,
u32 adc, i, mic_bias_found = 0;
int ret = 0;
char *mad_input;
+ bool is_adc2_input = false;
tavil_mad_input = ucontrol->value.integer.value[0];
if (strnstr(tavil_conn_mad_text[tavil_mad_input], "NOTUSED",
sizeof("NOTUSED"))) {
- dev_err(codec->dev,
+ dev_dbg(codec->dev,
"%s: Unsupported tavil_mad_input = %s\n",
__func__, tavil_conn_mad_text[tavil_mad_input]);
+ /* Make sure the MAD register is updated */
+ snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
+ 0x88, 0x00);
return -EINVAL;
}
@@ -5085,14 +5183,16 @@ static int tavil_mad_input_put(struct snd_kcontrol *kcontrol,
snprintf(mad_amic_input_widget, 6, "%s%u", "AMIC", adc);
mad_input_widget = mad_amic_input_widget;
+ if (adc == 2)
+ is_adc2_input = true;
} else {
/* DMIC type input widget*/
mad_input_widget = tavil_conn_mad_text[tavil_mad_input];
}
dev_dbg(codec->dev,
- "%s: tavil input widget = %s\n", __func__,
- mad_input_widget);
+ "%s: tavil input widget = %s, adc_input = %s\n", __func__,
+ mad_input_widget, is_adc2_input ? "true" : "false");
for (i = 0; i < card->num_of_dapm_routes; i++) {
if (!strcmp(card->of_dapm_routes[i].sink, mad_input_widget)) {
@@ -5137,7 +5237,13 @@ static int tavil_mad_input_put(struct snd_kcontrol *kcontrol,
0x0F, tavil_mad_input);
snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
0x07, mic_bias_found);
-
+ /* For ADC2 input, MAD should be in micbias mode with BG enabled */
+ if (is_adc2_input)
+ snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
+ 0x88, 0x88);
+ else
+ snd_soc_update_bits(codec, WCD934X_ANA_MAD_SETUP,
+ 0x88, 0x00);
return 0;
}
@@ -8837,6 +8943,10 @@ static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
tavil_tx_mute_update_callback);
}
+ tavil->spk_anc_dwork.tavil = tavil;
+ INIT_DELAYED_WORK(&tavil->spk_anc_dwork.dwork,
+ tavil_spk_anc_update_callback);
+
tavil_mclk2_reg_defaults(tavil);
/* DSD initialization */
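
The HPH recovery path now splits the reset table by chip revision and, on v1.1 parts, skips a handful of registers while replaying the generic PA sequences. A compact userspace analogue of that "replay a reg/mask/val table, but skip revision-specific entries" loop; the register addresses, is_v1_1 flag and skip set are illustrative, not the WCD934x map:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct reg_mask_val {
	uint16_t reg;
	uint8_t mask;
	uint8_t val;
};

#define REG_CNP_WG_CTL     0x1234  /* illustrative addresses */
#define REG_RDAC_GAIN_CTL  0x1235
#define REG_TIMER1         0x1236

static const struct reg_mask_val pre_pa_en[] = {
	{ REG_TIMER1,        0x02, 0x00 },
	{ REG_RDAC_GAIN_CTL, 0xf0, 0x40 },
	{ REG_CNP_WG_CTL,    0x80, 0x00 },
};

static void write_bits(uint16_t reg, uint8_t mask, uint8_t val)
{
	printf("write reg 0x%04x mask 0x%02x val 0x%02x\n", reg, mask, val);
}

static void replay_table(const struct reg_mask_val *tbl, size_t n, bool is_v1_1)
{
	for (size_t i = 0; i < n; i++) {
		/* v1.1 keeps its own tuned values for these registers. */
		if (is_v1_1 && (tbl[i].reg == REG_CNP_WG_CTL ||
				tbl[i].reg == REG_RDAC_GAIN_CTL))
			continue;
		write_bits(tbl[i].reg, tbl[i].mask, tbl[i].val);
	}
}

int main(void)
{
	replay_table(pre_pa_en, sizeof(pre_pa_en) / sizeof(pre_pa_en[0]), true);
	return 0;
}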
diff --git a/sound/soc/msm/qdsp6v2/q6asm.c b/sound/soc/msm/qdsp6v2/q6asm.c
index 0ea94cb52bfb..2874a334dfdd 100644
--- a/sound/soc/msm/qdsp6v2/q6asm.c
+++ b/sound/soc/msm/qdsp6v2/q6asm.c
@@ -1658,8 +1658,10 @@ static int32_t q6asm_callback(struct apr_client_data *data, void *priv)
ac->apr = NULL;
atomic_set(&ac->time_flag, 0);
atomic_set(&ac->cmd_state, 0);
+ atomic_set(&ac->mem_state, 0);
wake_up(&ac->time_wait);
wake_up(&ac->cmd_wait);
+ wake_up(&ac->mem_wait);
mutex_unlock(&ac->cmd_lock);
return 0;
}
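
The q6asm change makes the APR-reset path clear the memory-command state and wake mem_wait alongside the existing command and time waiters, so nothing sleeps forever on a dead channel. A small pthread sketch of that "flip the state, then broadcast to every waiter" teardown pattern; session_reset(), the mutex/condvar names and the state values are illustrative:

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cmd_wait = PTHREAD_COND_INITIALIZER;
static pthread_cond_t mem_wait = PTHREAD_COND_INITIALIZER;
static int cmd_state = 1;   /* 1: command in flight, 0: done or aborted */
static int mem_state = 1;

/* Called when the transport goes away: nothing will ever complete now. */
static void session_reset(void)
{
	pthread_mutex_lock(&lock);
	cmd_state = 0;
	mem_state = 0;
	pthread_cond_broadcast(&cmd_wait);  /* wake command waiters */
	pthread_cond_broadcast(&mem_wait);  /* wake memory-map waiters too */
	pthread_mutex_unlock(&lock);
}

static void *mem_waiter(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&lock);
	while (mem_state)
		pthread_cond_wait(&mem_wait, &lock);
	pthread_mutex_unlock(&lock);
	printf("mem waiter released\n");
	return NULL;
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, mem_waiter, NULL);
	session_reset();
	pthread_join(t, NULL);
	return 0;
}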
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 9f7d4e2cb532..22468eee62db 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -68,6 +68,9 @@ struct intf_info {
size_t xfer_buf_size;
phys_addr_t xfer_buf_pa;
u8 *xfer_buf;
+ u8 pcm_card_num;
+ u8 pcm_dev_num;
+ u8 direction;
bool in_use;
};
@@ -115,6 +118,7 @@ struct uaudio_qmi_svc {
struct qmi_handle *uaudio_svc_hdl;
void *curr_conn;
struct work_struct recv_msg_work;
+ struct work_struct qmi_disconnect_work;
struct workqueue_struct *uaudio_wq;
ktime_t t_request_recvd;
ktime_t t_resp_sent;
@@ -385,7 +389,7 @@ static void uaudio_iommu_unmap(enum mem_type mtype, unsigned long va,
static int prepare_qmi_response(struct snd_usb_substream *subs,
struct qmi_uaudio_stream_resp_msg_v01 *resp, u32 xfer_buf_len,
- int card_num)
+ int card_num, int pcm_dev_num)
{
int ret = -ENODEV;
struct usb_interface *iface;
@@ -411,6 +415,14 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
goto err;
}
+ if (uadev[card_num].info &&
+ uadev[card_num].info[subs->interface].in_use) {
+ pr_err("%s interface# %d already in use card# %d\n", __func__,
+ subs->interface, card_num);
+ ret = -EBUSY;
+ goto err;
+ }
+
alts = &iface->altsetting[subs->altset_idx];
altsd = get_iface_desc(alts);
protocol = altsd->bInterfaceProtocol;
@@ -627,12 +639,6 @@ skip_sync:
kref_get(&uadev[card_num].kref);
}
- if (uadev[card_num].info[subs->interface].in_use) {
- pr_err("%s interface# %d already in use card# %d\n", __func__,
- subs->interface, card_num);
- goto unmap_xfer_buf;
- }
-
uadev[card_num].card_num = card_num;
/* cache intf specific info to use it for unmap and free xfer buf */
@@ -644,6 +650,9 @@ skip_sync:
uadev[card_num].info[subs->interface].xfer_buf_pa = xfer_buf_pa;
uadev[card_num].info[subs->interface].xfer_buf_size = len;
uadev[card_num].info[subs->interface].xfer_buf = xfer_buf;
+ uadev[card_num].info[subs->interface].pcm_card_num = card_num;
+ uadev[card_num].info[subs->interface].pcm_dev_num = pcm_dev_num;
+ uadev[card_num].info[subs->interface].direction = subs->direction;
uadev[card_num].info[subs->interface].in_use = true;
set_bit(card_num, &uaudio_qdev->card_slot);
@@ -665,9 +674,71 @@ err:
return ret;
}
-void uaudio_disconnect_cb(struct snd_usb_audio *chip)
+static void uaudio_dev_intf_cleanup(struct usb_device *udev,
+ struct intf_info *info)
+{
+ uaudio_iommu_unmap(MEM_XFER_RING, info->data_xfer_ring_va,
+ info->data_xfer_ring_size);
+ info->data_xfer_ring_va = 0;
+ info->data_xfer_ring_size = 0;
+
+ uaudio_iommu_unmap(MEM_XFER_RING, info->sync_xfer_ring_va,
+ info->sync_xfer_ring_size);
+ info->sync_xfer_ring_va = 0;
+ info->sync_xfer_ring_size = 0;
+
+ uaudio_iommu_unmap(MEM_XFER_BUF, info->xfer_buf_va,
+ info->xfer_buf_size);
+ info->xfer_buf_va = 0;
+
+ usb_free_coherent(udev, info->xfer_buf_size,
+ info->xfer_buf, info->xfer_buf_pa);
+ info->xfer_buf_size = 0;
+ info->xfer_buf = NULL;
+ info->xfer_buf_pa = 0;
+
+ info->in_use = false;
+}
+
+static void uaudio_dev_cleanup(struct uaudio_dev *dev)
{
- int ret, if_idx;
+ int if_idx;
+
+ /* free xfer buffer and unmap xfer ring and buf per interface */
+ for (if_idx = 0; if_idx < dev->num_intf; if_idx++) {
+ if (!dev->info[if_idx].in_use)
+ continue;
+ uaudio_dev_intf_cleanup(dev->udev, &dev->info[if_idx]);
+ pr_debug("%s: release resources: intf# %d card# %d\n", __func__,
+ if_idx, dev->card_num);
+ }
+
+ /* iommu_unmap dcba iova for a usb device */
+ uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
+
+ dev->dcba_iova = 0;
+ dev->dcba_size = 0;
+ dev->num_intf = 0;
+
+ /* free interface info */
+ kfree(dev->info);
+ dev->info = NULL;
+
+ clear_bit(dev->card_num, &uaudio_qdev->card_slot);
+
+ /* all audio devices are disconnected */
+ if (!uaudio_qdev->card_slot) {
+ uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
+ usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
+ pr_debug("%s: all audio devices disconnected\n", __func__);
+ }
+
+ dev->udev = NULL;
+}
+
+static void uaudio_disconnect_cb(struct snd_usb_audio *chip)
+{
+ int ret;
struct uaudio_dev *dev;
int card_num = chip->card_num;
struct uaudio_qmi_svc *svc = uaudio_svc;
@@ -713,57 +784,7 @@ void uaudio_disconnect_cb(struct snd_usb_audio *chip)
mutex_lock(&chip->dev_lock);
}
- /* free xfer buffer and unmap xfer ring and buf per interface */
- for (if_idx = 0; if_idx < dev->num_intf; if_idx++) {
- if (!dev->info[if_idx].in_use)
- continue;
- usb_free_coherent(dev->udev,
- dev->info[if_idx].xfer_buf_size,
- dev->info[if_idx].xfer_buf,
- dev->info[if_idx].xfer_buf_pa);
-
- uaudio_iommu_unmap(MEM_XFER_RING,
- dev->info[if_idx].data_xfer_ring_va,
- dev->info[if_idx].data_xfer_ring_size);
- dev->info[if_idx].data_xfer_ring_va = 0;
- dev->info[if_idx].data_xfer_ring_size = 0;
-
- uaudio_iommu_unmap(MEM_XFER_RING,
- dev->info[if_idx].sync_xfer_ring_va,
- dev->info[if_idx].sync_xfer_ring_size);
- dev->info[if_idx].sync_xfer_ring_va = 0;
- dev->info[if_idx].sync_xfer_ring_size = 0;
-
- uaudio_iommu_unmap(MEM_XFER_BUF,
- dev->info[if_idx].xfer_buf_va,
- dev->info[if_idx].xfer_buf_size);
- dev->info[if_idx].xfer_buf_va = 0;
- dev->info[if_idx].xfer_buf_size = 0;
- pr_debug("%s: release resources: intf# %d card# %d\n", __func__,
- if_idx, card_num);
- }
-
- /* iommu_unmap dcba iova for a usb device */
- uaudio_iommu_unmap(MEM_DCBA, dev->dcba_iova, dev->dcba_size);
-
- dev->dcba_iova = 0;
- dev->dcba_size = 0;
- dev->num_intf = 0;
-
- /* free interface info */
- kfree(dev->info);
- dev->info = NULL;
-
- clear_bit(card_num, &uaudio_qdev->card_slot);
-
- /* all audio devices are disconnected */
- if (!uaudio_qdev->card_slot) {
- uaudio_iommu_unmap(MEM_EVENT_RING, IOVA_BASE, PAGE_SIZE);
- usb_sec_event_ring_cleanup(dev->udev, uaudio_qdev->intr_num);
- pr_debug("%s: all audio devices disconnected\n", __func__);
- }
-
- dev->udev = NULL;
+ uaudio_dev_cleanup(dev);
done:
mutex_unlock(&chip->dev_lock);
}
@@ -789,7 +810,7 @@ static void uaudio_dev_release(struct kref *kref)
}
/* maps audio format received over QMI to asound.h based pcm format */
-int map_pcm_format(unsigned int fmt_received)
+static int map_pcm_format(unsigned int fmt_received)
{
switch (fmt_received) {
case USB_QMI_PCM_FORMAT_S8:
@@ -903,7 +924,7 @@ static int handle_uaudio_stream_req(void *req_h, void *req)
if (!ret && req_msg->enable)
ret = prepare_qmi_response(subs, &resp, req_msg->xfer_buff_size,
- pcm_card_num);
+ pcm_card_num, pcm_dev_num);
mutex_unlock(&chip->dev_lock);
@@ -912,31 +933,7 @@ response:
if (intf_num >= 0) {
mutex_lock(&chip->dev_lock);
info = &uadev[pcm_card_num].info[intf_num];
- uaudio_iommu_unmap(MEM_XFER_RING,
- info->data_xfer_ring_va,
- info->data_xfer_ring_size);
- info->data_xfer_ring_va = 0;
- info->data_xfer_ring_size = 0;
-
- uaudio_iommu_unmap(MEM_XFER_RING,
- info->sync_xfer_ring_va,
- info->sync_xfer_ring_size);
- info->sync_xfer_ring_va = 0;
- info->sync_xfer_ring_size = 0;
-
- uaudio_iommu_unmap(MEM_XFER_BUF,
- info->xfer_buf_va,
- info->xfer_buf_size);
- info->xfer_buf_va = 0;
-
- usb_free_coherent(uadev[pcm_card_num].udev,
- info->xfer_buf_size,
- info->xfer_buf,
- info->xfer_buf_pa);
- info->xfer_buf_size = 0;
- info->xfer_buf = NULL;
- info->xfer_buf_pa = 0;
- info->in_use = false;
+ uaudio_dev_intf_cleanup(uadev[pcm_card_num].udev, info);
pr_debug("%s:release resources: intf# %d card# %d\n",
__func__, intf_num, pcm_card_num);
mutex_unlock(&chip->dev_lock);
@@ -980,6 +977,43 @@ static int uaudio_qmi_svc_connect_cb(struct qmi_handle *handle,
return 0;
}
+static void uaudio_qmi_disconnect_work(struct work_struct *w)
+{
+ struct intf_info *info;
+ int idx, if_idx;
+ struct snd_usb_substream *subs;
+ struct snd_usb_audio *chip = NULL;
+
+ /* find all active intf for set alt 0 and cleanup usb audio dev */
+ for (idx = 0; idx < SNDRV_CARDS; idx++) {
+ if (!atomic_read(&uadev[idx].in_use))
+ continue;
+
+ for (if_idx = 0; if_idx < uadev[idx].num_intf; if_idx++) {
+ if (!uadev[idx].info || !uadev[idx].info[if_idx].in_use)
+ continue;
+ info = &uadev[idx].info[if_idx];
+ subs = find_snd_usb_substream(info->pcm_card_num,
+ info->pcm_dev_num,
+ info->direction,
+ &chip,
+ uaudio_disconnect_cb);
+ if (!subs || !chip || atomic_read(&chip->shutdown)) {
+ pr_debug("%s:no subs for c#%u, dev#%u dir%u\n",
+ __func__, info->pcm_card_num,
+ info->pcm_dev_num,
+ info->direction);
+ continue;
+ }
+ snd_usb_enable_audio_stream(subs, 0);
+ }
+ atomic_set(&uadev[idx].in_use, 0);
+ mutex_lock(&chip->dev_lock);
+ uaudio_dev_cleanup(&uadev[idx]);
+ mutex_unlock(&chip->dev_lock);
+ }
+}
+
static int uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
void *conn_h)
{
@@ -991,6 +1025,8 @@ static int uaudio_qmi_svc_disconnect_cb(struct qmi_handle *handle,
}
svc->curr_conn = NULL;
+ queue_work(svc->uaudio_wq, &svc->qmi_disconnect_work);
+
return 0;
}
@@ -1195,6 +1231,7 @@ static int uaudio_qmi_svc_init(void)
}
INIT_WORK(&svc->recv_msg_work, uaudio_qmi_svc_recv_msg);
+ INIT_WORK(&svc->qmi_disconnect_work, uaudio_qmi_disconnect_work);
uaudio_svc = svc;
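
The QMI service change factors the per-interface unmap/free sequence into uaudio_dev_intf_cleanup() and whole-device teardown into uaudio_dev_cleanup(), then reuses them from the USB disconnect callback, the stream-request error path and the new QMI client disconnect worker. A stripped-down sketch of that refactor shape; the struct fields and the two callers are illustrative, not the driver's API:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

struct intf_info {
	void *xfer_buf;
	bool in_use;
};

struct audio_dev {
	struct intf_info *info;
	int num_intf;
};

/* Single place that releases one interface's resources. */
static void intf_cleanup(struct intf_info *info)
{
	free(info->xfer_buf);
	info->xfer_buf = NULL;
	info->in_use = false;
}

/* Whole-device teardown walks every in-use interface, then frees the table. */
static void dev_cleanup(struct audio_dev *dev)
{
	for (int i = 0; i < dev->num_intf; i++)
		if (dev->info[i].in_use)
			intf_cleanup(&dev->info[i]);
	free(dev->info);
	dev->info = NULL;
	dev->num_intf = 0;
}

int main(void)
{
	struct audio_dev dev = { calloc(2, sizeof(struct intf_info)), 2 };

	if (!dev.info)
		return 1;
	dev.info[0].xfer_buf = malloc(64);
	dev.info[0].in_use = true;

	/* An error path frees just one interface... */
	intf_cleanup(&dev.info[0]);
	/* ...while the disconnect paths tear down the whole device. */
	dev_cleanup(&dev);
	printf("cleanup done\n");
	return 0;
}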