summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorLinux Build Service Account <lnxbuild@localhost>2016-05-31 12:11:36 -0600
committerLinux Build Service Account <lnxbuild@localhost>2016-05-31 12:11:36 -0600
commit3ebf81bef3da6dd1f12f70773866e0063ac3f368 (patch)
tree20f20f9902f9df8344fa007a78f64d8e5668dcea
parent89c198ac7fb60408d3f786ef2bcfaab2a56d4cbe (diff)
parentcd9403e96df5ef8eaefabdb67da22f0a9887964c (diff)
Promotion of kernel.lnx.4.4-160526.
CRs Change ID Subject -------------------------------------------------------------------------------------------------------------- 972519 I35151c460b4350ebd414b67c655684c2019f799f trace: prevent NULL pointer dereference 1013947 I7df9aeb55a95185077c679a217ed8772eb83c8b9 arm64: defconfig: update config options for msm-perf_def 1021612 I312444176373f73f02aa0ceddf5e114a39702641 ARM: msm: dts: fix register dump offsets/ranges for msmc 1019272 I38c637936b398f2fb1665c8233ed5e49e83bf296 thermal: qpnp-temp-alarm: update thermal callback parame 1020529 Ia4f54bfee8111f9f039f772a8bcc7c9a0400d5aa edac: cortex: Update the error strings to reflect Kryo2x 1005061 I4ed9f1c6ad089f80dcd19762fda151ce1572f471 msm: ipa3: WA for incorrect state retention for GSI chan 1019256 I87cca1215134e6d406f60d54f6d0430978eeae9c icnss: Add API to return the CE IRQ number 1015545 I5aad7032f3f8048216a41765f1cf91fde98f6ade msm: ipa3: fix odu debugfs 1006937 Ib8cb979136def6696861a7835bcde763dabe874f net: Warn for cloned packets in ingress path on SMP syst 1008023 Id9949bef91835318a7b344753983eea0aeab7bdc msm: ipa3: add support for TX of sk_buff's with paged da 1021612 I4fbc9aa1f30d36d35a9ad181185761e697cbbef7 msm: mdss: Fix qseed3 clk calculation overflow 1019188 Ib52e6551ac67215dab2bc5770ddcf037568f8b77 net: rmnet_data: Fix use after free when sending MAP com 989851 Ifa42fbd475665a0ca581c907ce5432584ea0e7ed msm: mdss: fix possible out-of-bounds and overflow issue 1016956 I906005680b4cc90cc38dc3d403beebf7aa515ad7 usb: dwc3: Add support handle type-c plug orientation 1019798 I7bccd68866457bb0635ae5166ec935f9e82ba760 soc: qcom: print raw data for gladiator error registers 1017182 I32f312f11fcbebbff0799120448d6e8f0d9ec98d ARM: dts: msm: Add v4l2 rotator node to msm8996 1020265 Id19733e6e075a427c4aa745b5bedc93f29a2dd4f ARM: dts: msm: Add nfc device to msmcobalt 988990 I19aa5983316bec4a87811c8aa8b54f770001c45f msm: mdss: Adding support for destination scaler 993024 I32b0e57c8e958b7e5f1d647e37e46fda052b3d1e 
ARM: dts: msm: Support partial goods for msmcobalt 1013948 If024f55095a951329976b6c2736ad5760eae1f4f arm64: defconfig: update config options for msmcortex-pe 1020515 I2c1fb7dcc698142f9ce42f40164521b8a78268e1 defconfig: msm: Remove incorrect ISPIF version 1013147 Iae6804bcb3121e0852ec5d14d0939623b97a6e67 qcom-charger: Don't automatically set USB_PD type when P 988990 I9a4b9701e078fa39783f33f023eef2da75c1c162 ARM: dts: msm: add extra destination scaler properties f 1020505 I87d18778fef81671c5e7cc261cc70ce07c662933 regulator: cpr3-regulator: support corner bands with no 1019888 I1a8241c1e0a349394351be2ef98381e24f0c4ff4 defconfig: msmcortex: enable qrng drivers 1003367 I75089e210a6fc72683dcf98cdd4da9d6ab3e6fcf msm: kgsl: Correction of VBIF debug bus data in snapshot 1005061 Id849055526bf70e0cc8161239b4530a7fc575744 ARM: dts: msm: enable WA for IPA channel 20 for msmcobal 1002974 Ic0dedbadc0dd2125bd2a7bcc152972c0555e07f8 msm: kgsl: Defer adding the mem entry to a process 1017182 I6fc5f90512d8024439d56d7c72ae2160df460f7a defconfig: msm: sde: Add config for v4l2 sde rotator dri 1006067 I6add3800c40cd09f6e6e0cf2720e69059bd83cbc msm: kgsl: Avoid race condition in ioctl_syncsource_dest 1013147 I77c5875ee8514395a82fac0109b7cff1d507250b usb: pd: Update power_supply type to USB_PD after PD is 1021612 I62a3bd31997be05181de98307089e2a69d98ab7b msm: mdss: fix amortized prefill calculations 1019888 I2c808713aaac42345b97665a8990f5bbb9b9145e ARM: dts: Add qrng driver support for msmcobalt 1013913 I9a17c83d6613ff37cede4a7bb52612465e4d0101 regulator: labibb: Fix slew rate calculation in LAB/IBB 1016956 Idd236136c9f0a9163b4ae7a8405c412f1d69ca9e usb: pd: Add support to notify plug orientation via extc 1016956 I893c0b729015cd22791d168453309168246961e2 usb: phy: qmp: Configure phy lane based on plug orientat 972998 I6a99fa6961e9205d7d9ccb470873c26adde8a91f ARM: dts: msm: Change csi clock voting from ispif node 1020505 I6b9d663b44c96dafba26ad25bcfc4b61c8c86d56 regulator: cpr3-regulator: 
support step quot for CPRh co 977896 I71e6047620066323721c6d542034ddd4b2950e7f sched: Aggregate for frequency 992942 Iaf90ab4c1d17f903d03458d76cab1b4c0a5c8836 msm: camera: isp: Fix warning and errors based on static 1013787 Ieb0a7aa1b1b5f23220854092dcc2119d29c57146 msm: camera: sensor: Add support for 3B read 1017182 If634894768b02d124ceab071a9eca1c36f258600 msm: mdss: Export rotator interrupt and share rotator sm Change-Id: I15d2c47b635d84cffdac17adffff8274b6f8e3f4 CRs-Fixed: 1005061, 993024, 989851, 1017182, 1020505, 1021612, 1020529, 1019256, 1003367, 1006937, 1019798, 1016956, 1019272, 1013913, 972519, 1019888, 1013787, 1006067, 1015545, 1019188, 1020515, 1013147, 977896, 1008023, 1002974, 988990, 1013947, 992942, 972998, 1013948, 1020265
-rw-r--r--Documentation/devicetree/bindings/fb/mdss-mdp.txt6
-rw-r--r--Documentation/devicetree/bindings/platform/msm/ipa.txt4
-rw-r--r--Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt10
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-camera.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/msm8996-mdss.dtsi65
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi31
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi32
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi31
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi62
-rw-r--r--arch/arm/boot/dts/qcom/msmcobalt.dtsi19
-rw-r--r--arch/arm64/configs/msm-perf_defconfig158
-rw-r--r--arch/arm64/configs/msm_defconfig3
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig107
-rw-r--r--arch/arm64/configs/msmcortex_defconfig3
-rw-r--r--drivers/edac/cortex_arm64_edac.c38
-rw-r--r--drivers/gpu/msm/a5xx_reg.h2
-rw-r--r--drivers/gpu/msm/adreno_a5xx_snapshot.c12
-rw-r--r--drivers/gpu/msm/kgsl.c17
-rw-r--r--drivers/gpu/msm/kgsl_sync.c36
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c13
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c5
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c16
-rw-r--r--drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c14
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c13
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h2
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_clients/odu_bridge.c1
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c18
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c239
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_i.h10
-rw-r--r--drivers/power/qcom-charger/qpnp-smb2.c8
-rw-r--r--drivers/power/qcom-charger/smb-lib.c12
-rw-r--r--drivers/regulator/cpr3-regulator.c18
-rw-r--r--drivers/regulator/cprh-kbss-regulator.c8
-rw-r--r--drivers/regulator/qpnp-labibb-regulator.c38
-rw-r--r--drivers/soc/qcom/gladiator_erp_v2.c60
-rw-r--r--drivers/soc/qcom/icnss.c19
-rw-r--r--drivers/thermal/qpnp-temp-alarm.c14
-rw-r--r--drivers/usb/dwc3/dwc3-msm.c54
-rw-r--r--drivers/usb/pd/policy_engine.c79
-rw-r--r--drivers/usb/phy/phy-msm-ssusb-qmp.c22
-rw-r--r--drivers/video/fbdev/msm/mdss.h6
-rw-r--r--drivers/video/fbdev/msm/mdss_debug.c47
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c129
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c162
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h110
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c99
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_hwio.h4
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_layer.c241
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pipe.c35
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c148
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.h5
-rw-r--r--include/linux/sched.h10
-rw-r--r--include/linux/sched/sysctl.h1
-rw-r--r--include/linux/usb/msm_hsusb.h2
-rw-r--r--include/soc/qcom/icnss.h1
-rw-r--r--include/trace/events/sched.h115
-rw-r--r--kernel/sched/core.c735
-rw-r--r--kernel/sched/fair.c26
-rw-r--r--kernel/sched/sched.h26
-rw-r--r--kernel/sched/sched_avg.c2
-rw-r--r--kernel/sysctl.c7
-rw-r--r--kernel/trace/trace_event_perf.c3
-rw-r--r--net/core/dev.c6
-rw-r--r--net/rmnet_data/rmnet_data_stats.h3
-rw-r--r--net/rmnet_data/rmnet_map_command.c10
70 files changed, 2595 insertions, 673 deletions
diff --git a/Documentation/devicetree/bindings/fb/mdss-mdp.txt b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
index d014ce7f3f0b..93cacc07e623 100644
--- a/Documentation/devicetree/bindings/fb/mdss-mdp.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-mdp.txt
@@ -396,6 +396,10 @@ Optional properties:
to mdp block. Xin id property is not valid for mdp
internal blocks like ctl, lm, dspp. It should set
to 0xff for such blocks.
+- qcom,max-dest-scaler-input-width: This 32 bit value provides
+ maximum width to the input of destination scaler.
+- qcom,max-dest-scaler-output-width: This 32 bit value provides
+ maximum width to the output of destination scaler.
Fudge Factors: Fudge factors are used to boost demand for
resources like bus bandswidth, clk rate etc. to
@@ -691,6 +695,8 @@ Example:
qcom,max-mixer-width = <2048>;
qcom,max-pipe-width = <2048>;
+ qcom,max-dest-scaler-input-width = <2048>;
+ qcom,max-dest-scaler-output-width = <2560>;
qcom,max-clk-rate = <320000000>;
qcom,vbif-settings = <0x0004 0x00000001>,
<0x00D8 0x00000707>;
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index 74c95f8f65f9..a407b736d481 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -65,7 +65,9 @@ memory allocation over a PCIe bridge
- qcom,use-rg10-limitation-mitigation: Boolean context flag to activate
the mitigation to register group 10
AP access limitation
-
+- qcom,do-not-use-ch-gsi-20: Boolean context flag to activate
+ software workaround for IPA limitation
+ to not use GSI physical channel 20
- qcom,tethered-flow-control: Boolean context flag to indicate whether
apps based flow control is needed for tethered
call.
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index 88a1c7d478c7..b9143cfc2587 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -97,6 +97,16 @@ KBSS specific properties:
voltage may be increased as the result of a single
CPR measurement.
+- qcom,cpr-step-quot-fixed
+ Usage: optional
+ Value type: <u32>
+ Definition: Fixed step quotient value used by controller for applying
+ the SDELTA margin adjustments on the programmed target
+ quotient values. The step quotient is the number of
+ additional ring oscillator ticks observed for each
+ qcom,voltage-step increase in vdd-supply output voltage.
+ Supported values: 0 - 63.
+
- qcom,cpr-voltage-settling-time
Usage: optional
Value type: <u32>
diff --git a/arch/arm/boot/dts/qcom/msm8996-camera.dtsi b/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
index 1e87bd910dcd..3422e5e7f500 100644
--- a/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-camera.dtsi
@@ -256,18 +256,22 @@
"vfe0_clk_src", "camss_vfe_vfe0_clk", "camss_csi_vfe0_clk",
"vfe1_clk_src", "camss_vfe_vfe1_clk", "camss_csi_vfe1_clk";
qcom,clock-rates = <0 0 0 0
- 200000000 0 0 0
- 200000000 0 0 0
- 200000000 0 0 0
- 200000000 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
0 0 0
0 0 0>;
qcom,clock-control = "NO_SET_RATE", "NO_SET_RATE",
"NO_SET_RATE", "NO_SET_RATE",
- "SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
- "SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
- "SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
- "SET_RATE", "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "INIT_RATE",
+ "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "INIT_RATE",
+ "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "INIT_RATE",
+ "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
+ "INIT_RATE",
+ "NO_SET_RATE", "NO_SET_RATE", "NO_SET_RATE",
"INIT_RATE", "NO_SET_RATE", "NO_SET_RATE",
"INIT_RATE", "NO_SET_RATE", "NO_SET_RATE";
status = "ok";
diff --git a/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi b/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi
index 5c01812d7d6e..51e7cc5f50bf 100644
--- a/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8996-mdss.dtsi
@@ -18,6 +18,8 @@
<0x009b8000 0x1040>;
reg-names = "mdp_phys", "vbif_phys", "vbif_nrt_phys";
interrupts = <0 83 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
vdd-supply = <&gdsc_mdss>;
#address-cells = <1>;
@@ -87,14 +89,12 @@
qcom,mdss-pipe-cursor-clk-ctrl-offsets = <0x3A8 16 15>,
<0x3B0 16 15>;
-
qcom,mdss-ctl-off = <0x00002000 0x00002200 0x00002400
- 0x00002600 0x00002800>;
+ 0x00002600>;
qcom,mdss-mixer-intf-off = <0x00045000 0x00046000
0x00047000 0x0004A000>;
- qcom,mdss-mixer-wb-off = <0x00048000 0x00049000>;
qcom,mdss-dspp-off = <0x00055000 0x00057000>;
- qcom,mdss-wb-off = <0x00065000 0x00065800 0x00066000>;
+ qcom,mdss-wb-off = <0x00066000>;
qcom,mdss-intf-off = <0x0006B000 0x0006B800
0x0006C000 0x0006C800>;
qcom,mdss-pingpong-off = <0x00071000 0x00071800
@@ -103,6 +103,7 @@
qcom,mdss-ppb-ctl-off = <0x00000330 0x00000338>;
qcom,mdss-ppb-cfg-off = <0x00000334 0x0000033C>;
qcom,mdss-has-pingpong-split;
+ qcom,mdss-has-separate-rotator;
qcom,mdss-ad-off = <0x0079000 0x00079800 0x0007a000>;
qcom,mdss-cdm-off = <0x0007a200>;
@@ -111,7 +112,6 @@
qcom,mdss-has-source-split;
qcom,mdss-highest-bank-bit = <0x2>;
qcom,mdss-has-decimation;
- qcom,mdss-has-rotator-downscale;
qcom,mdss-idle-power-collapse-enabled;
clocks = <&clock_mmss clk_mdss_ahb_clk>,
<&clock_mmss clk_mdss_axi_clk>,
@@ -530,11 +530,15 @@
};
mdss_rotator: qcom,mdss_rotator {
- compatible = "qcom,mdss_rotator";
- qcom,mdss-wb-count = <2>;
- qcom,mdss-has-downscale;
- qcom,mdss-has-ubwc;
- qcom,mdss-has-reg-bus;
+ compatible = "qcom,sde_rotator";
+ reg = <0x00900000 0x90000>,
+ <0x009b8000 0x1040>;
+ reg-names = "mdp_phys",
+ "rot_vbif_phys";
+ qcom,mdss-wb-count = <1>;
+ qcom,mdss-wb-id = <1>;
+ qcom,mdss-ctl-id = <4>;
+ qcom,mdss-highest-bank-bit = <0x2>;
/* Bus Scale Settings */
qcom,msm-bus,name = "mdss_rotator";
qcom,msm-bus,num-cases = <3>;
@@ -550,7 +554,44 @@
qcom,supply-names = "rot-mmagic-mdss-gdsc", "rot-vdd";
clocks = <&clock_mmss clk_mmss_misc_ahb_clk>,
- <&clock_mmss clk_mdss_rotator_vote_clk>;
- clock-names = "iface_clk", "rot_core_clk";
+ <&clock_mmss clk_mdss_rotator_vote_clk>,
+ <&clock_mmss clk_mdss_ahb_clk>,
+ <&clock_mmss clk_mdss_axi_clk>,
+ <&clock_mmss clk_mdp_clk_src>,
+ <&clock_mmss clk_mdss_mdp_vote_clk>;
+ clock-names = "iface_clk", "rot_core_clk",
+ "mdss_ahb_clk", "mdss_axi_clk", "mdp_clk_src",
+ "mdss_mdp_vote_clk";
+
+ interrupt-parent = <&mdss_mdp>;
+ interrupts = <32 0>;
+
+ /* VBIF QoS remapper settings*/
+ qcom,mdss-rot-vbif-qos-setting = <1 1 1 1>;
+
+ qcom,mdss-default-ot-rd-limit = <32>;
+ qcom,mdss-default-ot-wr-limit = <16>;
+
+ smmu_rot_unsecure: qcom,smmu_rot_unsec_cb {
+ compatible = "qcom,smmu_sde_rot_unsec";
+ iommus = <&rot_smmu 0>;
+ gdsc-mdss-supply = <&gdsc_mmagic_mdss>;
+ clocks = <&clock_mmss clk_smmu_rot_ahb_clk>,
+ <&clock_mmss clk_mmagic_mdss_axi_clk>,
+ <&clock_mmss clk_smmu_rot_axi_clk>;
+ clock-names = "rot_ahb_clk", "mmagic_mdss_axi_clk",
+ "rot_axi_clk";
+ };
+
+ smmu_rot_secure: qcom,smmu_rot_sec_cb {
+ compatible = "qcom,smmu_sde_rot_sec";
+ iommus = <&rot_smmu 1>;
+ gdsc-mdss-supply = <&gdsc_mmagic_mdss>;
+ clocks = <&clock_mmss clk_smmu_rot_ahb_clk>,
+ <&clock_mmss clk_mmagic_mdss_axi_clk>,
+ <&clock_mmss clk_smmu_rot_axi_clk>;
+ clock-names = "rot_ahb_clk", "mmagic_mdss_axi_clk",
+ "rot_axi_clk";
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index 4173152f7b8c..242945846272 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -107,6 +107,15 @@
qcom,master-en = <1>;
status = "okay";
};
+
+ /* GPIO 21 (NFC_CLK_REQ) */
+ gpio@d400 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
};
&i2c_5 {
@@ -134,6 +143,28 @@
};
};
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 92 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 93 0x00>;
+ qcom,nq-clkreq = <&pmcobalt_gpios 21 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <92 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_gcc clk_ln_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
&mdss_mdp {
qcom,mdss-pref-prim-intf = "dsi";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi
index 97c63e02b716..2e8011554119 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-pll.dtsi
@@ -13,6 +13,7 @@
&soc {
mdss_dsi0_pll: qcom,mdss_dsi_pll@c994400 {
compatible = "qcom,mdss_dsi_pll_cobalt";
+ status = "ok";
label = "MDSS DSI 0 PLL";
cell-index = <0>;
#clock-cells = <1>;
@@ -46,6 +47,7 @@
mdss_dsi1_pll: qcom,mdss_dsi_pll@c996400 {
compatible = "qcom,mdss_dsi_pll_cobalt";
+ status = "ok";
label = "MDSS DSI 1 PLL";
cell-index = <1>;
#clock-cells = <1>;
@@ -78,6 +80,7 @@
mdss_dp_pll: qcom,mdss_dp_pll@ca20000 {
compatible = "qcom,mdss_dp_pll_cobalt";
+ status = "ok";
label = "MDSS DP PLL";
cell-index = <0>;
#clock-cells = <1>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
index fc43c1e4b205..9b1cc2fcfebc 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss.dtsi
@@ -13,6 +13,7 @@
&soc {
mdss_mdp: qcom,mdss_mdp@c900000 {
compatible = "qcom,mdss_mdp";
+ status = "ok";
reg = <0x0c900000 0x90000>,
<0x0c9b0000 0x1040>;
reg-names = "mdp_phys", "vbif_phys";
@@ -38,6 +39,9 @@
qcom,max-mixer-width = <2560>;
qcom,max-pipe-width = <2560>;
+ qcom,max-dest-scaler-input-width = <2048>;
+ qcom,max-dest-scaler-output-width = <2560>;
+
/* VBIF QoS remapper settings*/
qcom,mdss-vbif-qos-rt-setting = <1 2 2 2>;
qcom,vbif-settings = <0x00ac 0x00000040>;
@@ -136,20 +140,20 @@
<0x2506c 0x00000000>,
<0x2706c 0x00000000>;
- qcom,regs-dump-mdp = <0x01000 0x01454>,
- <0x02000 0x02064>,
- <0x02200 0x02264>,
- <0x02400 0x02464>,
- <0x02600 0x02664>,
- <0x02800 0x02864>,
- <0x05000 0x05150>,
- <0x05200 0x05230>,
- <0x07000 0x07150>,
- <0x07200 0x07230>,
- <0x09000 0x09150>,
- <0x09200 0x09230>,
- <0x0b000 0x0b150>,
- <0x0b200 0x0b230>,
+ qcom,regs-dump-mdp = <0x01000 0x01458>,
+ <0x02000 0x02094>,
+ <0x02200 0x02294>,
+ <0x02400 0x02494>,
+ <0x02600 0x02694>,
+ <0x02800 0x02894>,
+ <0x05000 0x05154>,
+ <0x05a00 0x05b00>,
+ <0x07000 0x07154>,
+ <0x07a00 0x07b00>,
+ <0x09000 0x09154>,
+ <0x09a00 0x09b00>,
+ <0x0b000 0x0b154>,
+ <0x0ba00 0x0bb00>,
<0x25000 0x25184>,
<0x27000 0x27184>,
<0x29000 0x29184>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index d087faed9a7c..7225ba84eaeb 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -107,6 +107,15 @@
qcom,master-en = <1>;
status = "okay";
};
+
+ /* GPIO 21 (NFC_CLK_REQ) */
+ gpio@d400 {
+ qcom,mode = <0>;
+ qcom,vin-sel = <1>;
+ qcom,src-sel = <0>;
+ qcom,master-en = <1>;
+ status = "okay";
+ };
};
&i2c_5 {
@@ -134,6 +143,28 @@
};
};
+&i2c_6 { /* BLSP1 QUP6 (NFC) */
+ status = "okay";
+ nq@28 {
+ compatible = "qcom,nq-nci";
+ reg = <0x28>;
+ qcom,nq-irq = <&tlmm 92 0x00>;
+ qcom,nq-ven = <&tlmm 12 0x00>;
+ qcom,nq-firm = <&tlmm 93 0x00>;
+ qcom,nq-clkreq = <&pmcobalt_gpios 21 0x00>;
+ qcom,nq-esepwr = <&tlmm 116 0x00>;
+ interrupt-parent = <&tlmm>;
+ qcom,clk-src = "BBCLK3";
+ interrupts = <92 0>;
+ interrupt-names = "nfc_irq";
+ pinctrl-names = "nfc_active", "nfc_suspend";
+ pinctrl-0 = <&nfc_int_active &nfc_enable_active>;
+ pinctrl-1 = <&nfc_int_suspend &nfc_enable_suspend>;
+ clocks = <&clock_gcc clk_ln_bb_clk3_pin>;
+ clock-names = "ref_clk";
+ };
+};
+
&pmicobalt_haptics {
status = "okay";
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
index 29f02486398e..6d6986232c08 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pinctrl.dtsi
@@ -218,6 +218,68 @@
};
};
+ nfc {
+ nfc_int_active: nfc_int_active {
+ /* active state */
+ mux {
+ /* GPIO 92 NFC Read Interrupt */
+ pins = "gpio92";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio92";
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_int_suspend: nfc_int_suspend {
+ /* sleep state */
+ mux {
+ /* GPIO 92 NFC Read Interrupt */
+ pins = "gpio92";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio92";
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_active: nfc_enable_active {
+ /* active state */
+ mux {
+ /* 12: NFC ENABLE 116:ESE Enable */
+ pins = "gpio12", "gpio116";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12", "gpio116";
+ drive-strength = <6>; /* 6 MA */
+ bias-pull-up;
+ };
+ };
+
+ nfc_enable_suspend: nfc_enable_suspend {
+ /* sleep state */
+ mux {
+ /* 12: NFC ENABLE 116:ESE Enable */
+ pins = "gpio12", "gpio116";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio12", "gpio116";
+ drive-strength = <6>; /* 6 MA */
+ bias-disable;
+ };
+ };
+ };
+
i2c_7 {
i2c_7_active: i2c_7_active {
mux {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index b0a5970bd93e..88ca36aa1aaa 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -859,6 +859,7 @@
qcom,use-gsi;
qcom,use-ipa-tethering-bridge;
qcom,modem-cfg-emb-pipe-flt;
+ qcom,do-not-use-ch-gsi-20;
qcom,ipa-wdi2;
clock-names = "core_clk";
clocks = <&clock_gcc clk_ipa_clk>;
@@ -1767,7 +1768,8 @@
0xcdc /* USB3_PHY_LFPS_RXTERM_IRQ_CLEAR */
0xc04 /* USB3_PHY_POWER_DOWN_CONTROL */
0xc00 /* USB3_PHY_SW_RESET */
- 0xc08>; /* USB3_PHY_START */
+ 0xc08 /* USB3_PHY_START */
+ 0xa00>; /* USB3PHY_PCS_MISC_TYPEC_CTRL */
clocks = <&clock_gcc clk_gcc_usb3_phy_aux_clk>,
<&clock_gcc clk_gcc_usb3_phy_pipe_clk>,
@@ -2148,6 +2150,21 @@
qcom,ce-opp-freq = <171430000>;
};
+ qcom_rng: qrng@793000 {
+ compatible = "qcom,msm-rng";
+ reg = <0x793000 0x1000>;
+ qcom,msm-rng-iface-clk;
+ qcom,no-qrng-config;
+ qcom,msm-bus,name = "msm-rng-noc";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <1 618 0 0>, /* No vote */
+ <1 618 0 800>; /* 100 MB/s */
+ clocks = <&clock_gcc clk_gcc_prng_ahb_clk>;
+ clock-names = "iface_clk";
+ };
+
mitigation_profile0: qcom,limit_info-0 {
qcom,temperature-sensor = <&sensor_information1>;
qcom,hotplug-mitigation-enable;
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index 0d1979fdd628..9ebd3fa96505 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -1,66 +1,58 @@
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
+# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
-CONFIG_LOG_BUF_SHIFT=14
-CONFIG_CGROUP_DEBUG=y
+CONFIG_LOG_CPU_MAX_BUF_SHIFT=15
+CONFIG_CGROUPS=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
+CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
-# CONFIG_NET_NS is not set
-CONFIG_SCHED_AUTOGROUP=y
+# CONFIG_PID_NS is not set
+CONFIG_RELAY=y
CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
+# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_JUMP_LABEL=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
-# CONFIG_IOSCHED_DEADLINE is not set
+CONFIG_PARTITION_ADVANCED=y
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSM8996=y
CONFIG_PCI=y
CONFIG_PCI_MSM=y
-CONFIG_PCI_HOST_GENERIC=y
CONFIG_ENABLE_FP_SIMD_SETTINGS=y
CONFIG_SCHED_MC=y
-CONFIG_NR_CPUS=16
+CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
-CONFIG_KSM=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
+CONFIG_HZ_100=y
CONFIG_CMA=y
-CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
-CONFIG_CMDLINE="console=ttyAMA0"
# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
@@ -69,11 +61,8 @@ CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
-CONFIG_PM_DEBUG=y
-CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_SCHED_FREQ_INPUT=y
-CONFIG_CPU_FREQ_STAT_DETAILS=y
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
@@ -92,7 +81,6 @@ CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -186,7 +174,6 @@ CONFIG_IP6_NF_RAW=y
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=y
CONFIG_L2TP_ETH=y
@@ -213,30 +200,36 @@ CONFIG_NET_ACT_SKBEDIT=y
CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
-CONFIG_BPF_JIT=y
CONFIG_SOCKEV_NLMCAST=y
CONFIG_BT=y
+CONFIG_BT_RFCOMM=y
+CONFIG_BT_RFCOMM_TTY=y
+CONFIG_BT_BNEP=y
+CONFIG_BT_BNEP_MC_FILTER=y
+CONFIG_BT_BNEP_PROTO_FILTER=y
+CONFIG_BT_HIDP=y
+# CONFIG_BT_HS is not set
+# CONFIG_BT_LE is not set
+# CONFIG_BT_DEBUGFS is not set
CONFIG_MSM_BT_POWER=y
-CONFIG_BTFM_SLIM=y
-CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
+CONFIG_CFG80211_INTERNAL_REGDB=y
+# CONFIG_CFG80211_CRDA_SUPPORT is not set
CONFIG_RFKILL=y
-CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_VIRTIO_BLK=y
CONFIG_UID_STAT=y
CONFIG_QSEECOM=y
CONFIG_HDCP_QSEECOM=y
+CONFIG_UID_CPUTIME=y
+CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
@@ -247,13 +240,6 @@ CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
CONFIG_SCSI_UFS_QCOM_ICE=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_AHCI_PLATFORM=y
-CONFIG_AHCI_CEVA=y
-CONFIG_AHCI_XGENE=y
-CONFIG_PATA_PLATFORM=y
-CONFIG_PATA_OF_PLATFORM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_CRYPT=y
@@ -264,17 +250,19 @@ CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_DUMMY=y
CONFIG_TUN=y
-CONFIG_VIRTIO_NET=y
-CONFIG_SKY2=y
CONFIG_MSM_RMNET_MHI=y
-CONFIG_SMC91X=y
-CONFIG_SMSC911X=y
CONFIG_PPP=y
CONFIG_PPP_BSDCOMP=y
CONFIG_PPP_DEFLATE=y
+CONFIG_PPP_FILTER=y
CONFIG_PPP_MPPE=y
+CONFIG_PPP_MULTILINK=y
+CONFIG_PPPOE=y
+CONFIG_PPPOL2TP=y
CONFIG_PPPOLAC=y
CONFIG_PPPOPNS=y
+CONFIG_PPP_ASYNC=y
+CONFIG_PPP_SYNC_TTY=y
CONFIG_USB_USBNET=y
CONFIG_WCNSS_MEM_PRE_ALLOC=y
CONFIG_CNSS_CRYPTO=y
@@ -283,7 +271,6 @@ CONFIG_WIL6210=m
CONFIG_CNSS_PCI=y
CONFIG_CLD_LL_CORE=y
CONFIG_BUS_AUTO_SUSPEND=y
-CONFIG_INPUT_JOYDEV=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
CONFIG_KEYBOARD_GPIO=y
@@ -302,28 +289,17 @@ CONFIG_INPUT_KEYCHORD=y
CONFIG_INPUT_UINPUT=y
CONFIG_INPUT_GPIO=y
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_AMBAKMI=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
# CONFIG_DEVKMEM is not set
-CONFIG_SERIAL_8250=y
-CONFIG_SERIAL_8250_CONSOLE=y
-CONFIG_SERIAL_8250_DW=y
-CONFIG_SERIAL_AMBA_PL011=y
-CONFIG_SERIAL_AMBA_PL011_CONSOLE=y
-CONFIG_SERIAL_MSM=y
-CONFIG_SERIAL_MSM_CONSOLE=y
CONFIG_SERIAL_MSM_HS=y
-CONFIG_SERIAL_OF_PLATFORM=y
CONFIG_SERIAL_MSM_SMD=y
-CONFIG_SERIAL_XILINX_PS_UART=y
-CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_DIAG_CHAR=y
-CONFIG_VIRTIO_CONSOLE=y
CONFIG_HW_RANDOM=y
CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
+# CONFIG_ACPI_I2C_OPREGION is not set
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_QUP=y
CONFIG_I2C_MSM_V2=y
@@ -331,7 +307,6 @@ CONFIG_SLIMBUS=y
CONFIG_SLIMBUS_MSM_NGD=y
CONFIG_SOUNDWIRE=y
CONFIG_SPI=y
-CONFIG_SPI_PL022=y
CONFIG_SPI_QUP=y
CONFIG_SPI_SPIDEV=y
CONFIG_SPMI=y
@@ -373,7 +348,6 @@ CONFIG_MEDIA_SUPPORT=y
CONFIG_MEDIA_CAMERA_SUPPORT=y
CONFIG_MEDIA_CONTROLLER=y
CONFIG_VIDEO_V4L2_SUBDEV_API=y
-CONFIG_VIDEO_ADV_DEBUG=y
CONFIG_V4L_PLATFORM_DRIVERS=y
CONFIG_SOC_CAMERA=y
CONFIG_SOC_CAMERA_PLATFORM=y
@@ -392,8 +366,6 @@ CONFIG_MSM_CSIPHY=y
CONFIG_MSM_CSID=y
CONFIG_MSM_EEPROM=y
CONFIG_MSM_ISPIF=y
-CONFIG_MSM_ISPIF_V1=y
-CONFIG_MSM_ISPIF_V2=y
CONFIG_IMX134=y
CONFIG_IMX132=y
CONFIG_OV9724=y
@@ -409,9 +381,9 @@ CONFIG_MSM_JPEGDMA=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_MSM_VIDC_VMEM=y
CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
-CONFIG_FB_ARMCLCD=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_MDSS=y
CONFIG_FB_MSM_MDSS_WRITEBACK=y
@@ -427,17 +399,8 @@ CONFIG_SND_USB_AUDIO=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8996=y
CONFIG_UHID=y
-CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
@@ -448,11 +411,13 @@ CONFIG_USB_OHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=y
CONFIG_USB_DWC3=y
CONFIG_USB_ISP1760=y
+CONFIG_USB_SERIAL=y
CONFIG_USB_MSM_SSPHY_QMP=y
CONFIG_MSM_QUSB_PHY=y
CONFIG_USB_ULPI=y
CONFIG_USB_GADGET=y
CONFIG_USB_GADGET_VBUS_DRAW=500
+CONFIG_USB_GADGET_STORAGE_NUM_BUFFERS=4
CONFIG_USB_CONFIGFS=y
CONFIG_USB_CONFIGFS_SERIAL=y
CONFIG_USB_CONFIGFS_NCM=y
@@ -468,19 +433,13 @@ CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_CDEV=y
CONFIG_USB_CONFIGFS_F_QDSS=y
CONFIG_MMC=y
-CONFIG_MMC_ARMMMCI=y
+CONFIG_MMC_BLOCK_MINORS=32
CONFIG_MMC_SDHCI=y
CONFIG_MMC_SDHCI_PLTFM=y
-CONFIG_MMC_SPI=y
-CONFIG_MMC_DW=y
-CONFIG_MMC_DW_EXYNOS=y
CONFIG_LEDS_QPNP=y
CONFIG_LEDS_QPNP_FLASH=y
CONFIG_LEDS_QPNP_WLED=y
-CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_SWITCH=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
@@ -493,9 +452,6 @@ CONFIG_QCOM_BAM_DMA=y
CONFIG_QCOM_SPS_DMA=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_MMIO=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
@@ -509,9 +465,6 @@ CONFIG_SPS=y
CONFIG_SPS_SUPPORT_NDP_BAM=y
CONFIG_IPA=y
CONFIG_RMNET_IPA=y
-CONFIG_GSI=y
-CONFIG_IPA3=y
-CONFIG_RMNET_IPA3=y
CONFIG_GPIO_USB_DETECT=y
CONFIG_MSM_MHI=y
CONFIG_MSM_MHI_UCI=y
@@ -526,7 +479,6 @@ CONFIG_IOMMU_TESTS=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
CONFIG_MSM_SMD=y
-CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMD_XPRT=y
@@ -570,58 +522,44 @@ CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_EXTCON=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
-CONFIG_PHY_XGENE=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_MSM_TZ_LOG=y
CONFIG_SENSORS_SSC=y
CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_FANOTIFY=y
-CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
-CONFIG_QUOTA=y
-CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
CONFIG_ECRYPT_FS=y
CONFIG_ECRYPT_FS_MESSAGING=y
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
-CONFIG_VIRTUALIZATION=y
-CONFIG_KVM=y
CONFIG_PRINTK_TIME=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
-CONFIG_LOCKUP_DETECTOR=y
CONFIG_PANIC_TIMEOUT=5
CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_DEBUG_BUGVERBOSE is not set
CONFIG_IPC_LOGGING=y
-CONFIG_SCHED_TRACER=y
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
-CONFIG_MEMTEST=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_KEYS=y
CONFIG_PFK=y
CONFIG_SECURITY=y
+CONFIG_SECURITY_NETWORK=y
CONFIG_SECURITY_SELINUX=y
-CONFIG_SECURITY_SMACK=y
CONFIG_CRYPTO_ECHAINIV=y
CONFIG_CRYPTO_XCBC=y
CONFIG_CRYPTO_MD4=y
+CONFIG_CRYPTO_SHA512=y
CONFIG_CRYPTO_TWOFISH=y
CONFIG_CRYPTO_ANSI_CPRNG=y
CONFIG_CRYPTO_DEV_QCRYPTO=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index cf1b6ebf8a7c..e14f34013216 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -393,8 +393,6 @@ CONFIG_MSM_CSIPHY=y
CONFIG_MSM_CSID=y
CONFIG_MSM_EEPROM=y
CONFIG_MSM_ISPIF=y
-CONFIG_MSM_ISPIF_V1=y
-CONFIG_MSM_ISPIF_V2=y
CONFIG_IMX134=y
CONFIG_IMX132=y
CONFIG_OV9724=y
@@ -410,6 +408,7 @@ CONFIG_MSM_JPEGDMA=y
CONFIG_MSM_VIDC_V4L2=y
CONFIG_MSM_VIDC_VMEM=y
CONFIG_MSM_VIDC_GOVERNORS=y
+CONFIG_MSM_SDE_ROTATOR=y
CONFIG_QCOM_KGSL=y
CONFIG_FB=y
CONFIG_FB_ARMCLCD=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index f0a81fa39e30..d826a839beac 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -1,69 +1,59 @@
CONFIG_LOCALVERSION="-perf"
# CONFIG_LOCALVERSION_AUTO is not set
-CONFIG_POSIX_MQUEUE=y
-CONFIG_FHANDLE=y
CONFIG_AUDIT=y
# CONFIG_AUDITSYSCALL is not set
CONFIG_NO_HZ=y
CONFIG_HIGH_RES_TIMERS=y
-CONFIG_BSD_PROCESS_ACCT=y
-CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_TASKSTATS=y
-CONFIG_TASK_DELAY_ACCT=y
-CONFIG_TASK_XACCT=y
-CONFIG_TASK_IO_ACCOUNTING=y
+CONFIG_IRQ_TIME_ACCOUNTING=y
+CONFIG_RCU_EXPERT=y
+CONFIG_RCU_FAST_NO_HZ=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
-CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_SWAP=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_HUGETLB=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
-# CONFIG_NET_NS is not set
+# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_KALLSYMS_ALL=y
+# CONFIG_MEMBARRIER is not set
CONFIG_EMBEDDED=y
# CONFIG_SLUB_DEBUG is not set
# CONFIG_COMPAT_BRK is not set
CONFIG_PROFILING=y
-CONFIG_JUMP_LABEL=y
-CONFIG_CC_STACKPROTECTOR_STRONG=y
+CONFIG_CC_STACKPROTECTOR_REGULAR=y
CONFIG_MODULES=y
CONFIG_MODULE_UNLOAD=y
CONFIG_MODULE_FORCE_UNLOAD=y
CONFIG_MODVERSIONS=y
-# CONFIG_BLK_DEV_BSG is not set
CONFIG_PARTITION_ADVANCED=y
-# CONFIG_IOSCHED_DEADLINE is not set
CONFIG_ARCH_QCOM=y
CONFIG_ARCH_MSMCOBALT=y
CONFIG_ARCH_MSMHAMSTER=y
CONFIG_PCI=y
CONFIG_PCI_MSM=y
-CONFIG_PCI_HOST_GENERIC=y
CONFIG_SCHED_MC=y
-CONFIG_NR_CPUS=16
+CONFIG_NR_CPUS=8
CONFIG_PREEMPT=y
-CONFIG_KSM=y
-CONFIG_TRANSPARENT_HUGEPAGE=y
-CONFIG_CLEANCACHE=y
+CONFIG_HZ_100=y
+CONFIG_ARM64_REG_REBALANCE_ON_CTX_SW=y
CONFIG_CMA=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
+CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
CONFIG_SECCOMP=y
CONFIG_ARMV8_DEPRECATED=y
CONFIG_SWP_EMULATION=y
-CONFIG_CP15_BARRIER_EMULATION=y
-CONFIG_SETEND_EMULATION=y
+# CONFIG_EFI is not set
CONFIG_BUILD_ARM64_APPENDED_DTB_IMAGE=y
# CONFIG_CORE_DUMP_DEFAULT_ELF_HEADERS is not set
CONFIG_COMPAT=y
@@ -71,15 +61,14 @@ CONFIG_PM_AUTOSLEEP=y
CONFIG_PM_WAKELOCKS=y
CONFIG_PM_WAKELOCKS_LIMIT=0
# CONFIG_PM_WAKELOCKS_GC is not set
-CONFIG_ARM_CPUIDLE=y
CONFIG_CPU_FREQ=y
CONFIG_SCHED_FREQ_INPUT=y
-# CONFIG_CPU_FREQ_STAT is not set
CONFIG_CPU_FREQ_GOV_POWERSAVE=y
CONFIG_CPU_FREQ_GOV_USERSPACE=y
CONFIG_CPU_FREQ_GOV_ONDEMAND=y
CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+CONFIG_CPU_BOOST=y
CONFIG_NET=y
CONFIG_PACKET=y
CONFIG_UNIX=y
@@ -92,7 +81,6 @@ CONFIG_IP_MULTIPLE_TABLES=y
CONFIG_IP_ROUTE_VERBOSE=y
CONFIG_IP_PNP=y
CONFIG_IP_PNP_DHCP=y
-CONFIG_IP_PNP_BOOTP=y
# CONFIG_INET_LRO is not set
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
@@ -186,7 +174,6 @@ CONFIG_IP6_NF_RAW=y
CONFIG_BRIDGE_NF_EBTABLES=y
CONFIG_BRIDGE_EBT_BROUTE=y
CONFIG_L2TP=y
-CONFIG_L2TP_DEBUGFS=y
CONFIG_L2TP_V3=y
CONFIG_L2TP_IP=y
CONFIG_L2TP_ETH=y
@@ -213,7 +200,6 @@ CONFIG_NET_ACT_SKBEDIT=y
CONFIG_RMNET_DATA=y
CONFIG_RMNET_DATA_FC=y
CONFIG_RMNET_DATA_DEBUG_PKT=y
-CONFIG_BPF_JIT=y
CONFIG_SOCKEV_NLMCAST=y
CONFIG_BT=y
CONFIG_MSM_BT_POWER=y
@@ -222,22 +208,19 @@ CONFIG_BTFM_SLIM_WCN3990=y
CONFIG_CFG80211=y
CONFIG_CFG80211_INTERNAL_REGDB=y
CONFIG_RFKILL=y
-CONFIG_NET_9P=y
-CONFIG_NET_9P_VIRTIO=y
CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
-CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
-CONFIG_DEVTMPFS=y
-CONFIG_DEVTMPFS_MOUNT=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
CONFIG_DMA_CMA=y
+# CONFIG_PNP_DEBUG_MESSAGES is not set
+CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_BLK_DEV_RAM=y
CONFIG_BLK_DEV_RAM_SIZE=8192
-CONFIG_VIRTIO_BLK=y
CONFIG_UID_STAT=y
CONFIG_QSEECOM=y
+CONFIG_SCSI=y
CONFIG_BLK_DEV_SD=y
CONFIG_CHR_DEV_SG=y
CONFIG_CHR_DEV_SCH=y
@@ -247,13 +230,6 @@ CONFIG_SCSI_SCAN_ASYNC=y
CONFIG_SCSI_UFSHCD=y
CONFIG_SCSI_UFSHCD_PLATFORM=y
CONFIG_SCSI_UFS_QCOM=y
-CONFIG_ATA=y
-CONFIG_SATA_AHCI=y
-CONFIG_SATA_AHCI_PLATFORM=y
-CONFIG_AHCI_CEVA=y
-CONFIG_AHCI_XGENE=y
-CONFIG_PATA_PLATFORM=y
-CONFIG_PATA_OF_PLATFORM=y
CONFIG_MD=y
CONFIG_BLK_DEV_DM=y
CONFIG_DM_UEVENT=y
@@ -261,7 +237,6 @@ CONFIG_DM_VERITY=y
CONFIG_NETDEVICES=y
CONFIG_BONDING=y
CONFIG_TUN=y
-CONFIG_VIRTIO_NET=y
CONFIG_SKY2=y
CONFIG_RNDIS_IPA=y
CONFIG_SMSC911X=y
@@ -279,6 +254,7 @@ CONFIG_CLD_LL_CORE=y
CONFIG_QPNP_POWER_ON=y
CONFIG_INPUT_EVDEV=y
CONFIG_INPUT_KEYRESET=y
+# CONFIG_INPUT_MOUSE is not set
CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_RMI_DEV_v21=y
@@ -287,7 +263,6 @@ CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_UINPUT=y
# CONFIG_SERIO_SERPORT is not set
-CONFIG_SERIO_AMBAKMI=y
# CONFIG_VT is not set
# CONFIG_LEGACY_PTYS is not set
# CONFIG_DEVMEM is not set
@@ -295,8 +270,8 @@ CONFIG_SERIO_AMBAKMI=y
CONFIG_SERIAL_MSM_HS=y
CONFIG_SERIAL_MSM_SMD=y
CONFIG_DIAG_CHAR=y
-CONFIG_VIRTIO_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
@@ -399,17 +374,8 @@ CONFIG_SND=y
CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSMCOBALT=y
CONFIG_UHID=y
-CONFIG_HID_A4TECH=y
CONFIG_HID_APPLE=y
-CONFIG_HID_BELKIN=y
-CONFIG_HID_CHERRY=y
-CONFIG_HID_CHICONY=y
-CONFIG_HID_CYPRESS=y
-CONFIG_HID_EZKEY=y
-CONFIG_HID_KENSINGTON=y
-CONFIG_HID_LOGITECH=y
CONFIG_HID_MICROSOFT=y
-CONFIG_HID_MONTEREY=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
@@ -453,8 +419,6 @@ CONFIG_LEDS_QPNP_FLASH_V2=y
CONFIG_LEDS_QPNP_WLED=y
CONFIG_LEDS_SYSCON=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_HEARTBEAT=y
-CONFIG_LEDS_TRIGGER_CPU=y
CONFIG_SWITCH=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_QPNP=y
@@ -462,9 +426,6 @@ CONFIG_DMADEVICES=y
CONFIG_QCOM_SPS_DMA=y
CONFIG_UIO=y
CONFIG_UIO_MSM_SHAREDMEM=y
-CONFIG_VIRTIO_PCI=y
-CONFIG_VIRTIO_BALLOON=y
-CONFIG_VIRTIO_MMIO=y
CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
@@ -489,7 +450,6 @@ CONFIG_ARM_SMMU=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
CONFIG_MSM_SMD=y
-CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
CONFIG_MSM_GLINK_SMD_XPRT=y
@@ -510,9 +470,7 @@ CONFIG_QCOM_SCM=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
-CONFIG_MSM_RUN_QUEUE_STATS=y
CONFIG_MSM_BOOT_STATS=y
-CONFIG_QCOM_CPUSS_DUMP=y
CONFIG_MSM_SUBSYSTEM_RESTART=y
CONFIG_MSM_PIL=y
CONFIG_MSM_PIL_SSR_GENERIC=y
@@ -529,36 +487,22 @@ CONFIG_QCOM_DEVFREQ_DEVBW=y
CONFIG_EXTCON=y
CONFIG_PWM=y
CONFIG_PWM_QPNP=y
-CONFIG_ARM_GIC_V3_ACL=y
-CONFIG_PHY_XGENE=y
CONFIG_ANDROID=y
CONFIG_ANDROID_BINDER_IPC=y
-CONFIG_MSM_TZ_LOG=y
CONFIG_SENSORS_SSC=y
CONFIG_EXT2_FS=y
+CONFIG_EXT2_FS_XATTR=y
CONFIG_EXT3_FS=y
CONFIG_EXT4_FS_SECURITY=y
-CONFIG_FANOTIFY=y
-CONFIG_FANOTIFY_ACCESS_PERMISSIONS=y
-CONFIG_QUOTA=y
-CONFIG_AUTOFS4_FS=y
CONFIG_FUSE_FS=y
-CONFIG_CUSE=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-CONFIG_HUGETLBFS=y
-CONFIG_EFIVAR_FS=y
# CONFIG_MISC_FILESYSTEMS is not set
-CONFIG_NFS_FS=y
-CONFIG_NFS_V4=y
-CONFIG_ROOT_NFS=y
-CONFIG_9P_FS=y
CONFIG_NLS_CODEPAGE_437=y
CONFIG_NLS_ISO8859_1=y
CONFIG_PRINTK_TIME=y
-CONFIG_DYNAMIC_DEBUG=y
CONFIG_DEBUG_INFO=y
CONFIG_MAGIC_SYSRQ=y
CONFIG_PANIC_TIMEOUT=5
@@ -566,12 +510,11 @@ CONFIG_SCHEDSTATS=y
CONFIG_TIMER_STATS=y
# CONFIG_DEBUG_PREEMPT is not set
CONFIG_IPC_LOGGING=y
-CONFIG_FUNCTION_TRACER=y
-CONFIG_SCHED_TRACER=y
CONFIG_CPU_FREQ_SWITCH_PROFILER=y
-CONFIG_MEMTEST=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
+CONFIG_DEBUG_ALIGN_RODATA=y
+CONFIG_KEYS=y
CONFIG_SECURITY=y
CONFIG_SECURITY_SELINUX=y
CONFIG_SECURITY_SMACK=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 8eec686715a4..a8bf9867180e 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -308,7 +308,8 @@ CONFIG_SERIAL_XILINX_PS_UART=y
CONFIG_SERIAL_XILINX_PS_UART_CONSOLE=y
CONFIG_DIAG_CHAR=y
CONFIG_VIRTIO_CONSOLE=y
-# CONFIG_HW_RANDOM is not set
+CONFIG_HW_RANDOM=y
+CONFIG_HW_RANDOM_MSM_LEGACY=y
CONFIG_MSM_ADSPRPC=y
CONFIG_I2C_CHARDEV=y
CONFIG_I2C_MSM_V2=y
diff --git a/drivers/edac/cortex_arm64_edac.c b/drivers/edac/cortex_arm64_edac.c
index 1fa8e9a62082..91ff9cf44339 100644
--- a/drivers/edac/cortex_arm64_edac.c
+++ b/drivers/edac/cortex_arm64_edac.c
@@ -95,6 +95,12 @@
#define A57_L2_UE 7
#define L2_EXT_UE 8
#define CCI_UE 9
+#define KRYO2XX_SILVER_L1_CE 10
+#define KRYO2XX_SILVER_L1_UE 11
+#define KRYO2XX_SILVER_L2_CE 12
+#define KRYO2XX_SILVER_L2_UE 13
+#define KRYO2XX_GOLD_L2_CE 14
+#define KRYO2XX_GOLD_L2_UE 15
#ifdef CONFIG_EDAC_CORTEX_ARM64_PANIC_ON_UE
#define ARM64_ERP_PANIC_ON_UE 1
@@ -157,6 +163,12 @@ static const struct errors_edac errors[] = {
{"A57 L2 Uncorrectable Error", edac_device_handle_ue },
{"L2 External Error", edac_device_handle_ue },
{"CCI Error", edac_device_handle_ue },
+ {"Kryo2xx Silver L1 Correctable Error", edac_device_handle_ce },
+ {"Kryo2xx Silver L1 Uncorrectable Error", edac_device_handle_ue },
+ {"Kryo2xx Silver L2 Correctable Error", edac_device_handle_ce },
+ {"Kryo2xx Silver L2 Uncorrectable Error", edac_device_handle_ue },
+ {"Kryo2xx Gold L2 Correctable Error", edac_device_handle_ce },
+ {"Kryo2xx Gold L2 Uncorrectable Error", edac_device_handle_ue },
};
#define read_l2merrsr_el1 ({ \
@@ -317,11 +329,11 @@ static void ca53_parse_cpumerrsr(struct erp_local_data *ed)
(int) A53_CPUMERRSR_OTHER(cpumerrsr));
if (ed->err == SBE)
- errors[A53_L1_CE].func(ed->drv->edev_ctl, smp_processor_id(),
- L1_CACHE, errors[A53_L1_CE].msg);
+ errors[KRYO2XX_SILVER_L1_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+ L1_CACHE, errors[KRYO2XX_SILVER_L1_CE].msg);
else if (ed->err == DBE)
- errors[A53_L1_UE].func(ed->drv->edev_ctl, smp_processor_id(),
- L1_CACHE, errors[A53_L1_UE].msg);
+ errors[KRYO2XX_SILVER_L1_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+ L1_CACHE, errors[KRYO2XX_SILVER_L1_UE].msg);
write_cpumerrsr_el1(0);
}
@@ -375,11 +387,11 @@ static void ca53_parse_l2merrsr(struct erp_local_data *ed)
(int) A53_L2MERRSR_OTHER(l2merrsr));
if (ed->err == SBE)
- errors[A53_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
- L2_CACHE, errors[A53_L2_CE].msg);
+ errors[KRYO2XX_SILVER_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+ L2_CACHE, errors[KRYO2XX_SILVER_L2_CE].msg);
else if (ed->err == DBE)
- errors[A53_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
- L2_CACHE, errors[A53_L2_UE].msg);
+ errors[KRYO2XX_SILVER_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+ L2_CACHE, errors[KRYO2XX_SILVER_L2_UE].msg);
write_l2merrsr_el1(0);
}
@@ -530,7 +542,7 @@ static void kryo2xx_gold_parse_l2merrsr(struct erp_local_data *ed)
if (KRYO2XX_GOLD_L2MERRSR_FATAL(l2merrsr))
ed->err = DBE;
- edac_printk(KERN_CRIT, EDAC_CPU, "Gold L2 %s Error detected\n",
+ edac_printk(KERN_CRIT, EDAC_CPU, "Kryo2xx Gold L2 %s Error detected\n",
err_name[ed->err]);
kryo2xx_gold_print_error_state_regs();
if (ed->err == DBE)
@@ -551,11 +563,11 @@ static void kryo2xx_gold_parse_l2merrsr(struct erp_local_data *ed)
(int) KRYO2XX_GOLD_L2MERRSR_OTHER(l2merrsr));
if (ed->err == SBE) {
- errors[A57_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
- L2_CACHE, errors[A57_L2_CE].msg);
+ errors[KRYO2XX_GOLD_L2_CE].func(ed->drv->edev_ctl, smp_processor_id(),
+ L2_CACHE, errors[KRYO2XX_GOLD_L2_CE].msg);
} else if (ed->err == DBE) {
- errors[A57_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
- L2_CACHE, errors[A57_L2_UE].msg);
+ errors[KRYO2XX_GOLD_L2_UE].func(ed->drv->edev_ctl, smp_processor_id(),
+ L2_CACHE, errors[KRYO2XX_GOLD_L2_UE].msg);
}
write_l2merrsr_el1(0);
}
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index 372bfad48a09..913cedb885ad 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -728,7 +728,7 @@
#define A5XX_VBIF_TEST_BUS2_CTRL0 0x3087
#define A5XX_VBIF_TEST_BUS2_CTRL1 0x3088
-#define A5XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK 0xF
+#define A5XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK 0x1FF
#define A5XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT 0x0
#define A5XX_VBIF_TEST_BUS_OUT 0x308c
diff --git a/drivers/gpu/msm/adreno_a5xx_snapshot.c b/drivers/gpu/msm/adreno_a5xx_snapshot.c
index dc5dee7ce0c9..4f368a8f93f3 100644
--- a/drivers/gpu/msm/adreno_a5xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a5xx_snapshot.c
@@ -126,7 +126,7 @@ static const struct adreno_debugbus_block a5xx_debugbus_blocks[] = {
};
#define A5XX_NUM_AXI_ARB_BLOCKS 2
-#define A5XX_NUM_XIN_BLOCKS 5
+#define A5XX_NUM_XIN_BLOCKS 4
/* a5xx_snapshot_cp_pm4() - Dump PM4 data in snapshot */
static size_t a5xx_snapshot_cp_pm4(struct kgsl_device *device, u8 *buf,
@@ -202,11 +202,11 @@ static size_t a5xx_snapshot_vbif_debugbus(struct kgsl_device *device,
/*
* Total number of VBIF data words considering 3 sections:
* 2 arbiter blocks of 16 words
- * 5 AXI XIN blocks of 4 dwords each
- * 5 core clock side XIN blocks of 5 dwords each
+ * 4 AXI XIN blocks of 18 dwords each
+ * 4 core clock side XIN blocks of 12 dwords each
*/
unsigned int dwords = (16 * A5XX_NUM_AXI_ARB_BLOCKS) +
- (4 * A5XX_NUM_XIN_BLOCKS) + (5 * A5XX_NUM_XIN_BLOCKS);
+ (18 * A5XX_NUM_XIN_BLOCKS) + (12 * A5XX_NUM_XIN_BLOCKS);
unsigned int *data = (unsigned int *)(buf + sizeof(*header));
size_t size;
unsigned int reg_clk;
@@ -244,7 +244,7 @@ static size_t a5xx_snapshot_vbif_debugbus(struct kgsl_device *device,
/* XIN blocks AXI side */
for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
kgsl_regwrite(device, A5XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
- for (j = 0; j < 4; j++) {
+ for (j = 0; j < 18; j++) {
kgsl_regwrite(device, A5XX_VBIF_TEST_BUS2_CTRL1,
((j & A5XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_MASK)
<< A5XX_VBIF_TEST_BUS2_CTRL1_DATA_SEL_SHIFT));
@@ -257,7 +257,7 @@ static size_t a5xx_snapshot_vbif_debugbus(struct kgsl_device *device,
/* XIN blocks core clock side */
for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
kgsl_regwrite(device, A5XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
- for (j = 0; j < 5; j++) {
+ for (j = 0; j < 12; j++) {
kgsl_regwrite(device, A5XX_VBIF_TEST_BUS1_CTRL1,
((j & A5XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_MASK)
<< A5XX_VBIF_TEST_BUS1_CTRL1_DATA_SEL_SHIFT));
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 51dc781b2bd4..2563591f376e 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -388,6 +388,17 @@ kgsl_mem_entry_untrack_gpuaddr(struct kgsl_process_private *process,
kgsl_mmu_put_gpuaddr(pagetable, &entry->memdesc);
}
+/* Commit the entry to the process so it can be accessed by other operations */
+static void kgsl_mem_entry_commit_process(struct kgsl_mem_entry *entry)
+{
+ if (!entry)
+ return;
+
+ spin_lock(&entry->priv->mem_lock);
+ idr_replace(&entry->priv->mem_idr, entry, entry->id);
+ spin_unlock(&entry->priv->mem_lock);
+}
+
/**
* kgsl_mem_entry_attach_process - Attach a mem_entry to its owner process
* @entry: the memory entry
@@ -418,7 +429,8 @@ kgsl_mem_entry_attach_process(struct kgsl_mem_entry *entry,
idr_preload(GFP_KERNEL);
spin_lock(&process->mem_lock);
- id = idr_alloc(&process->mem_idr, entry, 1, 0, GFP_NOWAIT);
+ /* Allocate the ID but don't attach the pointer just yet */
+ id = idr_alloc(&process->mem_idr, NULL, 1, 0, GFP_NOWAIT);
spin_unlock(&process->mem_lock);
idr_preload_end();
@@ -2317,6 +2329,7 @@ long kgsl_ioctl_gpuobj_import(struct kgsl_device_private *dev_priv,
trace_kgsl_mem_map(entry, fd);
+ kgsl_mem_entry_commit_process(entry);
return 0;
unmap:
@@ -2580,6 +2593,7 @@ long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
trace_kgsl_mem_map(entry, param->fd);
+ kgsl_mem_entry_commit_process(entry);
return result;
error_attach:
@@ -2971,6 +2985,7 @@ static struct kgsl_mem_entry *gpumem_alloc_entry(
entry->memdesc.size);
trace_kgsl_mem_alloc(entry);
+ kgsl_mem_entry_commit_process(entry);
return entry;
err:
kfree(entry);
diff --git a/drivers/gpu/msm/kgsl_sync.c b/drivers/gpu/msm/kgsl_sync.c
index c93178fed4ca..358b3b038899 100644
--- a/drivers/gpu/msm/kgsl_sync.c
+++ b/drivers/gpu/msm/kgsl_sync.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -466,23 +466,23 @@ long kgsl_ioctl_syncsource_create(struct kgsl_device_private *dev_priv,
goto out;
}
+ kref_init(&syncsource->refcount);
+ syncsource->private = private;
+
idr_preload(GFP_KERNEL);
spin_lock(&private->syncsource_lock);
id = idr_alloc(&private->syncsource_idr, syncsource, 1, 0, GFP_NOWAIT);
- spin_unlock(&private->syncsource_lock);
- idr_preload_end();
-
if (id > 0) {
- kref_init(&syncsource->refcount);
syncsource->id = id;
- syncsource->private = private;
-
param->id = id;
ret = 0;
} else {
ret = id;
}
+ spin_unlock(&private->syncsource_lock);
+ idr_preload_end();
+
out:
if (ret) {
if (syncsource && syncsource->oneshot)
@@ -540,25 +540,23 @@ long kgsl_ioctl_syncsource_destroy(struct kgsl_device_private *dev_priv,
{
struct kgsl_syncsource_destroy *param = data;
struct kgsl_syncsource *syncsource = NULL;
- struct kgsl_process_private *private;
-
- syncsource = kgsl_syncsource_get(dev_priv->process_priv,
- param->id);
+ struct kgsl_process_private *private = dev_priv->process_priv;
- if (syncsource == NULL)
- return -EINVAL;
+ spin_lock(&private->syncsource_lock);
+ syncsource = idr_find(&private->syncsource_idr, param->id);
- private = syncsource->private;
+ if (syncsource) {
+ idr_remove(&private->syncsource_idr, param->id);
+ syncsource->id = 0;
+ }
- spin_lock(&private->syncsource_lock);
- idr_remove(&private->syncsource_idr, param->id);
- syncsource->id = 0;
spin_unlock(&private->syncsource_lock);
+ if (syncsource == NULL)
+ return -EINVAL;
+
/* put reference from syncsource creation */
kgsl_syncsource_put(syncsource);
- /* put reference from getting the syncsource above */
- kgsl_syncsource_put(syncsource);
return 0;
}
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index f648908d334e..d3f6fa3fa52d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -85,6 +85,7 @@ struct msm_isp_bufq *msm_isp_get_bufq(
/* bufq_handle cannot be 0 */
if ((bufq_handle == 0) ||
+ bufq_index >= BUF_MGR_NUM_BUF_Q ||
(bufq_index > buf_mgr->num_buf_q))
return NULL;
@@ -1331,8 +1332,6 @@ static int msm_isp_buf_mgr_debug(struct msm_isp_buf_mgr *buf_mgr,
for (i = 0; i < BUF_MGR_NUM_BUF_Q; i++) {
bufq = &buf_mgr->bufq[i];
- if (!bufq)
- continue;
spin_lock_irqsave(&bufq->bufq_lock, flags);
if (!bufq->bufq_handle) {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 96f4d15c6437..cea181bcd2e0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -60,7 +60,7 @@ int msm_isp_axi_create_stream(struct vfe_device *vfe_dev,
struct msm_vfe_axi_shared_data *axi_data,
struct msm_vfe_axi_stream_request_cmd *stream_cfg_cmd)
{
- int i = stream_cfg_cmd->stream_src;
+ uint32_t i = stream_cfg_cmd->stream_src;
if (i >= VFE_AXI_SRC_MAX) {
pr_err("%s:%d invalid stream_src %d\n", __func__, __LINE__,
@@ -1682,6 +1682,7 @@ static void msm_isp_handle_done_buf_frame_id_mismatch(
struct msm_isp_event_data error_event;
int ret = 0;
+ memset(&error_event, 0, sizeof(error_event));
error_event.frame_id =
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
error_event.u.error_info.err_type =
@@ -1705,7 +1706,7 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info, struct msm_isp_buffer *buf,
struct timeval *time_stamp, uint32_t frame_id)
{
- int rc, ret;
+ int rc;
unsigned long flags;
struct msm_isp_event_data buf_event;
uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
@@ -1767,7 +1768,7 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev,
ISP_EVENT_BUF_FATAL_ERROR);
- return ret;
+ return rc;
}
if (!rc) {
ISP_DBG("%s:%d vfe_id %d Buffer dropped %d\n",
@@ -1824,7 +1825,7 @@ static int msm_isp_process_done_buf(struct vfe_device *vfe_dev,
if (rc == -EFAULT) {
msm_isp_halt_send_error(vfe_dev,
ISP_EVENT_BUF_FATAL_ERROR);
- return ret;
+ return rc;
}
}
@@ -2403,7 +2404,8 @@ static int msm_isp_update_dual_HW_ms_info_at_stop(
static int msm_isp_update_dual_HW_axi(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- int rc, vfe_id;
+ int rc = 0;
+ int vfe_id;
uint32_t stream_idx = HANDLE_TO_IDX(stream_info->stream_handle);
struct dual_vfe_resource *dual_vfe_res = NULL;
@@ -2868,6 +2870,7 @@ static int msm_isp_return_empty_buffer(struct vfe_device *vfe_dev,
return rc;
}
+ memset(&error_event, 0, sizeof(error_event));
error_event.frame_id = frame_id;
error_event.u.error_info.err_type = ISP_ERROR_RETURN_EMPTY_BUFFER;
error_event.u.error_info.session_id = stream_info->session_id;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index 18e87c49c518..e98c99fcb62d 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -118,7 +118,7 @@ static int msm_isp_stats_cfg_ping_pong_address(struct vfe_device *vfe_dev,
= buf;
}
}
- } else if (!vfe_dev->is_split) {
+ } else {
if (buf)
vfe_dev->hw_info->vfe_ops.stats_ops.
update_ping_pong_addr(
@@ -289,8 +289,7 @@ static int32_t msm_isp_stats_configure(struct vfe_device *vfe_dev,
if (rc < 0) {
pr_err("%s:%d failed: stats buf divert rc %d\n",
__func__, __LINE__, rc);
- if (0 == result)
- result = rc;
+ result = rc;
}
}
if (is_composite && comp_stats_type_mask) {
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
index ba77695ab955..e1c6f4d29be0 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_util.c
@@ -706,6 +706,7 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
unsigned int cmd, void *arg)
{
long rc = 0;
+ long rc2 = 0;
struct vfe_device *vfe_dev = v4l2_get_subdevdata(sd);
if (!vfe_dev || !vfe_dev->vfe_base) {
@@ -785,7 +786,9 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_reset(vfe_dev);
- rc |= msm_isp_axi_reset(vfe_dev, arg);
+ rc2 = msm_isp_axi_reset(vfe_dev, arg);
+ if (!rc && rc2)
+ rc = rc2;
} else {
pr_err_ratelimited("%s: no HW reset, halt enforced.\n",
__func__);
@@ -797,7 +800,9 @@ static long msm_isp_ioctl_unlocked(struct v4l2_subdev *sd,
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
rc = msm_isp_stats_restart(vfe_dev);
- rc |= msm_isp_axi_restart(vfe_dev, arg);
+ rc2 = msm_isp_axi_restart(vfe_dev, arg);
+ if (!rc && rc2)
+ rc = rc2;
} else {
pr_err_ratelimited("%s: no AXI restart, halt enforced.\n",
__func__);
@@ -1581,8 +1586,6 @@ void msm_isp_update_error_frame_count(struct vfe_device *vfe_dev)
{
struct msm_vfe_error_info *error_info = &vfe_dev->error_info;
error_info->info_dump_frame_count++;
- if (error_info->info_dump_frame_count == 0)
- error_info->info_dump_frame_count++;
}
@@ -1681,6 +1684,7 @@ static void msm_isp_process_overflow_irq(
if (atomic_read(&vfe_dev->error_info.overflow_state)
!= HALT_ENFORCED) {
+ memset(&error_event, 0, sizeof(error_event));
error_event.frame_id =
vfe_dev->axi_data.src_info[VFE_PIX_0].frame_id;
error_event.u.error_info.err_type =
@@ -1698,10 +1702,8 @@ void msm_isp_reset_burst_count_and_frame_drop(
stream_info->stream_type != BURST_STREAM) {
return;
}
- if (stream_info->stream_type == BURST_STREAM &&
- stream_info->num_burst_capture != 0) {
+ if (stream_info->num_burst_capture != 0)
msm_isp_reset_framedrop(vfe_dev, stream_info);
- }
}
static void msm_isp_enqueue_tasklet_cmd(struct vfe_device *vfe_dev,
diff --git a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
index d0d3b8d8dfcb..4c0991e0dd26 100644
--- a/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
+++ b/drivers/media/platform/msm/camera_v2/ispif/msm_ispif.c
@@ -453,23 +453,23 @@ static void msm_ispif_sel_csid_core(struct ispif_device *ispif,
switch (intftype) {
case PIX0:
data &= ~(BIT(1) | BIT(0));
- data |= csid;
+ data |= (uint32_t) csid;
break;
case RDI0:
data &= ~(BIT(5) | BIT(4));
- data |= (csid << 4);
+ data |= ((uint32_t) csid) << 4;
break;
case PIX1:
data &= ~(BIT(9) | BIT(8));
- data |= (csid << 8);
+ data |= ((uint32_t) csid) << 8;
break;
case RDI1:
data &= ~(BIT(13) | BIT(12));
- data |= (csid << 12);
+ data |= ((uint32_t) csid) << 12;
break;
case RDI2:
data &= ~(BIT(21) | BIT(20));
- data |= (csid << 20);
+ data |= ((uint32_t) csid) << 20;
break;
}
@@ -545,9 +545,9 @@ static void msm_ispif_enable_intf_cids(struct ispif_device *ispif,
data = msm_camera_io_r(ispif->base + intf_addr);
if (enable)
- data |= cid_mask;
+ data |= (uint32_t) cid_mask;
else
- data &= ~cid_mask;
+ data &= ~((uint32_t) cid_mask);
msm_camera_io_w_mb(data, ispif->base + intf_addr);
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
index 1af1e3fe0d7c..b1c23823c122 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.c
@@ -841,13 +841,12 @@ static int32_t msm_cci_i2c_read(struct v4l2_subdev *sd,
goto ERROR;
}
- if (read_cfg->addr_type == MSM_CAMERA_I2C_BYTE_ADDR)
- val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4) |
- ((read_cfg->addr & 0xFF) << 8);
- if (read_cfg->addr_type == MSM_CAMERA_I2C_WORD_ADDR)
- val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4) |
- (((read_cfg->addr & 0xFF00) >> 8) << 8) |
- ((read_cfg->addr & 0xFF) << 16);
+ val = CCI_I2C_WRITE_DISABLE_P_CMD | (read_cfg->addr_type << 4);
+ for (i = 0; i < read_cfg->addr_type; i++) {
+ val |= ((read_cfg->addr >> (i << 3)) & 0xFF) <<
+ ((read_cfg->addr_type - i) << 3);
+ }
+
rc = msm_cci_write_i2c_queue(cci_dev, val, master, queue);
if (rc < 0) {
CDBG("%s failed line %d\n", __func__, __LINE__);
diff --git a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
index 2cc03aedd22e..6e39d814bd73 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/cci/msm_cci.h
@@ -90,7 +90,7 @@ struct msm_camera_cci_gpio_cfg {
};
struct msm_camera_cci_i2c_read_cfg {
- uint16_t addr;
+ uint32_t addr;
enum msm_camera_i2c_reg_addr_type addr_type;
uint8_t *data;
uint16_t num_byte;
diff --git a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
index 4a31b93ec37f..8f911d362477 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/io/msm_camera_cci_i2c.c
@@ -27,7 +27,8 @@ int32_t msm_camera_cci_i2c_read(struct msm_camera_i2c_client *client,
struct msm_camera_cci_ctrl cci_ctrl;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
- && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_3B_ADDR)
|| (data_type != MSM_CAMERA_I2C_BYTE_DATA
&& data_type != MSM_CAMERA_I2C_WORD_DATA))
return rc;
@@ -63,7 +64,8 @@ int32_t msm_camera_cci_i2c_read_seq(struct msm_camera_i2c_client *client,
struct msm_camera_cci_ctrl cci_ctrl;
if ((client->addr_type != MSM_CAMERA_I2C_BYTE_ADDR
- && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR)
+ && client->addr_type != MSM_CAMERA_I2C_WORD_ADDR
+ && client->addr_type != MSM_CAMERA_I2C_3B_ADDR)
|| num_byte == 0)
return rc;
diff --git a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
index 521bdafab76f..9f05ff6b4d9c 100644
--- a/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
+++ b/drivers/platform/msm/ipa/ipa_clients/odu_bridge.c
@@ -792,6 +792,7 @@ static void odu_debugfs_init(void)
goto fail;
}
+ return;
fail:
debugfs_remove_recursive(dent);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 858693c1fc44..535ed4d767b4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -2634,6 +2634,15 @@ static int ipa3_setup_apps_pipes(void)
struct ipa_sys_connect_params sys_in;
int result = 0;
+ if (ipa3_ctx->gsi_ch20_wa) {
+ IPADBG("Allocating GSI physical channel 20\n");
+ result = ipa_gsi_ch20_wa();
+ if (result) {
+ IPAERR("ipa_gsi_ch20_wa failed %d\n", result);
+ goto fail_cmd;
+ }
+ }
+
/* CMD OUT (AP->IPA) */
memset(&sys_in, 0, sizeof(struct ipa_sys_connect_params));
sys_in.client = IPA_CLIENT_APPS_CMD_PROD;
@@ -3984,6 +3993,7 @@ static int ipa3_pre_init(const struct ipa3_plat_drv_res *resource_p,
ipa3_ctx->transport_prototype = resource_p->transport_prototype;
ipa3_ctx->ee = resource_p->ee;
ipa3_ctx->apply_rg10_wa = resource_p->apply_rg10_wa;
+ ipa3_ctx->gsi_ch20_wa = resource_p->gsi_ch20_wa;
ipa3_ctx->ipa3_active_clients_logging.log_rdy = false;
/* default aggregation parameters */
@@ -4482,6 +4492,7 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->ipa_wdi2 = false;
ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
ipa_drv_res->apply_rg10_wa = false;
+ ipa_drv_res->gsi_ch20_wa = false;
smmu_disable_htw = of_property_read_bool(pdev->dev.of_node,
"qcom,smmu-disable-htw");
@@ -4667,6 +4678,13 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->apply_rg10_wa
? "True" : "False");
+ ipa_drv_res->gsi_ch20_wa =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,do-not-use-ch-gsi-20");
+ IPADBG(": GSI CH 20 WA is = %s\n",
+		ipa_drv_res->gsi_ch20_wa
+ ? "Needed" : "Not needed");
+
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 0ca7b662aa17..8d0fe0c9d205 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -921,6 +921,7 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
nbytes = scnprintf(dbg_buff, IPA_MAX_MSG_LEN,
"sw_tx=%u\n"
"hw_tx=%u\n"
+ "tx_non_linear=%u\n"
"tx_compl=%u\n"
"wan_rx=%u\n"
"stat_compl=%u\n"
@@ -936,6 +937,7 @@ static ssize_t ipa3_read_stats(struct file *file, char __user *ubuf,
"flow_disable=%u\n",
ipa3_ctx->stats.tx_sw_pkts,
ipa3_ctx->stats.tx_hw_pkts,
+ ipa3_ctx->stats.tx_non_linear,
ipa3_ctx->stats.tx_pkts_compl,
ipa3_ctx->stats.rx_pkts,
ipa3_ctx->stats.stat_compl,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 3b5ce662e3a4..e88dc349be7b 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -55,6 +55,10 @@
#define IPA_GSI_MAX_CH_LOW_WEIGHT 15
#define IPA_GSI_EVT_RING_INT_MODT 3200 /* 0.1s under 32KHz clock */
+#define IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC 10
+/* The below virtual channel cannot be used by any entity */
+#define IPA_GSI_CH_20_WA_VIRT_CHAN 29
+
static struct sk_buff *ipa3_get_skb_ipa_rx(unsigned int len, gfp_t flags);
static void ipa3_replenish_wlan_rx_cache(struct ipa3_sys_context *sys);
static void ipa3_replenish_rx_cache(struct ipa3_sys_context *sys);
@@ -109,11 +113,19 @@ static void ipa3_wq_write_done_common(struct ipa3_sys_context *sys,
list_del(&tx_pkt->link);
sys->len--;
spin_unlock_bh(&sys->spinlock);
- if (!tx_pkt->no_unmap_dma)
- dma_unmap_single(ipa3_ctx->pdev,
+ if (!tx_pkt->no_unmap_dma) {
+ if (tx_pkt->type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa3_ctx->pdev,
tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa3_ctx->pdev,
+					tx_pkt->mem.phys_base,
+					tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ }
+ }
if (tx_pkt->callback)
tx_pkt->callback(tx_pkt->user1, tx_pkt->user2);
@@ -543,25 +555,48 @@ int ipa3_send(struct ipa3_sys_context *sys,
}
tx_pkt->type = desc[i].type;
- tx_pkt->mem.base = desc[i].pyld;
- tx_pkt->mem.size = desc[i].len;
- if (!desc[i].dma_address_valid) {
- tx_pkt->mem.phys_base =
- dma_map_single(ipa3_ctx->pdev,
- tx_pkt->mem.base,
- tx_pkt->mem.size,
- DMA_TO_DEVICE);
- if (!tx_pkt->mem.phys_base) {
- IPAERR("failed to do dma map.\n");
- fail_dma_wrap = 1;
- goto failure;
+ if (desc[i].type != IPA_DATA_DESC_SKB_PAGED) {
+ tx_pkt->mem.base = desc[i].pyld;
+ tx_pkt->mem.size = desc[i].len;
+
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ dma_map_single(ipa3_ctx->pdev,
+ tx_pkt->mem.base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ if (!tx_pkt->mem.phys_base) {
+ IPAERR("failed to do dma map.\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+ } else {
+ tx_pkt->mem.phys_base =
+ desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
}
} else {
- tx_pkt->mem.phys_base = desc[i].dma_address;
- tx_pkt->no_unmap_dma = true;
+ tx_pkt->mem.base = desc[i].frag;
+ tx_pkt->mem.size = desc[i].len;
+
+ if (!desc[i].dma_address_valid) {
+ tx_pkt->mem.phys_base =
+ skb_frag_dma_map(ipa3_ctx->pdev,
+ desc[i].frag,
+ 0, tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ if (!tx_pkt->mem.phys_base) {
+ IPAERR("dma map failed\n");
+ fail_dma_wrap = 1;
+ goto failure;
+ }
+ } else {
+ tx_pkt->mem.phys_base =
+ desc[i].dma_address;
+ tx_pkt->no_unmap_dma = true;
+ }
}
-
tx_pkt->sys = sys;
tx_pkt->callback = desc[i].callback;
tx_pkt->user1 = desc[i].user1;
@@ -660,9 +695,15 @@ failure:
for (j = 0; j < i; j++) {
next_pkt = list_next_entry(tx_pkt, link);
list_del(&tx_pkt->link);
- dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
+		if (desc[j].type != IPA_DATA_DESC_SKB_PAGED) {
+ dma_unmap_single(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
+ tx_pkt->mem.size,
+ DMA_TO_DEVICE);
+ } else {
+ dma_unmap_page(ipa3_ctx->pdev, tx_pkt->mem.phys_base,
tx_pkt->mem.size,
DMA_TO_DEVICE);
+ }
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
tx_pkt = next_pkt;
}
@@ -671,9 +712,9 @@ failure:
if (fail_dma_wrap)
kmem_cache_free(ipa3_ctx->tx_pkt_wrapper_cache, tx_pkt);
- if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI)
+ if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
kfree(gsi_xfer_elem_array);
- else {
+ } else {
if (transfer.iovec_phys) {
if (num_desc == IPA_NUM_DESC_PER_SW_TX) {
dma_pool_free(ipa3_ctx->dma_pool,
@@ -1530,20 +1571,42 @@ static void ipa3_tx_cmd_comp(void *user1, int user2)
int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
struct ipa_tx_meta *meta)
{
- struct ipa3_desc desc[3];
+ struct ipa3_desc *desc;
+ struct ipa3_desc _desc[3];
int dst_ep_idx;
struct ipahal_imm_cmd_ip_packet_init cmd;
struct ipahal_imm_cmd_pyld *cmd_pyld = NULL;
struct ipa3_sys_context *sys;
int src_ep_idx;
+ int num_frags, f;
- memset(desc, 0, 3 * sizeof(struct ipa3_desc));
+ if (unlikely(!ipa3_ctx)) {
+ IPAERR("IPA3 driver was not initialized\n");
+ return -EINVAL;
+ }
if (skb->len == 0) {
IPAERR("packet size is 0\n");
return -EINVAL;
}
+ num_frags = skb_shinfo(skb)->nr_frags;
+ if (num_frags) {
+ /* 1 desc for tag to resolve status out-of-order issue;
+ * 1 desc is needed for the linear portion of skb;
+ * 1 desc may be needed for the PACKET_INIT;
+ * 1 desc for each frag
+ */
+ desc = kzalloc(sizeof(*desc) * (num_frags + 3), GFP_ATOMIC);
+ if (!desc) {
+ IPAERR("failed to alloc desc array\n");
+ goto fail_mem;
+ }
+ } else {
+ memset(_desc, 0, 3 * sizeof(struct ipa3_desc));
+ desc = &_desc[0];
+ }
+
/*
* USB_CONS: PKT_INIT ep_idx = dst pipe
* Q6_CONS: PKT_INIT ep_idx = sender pipe
@@ -1558,14 +1621,14 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
if (-1 == src_ep_idx) {
IPAERR("Client %u is not mapped\n",
IPA_CLIENT_APPS_LAN_WAN_PROD);
- return -EFAULT;
+ goto fail_gen;
}
dst_ep_idx = ipa3_get_ep_mapping(dst);
} else {
src_ep_idx = ipa3_get_ep_mapping(dst);
if (-1 == src_ep_idx) {
IPAERR("Client %u is not mapped\n", dst);
- return -EFAULT;
+ goto fail_gen;
}
if (meta && meta->pkt_init_dst_ep_valid)
dst_ep_idx = meta->pkt_init_dst_ep;
@@ -1603,7 +1666,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[1].callback = ipa3_tx_cmd_comp;
desc[1].user1 = cmd_pyld;
desc[2].pyld = skb->data;
- desc[2].len = skb->len;
+ desc[2].len = skb_headlen(skb);
desc[2].type = IPA_DATA_DESC_SKB;
desc[2].callback = ipa3_tx_comp_usr_notify_release;
desc[2].user1 = skb;
@@ -1616,8 +1679,22 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[2].dma_address = meta->dma_address;
}
- if (ipa3_send(sys, 3, desc, true)) {
- IPAERR("fail to send immediate command\n");
+ for (f = 0; f < num_frags; f++) {
+ desc[3+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[3+f].type = IPA_DATA_DESC_SKB_PAGED;
+ desc[3+f].len = skb_frag_size(desc[3+f].frag);
+ }
+ /* don't free skb till frag mappings are released */
+ if (num_frags) {
+ desc[3+f-1].callback = desc[2].callback;
+ desc[3+f-1].user1 = desc[2].user1;
+ desc[3+f-1].user2 = desc[2].user2;
+ desc[2].callback = NULL;
+ }
+
+ if (ipa3_send(sys, num_frags + 3, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u SWP\n",
+ skb, num_frags);
goto fail_send;
}
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_sw_pkts);
@@ -1629,7 +1706,7 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[0].type = IPA_IMM_CMD_DESC;
desc[0].callback = ipa3_tag_destroy_imm;
desc[1].pyld = skb->data;
- desc[1].len = skb->len;
+ desc[1].len = skb_headlen(skb);
desc[1].type = IPA_DATA_DESC_SKB;
desc[1].callback = ipa3_tx_comp_usr_notify_release;
desc[1].user1 = skb;
@@ -1639,19 +1716,44 @@ int ipa3_tx_dp(enum ipa_client_type dst, struct sk_buff *skb,
desc[1].dma_address_valid = true;
desc[1].dma_address = meta->dma_address;
}
-
- if (ipa3_send(sys, 2, desc, true)) {
- IPAERR("fail to send skb\n");
- goto fail_gen;
+ if (num_frags == 0) {
+ if (ipa3_send(sys, 2, desc, true)) {
+ IPAERR("fail to send skb %p HWP\n", skb);
+ goto fail_gen;
+ }
+ } else {
+ for (f = 0; f < num_frags; f++) {
+ desc[2+f].frag = &skb_shinfo(skb)->frags[f];
+ desc[2+f].type = IPA_DATA_DESC_SKB_PAGED;
+ desc[2+f].len = skb_frag_size(desc[2+f].frag);
+ }
+ /* don't free skb till frag mappings are released */
+ desc[2+f-1].callback = desc[1].callback;
+ desc[2+f-1].user1 = desc[1].user1;
+ desc[2+f-1].user2 = desc[1].user2;
+ desc[1].callback = NULL;
+
+ if (ipa3_send(sys, num_frags + 2, desc, true)) {
+ IPAERR("fail to send skb %p num_frags %u HWP\n",
+ skb, num_frags);
+ goto fail_gen;
+ }
}
IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_hw_pkts);
}
+ if (num_frags) {
+ kfree(desc);
+ IPA_STATS_INC_CNT(ipa3_ctx->stats.tx_non_linear);
+ }
return 0;
fail_send:
ipahal_destroy_imm_cmd(cmd_pyld);
fail_gen:
+ if (num_frags)
+ kfree(desc);
+fail_mem:
return -EFAULT;
}
@@ -3597,7 +3699,6 @@ static void ipa_dma_gsi_irq_rx_notify_cb(struct gsi_chan_xfer_notify *notify)
}
}
-
static int ipa_gsi_setup_channel(struct ipa_sys_connect_params *in,
struct ipa3_ep_context *ep)
{
@@ -3891,3 +3992,75 @@ static uint64_t pointer_to_tag_wa(struct ipa3_tx_pkt_wrapper *tx_pkt)
}
return (unsigned long)tx_pkt & 0x0000FFFFFFFFFFFF;
}
+
+/**
+ * ipa_gsi_ch20_wa() - software workaround for IPA GSI channel 20
+ *
+ * A hardware limitation requires to avoid using GSI physical channel 20.
+ * This function allocates GSI physical channel 20 and holds it to prevent
+ * others to use it.
+ *
+ * Return codes: 0 on success, negative on failure
+ */
+int ipa_gsi_ch20_wa(void)
+{
+ struct gsi_chan_props gsi_channel_props;
+ dma_addr_t dma_addr;
+ int result;
+ int i;
+ unsigned long chan_hdl[IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC];
+ unsigned long chan_hdl_to_keep;
+
+
+ memset(&gsi_channel_props, 0, sizeof(gsi_channel_props));
+ gsi_channel_props.prot = GSI_CHAN_PROT_GPI;
+ gsi_channel_props.dir = GSI_CHAN_DIR_TO_GSI;
+ gsi_channel_props.evt_ring_hdl = ~0;
+ gsi_channel_props.re_size = GSI_CHAN_RE_SIZE_16B;
+ gsi_channel_props.ring_len = 4 * gsi_channel_props.re_size;
+ gsi_channel_props.ring_base_vaddr =
+ dma_alloc_coherent(ipa3_ctx->pdev, gsi_channel_props.ring_len,
+ &dma_addr, 0);
+ gsi_channel_props.ring_base_addr = dma_addr;
+ gsi_channel_props.use_db_eng = GSI_CHAN_DB_MODE;
+ gsi_channel_props.max_prefetch = GSI_ONE_PREFETCH_SEG;
+ gsi_channel_props.low_weight = 1;
+ gsi_channel_props.err_cb = ipa_gsi_chan_err_cb;
+ gsi_channel_props.xfer_cb = ipa_gsi_irq_tx_notify_cb;
+
+ /* first allocate channels up to channel 20 */
+ for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+ gsi_channel_props.ch_id = i;
+ result = gsi_alloc_channel(&gsi_channel_props,
+ ipa3_ctx->gsi_dev_hdl,
+ &chan_hdl[i]);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to alloc channel %d err %d\n",
+ i, result);
+ return result;
+ }
+ }
+
+ /* allocate channel 20 */
+ gsi_channel_props.ch_id = IPA_GSI_CH_20_WA_VIRT_CHAN;
+ result = gsi_alloc_channel(&gsi_channel_props, ipa3_ctx->gsi_dev_hdl,
+ &chan_hdl_to_keep);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to alloc channel %d err %d\n",
+			IPA_GSI_CH_20_WA_VIRT_CHAN, result);
+ return result;
+ }
+
+ /* release all other channels */
+ for (i = 0; i < IPA_GSI_CH_20_WA_NUM_CH_TO_ALLOC; i++) {
+ result = gsi_dealloc_channel(chan_hdl[i]);
+ if (result != GSI_STATUS_SUCCESS) {
+ IPAERR("failed to dealloc channel %d err %d\n",
+ i, result);
+ return result;
+ }
+ }
+
+ /* DMA memory shall not be freed as it is used by channel 20 */
+ return 0;
+}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 2331adb8d7e1..47dfeade9328 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -703,7 +703,8 @@ struct ipa3_sys_context {
enum ipa3_desc_type {
IPA_DATA_DESC,
IPA_DATA_DESC_SKB,
- IPA_IMM_CMD_DESC
+ IPA_DATA_DESC_SKB_PAGED,
+ IPA_IMM_CMD_DESC,
};
/**
@@ -767,6 +768,7 @@ struct ipa3_dma_xfer_wrapper {
* struct ipa3_desc - IPA descriptor
* @type: skb or immediate command or plain old data
* @pyld: points to skb
+ * @frag: points to paged fragment
* or kmalloc'ed immediate command parameters/plain old data
* @dma_address: dma mapped address of pyld
* @dma_address_valid: valid field for dma_address
@@ -780,6 +782,7 @@ struct ipa3_dma_xfer_wrapper {
struct ipa3_desc {
enum ipa3_desc_type type;
void *pyld;
+ skb_frag_t *frag;
dma_addr_t dma_address;
bool dma_address_valid;
u16 len;
@@ -889,6 +892,7 @@ struct ipa3_stats {
u32 lan_repl_rx_empty;
u32 flow_enable;
u32 flow_disable;
+ u32 tx_non_linear;
};
struct ipa3_active_clients {
@@ -1348,6 +1352,7 @@ struct ipa3_ready_cb_info {
* @ipa_num_pipes: The number of pipes used by IPA HW
* @skip_uc_pipe_reset: Indicates whether pipe reset via uC needs to be avoided
* @apply_rg10_wa: Indicates whether to use register group 10 workaround
+ * @gsi_ch20_wa: Indicates whether to apply GSI physical channel 20 workaround
* @w_lock: Indicates the wakeup source.
* @wakelock_ref_cnt: Indicates the number of times wakelock is acquired
* @ipa_initialization_complete: Indicates that IPA is fully initialized
@@ -1460,6 +1465,7 @@ struct ipa3_context {
unsigned long gsi_dev_hdl;
u32 ee;
bool apply_rg10_wa;
+ bool gsi_ch20_wa;
bool smmu_present;
bool smmu_s1_bypass;
unsigned long peer_bam_iova;
@@ -1513,6 +1519,7 @@ struct ipa3_plat_drv_res {
bool skip_uc_pipe_reset;
enum ipa_transport_type transport_prototype;
bool apply_rg10_wa;
+ bool gsi_ch20_wa;
bool tethered_flow_control;
};
@@ -2181,4 +2188,5 @@ void ipa3_dec_release_wakelock(void);
int ipa3_load_fws(const struct firmware *firmware);
int ipa3_register_ipa_ready_cb(void (*ipa_ready_cb)(void *), void *user_data);
const char *ipa_hw_error_str(enum ipa3_hw_errors err_type);
+int ipa_gsi_ch20_wa(void);
#endif /* _IPA3_I_H_ */
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 7d5b03b654cf..ab4ab0c31e08 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -209,6 +209,14 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CURRENT_MAX:
rc = smblib_set_prop_usb_current_max(chg, val);
break;
+ case POWER_SUPPLY_PROP_TYPE:
+ if (chg->pd_active && val->intval == POWER_SUPPLY_TYPE_USB_PD) {
+ chg->usb_psy_desc.type = val->intval;
+ } else {
+ pr_err("set type %d not allowed\n", val->intval);
+ rc = -EINVAL;
+ }
+ break;
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
rc = smblib_set_prop_typec_power_role(chg, val);
break;
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index 1e66507f013b..ebbc8e15f2b8 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -256,10 +256,9 @@ static int smblib_update_usb_type(struct smb_charger *chg)
int rc = 0;
const struct apsd_result *apsd_result;
- if (chg->pd_active) {
- chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_USB_PD;
+ /* if PD is active, APSD is disabled so won't have a valid result */
+ if (chg->pd_active)
return rc;
- }
apsd_result = smblib_get_apsd_result(chg);
chg->usb_psy_desc.type = apsd_result->pst;
@@ -271,12 +270,7 @@ static int smblib_detach_usb(struct smb_charger *chg)
int rc;
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
-
- rc = smblib_update_usb_type(chg);
- if (rc < 0) {
- dev_err(chg->dev, "Couldn't update usb type rc=%d\n", rc);
- return rc;
- }
+ chg->usb_psy_desc.type = POWER_SUPPLY_TYPE_UNKNOWN;
/* reconfigure allowed voltage for HVDCP */
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index 931227b476ac..9f5c67bc2d6f 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -1204,6 +1204,14 @@ static void cprh_controller_program_sdelta(
corner_band = &vreg->corner_band[i];
sdelta = corner_band->sdelta;
+ if (!sdelta->allow_core_count_adj && !sdelta->allow_temp_adj) {
+ /*
+ * Per-online-core and per-temperature margin
+ * adjustments are disabled for this corner band.
+ */
+ continue;
+ }
+
if (vreg->allow_core_count_adj)
cpr3_write_temp_core_margin(ctrl,
CPRH_MARGIN_TEMP_CORE_VBAND(0, i),
@@ -1297,6 +1305,16 @@ static int cpr3_regulator_init_cprh(struct cpr3_controller *ctrl)
(ctrl->up_error_step_limit
<< CPR4_SAW_ERROR_STEP_LIMIT_UP_SHIFT));
+ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+ CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_MASK,
+ ctrl->step_quot_fixed
+ << CPR4_MARGIN_ADJ_CTL_KV_MARGIN_ADJ_STEP_QUOT_SHIFT);
+
+ cpr3_masked_write(ctrl, CPR4_REG_MARGIN_ADJ_CTL,
+ CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN,
+ (ctrl->use_dynamic_step_quot
+ ? CPR4_MARGIN_ADJ_CTL_PER_RO_KV_MARGIN_EN : 0));
+
if (ctrl->voltage_settling_time) {
/*
* Configure the settling timer used to account for
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 04a8206325c8..ffd3db1a6dff 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -1298,6 +1298,14 @@ static int cprh_kbss_init_controller(struct cpr3_controller *ctrl)
ctrl->saw_use_unit_mV = of_property_read_bool(ctrl->dev->of_node,
"qcom,cpr-saw-use-unit-mV");
+ /*
+ * Use fixed step quotient if specified otherwise use dynamically
+ * calculated per RO step quotient
+ */
+ of_property_read_u32(ctrl->dev->of_node, "qcom,cpr-step-quot-fixed",
+ &ctrl->step_quot_fixed);
+ ctrl->use_dynamic_step_quot = !ctrl->step_quot_fixed;
+
of_property_read_u32(ctrl->dev->of_node,
"qcom,cpr-voltage-settling-time",
&ctrl->voltage_settling_time);
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index 5dd772a1abed..55ba6cfc25c2 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -899,9 +899,8 @@ static int qpnp_lab_dt_init(struct qpnp_labibb *labibb,
val);
if (rc) {
- pr_err("qpnp_lab_regulator_set_voltage write register %x failed rc = %d\n",
- REG_LAB_VOLTAGE, rc);
-
+ pr_err("write to register %x failed rc = %d\n", REG_LAB_VOLTAGE,
+ rc);
return rc;
}
@@ -1519,8 +1518,9 @@ static int qpnp_lab_regulator_set_voltage(struct regulator_dev *rdev,
return 0;
if (min_uV < labibb->lab_vreg.min_volt) {
- pr_err("qpnp_lab_regulator_set_voltage failed, min_uV %d is less than min_volt %d",
- min_uV, labibb->lab_vreg.min_volt);
+ pr_err("min_uV %d is less than min_volt %d", min_uV,
+ labibb->lab_vreg.min_volt);
+ return -EINVAL;
}
val = DIV_ROUND_UP(min_uV - labibb->lab_vreg.min_volt,
@@ -1528,7 +1528,7 @@ static int qpnp_lab_regulator_set_voltage(struct regulator_dev *rdev,
new_uV = val * labibb->lab_vreg.step_size + labibb->lab_vreg.min_volt;
if (new_uV > max_uV) {
- pr_err("qpnp_lab_regulator_set_voltage unable to set voltage (%d %d)\n",
+ pr_err("unable to set voltage %d (min:%d max:%d)\n", new_uV,
min_uV, max_uV);
return -EINVAL;
}
@@ -1540,14 +1540,16 @@ static int qpnp_lab_regulator_set_voltage(struct regulator_dev *rdev,
val | LAB_VOLTAGE_OVERRIDE_EN);
if (rc) {
- pr_err("qpnp_lab_regulator_set_voltage write register %x failed rc = %d\n",
- REG_LAB_VOLTAGE, rc);
-
+ pr_err("write to register %x failed rc = %d\n", REG_LAB_VOLTAGE,
+ rc);
return rc;
}
- if (new_uV > labibb->lab_vreg.curr_volt)
+ if (new_uV > labibb->lab_vreg.curr_volt) {
+ val = DIV_ROUND_UP(new_uV - labibb->lab_vreg.curr_volt,
+ labibb->lab_vreg.step_size);
udelay(val * labibb->lab_vreg.slew_rate);
+ }
labibb->lab_vreg.curr_volt = new_uV;
return 0;
@@ -2299,8 +2301,8 @@ static int qpnp_ibb_regulator_set_voltage(struct regulator_dev *rdev,
return 0;
if (min_uV < labibb->ibb_vreg.min_volt) {
- pr_err("qpnp_ibb_regulator_set_voltage failed, min_uV %d is less than min_volt %d",
- min_uV, labibb->ibb_vreg.min_volt);
+ pr_err("min_uV %d is less than min_volt %d", min_uV,
+ labibb->ibb_vreg.min_volt);
return -EINVAL;
}
@@ -2309,7 +2311,7 @@ static int qpnp_ibb_regulator_set_voltage(struct regulator_dev *rdev,
new_uV = val * labibb->ibb_vreg.step_size + labibb->ibb_vreg.min_volt;
if (new_uV > max_uV) {
- pr_err("qpnp_ibb_regulator_set_voltage unable to set voltage (%d %d)\n",
+ pr_err("unable to set voltage %d (min:%d max:%d)\n", new_uV,
min_uV, max_uV);
return -EINVAL;
}
@@ -2321,14 +2323,16 @@ static int qpnp_ibb_regulator_set_voltage(struct regulator_dev *rdev,
val | IBB_VOLTAGE_OVERRIDE_EN);
if (rc) {
- pr_err("qpnp_ibb_regulator_set_voltage write register %x failed rc = %d\n",
- REG_IBB_VOLTAGE, rc);
-
+ pr_err("write to register %x failed rc = %d\n", REG_IBB_VOLTAGE,
+ rc);
return rc;
}
- if (new_uV > labibb->ibb_vreg.curr_volt)
+ if (new_uV > labibb->ibb_vreg.curr_volt) {
+ val = DIV_ROUND_UP(new_uV - labibb->ibb_vreg.curr_volt,
+ labibb->ibb_vreg.step_size);
udelay(val * labibb->ibb_vreg.slew_rate);
+ }
labibb->ibb_vreg.curr_volt = new_uV;
return 0;
diff --git a/drivers/soc/qcom/gladiator_erp_v2.c b/drivers/soc/qcom/gladiator_erp_v2.c
index 09b0ae4f27ae..20bb97f1fb16 100644
--- a/drivers/soc/qcom/gladiator_erp_v2.c
+++ b/drivers/soc/qcom/gladiator_erp_v2.c
@@ -40,6 +40,7 @@
#define GLADIATOR_ERRLOG7 0x1038
#define GLADIATOR_ERRLOG8 0x103C
#define OBSERVER_0_ID_COREID 0x8000
+#define OBSERVER_0_ID_REVISIONID 0x8004
#define OBSERVER_0_FAULTEN 0x8008
#define OBSERVER_0_ERRVLD 0x800C
#define OBSERVER_0_ERRCLR 0x8010
@@ -53,7 +54,6 @@
#define OBSERVER_0_ERRLOG7 0x8030
#define OBSERVER_0_ERRLOG8 0x8034
#define OBSERVER_0_STALLEN 0x8038
-#define OBSERVER_0_REVISIONID 0x8004
#define GLD_TRANS_OPCODE_MASK 0xE
#define GLD_TRANS_OPCODE_SHIFT 1
@@ -128,6 +128,11 @@ enum obs_err_code {
};
enum err_log {
+ ID_COREID,
+ ID_REVISIONID,
+ FAULTEN,
+ ERRVLD,
+ ERRCLR,
ERR_LOG0,
ERR_LOG1,
ERR_LOG2,
@@ -138,6 +143,7 @@ enum err_log {
ERR_LOG7,
ERR_LOG8,
STALLEN,
+ MAX_NUM,
};
enum type_logger_error {
@@ -475,6 +481,15 @@ static u32 get_gld_offset(unsigned int err_log)
u32 offset = 0;
switch (err_log) {
+ case FAULTEN:
+ offset = GLADIATOR_FAULTEN;
+ break;
+ case ERRVLD:
+ offset = GLADIATOR_ERRVLD;
+ break;
+ case ERRCLR:
+ offset = GLADIATOR_ERRCLR;
+ break;
case ERR_LOG0:
offset = GLADIATOR_ERRLOG0;
break;
@@ -514,6 +529,21 @@ static u32 get_obs_offset(unsigned int err_log)
u32 offset = 0;
switch (err_log) {
+ case ID_COREID:
+ offset = OBSERVER_0_ID_COREID;
+ break;
+ case ID_REVISIONID:
+ offset = OBSERVER_0_ID_REVISIONID;
+ break;
+ case FAULTEN:
+ offset = OBSERVER_0_FAULTEN;
+ break;
+ case ERRVLD:
+ offset = OBSERVER_0_ERRVLD;
+ break;
+ case ERRCLR:
+ offset = OBSERVER_0_ERRCLR;
+ break;
case ERR_LOG0:
offset = OBSERVER_0_ERRLOG0;
break;
@@ -573,7 +603,7 @@ static void decode_gld_errlog5(struct msm_gladiator_data *msm_gld_data)
static irqreturn_t msm_gladiator_isr(int irq, void *dev_id)
{
u32 err_reg;
- unsigned int err_log;
+ unsigned int err_log, err_buf[MAX_NUM];
struct msm_gladiator_data *msm_gld_data = dev_id;
@@ -591,9 +621,31 @@ static irqreturn_t msm_gladiator_isr(int irq, void *dev_id)
clear_gladiator_error(msm_gld_data->gladiator_virt_base);
return IRQ_HANDLED;
}
- pr_alert("GLADIATOR ERROR DETECTED\n");
+ pr_alert("Gladiator Error Detected:\n");
+ if (gld_err_valid) {
+ for (err_log = FAULTEN; err_log <= ERR_LOG8; err_log++) {
+ err_buf[err_log] = readl_relaxed(
+ msm_gld_data->gladiator_virt_base +
+ get_gld_offset(err_log));
+ }
+		pr_alert("Main log register data:\n%08x %08x %08x %08x\n%08x %08x %08x %08x\n%08x %08x %08x %08x\n",
+			err_buf[FAULTEN], err_buf[ERRVLD], err_buf[ERRCLR], err_buf[ERR_LOG0], err_buf[ERR_LOG1],
+			err_buf[ERR_LOG2], err_buf[ERR_LOG3], err_buf[ERR_LOG4], err_buf[ERR_LOG5], err_buf[ERR_LOG6], err_buf[ERR_LOG7], err_buf[ERR_LOG8]);
+ }
+
+ if (obsrv_err_valid) {
+ for (err_log = ID_COREID; err_log <= STALLEN; err_log++) {
+ err_buf[err_log] = readl_relaxed(
+ msm_gld_data->gladiator_virt_base +
+ get_obs_offset(err_log));
+ }
+		pr_alert("Observer log register data:\n%08x %08x %08x %08x\n%08x %08x %08x %08x\n%08x %08x %08x %08x\n%08x %08x %08x\n",
+			err_buf[0], err_buf[1], err_buf[2], err_buf[3], err_buf[4], err_buf[5], err_buf[6], err_buf[7],
+			err_buf[8], err_buf[9], err_buf[10], err_buf[11], err_buf[12], err_buf[13], err_buf[14]);
+ }
+
if (gld_err_valid) {
- pr_alert("GLADIATOR error log register data:\n");
+ pr_alert("Main error log register data:\n");
for (err_log = ERR_LOG0; err_log <= ERR_LOG8; err_log++) {
/* skip log register 7 as its reserved */
if (err_log == ERR_LOG7)
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 435b43f0c10f..4ecdf74741f5 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -1309,6 +1309,9 @@ int icnss_get_ce_id(int irq)
{
int i;
+ if (!penv || !penv->pdev)
+ return -ENODEV;
+
for (i = 0; i < ICNSS_MAX_IRQ_REGISTRATIONS; i++) {
if (penv->ce_irqs[i] == irq)
return i;
@@ -1318,6 +1321,22 @@ int icnss_get_ce_id(int irq)
}
EXPORT_SYMBOL(icnss_get_ce_id);
+int icnss_get_irq(int ce_id)
+{
+ int irq;
+
+ if (!penv || !penv->pdev)
+ return -ENODEV;
+
+ if (ce_id >= ICNSS_MAX_IRQ_REGISTRATIONS)
+ return -EINVAL;
+
+ irq = penv->ce_irqs[ce_id];
+
+ return irq;
+}
+EXPORT_SYMBOL(icnss_get_irq);
+
static struct clk *icnss_clock_init(struct device *dev, const char *cname)
{
struct clk *c;
diff --git a/drivers/thermal/qpnp-temp-alarm.c b/drivers/thermal/qpnp-temp-alarm.c
index 3b33fbaa5e25..cba1091be60f 100644
--- a/drivers/thermal/qpnp-temp-alarm.c
+++ b/drivers/thermal/qpnp-temp-alarm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,7 +87,7 @@ struct qpnp_tm_chip {
struct thermal_zone_device *tz_dev;
const char *tm_name;
enum qpnp_tm_adc_type adc_type;
- unsigned long temperature;
+ int temperature;
enum thermal_device_mode mode;
unsigned int thresh;
unsigned int stage;
@@ -236,7 +236,7 @@ static int qpnp_tm_update_temp_no_adc(struct qpnp_tm_chip *chip)
}
static int qpnp_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
- unsigned long *temperature)
+ int *temperature)
{
struct qpnp_tm_chip *chip = thermal->devdata;
int rc;
@@ -254,7 +254,7 @@ static int qpnp_tz_get_temp_no_adc(struct thermal_zone_device *thermal,
}
static int qpnp_tz_get_temp_qpnp_adc(struct thermal_zone_device *thermal,
- unsigned long *temperature)
+ int *temperature)
{
struct qpnp_tm_chip *chip = thermal->devdata;
int rc;
@@ -332,7 +332,7 @@ static int qpnp_tz_get_trip_type(struct thermal_zone_device *thermal,
}
static int qpnp_tz_get_trip_temp(struct thermal_zone_device *thermal,
- int trip, unsigned long *temperature)
+ int trip, int *temperature)
{
struct qpnp_tm_chip *chip = thermal->devdata;
int thresh_temperature;
@@ -361,7 +361,7 @@ static int qpnp_tz_get_trip_temp(struct thermal_zone_device *thermal,
}
static int qpnp_tz_get_crit_temp(struct thermal_zone_device *thermal,
- unsigned long *temperature)
+ int *temperature)
{
struct qpnp_tm_chip *chip = thermal->devdata;
@@ -420,7 +420,7 @@ static void qpnp_tm_work(struct work_struct *work)
if (chip->stage != chip->prev_stage) {
chip->prev_stage = chip->stage;
- pr_crit("%s: PMIC Temp Alarm - stage=%u, threshold=%u, temperature=%lu mC\n",
+ pr_crit("%s: PMIC Temp Alarm - stage=%u, threshold=%u, temperature=%d mC\n",
chip->tm_name, chip->stage, chip->thresh,
chip->temperature);
diff --git a/drivers/usb/dwc3/dwc3-msm.c b/drivers/usb/dwc3/dwc3-msm.c
index 2b0299c293e2..13ba52ad7b62 100644
--- a/drivers/usb/dwc3/dwc3-msm.c
+++ b/drivers/usb/dwc3/dwc3-msm.c
@@ -122,6 +122,13 @@ enum dwc3_id_state {
DWC3_ID_FLOAT,
};
+/* for type c cable */
+enum plug_orientation {
+ ORIENTATION_NONE,
+ ORIENTATION_CC1,
+ ORIENTATION_CC2,
+};
+
/* Input bits to state machine (mdwc->inputs) */
#define ID 0
@@ -196,6 +203,7 @@ struct dwc3_msm {
atomic_t in_p3;
unsigned int lpm_to_suspend_delay;
bool init;
+ enum plug_orientation typec_orientation;
};
#define USB_HSPHY_3P3_VOL_MIN 3050000 /* uV */
@@ -1998,6 +2006,11 @@ static int dwc3_msm_resume(struct dwc3_msm *mdwc)
/* Resume SS PHY */
if (mdwc->lpm_flags & MDWC3_SS_PHY_SUSPEND) {
+ mdwc->ss_phy->flags &= ~(PHY_LANE_A | PHY_LANE_B);
+ if (mdwc->typec_orientation == ORIENTATION_CC1)
+ mdwc->ss_phy->flags |= PHY_LANE_A;
+ if (mdwc->typec_orientation == ORIENTATION_CC2)
+ mdwc->ss_phy->flags |= PHY_LANE_B;
usb_phy_set_suspend(mdwc->ss_phy, 0);
mdwc->ss_phy->flags &= ~DEVICE_IN_SS_MODE;
mdwc->lpm_flags &= ~MDWC3_SS_PHY_SUSPEND;
@@ -2333,18 +2346,36 @@ static int dwc3_msm_id_notifier(struct notifier_block *nb,
unsigned long event, void *ptr)
{
struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, id_nb);
+ struct extcon_dev *edev = ptr;
enum dwc3_id_state id;
+ int cc_state;
+
+ if (!edev) {
+ dev_err(mdwc->dev, "%s: edev null\n", __func__);
+ goto done;
+ }
id = event ? DWC3_ID_GROUND : DWC3_ID_FLOAT;
dev_dbg(mdwc->dev, "host:%ld (id:%d) event received\n", event, id);
+ cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+ if (cc_state < 0) {
+ dev_err(mdwc->dev, "%s: failed to get cc state\n", __func__);
+ goto done;
+ }
+
+ mdwc->typec_orientation = cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+ dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+
if (mdwc->id_state != id) {
mdwc->id_state = id;
dbg_event(0xFF, "id_state", mdwc->id_state);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
+done:
return NOTIFY_DONE;
}
@@ -2353,18 +2384,35 @@ static int dwc3_msm_vbus_notifier(struct notifier_block *nb,
{
struct dwc3_msm *mdwc = container_of(nb, struct dwc3_msm, vbus_nb);
struct dwc3 *dwc = platform_get_drvdata(mdwc->dwc3);
+ struct extcon_dev *edev = ptr;
+ int cc_state;
+
+ if (!edev) {
+ dev_err(mdwc->dev, "%s: edev null\n", __func__);
+ goto done;
+ }
dev_dbg(mdwc->dev, "vbus:%ld event received\n", event);
if (mdwc->vbus_active == event)
return NOTIFY_DONE;
+ cc_state = extcon_get_cable_state_(edev, EXTCON_USB_CC);
+ if (cc_state < 0) {
+ dev_err(mdwc->dev, "%s: failed to get cc state\n", __func__);
+ goto done;
+ }
+
+ mdwc->typec_orientation = cc_state ? ORIENTATION_CC2 : ORIENTATION_CC1;
+
+ dbg_event(0xFF, "cc_state", mdwc->typec_orientation);
+
mdwc->vbus_active = event;
if (dwc->is_drd && !mdwc->in_restart) {
dbg_event(0xFF, "Q RW (vbus)", mdwc->vbus_active);
queue_work(mdwc->dwc3_wq, &mdwc->resume_work);
}
-
+done:
return NOTIFY_DONE;
}
@@ -2767,10 +2815,10 @@ static int dwc3_msm_probe(struct platform_device *pdev)
/* Update initial VBUS/ID state from extcon */
if (mdwc->extcon_vbus && extcon_get_cable_state_(mdwc->extcon_vbus,
EXTCON_USB))
- dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, NULL);
+ dwc3_msm_vbus_notifier(&mdwc->vbus_nb, true, mdwc->extcon_vbus);
if (mdwc->extcon_id && extcon_get_cable_state_(mdwc->extcon_id,
EXTCON_USB_HOST))
- dwc3_msm_id_notifier(&mdwc->id_nb, true, NULL);
+ dwc3_msm_id_notifier(&mdwc->id_nb, true, mdwc->extcon_id);
device_create_file(&pdev->dev, &dev_attr_mode);
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 05468398d34b..de0d06e3b60d 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -118,6 +118,12 @@ enum usbpd_data_msg_type {
MSG_VDM = 0xF,
};
+enum plug_orientation {
+ ORIENTATION_NONE,
+ ORIENTATION_CC1,
+ ORIENTATION_CC2,
+};
+
/* Timeouts (in ms) */
#define ERROR_RECOVERY_TIME 25
#define SENDER_RESPONSE_TIME 30
@@ -239,10 +245,41 @@ static LIST_HEAD(_usbpd); /* useful for debugging */
static const unsigned int usbpd_extcon_cable[] = {
EXTCON_USB,
EXTCON_USB_HOST,
+ EXTCON_USB_CC,
EXTCON_NONE,
};
-static const u32 usbpd_extcon_exclusive[] = {0xffffffff, 0};
+/* EXTCON_USB and EXTCON_USB_HOST are mutually exclusive */
+static const u32 usbpd_extcon_exclusive[] = {0x3, 0};
+
+static enum plug_orientation usbpd_get_plug_orientation(struct usbpd *pd)
+{
+ int ret;
+ union power_supply_propval val;
+
+ ret = power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_CC_ORIENTATION, &val);
+ if (ret)
+ return ORIENTATION_NONE;
+
+ return val.intval;
+}
+
+static bool is_cable_flipped(struct usbpd *pd)
+{
+ enum plug_orientation cc;
+
+ cc = usbpd_get_plug_orientation(pd);
+ if (cc == ORIENTATION_CC2)
+ return true;
+
+ /*
+ * ORIENTATION_CC1 or ORIENTATION_NONE.
+ * Return value for ORIENTATION_NONE is
+ * "don't care" as disconnect handles it.
+ */
+ return false;
+}
static int set_power_role(struct usbpd *pd, enum power_role pr)
{
@@ -479,6 +516,10 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->pd_phy_opened = true;
}
+ val.intval = 1;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
pd->in_pr_swap = false;
pd->current_state = PE_SRC_SEND_CAPABILITIES;
dev_dbg(&pd->dev, "Enter %s\n",
@@ -489,16 +530,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
queue_delayed_work(pd->wq, &pd->sm_work, 0);
break;
- /* reset counters */
- pd->hard_reset_count = 0;
- pd->caps_count = 0;
- pd->pd_connected = true; /* we know peer is PD capable */
-
- /* wait for REQUEST */
- queue_delayed_work(pd->wq, &pd->sm_work,
- msecs_to_jiffies(SENDER_RESPONSE_TIME * 3));
- break;
-
case PE_SRC_NEGOTIATE_CAPABILITY:
if (PD_RDO_OBJ_POS(pd->rdo) != 1) {
/* send Reject */
@@ -612,6 +643,9 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
if (pd->psy_type == POWER_SUPPLY_TYPE_USB ||
pd->psy_type == POWER_SUPPLY_TYPE_USB_CDP)
extcon_set_cable_state_(pd->extcon,
+ EXTCON_USB_CC,
+ is_cable_flipped(pd));
+ extcon_set_cable_state_(pd->extcon,
EXTCON_USB, 1);
}
@@ -705,6 +739,8 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 0);
pd->current_dr = DR_UFP;
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+ is_cable_flipped(pd));
extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
pd_phy_update_roles(pd->current_dr, pd->current_pr);
}
@@ -739,10 +775,14 @@ static void dr_swap(struct usbpd *pd)
{
if (pd->current_dr == DR_DFP) {
extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 0);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+ is_cable_flipped(pd));
extcon_set_cable_state_(pd->extcon, EXTCON_USB, 1);
pd->current_dr = DR_UFP;
} else if (pd->current_dr == DR_UFP) {
extcon_set_cable_state_(pd->extcon, EXTCON_USB, 0);
+ extcon_set_cable_state_(pd->extcon, EXTCON_USB_CC,
+ is_cable_flipped(pd));
extcon_set_cable_state_(pd->extcon, EXTCON_USB_HOST, 1);
pd->current_dr = DR_DFP;
}
@@ -865,10 +905,17 @@ static void usbpd_sm(struct work_struct *w)
if (pd->caps_count == 5 && pd->current_dr == DR_DFP) {
/* Likely not PD-capable, start host now */
extcon_set_cable_state_(pd->extcon,
+ EXTCON_USB_CC, is_cable_flipped(pd));
+ extcon_set_cable_state_(pd->extcon,
EXTCON_USB_HOST, 1);
} else if (pd->caps_count >= PD_CAPS_COUNT) {
dev_dbg(&pd->dev, "Src CapsCounter exceeded, disabling PD\n");
usbpd_set_state(pd, PE_SRC_DISABLED);
+
+ val.intval = 0;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_PD_ACTIVE,
+ &val);
break;
}
@@ -880,6 +927,11 @@ static void usbpd_sm(struct work_struct *w)
/* transmit was successful if GoodCRC was received */
pd->caps_count = 0;
pd->hard_reset_count = 0;
+ pd->pd_connected = true; /* we know peer is PD capable */
+
+ val.intval = POWER_SUPPLY_TYPE_USB_PD;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPE, &val);
/* wait for REQUEST */
pd->current_state = PE_SRC_SEND_CAPABILITIES_WAIT;
@@ -973,6 +1025,11 @@ static void usbpd_sm(struct work_struct *w)
val.intval = 1;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
+
+ val.intval = POWER_SUPPLY_TYPE_USB_PD;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPE, &val);
+
usbpd_set_state(pd, PE_SNK_EVALUATE_CAPABILITY);
} else if (pd->hard_reset_count < 3) {
usbpd_set_state(pd, PE_SNK_HARD_RESET);
diff --git a/drivers/usb/phy/phy-msm-ssusb-qmp.c b/drivers/usb/phy/phy-msm-ssusb-qmp.c
index ed1b2d2d7730..50b63f912638 100644
--- a/drivers/usb/phy/phy-msm-ssusb-qmp.c
+++ b/drivers/usb/phy/phy-msm-ssusb-qmp.c
@@ -47,6 +47,13 @@ enum core_ldo_levels {
#define ALFPS_DTCT_EN BIT(1)
#define ARCVR_DTCT_EVENT_SEL BIT(4)
+/* PCIE_USB3_PHY_PCS_MISC_TYPEC_CTRL bits */
+
+/* 0 - selects Lane A. 1 - selects Lane B */
+#define SW_PORTSELECT BIT(0)
+/* port select mux: 1 - sw control. 0 - HW control */
+#define SW_PORTSELECT_MX BIT(1)
+
enum qmp_phy_rev_reg {
USB3_PHY_PCS_STATUS,
USB3_PHY_AUTONOMOUS_MODE_CTRL,
@@ -54,6 +61,7 @@ enum qmp_phy_rev_reg {
USB3_PHY_POWER_DOWN_CONTROL,
USB3_PHY_SW_RESET,
USB3_PHY_START,
+ USB3_PHY_PCS_MISC_TYPEC_CTRL,
USB3_PHY_REG_MAX,
};
@@ -246,7 +254,7 @@ static int msm_ssphy_qmp_init(struct usb_phy *uphy)
{
struct msm_ssphy_qmp *phy = container_of(uphy, struct msm_ssphy_qmp,
phy);
- int ret;
+ int ret, val;
unsigned init_timeout_usec = INIT_MAX_TIME_USEC;
const struct qmp_reg_val *reg = NULL;
@@ -286,6 +294,18 @@ static int msm_ssphy_qmp_init(struct usb_phy *uphy)
return ret;
}
+ /* perform lane selection */
+ val = -EINVAL;
+ if (phy->phy.flags & PHY_LANE_A)
+ val = SW_PORTSELECT_MX;
+
+ if (phy->phy.flags & PHY_LANE_B)
+ val = SW_PORTSELECT | SW_PORTSELECT_MX;
+
+ if (val > 0)
+ writel_relaxed(val,
+ phy->base + phy->phy_reg[USB3_PHY_PCS_MISC_TYPEC_CTRL]);
+
writel_relaxed(0x03, phy->base + phy->phy_reg[USB3_PHY_START]);
writel_relaxed(0x00, phy->base + phy->phy_reg[USB3_PHY_SW_RESET]);
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index f957ee082514..29cae8ac6166 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -335,6 +335,8 @@ struct mdss_data_type {
u32 default_ot_wr_limit;
struct irq_domain *irq_domain;
+ u32 *mdp_irq_raw;
+ u32 *mdp_irq_export;
u32 *mdp_irq_mask;
u32 mdp_hist_irq_mask;
u32 mdp_intf_irq_mask;
@@ -503,6 +505,10 @@ struct mdss_data_type {
u32 bcolor1;
u32 bcolor2;
struct mdss_scaler_block *scaler_off;
+
+ u32 max_dest_scaler_input_width;
+ u32 max_dest_scaler_output_width;
+ struct mdss_mdp_destination_scaler *ds;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/fbdev/msm/mdss_debug.c b/drivers/video/fbdev/msm/mdss_debug.c
index d1a516637506..39848366a55b 100644
--- a/drivers/video/fbdev/msm/mdss_debug.c
+++ b/drivers/video/fbdev/msm/mdss_debug.c
@@ -111,11 +111,11 @@ static ssize_t panel_debug_base_offset_read(struct file *file,
if (*ppos)
return 0; /* the end */
- len = snprintf(buf, sizeof(buf), "0x%02zx %zd\n", dbg->off, dbg->cnt);
- if (len < 0)
+ len = snprintf(buf, sizeof(buf), "0x%02zx %zx\n", dbg->off, dbg->cnt);
+ if (len < 0 || len >= sizeof(buf))
return 0;
- if (copy_to_user(buff, buf, len))
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
@@ -244,7 +244,11 @@ static ssize_t panel_debug_base_reg_read(struct file *file,
if (mdata->debug_inf.debug_enable_clock)
mdata->debug_inf.debug_enable_clock(0);
- if (copy_to_user(user_buf, panel_reg_buf, len))
+ if (len < 0 || len >= sizeof(panel_reg_buf))
+ return 0;
+
+ if ((count < sizeof(panel_reg_buf))
+ || (copy_to_user(user_buf, panel_reg_buf, len)))
goto read_reg_fail;
kfree(rx_buf);
@@ -403,7 +407,7 @@ static ssize_t mdss_debug_base_offset_read(struct file *file,
{
struct mdss_debug_base *dbg = file->private_data;
int len = 0;
- char buf[24];
+ char buf[24] = {'\0'};
if (!dbg)
return -ENODEV;
@@ -412,10 +416,10 @@ static ssize_t mdss_debug_base_offset_read(struct file *file,
return 0; /* the end */
len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
- if (len < 0)
+ if (len < 0 || len >= sizeof(buf))
return 0;
- if (copy_to_user(buff, buf, len))
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
@@ -759,7 +763,7 @@ static ssize_t mdss_debug_factor_read(struct file *file,
{
struct mult_factor *factor = file->private_data;
int len = 0;
- char buf[32];
+ char buf[32] = {'\0'};
if (!factor)
return -ENODEV;
@@ -769,10 +773,10 @@ static ssize_t mdss_debug_factor_read(struct file *file,
len = snprintf(buf, sizeof(buf), "%d/%d\n",
factor->numer, factor->denom);
- if (len < 0)
+ if (len < 0 || len >= sizeof(buf))
return 0;
- if (copy_to_user(buff, buf, len))
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
@@ -803,6 +807,8 @@ static ssize_t mdss_debug_perf_mode_write(struct file *file,
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
+ buf[count] = 0; /* end of string */
+
if (sscanf(buf, "%d", &perf_mode) != 1)
return -EFAULT;
@@ -823,7 +829,7 @@ static ssize_t mdss_debug_perf_mode_read(struct file *file,
{
struct mdss_perf_tune *perf_tune = file->private_data;
int len = 0;
- char buf[40];
+ char buf[40] = {'\0'};
if (!perf_tune)
return -ENODEV;
@@ -833,10 +839,10 @@ static ssize_t mdss_debug_perf_mode_read(struct file *file,
len = snprintf(buf, sizeof(buf), "min_mdp_clk %lu min_bus_vote %llu\n",
perf_tune->min_mdp_clk, perf_tune->min_bus_vote);
- if (len < 0)
+ if (len < 0 || len >= sizeof(buf))
return 0;
- if (copy_to_user(buff, buf, len))
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
@@ -856,7 +862,7 @@ static ssize_t mdss_debug_perf_panic_read(struct file *file,
{
struct mdss_data_type *mdata = file->private_data;
int len = 0;
- char buf[40];
+ char buf[40] = {'\0'};
if (!mdata)
return -ENODEV;
@@ -866,10 +872,10 @@ static ssize_t mdss_debug_perf_panic_read(struct file *file,
len = snprintf(buf, sizeof(buf), "%d\n",
!mdata->has_panic_ctrl);
- if (len < 0)
+ if (len < 0 || len >= sizeof(buf))
return 0;
- if (copy_to_user(buff, buf, len))
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
@@ -932,9 +938,14 @@ static ssize_t mdss_debug_perf_panic_write(struct file *file,
if (!mdata)
return -EFAULT;
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
if (copy_from_user(buf, user_buf, count))
return -EFAULT;
+ buf[count] = 0; /* end of string */
+
if (sscanf(buf, "%d", &disable_panic) != 1)
return -EFAULT;
@@ -1004,10 +1015,10 @@ static ssize_t mdss_debug_perf_bw_limit_read(struct file *file,
temp_settings++;
}
- if (len < 0)
+ if (len < 0 || len >= sizeof(buf))
return 0;
- if (copy_to_user(buff, buf, len))
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
return -EFAULT;
*ppos += len; /* increase offset */
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index 0e26de90900c..d6db871e5b1b 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -116,6 +116,17 @@ static int mdss_fb_send_panel_event(struct msm_fb_data_type *mfd,
int event, void *arg);
static void mdss_fb_set_mdp_sync_pt_threshold(struct msm_fb_data_type *mfd,
int type);
+
+static inline void __user *to_user_ptr(uint64_t address)
+{
+ return (void __user *)(uintptr_t)address;
+}
+
+static inline uint64_t __user to_user_u64(void *ptr)
+{
+ return (uint64_t)((uintptr_t)ptr);
+}
+
void mdss_fb_no_update_notify_timer_cb(unsigned long data)
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)data;
@@ -3294,6 +3305,11 @@ static int mdss_fb_pan_display_sub(struct fb_var_screeninfo *var,
{
struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ if (!mfd || !var) {
+ pr_err("Invalid parameters mfd:%p var:%p\n", mfd, var);
+ return -EINVAL;
+ }
+
if (!mfd->op_enable)
return -EPERM;
@@ -4269,6 +4285,101 @@ err:
return ret;
}
+static int __mdss_fb_copy_destscaler_data(struct fb_info *info,
+ struct mdp_layer_commit *commit)
+{
+ int i;
+ int ret = 0;
+ u32 data_size;
+ struct mdp_destination_scaler_data __user *ds_data_user;
+ struct mdp_destination_scaler_data *ds_data = NULL;
+ void __user *scale_data_user;
+ struct mdp_scale_data_v2 *scale_data = NULL;
+ struct msm_fb_data_type *mfd = (struct msm_fb_data_type *)info->par;
+ struct mdss_data_type *mdata;
+
+ if (!mfd || !mfd->mdp.private1) {
+ pr_err("mfd is NULL or operation not permitted\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ mdata = mfd_to_mdata(mfd);
+ if (!mdata) {
+ pr_err("mdata is NULL or not initialized\n");
+ ret = -EINVAL;
+ goto err;
+ }
+
+ if (commit->commit_v1.dest_scaler_cnt >
+ mdata->scaler_off->ndest_scalers) {
+ pr_err("Commit destination scaler cnt larger than HW setting, commit cnt=%d\n",
+ commit->commit_v1.dest_scaler_cnt);
+ ret = -EINVAL;
+ goto err;
+ }
+
+ ds_data_user = (struct mdp_destination_scaler_data *)
+ commit->commit_v1.dest_scaler;
+ data_size = commit->commit_v1.dest_scaler_cnt *
+ sizeof(struct mdp_destination_scaler_data);
+ ds_data = kzalloc(data_size, GFP_KERNEL);
+ if (!ds_data) {
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ret = copy_from_user(ds_data, ds_data_user, data_size);
+ if (ret) {
+ pr_err("dest scaler data copy from user failed\n");
+ goto err;
+ }
+
+ commit->commit_v1.dest_scaler = ds_data;
+
+ for (i = 0; i < commit->commit_v1.dest_scaler_cnt; i++) {
+ scale_data = NULL;
+
+ if (ds_data[i].scale) {
+ scale_data_user = to_user_ptr(ds_data[i].scale);
+ data_size = sizeof(struct mdp_scale_data_v2);
+
+ scale_data = kzalloc(data_size, GFP_KERNEL);
+ if (!scale_data) {
+ ds_data[i].scale = 0;
+ ret = -ENOMEM;
+ goto err;
+ }
+
+ ds_data[i].scale = to_user_u64(scale_data);
+ }
+
+ if (scale_data && (ds_data[i].flags &
+ (MDP_DESTSCALER_SCALE_UPDATE |
+ MDP_DESTSCALER_ENHANCER_UPDATE))) {
+ ret = copy_from_user(scale_data, scale_data_user,
+ data_size);
+ if (ret) {
+ pr_err("scale data copy from user failed\n");
+ goto err;
+ }
+ }
+ }
+
+ return ret;
+
+err:
+ if (ds_data) {
+ for (i = 0; i < commit->commit_v1.dest_scaler_cnt; i++) {
+ scale_data = to_user_ptr(ds_data[i].scale);
+ kfree(scale_data);
+ }
+ kfree(ds_data);
+ }
+
+ return ret;
+}
+
static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
unsigned long *argp, struct file *file)
{
@@ -4279,6 +4390,8 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
struct mdp_input_layer __user *input_layer_list;
struct mdp_output_layer *output_layer = NULL;
struct mdp_output_layer __user *output_layer_user;
+ struct mdp_destination_scaler_data *ds_data = NULL;
+ struct mdp_destination_scaler_data __user *ds_data_user;
ret = copy_from_user(&commit, argp, sizeof(struct mdp_layer_commit));
if (ret) {
@@ -4356,6 +4469,16 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
}
}
+ ds_data_user = commit.commit_v1.dest_scaler;
+ if (ds_data_user) {
+ ret = __mdss_fb_copy_destscaler_data(info, &commit);
+ if (ret) {
+ pr_err("copy dest scaler failed\n");
+ goto err;
+ }
+ ds_data = commit.commit_v1.dest_scaler;
+ }
+
ATRACE_BEGIN("ATOMIC_COMMIT");
ret = mdss_fb_atomic_commit(info, &commit, file);
if (ret)
@@ -4372,6 +4495,7 @@ static int mdss_fb_atomic_commit_ioctl(struct fb_info *info,
commit.commit_v1.input_layers = input_layer_list;
commit.commit_v1.output_layer = output_layer_user;
+ commit.commit_v1.dest_scaler = ds_data_user;
rc = copy_to_user(argp, &commit,
sizeof(struct mdp_layer_commit));
if (rc) {
@@ -4397,6 +4521,11 @@ err:
}
kfree(layer_list);
kfree(output_layer);
+ if (ds_data) {
+ for (i = 0; i < commit.commit_v1.dest_scaler_cnt; i++)
+ kfree(to_user_ptr(ds_data[i].scale));
+ kfree(ds_data);
+ }
return ret;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 6ca1883da1bb..03ff3ea2c6ac 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -291,6 +291,26 @@ static void mdss_irq_mask(struct irq_data *data)
spin_lock_irqsave(&mdp_lock, irq_flags);
mdata->mdss_util->disable_irq(&mdss_misc_hw);
spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ } else if (data->hwirq < 64) {
+ /* MDP_INTR is mapped as logical interrupt 32-63. */
+ u32 irq = BIT(data->hwirq - 32);
+
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ if (!(mdata->mdp_irq_mask[0] & irq)) {
+ pr_debug("%pS: MDP IRQ-%x is NOT set, mask=%x\n",
+ __builtin_return_address(0),
+ irq, mdata->mdp_irq_mask[0]);
+ } else {
+ pr_debug("%pS: MDP IRQ mask old=%x new=%x\n",
+ __builtin_return_address(0),
+ mdata->mdp_irq_mask[0], irq);
+ mdata->mdp_irq_mask[0] &= ~irq;
+ writel_relaxed(mdata->mdp_irq_mask[0], mdata->mdp_base +
+ MDSS_MDP_REG_INTR_EN);
+ if (!is_mdp_irq_enabled())
+ mdata->mdss_util->disable_irq(&mdss_mdp_hw);
+ }
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
}
@@ -308,6 +328,25 @@ static void mdss_irq_unmask(struct irq_data *data)
spin_lock_irqsave(&mdp_lock, irq_flags);
mdata->mdss_util->enable_irq(&mdss_misc_hw);
spin_unlock_irqrestore(&mdp_lock, irq_flags);
+ } else if (data->hwirq < 64) {
+ /* MDP_INTR is mapped as logical interrupt 32-63. */
+ u32 irq = BIT(data->hwirq - 32);
+
+ spin_lock_irqsave(&mdp_lock, irq_flags);
+ if (mdata->mdp_irq_mask[0] & irq) {
+ pr_debug("%pS: MDP IRQ-0x%x is already set, mask=%x\n",
+ __builtin_return_address(0),
+ irq, mdata->mdp_irq_mask[0]);
+ } else {
+ pr_debug("%pS: MDP IRQ mask old=%x new=%x\n",
+ __builtin_return_address(0),
+ mdata->mdp_irq_mask[0], irq);
+ mdata->mdp_irq_mask[0] |= irq;
+ writel_relaxed(mdata->mdp_irq_mask[0], mdata->mdp_base +
+ MDSS_MDP_REG_INTR_EN);
+ mdata->mdss_util->enable_irq(&mdss_mdp_hw);
+ }
+ spin_unlock_irqrestore(&mdp_lock, irq_flags);
}
}
@@ -315,6 +354,8 @@ static struct irq_chip mdss_irq_chip = {
.name = "mdss",
.irq_mask = mdss_irq_mask,
.irq_unmask = mdss_irq_unmask,
+ /* avoid lazy disable by defining irq_disable explicitly */
+ .irq_disable = mdss_irq_mask,
};
static int mdss_irq_domain_map(struct irq_domain *d,
@@ -347,10 +388,23 @@ static irqreturn_t mdss_irq_handler(int irq, void *ptr)
mdss_mdp_hw.irq_info->irq_buzy = true;
if (intr & MDSS_INTR_MDP) {
+ u32 mdp_irq_export;
+
spin_lock(&mdp_lock);
mdata->mdss_util->irq_dispatch(MDSS_HW_MDP, irq, ptr);
spin_unlock(&mdp_lock);
intr &= ~MDSS_INTR_MDP;
+
+ /* export MDP_INTR as logical interrupts 32-63 */
+ mdp_irq_export = mdata->mdp_irq_raw[0] &
+ mdata->mdp_irq_export[0];
+ while (mdp_irq_export) {
+ irq_hw_number_t hwirq = fls(mdp_irq_export) - 1;
+
+ generic_handle_irq(irq_find_mapping(
+ mdata->irq_domain, hwirq + 32));
+ mdp_irq_export &= ~(1 << hwirq);
+ }
}
if (intr & MDSS_INTR_DSI0) {
@@ -999,7 +1053,9 @@ irqreturn_t mdss_mdp_isr(int irq, void *ptr)
u32 isr, mask, hist_isr, hist_mask;
int i, j;
- if (!mdata->clk_ena)
+ /* Bypass if clock is not enabled and no export irq is requested. */
+ if (!mdata->clk_ena &&
+ !(mdata->mdp_irq_mask[0] & mdata->mdp_irq_export[0]))
return IRQ_HANDLED;
for (i = 0; i < ARRAY_SIZE(mdp_intr_reg); i++) {
@@ -1010,6 +1066,11 @@ irqreturn_t mdss_mdp_isr(int irq, void *ptr)
continue;
mask = readl_relaxed(mdata->mdp_base + reg.en_off);
+
+ /* Process only non-export irq */
+ mdata->mdp_irq_raw[i] = isr;
+ isr = isr & ~mdata->mdp_irq_export[i];
+
writel_relaxed(isr, mdata->mdp_base + reg.clr_off);
pr_debug("%s: reg:%d isr=%x mask=%x\n",
@@ -1940,9 +2001,10 @@ static u32 mdss_mdp_res_init(struct mdss_data_type *mdata)
static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
struct device *dev)
{
- int ret;
+ int ret = -EINVAL;
struct device_node *node;
u32 prop_val;
+ int len = 0;
if (!dev)
return -EPERM;
@@ -1978,8 +2040,7 @@ static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
}
mdata->scaler_off->vig_scaler_lut_off = prop_val;
mdata->scaler_off->has_dest_scaler =
- of_property_read_bool(mdata->pdev->dev.of_node,
- "qcom,mdss-has-dest-scaler");
+ of_property_read_bool(node, "qcom,mdss-has-dest-scaler");
if (mdata->scaler_off->has_dest_scaler) {
ret = of_property_read_u32(node,
"qcom,mdss-dest-block-off",
@@ -1991,40 +2052,65 @@ static u32 mdss_mdp_scaler_init(struct mdss_data_type *mdata,
}
mdata->scaler_off->dest_base = mdata->mdss_io.base +
prop_val;
- mdata->scaler_off->ndest_scalers =
- mdss_mdp_parse_dt_prop_len(mdata->pdev,
- "qcom,mdss-dest-scalers-off");
+
+ if (!of_find_property(node, "qcom,mdss-dest-scaler-off", &len)
+ || (len < 1)) {
+ pr_err("find property %s failed ret %d\n",
+ "qcom,mdss-dest-scaler-off", ret);
+ return -EINVAL;
+ }
+ mdata->scaler_off->ndest_scalers = len/sizeof(u32);
+
mdata->scaler_off->dest_scaler_off =
- devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
+ devm_kzalloc(dev, sizeof(u32) *
mdata->scaler_off->ndest_scalers,
GFP_KERNEL);
if (!mdata->scaler_off->dest_scaler_off) {
- kfree(mdata->scaler_off->dest_scaler_off);
return -ENOMEM;
}
- ret = mdss_mdp_parse_dt_handler(mdata->pdev,
+ ret = of_property_read_u32_array(node,
"qcom,mdss-dest-scaler-off",
mdata->scaler_off->dest_scaler_off,
mdata->scaler_off->ndest_scalers);
if (ret)
- return -EINVAL;
+ return ret;
+
mdata->scaler_off->dest_scaler_lut_off =
- devm_kzalloc(&mdata->pdev->dev, sizeof(u32) *
+ devm_kzalloc(dev, sizeof(u32) *
mdata->scaler_off->ndest_scalers,
GFP_KERNEL);
if (!mdata->scaler_off->dest_scaler_lut_off) {
- kfree(mdata->scaler_off->dest_scaler_lut_off);
return -ENOMEM;
}
- ret = mdss_mdp_parse_dt_handler(mdata->pdev,
- "qcom,mdss-dest-scalers-lut-off",
+ ret = of_property_read_u32_array(node,
+ "qcom,mdss-dest-scaler-lut-off",
mdata->scaler_off->dest_scaler_lut_off,
mdata->scaler_off->ndest_scalers);
if (ret)
- return -EINVAL;
+ return ret;
+
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,max-dest-scaler-input-width",
+ &mdata->max_dest_scaler_input_width);
+ if (ret) {
+ pr_debug("read property %s failed ret %d\n",
+ "qcom,max-dest-scaler-input-width",
+ ret);
+ }
+
+ ret = of_property_read_u32(dev->of_node,
+ "qcom,max-dest-scaler-output-width",
+ &mdata->max_dest_scaler_output_width);
+ if (ret) {
+ pr_debug("read property %s failed ret %d\n",
+ "qcom,max-dest-scaler-output-width",
+ ret);
+ }
+
+ ret = mdss_mdp_ds_addr_setup(mdata);
}
- return 0;
+ return ret;
}
/**
@@ -2322,6 +2408,16 @@ ssize_t mdss_mdp_show_capabilities(struct device *dev,
if (mdata->clk_factor.numer)
SPRINT("clk_fudge_factor=%u,%u\n", mdata->clk_factor.numer,
mdata->clk_factor.denom);
+ if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map)) {
+ SPRINT("max_dest_scaler_input_width=%u\n",
+ mdata->max_dest_scaler_input_width);
+ SPRINT("max_dest_scaler_output_width=%u\n",
+ mdata->max_dest_scaler_output_width);
+ SPRINT("dest_scaler_count=%u\n",
+ mdata->scaler_off->ndest_scalers);
+ SPRINT("max_dest_scale_up=%u\n", MAX_UPSCALE_RATIO);
+ }
+
SPRINT("features=");
if (mdata->has_bwc)
SPRINT(" bwc");
@@ -2556,7 +2652,7 @@ static int mdss_mdp_probe(struct platform_device *pdev)
mdss_mdp_hw.ptr = mdata;
/* export misc. interrupts to external driver */
- mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 32,
+ mdata->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 64,
&mdss_irq_domain_ops, mdata);
if (!mdata->irq_domain) {
pr_err("unable to add linear domain\n");
@@ -2679,6 +2775,36 @@ static int mdss_mdp_probe(struct platform_device *pdev)
if (mdss_res->mdp_irq_mask == NULL)
return -ENOMEM;
+ mdss_res->mdp_irq_raw = kcalloc(ARRAY_SIZE(mdp_intr_reg),
+ sizeof(u32), GFP_KERNEL);
+ if (mdss_res->mdp_irq_raw == NULL) {
+ kfree(mdss_res->mdp_irq_mask);
+ mdss_res->mdp_irq_mask = NULL;
+ return -ENOMEM;
+ }
+
+ mdss_res->mdp_irq_export = kcalloc(ARRAY_SIZE(mdp_intr_reg),
+ sizeof(u32), GFP_KERNEL);
+ if (mdss_res->mdp_irq_export == NULL) {
+ kfree(mdss_res->mdp_irq_mask);
+ kfree(mdss_res->mdp_irq_raw);
+ mdss_res->mdp_irq_mask = NULL;
+ mdss_res->mdp_irq_raw = NULL;
+ return -ENOMEM;
+ }
+
+ /*
+ * If rotator is indicated as separate prior to 2.0, it means
+ * rotator block in WB0 & WB1 are serviced by external driver.
+ * In that case, specify WB0 & WB1 irq as export in mdp_irq_export;
+ * otherwise, set mdp_irq_export to zero will disable mdp irq
+ * export.
+ */
+ if (mdss_res->has_separate_rotator &&
+ (mdata->mdp_rev < MDSS_MDP_HW_REV_200))
+ mdss_res->mdp_irq_export[0] = MDSS_MDP_INTR_WB_0_DONE |
+ MDSS_MDP_INTR_WB_1_DONE;
+
pr_info("mdss version = 0x%x, bootloader display is %s\n",
mdata->mdp_rev, display_on ? "on" : "off");
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index bedbc10714c1..22d6b2f7dbad 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -86,6 +86,36 @@
#define MAX_LAYER_COUNT 0xC
+/**
+ * Destination Scaler control flags setting
+ *
+ * @DS_ENABLE: Setting the bit indicates Destination Scaler is enabled. Unset
+ * the bit indicates Destination Scaler is disable.
+ * @DS_DUAL_MODE: Setting the bit indicates Left and Right Destination Scaler
+ * are operated in Dual mode.
+ * @DS_LEFT: Setting the bit indicates current Destination Scaler is assigned
+ * with the Left LM. DS_LEFT and DS_DUAL_MODE can be used
+ * together.
+ * @DS_RIGHT: Setting the bit indicates current Destination Scaler is assigned
+ * with the Right LM. DS_RIGHT and DS_DUAL_MODE can be used
+ * together.
+ * @DS_SCALE_UPDATE: Setting the bit indicates current Destination Scaler
+ * QSEED3 parameters needs to be updated.
+ * @DS_ENHANCER_UPDATE: Setting this bit indicates current Destination Scaler
+ * QSEED3 Detail enhancer parameters need to be updated.
+ */
+#define DS_ENABLE BIT(0)
+#define DS_DUAL_MODE BIT(1)
+#define DS_LEFT BIT(2)
+#define DS_RIGHT BIT(3)
+#define DS_SCALE_UPDATE BIT(4)
+#define DS_ENHANCER_UPDATE BIT(5)
+
+/**
+ * Destination Scaler DUAL mode overfetch pixel count
+ */
+#define MDSS_MDP_DS_OVERFETCH_SIZE 5
+
/* hw cursor can only be setup in highest mixer stage */
#define HW_CURSOR_STAGE(mdata) \
(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
@@ -310,6 +340,25 @@ struct mdss_mdp_writeback {
u8 supported_output_formats[BITS_TO_BYTES(MDP_IMGTYPE_LIMIT1)];
};
+/*
+ * Destination scaler info
+ * destination scaler is hard wired to DSPP0/1 and LM0/1
+ * Input dimension is always matching to LM output dimension
+ * Output dimension is the Panel/WB dimension
+ * In bypass mode (off), input and output dimension is the same
+ */
+struct mdss_mdp_destination_scaler {
+ u32 num;
+ char __iomem *ds_base;
+ char __iomem *scaler_base;
+ char __iomem *lut_base;
+ u16 src_width;
+ u16 src_height;
+ u32 flags;
+ struct mdp_scale_data_v2 scaler;
+};
+
+
struct mdss_mdp_ctl_intfs_ops {
int (*start_fnc)(struct mdss_mdp_ctl *ctl);
int (*stop_fnc)(struct mdss_mdp_ctl *ctl, int panel_power_state);
@@ -462,6 +511,8 @@ struct mdss_mdp_mixer {
char __iomem *base;
char __iomem *dspp_base;
char __iomem *pingpong_base;
+ /* Destination Scaler is hard wired to each mixer */
+ struct mdss_mdp_destination_scaler *ds;
u8 type;
u8 params_changed;
u16 width;
@@ -1049,12 +1100,63 @@ static inline int mdss_mdp_line_buffer_width(void)
return MAX_LINE_BUFFER_WIDTH;
}
+static inline int is_dest_scaling_enable(struct mdss_mdp_mixer *mixer)
+{
+ return (test_bit(MDSS_CAPS_DEST_SCALER, mdss_res->mdss_caps_map) &&
+ mixer && mixer->ds && (mixer->ds->flags & DS_ENABLE));
+}
+
+static inline u32 get_ds_input_width(struct mdss_mdp_mixer *mixer)
+{
+ struct mdss_mdp_destination_scaler *ds;
+
+ ds = mixer->ds;
+ if (ds)
+ return ds->src_width;
+
+ return 0;
+}
+
+static inline u32 get_ds_input_height(struct mdss_mdp_mixer *mixer)
+{
+ struct mdss_mdp_destination_scaler *ds;
+
+ ds = mixer->ds;
+ if (ds)
+ return ds->src_height;
+
+ return 0;
+}
+
+static inline u32 get_ds_output_width(struct mdss_mdp_mixer *mixer)
+{
+ struct mdss_mdp_destination_scaler *ds;
+
+ ds = mixer->ds;
+ if (ds)
+ return ds->scaler.dst_width;
+
+ return 0;
+}
+
+static inline u32 get_ds_output_height(struct mdss_mdp_mixer *mixer)
+{
+ struct mdss_mdp_destination_scaler *ds;
+
+ ds = mixer->ds;
+ if (ds)
+ return ds->scaler.dst_height;
+
+ return 0;
+}
+
static inline u32 get_panel_yres(struct mdss_panel_info *pinfo)
{
u32 yres;
yres = pinfo->yres + pinfo->lcdc.border_top +
pinfo->lcdc.border_bottom;
+
return yres;
}
@@ -1064,6 +1166,7 @@ static inline u32 get_panel_xres(struct mdss_panel_info *pinfo)
xres = pinfo->xres + pinfo->lcdc.border_left +
pinfo->lcdc.border_right;
+
return xres;
}
@@ -1235,12 +1338,6 @@ static inline bool mdss_mdp_is_ubwc_supported(struct mdss_data_type *mdata)
return mdata->has_ubwc;
}
-static inline bool mdss_mdp_is_wb_rotator_supported(
- struct mdss_data_type *mdata)
-{
- return mdata && !mdata->has_separate_rotator;
-}
-
static inline int mdss_mdp_is_cdm_supported(struct mdss_data_type *mdata,
u32 intf_type, u32 mixer_type)
{
@@ -1653,6 +1750,7 @@ int mdss_mdp_ctl_addr_setup(struct mdss_data_type *mdata, u32 *ctl_offsets,
u32 len);
int mdss_mdp_wb_addr_setup(struct mdss_data_type *mdata,
u32 num_wb, u32 num_intf_wb);
+int mdss_mdp_ds_addr_setup(struct mdss_data_type *mdata);
void mdss_mdp_pipe_clk_force_off(struct mdss_mdp_pipe *pipe);
int mdss_mdp_pipe_fetch_halt(struct mdss_mdp_pipe *pipe, bool is_recovery);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index 6626c7eb2326..fcea868ff082 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -556,12 +556,12 @@ static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
struct mdss_rect src, struct mdss_rect dst, u32 src_h,
u32 fps, u32 v_total)
{
- u32 active_line_cycle, backfill_cycle, total_cycle;
- u32 ver_dwnscale;
- u32 active_line;
- u32 backfill_line;
+ u64 active_line_cycle, backfill_cycle, total_cycle;
+ u64 ver_dwnscale;
+ u64 active_line;
+ u64 backfill_line;
- ver_dwnscale = (src_h << PHASE_STEP_SHIFT) / dst.h;
+ ver_dwnscale = ((u64)src_h << PHASE_STEP_SHIFT) / dst.h;
if (ver_dwnscale > (MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
<< PHASE_STEP_SHIFT)) {
@@ -584,12 +584,12 @@ static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
total_cycle = active_line_cycle + backfill_cycle;
- pr_debug("line: active=%d backfill=%d vds=%d\n",
+ pr_debug("line: active=%lld backfill=%lld vds=%lld\n",
active_line, backfill_line, ver_dwnscale);
- pr_debug("cycle: total=%d active=%d backfill=%d\n",
+ pr_debug("cycle: total=%lld active=%lld backfill=%lld\n",
total_cycle, active_line_cycle, backfill_cycle);
- return total_cycle * (fps * v_total);
+ return (u32)total_cycle * (fps * v_total);
}
static inline bool __is_vert_downscaling(u32 src_h,
@@ -1205,6 +1205,7 @@ static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
struct mdss_panel_info *pinfo = NULL;
int fps = DEFAULT_FRAME_RATE;
u32 v_total = 0, bpp = MDSS_MDP_WB_OUTPUT_BPP;
+ u32 h_total = 0;
int i;
u32 max_clk_rate = 0;
u64 bw_overlap_max = 0;
@@ -1235,6 +1236,10 @@ static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
fps = mdss_panel_get_framerate(pinfo);
v_total = mdss_panel_get_vtotal(pinfo);
}
+ if (is_dest_scaling_enable(mixer))
+ h_total = get_ds_output_width(mixer);
+ else
+ h_total = mixer->width;
} else {
v_total = mixer->height;
}
@@ -1248,7 +1253,11 @@ static void mdss_mdp_perf_calc_mixer(struct mdss_mdp_mixer *mixer,
pinfo = NULL;
}
- perf->mdp_clk_rate = mixer->width * v_total * fps;
+ /*
+ * with destination scaling, the increase of clock
+ * calculation should depends on output of size of DS setting.
+ */
+ perf->mdp_clk_rate = h_total * v_total * fps;
perf->mdp_clk_rate =
mdss_mdp_clk_fudge_factor(mixer, perf->mdp_clk_rate);
@@ -3429,8 +3438,15 @@ int mdss_mdp_ctl_setup(struct mdss_mdp_ctl *ctl)
split_ctl = mdss_mdp_get_split_ctl(ctl);
- width = get_panel_width(ctl);
- height = get_panel_yres(pinfo);
+ if (is_dest_scaling_enable(ctl->mixer_left)) {
+ width = get_ds_input_width(ctl->mixer_left);
+ height = get_ds_input_height(ctl->mixer_left);
+ if (ctl->panel_data->next && is_pingpong_split(ctl->mfd))
+ width *= 2;
+ } else {
+ width = get_panel_width(ctl);
+ height = get_panel_yres(pinfo);
+ }
max_mixer_width = ctl->mdata->max_mixer_width;
@@ -3593,8 +3609,13 @@ int mdss_mdp_ctl_reconfig(struct mdss_mdp_ctl *ctl,
ctl->opmode |= (ctl->intf_num << 4);
skip_intf_reconfig:
- ctl->width = get_panel_xres(&pdata->panel_info);
- ctl->height = get_panel_yres(&pdata->panel_info);
+ if (is_dest_scaling_enable(ctl->mixer_left)) {
+ ctl->width = get_ds_input_width(ctl->mixer_left);
+ ctl->height = get_ds_input_height(ctl->mixer_left);
+ } else {
+ ctl->width = get_panel_xres(&pdata->panel_info);
+ ctl->height = get_panel_yres(&pdata->panel_info);
+ }
if (ctl->mixer_left) {
ctl->mixer_left->width = ctl->width;
ctl->mixer_left->height = ctl->height;
@@ -3741,11 +3762,6 @@ int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
return -ENODEV;
}
- sctl->width = get_panel_xres(&pdata->panel_info);
- sctl->height = get_panel_yres(&pdata->panel_info);
-
- sctl->roi = (struct mdss_rect){0, 0, sctl->width, sctl->height};
-
if (!ctl->mixer_left) {
ctl->mixer_left = mdss_mdp_mixer_alloc(ctl,
MDSS_MDP_MIXER_TYPE_INTF,
@@ -3764,6 +3780,16 @@ int mdss_mdp_ctl_split_display_setup(struct mdss_mdp_ctl *ctl,
return -ENOMEM;
}
+ if (is_dest_scaling_enable(mixer)) {
+ sctl->width = get_ds_input_width(mixer);
+ sctl->height = get_ds_input_height(mixer);
+ } else {
+ sctl->width = get_panel_xres(&pdata->panel_info);
+ sctl->height = get_panel_yres(&pdata->panel_info);
+ }
+
+ sctl->roi = (struct mdss_rect){0, 0, sctl->width, sctl->height};
+
mixer->is_right_mixer = true;
mixer->width = sctl->width;
mixer->height = sctl->height;
@@ -4942,6 +4968,43 @@ int mdss_mdp_wb_addr_setup(struct mdss_data_type *mdata,
return 0;
}
+int mdss_mdp_ds_addr_setup(struct mdss_data_type *mdata)
+{
+ struct mdss_mdp_destination_scaler *ds;
+ struct mdss_mdp_mixer *mixer = mdata->mixer_intf;
+ u32 num_ds_block;
+ int i;
+
+ num_ds_block = mdata->scaler_off->ndest_scalers;
+ ds = devm_kcalloc(&mdata->pdev->dev, num_ds_block,
+ sizeof(struct mdss_mdp_destination_scaler),
+ GFP_KERNEL);
+ if (!ds) {
+ pr_err("unable to setup ds: kzalloc failed\n");
+ return -ENOMEM;
+ }
+
+ for (i = 0; i < num_ds_block; i++) {
+ ds[i].num = i;
+ ds[i].ds_base = mdata->scaler_off->dest_base;
+ ds[i].scaler_base = mdata->scaler_off->dest_base +
+ mdata->scaler_off->dest_scaler_off[i];
+ ds[i].lut_base = mdata->scaler_off->dest_base +
+ mdata->scaler_off->dest_scaler_lut_off[i];
+
+ /*
+ * Assigning destination scaler to each LM. There is no dynamic
+ * assignment because destination scaler and LM are hard wired.
+ */
+ if (i < mdata->nmixers_intf)
+ mixer[i].ds = &ds[i];
+ }
+
+ mdata->ds = ds;
+
+ return 0;
+}
+
struct mdss_mdp_mixer *mdss_mdp_mixer_get(struct mdss_mdp_ctl *ctl, int mux)
{
struct mdss_mdp_mixer *mixer = NULL;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 47a7740e0c09..74ab902f6e8e 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -346,6 +346,10 @@ enum mdss_mdp_sspp_chroma_samp_type {
#define MDSS_MDP_REG_SCALER_MISR_SIGNATURE_0 0x74
#define MDSS_MDP_REG_SCALER_MISR_SIGNATURE_1 0x78
+/* Destination scaler TOP registers */
+#define MDSS_MDP_REG_DEST_SCALER_OP_MODE 0x00
+#define MDSS_MDP_REG_DEST_SCALER_HW_VERSION 0x10
+
#define SCALER_EN BIT(0)
#define SCALER_DIR_EN BIT(4)
#define SCALER_DE_EN BIT(8)
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 6fcefb56a739..237157c81515 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -784,7 +784,7 @@ static int mdss_mdp_wb_wait4comp(struct mdss_mdp_ctl *ctl, void *arg)
NULL, NULL);
if (rc == 0) {
- mask = BIT(ctx->intr_type + ctx->intf_num);
+ mask = mdss_mdp_get_irq_mask(ctx->intr_type, ctx->intf_num);
isr = readl_relaxed(ctl->mdata->mdp_base +
MDSS_MDP_REG_INTR_STATUS);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 8da8840f30ec..91b91dcc7960 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -62,6 +62,208 @@ struct mdss_mdp_validate_info_t {
struct mdss_mdp_pipe_multirect_params multirect;
};
+static inline void *u64_to_ptr(uint64_t address)
+{
+ return (void *)(uintptr_t)address;
+}
+
+static int __dest_scaler_data_setup(struct mdp_destination_scaler_data *ds_data,
+ struct mdss_mdp_destination_scaler *ds,
+ u32 max_input_width, u32 max_output_width)
+{
+ struct mdp_scale_data_v2 *scale;
+
+ ds->flags = (ds_data->flags & MDP_DESTSCALER_ENABLE) ? DS_ENABLE : 0;
+
+ if (ds_data->flags & (MDP_DESTSCALER_SCALE_UPDATE |
+ MDP_DESTSCALER_ENHANCER_UPDATE)) {
+ if (!ds_data->scale) {
+ pr_err("NULL scale data\n");
+ return -EFAULT;
+ }
+ scale = u64_to_ptr(ds_data->scale);
+
+ if (scale->src_width[0] > max_input_width) {
+ pr_err("Exceed max input width for dest scaler-%d: %d\n",
+ ds_data->dest_scaler_ndx,
+ scale->src_width[0]);
+ return -EINVAL;
+ }
+ if (scale->dst_width > max_output_width) {
+ pr_err("Exceed max output width for dest scaler-%d: %d\n",
+ ds_data->dest_scaler_ndx,
+ scale->dst_width);
+ return -EINVAL;
+ }
+
+ memcpy(&ds->scaler, scale, sizeof(*scale));
+ if (ds_data->flags & MDP_DESTSCALER_SCALE_UPDATE)
+ ds->flags |= DS_SCALE_UPDATE;
+ if (ds_data->flags & MDP_DESTSCALER_ENHANCER_UPDATE)
+ ds->flags |= DS_ENHANCER_UPDATE;
+ ds->src_width = scale->src_width[0];
+ ds->src_height = scale->src_height[0];
+ }
+
+ if (ds_data->flags == 0) {
+ pr_debug("Disabling destination scaler-%d\n",
+ ds_data->dest_scaler_ndx);
+ }
+
+ return 0;
+}
+
+static int mdss_mdp_destination_scaler_pre_validate(struct mdss_mdp_ctl *ctl,
+ struct mdp_destination_scaler_data *ds_data)
+{
+ struct mdss_data_type *mdata;
+ struct mdss_panel_info *pinfo;
+
+ mdata = ctl->mdata;
+
+ /*
+ * we need to quickly check for any scale update, and adjust the mixer
+ * width and height accordingly. Otherwise, layer validate will fail
+ * when we switch between scaling factor or disabling scaling.
+ */
+ if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map) && ds_data) {
+ if (ctl->mixer_left) {
+ /*
+ * Any scale update from usermode, we will update the
+ * mixer width and height with the given LM width and
+ * height.
+ */
+ pinfo = &ctl->panel_data->panel_info;
+ if ((ds_data->lm_width > get_panel_xres(pinfo)) ||
+ (ds_data->lm_height > get_panel_yres(pinfo)) ||
+ (ds_data->lm_width == 0) ||
+ (ds_data->lm_height == 0)) {
+ pr_err("Invalid LM width / height setting\n");
+ return -EINVAL;
+ }
+
+ ctl->width = ds_data->lm_width;
+ ctl->height = ds_data->lm_height;
+
+ ctl->mixer_left->width = ds_data->lm_width;
+ ctl->mixer_left->height = ds_data->lm_height;
+ pr_debug("Update mixer-left width/height: %dx%d\n",
+ ds_data->lm_width, ds_data->lm_width);
+
+ }
+
+ if (ctl->mixer_right) {
+ /*
+ * Split display both left and right should have the
+ * same width and height
+ */
+ ctl->mixer_right->width = ds_data->lm_width;
+ ctl->mixer_right->height = ds_data->lm_height;
+ pr_info("Update mixer-right width/height: %dx%d\n",
+ ds_data->lm_width, ds_data->lm_height);
+
+ /*
+ * For split display, CTL width should be equal to
+ * whole panel size
+ */
+ ctl->width += ds_data->lm_width;
+ }
+
+ pr_debug("Updated CTL width:%d, height:%d\n",
+ ctl->width, ctl->height);
+ }
+
+ return 0;
+}
+
+static int mdss_mdp_validate_destination_scaler(struct msm_fb_data_type *mfd,
+ struct mdp_destination_scaler_data *ds_data,
+ u32 ds_mode)
+{
+ int ret = 0;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_mdp_destination_scaler *ds_left = NULL;
+ struct mdss_mdp_destination_scaler *ds_right = NULL;
+
+ if (ds_data) {
+ mdata = mfd_to_mdata(mfd);
+ ctl = mfd_to_ctl(mfd);
+
+ if (ctl->mixer_left)
+ ds_left = ctl->mixer_left->ds;
+
+ if (ctl->mixer_right)
+ ds_right = ctl->mixer_right->ds;
+
+ switch (ds_mode) {
+ case DS_DUAL_MODE:
+ if (!ds_left || !ds_right) {
+ pr_err("Cannot support DUAL mode dest scaling\n");
+ return -EINVAL;
+ }
+
+ ret = __dest_scaler_data_setup(&ds_data[0], ds_left,
+ mdata->max_dest_scaler_input_width -
+ MDSS_MDP_DS_OVERFETCH_SIZE,
+ mdata->max_dest_scaler_output_width);
+ if (ret)
+ return ret;
+
+ ret = __dest_scaler_data_setup(&ds_data[1], ds_right,
+ mdata->max_dest_scaler_input_width -
+ MDSS_MDP_DS_OVERFETCH_SIZE,
+ mdata->max_dest_scaler_output_width);
+ if (ret)
+ return ret;
+
+ ds_left->flags &= ~(DS_LEFT|DS_RIGHT);
+ ds_left->flags |= DS_DUAL_MODE;
+ ds_right->flags &= ~(DS_LEFT|DS_RIGHT);
+ ds_right->flags |= DS_DUAL_MODE;
+ break;
+
+ case DS_LEFT:
+ if (!ds_left) {
+ pr_err("LM in ctl does not support Destination Scaler\n");
+ return -EINVAL;
+ }
+ ds_left->flags &= ~(DS_DUAL_MODE|DS_RIGHT);
+ ds_left->flags |= DS_LEFT;
+
+ ret = __dest_scaler_data_setup(&ds_data[0], ds_left,
+ mdata->max_dest_scaler_input_width,
+ mdata->max_dest_scaler_output_width);
+ break;
+
+ case DS_RIGHT:
+ if (!ds_right) {
+ pr_err("Cannot setup DS_RIGHT because only single DS assigned to ctl\n");
+ return -EINVAL;
+ }
+
+ ds_right->flags &= ~(DS_DUAL_MODE|DS_LEFT);
+ ds_right->flags |= DS_RIGHT;
+
+ ret = __dest_scaler_data_setup(&ds_data[0], ds_right,
+ mdata->max_dest_scaler_input_width,
+ mdata->max_dest_scaler_output_width);
+ break;
+ }
+
+ } else {
+ pr_err("NULL destionation scaler data\n");
+ return -EFAULT;
+ }
+
+ if (ds_left)
+ pr_debug("DS_LEFT: flags=0x%X\n", ds_left->flags);
+ if (ds_right)
+ pr_debug("DS_RIGHT: flags=0x%X\n", ds_right->flags);
+
+ return ret;
+}
+
/*
* __layer_needs_src_split() - check needs source split configuration
* @layer: input layer
@@ -1605,11 +1807,13 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
u32 left_lm_w = left_lm_w_from_mfd(mfd);
u32 mixer_mux, dst_x;
int layer_count = commit->input_layer_cnt;
+ u32 ds_mode = 0;
struct mdss_mdp_pipe *pipe, *tmp, *left_blend_pipe;
struct mdss_mdp_pipe *right_plist[MAX_PIPES_PER_LM] = {0};
struct mdss_mdp_pipe *left_plist[MAX_PIPES_PER_LM] = {0};
struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+ struct mdss_data_type *mdata = mfd_to_mdata(mfd);
struct mdss_mdp_mixer *mixer = NULL;
struct mdp_input_layer *layer, *prev_layer, *layer_list;
@@ -1863,6 +2067,37 @@ static int __validate_layers(struct msm_fb_data_type *mfd,
layer->z_order -= MDSS_MDP_STAGE_0;
}
+ if (test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map) &&
+ commit->dest_scaler) {
+ /*
+ * Find out which DS block to use based on LM assignment
+ */
+ if ((left_cnt > 0) && (right_cnt > 0) &&
+ (commit->dest_scaler_cnt == 2))
+ ds_mode = DS_DUAL_MODE;
+ else if ((left_cnt > 0) && (right_cnt == 0) &&
+ (commit->dest_scaler_cnt == 1))
+ ds_mode = DS_LEFT;
+ else if ((left_cnt == 0) && (right_cnt > 0) &&
+ (commit->dest_scaler_cnt == 1))
+ ds_mode = DS_RIGHT;
+ else {
+ pr_err("Commit destination scaler count not matching with LM assignment, DS-cnt:%d\n",
+ commit->dest_scaler_cnt);
+ ret = -EINVAL;
+ goto validate_exit;
+ }
+
+ ret = mdss_mdp_validate_destination_scaler(mfd,
+ commit->dest_scaler,
+ ds_mode);
+ if (ret) {
+ pr_err("fail to validate destination scaler\n");
+ layer->error_code = ret;
+ goto validate_exit;
+ }
+ }
+
ret = mdss_mdp_perf_bw_check(mdp5_data->ctl, left_plist, left_cnt,
right_plist, right_cnt);
if (ret) {
@@ -2130,6 +2365,12 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
}
+ if (mdss_mdp_destination_scaler_pre_validate(mdp5_data->ctl,
+ commit->dest_scaler)) {
+ pr_err("Destination scaler pre-validate failed\n");
+ return -EINVAL;
+ }
+
return __validate_layers(mfd, file, commit);
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index e0ead6c1c5b7..a2a8855ac473 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -45,6 +45,8 @@
#define QSEED3_DEFAULT_PRELAOD_H 0x4
#define QSEED3_DEFAULT_PRELAOD_V 0x3
+#define TS_CLK 19200000
+
static DEFINE_MUTEX(mdss_mdp_sspp_lock);
static DEFINE_MUTEX(mdss_mdp_smp_lock);
@@ -2375,7 +2377,7 @@ bool mdss_mdp_is_amortizable_pipe(struct mdss_mdp_pipe *pipe,
struct mdss_mdp_mixer *mixer, struct mdss_data_type *mdata)
{
/* do not apply for rotator or WB */
- return ((pipe->src.y > mdata->prefill_data.ts_threshold) &&
+ return ((pipe->dst.y > mdata->prefill_data.ts_threshold) &&
(mixer->type == MDSS_MDP_MIXER_TYPE_INTF));
}
@@ -2393,7 +2395,7 @@ static inline void __get_ordered_rects(struct mdss_mdp_pipe *pipe,
*high_pipe = pipe->multirect.next;
/* if pipes are not in order, order them according to position */
- if ((*low_pipe)->src.y > (*high_pipe)->src.y) {
+ if ((*low_pipe)->dst.y > (*high_pipe)->dst.y) {
*low_pipe = pipe->multirect.next;
*high_pipe = pipe;
}
@@ -2403,7 +2405,7 @@ static u32 __get_ts_count(struct mdss_mdp_pipe *pipe,
struct mdss_mdp_mixer *mixer, bool is_low_pipe)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
- u32 ts_diff, ts_ypos;
+ u32 ts_diff, ts_ypos, rate_factor;
struct mdss_mdp_pipe *low_pipe, *high_pipe;
u32 ts_count = 0;
u32 v_total, fps, h_total, xres;
@@ -2419,8 +2421,10 @@ static u32 __get_ts_count(struct mdss_mdp_pipe *pipe,
if (mdss_mdp_is_amortizable_pipe(pipe, mixer, mdata)) {
ts_diff = mdata->prefill_data.ts_threshold -
mdata->prefill_data.ts_end;
- ts_ypos = pipe->src.y - ts_diff;
- ts_count = mult_frac(ts_ypos, 19200000, fps * v_total);
+ ts_ypos = pipe->dst.y - ts_diff;
+ rate_factor = TS_CLK / fps;
+ ts_count = mult_frac(ts_ypos, rate_factor, v_total);
+ MDSS_XLOG(ts_diff, ts_ypos, rate_factor, ts_count);
}
} else { /* high pipe */
@@ -2428,8 +2432,10 @@ static u32 __get_ts_count(struct mdss_mdp_pipe *pipe,
if (pipe &&
pipe->multirect.mode == MDSS_MDP_PIPE_MULTIRECT_SERIAL) {
__get_ordered_rects(pipe, &low_pipe, &high_pipe);
- ts_count = high_pipe->src.y - low_pipe->src.y - 1;
- ts_count = mult_frac(ts_count, 19200000, fps * v_total);
+ ts_ypos = high_pipe->dst.y - low_pipe->dst.y - 1;
+ rate_factor = TS_CLK / fps;
+ ts_count = mult_frac(ts_ypos, rate_factor, v_total);
+ MDSS_XLOG(ts_ypos, rate_factor, ts_count);
}
}
@@ -2446,7 +2452,14 @@ static u32 __calc_ts_bytes(struct mdss_rect *src, u32 fps, u32 bpp)
ts_bytes = mult_frac(ts_bytes,
mdata->prefill_data.ts_rate.numer,
mdata->prefill_data.ts_rate.denom);
- ts_bytes /= 19200000;
+ ts_bytes = DIV_ROUND_UP(ts_bytes, TS_CLK);
+
+ pr_debug("ts:%d, w:%d h:%d fps:%d bpp:%d\n", ts_bytes,
+ src->w, src->h, fps, bpp);
+ MDSS_XLOG(ts_bytes, src->w, src->h, fps, bpp);
+
+ if (ts_bytes == 0)
+ ts_bytes = 1;
return ts_bytes;
}
@@ -2513,9 +2526,9 @@ static u32 __get_ts_bytes(struct mdss_mdp_pipe *pipe,
/* amortize depending on the lower pipe amortization */
if (mdss_mdp_is_amortizable_pipe(low_pipe, mixer, mdata))
ts_bytes = DIV_ROUND_UP_ULL(max(low_pipe_bw,
- high_pipe_bw), 19200000);
+ high_pipe_bw), TS_CLK);
else
- ts_bytes = DIV_ROUND_UP_ULL(high_pipe_bw, 19200000);
+ ts_bytes = DIV_ROUND_UP_ULL(high_pipe_bw, TS_CLK);
break;
default:
pr_err("unknown multirect mode!\n");
@@ -2570,6 +2583,8 @@ static int mdss_mdp_set_ts_pipe(struct mdss_mdp_pipe *pipe)
ts_rec1 = ts_count_low;
}
+ mdss_mdp_vsync_clk_enable(1, false);
+
mdss_mdp_pipe_qos_ctrl(pipe, false, MDSS_MDP_PIPE_QOS_VBLANK_AMORTIZE);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER, ts_bytes);
mdss_mdp_pipe_write(pipe, MDSS_MDP_REG_SSPP_TRAFFIC_SHAPER_PREFILL,
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 715f4428e81a..f92d4bb9ed1d 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -221,6 +221,26 @@ struct mdp_csc_cfg mdp_csc_10bit_convert[MDSS_MDP_MAX_CSC] = {
},
};
+static struct mdss_mdp_format_params dest_scaler_fmt = {
+ .format = MDP_XBGR_2101010,
+ .flag = 0,
+ .fetch_planes = MDSS_MDP_PLANE_INTERLEAVED,
+ .unpack_tight = 1,
+ .unpack_align_msb = 0,
+ .alpha_enable = 0,
+ .unpack_count = 4,
+ .bpp = 4,
+ .fetch_mode = MDSS_MDP_FETCH_LINEAR,
+ .element = { C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr },
+ .bits = {
+ [C3_ALPHA] = 3,
+ [C2_R_Cr] = 3,
+ [C0_G_Y] = 3,
+ [C1_B_Cb] = 3,
+ },
+ .unpack_dx_format = 1,
+};
+
#define CSC_MV_OFF 0x0
#define CSC_BV_OFF 0x2C
#define CSC_LV_OFF 0x14
@@ -1589,48 +1609,15 @@ static void mdss_mdp_scaler_detail_enhance_cfg(
}
}
-int mdss_mdp_qseed3_setup(struct mdss_mdp_pipe *pipe,
- int location, int id)
+int mdss_mdp_qseed3_setup(struct mdp_scale_data_v2 *scaler,
+ char __iomem *offset,
+ char __iomem *lut_offset,
+ struct mdss_mdp_format_params *fmt)
{
int rc = 0;
- struct mdp_scale_data_v2 *scaler;
- struct mdss_data_type *mdata;
- char __iomem *offset, *lut_offset;
- struct mdss_mdp_format_params *fmt;
uint32_t op_mode = 0;
uint32_t phase_init, preload, src_y_rgb, src_uv, dst;
- mdata = mdss_mdp_get_mdata();
- /* SRC pipe QSEED3 Configuration */
- if (location == SSPP_VIG) {
- scaler = &pipe->scaler;
- offset = pipe->base + mdata->scaler_off->vig_scaler_off;
- lut_offset = pipe->base + mdata->scaler_off->vig_scaler_lut_off;
- fmt = pipe->src_fmt;
- } else if (location == DSPP) {
- /* Destination scaler QSEED3 Configuration */
- if ((mdata->scaler_off->has_dest_scaler) &&
- (id < mdata->scaler_off->ndest_scalers)) {
- /* TODO :point to the destination params */
- scaler = NULL;
- offset = mdata->scaler_off->dest_base +
- mdata->scaler_off->dest_scaler_off[id];
- lut_offset = mdata->scaler_off->dest_base +
- mdata->scaler_off->dest_scaler_lut_off[id];
- /*TODO : set pixel fmt to RGB101010 */
- return -ENOSYS;
- } else {
- return -EINVAL;
- }
- } else {
- return -EINVAL;
- }
-
- if (!scaler) {
- pr_debug("scaler pointer is NULL\n");
- return 0;
- }
-
pr_debug("scaler->enable=%d", scaler->enable);
if (scaler->enable) {
@@ -1650,8 +1637,6 @@ int mdss_mdp_qseed3_setup(struct mdss_mdp_pipe *pipe,
ALPHA_FILTER_CFG;
}
- /* TODO:if src_fmt is 10 bits program the bitwidth
- * accordingly */
if (!fmt->unpack_dx_format)
op_mode |= 0x1 << SCALER_BIT_WIDTH;
@@ -1750,12 +1735,24 @@ static int mdss_mdp_scale_setup(struct mdss_mdp_pipe *pipe,
{
struct mdss_data_type *mdata;
int rc = 0;
+ char __iomem *offset, *lut_offset;
mdata = mdss_mdp_get_mdata();
- if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map))
- rc = mdss_mdp_qseed3_setup(pipe, pp_blk, 0);
- else
+
+ if (test_bit(MDSS_CAPS_QSEED3, mdata->mdss_caps_map)) {
+ if (pp_blk == SSPP_VIG) {
+ offset = pipe->base + mdata->scaler_off->vig_scaler_off;
+ lut_offset = pipe->base +
+ mdata->scaler_off->vig_scaler_lut_off;
+
+ rc = mdss_mdp_qseed3_setup(&pipe->scaler, offset,
+ lut_offset, pipe->src_fmt);
+ } else {
+ rc = -EINVAL;
+ }
+ } else {
rc = mdss_mdp_qseed2_setup(pipe);
+ }
if (rc)
pr_err("scale setup on pipe %d type %d failed ret %d\n",
@@ -2432,6 +2429,71 @@ dspp_exit:
return ret;
}
+static int pp_dest_scaler_setup(struct mdss_mdp_mixer *mixer)
+{
+ struct mdss_mdp_ctl *ctl;
+ struct mdss_data_type *mdata;
+ struct mdss_mdp_destination_scaler *ds;
+ int ret = 0;
+ u32 op_mode;
+ u32 mask;
+ char *ds_offset;
+
+ if (!mixer || !mixer->ctl || !mixer->ctl->mdata)
+ return -EINVAL;
+
+ ctl = mixer->ctl;
+ mdata = ctl->mdata;
+ ds = mixer->ds;
+
+ if (!test_bit(MDSS_CAPS_DEST_SCALER, mdata->mdss_caps_map) || !ds)
+ return 0;
+
+ ds_offset = ds->ds_base;
+ op_mode = readl_relaxed(MDSS_MDP_REG_DEST_SCALER_OP_MODE +
+ ds_offset);
+
+ mask = BIT(ds->num);
+ if (ds->flags & DS_ENABLE)
+ op_mode |= mask;
+ else
+ op_mode &= ~mask;
+
+ if (ds->flags & DS_DUAL_MODE)
+ op_mode |= BIT(16);
+ else
+ op_mode &= ~BIT(16);
+
+ writel_relaxed(op_mode, MDSS_MDP_REG_DEST_SCALER_OP_MODE + ds_offset);
+
+ if (ds->flags & DS_SCALE_UPDATE) {
+ ret = mdss_mdp_qseed3_setup(&ds->scaler,
+ ds->scaler_base, ds->lut_base,
+ &dest_scaler_fmt);
+ if (ret) {
+ pr_err("Failed setup destination scaler\n");
+ return ret;
+ }
+ /*
+ * Clearing the flag because we don't need to program the block
+ * for each commit if there is no change.
+ */
+ ds->flags &= ~DS_SCALE_UPDATE;
+ }
+
+ if (ds->flags & DS_ENHANCER_UPDATE) {
+ mdss_mdp_scaler_detail_enhance_cfg(&ds->scaler.detail_enhance,
+ ds->scaler_base);
+ ds->flags &= ~DS_ENHANCER_UPDATE;
+ }
+
+ /* Destinations scaler shared the flush with DSPP in control */
+ if (ds->flags & DS_ENABLE)
+ ctl->flush_bits |= BIT(13 + ds->num);
+
+ return 0;
+}
+
int mdss_mdp_pp_setup(struct mdss_mdp_ctl *ctl)
{
int ret = 0;
@@ -2521,11 +2583,13 @@ int mdss_mdp_pp_setup_locked(struct mdss_mdp_ctl *ctl)
}
if (ctl->mixer_left) {
+ pp_dest_scaler_setup(ctl->mixer_left);
pp_mixer_setup(ctl->mixer_left);
pp_dspp_setup(disp_num, ctl->mixer_left);
pp_ppb_setup(ctl->mixer_left);
}
if (ctl->mixer_right) {
+ pp_dest_scaler_setup(ctl->mixer_right);
pp_mixer_setup(ctl->mixer_right);
pp_dspp_setup(disp_num, ctl->mixer_right);
pp_ppb_setup(ctl->mixer_right);
diff --git a/drivers/video/fbdev/msm/mdss_smmu.h b/drivers/video/fbdev/msm/mdss_smmu.h
index 19ec3d2d1e2d..a987066cc773 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.h
+++ b/drivers/video/fbdev/msm/mdss_smmu.h
@@ -61,15 +61,14 @@ static inline bool is_mdss_smmu_compatible_device(const char *str)
* mdss_smmu_is_valid_domain_type()
*
* Used to check if rotator smmu domain is defined or not by checking if
- * vbif base is defined and wb rotator exists. As those are associated.
+ * vbif base is defined. As those are associated.
*/
static inline bool mdss_smmu_is_valid_domain_type(struct mdss_data_type *mdata,
int domain_type)
{
if ((domain_type == MDSS_IOMMU_DOMAIN_ROT_UNSECURE ||
domain_type == MDSS_IOMMU_DOMAIN_ROT_SECURE) &&
- (!mdss_mdp_is_wb_rotator_supported(mdata) ||
- !mdss_mdp_is_nrt_vbif_base_defined(mdata)))
+ !mdss_mdp_is_nrt_vbif_base_defined(mdata))
return false;
return true;
}
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7e107c3d7a5c..61a5c00e66cd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -328,6 +328,16 @@ enum task_event {
IRQ_UPDATE = 5,
};
+/* Note: this need to be in sync with migrate_type_names array */
+enum migrate_types {
+ GROUP_TO_RQ,
+ RQ_TO_GROUP,
+ RQ_TO_RQ,
+ GROUP_TO_GROUP,
+};
+
+extern const char *migrate_type_names[];
+
#include <linux/spinlock.h>
/*
diff --git a/include/linux/sched/sysctl.h b/include/linux/sched/sysctl.h
index 84bac3e07709..2ac84af88802 100644
--- a/include/linux/sched/sysctl.h
+++ b/include/linux/sched/sysctl.h
@@ -75,6 +75,7 @@ extern unsigned int sysctl_sched_restrict_cluster_spill;
#if defined(CONFIG_SCHED_FREQ_INPUT)
extern unsigned int sysctl_sched_new_task_windows;
extern unsigned int sysctl_sched_pred_alert_freq;
+extern unsigned int sysctl_sched_freq_aggregate;
#endif
#else /* CONFIG_SCHED_HMP */
diff --git a/include/linux/usb/msm_hsusb.h b/include/linux/usb/msm_hsusb.h
index b1a0cfd6a8ce..89effb61d153 100644
--- a/include/linux/usb/msm_hsusb.h
+++ b/include/linux/usb/msm_hsusb.h
@@ -174,6 +174,8 @@ struct msm_usb_cable {
#define PHY_CHARGER_CONNECTED BIT(3)
#define PHY_VBUS_VALID_OVERRIDE BIT(4)
#define DEVICE_IN_SS_MODE BIT(5)
+#define PHY_LANE_A BIT(6)
+#define PHY_LANE_B BIT(7)
#define USB_NUM_BUS_CLOCKS 3
diff --git a/include/soc/qcom/icnss.h b/include/soc/qcom/icnss.h
index f688b56c5798..6b250c68aaee 100644
--- a/include/soc/qcom/icnss.h
+++ b/include/soc/qcom/icnss.h
@@ -97,5 +97,6 @@ extern int icnss_ce_request_irq(unsigned int ce_id,
unsigned long flags, const char *name, void *ctx);
extern int icnss_get_ce_id(int irq);
extern int icnss_set_fw_debug_mode(bool enablefwlog);
+extern int icnss_get_irq(int ce_id);
#endif /* _ICNSS_WLAN_H_ */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 6c5fa35e2875..81415b78ef39 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -9,6 +9,8 @@
#include <linux/binfmts.h>
struct rq;
+struct group_cpu_time;
+struct migration_sum_data;
extern const char *task_event_names[];
/*
@@ -269,9 +271,10 @@ TRACE_EVENT(sched_set_boost,
TRACE_EVENT(sched_update_task_ravg,
TP_PROTO(struct task_struct *p, struct rq *rq, enum task_event evt,
- u64 wallclock, u64 irqtime, u32 cycles, u32 exec_time),
+ u64 wallclock, u64 irqtime, u32 cycles, u32 exec_time,
+ struct group_cpu_time *cpu_time),
- TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time),
+ TP_ARGS(p, rq, evt, wallclock, irqtime, cycles, exec_time, cpu_time),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -290,8 +293,12 @@ TRACE_EVENT(sched_update_task_ravg,
__field( int, cpu )
#ifdef CONFIG_SCHED_FREQ_INPUT
__field(unsigned int, pred_demand )
- __field( u64, cs )
- __field( u64, ps )
+ __field( u64, rq_cs )
+ __field( u64, rq_ps )
+ __field( u64, grp_cs )
+ __field( u64, grp_ps )
+ __field( u64, grp_nt_cs )
+ __field( u64, grp_nt_ps )
__field( u32, curr_window )
__field( u32, prev_window )
__field( u64, nt_cs )
@@ -318,8 +325,12 @@ TRACE_EVENT(sched_update_task_ravg,
__entry->irqtime = irqtime;
#ifdef CONFIG_SCHED_FREQ_INPUT
__entry->pred_demand = p->ravg.pred_demand;
- __entry->cs = rq->curr_runnable_sum;
- __entry->ps = rq->prev_runnable_sum;
+ __entry->rq_cs = rq->curr_runnable_sum;
+ __entry->rq_ps = rq->prev_runnable_sum;
+ __entry->grp_cs = cpu_time ? cpu_time->curr_runnable_sum : 0;
+ __entry->grp_ps = cpu_time ? cpu_time->prev_runnable_sum : 0;
+ __entry->grp_nt_cs = cpu_time ? cpu_time->nt_curr_runnable_sum : 0;
+ __entry->grp_nt_ps = cpu_time ? cpu_time->nt_prev_runnable_sum : 0;
__entry->curr_window = p->ravg.curr_window;
__entry->prev_window = p->ravg.prev_window;
__entry->nt_cs = rq->nt_curr_runnable_sum;
@@ -330,7 +341,7 @@ TRACE_EVENT(sched_update_task_ravg,
TP_printk("wc %llu ws %llu delta %llu event %s cpu %d cur_freq %u cur_pid %d task %d (%s) ms %llu delta %llu demand %u sum %u irqtime %llu"
#ifdef CONFIG_SCHED_FREQ_INPUT
- " pred_demand %u cs %llu ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u"
+ " pred_demand %u rq_cs %llu rq_ps %llu cur_window %u prev_window %u nt_cs %llu nt_ps %llu active_wins %u grp_cs %lld grp_ps %lld, grp_nt_cs %llu, grp_nt_ps: %llu"
#endif
, __entry->wallclock, __entry->win_start, __entry->delta,
task_event_names[__entry->evt], __entry->cpu,
@@ -339,10 +350,12 @@ TRACE_EVENT(sched_update_task_ravg,
__entry->delta_m, __entry->demand,
__entry->sum, __entry->irqtime
#ifdef CONFIG_SCHED_FREQ_INPUT
- , __entry->pred_demand, __entry->cs, __entry->ps,
+ , __entry->pred_demand, __entry->rq_cs, __entry->rq_ps,
__entry->curr_window, __entry->prev_window,
__entry->nt_cs, __entry->nt_ps,
- __entry->active_windows
+ __entry->active_windows,
+ __entry->grp_cs, __entry->grp_ps,
+ __entry->grp_nt_cs, __entry->grp_nt_ps
#endif
)
);
@@ -506,31 +519,62 @@ TRACE_EVENT(sched_update_pred_demand,
TRACE_EVENT(sched_migration_update_sum,
- TP_PROTO(struct rq *rq, struct task_struct *p),
+ TP_PROTO(struct task_struct *p, enum migrate_types migrate_type, struct migration_sum_data *d),
- TP_ARGS(rq, p),
+ TP_ARGS(p, migrate_type, d),
TP_STRUCT__entry(
- __field(int, cpu )
+ __field(int, tcpu )
__field(int, pid )
__field( u64, cs )
__field( u64, ps )
__field( s64, nt_cs )
__field( s64, nt_ps )
+ __field(enum migrate_types, migrate_type )
+ __field( s64, src_cs )
+ __field( s64, src_ps )
+ __field( s64, dst_cs )
+ __field( s64, dst_ps )
+ __field( s64, src_nt_cs )
+ __field( s64, src_nt_ps )
+ __field( s64, dst_nt_cs )
+ __field( s64, dst_nt_ps )
),
TP_fast_assign(
- __entry->cpu = cpu_of(rq);
- __entry->cs = rq->curr_runnable_sum;
- __entry->ps = rq->prev_runnable_sum;
- __entry->nt_cs = (s64)rq->nt_curr_runnable_sum;
- __entry->nt_ps = (s64)rq->nt_prev_runnable_sum;
+ __entry->tcpu = task_cpu(p);
__entry->pid = p->pid;
- ),
-
- TP_printk("cpu %d: cs %llu ps %llu nt_cs %lld nt_ps %lld pid %d",
- __entry->cpu, __entry->cs, __entry->ps,
- __entry->nt_cs, __entry->nt_ps, __entry->pid)
+ __entry->migrate_type = migrate_type;
+ __entry->src_cs = d->src_rq ?
+ d->src_rq->curr_runnable_sum :
+ d->src_cpu_time->curr_runnable_sum;
+ __entry->src_ps = d->src_rq ?
+ d->src_rq->prev_runnable_sum :
+ d->src_cpu_time->prev_runnable_sum;
+ __entry->dst_cs = d->dst_rq ?
+ d->dst_rq->curr_runnable_sum :
+ d->dst_cpu_time->curr_runnable_sum;
+ __entry->dst_ps = d->dst_rq ?
+ d->dst_rq->prev_runnable_sum :
+ d->dst_cpu_time->prev_runnable_sum;
+ __entry->src_nt_cs = d->src_rq ?
+ d->src_rq->nt_curr_runnable_sum :
+ d->src_cpu_time->nt_curr_runnable_sum;
+ __entry->src_nt_ps = d->src_rq ?
+ d->src_rq->nt_prev_runnable_sum :
+ d->src_cpu_time->nt_prev_runnable_sum;
+ __entry->dst_nt_cs = d->dst_rq ?
+ d->dst_rq->nt_curr_runnable_sum :
+ d->dst_cpu_time->nt_curr_runnable_sum;
+ __entry->dst_nt_ps = d->dst_rq ?
+ d->dst_rq->nt_prev_runnable_sum :
+ d->dst_cpu_time->nt_prev_runnable_sum;
+ ),
+
+ TP_printk("pid %d task_cpu %d migrate_type %s src_cs %llu src_ps %llu dst_cs %lld dst_ps %lld src_nt_cs %llu src_nt_ps %llu dst_nt_cs %lld dst_nt_ps %lld",
+ __entry->pid, __entry->tcpu, migrate_type_names[__entry->migrate_type],
+ __entry->src_cs, __entry->src_ps, __entry->dst_cs, __entry->dst_ps,
+ __entry->src_nt_cs, __entry->src_nt_ps, __entry->dst_nt_cs, __entry->dst_nt_ps)
);
TRACE_EVENT(sched_get_busy,
@@ -562,15 +606,17 @@ TRACE_EVENT(sched_get_busy,
TRACE_EVENT(sched_freq_alert,
- TP_PROTO(int cpu, int pd_notif, u64 old_load, u64 new_load,
- u64 old_pred, u64 new_pred),
+ TP_PROTO(int cpu, int pd_notif, int check_groups, struct rq *rq,
+ u64 new_load),
- TP_ARGS(cpu, pd_notif, old_load, new_load, old_pred, new_pred),
+ TP_ARGS(cpu, pd_notif, check_groups, rq, new_load),
TP_STRUCT__entry(
__field( int, cpu )
__field( int, pd_notif )
- __field( u64, old_load )
+ __field( int, check_groups )
+ __field( u64, old_busy_time )
+ __field( u64, ps )
__field( u64, new_load )
__field( u64, old_pred )
__field( u64, new_pred )
@@ -579,17 +625,18 @@ TRACE_EVENT(sched_freq_alert,
TP_fast_assign(
__entry->cpu = cpu;
__entry->pd_notif = pd_notif;
- __entry->old_load = old_load;
+ __entry->check_groups = check_groups;
+ __entry->old_busy_time = rq->old_busy_time;
+ __entry->ps = rq->prev_runnable_sum;
__entry->new_load = new_load;
- __entry->old_pred = old_pred;
- __entry->new_pred = new_pred;
+ __entry->old_pred = rq->old_estimated_time;
+ __entry->new_pred = rq->hmp_stats.pred_demands_sum;
),
- TP_printk("cpu %d pd_notif=%d old_load=%llu new_load=%llu "
- "old_pred=%llu new_pred=%llu",
- __entry->cpu, __entry->pd_notif, __entry->old_load,
- __entry->new_load, __entry->old_pred,
- __entry->new_pred)
+ TP_printk("cpu %d pd_notif=%d check_groups %d old_busy_time=%llu prev_sum=%lld new_load=%llu old_pred=%llu new_pred=%llu",
+ __entry->cpu, __entry->pd_notif, __entry->check_groups,
+ __entry->old_busy_time, __entry->ps, __entry->new_load,
+ __entry->old_pred, __entry->new_pred)
);
#endif /* CONFIG_SCHED_FREQ_INPUT */
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0b55bbbd7431..87e93b3f3b4e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -97,6 +97,9 @@ const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
"IRQ_UPDATE"};
+const char *migrate_type_names[] = {"GROUP_TO_RQ", "RQ_TO_GROUP",
+ "RQ_TO_RQ", "GROUP_TO_GROUP"};
+
ATOMIC_NOTIFIER_HEAD(migration_notifier_head);
ATOMIC_NOTIFIER_HEAD(load_alert_notifier_head);
@@ -1864,6 +1867,61 @@ __read_mostly unsigned int sched_major_task_runtime = 10000000;
static unsigned int sync_cpu;
+static LIST_HEAD(related_thread_groups);
+static DEFINE_RWLOCK(related_thread_group_lock);
+
+#define for_each_related_thread_group(grp) \
+ list_for_each_entry(grp, &related_thread_groups, list)
+
+/*
+ * Demand aggregation for frequency purpose:
+ *
+ * 'sched_freq_aggregate' controls aggregation of cpu demand of related threads
+ * for frequency determination purpose. This aggregation is done per-cluster.
+ *
+ * CPU demand of tasks from various related groups is aggregated per-cluster and
+ * added to the "max_busy_cpu" in that cluster, where max_busy_cpu is determined
+ * by just rq->prev_runnable_sum.
+ *
+ * Some examples follow, which assume:
+ * Cluster0 = CPU0-3, Cluster1 = CPU4-7
+ * One related thread group A that has tasks A0, A1, A2
+ *
+ * A->cpu_time[X].curr/prev_sum = counters in which cpu execution stats of
+ * tasks belonging to group A are accumulated when they run on cpu X.
+ *
+ * CX->curr/prev_sum = counters in which cpu execution stats of all tasks
+ * not belonging to group A are accumulated when they run on cpu X
+ *
+ * Lets say the stats for window M was as below:
+ *
+ * C0->prev_sum = 1ms, A->cpu_time[0].prev_sum = 5ms
+ * Task A0 ran 5ms on CPU0
+ * Task B0 ran 1ms on CPU0
+ *
+ * C1->prev_sum = 5ms, A->cpu_time[1].prev_sum = 6ms
+ * Task A1 ran 4ms on CPU1
+ * Task A2 ran 2ms on CPU1
+ * Task B1 ran 5ms on CPU1
+ *
+ * C2->prev_sum = 0ms, A->cpu_time[2].prev_sum = 0
+ * CPU2 idle
+ *
+ * C3->prev_sum = 0ms, A->cpu_time[3].prev_sum = 0
+ * CPU3 idle
+ *
+ * In this case, CPU1 was most busy going by just its prev_sum counter. Demand
+ * from all group A tasks are added to CPU1. IOW, at end of window M, cpu busy
+ * time reported to governor will be:
+ *
+ *
+ * C0 busy time = 1ms
+ * C1 busy time = 5 + 5 + 6 = 16ms
+ *
+ */
+static __read_mostly unsigned int sched_freq_aggregate;
+__read_mostly unsigned int sysctl_sched_freq_aggregate;
+
#define EXITING_TASK_MARKER 0xdeaddead
static inline int exiting_task(struct task_struct *p)
@@ -1955,12 +2013,67 @@ static inline unsigned int load_to_freq(struct rq *rq, u64 load)
return freq;
}
-/* Should scheduler alert governor for changing frequency? */
-static int send_notification(struct rq *rq, int check_pred)
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu);
+
+/*
+ * Return load from all related groups on the given cpu.
+ * Caller must ensure that related_thread_group_lock is held.
+ */
+static void _group_load_in_cpu(int cpu, u64 *grp_load, u64 *new_grp_load)
+{
+ struct related_thread_group *grp;
+
+ for_each_related_thread_group(grp) {
+ struct group_cpu_time *cpu_time;
+
+ cpu_time = _group_cpu_time(grp, cpu);
+ *grp_load += cpu_time->prev_runnable_sum;
+ if (new_grp_load)
+ *new_grp_load += cpu_time->nt_prev_runnable_sum;
+ }
+}
+
+/*
+ * Return load from all related groups in given frequency domain.
+ * Caller must ensure that related_thread_group_lock is held.
+ */
+static void group_load_in_freq_domain(struct cpumask *cpus,
+ u64 *grp_load, u64 *new_grp_load)
+{
+ struct related_thread_group *grp;
+ int j;
+
+ for_each_related_thread_group(grp) {
+ for_each_cpu(j, cpus) {
+ struct group_cpu_time *cpu_time;
+
+ cpu_time = _group_cpu_time(grp, j);
+ *grp_load += cpu_time->prev_runnable_sum;
+ *new_grp_load += cpu_time->nt_prev_runnable_sum;
+ }
+ }
+}
+
+/*
+ * Should scheduler alert governor for changing frequency?
+ *
+ * @check_pred - evaluate frequency based on the predictive demand
+ * @check_groups - add load from all related groups on given cpu
+ *
+ * check_groups is set to 1 if a "related" task movement/wakeup is triggering
+ * the notification check. To avoid "re-aggregation" of demand in such cases,
+ * we check whether the migrated/woken task's demand (along with demand from
+ * existing tasks on the cpu) can be met on target cpu
+ *
+ */
+
+static int send_notification(struct rq *rq, int check_pred, int check_groups)
{
unsigned int cur_freq, freq_required;
unsigned long flags;
int rc = 0;
+ u64 group_load = 0, new_load;
if (!sched_enable_hmp)
return 0;
@@ -1982,8 +2095,22 @@ static int send_notification(struct rq *rq, int check_pred)
if (freq_required < cur_freq + sysctl_sched_pred_alert_freq)
return 0;
} else {
+ read_lock(&related_thread_group_lock);
+ /*
+ * Protect from concurrent update of rq->prev_runnable_sum and
+ * group cpu load
+ */
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ if (check_groups)
+ _group_load_in_cpu(cpu_of(rq), &group_load, NULL);
+
+ new_load = rq->prev_runnable_sum + group_load;
+
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ read_unlock(&related_thread_group_lock);
+
cur_freq = load_to_freq(rq, rq->old_busy_time);
- freq_required = load_to_freq(rq, rq->prev_runnable_sum);
+ freq_required = load_to_freq(rq, new_load);
if (nearly_same_freq(cur_freq, freq_required))
return 0;
@@ -1993,6 +2120,8 @@ static int send_notification(struct rq *rq, int check_pred)
if (!rq->notifier_sent) {
rq->notifier_sent = 1;
rc = 1;
+ trace_sched_freq_alert(cpu_of(rq), check_pred, check_groups, rq,
+ new_load);
}
raw_spin_unlock_irqrestore(&rq->lock, flags);
@@ -2000,17 +2129,13 @@ static int send_notification(struct rq *rq, int check_pred)
}
/* Alert governor if there is a need to change frequency */
-void check_for_freq_change(struct rq *rq, bool check_pred)
+void check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups)
{
int cpu = cpu_of(rq);
- if (!send_notification(rq, check_pred))
+ if (!send_notification(rq, check_pred, check_groups))
return;
- trace_sched_freq_alert(cpu, check_pred, rq->old_busy_time,
- rq->prev_runnable_sum, rq->old_estimated_time,
- rq->hmp_stats.pred_demands_sum);
-
atomic_notifier_call_chain(
&load_alert_notifier_head, 0,
(void *)(long)cpu);
@@ -2031,11 +2156,21 @@ static int account_busy_for_cpu_time(struct rq *rq, struct task_struct *p,
if (event == TASK_WAKE)
return 0;
- if (event == PUT_PREV_TASK || event == IRQ_UPDATE ||
- event == TASK_UPDATE)
+ if (event == PUT_PREV_TASK || event == IRQ_UPDATE)
return 1;
- /* Only TASK_MIGRATE && PICK_NEXT_TASK left */
+ /*
+ * TASK_UPDATE can be called on a sleeping task, when it's moved between
+ * related groups
+ */
+ if (event == TASK_UPDATE) {
+ if (rq->curr == p)
+ return 1;
+
+ return p->on_rq ? sched_freq_account_wait_time : 0;
+ }
+
+ /* TASK_MIGRATE, PICK_NEXT_TASK left */
return sched_freq_account_wait_time;
}
@@ -2262,6 +2397,15 @@ void update_task_pred_demand(struct rq *rq, struct task_struct *p, int event)
event != PICK_NEXT_TASK)))
return;
+ /*
+ * TASK_UPDATE can be called on a sleeping task, when it's moved between
+ * related groups
+ */
+ if (event == TASK_UPDATE) {
+ if (!p->on_rq && !sched_freq_account_wait_time)
+ return;
+ }
+
new = calc_pred_demand(rq, p);
old = p->ravg.pred_demand;
@@ -2290,7 +2434,14 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
u64 window_start = rq->window_start;
u32 window_size = sched_ravg_window;
u64 delta;
+ u64 *curr_runnable_sum = &rq->curr_runnable_sum;
+ u64 *prev_runnable_sum = &rq->prev_runnable_sum;
+ u64 *nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ u64 *nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ int flip_counters = 0;
+ int prev_sum_reset = 0;
bool new_task;
+ struct related_thread_group *grp;
new_window = mark_start < window_start;
if (new_window) {
@@ -2302,6 +2453,51 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
new_task = is_new_task(p);
+ grp = p->grp;
+ if (grp && sched_freq_aggregate) {
+ /* cpu_time protected by rq_lock */
+ struct group_cpu_time *cpu_time =
+ _group_cpu_time(grp, cpu_of(rq));
+
+ curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+ nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ if (cpu_time->window_start != rq->window_start) {
+ int nr_windows;
+
+ delta = rq->window_start - cpu_time->window_start;
+ nr_windows = div64_u64(delta, window_size);
+ if (nr_windows > 1)
+ prev_sum_reset = 1;
+
+ cpu_time->window_start = rq->window_start;
+ flip_counters = 1;
+ }
+
+ if (p_is_curr_task && new_window) {
+ u64 curr_sum = rq->curr_runnable_sum;
+ u64 nt_curr_sum = rq->nt_curr_runnable_sum;
+
+ if (nr_full_windows)
+ curr_sum = nt_curr_sum = 0;
+
+ rq->prev_runnable_sum = curr_sum;
+ rq->nt_prev_runnable_sum = nt_curr_sum;
+
+ rq->curr_runnable_sum = 0;
+ rq->nt_curr_runnable_sum = 0;
+ }
+ } else {
+ if (p_is_curr_task && new_window) {
+ flip_counters = 1;
+ if (nr_full_windows)
+ prev_sum_reset = 1;
+ }
+ }
+
/* Handle per-task window rollover. We don't care about the idle
* task or exiting tasks. */
if (new_window && !is_idle_task(p) && !exiting_task(p)) {
@@ -2314,6 +2510,20 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
p->ravg.curr_window = 0;
}
+ if (flip_counters) {
+ u64 curr_sum = *curr_runnable_sum;
+ u64 nt_curr_sum = *nt_curr_runnable_sum;
+
+ if (prev_sum_reset)
+ curr_sum = nt_curr_sum = 0;
+
+ *prev_runnable_sum = curr_sum;
+ *nt_prev_runnable_sum = nt_curr_sum;
+
+ *curr_runnable_sum = 0;
+ *nt_curr_runnable_sum = 0;
+ }
+
if (!account_busy_for_cpu_time(rq, p, irqtime, event)) {
/* account_busy_for_cpu_time() = 0, so no update to the
* task's current window needs to be made. This could be
@@ -2331,19 +2541,8 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
/* A new window has started. The RQ demand must be rolled
* over if p is the current task. */
if (p_is_curr_task) {
- u64 prev_sum = 0, nt_prev_sum = 0;
-
- /* p is either idle task or an exiting task */
- if (!nr_full_windows) {
- prev_sum = rq->curr_runnable_sum;
- nt_prev_sum = rq->nt_curr_runnable_sum;
- }
-
- rq->prev_runnable_sum = prev_sum;
- rq->curr_runnable_sum = 0;
- rq->nt_prev_runnable_sum = nt_prev_sum;
- rq->nt_curr_runnable_sum = 0;
-
+ /* p is idle task */
+ BUG_ON(p != rq->idle);
} else if (heavy_task_wakeup(p, rq, event)) {
/* A new window has started. If p is a waking
* heavy task its prev_window contribution is faked
@@ -2353,9 +2552,9 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
* can be controlled via the sched_heavy_task
* tunable. */
p->ravg.prev_window = p->ravg.demand;
- rq->prev_runnable_sum += p->ravg.demand;
+ *prev_runnable_sum += p->ravg.demand;
if (new_task)
- rq->nt_prev_runnable_sum += p->ravg.demand;
+ *nt_prev_runnable_sum += p->ravg.demand;
}
return;
@@ -2373,9 +2572,10 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
else
delta = irqtime;
delta = scale_exec_time(delta, rq, cc);
- rq->curr_runnable_sum += delta;
+ *curr_runnable_sum += delta;
if (new_task)
- rq->nt_curr_runnable_sum += delta;
+ *nt_curr_runnable_sum += delta;
+
if (!is_idle_task(p) && !exiting_task(p))
p->ravg.curr_window += delta;
@@ -2409,15 +2609,17 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
if (!exiting_task(p))
p->ravg.prev_window = delta;
}
- rq->prev_runnable_sum += delta;
+
+ *prev_runnable_sum += delta;
if (new_task)
- rq->nt_prev_runnable_sum += delta;
+ *nt_prev_runnable_sum += delta;
/* Account piece of busy time in the current window. */
delta = scale_exec_time(wallclock - window_start, rq, cc);
- rq->curr_runnable_sum += delta;
+ *curr_runnable_sum += delta;
if (new_task)
- rq->nt_curr_runnable_sum += delta;
+ *nt_curr_runnable_sum += delta;
+
if (!exiting_task(p))
p->ravg.curr_window = delta;
@@ -2444,12 +2646,6 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
cc);
if (!is_idle_task(p) && !exiting_task(p))
p->ravg.prev_window += delta;
-
- rq->nt_prev_runnable_sum = rq->nt_curr_runnable_sum;
- if (new_task)
- rq->nt_prev_runnable_sum += delta;
-
- delta += rq->curr_runnable_sum;
} else {
/* Since at least one full window has elapsed,
* the contribution to the previous window is the
@@ -2457,27 +2653,20 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
delta = scale_exec_time(window_size, rq, cc);
if (!is_idle_task(p) && !exiting_task(p))
p->ravg.prev_window = delta;
-
- if (new_task)
- rq->nt_prev_runnable_sum = delta;
- else
- rq->nt_prev_runnable_sum = 0;
}
- /*
- * Rollover for normal runnable sum is done here by overwriting
- * the values in prev_runnable_sum and curr_runnable_sum.
- * Rollover for new task runnable sum has completed by previous
- * if-else statement.
- */
- rq->prev_runnable_sum = delta;
+
+ /* Rollover is done here by overwriting the values in
+ * prev_runnable_sum and curr_runnable_sum. */
+ *prev_runnable_sum += delta;
+ if (new_task)
+ *nt_prev_runnable_sum += delta;
/* Account piece of busy time in the current window. */
delta = scale_exec_time(wallclock - window_start, rq, cc);
- rq->curr_runnable_sum = delta;
+ *curr_runnable_sum += delta;
if (new_task)
- rq->nt_curr_runnable_sum = delta;
- else
- rq->nt_curr_runnable_sum = 0;
+ *nt_curr_runnable_sum += delta;
+
if (!is_idle_task(p) && !exiting_task(p))
p->ravg.curr_window = delta;
@@ -2500,12 +2689,8 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
/* Roll window over. If IRQ busy time was just in the current
* window then that is all that need be accounted. */
- rq->prev_runnable_sum = rq->curr_runnable_sum;
- rq->nt_prev_runnable_sum = rq->nt_curr_runnable_sum;
- rq->nt_curr_runnable_sum = 0;
if (mark_start > window_start) {
- rq->curr_runnable_sum = scale_exec_time(irqtime, rq,
- cc);
+ *curr_runnable_sum = scale_exec_time(irqtime, rq, cc);
return;
}
@@ -2515,7 +2700,7 @@ static void update_cpu_busy_time(struct task_struct *p, struct rq *rq,
if (delta > window_size)
delta = window_size;
delta = scale_exec_time(delta, rq, cc);
- rq->prev_runnable_sum += delta;
+ *prev_runnable_sum += delta;
/* Process the remaining IRQ busy time in the current window. */
delta = wallclock - window_start;
@@ -2820,7 +3005,8 @@ update_task_ravg(struct task_struct *p, struct rq *rq, int event,
update_task_pred_demand(rq, p, event);
done:
trace_sched_update_task_ravg(p, rq, event, wallclock, irqtime,
- cc.cycles, cc.time);
+ cc.cycles, cc.time,
+ _group_cpu_time(p->grp, cpu_of(rq)));
p->ravg.mark_start = wallclock;
@@ -3002,7 +3188,8 @@ enum reset_reason_code {
ACCOUNT_WAIT_TIME_CHANGE,
HIST_SIZE_CHANGE,
MIGRATION_FIXUP_CHANGE,
- FREQ_ACCOUNT_WAIT_TIME_CHANGE
+ FREQ_ACCOUNT_WAIT_TIME_CHANGE,
+ FREQ_AGGREGATE_CHANGE,
};
const char *sched_window_reset_reasons[] = {
@@ -3021,6 +3208,7 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
u64 start_ts = sched_ktime_clock();
int reason = WINDOW_CHANGE;
unsigned int old = 0, new = 0;
+ struct related_thread_group *grp;
disable_window_stats();
@@ -3028,11 +3216,26 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
local_irq_save(flags);
+ read_lock(&related_thread_group_lock);
+
for_each_possible_cpu(cpu) {
struct rq *rq = cpu_rq(cpu);
raw_spin_lock(&rq->lock);
}
+ list_for_each_entry(grp, &related_thread_groups, list) {
+ int j;
+
+ for_each_possible_cpu(j) {
+ struct group_cpu_time *cpu_time;
+ /* Protected by rq lock */
+ cpu_time = _group_cpu_time(grp, j);
+ memset(cpu_time, 0, sizeof(struct group_cpu_time));
+ if (window_start)
+ cpu_time->window_start = window_start;
+ }
+ }
+
if (window_size) {
sched_ravg_window = window_size * TICK_NSEC;
set_hmp_defaults();
@@ -3081,6 +3284,12 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
new = sysctl_sched_freq_account_wait_time;
sched_freq_account_wait_time =
sysctl_sched_freq_account_wait_time;
+ } else if (sched_freq_aggregate !=
+ sysctl_sched_freq_aggregate) {
+ reason = FREQ_AGGREGATE_CHANGE;
+ old = sched_freq_aggregate;
+ new = sysctl_sched_freq_aggregate;
+ sched_freq_aggregate = sysctl_sched_freq_aggregate;
}
#endif
@@ -3089,6 +3298,8 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
raw_spin_unlock(&rq->lock);
}
+ read_unlock(&related_thread_group_lock);
+
local_irq_restore(flags);
trace_sched_reset_all_window_stats(window_start, window_size,
@@ -3097,13 +3308,17 @@ void reset_all_window_stats(u64 window_start, unsigned int window_size)
#ifdef CONFIG_SCHED_FREQ_INPUT
+static inline void
+sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time);
+
void sched_get_cpus_busy(struct sched_load *busy,
const struct cpumask *query_cpus)
{
unsigned long flags;
struct rq *rq;
const int cpus = cpumask_weight(query_cpus);
- u64 load[cpus], nload[cpus];
+ u64 load[cpus], group_load[cpus];
+ u64 nload[cpus], ngload[cpus];
u64 pload[cpus];
unsigned int cur_freq[cpus], max_freq[cpus];
int notifier_sent[cpus];
@@ -3111,6 +3326,9 @@ void sched_get_cpus_busy(struct sched_load *busy,
int cpu, i = 0;
unsigned int window_size;
struct cpu_cycle cc;
+ u64 max_prev_sum = 0;
+ int max_busy_cpu = cpumask_first(query_cpus);
+ struct related_thread_group *grp;
if (unlikely(cpus == 0))
return;
@@ -3120,6 +3338,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
* current task may have been executing for a long time. Ensure
* that the window stats are current by doing an update.
*/
+ read_lock(&related_thread_group_lock);
+
local_irq_save(flags);
for_each_cpu(cpu, query_cpus)
raw_spin_lock(&cpu_rq(cpu)->lock);
@@ -3137,6 +3357,49 @@ void sched_get_cpus_busy(struct sched_load *busy,
nload[i] = rq->nt_prev_runnable_sum;
pload[i] = rq->hmp_stats.pred_demands_sum;
rq->old_estimated_time = pload[i];
+
+ if (load[i] > max_prev_sum) {
+ max_prev_sum = load[i];
+ max_busy_cpu = cpu;
+ }
+
+ notifier_sent[i] = rq->notifier_sent;
+ early_detection[i] = (rq->ed_task != NULL);
+ rq->notifier_sent = 0;
+ cur_freq[i] = cpu_cur_freq(cpu);
+ max_freq[i] = cpu_max_freq(cpu);
+ i++;
+ }
+
+ for_each_related_thread_group(grp) {
+ for_each_cpu(cpu, query_cpus) {
+ /* Protected by rq_lock */
+ struct group_cpu_time *cpu_time =
+ _group_cpu_time(grp, cpu);
+ sync_window_start(cpu_rq(cpu), cpu_time);
+ }
+ }
+
+ i = 0;
+ for_each_cpu(cpu, query_cpus) {
+ group_load[i] = 0;
+ ngload[i] = 0;
+
+ if (early_detection[i])
+ goto skip_early;
+
+ rq = cpu_rq(cpu);
+ if (!notifier_sent[i]) {
+ if (cpu == max_busy_cpu)
+ group_load_in_freq_domain(
+ &rq->freq_domain_cpumask,
+ &group_load[i], &ngload[i]);
+ } else {
+ _group_load_in_cpu(cpu, &group_load[i], &ngload[i]);
+ }
+
+ load[i] += group_load[i];
+ nload[i] += ngload[i];
/*
* Scale load in reference to cluster max_possible_freq.
*
@@ -3146,11 +3409,7 @@ void sched_get_cpus_busy(struct sched_load *busy,
load[i] = scale_load_to_cpu(load[i], cpu);
nload[i] = scale_load_to_cpu(nload[i], cpu);
pload[i] = scale_load_to_cpu(pload[i], cpu);
-
- notifier_sent[i] = rq->notifier_sent;
- early_detection[i] = (rq->ed_task != NULL);
- rq->notifier_sent = 0;
- max_freq[i] = cpu_max_freq(cpu);
+skip_early:
i++;
}
@@ -3158,6 +3417,8 @@ void sched_get_cpus_busy(struct sched_load *busy,
raw_spin_unlock(&(cpu_rq(cpu))->lock);
local_irq_restore(flags);
+ read_unlock(&related_thread_group_lock);
+
i = 0;
for_each_cpu(cpu, query_cpus) {
rq = cpu_rq(cpu);
@@ -3205,17 +3466,6 @@ exit_early:
}
}
-unsigned long sched_get_busy(int cpu)
-{
- struct cpumask query_cpu = CPU_MASK_NONE;
- struct sched_load busy;
-
- cpumask_set_cpu(cpu, &query_cpu);
- sched_get_cpus_busy(&busy, &query_cpu);
-
- return busy.prev_load;
-}
-
void sched_set_io_is_busy(int val)
{
sched_io_is_busy = val;
@@ -3267,7 +3517,14 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
struct rq *src_rq = task_rq(p);
struct rq *dest_rq = cpu_rq(new_cpu);
u64 wallclock;
+ u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+ u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+ u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+ u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+ int migrate_type;
+ struct migration_sum_data d;
bool new_task;
+ struct related_thread_group *grp;
if (!sched_enable_hmp || !sched_migration_fixup ||
(!p->on_rq && p->state != TASK_WAKING))
@@ -3298,22 +3555,62 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
update_task_cpu_cycles(p, new_cpu);
new_task = is_new_task(p);
+ /* Protected by rq_lock */
+ grp = p->grp;
+ if (grp && sched_freq_aggregate) {
+ struct group_cpu_time *cpu_time;
+
+ migrate_type = GROUP_TO_GROUP;
+ /* Protected by rq_lock */
+ cpu_time = _group_cpu_time(grp, cpu_of(src_rq));
+ d.src_rq = NULL;
+ d.src_cpu_time = cpu_time;
+ src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+
+ /* Protected by rq_lock */
+ cpu_time = _group_cpu_time(grp, cpu_of(dest_rq));
+ d.dst_rq = NULL;
+ d.dst_cpu_time = cpu_time;
+ dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+ sync_window_start(dest_rq, cpu_time);
+ } else {
+ migrate_type = RQ_TO_RQ;
+ d.src_rq = src_rq;
+ d.src_cpu_time = NULL;
+ d.dst_rq = dest_rq;
+ d.dst_cpu_time = NULL;
+ src_curr_runnable_sum = &src_rq->curr_runnable_sum;
+ src_prev_runnable_sum = &src_rq->prev_runnable_sum;
+ src_nt_curr_runnable_sum = &src_rq->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &src_rq->nt_prev_runnable_sum;
+
+ dst_curr_runnable_sum = &dest_rq->curr_runnable_sum;
+ dst_prev_runnable_sum = &dest_rq->prev_runnable_sum;
+ dst_nt_curr_runnable_sum = &dest_rq->nt_curr_runnable_sum;
+ dst_nt_prev_runnable_sum = &dest_rq->nt_prev_runnable_sum;
+ }
if (p->ravg.curr_window) {
- src_rq->curr_runnable_sum -= p->ravg.curr_window;
- dest_rq->curr_runnable_sum += p->ravg.curr_window;
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_curr_runnable_sum += p->ravg.curr_window;
if (new_task) {
- src_rq->nt_curr_runnable_sum -= p->ravg.curr_window;
- dest_rq->nt_curr_runnable_sum += p->ravg.curr_window;
+ *src_nt_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_nt_curr_runnable_sum += p->ravg.curr_window;
}
}
if (p->ravg.prev_window) {
- src_rq->prev_runnable_sum -= p->ravg.prev_window;
- dest_rq->prev_runnable_sum += p->ravg.prev_window;
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_prev_runnable_sum += p->ravg.prev_window;
if (new_task) {
- src_rq->nt_prev_runnable_sum -= p->ravg.prev_window;
- dest_rq->nt_prev_runnable_sum += p->ravg.prev_window;
+ *src_nt_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_nt_prev_runnable_sum += p->ravg.prev_window;
}
}
@@ -3323,13 +3620,11 @@ static void fixup_busy_time(struct task_struct *p, int new_cpu)
dest_rq->ed_task = p;
}
- BUG_ON((s64)src_rq->prev_runnable_sum < 0);
- BUG_ON((s64)src_rq->curr_runnable_sum < 0);
- BUG_ON((s64)src_rq->nt_prev_runnable_sum < 0);
- BUG_ON((s64)src_rq->nt_curr_runnable_sum < 0);
-
- trace_sched_migration_update_sum(src_rq, p);
- trace_sched_migration_update_sum(dest_rq, p);
+ trace_sched_migration_update_sum(p, migrate_type, &d);
+ BUG_ON((s64)*src_prev_runnable_sum < 0);
+ BUG_ON((s64)*src_curr_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_prev_runnable_sum < 0);
+ BUG_ON((s64)*src_nt_curr_runnable_sum < 0);
done:
if (p->state == TASK_WAKING)
@@ -3368,10 +3663,6 @@ static void check_for_up_down_migrate_update(const struct cpumask *cpus)
update_up_down_migrate();
}
-static LIST_HEAD(related_thread_groups);
-static DEFINE_RWLOCK(related_thread_group_lock);
-static int nr_related_thread_groups;
-
/* Return cluster which can offer required capacity for group */
static struct sched_cluster *
best_cluster(struct related_thread_group *grp, u64 total_demand)
@@ -3421,6 +3712,199 @@ static void set_preferred_cluster(struct related_thread_group *grp)
raw_spin_unlock(&grp->lock);
}
+#define ADD_TASK 0
+#define REM_TASK 1
+
+#ifdef CONFIG_SCHED_FREQ_INPUT
+
+static struct cpu_cycle
+update_task_ravg(struct task_struct *p, struct rq *rq,
+ int event, u64 wallclock, u64 irqtime);
+
+static inline void free_group_cputime(struct related_thread_group *grp)
+{
+ free_percpu(grp->cpu_time);
+}
+
+static int alloc_group_cputime(struct related_thread_group *grp)
+{
+ int i;
+ struct group_cpu_time *cpu_time;
+ int cpu = raw_smp_processor_id();
+ struct rq *rq = cpu_rq(cpu);
+ u64 window_start = rq->window_start;
+
+ grp->cpu_time = alloc_percpu(struct group_cpu_time);
+ if (!grp->cpu_time)
+ return -ENOMEM;
+
+ for_each_possible_cpu(i) {
+ cpu_time = per_cpu_ptr(grp->cpu_time, i);
+ memset(cpu_time, 0, sizeof(struct group_cpu_time));
+ cpu_time->window_start = window_start;
+ }
+
+ return 0;
+}
+
+/*
+ * A group's window_start may be behind. When moving it forward, flip prev/curr
+ * counters. When moving forward > 1 window, prev counter is set to 0
+ */
+static inline void
+sync_window_start(struct rq *rq, struct group_cpu_time *cpu_time)
+{
+ u64 delta;
+ int nr_windows;
+ u64 curr_sum = cpu_time->curr_runnable_sum;
+ u64 nt_curr_sum = cpu_time->nt_curr_runnable_sum;
+
+ delta = rq->window_start - cpu_time->window_start;
+ if (!delta)
+ return;
+
+ nr_windows = div64_u64(delta, sched_ravg_window);
+ if (nr_windows > 1)
+ curr_sum = nt_curr_sum = 0;
+
+ cpu_time->prev_runnable_sum = curr_sum;
+ cpu_time->curr_runnable_sum = 0;
+
+ cpu_time->nt_prev_runnable_sum = nt_curr_sum;
+ cpu_time->nt_curr_runnable_sum = 0;
+
+ cpu_time->window_start = rq->window_start;
+}
+
+/*
+ * Task's cpu usage is accounted in:
+ * rq->curr/prev_runnable_sum, when its ->grp is NULL
+ * grp->cpu_time[cpu]->curr/prev_runnable_sum, when its ->grp is !NULL
+ *
+ * Transfer task's cpu usage between those counters when transitioning between
+ * groups
+ */
+static void transfer_busy_time(struct rq *rq, struct related_thread_group *grp,
+ struct task_struct *p, int event)
+{
+ u64 wallclock;
+ struct group_cpu_time *cpu_time;
+ u64 *src_curr_runnable_sum, *dst_curr_runnable_sum;
+ u64 *src_prev_runnable_sum, *dst_prev_runnable_sum;
+ u64 *src_nt_curr_runnable_sum, *dst_nt_curr_runnable_sum;
+ u64 *src_nt_prev_runnable_sum, *dst_nt_prev_runnable_sum;
+ struct migration_sum_data d;
+ int migrate_type;
+
+ if (!sched_freq_aggregate)
+ return;
+
+ wallclock = sched_ktime_clock();
+
+ update_task_ravg(rq->curr, rq, TASK_UPDATE, wallclock, 0);
+ update_task_ravg(p, rq, TASK_UPDATE, wallclock, 0);
+
+ /* cpu_time protected by related_thread_group_lock, grp->lock rq_lock */
+ cpu_time = _group_cpu_time(grp, cpu_of(rq));
+ if (event == ADD_TASK) {
+ sync_window_start(rq, cpu_time);
+ migrate_type = RQ_TO_GROUP;
+ d.src_rq = rq;
+ d.src_cpu_time = NULL;
+ d.dst_rq = NULL;
+ d.dst_cpu_time = cpu_time;
+ src_curr_runnable_sum = &rq->curr_runnable_sum;
+ dst_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ src_prev_runnable_sum = &rq->prev_runnable_sum;
+ dst_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+
+ src_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ dst_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ dst_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+ } else if (event == REM_TASK) {
+ migrate_type = GROUP_TO_RQ;
+ d.src_rq = NULL;
+ d.src_cpu_time = cpu_time;
+ d.dst_rq = rq;
+ d.dst_cpu_time = NULL;
+
+ /*
+ * In case of REM_TASK, cpu_time->window_start would be
+ * up to date, because of the update_task_ravg() we called
+ * above on the moving task. Hence no need for
+ * sync_window_start()
+ */
+ src_curr_runnable_sum = &cpu_time->curr_runnable_sum;
+ dst_curr_runnable_sum = &rq->curr_runnable_sum;
+ src_prev_runnable_sum = &cpu_time->prev_runnable_sum;
+ dst_prev_runnable_sum = &rq->prev_runnable_sum;
+
+ src_nt_curr_runnable_sum = &cpu_time->nt_curr_runnable_sum;
+ dst_nt_curr_runnable_sum = &rq->nt_curr_runnable_sum;
+ src_nt_prev_runnable_sum = &cpu_time->nt_prev_runnable_sum;
+ dst_nt_prev_runnable_sum = &rq->nt_prev_runnable_sum;
+ }
+
+ *src_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_curr_runnable_sum += p->ravg.curr_window;
+
+ *src_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_prev_runnable_sum += p->ravg.prev_window;
+
+ if (is_new_task(p)) {
+ *src_nt_curr_runnable_sum -= p->ravg.curr_window;
+ *dst_nt_curr_runnable_sum += p->ravg.curr_window;
+ *src_nt_prev_runnable_sum -= p->ravg.prev_window;
+ *dst_nt_prev_runnable_sum += p->ravg.prev_window;
+ }
+
+ trace_sched_migration_update_sum(p, migrate_type, &d);
+
+ BUG_ON((s64)*src_curr_runnable_sum < 0);
+ BUG_ON((s64)*src_prev_runnable_sum < 0);
+}
+
+static inline struct group_cpu_time *
+task_group_cpu_time(struct task_struct *p, int cpu)
+{
+ return _group_cpu_time(rcu_dereference(p->grp), cpu);
+}
+
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu)
+{
+ return grp ? per_cpu_ptr(grp->cpu_time, cpu) : NULL;
+}
+
+#else /* CONFIG_SCHED_FREQ_INPUT */
+
+static inline void free_group_cputime(struct related_thread_group *grp) { }
+
+static inline int alloc_group_cputime(struct related_thread_group *grp)
+{
+ return 0;
+}
+
+static inline void transfer_busy_time(struct rq *rq,
+ struct related_thread_group *grp, struct task_struct *p, int event)
+{
+}
+
+static struct group_cpu_time *
+task_group_cpu_time(struct task_struct *p, int cpu)
+{
+ return NULL;
+}
+
+static inline struct group_cpu_time *
+_group_cpu_time(struct related_thread_group *grp, int cpu)
+{
+ return NULL;
+}
+
+#endif
+
struct related_thread_group *alloc_related_thread_group(int group_id)
{
struct related_thread_group *grp;
@@ -3429,6 +3913,11 @@ struct related_thread_group *alloc_related_thread_group(int group_id)
if (!grp)
return ERR_PTR(-ENOMEM);
+ if (alloc_group_cputime(grp)) {
+ kfree(grp);
+ return ERR_PTR(-ENOMEM);
+ }
+
grp->id = group_id;
INIT_LIST_HEAD(&grp->tasks);
INIT_LIST_HEAD(&grp->list);
@@ -3449,6 +3938,16 @@ struct related_thread_group *lookup_related_thread_group(unsigned int group_id)
return NULL;
}
+/* See comments before preferred_cluster() */
+static void free_related_thread_group(struct rcu_head *rcu)
+{
+ struct related_thread_group *grp = container_of(rcu, struct
+ related_thread_group, rcu);
+
+ free_group_cputime(grp);
+ kfree(grp);
+}
+
static void remove_task_from_group(struct task_struct *p)
{
struct related_thread_group *grp = p->grp;
@@ -3458,6 +3957,7 @@ static void remove_task_from_group(struct task_struct *p)
raw_spin_lock(&grp->lock);
rq = __task_rq_lock(p);
+ transfer_busy_time(rq, p->grp, p, REM_TASK);
list_del_init(&p->grp_list);
rcu_assign_pointer(p->grp, NULL);
__task_rq_unlock(rq);
@@ -3471,9 +3971,7 @@ static void remove_task_from_group(struct task_struct *p)
if (empty_group) {
list_del(&grp->list);
- nr_related_thread_groups--;
- /* See comments before preferred_cluster() */
- kfree_rcu(grp, rcu);
+ call_rcu(&grp->rcu, free_related_thread_group);
}
}
@@ -3489,8 +3987,9 @@ add_task_to_group(struct task_struct *p, struct related_thread_group *grp)
* reference of p->grp in various hot-paths
*/
rq = __task_rq_lock(p);
- rcu_assign_pointer(p->grp, grp);
+ transfer_busy_time(rq, grp, p, ADD_TASK);
list_add(&p->grp_list, &grp->tasks);
+ rcu_assign_pointer(p->grp, grp);
__task_rq_unlock(rq);
_set_preferred_cluster(grp);
@@ -3539,7 +4038,6 @@ redo:
} else if (!grp && new) {
/* New group - use object allocated before */
destroy = 0;
- nr_related_thread_groups++;
list_add(&new->list, &related_thread_groups);
grp = new;
}
@@ -3550,8 +4048,10 @@ redo:
done:
raw_spin_unlock_irqrestore(&p->pi_lock, flags);
- if (destroy)
+ if (new && destroy) {
+ free_group_cputime(new);
kfree(new);
+ }
return rc;
}
@@ -3898,13 +4398,19 @@ static void notify_migration(int src_cpu, int dest_cpu, bool src_cpu_dead,
struct task_struct *p)
{
struct migration_notify_data mnd;
+ bool check_groups;
+
+ rcu_read_lock();
+ check_groups = rcu_access_pointer(p->grp) != NULL;
+ rcu_read_unlock();
if (!same_freq_domain(src_cpu, dest_cpu)) {
if (!src_cpu_dead)
- check_for_freq_change(cpu_rq(src_cpu), false);
- check_for_freq_change(cpu_rq(dest_cpu), false);
+ check_for_freq_change(cpu_rq(src_cpu), false,
+ check_groups);
+ check_for_freq_change(cpu_rq(dest_cpu), false, check_groups);
} else {
- check_for_freq_change(cpu_rq(dest_cpu), true);
+ check_for_freq_change(cpu_rq(dest_cpu), true, check_groups);
}
if (task_notify_on_migrate(p)) {
@@ -4771,6 +5277,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
struct related_thread_group *grp = NULL;
#endif
bool freq_notif_allowed = !(wake_flags & WF_NO_NOTIFIER);
+ bool check_group = false;
wake_flags &= ~WF_NO_NOTIFIER;
@@ -4846,6 +5353,7 @@ try_to_wake_up(struct task_struct *p, unsigned int state, int wake_flags)
if (update_preferred_cluster(grp, p, old_load))
set_preferred_cluster(grp);
rcu_read_unlock();
+ check_group = grp != NULL;
p->sched_contributes_to_load = !!task_contributes_to_load(p);
p->state = TASK_WAKING;
@@ -4894,12 +5402,14 @@ out:
if (freq_notif_allowed) {
if (!same_freq_domain(src_cpu, cpu)) {
- check_for_freq_change(cpu_rq(cpu), false);
- check_for_freq_change(cpu_rq(src_cpu), false);
+ check_for_freq_change(cpu_rq(cpu),
+ false, check_group);
+ check_for_freq_change(cpu_rq(src_cpu),
+ false, check_group);
} else if (heavy_task) {
- check_for_freq_change(cpu_rq(cpu), false);
+ check_for_freq_change(cpu_rq(cpu), false, false);
} else if (success) {
- check_for_freq_change(cpu_rq(cpu), true);
+ check_for_freq_change(cpu_rq(cpu), true, false);
}
}
@@ -10543,6 +11053,7 @@ void __init sched_init(void)
rq->nt_curr_runnable_sum = rq->nt_prev_runnable_sum = 0;
rq->old_busy_time = 0;
rq->old_estimated_time = 0;
+ rq->old_busy_time_group = 0;
rq->notifier_sent = 0;
rq->hmp_stats.pred_demands_sum = 0;
#endif
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 0288a331e311..a33eddb7b17d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -32,9 +32,8 @@
#include <linux/task_work.h>
#include <linux/ratelimit.h>
-#include <trace/events/sched.h>
-
#include "sched.h"
+#include <trace/events/sched.h>
/*
* Targeted preemption latency for CPU-bound tasks:
@@ -4059,6 +4058,9 @@ static inline int invalid_value_freq_input(unsigned int *data)
if (data == &sysctl_sched_freq_account_wait_time)
return !(*data == 0 || *data == 1);
+ if (data == &sysctl_sched_freq_aggregate)
+ return !(*data == 0 || *data == 1);
+
return 0;
}
#else
@@ -7674,6 +7676,7 @@ enum fbq_type { regular, remote, all };
LBF_BIG_TASK_ACTIVE_BALANCE)
#define LBF_IGNORE_BIG_TASKS 0x100
#define LBF_IGNORE_PREFERRED_CLUSTER_TASKS 0x200
+#define LBF_MOVED_RELATED_THREAD_GROUP_TASK 0x400
struct lb_env {
struct sched_domain *sd;
@@ -7916,6 +7919,8 @@ static void detach_task(struct task_struct *p, struct lb_env *env)
deactivate_task(env->src_rq, p, 0);
double_lock_balance(env->src_rq, env->dst_rq);
set_task_cpu(p, env->dst_cpu);
+ if (rcu_access_pointer(p->grp))
+ env->flags |= LBF_MOVED_RELATED_THREAD_GROUP_TASK;
double_unlock_balance(env->src_rq, env->dst_rq);
}
@@ -9575,10 +9580,13 @@ no_move:
/* Assumes one 'busiest' cpu that we pulled tasks from */
if (!same_freq_domain(this_cpu, cpu_of(busiest))) {
- check_for_freq_change(this_rq, false);
- check_for_freq_change(busiest, false);
+ int check_groups = !!(env.flags &
+ LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+
+ check_for_freq_change(this_rq, false, check_groups);
+ check_for_freq_change(busiest, false, check_groups);
} else {
- check_for_freq_change(this_rq, true);
+ check_for_freq_change(this_rq, true, false);
}
}
if (likely(!active_balance)) {
@@ -9876,10 +9884,12 @@ out_unlock:
local_irq_enable();
if (moved && !same_freq_domain(busiest_cpu, target_cpu)) {
- check_for_freq_change(busiest_rq, false);
- check_for_freq_change(target_rq, false);
+ int check_groups = !!(env.flags &
+ LBF_MOVED_RELATED_THREAD_GROUP_TASK);
+ check_for_freq_change(busiest_rq, false, check_groups);
+ check_for_freq_change(target_rq, false, check_groups);
} else if (moved) {
- check_for_freq_change(target_rq, true);
+ check_for_freq_change(target_rq, true, false);
}
if (per_cpu(dbs_boost_needed, target_cpu)) {
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index a66d8a12051c..df9b972195e5 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -409,6 +409,16 @@ struct related_thread_group {
struct sched_cluster *preferred_cluster;
struct rcu_head rcu;
u64 last_update;
+#ifdef CONFIG_SCHED_FREQ_INPUT
+ struct group_cpu_time __percpu *cpu_time; /* one per cluster */
+#endif
+};
+
+struct migration_sum_data {
+ struct rq *src_rq, *dst_rq;
+#ifdef CONFIG_SCHED_FREQ_INPUT
+ struct group_cpu_time *src_cpu_time, *dst_cpu_time;
+#endif
};
extern struct list_head cluster_head;
@@ -741,7 +751,7 @@ struct rq {
struct task_struct *ed_task;
#ifdef CONFIG_SCHED_FREQ_INPUT
- unsigned int old_busy_time;
+ u64 old_busy_time, old_busy_time_group;
int notifier_sent;
u64 old_estimated_time;
#endif
@@ -1337,7 +1347,16 @@ static inline int update_preferred_cluster(struct related_thread_group *grp,
#ifdef CONFIG_SCHED_FREQ_INPUT
#define PRED_DEMAND_DELTA ((s64)new_pred_demand - p->ravg.pred_demand)
-extern void check_for_freq_change(struct rq *rq, bool check_cra);
+extern void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups);
+
+struct group_cpu_time {
+ u64 curr_runnable_sum;
+ u64 prev_runnable_sum;
+ u64 nt_curr_runnable_sum;
+ u64 nt_prev_runnable_sum;
+ u64 window_start;
+};
/* Is frequency of two cpus synchronized with each other? */
static inline int same_freq_domain(int src_cpu, int dst_cpu)
@@ -1355,7 +1374,8 @@ static inline int same_freq_domain(int src_cpu, int dst_cpu)
#define sched_migration_fixup 0
#define PRED_DEMAND_DELTA (0)
-static inline void check_for_freq_change(struct rq *rq, bool check_cra) { }
+static inline void
+check_for_freq_change(struct rq *rq, bool check_pred, bool check_groups) { }
static inline int same_freq_domain(int src_cpu, int dst_cpu)
{
diff --git a/kernel/sched/sched_avg.c b/kernel/sched/sched_avg.c
index cdb1d7c53849..c70e0466c36c 100644
--- a/kernel/sched/sched_avg.c
+++ b/kernel/sched/sched_avg.c
@@ -18,9 +18,9 @@
#include <linux/hrtimer.h>
#include <linux/sched.h>
#include <linux/math64.h>
-#include <trace/events/sched.h>
#include "sched.h"
+#include <trace/events/sched.h>
static DEFINE_PER_CPU(u64, nr_prod_sum);
static DEFINE_PER_CPU(u64, last_time);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 1da3b96368b1..825be75ca1a3 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -472,6 +472,13 @@ static struct ctl_table kern_table[] = {
.proc_handler = proc_dointvec_minmax,
.extra1 = &zero,
},
+ {
+ .procname = "sched_freq_aggregate",
+ .data = &sysctl_sched_freq_aggregate,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_window_update_handler,
+ },
#endif
{
.procname = "sched_boost",
diff --git a/kernel/trace/trace_event_perf.c b/kernel/trace/trace_event_perf.c
index cc9f7a9319be..731f6484b811 100644
--- a/kernel/trace/trace_event_perf.c
+++ b/kernel/trace/trace_event_perf.c
@@ -256,7 +256,8 @@ int perf_trace_add(struct perf_event *p_event, int flags)
void perf_trace_del(struct perf_event *p_event, int flags)
{
struct trace_event_call *tp_event = p_event->tp_event;
- hlist_del_rcu(&p_event->hlist_entry);
+ if (!hlist_unhashed(&p_event->hlist_entry))
+ hlist_del_rcu(&p_event->hlist_entry);
tp_event->class->reg(tp_event, TRACE_REG_PERF_DEL, p_event);
}
diff --git a/net/core/dev.c b/net/core/dev.c
index b9b64e517511..51b156ed09d7 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3535,13 +3535,13 @@ static int netif_rx_internal(struct sk_buff *skb)
{
int ret;
- WARN_ONCE(skb_cloned(skb), "Cloned packet from dev %s\n",
- skb->dev->name);
-
net_timestamp_check(netdev_tstamp_prequeue, skb);
trace_netif_rx(skb);
#ifdef CONFIG_RPS
+ WARN_ONCE(skb_cloned(skb), "Cloned packet from dev %s\n",
+ skb->dev->name);
+
if (static_key_false(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
diff --git a/net/rmnet_data/rmnet_data_stats.h b/net/rmnet_data/rmnet_data_stats.h
index d5924f1e0768..1581d9f0c5f6 100644
--- a/net/rmnet_data/rmnet_data_stats.h
+++ b/net/rmnet_data/rmnet_data_stats.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,6 +39,7 @@ enum rmnet_skb_free_e {
RMNET_STATS_SKBFREE_DEAGG_UNKOWN_IP_TYP,
RMNET_STATS_SKBFREE_DEAGG_DATA_LEN_0,
RMNET_STATS_SKBFREE_INGRESS_BAD_MAP_CKSUM,
+ RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED,
RMNET_STATS_SKBFREE_MAX
};
diff --git a/net/rmnet_data/rmnet_map_command.c b/net/rmnet_data/rmnet_map_command.c
index 733fa241665b..4bcfa10db486 100644
--- a/net/rmnet_data/rmnet_map_command.c
+++ b/net/rmnet_data/rmnet_map_command.c
@@ -93,10 +93,12 @@ static uint8_t rmnet_map_do_flow_control(struct sk_buff *skb,
LOGD("dev:%s, qos_id:0x%08X, ip_family:%hd, fc_seq %hd, en:%d",
skb->dev->name, qos_id, ip_family & 3, fc_seq, enable);
- if (r)
+ if (r) {
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
return RMNET_MAP_COMMAND_UNSUPPORTED;
- else
+ } else {
return RMNET_MAP_COMMAND_ACK;
+ }
}
/**
@@ -188,8 +190,10 @@ rx_handler_result_t rmnet_map_command(struct sk_buff *skb,
rmnet_map_command_stats[RMNET_MAP_COMMAND_UNKNOWN]++;
LOGM("Uknown MAP command: %d", command_name);
rc = RMNET_MAP_COMMAND_UNSUPPORTED;
+ rmnet_kfree_skb(skb, RMNET_STATS_SKBFREE_MAPC_UNSUPPORTED);
break;
}
- rmnet_map_send_ack(skb, rc, config);
+ if (rc == RMNET_MAP_COMMAND_ACK)
+ rmnet_map_send_ack(skb, rc, config);
return RX_HANDLER_CONSUMED;
}