-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-bus.dtsi | 55
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi | 100
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-gpu.dtsi | 80
-rw-r--r--  block/Kconfig | 9
-rw-r--r--  block/bio.c | 6
-rw-r--r--  block/blk-core.c | 468
-rw-r--r--  block/blk.h | 9
-rw-r--r--  drivers/clk/msm/clock-debug.c | 5
-rw-r--r--  drivers/gpu/msm/kgsl_pwrctrl.c | 107
-rw-r--r--  drivers/misc/qseecom.c | 1
-rw-r--r--  drivers/mmc/core/Kconfig | 11
-rw-r--r--  drivers/mmc/core/Makefile | 1
-rw-r--r--  drivers/mmc/core/debugfs.c | 25
-rw-r--r--  drivers/mmc/core/host.c | 3
-rw-r--r--  drivers/mmc/core/mmc.c | 5
-rw-r--r--  drivers/mmc/core/ring_buffer.c | 123
-rw-r--r--  drivers/mmc/core/sd.c | 4
-rw-r--r--  drivers/mmc/core/sdio.c | 5
-rw-r--r--  drivers/mmc/host/cmdq_hci.c | 33
-rw-r--r--  drivers/mmc/host/sdhci-msm.c | 7
-rw-r--r--  drivers/mmc/host/sdhci.c | 35
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c | 5
-rw-r--r--  drivers/power/supply/qcom/smb138x-charger.c | 6
-rw-r--r--  drivers/scsi/ufs/ufs-qcom-ice.c | 80
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 4
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.h | 3
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 11
-rw-r--r--  drivers/soc/qcom/service-notifier.c | 4
-rw-r--r--  drivers/spi/spi_qsd.c | 195
-rw-r--r--  drivers/spi/spi_qsd.h | 14
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c | 25
-rw-r--r--  drivers/usb/gadget/function/f_qc_rndis.c | 1
-rw-r--r--  drivers/usb/gadget/function/u_ctrl_qti.c | 2
-rw-r--r--  drivers/usb/gadget/function/u_data_ipa.c | 8
-rw-r--r--  drivers/usb/phy/phy-msm-usb.c | 2
-rw-r--r--  include/linux/blk_types.h | 13
-rw-r--r--  include/linux/cpumask.h | 2
-rw-r--r--  include/linux/mmc/host.h | 2
-rw-r--r--  include/linux/mmc/ring_buffer.h | 55
-rw-r--r--  include/linux/usb/composite.h | 2
-rw-r--r--  kernel/sched/hmp.c | 1
-rw-r--r--  security/pfe/pfk_kc.c | 50
42 files changed, 1230 insertions(+), 347 deletions(-)
diff --git a/arch/arm/boot/dts/qcom/sdm660-bus.dtsi b/arch/arm/boot/dts/qcom/sdm660-bus.dtsi
index 93c615639be9..68ff96829d4f 100644
--- a/arch/arm/boot/dts/qcom/sdm660-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-bus.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -34,7 +34,6 @@
label = "fab-a2noc";
qcom,fab-dev;
qcom,base-name = "a2noc-base";
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
qcom,qos-off = <4096>;
qcom,base-offset = <16384>;
@@ -44,16 +43,16 @@
qcom,node-qos-clks {
clock-names =
"clk-ipa-clk",
- "clk-sdcc1-ahb-no-rate",
- "clk-sdcc2-ahb-no-rate",
- "clk-blsp1-ahb-no-rate",
- "clk-blsp2-ahb-no-rate";
+ "clk-ufs-axi-clk",
+ "clk-aggre2-ufs-axi-no-rate",
+ "clk-aggre2-usb3-axi-cfg-no-rate",
+ "clk-cfg-noc-usb2-axi-no-rate";
clocks =
<&clock_rpmcc RPM_IPA_CLK>,
- <&clock_gcc GCC_SDCC1_AHB_CLK>,
- <&clock_gcc GCC_SDCC2_AHB_CLK>,
- <&clock_gcc GCC_BLSP1_AHB_CLK>,
- <&clock_gcc GCC_BLSP2_AHB_CLK>;
+ <&clock_gcc GCC_UFS_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE2_UFS_AXI_CLK>,
+ <&clock_gcc GCC_AGGRE2_USB3_AXI_CLK>,
+ <&clock_gcc GCC_CFG_NOC_USB2_AXI_CLK>;
};
};
@@ -63,7 +62,6 @@
qcom,fab-dev;
qcom,base-name = "bimc-base";
qcom,bus-type = <2>;
- qcom,bypass-qos-prg;
qcom,util-fact = <153>;
clock-names = "bus_clk", "bus_a_clk";
clocks = <&clock_rpmcc BIMC_MSMBUS_CLK>,
@@ -75,7 +73,6 @@
label = "fab-cnoc";
qcom,fab-dev;
qcom,base-name = "cnoc-base";
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
clock-names = "bus_clk", "bus_a_clk";
clocks = <&clock_rpmcc CNOC_MSMBUS_CLK>,
@@ -87,7 +84,6 @@
label = "fab-gnoc";
qcom,virt-dev;
qcom,base-name = "gnoc-base";
- qcom,bypass-qos-prg;
};
fab_mnoc: fab-mnoc {
@@ -95,7 +91,6 @@
label = "fab-mnoc";
qcom,fab-dev;
qcom,base-name = "mnoc-base";
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
qcom,qos-off = <4096>;
qcom,base-offset = <20480>;
@@ -103,27 +98,13 @@
clock-names = "bus_clk", "bus_a_clk";
clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
<&clock_rpmcc MMSSNOC_AXI_A_CLK>;
- clk-camss-ahb-no-rate-supply =
- <&gdsc_camss_top>;
- clk-video-ahb-no-rate-supply =
- <&gdsc_venus>;
- clk-video-axi-no-rate-supply =
- <&gdsc_venus>;
qcom,node-qos-clks {
clock-names =
"clk-mmssnoc-axi-no-rate",
- "clk-noc-cfg-ahb-no-rate",
- "clk-mnoc-ahb-no-rate",
- "clk-camss-ahb-no-rate",
- "clk-video-ahb-no-rate",
- "clk-video-axi-no-rate";
+ "clk-mmss-noc-cfg-ahb-no-rate";
clocks =
<&clock_rpmcc MMSSNOC_AXI_CLK>,
- <&clock_gcc GCC_MMSS_NOC_CFG_AHB_CLK>,
- <&clock_mmss MMSS_MNOC_AHB_CLK>,
- <&clock_mmss MMSS_CAMSS_AHB_CLK>,
- <&clock_mmss MMSS_VIDEO_AHB_CLK>,
- <&clock_mmss MMSS_VIDEO_AXI_CLK>;
+ <&clock_gcc GCC_MMSS_NOC_CFG_AHB_CLK>;
};
};
@@ -132,7 +113,6 @@
label = "fab-snoc";
qcom,fab-dev;
qcom,base-name = "snoc-base";
- qcom,bypass-qos-prg;
qcom,bus-type = <1>;
qcom,qos-off = <4096>;
qcom,base-offset = <24576>;
@@ -146,7 +126,6 @@
label = "fab-mnoc-ahb";
qcom,fab-dev;
qcom,base-name = "mmnoc-ahb-base";
- qcom,bypass-qos-prg;
qcom,setrate-only-clk;
qcom,bus-type = <1>;
clock-names = "bus_clk", "bus_a_clk";
@@ -483,18 +462,6 @@
qcom,bus-dev = <&fab_mnoc>;
qcom,vrail-comp = <50>;
qcom,mas-rpm-id = <ICBID_MASTER_MDP0>;
- clk-mdss-axi-no-rate-supply =
- <&gdsc_mdss>;
- clk-mdss-ahb-no-rate-supply =
- <&gdsc_mdss>;
- qcom,node-qos-clks {
- clock-names =
- "clk-mdss-ahb-no-rate",
- "clk-mdss-axi-no-rate";
- clocks =
- <&clock_mmss MMSS_MDSS_AHB_CLK>,
- <&clock_mmss MMSS_MDSS_AXI_CLK>;
- };
};
mas_mdp_p1: mas-mdp-p1 {
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
index ae8da056d12b..f44d59e021d2 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera-sensor-qrd.dtsi
@@ -21,7 +21,7 @@
status = "ok";
};
- cam_avdd_gpio_regulator:fixed_regulator@1 {
+ cam_avdd_gpio_regulator:cam_avdd_fixed_regulator {
compatible = "regulator-fixed";
regulator-name = "cam_vadd_gpio_regulator";
regulator-min-microvolt = <2800000>;
@@ -31,17 +31,7 @@
vin-supply = <&pm660l_bob>;
};
- cam_dvdd_gpio_regulator:fixed_regulator@1 {
- compatible = "regulator-fixed";
- regulator-name = "cam_vadd_gpio_regulator";
- regulator-min-microvolt = <1050000>;
- regulator-max-microvolt = <1050000>;
- enable-active-high;
- gpio = <&pm660l_gpios 4>;
- vin-supply = <&pm660_s5>;
- };
-
- cam_vaf_gpio_regulator:fixed_regulator@2 {
+ cam_vaf_gpio_regulator:cam_vaf_fixed_regulator {
compatible = "regulator-fixed";
regulator-name = "cam_vaf_gpio_regulator";
regulator-min-microvolt = <2800000>;
@@ -182,11 +172,11 @@
compatible = "qcom,eeprom";
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+ cam_vdig-supply = <&pm660_s5>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1750000 0 0>;
- qcom,cam-vreg-max-voltage = <1980000 0 0>;
- qcom,cam-vreg-op-mode = <105000 0 0>;
+ qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
+ qcom,cam-vreg-op-mode = <105000 0 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -194,12 +184,15 @@
pinctrl-1 = <&cam_sensor_mclk0_suspend
&cam_sensor_rear_suspend>;
gpios = <&tlmm 32 0>,
- <&tlmm 46 0>;
+ <&tlmm 46 0>,
+ <&pm660l_gpios 4 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-req-tbl-num = <0 1>;
- qcom,gpio-req-tbl-flags = <1 0>;
- qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
- "CAM_RESET0";
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 1>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET0",
+ "CAM_VDIG";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <0>;
@@ -216,11 +209,11 @@
compatible = "qcom,eeprom";
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+ cam_vdig-supply = <&pm660_s5>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1750000 0 0>;
- qcom,cam-vreg-max-voltage = <1980000 0 0>;
- qcom,cam-vreg-op-mode = <105000 0 0>;
+ qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
+ qcom,cam-vreg-op-mode = <105000 0 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
@@ -228,15 +221,18 @@
pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 34 0>,
- <&tlmm 48 0>;
+ <&tlmm 48 0>,
+ <&pm660l_gpios 4 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-req-tbl-num = <0 1>;
- qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 1>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
- "CAM_RESET1";
+ "CAM_RESET1",
+ "CAM_VDIG";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ qcom,cci-master = <1>;
status = "ok";
clocks = <&clock_mmss MCLK2_CLK_SRC>,
<&clock_mmss MMSS_CAMSS_MCLK2_CLK>;
@@ -287,18 +283,18 @@
reg = <0x0>;
qcom,csiphy-sd-index = <0>;
qcom,csid-sd-index = <0>;
- qcom,mount-angle = <90>;
+ qcom,mount-angle = <270>;
qcom,led-flash-src = <&led_flash0>;
qcom,actuator-src = <&actuator0>;
qcom,ois-src = <&ois0>;
qcom,eeprom-src = <&eeprom0>;
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+ cam_vdig-supply = <&pm660_s5>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 0 0>;
- qcom,cam-vreg-max-voltage = <1950000 0 0>;
- qcom,cam-vreg-op-mode = <105000 0 0>;
+ qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
+ qcom,cam-vreg-op-mode = <105000 0 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk0_active
@@ -306,12 +302,15 @@
pinctrl-1 = <&cam_sensor_mclk0_suspend
&cam_sensor_rear_suspend>;
gpios = <&tlmm 32 0>,
- <&tlmm 46 0>;
+ <&tlmm 46 0>,
+ <&pm660l_gpios 4 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-req-tbl-num = <0 1>;
- qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 1>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
- "CAM_RESET0";
+ "CAM_RESET0",
+ "CAM_VDIG";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
qcom,cci-master = <0>;
@@ -328,16 +327,16 @@
reg = <0x1>;
qcom,csiphy-sd-index = <1>;
qcom,csid-sd-index = <1>;
- qcom,mount-angle = <90>;
+ qcom,mount-angle = <270>;
qcom,actuator-src = <&actuator1>;
qcom,eeprom-src = <&eeprom1>;
cam_vio-supply = <&pm660_l11>;
cam_vana-supply = <&cam_avdd_gpio_regulator>;
- cam_vdig-supply = <&cam_dvdd_gpio_regulator>;
+ cam_vdig-supply = <&pm660_s5>;
qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
- qcom,cam-vreg-min-voltage = <1780000 0 0>;
- qcom,cam-vreg-max-voltage = <1950000 0 0>;
- qcom,cam-vreg-op-mode = <105000 0 0>;
+ qcom,cam-vreg-min-voltage = <1780000 0 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 0 1350000>;
+ qcom,cam-vreg-op-mode = <105000 0 105000>;
qcom,gpio-no-mux = <0>;
pinctrl-names = "cam_default", "cam_suspend";
pinctrl-0 = <&cam_sensor_mclk2_active
@@ -345,15 +344,18 @@
pinctrl-1 = <&cam_sensor_mclk2_suspend
&cam_sensor_rear2_suspend>;
gpios = <&tlmm 34 0>,
- <&tlmm 48 0>;
+ <&tlmm 48 0>,
+ <&pm660l_gpios 4 0>;
qcom,gpio-reset = <1>;
- qcom,gpio-req-tbl-num = <0 1>;
- qcom,gpio-req-tbl-flags = <1 0>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-req-tbl-num = <0 1 1>;
+ qcom,gpio-req-tbl-flags = <1 0 0>;
qcom,gpio-req-tbl-label = "CAMIF_MCLK1",
- "CAM_RESET1";
+ "CAM_RESET1",
+ "CAM_VDIG";
qcom,sensor-position = <0>;
qcom,sensor-mode = <0>;
- qcom,cci-master = <0>;
+ qcom,cci-master = <1>;
status = "ok";
clocks = <&clock_mmss MCLK2_CLK_SRC>,
<&clock_mmss MMSS_CAMSS_MCLK2_CLK>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
index d347f033b12d..1e62a2423e38 100644
--- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
@@ -32,21 +32,26 @@
* subsystem is inactive
*/
qcom,active-only;
+ /*
+ * IB votes in MBPS, derived using below formula
+ * IB = (DDR frequency * DDR bus width in Bytes * Dual rate)
+ * Note: IB vote is per DDR channel vote
+ */
qcom,bw-tbl =
< 0 /* off */ >,
- < 762 /* 100 MHz */ >,
- < 1144 /* 150 MHz */ >,
- < 1525 /* 200 MHz */ >,
- < 2288 /* 300 MHz */ >,
- < 3143 /* 412 MHz */ >,
- < 4173 /* 547 MHz */ >,
- < 5195 /* 681 MHz */ >,
- < 5859 /* 768 MHz */ >,
- < 7759 /* 1017 MHz */ >,
- < 9887 /* 1296 MHz */ >,
- < 10327 /* 1353 MHz */ >,
- < 11863 /* 1555 MHz */ >,
- < 13763 /* 1804 MHz */ >;
+ < 381 /* 100 MHz */ >,
+ < 572 /* 150 MHz */ >,
+ < 762 /* 200 MHz */ >,
+ < 1144 /* 300 MHz */ >,
+ < 1571 /* 412 MHz */ >,
+ < 2086 /* 547 MHz */ >,
+ < 2597 /* 681 MHz */ >,
+ < 2929 /* 768 MHz */ >,
+ < 3879 /* 1017 MHz */ >,
+ < 4943 /* 1296 MHz */ >,
+ < 5161 /* 1353 MHz */ >,
+ < 5931 /* 1555 MHz */ >,
+ < 6881 /* 1804 MHz */ >;
};
msm_gpu: qcom,kgsl-3d0@5000000 {
@@ -85,26 +90,27 @@
/* Bus Scale Settings */
qcom,gpubw-dev = <&gpubw>;
qcom,bus-control;
- qcom,bus-width = <16>;
+ /* GPU to BIMC bus width, VBIF data transfer in 1 cycle */
+ qcom,bus-width = <32>;
qcom,msm-bus,name = "grp3d";
qcom,msm-bus,num-cases = <14>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>,
- <26 512 0 800000>, /* 1 bus=100 */
- <26 512 0 1200000>, /* 2 bus=150 */
- <26 512 0 1600000>, /* 3 bus=200 */
- <26 512 0 2400000>, /* 4 bus=300 */
- <26 512 0 3296000>, /* 5 bus=412 */
- <26 512 0 4376000>, /* 6 bus=547 */
- <26 512 0 5448000>, /* 7 bus=681 */
- <26 512 0 6144000>, /* 8 bus=768 */
- <26 512 0 8136000>, /* 9 bus=1017 */
- <26 512 0 10368000>, /* 10 bus=1296 */
- <26 512 0 10824000>, /* 11 bus=1353 */
- <26 512 0 12440000>, /* 12 bus=1555 */
- <26 512 0 14432000>; /* 13 bus=1804 */
+ <26 512 0 400000>, /* 1 bus=100 */
+ <26 512 0 600000>, /* 2 bus=150 */
+ <26 512 0 800000>, /* 3 bus=200 */
+ <26 512 0 1200000>, /* 4 bus=300 */
+ <26 512 0 1648000>, /* 5 bus=412 */
+ <26 512 0 2188000>, /* 6 bus=547 */
+ <26 512 0 2724000>, /* 7 bus=681 */
+ <26 512 0 3072000>, /* 8 bus=768 */
+ <26 512 0 4068000>, /* 9 bus=1017 */
+ <26 512 0 5184000>, /* 10 bus=1296 */
+ <26 512 0 5412000>, /* 11 bus=1353 */
+ <26 512 0 6220000>, /* 12 bus=1555 */
+ <26 512 0 7216000>; /* 13 bus=1804 */
/* GDSC regulator names */
regulator-names = "vddcx", "vdd";
@@ -161,8 +167,8 @@
qcom,gpu-pwrlevel@0 {
reg = <0>;
qcom,gpu-freq = <750000000>;
- qcom,bus-freq = <12>;
- qcom,bus-min = <11>;
+ qcom,bus-freq = <13>;
+ qcom,bus-min = <12>;
qcom,bus-max = <13>;
};
@@ -171,7 +177,7 @@
reg = <1>;
qcom,gpu-freq = <700000000>;
qcom,bus-freq = <11>;
- qcom,bus-min = <10>;
+ qcom,bus-min = <11>;
qcom,bus-max = <13>;
};
@@ -179,7 +185,7 @@
qcom,gpu-pwrlevel@2 {
reg = <2>;
qcom,gpu-freq = <647000000>;
- qcom,bus-freq = <10>;
+ qcom,bus-freq = <11>;
qcom,bus-min = <10>;
qcom,bus-max = <12>;
};
@@ -188,9 +194,9 @@
qcom,gpu-pwrlevel@3 {
reg = <3>;
qcom,gpu-freq = <588000000>;
- qcom,bus-freq = <9>;
+ qcom,bus-freq = <10>;
qcom,bus-min = <9>;
- qcom,bus-max = <11>;
+ qcom,bus-max = <12>;
};
/* SVS_L1 */
@@ -198,7 +204,7 @@
reg = <4>;
qcom,gpu-freq = <465000000>;
qcom,bus-freq = <9>;
- qcom,bus-min = <7>;
+ qcom,bus-min = <8>;
qcom,bus-max = <11>;
};
@@ -206,8 +212,8 @@
qcom,gpu-pwrlevel@5 {
reg = <5>;
qcom,gpu-freq = <370000000>;
- qcom,bus-freq = <7>;
- qcom,bus-min = <5>;
+ qcom,bus-freq = <8>;
+ qcom,bus-min = <6>;
qcom,bus-max = <9>;
};
@@ -225,7 +231,7 @@
reg = <7>;
qcom,gpu-freq = <160000000>;
qcom,bus-freq = <3>;
- qcom,bus-min = <2>;
+ qcom,bus-min = <3>;
qcom,bus-max = <5>;
};
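Worked check of the IB formula added in the comment above, assuming a 2-byte-wide DDR channel at dual (double) data rate, which the patch itself does not state: at 1804 MHz, IB = 1804 MHz * 2 B * 2 = 7216 MB/s per channel, which is exactly the level-13 vector <26 512 0 7216000> in KB/s. The qcom,bw-tbl entries are the same figures converted to MiB/s, e.g. 7216000 KB/s / 1048.576 ~= 6881, and every other row follows the same 4-bytes-per-DDR-clock scaling.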
diff --git a/block/Kconfig b/block/Kconfig
index 161491d0a879..39e956942b9d 100644
--- a/block/Kconfig
+++ b/block/Kconfig
@@ -33,7 +33,7 @@ config LBDAF
This option is required to support the full capacity of large
(2TB+) block devices, including RAID, disk, Network Block Device,
Logical Volume Manager (LVM) and loopback.
-
+
This option also enables support for single files larger than
2TB.
@@ -111,6 +111,13 @@ config BLK_CMDLINE_PARSER
See Documentation/block/cmdline-partition.txt for more information.
+config BLOCK_PERF_FRAMEWORK
+ bool "Enable Block device performance measurement framework"
+ default n
+ ---help---
+ Enabling this option allows you to measure the performance at the
+ block layer.
+
menu "Partition Types"
source "block/partitions/Kconfig"
diff --git a/block/bio.c b/block/bio.c
index b9829b6504c8..02c4d9bf1590 100644
--- a/block/bio.c
+++ b/block/bio.c
@@ -31,6 +31,8 @@
#include <trace/events/block.h>
+#include "blk.h"
+
/*
* Test patch to inline a certain number of bi_io_vec's inside the bio
* itself, to shrink a bio data allocation from two mempool calls to one
@@ -1765,8 +1767,10 @@ void bio_endio(struct bio *bio)
bio_put(bio);
bio = parent;
} else {
- if (bio->bi_end_io)
+ if (bio->bi_end_io) {
+ blk_update_perf_stats(bio);
bio->bi_end_io(bio);
+ }
bio = NULL;
}
}
diff --git a/block/blk-core.c b/block/blk-core.c
index 450da06fa27e..4162327d8804 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -11,6 +11,12 @@
/*
* This handles all read/write requests to block devices
*/
+
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+#define DRIVER_NAME "Block"
+#define pr_fmt(fmt) DRIVER_NAME ": %s: " fmt, __func__
+#endif
+
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/backing-dev.h>
@@ -34,6 +40,12 @@
#include <linux/pm_runtime.h>
#include <linux/blk-cgroup.h>
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+#include <linux/ktime.h>
+#include <linux/spinlock.h>
+#include <linux/debugfs.h>
+#endif
+
#define CREATE_TRACE_POINTS
#include <trace/events/block.h>
@@ -2111,6 +2123,456 @@ static inline struct task_struct *get_dirty_task(struct bio *bio)
}
#endif
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+#define BLK_PERF_SIZE (1024 * 15)
+#define BLK_PERF_HIST_SIZE (sizeof(u32) * BLK_PERF_SIZE)
+
+struct blk_perf_stats {
+ u32 *read_hist;
+ u32 *write_hist;
+ u32 *flush_hist;
+ int buffers_alloced;
+ ktime_t max_read_time;
+ ktime_t max_write_time;
+ ktime_t max_flush_time;
+ ktime_t min_write_time;
+ ktime_t min_read_time;
+ ktime_t min_flush_time;
+ ktime_t total_write_time;
+ ktime_t total_read_time;
+ u64 total_read_size;
+ u64 total_write_size;
+ spinlock_t lock;
+ int is_enabled;
+};
+
+static struct blk_perf_stats blk_perf;
+static struct dentry *blk_perf_debug_dir;
+
+static int alloc_histogram_buffers(void)
+{
+ int ret = 0;
+
+ if (!blk_perf.read_hist)
+ blk_perf.read_hist = kzalloc(BLK_PERF_HIST_SIZE, GFP_KERNEL);
+
+ if (!blk_perf.write_hist)
+ blk_perf.write_hist = kzalloc(BLK_PERF_HIST_SIZE, GFP_KERNEL);
+
+ if (!blk_perf.flush_hist)
+ blk_perf.flush_hist = kzalloc(BLK_PERF_HIST_SIZE, GFP_KERNEL);
+
+ if (!blk_perf.read_hist || !blk_perf.write_hist || !blk_perf.flush_hist)
+ ret = -ENOMEM;
+
+ if (!ret)
+ blk_perf.buffers_alloced = 1;
+ return ret;
+}
+
+static void clear_histogram_buffers(void)
+{
+ if (!blk_perf.buffers_alloced)
+ return;
+ memset(blk_perf.read_hist, 0, BLK_PERF_HIST_SIZE);
+ memset(blk_perf.write_hist, 0, BLK_PERF_HIST_SIZE);
+ memset(blk_perf.flush_hist, 0, BLK_PERF_HIST_SIZE);
+}
+
+static int enable_perf(void *data, u64 val)
+{
+ int ret = 0;
+
+ if (!blk_perf.buffers_alloced)
+ ret = alloc_histogram_buffers();
+
+ if (ret)
+ return ret;
+
+ spin_lock(&blk_perf.lock);
+ blk_perf.is_enabled = val;
+ spin_unlock(&blk_perf.lock);
+ return 0;
+}
+
+static int is_perf_enabled(void *data, u64 *val)
+{
+ spin_lock(&blk_perf.lock);
+ *val = blk_perf.is_enabled;
+ spin_unlock(&blk_perf.lock);
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(enable_perf_fops, is_perf_enabled, enable_perf,
+ "%llu\n");
+
+static char *blk_debug_buffer;
+static u32 blk_debug_data_size;
+static DEFINE_MUTEX(blk_perf_debug_buffer_mutex);
+
+static ssize_t blk_perf_read(struct file *file, char __user *buf,
+ size_t count, loff_t *file_pos)
+{
+ ssize_t ret = 0;
+
+ mutex_lock(&blk_perf_debug_buffer_mutex);
+ ret = simple_read_from_buffer(buf, count, file_pos, blk_debug_buffer,
+ blk_debug_data_size);
+ mutex_unlock(&blk_perf_debug_buffer_mutex);
+
+ return ret;
+}
+
+static int blk_debug_buffer_alloc(u32 buffer_size)
+{
+ int ret = 0;
+
+ mutex_lock(&blk_perf_debug_buffer_mutex);
+ if (blk_debug_buffer != NULL) {
+ pr_err("blk_debug_buffer is in use\n");
+ ret = -EBUSY;
+ goto end;
+ }
+ blk_debug_buffer = kzalloc(buffer_size, GFP_KERNEL);
+ if (!blk_debug_buffer)
+ ret = -ENOMEM;
+end:
+ mutex_unlock(&blk_perf_debug_buffer_mutex);
+ return ret;
+}
+
+static int blk_perf_close(struct inode *inode, struct file *file)
+{
+ mutex_lock(&blk_perf_debug_buffer_mutex);
+ blk_debug_data_size = 0;
+ kfree(blk_debug_buffer);
+ blk_debug_buffer = NULL;
+ mutex_unlock(&blk_perf_debug_buffer_mutex);
+ return 0;
+}
+
+static u32 fill_basic_perf_info(char *buffer, u32 buffer_size)
+{
+ u32 size = 0;
+
+ size += scnprintf(buffer + size, buffer_size - size, "\n");
+
+ spin_lock(&blk_perf.lock);
+ size += scnprintf(buffer + size, buffer_size - size,
+ "max_read_time_ms: %llu\n",
+ ktime_to_ms(blk_perf.max_read_time));
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "min_read_time_ms: %llu\n",
+ ktime_to_ms(blk_perf.min_read_time));
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "total_read_time_ms: %llu\n",
+ ktime_to_ms(blk_perf.total_read_time));
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "total_read_size: %llu\n\n",
+ blk_perf.total_read_size);
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "max_write_time_ms: %llu\n",
+ ktime_to_ms(blk_perf.max_write_time));
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "min_write_time_ms: %llu\n",
+ ktime_to_ms(blk_perf.min_write_time));
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "total_write_time_ms: %llu\n",
+ ktime_to_ms(blk_perf.total_write_time));
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "total_write_size: %llu\n\n",
+ blk_perf.total_write_size);
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "max_flush_time_ms: %llu\n",
+ ktime_to_ms(blk_perf.max_flush_time));
+
+ size += scnprintf(buffer + size, buffer_size - size,
+ "min_flush_time_ms: %llu\n\n",
+ ktime_to_ms(blk_perf.min_flush_time));
+
+ spin_unlock(&blk_perf.lock);
+
+ return size;
+}
+
+static int basic_perf_open(struct inode *inode, struct file *file)
+{
+ u32 buffer_size;
+ int ret;
+
+ buffer_size = BLK_PERF_HIST_SIZE;
+ ret = blk_debug_buffer_alloc(buffer_size);
+ if (ret)
+ return ret;
+
+ mutex_lock(&blk_perf_debug_buffer_mutex);
+ blk_debug_data_size = fill_basic_perf_info(blk_debug_buffer,
+ buffer_size);
+ mutex_unlock(&blk_perf_debug_buffer_mutex);
+ return 0;
+}
+
+
+static const struct file_operations basic_perf_ops = {
+ .read = blk_perf_read,
+ .release = blk_perf_close,
+ .open = basic_perf_open,
+};
+
+static int hist_open_helper(void *hist_buf)
+{
+ int ret;
+
+ if (!blk_perf.buffers_alloced)
+ return -EINVAL;
+
+ ret = blk_debug_buffer_alloc(BLK_PERF_HIST_SIZE);
+ if (ret)
+ return ret;
+
+ spin_lock(&blk_perf.lock);
+ memcpy(blk_debug_buffer, hist_buf, BLK_PERF_HIST_SIZE);
+ spin_unlock(&blk_perf.lock);
+
+ mutex_lock(&blk_perf_debug_buffer_mutex);
+ blk_debug_data_size = BLK_PERF_HIST_SIZE;
+ mutex_unlock(&blk_perf_debug_buffer_mutex);
+ return 0;
+}
+
+static int write_hist_open(struct inode *inode, struct file *file)
+{
+ return hist_open_helper(blk_perf.write_hist);
+}
+
+static const struct file_operations write_hist_ops = {
+ .read = blk_perf_read,
+ .release = blk_perf_close,
+ .open = write_hist_open,
+};
+
+
+static int read_hist_open(struct inode *inode, struct file *file)
+{
+ return hist_open_helper(blk_perf.read_hist);
+}
+
+static const struct file_operations read_hist_ops = {
+ .read = blk_perf_read,
+ .release = blk_perf_close,
+ .open = read_hist_open,
+};
+
+static int flush_hist_open(struct inode *inode, struct file *file)
+{
+ return hist_open_helper(blk_perf.flush_hist);
+}
+
+static const struct file_operations flush_hist_ops = {
+ .read = blk_perf_read,
+ .release = blk_perf_close,
+ .open = flush_hist_open,
+};
+
+static void clear_perf_stats_helper(void)
+{
+ spin_lock(&blk_perf.lock);
+ blk_perf.max_write_time = ktime_set(0, 0);
+ blk_perf.max_read_time = ktime_set(0, 0);
+ blk_perf.max_flush_time = ktime_set(0, 0);
+ blk_perf.min_write_time = ktime_set(KTIME_MAX, 0);
+ blk_perf.min_read_time = ktime_set(KTIME_MAX, 0);
+ blk_perf.min_flush_time = ktime_set(KTIME_MAX, 0);
+ blk_perf.total_write_time = ktime_set(0, 0);
+ blk_perf.total_read_time = ktime_set(0, 0);
+ blk_perf.total_read_size = 0;
+ blk_perf.total_write_size = 0;
+ blk_perf.is_enabled = 0;
+ clear_histogram_buffers();
+ spin_unlock(&blk_perf.lock);
+}
+
+static int clear_perf_stats(void *data, u64 val)
+{
+ clear_perf_stats_helper();
+ return 0;
+}
+
+DEFINE_SIMPLE_ATTRIBUTE(clear_perf_stats_fops, NULL, clear_perf_stats,
+ "%llu\n");
+
+static void blk_debugfs_init(void)
+{
+ struct dentry *f_ent;
+
+ blk_perf_debug_dir = debugfs_create_dir("block_perf", NULL);
+ if (IS_ERR(blk_perf_debug_dir)) {
+ pr_err("Failed to create block_perf debug_fs directory\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("basic_perf", 0400, blk_perf_debug_dir,
+ NULL, &basic_perf_ops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs basic_perf file\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("write_hist", 0400, blk_perf_debug_dir,
+ NULL, &write_hist_ops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs write_hist file\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("read_hist", 0400, blk_perf_debug_dir,
+ NULL, &read_hist_ops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs read_hist file\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("flush_hist", 0400, blk_perf_debug_dir,
+ NULL, &flush_hist_ops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs flush_hist file\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("enable_perf", 0600, blk_perf_debug_dir,
+ NULL, &enable_perf_fops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs enable_perf file\n");
+ return;
+ }
+
+ f_ent = debugfs_create_file("clear_perf_stats", 0200,
+ blk_perf_debug_dir, NULL,
+ &clear_perf_stats_fops);
+ if (IS_ERR(f_ent)) {
+ pr_err("Failed to create debug_fs clear_perf_stats file\n");
+ return;
+ }
+}
+
+static void blk_init_perf(void)
+{
+ blk_debugfs_init();
+ spin_lock_init(&blk_perf.lock);
+
+ clear_perf_stats_helper();
+}
+
+
+static void set_submit_info(struct bio *bio, unsigned int count)
+{
+ ktime_t submit_time;
+
+ if (unlikely(blk_perf.is_enabled)) {
+ submit_time = ktime_get();
+ bio->submit_time.tv64 = submit_time.tv64;
+ bio->blk_sector_count = count;
+ return;
+ }
+
+ bio->submit_time.tv64 = 0;
+ bio->blk_sector_count = 0;
+}
+
+void blk_update_perf_read_write_stats(ktime_t bio_process_time, int is_write,
+ int count)
+{
+ u32 bio_process_time_ms;
+
+ bio_process_time_ms = ktime_to_ms(bio_process_time);
+ if (bio_process_time_ms >= BLK_PERF_SIZE)
+ bio_process_time_ms = BLK_PERF_SIZE - 1;
+
+ if (is_write) {
+ if (ktime_after(bio_process_time, blk_perf.max_write_time))
+ blk_perf.max_write_time = bio_process_time;
+
+ if (ktime_before(bio_process_time, blk_perf.min_write_time))
+ blk_perf.min_write_time = bio_process_time;
+ blk_perf.total_write_time =
+ ktime_add(blk_perf.total_write_time, bio_process_time);
+ blk_perf.total_write_size += count;
+ blk_perf.write_hist[bio_process_time_ms] += count;
+
+ } else {
+ if (ktime_after(bio_process_time, blk_perf.max_read_time))
+ blk_perf.max_read_time = bio_process_time;
+
+ if (ktime_before(bio_process_time, blk_perf.min_read_time))
+ blk_perf.min_read_time = bio_process_time;
+ blk_perf.total_read_time =
+ ktime_add(blk_perf.total_read_time, bio_process_time);
+ blk_perf.total_read_size += count;
+ blk_perf.read_hist[bio_process_time_ms] += count;
+ }
+}
+void blk_update_perf_stats(struct bio *bio)
+{
+ ktime_t bio_process_time;
+ u32 bio_process_time_ms;
+ u32 count;
+
+ spin_lock(&blk_perf.lock);
+ if (likely(!blk_perf.is_enabled))
+ goto end;
+ if (!bio->submit_time.tv64)
+ goto end;
+ bio_process_time = ktime_sub(ktime_get(), bio->submit_time);
+
+ count = bio->blk_sector_count;
+
+ if (count) {
+ int is_write = 0;
+
+ if (bio->bi_rw & WRITE ||
+ unlikely(bio->bi_rw & REQ_WRITE_SAME))
+ is_write = 1;
+
+ blk_update_perf_read_write_stats(bio_process_time, is_write,
+ count);
+ } else {
+
+ bio_process_time_ms = ktime_to_ms(bio_process_time);
+ if (bio_process_time_ms >= BLK_PERF_SIZE)
+ bio_process_time_ms = BLK_PERF_SIZE - 1;
+
+ if (ktime_after(bio_process_time, blk_perf.max_flush_time))
+ blk_perf.max_flush_time = bio_process_time;
+
+ if (ktime_before(bio_process_time, blk_perf.min_flush_time))
+ blk_perf.min_flush_time = bio_process_time;
+
+ blk_perf.flush_hist[bio_process_time_ms] += 1;
+ }
+end:
+ spin_unlock(&blk_perf.lock);
+
+}
+#else
+static inline void set_submit_info(struct bio *bio, unsigned int count)
+{
+ (void) bio;
+ (void) count;
+}
+
+static inline void blk_init_perf(void)
+{
+}
+#endif /* #ifdef CONFIG_BLOCK_PERF_FRAMEWORK */
+
/**
* submit_bio - submit a bio to the block device layer for I/O
* @rw: whether to %READ or %WRITE, or maybe to %READA (read ahead)
@@ -2123,6 +2585,7 @@ static inline struct task_struct *get_dirty_task(struct bio *bio)
*/
blk_qc_t submit_bio(int rw, struct bio *bio)
{
+ unsigned int count = 0;
bio->bi_rw |= rw;
/*
@@ -2130,8 +2593,6 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
* go through the normal accounting stuff before submission.
*/
if (bio_has_data(bio)) {
- unsigned int count;
-
if (unlikely(rw & REQ_WRITE_SAME))
count = bdev_logical_block_size(bio->bi_bdev) >> 9;
else
@@ -2158,6 +2619,7 @@ blk_qc_t submit_bio(int rw, struct bio *bio)
}
}
+ set_submit_info(bio, count);
return generic_make_request(bio);
}
EXPORT_SYMBOL(submit_bio);
@@ -3578,7 +4040,7 @@ int __init blk_dev_init(void)
blk_requestq_cachep = kmem_cache_create("blkdev_queue",
sizeof(struct request_queue), 0, SLAB_PANIC, NULL);
-
+ blk_init_perf();
return 0;
}
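For reference only, not part of the patch: the read_hist/write_hist/flush_hist files created above hand userspace the raw u32 histogram (index = completion latency in ms; value = sectors completed in that bucket, or request count for flushes). A minimal userspace reader sketch, assuming debugfs is mounted at the usual /sys/kernel/debug and that BLK_PERF_SIZE matches the kernel definition above:

/* Illustrative only: print non-empty latency buckets from the read histogram. */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define BLK_PERF_SIZE (1024 * 15)	/* must match block/blk-core.c */

int main(void)
{
	static uint32_t hist[BLK_PERF_SIZE];
	FILE *f = fopen("/sys/kernel/debug/block_perf/read_hist", "rb");
	size_t n, i;

	if (!f) {
		perror("read_hist");
		return EXIT_FAILURE;
	}
	n = fread(hist, sizeof(hist[0]), BLK_PERF_SIZE, f);
	fclose(f);

	for (i = 0; i < n; i++)
		if (hist[i])
			printf("%zu ms: %u sectors\n", i, (unsigned)hist[i]);
	return 0;
}

Collection is controlled through the enable_perf and clear_perf_stats attributes in the same block_perf directory (e.g. write 1 to enable_perf before running the workload).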
diff --git a/block/blk.h b/block/blk.h
index ce2287639ab3..6ceebbd61afd 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -112,6 +112,15 @@ void blk_account_io_start(struct request *req, bool new_io);
void blk_account_io_completion(struct request *req, unsigned int bytes);
void blk_account_io_done(struct request *req);
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+void blk_update_perf_stats(struct bio *bio);
+#else
+static inline void blk_update_perf_stats(struct bio *bio)
+{
+ (void) bio;
+}
+#endif
+
/*
* Internal atomic flags for request handling
*/
diff --git a/drivers/clk/msm/clock-debug.c b/drivers/clk/msm/clock-debug.c
index 00a86ba55171..0fe93ede17cc 100644
--- a/drivers/clk/msm/clock-debug.c
+++ b/drivers/clk/msm/clock-debug.c
@@ -1,6 +1,7 @@
/*
* Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2007-2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2007-2014, 2016-2017, The Linux Foundation. All rights
+ * reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -77,6 +78,7 @@ static int clock_debug_measure_get(void *data, u64 *val)
else
is_hw_gated = 0;
+ mutex_lock(&clock->prepare_lock);
ret = clk_set_parent(measure, clock);
if (!ret) {
/*
@@ -107,6 +109,7 @@ static int clock_debug_measure_get(void *data, u64 *val)
*/
meas_rate = clk_get_rate(clock);
sw_rate = clk_get_rate(measure->parent);
+ mutex_unlock(&clock->prepare_lock);
if (sw_rate && meas_rate >= (sw_rate * 2))
*val *= DIV_ROUND_CLOSEST(meas_rate, sw_rate);
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index cd9a82a9bf4a..fe6aa45901d0 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1994,6 +1994,42 @@ static int _isense_clk_set_rate(struct kgsl_pwrctrl *pwr, int level)
return clk_set_rate(pwr->grp_clks[pwr->isense_clk_indx], rate);
}
+static inline void _close_pcl(struct kgsl_pwrctrl *pwr)
+{
+ if (pwr->pcl)
+ msm_bus_scale_unregister_client(pwr->pcl);
+
+ pwr->pcl = 0;
+}
+
+static inline void _close_ocmem_pcl(struct kgsl_pwrctrl *pwr)
+{
+ if (pwr->ocmem_pcl)
+ msm_bus_scale_unregister_client(pwr->ocmem_pcl);
+
+ pwr->ocmem_pcl = 0;
+}
+
+static inline void _close_regulators(struct kgsl_pwrctrl *pwr)
+{
+ int i;
+
+ for (i = 0; i < KGSL_MAX_REGULATORS; i++)
+ pwr->regulators[i].reg = NULL;
+}
+
+static inline void _close_clks(struct kgsl_device *device)
+{
+ struct kgsl_pwrctrl *pwr = &device->pwrctrl;
+ int i;
+
+ for (i = 0; i < KGSL_MAX_CLKS; i++)
+ pwr->grp_clks[i] = NULL;
+
+ if (pwr->gpu_bimc_int_clk)
+ devm_clk_put(&device->pdev->dev, pwr->gpu_bimc_int_clk);
+}
+
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
int i, k, m, n = 0, result;
@@ -2011,7 +2047,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
result = _get_clocks(device);
if (result)
- return result;
+ goto error_cleanup_clks;
/* Make sure we have a source clk for freq setting */
if (pwr->grp_clks[0] == NULL)
@@ -2029,7 +2065,8 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
if (pwr->num_pwrlevels == 0) {
KGSL_PWR_ERR(device, "No power levels are defined\n");
- return -EINVAL;
+ result = -EINVAL;
+ goto error_cleanup_clks;
}
/* Initialize the user and thermal clock constraints */
@@ -2059,7 +2096,7 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
result = get_regulators(device);
if (result)
- return result;
+ goto error_cleanup_regulators;
pwr->power_flags = 0;
@@ -2079,8 +2116,10 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
pwr->ocmem_pcl = msm_bus_scale_register_client
(ocmem_scale_table);
- if (!pwr->ocmem_pcl)
- return -EINVAL;
+ if (!pwr->ocmem_pcl) {
+ result = -EINVAL;
+ goto error_disable_pm;
+ }
}
/* Bus width in bytes, set it to zero if not found */
@@ -2110,14 +2149,18 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
* from the driver.
*/
pwr->pcl = msm_bus_scale_register_client(bus_scale_table);
- if (pwr->pcl == 0)
- return -EINVAL;
+ if (pwr->pcl == 0) {
+ result = -EINVAL;
+ goto error_cleanup_ocmem_pcl;
+ }
}
pwr->bus_ib = kzalloc(bus_scale_table->num_usecases *
sizeof(*pwr->bus_ib), GFP_KERNEL);
- if (pwr->bus_ib == NULL)
- return -ENOMEM;
+ if (pwr->bus_ib == NULL) {
+ result = -ENOMEM;
+ goto error_cleanup_pcl;
+ }
/*
* Pull the BW vote out of the bus table. They will be used to
@@ -2175,36 +2218,26 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
&pwr->tsens_name);
return result;
+
+error_cleanup_pcl:
+ _close_pcl(pwr);
+error_cleanup_ocmem_pcl:
+ _close_ocmem_pcl(pwr);
+error_disable_pm:
+ pm_runtime_disable(&pdev->dev);
+error_cleanup_regulators:
+ _close_regulators(pwr);
+error_cleanup_clks:
+ _close_clks(device);
+ return result;
}
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
- int i;
KGSL_PWR_INFO(device, "close device %d\n", device->id);
- pm_runtime_disable(&device->pdev->dev);
-
- if (pwr->pcl)
- msm_bus_scale_unregister_client(pwr->pcl);
-
- pwr->pcl = 0;
-
- if (pwr->ocmem_pcl)
- msm_bus_scale_unregister_client(pwr->ocmem_pcl);
-
- pwr->ocmem_pcl = 0;
-
- for (i = 0; i < KGSL_MAX_REGULATORS; i++)
- pwr->regulators[i].reg = NULL;
-
- for (i = 0; i < KGSL_MAX_REGULATORS; i++)
- pwr->grp_clks[i] = NULL;
-
- if (pwr->gpu_bimc_int_clk)
- devm_clk_put(&device->pdev->dev, pwr->gpu_bimc_int_clk);
-
pwr->power_flags = 0;
if (!IS_ERR_OR_NULL(pwr->sysfs_pwr_limit)) {
@@ -2213,6 +2246,16 @@ void kgsl_pwrctrl_close(struct kgsl_device *device)
pwr->sysfs_pwr_limit = NULL;
}
kfree(pwr->bus_ib);
+
+ _close_pcl(pwr);
+
+ _close_ocmem_pcl(pwr);
+
+ pm_runtime_disable(&device->pdev->dev);
+
+ _close_regulators(pwr);
+
+ _close_clks(device);
}
/**
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 3d346d85d45a..1653f7e1ae99 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -4432,6 +4432,7 @@ int qseecom_start_app(struct qseecom_handle **handle,
strlcpy(entry->app_name, app_name, MAX_APP_NAME_SIZE);
if (__qseecom_get_fw_size(app_name, &fw_size, &app_arch)) {
ret = -EIO;
+ kfree(entry);
goto err;
}
entry->app_arch = app_arch;
diff --git a/drivers/mmc/core/Kconfig b/drivers/mmc/core/Kconfig
index 57cc6b29b2d0..9e0ccdc44d6b 100644
--- a/drivers/mmc/core/Kconfig
+++ b/drivers/mmc/core/Kconfig
@@ -2,6 +2,17 @@
# MMC core configuration
#
+config MMC_RING_BUFFER
+ bool "MMC_RING_BUFFER"
+ depends on MMC
+ default n
+ help
+ This enables the ring buffer tracing of significant
+ events for mmc driver to provide command history for
+ debugging purpose.
+
+ If unsure, say N.
+
config MMC_EMBEDDED_SDIO
boolean "MMC embedded SDIO device support (EXPERIMENTAL)"
help
diff --git a/drivers/mmc/core/Makefile b/drivers/mmc/core/Makefile
index 2c25138f28b7..60781dd192ab 100644
--- a/drivers/mmc/core/Makefile
+++ b/drivers/mmc/core/Makefile
@@ -10,3 +10,4 @@ mmc_core-y := core.o bus.o host.o \
quirks.o slot-gpio.o
mmc_core-$(CONFIG_OF) += pwrseq.o pwrseq_simple.o pwrseq_emmc.o
mmc_core-$(CONFIG_DEBUG_FS) += debugfs.o
+obj-$(CONFIG_MMC_RING_BUFFER) += ring_buffer.o
diff --git a/drivers/mmc/core/debugfs.c b/drivers/mmc/core/debugfs.c
index c894f64c2e38..a0d31ded04db 100644
--- a/drivers/mmc/core/debugfs.c
+++ b/drivers/mmc/core/debugfs.c
@@ -32,6 +32,26 @@ module_param(fail_request, charp, 0);
#endif /* CONFIG_FAIL_MMC_REQUEST */
/* The debugfs functions are optimized away when CONFIG_DEBUG_FS isn't set. */
+static int mmc_ring_buffer_show(struct seq_file *s, void *data)
+{
+ struct mmc_host *mmc = s->private;
+
+ mmc_dump_trace_buffer(mmc, s);
+ return 0;
+}
+
+static int mmc_ring_buffer_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, mmc_ring_buffer_show, inode->i_private);
+}
+
+static const struct file_operations mmc_ring_buffer_fops = {
+ .open = mmc_ring_buffer_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
static int mmc_ios_show(struct seq_file *s, void *data)
{
static const char *vdd_str[] = {
@@ -368,6 +388,11 @@ void mmc_add_host_debugfs(struct mmc_host *host)
&host->cmdq_thist_enabled))
goto err_node;
+#ifdef CONFIG_MMC_RING_BUFFER
+ if (!debugfs_create_file("ring_buffer", S_IRUSR,
+ root, host, &mmc_ring_buffer_fops))
+ goto err_node;
+#endif
#ifdef CONFIG_MMC_CLKGATE
if (!debugfs_create_u32("clk_delay", (S_IRUSR | S_IWUSR),
root, &host->clk_delay))
diff --git a/drivers/mmc/core/host.c b/drivers/mmc/core/host.c
index f6a54a8e1076..333f691a73c7 100644
--- a/drivers/mmc/core/host.c
+++ b/drivers/mmc/core/host.c
@@ -26,6 +26,8 @@
#include <linux/mmc/host.h>
#include <linux/mmc/card.h>
+#include <linux/mmc/ring_buffer.h>
+
#include <linux/mmc/slot-gpio.h>
#include "core.h"
@@ -869,6 +871,7 @@ int mmc_add_host(struct mmc_host *host)
mmc_add_host_debugfs(host);
#endif
mmc_host_clk_sysfs_init(host);
+ mmc_trace_init(host);
err = sysfs_create_group(&host->class_dev.kobj, &clk_scaling_attr_grp);
if (err)
diff --git a/drivers/mmc/core/mmc.c b/drivers/mmc/core/mmc.c
index 449514bae4f3..414877874190 100644
--- a/drivers/mmc/core/mmc.c
+++ b/drivers/mmc/core/mmc.c
@@ -2635,6 +2635,7 @@ static int mmc_suspend(struct mmc_host *host)
int err;
ktime_t start = ktime_get();
+ MMC_TRACE(host, "%s: Enter\n", __func__);
err = _mmc_suspend(host, true);
if (!err) {
pm_runtime_disable(&host->card->dev);
@@ -2643,6 +2644,7 @@ static int mmc_suspend(struct mmc_host *host)
trace_mmc_suspend(mmc_hostname(host), err,
ktime_to_us(ktime_sub(ktime_get(), start)));
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -2718,6 +2720,7 @@ static int mmc_resume(struct mmc_host *host)
int err = 0;
ktime_t start = ktime_get();
+ MMC_TRACE(host, "%s: Enter\n", __func__);
if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
err = _mmc_resume(host);
pm_runtime_set_active(&host->card->dev);
@@ -2727,7 +2730,7 @@ static int mmc_resume(struct mmc_host *host)
trace_mmc_resume(mmc_hostname(host), err,
ktime_to_us(ktime_sub(ktime_get(), start)));
-
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
diff --git a/drivers/mmc/core/ring_buffer.c b/drivers/mmc/core/ring_buffer.c
new file mode 100644
index 000000000000..83945e1cae40
--- /dev/null
+++ b/drivers/mmc/core/ring_buffer.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/mmc/ring_buffer.h>
+#include <linux/mmc/host.h>
+
+void mmc_stop_tracing(struct mmc_host *mmc)
+{
+ mmc->trace_buf.stop_tracing = true;
+}
+
+void mmc_trace_write(struct mmc_host *mmc,
+ const char *fmt, ...)
+{
+ unsigned int idx;
+ va_list args;
+ char *event;
+ unsigned long flags;
+ char str[MMC_TRACE_EVENT_SZ];
+
+ if (unlikely(!mmc->trace_buf.data) ||
+ unlikely(mmc->trace_buf.stop_tracing))
+ return;
+
+ /*
+ * Here an increment and modulus is used to keep
+ * index within array bounds. The cast to unsigned is
+ * necessary so increment and rollover wraps to 0 correctly
+ */
+ spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+ mmc->trace_buf.wr_idx += 1;
+ idx = ((unsigned int)mmc->trace_buf.wr_idx) &
+ (MMC_TRACE_RBUF_NUM_EVENTS - 1);
+ spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+
+ /* Catch some unlikely machine specific wrap-around bug */
+ if (unlikely(idx > (MMC_TRACE_RBUF_NUM_EVENTS - 1))) {
+ pr_err("%s: %s: Invalid idx:%d for mmc trace, tracing stopped !\n",
+ mmc_hostname(mmc), __func__, idx);
+ mmc_stop_tracing(mmc);
+ return;
+ }
+
+ event = &mmc->trace_buf.data[idx * MMC_TRACE_EVENT_SZ];
+ va_start(args, fmt);
+ snprintf(str, MMC_TRACE_EVENT_SZ, "<%d> %lld: %s: %s",
+ raw_smp_processor_id(),
+ ktime_to_ns(ktime_get()),
+ mmc_hostname(mmc), fmt);
+ memset(event, '\0', MMC_TRACE_EVENT_SZ);
+ vscnprintf(event, MMC_TRACE_EVENT_SZ, str, args);
+ va_end(args);
+}
+
+void mmc_trace_init(struct mmc_host *mmc)
+{
+ BUILD_BUG_ON_NOT_POWER_OF_2(MMC_TRACE_RBUF_NUM_EVENTS);
+
+ mmc->trace_buf.data = (char *)
+ __get_free_pages(GFP_KERNEL|__GFP_ZERO,
+ MMC_TRACE_RBUF_SZ_ORDER);
+
+ if (!mmc->trace_buf.data) {
+ pr_err("%s: %s: Unable to allocate trace for mmc\n",
+ __func__, mmc_hostname(mmc));
+ return;
+ }
+
+ spin_lock_init(&mmc->trace_buf.trace_lock);
+ mmc->trace_buf.wr_idx = -1;
+}
+
+void mmc_trace_free(struct mmc_host *mmc)
+{
+ if (mmc->trace_buf.data)
+ free_pages((unsigned long)mmc->trace_buf.data,
+ MMC_TRACE_RBUF_SZ_ORDER);
+}
+
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s)
+{
+ unsigned int idx, cur_idx;
+ unsigned int N = MMC_TRACE_RBUF_NUM_EVENTS - 1;
+ char *event;
+ unsigned long flags;
+
+ if (!mmc->trace_buf.data)
+ return;
+
+ spin_lock_irqsave(&mmc->trace_buf.trace_lock, flags);
+ idx = ((unsigned int)mmc->trace_buf.wr_idx) & N;
+ cur_idx = (idx + 1) & N;
+
+ do {
+ event = &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+ if (s)
+ seq_printf(s, "%s", (char *)event);
+ else
+ pr_err("%s", (char *)event);
+ cur_idx = (cur_idx + 1) & N;
+ if (cur_idx == idx) {
+ event =
+ &mmc->trace_buf.data[cur_idx * MMC_TRACE_EVENT_SZ];
+ if (s)
+ seq_printf(s, "latest_event: %s",
+ (char *)event);
+ else
+ pr_err("latest_event: %s", (char *)event);
+ break;
+ }
+ } while (1);
+ spin_unlock_irqrestore(&mmc->trace_buf.trace_lock, flags);
+}
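The MMC_TRACE() calls added throughout the rest of this series funnel into mmc_trace_write() above; the macro itself lives in the include/linux/mmc/ring_buffer.h header listed in the diffstat but not shown in this section. A plausible shape, given only as an assumption so the call sites read naturally (the real definition may differ):

/* Assumed wrapper; see include/linux/mmc/ring_buffer.h in this series. */
#ifdef CONFIG_MMC_RING_BUFFER
#define MMC_TRACE(mmc, fmt, ...) \
	mmc_trace_write(mmc, fmt, ##__VA_ARGS__)
#else
#define MMC_TRACE(mmc, fmt, ...) do { } while (0)
#endif

The recorded entries can then be read back through the ring_buffer debugfs file added in drivers/mmc/core/debugfs.c above, or dumped to the kernel log by calling mmc_dump_trace_buffer(mmc, NULL).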
diff --git a/drivers/mmc/core/sd.c b/drivers/mmc/core/sd.c
index 7b84030ffe92..7e7d7eb4da2a 100644
--- a/drivers/mmc/core/sd.c
+++ b/drivers/mmc/core/sd.c
@@ -1222,11 +1222,13 @@ static int mmc_sd_suspend(struct mmc_host *host)
{
int err;
+ MMC_TRACE(host, "%s: Enter\n", __func__);
err = _mmc_sd_suspend(host);
if (!err) {
pm_runtime_disable(&host->card->dev);
pm_runtime_set_suspended(&host->card->dev);
}
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
@@ -1292,12 +1294,14 @@ static int mmc_sd_resume(struct mmc_host *host)
{
int err = 0;
+ MMC_TRACE(host, "%s: Enter\n", __func__);
if (!(host->caps & MMC_CAP_RUNTIME_RESUME)) {
err = _mmc_sd_resume(host);
pm_runtime_set_active(&host->card->dev);
pm_runtime_mark_last_busy(&host->card->dev);
}
pm_runtime_enable(&host->card->dev);
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
diff --git a/drivers/mmc/core/sdio.c b/drivers/mmc/core/sdio.c
index 5fedab49cf34..13a2f2d14d12 100644
--- a/drivers/mmc/core/sdio.c
+++ b/drivers/mmc/core/sdio.c
@@ -998,6 +998,7 @@ static int mmc_sdio_pre_suspend(struct mmc_host *host)
*/
static int mmc_sdio_suspend(struct mmc_host *host)
{
+ MMC_TRACE(host, "%s: Enter\n", __func__);
mmc_claim_host(host);
if (mmc_card_keep_power(host) && mmc_card_wake_sdio_irq(host))
@@ -1013,7 +1014,7 @@ static int mmc_sdio_suspend(struct mmc_host *host)
}
mmc_release_host(host);
-
+ MMC_TRACE(host, "%s: Exit\n", __func__);
return 0;
}
@@ -1024,6 +1025,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
BUG_ON(!host);
BUG_ON(!host->card);
+ MMC_TRACE(host, "%s: Enter\n", __func__);
/* Basic card reinitialization. */
mmc_claim_host(host);
@@ -1079,6 +1081,7 @@ static int mmc_sdio_resume(struct mmc_host *host)
host->pm_flags &= ~MMC_PM_KEEP_POWER;
host->pm_flags &= ~MMC_PM_WAKE_SDIO_IRQ;
+ MMC_TRACE(host, "%s: Exit err: %d\n", __func__, err);
return err;
}
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 52427815722b..d712f29da9f1 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -198,6 +198,14 @@ static void cmdq_dumpregs(struct cmdq_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
+ MMC_TRACE(mmc,
+ "%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
+ __func__, cmdq_readl(cq_host, CQCTL), cmdq_readl(cq_host, CQIS),
+ cmdq_readl(cq_host, CQISTE), cmdq_readl(cq_host, CQISGE),
+ cmdq_readl(cq_host, CQTDBR), cmdq_readl(cq_host, CQTCN),
+ cmdq_readl(cq_host, CQDQS), cmdq_readl(cq_host, CQDPT),
+ cmdq_readl(cq_host, CQTERRI), cmdq_readl(cq_host, CQCRI),
+ cmdq_readl(cq_host, CQCRA), cmdq_readl(cq_host, CQCRDCT));
pr_err(DRV_NAME ": ========== REGISTER DUMP (%s)==========\n",
mmc_hostname(mmc));
@@ -426,6 +434,7 @@ static int cmdq_enable(struct mmc_host *mmc)
pm_ref_count:
cmdq_runtime_pm_put(cq_host);
out:
+ MMC_TRACE(mmc, "%s: CQ enabled err: %d\n", __func__, err);
return err;
}
@@ -443,6 +452,7 @@ static void cmdq_disable_nosync(struct mmc_host *mmc, bool soft)
cq_host->enabled = false;
mmc_host_set_cq_disable(mmc);
+ MMC_TRACE(mmc, "%s: CQ disabled\n", __func__);
}
static void cmdq_disable(struct mmc_host *mmc, bool soft)
@@ -525,6 +535,12 @@ static void cmdq_prep_task_desc(struct mmc_request *mrq,
REL_WRITE(!!(req_flags & REL_WR)) |
BLK_COUNT(mrq->cmdq_req->data.blocks) |
BLK_ADDR((u64)mrq->cmdq_req->blk_addr);
+
+ MMC_TRACE(mrq->host,
+ "%s: Task: 0x%08x | Args: 0x%08x | cnt: 0x%08x\n", __func__,
+ lower_32_bits(*data),
+ upper_32_bits(*data),
+ mrq->cmdq_req->data.blocks);
}
static int cmdq_dma_map(struct mmc_host *host, struct mmc_request *mrq)
@@ -665,6 +681,11 @@ static void cmdq_prep_dcmd_desc(struct mmc_host *mmc,
dataddr = (__le64 __force *)(desc + 4);
dataddr[0] = cpu_to_le64((u64)mrq->cmd->arg);
cmdq_log_task_desc_history(cq_host, *task_desc, true);
+ MMC_TRACE(mrq->host,
+ "%s: DCMD: Task: 0x%08x | Args: 0x%08x\n",
+ __func__,
+ lower_32_bits(*task_desc),
+ upper_32_bits(*task_desc));
}
static void cmdq_pm_qos_vote(struct sdhci_host *host, struct mmc_request *mrq)
@@ -743,6 +764,7 @@ ring_doorbell:
cmdq_dumpregs(cq_host);
BUG_ON(1);
}
+ MMC_TRACE(mmc, "%s: tag: %d\n", __func__, tag);
cmdq_writel(cq_host, 1 << tag, CQTDBR);
/* Commit the doorbell write immediately */
wmb();
@@ -785,6 +807,8 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
if (!status && !err)
return IRQ_NONE;
+ MMC_TRACE(mmc, "%s: CQIS: 0x%x err: %d\n",
+ __func__, status, err);
if (err || (status & CQIS_RED)) {
err_info = cmdq_readl(cq_host, CQTERRI);
@@ -920,7 +944,9 @@ skip_cqterri:
/* complete the corresponding mrq */
pr_debug("%s: completing tag -> %lu\n",
mmc_hostname(mmc), tag);
- cmdq_finish_data(mmc, tag);
+ MMC_TRACE(mmc, "%s: completing tag -> %lu\n",
+ __func__, tag);
+ cmdq_finish_data(mmc, tag);
}
}
@@ -997,6 +1023,8 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
retries--;
continue;
} else {
+ MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
+ __func__, retries);
/* halt done: re-enable legacy interrupts */
if (cq_host->ops->clear_set_irqs)
cq_host->ops->clear_set_irqs(mmc,
@@ -1014,6 +1042,7 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
cq_host->ops->set_data_timeout(mmc, 0xf);
if (cq_host->ops->clear_set_irqs)
cq_host->ops->clear_set_irqs(mmc, true);
+ MMC_TRACE(mmc, "%s: unhalt done\n", __func__);
cmdq_writel(cq_host, cmdq_readl(cq_host, CQCTL) & ~HALT,
CQCTL);
}
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 203daf3bd5eb..466e0a2c8483 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -2,7 +2,7 @@
* drivers/mmc/host/sdhci-msm.c - Qualcomm Technologies, Inc. MSM SDHCI Platform
* driver source file
*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -3293,6 +3293,11 @@ void sdhci_msm_dump_vendor_regs(struct sdhci_host *host)
if (host->cq_host)
sdhci_msm_cmdq_dump_debug_ram(host);
+ MMC_TRACE(host->mmc, "Data cnt: 0x%08x | Fifo cnt: 0x%08x\n",
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_DATA_CNT),
+ sdhci_msm_readl_relaxed(host,
+ msm_host_offset->CORE_MCI_FIFO_CNT));
pr_info("Data cnt: 0x%08x | Fifo cnt: 0x%08x | Int sts: 0x%08x\n",
sdhci_msm_readl_relaxed(host,
msm_host_offset->CORE_MCI_DATA_CNT),
diff --git a/drivers/mmc/host/sdhci.c b/drivers/mmc/host/sdhci.c
index 08822464d82f..3fd564388720 100644
--- a/drivers/mmc/host/sdhci.c
+++ b/drivers/mmc/host/sdhci.c
@@ -111,6 +111,17 @@ static void sdhci_dump_state(struct sdhci_host *host)
static void sdhci_dumpregs(struct sdhci_host *host)
{
+ MMC_TRACE(host->mmc,
+ "%s: 0x04=0x%08x 0x06=0x%08x 0x0E=0x%08x 0x30=0x%08x 0x34=0x%08x 0x38=0x%08x\n",
+ __func__,
+ sdhci_readw(host, SDHCI_BLOCK_SIZE),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT),
+ sdhci_readw(host, SDHCI_COMMAND),
+ sdhci_readl(host, SDHCI_INT_STATUS),
+ sdhci_readl(host, SDHCI_INT_ENABLE),
+ sdhci_readl(host, SDHCI_SIGNAL_ENABLE));
+ mmc_stop_tracing(host->mmc);
+
pr_info(DRIVER_NAME ": =========== REGISTER DUMP (%s)===========\n",
mmc_hostname(host->mmc));
@@ -1013,6 +1024,11 @@ static void sdhci_prepare_data(struct sdhci_host *host, struct mmc_command *cmd)
/* Set the DMA boundary value and block size */
sdhci_set_blk_size_reg(host, data->blksz, SDHCI_DEFAULT_BOUNDARY_ARG);
sdhci_writew(host, data->blocks, SDHCI_BLOCK_COUNT);
+ MMC_TRACE(host->mmc,
+ "%s: 0x28=0x%08x 0x3E=0x%08x 0x06=0x%08x\n", __func__,
+ sdhci_readb(host, SDHCI_HOST_CONTROL),
+ sdhci_readw(host, SDHCI_HOST_CONTROL2),
+ sdhci_readw(host, SDHCI_BLOCK_COUNT));
}
static void sdhci_set_transfer_mode(struct sdhci_host *host,
@@ -1071,6 +1087,9 @@ static void sdhci_set_transfer_mode(struct sdhci_host *host,
mode |= SDHCI_TRNS_DMA;
sdhci_writew(host, mode, SDHCI_TRANSFER_MODE);
+ MMC_TRACE(host->mmc, "%s: 0x00=0x%08x 0x0C=0x%08x\n", __func__,
+ sdhci_readw(host, SDHCI_ARGUMENT2),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE));
}
static void sdhci_finish_data(struct sdhci_host *host)
@@ -1082,6 +1101,8 @@ static void sdhci_finish_data(struct sdhci_host *host)
data = host->data;
host->data = NULL;
+ MMC_TRACE(host->mmc, "%s: 0x24=0x%08x\n", __func__,
+ sdhci_readl(host, SDHCI_PRESENT_STATE));
if (host->flags & SDHCI_REQ_USE_DMA) {
if (host->flags & SDHCI_USE_ADMA)
sdhci_adma_table_post(host, data);
@@ -1210,6 +1231,11 @@ void sdhci_send_command(struct sdhci_host *host, struct mmc_command *cmd)
if (cmd->data)
host->data_start_time = ktime_get();
trace_mmc_cmd_rw_start(cmd->opcode, cmd->arg, cmd->flags);
+ MMC_TRACE(host->mmc,
+ "%s: updated 0x8=0x%08x 0xC=0x%08x 0xE=0x%08x\n", __func__,
+ sdhci_readl(host, SDHCI_ARGUMENT),
+ sdhci_readw(host, SDHCI_TRANSFER_MODE),
+ sdhci_readw(host, SDHCI_COMMAND));
sdhci_writew(host, SDHCI_MAKE_CMD(cmd->opcode, flags), SDHCI_COMMAND);
}
EXPORT_SYMBOL_GPL(sdhci_send_command);
@@ -1231,8 +1257,14 @@ static void sdhci_finish_command(struct sdhci_host *host)
sdhci_readb(host,
SDHCI_RESPONSE + (3-i)*4-1);
}
+ MMC_TRACE(host->mmc,
+ "%s: resp 0: 0x%08x resp 1: 0x%08x resp 2: 0x%08x resp 3: 0x%08x\n",
+ __func__, host->cmd->resp[0], host->cmd->resp[1],
+ host->cmd->resp[2], host->cmd->resp[3]);
} else {
host->cmd->resp[0] = sdhci_readl(host, SDHCI_RESPONSE);
+ MMC_TRACE(host->mmc, "%s: resp 0: 0x%08x\n",
+ __func__, host->cmd->resp[0]);
}
}
@@ -3169,6 +3201,9 @@ static irqreturn_t sdhci_irq(int irq, void *dev_id)
goto out;
}
+ MMC_TRACE(host->mmc,
+ "%s: intmask: 0x%x\n", __func__, intmask);
+
if (intmask & SDHCI_INT_AUTO_CMD_ERR)
host->auto_cmd_err_sts = sdhci_readw(host,
SDHCI_AUTO_CMD_ERR);
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index d3f7e43ea10e..62d8ae0c5f36 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -1216,11 +1216,9 @@ static int _smblib_vbus_regulator_disable(struct regulator_dev *rdev)
if (!chg->external_vconn) {
rc = smblib_read(chg, RID_CC_CONTROL_7_0_REG, &stat);
- if (rc < 0) {
+ if (rc < 0)
smblib_err(chg, "Couldn't read RID_CC_CONTROL_7_0 rc=%d\n",
rc);
- return rc;
- }
/* check if VCONN is enabled on either CC pin */
if (stat & VCONN_EN_CC_MASK) {
@@ -1229,7 +1227,6 @@ static int _smblib_vbus_regulator_disable(struct regulator_dev *rdev)
if (rc < 0)
smblib_err(chg, "Couldn't disable VCONN rc=%d\n",
rc);
- return rc;
}
}
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 70d935e9d1df..ed76a585ed03 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -410,6 +410,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_MODEL_NAME,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -456,6 +457,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
rc = smblib_get_prop_charger_temp_max(chg, val);
break;
+ case POWER_SUPPLY_PROP_MODEL_NAME:
+ val->strval = "smb138x";
+ break;
default:
pr_err("parallel power supply get prop %d not supported\n",
prop);
diff --git a/drivers/scsi/ufs/ufs-qcom-ice.c b/drivers/scsi/ufs/ufs-qcom-ice.c
index 070d27df6b49..85f82b2251c1 100644
--- a/drivers/scsi/ufs/ufs-qcom-ice.c
+++ b/drivers/scsi/ufs/ufs-qcom-ice.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/io.h>
#include <linux/of.h>
#include <linux/blkdev.h>
+#include <linux/spinlock.h>
#include <crypto/ice.h>
#include "ufs-qcom-ice.h"
@@ -168,6 +169,7 @@ out:
static void ufs_qcom_ice_cfg_work(struct work_struct *work)
{
+ unsigned long flags;
struct ice_data_setting ice_set;
struct ufs_qcom_host *qcom_host =
container_of(work, struct ufs_qcom_host, ice_cfg_work);
@@ -185,12 +187,17 @@ static void ufs_qcom_ice_cfg_work(struct work_struct *work)
qcom_host->ice.vops->config_start(qcom_host->ice.pdev,
qcom_host->req_pending, &ice_set, false);
+ spin_lock_irqsave(&qcom_host->ice_work_lock, flags);
+ qcom_host->req_pending = NULL;
+ spin_unlock_irqrestore(&qcom_host->ice_work_lock, flags);
+
/*
* Resume with requests processing. We assume config_start has been
* successful, but even if it wasn't we still must resume in order to
* allow for the request to be retried.
*/
ufshcd_scsi_unblock_requests(qcom_host->hba);
+
}
/**
@@ -246,6 +253,7 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
struct ice_data_setting ice_set;
char cmd_op = cmd->cmnd[0];
int err;
+ unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(qcom_host->hba->dev, "%s: ice device is not enabled\n",
@@ -272,14 +280,36 @@ int ufs_qcom_ice_req_setup(struct ufs_qcom_host *qcom_host,
dev_dbg(qcom_host->hba->dev,
"%s: scheduling task for ice setup\n",
__func__);
- qcom_host->req_pending = cmd->request;
- if (schedule_work(&qcom_host->ice_cfg_work))
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
+ if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
qcom_host->hba);
+ qcom_host->req_pending = cmd->request;
+ if (!schedule_work(
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ ufshcd_scsi_unblock_requests(
+ qcom_host->hba);
+ return err;
+ }
+ }
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
+
} else {
- dev_err(qcom_host->hba->dev,
- "%s: error in ice_vops->config %d\n",
- __func__, err);
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
}
return err;
@@ -320,6 +350,7 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
unsigned int bypass = 0;
struct request *req;
char cmd_op;
+ unsigned long flags;
if (!qcom_host->ice.pdev || !qcom_host->ice.vops) {
dev_dbg(dev, "%s: ice device is not enabled\n", __func__);
@@ -365,12 +396,43 @@ int ufs_qcom_ice_cfg_start(struct ufs_qcom_host *qcom_host,
* request processing.
*/
if (err == -EAGAIN) {
- qcom_host->req_pending = req;
- if (schedule_work(&qcom_host->ice_cfg_work))
+
+ dev_dbg(qcom_host->hba->dev,
+ "%s: scheduling task for ice setup\n",
+ __func__);
+
+ spin_lock_irqsave(
+ &qcom_host->ice_work_lock, flags);
+
+ if (!qcom_host->req_pending) {
ufshcd_scsi_block_requests(
+ qcom_host->hba);
+ qcom_host->req_pending = cmd->request;
+ if (!schedule_work(
+ &qcom_host->ice_cfg_work)) {
+ qcom_host->req_pending = NULL;
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock,
+ flags);
+
+ ufshcd_scsi_unblock_requests(
qcom_host->hba);
+ return err;
+ }
+ }
+
+ spin_unlock_irqrestore(
+ &qcom_host->ice_work_lock, flags);
+
+ } else {
+ if (err != -EBUSY)
+ dev_err(qcom_host->hba->dev,
+ "%s: error in ice_vops->config %d\n",
+ __func__, err);
}
- goto out;
+
+ return err;
}
}
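The two hunks above serialize deferred ICE configuration behind the new ice_work_lock: only one request may be parked in req_pending at a time, SCSI traffic is blocked only after that slot is claimed, and the claim is rolled back (with traffic unblocked) when schedule_work() reports the work item was already queued. Stripped of the UFS specifics, the claim/rollback flow looks roughly like the user-space sketch below; the mutex stands in for the spinlock and every name here is illustrative, not a driver symbol.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct ice_sched_model {
	pthread_mutex_t lock;		/* stands in for ice_work_lock */
	void *req_pending;		/* at most one parked request */
	bool requests_blocked;
	bool work_queued;		/* models schedule_work()'s return */
};

/* Models schedule_work(): false means the work item was already queued. */
static bool queue_cfg_work(struct ice_sched_model *m)
{
	if (m->work_queued)
		return false;
	m->work_queued = true;
	return true;
}

/* Request path: called when the ICE driver answers -EAGAIN. */
static int park_request(struct ice_sched_model *m, void *req)
{
	pthread_mutex_lock(&m->lock);
	if (!m->req_pending) {
		m->requests_blocked = true;	/* block SCSI traffic */
		m->req_pending = req;
		if (!queue_cfg_work(m)) {
			/* roll back, otherwise nothing would ever unblock us */
			m->req_pending = NULL;
			m->requests_blocked = false;
			pthread_mutex_unlock(&m->lock);
			return -1;
		}
	}
	pthread_mutex_unlock(&m->lock);
	return 0;
}

/* Worker path: finish configuration, release the slot, resume traffic. */
static void cfg_work(struct ice_sched_model *m)
{
	pthread_mutex_lock(&m->lock);
	m->req_pending = NULL;
	m->work_queued = false;
	pthread_mutex_unlock(&m->lock);
	m->requests_blocked = false;		/* unblock SCSI traffic */
}

int main(void)
{
	struct ice_sched_model m = { .lock = PTHREAD_MUTEX_INITIALIZER };
	int dummy;

	park_request(&m, &dummy);	/* claims the slot, queues the work */
	park_request(&m, &dummy);	/* slot taken: caller simply retries later */
	cfg_work(&m);			/* releases the slot, unblocks I/O */
	printf("pending=%p blocked=%d\n", m.req_pending, (int)m.requests_blocked);
	return 0;
}

Build with -pthread; the second park_request() is a no-op because the slot is already claimed, mirroring how a second -EAGAIN request is simply retried once the worker has unblocked the queue.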
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 2b0731b8358c..03b222d8be93 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1981,6 +1981,8 @@ static int ufs_qcom_init(struct ufs_hba *hba)
/* Make a two way bind between the qcom host and the hba */
host->hba = hba;
+ spin_lock_init(&host->ice_work_lock);
+
ufshcd_set_variant(hba, host);
err = ufs_qcom_ice_get_dev(host);
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 394de8302fd2..74d8a7a30ad6 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -370,6 +370,7 @@ struct ufs_qcom_host {
u32 dbg_print_en;
struct ufs_qcom_testbus testbus;
+ spinlock_t ice_work_lock;
struct work_struct ice_cfg_work;
struct request *req_pending;
};
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index 1e6db2a76fa5..d4acc3c911f5 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -48,6 +48,7 @@
#include "ufshci.h"
#include "ufs_quirks.h"
#include "ufs-debugfs.h"
+#include "ufs-qcom.h"
#define CREATE_TRACE_POINTS
#include <trace/events/ufs.h>
@@ -2884,11 +2885,11 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
"%s: failed to compose upiu %d\n",
__func__, err);
- lrbp->cmd = NULL;
- clear_bit_unlock(tag, &hba->lrb_in_use);
- ufshcd_release_all(hba);
- ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
- goto out;
+ lrbp->cmd = NULL;
+ clear_bit_unlock(tag, &hba->lrb_in_use);
+ ufshcd_release_all(hba);
+ ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
+ goto out;
}
err = ufshcd_map_sg(lrbp);
diff --git a/drivers/soc/qcom/service-notifier.c b/drivers/soc/qcom/service-notifier.c
index e7307c46a895..8a501d4d0615 100644
--- a/drivers/soc/qcom/service-notifier.c
+++ b/drivers/soc/qcom/service-notifier.c
@@ -215,7 +215,7 @@ static void send_ind_ack(struct work_struct *work)
if (QMI_RESP_BIT_SHIFT(resp.resp.result) != QMI_RESULT_SUCCESS_V01)
pr_err("QMI request failed 0x%x\n",
QMI_RESP_BIT_SHIFT(resp.resp.error));
- pr_debug("Indication ACKed for transid %d, service %s, instance %d!\n",
+ pr_info("Indication ACKed for transid %d, service %s, instance %d!\n",
data->ind_msg.transaction_id, data->ind_msg.service_path,
data->instance_id);
}
@@ -240,7 +240,7 @@ static void root_service_service_ind_cb(struct qmi_handle *handle,
return;
}
- pr_debug("Indication received from %s, state: 0x%x, trans-id: %d\n",
+ pr_info("Indication received from %s, state: 0x%x, trans-id: %d\n",
ind_msg.service_name, ind_msg.curr_state,
ind_msg.transaction_id);
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 7aa04a4fa156..b81348ceb469 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,8 +45,6 @@
#include <linux/msm-bus-board.h>
#include "spi_qsd.h"
-#define SPI_MAX_BYTES_PER_WORD (4)
-
static int msm_spi_pm_resume_runtime(struct device *device);
static int msm_spi_pm_suspend_runtime(struct device *device);
static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd);
@@ -440,12 +438,10 @@ static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
u32 data_in;
int i;
int shift;
- int read_bytes = (dd->pack_words ?
- SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
data_in = readl_relaxed(dd->base + SPI_INPUT_FIFO);
if (dd->read_buf) {
- for (i = 0; (i < read_bytes) &&
+ for (i = 0; (i < dd->bytes_per_word) &&
dd->rx_bytes_remaining; i++) {
/* The data format depends on bytes_per_word:
4 bytes: 0x12345678
@@ -458,8 +454,8 @@ static void msm_spi_read_word_from_fifo(struct msm_spi *dd)
dd->rx_bytes_remaining--;
}
} else {
- if (dd->rx_bytes_remaining >= read_bytes)
- dd->rx_bytes_remaining -= read_bytes;
+ if (dd->rx_bytes_remaining >= dd->bytes_per_word)
+ dd->rx_bytes_remaining -= dd->bytes_per_word;
else
dd->rx_bytes_remaining = 0;
}
@@ -556,7 +552,7 @@ msm_spi_set_bpw_and_no_io_flags(struct msm_spi *dd, u32 *config, int n)
if (n != (*config & SPI_CFG_N))
*config = (*config & ~SPI_CFG_N) | n;
- if (dd->tx_mode == SPI_BAM_MODE) {
+ if (dd->mode == SPI_BAM_MODE) {
if (dd->read_buf == NULL)
*config |= SPI_NO_INPUT;
if (dd->write_buf == NULL)
@@ -621,34 +617,25 @@ static void msm_spi_set_spi_config(struct msm_spi *dd, int bpw)
static void msm_spi_set_mx_counts(struct msm_spi *dd, u32 n_words)
{
/*
- * For FIFO mode:
- * - Set the MX_OUTPUT_COUNT/MX_INPUT_COUNT registers to 0
- * - Set the READ/WRITE_COUNT registers to 0 (infinite mode)
- * or num bytes (finite mode) if less than fifo worth of data.
- * For Block mode:
- * - Set the MX_OUTPUT/MX_INPUT_COUNT registers to num xfer bytes.
- * - Set the READ/WRITE_COUNT registers to 0.
+ * n_words cannot exceed fifo_size, and only one READ COUNT
+ * interrupt is generated per transaction, so for transactions
+ * larger than fifo size READ COUNT must be disabled.
+ * For those transactions we usually move to Data Mover mode.
*/
- if (dd->tx_mode != SPI_BAM_MODE) {
- if (dd->tx_mode == SPI_FIFO_MODE) {
- if (n_words <= dd->input_fifo_size)
- msm_spi_set_write_count(dd, n_words);
- else
- msm_spi_set_write_count(dd, 0);
- writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
- } else
- writel_relaxed(n_words, dd->base + SPI_MX_OUTPUT_COUNT);
-
- if (dd->rx_mode == SPI_FIFO_MODE) {
- if (n_words <= dd->input_fifo_size)
- writel_relaxed(n_words,
- dd->base + SPI_MX_READ_COUNT);
- else
- writel_relaxed(0,
- dd->base + SPI_MX_READ_COUNT);
+ if (dd->mode == SPI_FIFO_MODE) {
+ if (n_words <= dd->input_fifo_size) {
+ writel_relaxed(n_words,
+ dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, n_words);
+ } else {
+ writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
+ msm_spi_set_write_count(dd, 0);
+ }
+ if (dd->qup_ver == SPI_QUP_VERSION_BFAM) {
+ /* must be zero for FIFO */
writel_relaxed(0, dd->base + SPI_MX_INPUT_COUNT);
- } else
- writel_relaxed(n_words, dd->base + SPI_MX_INPUT_COUNT);
+ writel_relaxed(0, dd->base + SPI_MX_OUTPUT_COUNT);
+ }
} else {
/* must be zero for BAM and DMOV */
writel_relaxed(0, dd->base + SPI_MX_READ_COUNT);
@@ -895,7 +882,7 @@ xfr_err:
static int
msm_spi_bam_next_transfer(struct msm_spi *dd)
{
- if (dd->tx_mode != SPI_BAM_MODE)
+ if (dd->mode != SPI_BAM_MODE)
return 0;
if (dd->tx_bytes_remaining > 0) {
@@ -914,7 +901,7 @@ msm_spi_bam_next_transfer(struct msm_spi *dd)
static int msm_spi_dma_send_next(struct msm_spi *dd)
{
int ret = 0;
- if (dd->tx_mode == SPI_BAM_MODE)
+ if (dd->mode == SPI_BAM_MODE)
ret = msm_spi_bam_next_transfer(dd);
return ret;
}
@@ -945,38 +932,32 @@ static inline irqreturn_t msm_spi_qup_irq(int irq, void *dev_id)
}
op = readl_relaxed(dd->base + SPI_OPERATIONAL);
- writel_relaxed(op, dd->base + SPI_OPERATIONAL);
- /*
- * Ensure service flag was cleared before further
- * processing of interrupt.
- */
- mb();
if (op & SPI_OP_INPUT_SERVICE_FLAG) {
+ writel_relaxed(SPI_OP_INPUT_SERVICE_FLAG,
+ dd->base + SPI_OPERATIONAL);
+ /*
+ * Ensure service flag was cleared before further
+ * processing of interrupt.
+ */
+ mb();
ret |= msm_spi_input_irq(irq, dev_id);
}
if (op & SPI_OP_OUTPUT_SERVICE_FLAG) {
+ writel_relaxed(SPI_OP_OUTPUT_SERVICE_FLAG,
+ dd->base + SPI_OPERATIONAL);
+ /*
+ * Ensure service flag was cleared before further
+ * processing of interrupt.
+ */
+ mb();
ret |= msm_spi_output_irq(irq, dev_id);
}
- if (dd->tx_mode != SPI_BAM_MODE) {
- if (!dd->rx_done) {
- if (dd->rx_bytes_remaining == 0)
- dd->rx_done = true;
- }
- if (!dd->tx_done) {
- if (!dd->tx_bytes_remaining &&
- (op & SPI_OP_IP_FIFO_NOT_EMPTY)) {
- dd->tx_done = true;
- }
- }
- }
- if (dd->tx_done && dd->rx_done) {
- msm_spi_set_state(dd, SPI_OP_STATE_RESET);
- dd->tx_done = false;
- dd->rx_done = false;
+ if (dd->done) {
complete(&dd->rx_transfer_complete);
complete(&dd->tx_transfer_complete);
+ dd->done = 0;
}
return ret;
}
@@ -987,23 +968,17 @@ static irqreturn_t msm_spi_input_irq(int irq, void *dev_id)
dd->stat_rx++;
- if (dd->rx_mode == SPI_MODE_NONE)
+ if (dd->mode == SPI_MODE_NONE)
return IRQ_HANDLED;
- if (dd->rx_mode == SPI_FIFO_MODE) {
+ if (dd->mode == SPI_FIFO_MODE) {
while ((readl_relaxed(dd->base + SPI_OPERATIONAL) &
SPI_OP_IP_FIFO_NOT_EMPTY) &&
(dd->rx_bytes_remaining > 0)) {
msm_spi_read_word_from_fifo(dd);
}
- } else if (dd->rx_mode == SPI_BLOCK_MODE) {
- int count = 0;
-
- while (dd->rx_bytes_remaining &&
- (count < dd->input_block_size)) {
- msm_spi_read_word_from_fifo(dd);
- count += SPI_MAX_BYTES_PER_WORD;
- }
+ if (dd->rx_bytes_remaining == 0)
+ msm_spi_complete(dd);
}
return IRQ_HANDLED;
@@ -1014,20 +989,18 @@ static void msm_spi_write_word_to_fifo(struct msm_spi *dd)
u32 word;
u8 byte;
int i;
- int write_bytes =
- (dd->pack_words ? SPI_MAX_BYTES_PER_WORD : dd->bytes_per_word);
word = 0;
if (dd->write_buf) {
- for (i = 0; (i < write_bytes) &&
+ for (i = 0; (i < dd->bytes_per_word) &&
dd->tx_bytes_remaining; i++) {
dd->tx_bytes_remaining--;
byte = *dd->write_buf++;
word |= (byte << (BITS_PER_BYTE * i));
}
} else
- if (dd->tx_bytes_remaining > write_bytes)
- dd->tx_bytes_remaining -= write_bytes;
+ if (dd->tx_bytes_remaining > dd->bytes_per_word)
+ dd->tx_bytes_remaining -= dd->bytes_per_word;
else
dd->tx_bytes_remaining = 0;
dd->write_xfr_cnt++;
@@ -1039,22 +1012,11 @@ static inline void msm_spi_write_rmn_to_fifo(struct msm_spi *dd)
{
int count = 0;
- if (dd->tx_mode == SPI_FIFO_MODE) {
- while ((dd->tx_bytes_remaining > 0) &&
- (count < dd->input_fifo_size) &&
- !(readl_relaxed(dd->base + SPI_OPERATIONAL)
- & SPI_OP_OUTPUT_FIFO_FULL)) {
- msm_spi_write_word_to_fifo(dd);
- count++;
- }
- }
-
- if (dd->tx_mode == SPI_BLOCK_MODE) {
- while (dd->tx_bytes_remaining &&
- (count < dd->output_block_size)) {
- msm_spi_write_word_to_fifo(dd);
- count += SPI_MAX_BYTES_PER_WORD;
- }
+ while ((dd->tx_bytes_remaining > 0) && (count < dd->input_fifo_size) &&
+ !(readl_relaxed(dd->base + SPI_OPERATIONAL) &
+ SPI_OP_OUTPUT_FIFO_FULL)) {
+ msm_spi_write_word_to_fifo(dd);
+ count++;
}
}
@@ -1064,11 +1026,11 @@ static irqreturn_t msm_spi_output_irq(int irq, void *dev_id)
dd->stat_tx++;
- if (dd->tx_mode == SPI_MODE_NONE)
+ if (dd->mode == SPI_MODE_NONE)
return IRQ_HANDLED;
/* Output FIFO is empty. Transmit any outstanding write data. */
- if ((dd->tx_mode == SPI_FIFO_MODE) || (dd->tx_mode == SPI_BLOCK_MODE))
+ if (dd->mode == SPI_FIFO_MODE)
msm_spi_write_rmn_to_fifo(dd);
return IRQ_HANDLED;
@@ -1144,7 +1106,7 @@ error:
static int msm_spi_dma_map_buffers(struct msm_spi *dd)
{
int ret = 0;
- if (dd->tx_mode == SPI_BAM_MODE)
+ if (dd->mode == SPI_BAM_MODE)
ret = msm_spi_bam_map_buffers(dd);
return ret;
}
@@ -1173,7 +1135,7 @@ static void msm_spi_bam_unmap_buffers(struct msm_spi *dd)
static inline void msm_spi_dma_unmap_buffers(struct msm_spi *dd)
{
- if (dd->tx_mode == SPI_BAM_MODE)
+ if (dd->mode == SPI_BAM_MODE)
msm_spi_bam_unmap_buffers(dd);
}
@@ -1235,11 +1197,9 @@ static void
msm_spi_set_transfer_mode(struct msm_spi *dd, u8 bpw, u32 read_count)
{
if (msm_spi_use_dma(dd, dd->cur_transfer, bpw)) {
- dd->tx_mode = SPI_BAM_MODE;
- dd->rx_mode = SPI_BAM_MODE;
+ dd->mode = SPI_BAM_MODE;
} else {
- dd->rx_mode = SPI_FIFO_MODE;
- dd->tx_mode = SPI_FIFO_MODE;
+ dd->mode = SPI_FIFO_MODE;
dd->read_len = dd->cur_transfer->len;
dd->write_len = dd->cur_transfer->len;
}
@@ -1255,19 +1215,14 @@ static void msm_spi_set_qup_io_modes(struct msm_spi *dd)
spi_iom = readl_relaxed(dd->base + SPI_IO_MODES);
/* Set input and output transfer mode: FIFO, DMOV, or BAM */
spi_iom &= ~(SPI_IO_M_INPUT_MODE | SPI_IO_M_OUTPUT_MODE);
- spi_iom = (spi_iom | (dd->tx_mode << OUTPUT_MODE_SHIFT));
- spi_iom = (spi_iom | (dd->rx_mode << INPUT_MODE_SHIFT));
- /* Always enable packing for all % 8 bits_per_word */
- if (dd->cur_transfer->bits_per_word &&
- ((dd->cur_transfer->bits_per_word == 8) ||
- (dd->cur_transfer->bits_per_word == 16) ||
- (dd->cur_transfer->bits_per_word == 32))) {
+ spi_iom = (spi_iom | (dd->mode << OUTPUT_MODE_SHIFT));
+ spi_iom = (spi_iom | (dd->mode << INPUT_MODE_SHIFT));
+ /* Turn on packing for data mover */
+ if (dd->mode == SPI_BAM_MODE)
spi_iom |= SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN;
- dd->pack_words = true;
- } else {
+ else {
spi_iom &= ~(SPI_IO_M_PACK_EN | SPI_IO_M_UNPACK_EN);
spi_iom |= SPI_IO_M_OUTPUT_BIT_SHIFT_EN;
- dd->pack_words = false;
}
/*if (dd->mode == SPI_BAM_MODE) {
@@ -1325,7 +1280,7 @@ static void msm_spi_set_qup_op_mask(struct msm_spi *dd)
{
/* mask INPUT and OUTPUT service flags in to prevent IRQs on FIFO status
* change in BAM mode */
- u32 mask = (dd->tx_mode == SPI_BAM_MODE) ?
+ u32 mask = (dd->mode == SPI_BAM_MODE) ?
QUP_OP_MASK_OUTPUT_SERVICE_FLAG | QUP_OP_MASK_INPUT_SERVICE_FLAG
: 0;
writel_relaxed(mask, dd->base + QUP_OPERATIONAL_MASK);
@@ -1366,8 +1321,6 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
dd->rx_bytes_remaining = dd->cur_msg_len;
dd->read_buf = dd->cur_transfer->rx_buf;
dd->write_buf = dd->cur_transfer->tx_buf;
- dd->tx_done = false;
- dd->rx_done = false;
init_completion(&dd->tx_transfer_complete);
init_completion(&dd->rx_transfer_complete);
if (dd->cur_transfer->bits_per_word)
@@ -1398,12 +1351,10 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
msm_spi_set_transfer_mode(dd, bpw, read_count);
msm_spi_set_mx_counts(dd, read_count);
- if (dd->tx_mode == SPI_BAM_MODE) {
+ if (dd->mode == SPI_BAM_MODE) {
ret = msm_spi_dma_map_buffers(dd);
if (ret < 0) {
pr_err("Mapping DMA buffers\n");
- dd->tx_mode = SPI_MODE_NONE;
- dd->rx_mode = SPI_MODE_NONE;
return ret;
}
}
@@ -1417,11 +1368,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
the first. Restricting this to one write avoids contention
issues and race conditions between this thread and the int handler
*/
- if (dd->tx_mode != SPI_BAM_MODE) {
+ if (dd->mode == SPI_FIFO_MODE) {
if (msm_spi_prepare_for_write(dd))
goto transfer_end;
msm_spi_start_write(dd, read_count);
- } else {
+ } else if (dd->mode == SPI_BAM_MODE) {
if ((msm_spi_bam_begin_transfer(dd)) < 0) {
dev_err(dd->dev, "%s: BAM transfer setup failed\n",
__func__);
@@ -1437,11 +1388,11 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
* might fire before the first word is written resulting in a
* possible race condition.
*/
- if (dd->tx_mode != SPI_BAM_MODE)
+ if (dd->mode != SPI_BAM_MODE)
if (msm_spi_set_state(dd, SPI_OP_STATE_RUN)) {
dev_warn(dd->dev,
"%s: Failed to set QUP to run-state. Mode:%d",
- __func__, dd->tx_mode);
+ __func__, dd->mode);
goto transfer_end;
}
@@ -1471,11 +1422,10 @@ static int msm_spi_process_transfer(struct msm_spi *dd)
msm_spi_udelay(dd->xfrs_delay_usec);
transfer_end:
- if ((dd->tx_mode == SPI_BAM_MODE) && status)
+ if ((dd->mode == SPI_BAM_MODE) && status)
msm_spi_bam_flush(dd);
msm_spi_dma_unmap_buffers(dd);
- dd->tx_mode = SPI_MODE_NONE;
- dd->rx_mode = SPI_MODE_NONE;
+ dd->mode = SPI_MODE_NONE;
msm_spi_set_state(dd, SPI_OP_STATE_RESET);
if (!dd->cur_transfer->cs_change)
@@ -2403,8 +2353,7 @@ static int init_resources(struct platform_device *pdev)
pclk_enabled = 0;
dd->transfer_pending = 0;
- dd->tx_mode = SPI_MODE_NONE;
- dd->rx_mode = SPI_MODE_NONE;
+ dd->mode = SPI_MODE_NONE;
rc = msm_spi_request_irq(dd, pdev, master);
if (rc)
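With the pack_words path removed, the FIFO helpers again move dd->bytes_per_word bytes per 32-bit FIFO word, placing byte i at bit position BITS_PER_BYTE * i exactly as msm_spi_write_word_to_fifo() does above, so four bytes 0x78 0x56 0x34 0x12 pack to 0x12345678 as the in-code comment notes. A small stand-alone illustration of that packing; the helper name is made up for the example.

#include <stdio.h>
#include <stdint.h>

#define BITS_PER_BYTE 8

/*
 * Pack up to bytes_per_word bytes from buf into one 32-bit FIFO word,
 * byte i landing at bit BITS_PER_BYTE * i, decrementing *remaining the
 * way msm_spi_write_word_to_fifo() does for tx_bytes_remaining.
 */
static uint32_t pack_fifo_word(const uint8_t *buf, unsigned int bytes_per_word,
			       unsigned int *remaining)
{
	uint32_t word = 0;
	unsigned int i;

	for (i = 0; i < bytes_per_word && *remaining; i++) {
		(*remaining)--;
		word |= (uint32_t)buf[i] << (BITS_PER_BYTE * i);
	}
	return word;
}

int main(void)
{
	const uint8_t tx[] = { 0x78, 0x56, 0x34, 0x12, 0xaa };
	unsigned int remaining = sizeof(tx);

	/* first word packs all four low bytes: prints 0x12345678 */
	printf("word0 = 0x%08x\n", pack_fifo_word(tx, 4, &remaining));
	/* only one byte left for the second word: prints 0x000000aa */
	printf("word1 = 0x%08x\n", pack_fifo_word(tx + 4, 4, &remaining));
	printf("remaining = %u\n", remaining);
	return 0;
}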
diff --git a/drivers/spi/spi_qsd.h b/drivers/spi/spi_qsd.h
index e8e6cdce1a02..53ec1e600594 100644
--- a/drivers/spi/spi_qsd.h
+++ b/drivers/spi/spi_qsd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -113,8 +113,6 @@
#define INPUT_MODE_SHIFT QSD_REG(10) QUP_REG(12)
/* SPI_OPERATIONAL fields */
-#define SPI_OP_IN_BLK_RD_REQ_FLAG 0x00002000
-#define SPI_OP_OUT_BLK_WR_REQ_FLAG 0x00001000
#define SPI_OP_MAX_INPUT_DONE_FLAG 0x00000800
#define SPI_OP_MAX_OUTPUT_DONE_FLAG 0x00000400
#define SPI_OP_INPUT_SERVICE_FLAG 0x00000200
@@ -316,8 +314,7 @@ struct msm_spi {
bool transfer_pending;
wait_queue_head_t continue_suspend;
/* DMA data */
- enum msm_spi_mode tx_mode;
- enum msm_spi_mode rx_mode;
+ enum msm_spi_mode mode;
bool use_dma;
int tx_dma_chan;
int tx_dma_crci;
@@ -349,8 +346,7 @@ struct msm_spi {
#endif
struct msm_spi_platform_data *pdata; /* Platform data */
/* When set indicates multiple transfers in a single message */
- bool rx_done;
- bool tx_done;
+ bool done;
u32 cur_msg_len;
/* Used in FIFO mode to keep track of the transfer being processed */
struct spi_transfer *cur_tx_transfer;
@@ -368,7 +364,6 @@ struct msm_spi {
struct pinctrl_state *pins_active;
struct pinctrl_state *pins_sleep;
bool is_init_complete;
- bool pack_words;
};
/* Forward declaration */
@@ -522,8 +517,7 @@ static inline void msm_spi_set_write_count(struct msm_spi *dd, int val)
static inline void msm_spi_complete(struct msm_spi *dd)
{
- dd->tx_done = true;
- dd->rx_done = true;
+ dd->done = 1;
}
static inline void msm_spi_enable_error_flags(struct msm_spi *dd)
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index f3715d85aedc..7cafd0dd59f5 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -454,13 +454,23 @@ static void bulk_in_complete(struct usb_ep *ep, struct usb_request *req)
struct fsg_buffhd *bh = req->context;
if (req->status || req->actual != req->length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, req->length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+ * Disconnect and completion might race each other and driver data
+	 * is set to NULL during ep disable. So, add a check if that is the case.
+ */
+ if (!common) {
+ bh->inreq_busy = 0;
+ bh->state = BUF_STATE_EMPTY;
+ return;
+ }
+
spin_lock(&common->lock);
bh->inreq_busy = 0;
bh->state = BUF_STATE_EMPTY;
@@ -473,15 +483,24 @@ static void bulk_out_complete(struct usb_ep *ep, struct usb_request *req)
struct fsg_common *common = ep->driver_data;
struct fsg_buffhd *bh = req->context;
- dump_msg(common, "bulk-out", req->buf, req->actual);
if (req->status || req->actual != bh->bulk_out_intended_length)
- DBG(common, "%s --> %d, %u/%u\n", __func__,
+ pr_debug("%s --> %d, %u/%u\n", __func__,
req->status, req->actual, bh->bulk_out_intended_length);
if (req->status == -ECONNRESET) /* Request was cancelled */
usb_ep_fifo_flush(ep);
/* Hold the lock while we update the request and buffer states */
smp_wmb();
+ /*
+ * Disconnect and completion might race each other and driver data
+	 * is set to NULL during ep disable. So, add a check if that is the case.
+ */
+ if (!common) {
+ bh->outreq_busy = 0;
+ return;
+ }
+
+ dump_msg(common, "bulk-out", req->buf, req->actual);
spin_lock(&common->lock);
bh->outreq_busy = 0;
bh->state = BUF_STATE_FULL;
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index 11c73f584594..061095b78c37 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -683,6 +683,7 @@ static int rndis_qc_set_alt(struct usb_function *f, unsigned intf, unsigned alt)
/* we know alt == 0 */
+ opts = container_of(f->fi, struct f_rndis_qc_opts, func_inst);
if (intf == rndis->ctrl_id) {
if (rndis->notify->driver_data) {
VDBG(cdev, "reset rndis control %d\n", intf);
diff --git a/drivers/usb/gadget/function/u_ctrl_qti.c b/drivers/usb/gadget/function/u_ctrl_qti.c
index 8ef223370827..013c54da0d0a 100644
--- a/drivers/usb/gadget/function/u_ctrl_qti.c
+++ b/drivers/usb/gadget/function/u_ctrl_qti.c
@@ -485,7 +485,7 @@ qti_ctrl_write(struct file *fp, const char __user *buf, size_t count,
port->copied_from_modem++;
spin_lock_irqsave(&port->lock, flags);
- if (port && port->port_usb) {
+ if (port->port_usb) {
if (port->port_type == QTI_PORT_RMNET) {
g_rmnet = (struct grmnet *)port->port_usb;
} else {
diff --git a/drivers/usb/gadget/function/u_data_ipa.c b/drivers/usb/gadget/function/u_data_ipa.c
index bf9e0fa9950b..6c18a04f6c1c 100644
--- a/drivers/usb/gadget/function/u_data_ipa.c
+++ b/drivers/usb/gadget/function/u_data_ipa.c
@@ -1107,18 +1107,18 @@ static void bam2bam_data_resume_work(struct work_struct *w)
unsigned long flags;
int ret;
- if (!port->port_usb->cdev) {
- pr_err("!port->port_usb->cdev is NULL");
+ spin_lock_irqsave(&port->port_lock, flags);
+ if (!port->port_usb || !port->port_usb->cdev) {
+ pr_err("port->port_usb or cdev is NULL");
goto exit;
}
if (!port->port_usb->cdev->gadget) {
- pr_err("!port->port_usb->cdev->gadget is NULL");
+ pr_err("port->port_usb->cdev->gadget is NULL");
goto exit;
}
pr_debug("%s: resume started\n", __func__);
- spin_lock_irqsave(&port->port_lock, flags);
gadget = port->port_usb->cdev->gadget;
if (!gadget) {
spin_unlock_irqrestore(&port->port_lock, flags);
diff --git a/drivers/usb/phy/phy-msm-usb.c b/drivers/usb/phy/phy-msm-usb.c
index f3e17b84efff..f1360f20ffe4 100644
--- a/drivers/usb/phy/phy-msm-usb.c
+++ b/drivers/usb/phy/phy-msm-usb.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2009-2017, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2009-2011, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index da8ca5a7da58..6d73a04d0150 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -39,6 +39,15 @@ struct bvec_iter {
current bvec */
};
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+/* Double declaration from ktime.h so as to not break the include dependency
+ * chain. Should be kept up to date.
+ */
+union blk_ktime {
+ s64 tv64;
+};
+#endif
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -54,6 +63,10 @@ struct bio {
struct bvec_iter bi_iter;
+#ifdef CONFIG_BLOCK_PERF_FRAMEWORK
+ union blk_ktime submit_time;
+ unsigned int blk_sector_count;
+#endif
/* Number of segments in this BIO after
* physical address coalescing is performed.
*/
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 0eab4811ee92..d81d6a2db342 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -113,7 +113,7 @@ extern const struct cpumask *const cpu_isolated_mask;
#define cpu_possible(cpu) ((cpu) == 0)
#define cpu_present(cpu) ((cpu) == 0)
#define cpu_active(cpu) ((cpu) == 0)
-#define cpu_isolated(cpu) ((cpu) == 0)
+#define cpu_isolated(cpu) ((cpu) != 0)
#endif
/* verify cpu argument to cpumask_* operators */
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 276dbf19805b..804d89a825fc 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -22,6 +22,7 @@
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
+#include <linux/mmc/ring_buffer.h>
#define MMC_AUTOSUSPEND_DELAY_MS 3000
@@ -571,6 +572,7 @@ struct mmc_host {
} perf;
bool perf_enable;
#endif
+ struct mmc_trace_buffer trace_buf;
enum dev_state dev_status;
bool wakeup_on_idle;
struct mmc_cmdq_context_info cmdq_ctx;
diff --git a/include/linux/mmc/ring_buffer.h b/include/linux/mmc/ring_buffer.h
new file mode 100644
index 000000000000..e6bf163ffcfe
--- /dev/null
+++ b/include/linux/mmc/ring_buffer.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __MMC_RING_BUFFER__
+#define __MMC_RING_BUFFER__
+
+#include <linux/mmc/card.h>
+#include <linux/smp.h>
+
+#include "core.h"
+
+#define MMC_TRACE_RBUF_SZ_ORDER 2 /* 2^2 pages */
+#define MMC_TRACE_RBUF_SZ (PAGE_SIZE * (1 << MMC_TRACE_RBUF_SZ_ORDER))
+#define MMC_TRACE_EVENT_SZ 256
+#define MMC_TRACE_RBUF_NUM_EVENTS (MMC_TRACE_RBUF_SZ / MMC_TRACE_EVENT_SZ)
+
+struct mmc_host;
+struct mmc_trace_buffer {
+ int wr_idx;
+ bool stop_tracing;
+ spinlock_t trace_lock;
+ char *data;
+};
+
+#ifdef CONFIG_MMC_RING_BUFFER
+void mmc_stop_tracing(struct mmc_host *mmc);
+void mmc_trace_write(struct mmc_host *mmc, const char *fmt, ...);
+void mmc_trace_init(struct mmc_host *mmc);
+void mmc_trace_free(struct mmc_host *mmc);
+void mmc_dump_trace_buffer(struct mmc_host *mmc, struct seq_file *s);
+#else
+static inline void mmc_stop_tracing(struct mmc_host *mmc) {}
+static inline void mmc_trace_write(struct mmc_host *mmc,
+ const char *fmt, ...) {}
+static inline void mmc_trace_init(struct mmc_host *mmc) {}
+static inline void mmc_trace_free(struct mmc_host *mmc) {}
+static inline void mmc_dump_trace_buffer(struct mmc_host *mmc,
+ struct seq_file *s) {}
+#endif
+
+#define MMC_TRACE(mmc, fmt, ...) \
+ mmc_trace_write(mmc, fmt, ##__VA_ARGS__)
+
+#endif /* __MMC_RING_BUFFER__ */
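The header above only fixes the record geometry (MMC_TRACE_RBUF_NUM_EVENTS slots of MMC_TRACE_EVENT_SZ bytes each) and declares the writers used elsewhere in this patch, such as the MMC_TRACE() calls added to sdhci.c. As a rough user-space model of how fixed-size records wrap in a buffer with that geometry (the slot arithmetic below is an illustration, not the driver's code):

#include <stdarg.h>
#include <stdio.h>
#include <stdlib.h>

#define EVENT_SZ	256	/* mirrors MMC_TRACE_EVENT_SZ */
#define NUM_EVENTS	64	/* stands in for MMC_TRACE_RBUF_NUM_EVENTS */

struct trace_buffer {
	unsigned int wr_idx;	/* monotonically increasing write index */
	char data[NUM_EVENTS * EVENT_SZ];
};

/* Format one fixed-size record into the next slot, wrapping when full. */
static void trace_write(struct trace_buffer *tb, const char *fmt, ...)
{
	char *slot = tb->data + (tb->wr_idx % NUM_EVENTS) * EVENT_SZ;
	va_list ap;

	va_start(ap, fmt);
	vsnprintf(slot, EVENT_SZ, fmt, ap);
	va_end(ap);
	tb->wr_idx++;
}

int main(void)
{
	struct trace_buffer *tb = calloc(1, sizeof(*tb));
	unsigned int i;

	if (!tb)
		return 1;
	for (i = 0; i < NUM_EVENTS + 3; i++)	/* force a wrap-around */
		trace_write(tb, "event %u: intmask 0x%x\n", i, 0x1u << (i % 8));
	/* once wrapped, wr_idx % NUM_EVENTS points at the oldest record */
	printf("%s", tb->data + (tb->wr_idx % NUM_EVENTS) * EVENT_SZ);
	free(tb);
	return 0;
}

Once the buffer has wrapped, the oldest surviving record sits at slot wr_idx % NUM_EVENTS, which is where a dump routine would naturally start reading.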
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index bc5637ab01df..4cde40dac778 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -55,7 +55,7 @@
#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
/* big enough to hold our biggest descriptor */
-#define USB_COMP_EP0_BUFSIZ 1024
+#define USB_COMP_EP0_BUFSIZ 4096
#define USB_MS_TO_HS_INTERVAL(x) (ilog2((x * 1000 / 125)) + 1)
struct usb_configuration;
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index 40df4f8f1de0..95125c5518e2 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -590,6 +590,7 @@ void update_cluster_topology(void)
* cluster_head visible.
*/
move_list(&cluster_head, &new_head, false);
+ update_all_clusters_stats();
}
void init_clusters(void)
diff --git a/security/pfe/pfk_kc.c b/security/pfe/pfk_kc.c
index 0869a862f521..cd3f08b3959d 100644
--- a/security/pfe/pfk_kc.c
+++ b/security/pfe/pfk_kc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -100,6 +100,9 @@ struct kc_entry {
struct task_struct *thread_pending;
enum pfk_kc_entry_state state;
+
+ /* ref count for the number of requests in the HW queue for this key */
+ int loaded_ref_cnt;
int scm_error;
};
@@ -520,6 +523,10 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
if (entry_exists) {
kc_update_timestamp(entry);
entry->state = ACTIVE_ICE_LOADED;
+
+ if (async)
+ entry->loaded_ref_cnt++;
+
break;
}
case (FREE):
@@ -529,8 +536,17 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
entry->scm_error = ret;
pr_err("%s: key load error (%d)\n", __func__, ret);
} else {
- entry->state = ACTIVE_ICE_LOADED;
kc_update_timestamp(entry);
+ entry->state = ACTIVE_ICE_LOADED;
+
+ /*
+			 * only increase ref cnt for async calls;
+			 * sync calls from within the work thread do not
+			 * pass requests further to HW
+ */
+ if (async)
+ entry->loaded_ref_cnt++;
+
}
break;
case (ACTIVE_ICE_PRELOAD):
@@ -539,6 +555,10 @@ int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
break;
case (ACTIVE_ICE_LOADED):
kc_update_timestamp(entry);
+
+ if (async)
+ entry->loaded_ref_cnt++;
+
break;
case(SCM_ERROR):
ret = entry->scm_error;
@@ -572,6 +592,8 @@ void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
const unsigned char *salt, size_t salt_size)
{
struct kc_entry *entry = NULL;
+ struct task_struct *tmp_pending = NULL;
+ int ref_cnt = 0;
if (!kc_is_ready())
return;
@@ -591,14 +613,28 @@ void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
if (!entry) {
kc_spin_unlock();
pr_err("internal error, there should an entry to unlock\n");
+
return;
}
- entry->state = INACTIVE;
+ ref_cnt = --entry->loaded_ref_cnt;
+
+ if (ref_cnt < 0)
+ pr_err("internal error, ref count should never be negative\n");
- /* wake-up invalidation if it's waiting for the entry to be released */
- if (entry->thread_pending) {
- wake_up_process(entry->thread_pending);
- entry->thread_pending = NULL;
+ if (!ref_cnt) {
+ entry->state = INACTIVE;
+ /*
+ * wake-up invalidation if it's waiting
+ * for the entry to be released
+ */
+ if (entry->thread_pending) {
+ tmp_pending = entry->thread_pending;
+ entry->thread_pending = NULL;
+
+ kc_spin_unlock();
+ wake_up_process(tmp_pending);
+ return;
+ }
}
kc_spin_unlock();