 Documentation/devicetree/bindings/arm/msm/qcom,osm.txt | 10
 Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt | 8
 Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt | 19
 arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi | 11
 arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi | 2
 arch/arm/boot/dts/qcom/msmcobalt.dtsi | 62
 arch/arm/boot/dts/qcom/msmfalcon.dtsi | 53
 arch/arm/mm/dma-mapping.c | 1
 arch/arm64/configs/msm-perf_defconfig | 3
 arch/arm64/configs/msm_defconfig | 3
 arch/arm64/configs/msmcortex-perf_defconfig | 3
 arch/arm64/configs/msmcortex_defconfig | 3
 drivers/clk/msm/Makefile | 2
 drivers/clk/msm/clock-osm.c | 333
 drivers/clk/qcom/Makefile | 1
 drivers/clk/qcom/clk-dummy.c | 110
 drivers/clk/qcom/common.h | 4
 drivers/gpu/msm/Makefile | 2
 drivers/gpu/msm/a5xx_reg.h | 2
 drivers/gpu/msm/adreno.c | 126
 drivers/gpu/msm/adreno.h | 155
 drivers/gpu/msm/adreno_a3xx.c | 4
 drivers/gpu/msm/adreno_a4xx.c | 563
 drivers/gpu/msm/adreno_a4xx.h | 9
 drivers/gpu/msm/adreno_a4xx_preempt.c | 571
 drivers/gpu/msm/adreno_a4xx_snapshot.c | 5
 drivers/gpu/msm/adreno_a5xx.c | 687
 drivers/gpu/msm/adreno_a5xx.h | 20
 drivers/gpu/msm/adreno_a5xx_preempt.c | 574
 drivers/gpu/msm/adreno_debugfs.c | 3
 drivers/gpu/msm/adreno_dispatch.c | 617
 drivers/gpu/msm/adreno_dispatch.h | 40
 drivers/gpu/msm/adreno_drawctxt.c | 49
 drivers/gpu/msm/adreno_drawctxt.h | 3
 drivers/gpu/msm/adreno_ioctl.c | 2
 drivers/gpu/msm/adreno_iommu.c | 102
 drivers/gpu/msm/adreno_iommu.h | 6
 drivers/gpu/msm/adreno_ringbuffer.c | 387
 drivers/gpu/msm/adreno_ringbuffer.h | 26
 drivers/gpu/msm/adreno_snapshot.c | 5
 drivers/gpu/msm/adreno_trace.h | 101
 drivers/gpu/msm/kgsl.c | 32
 drivers/gpu/msm/kgsl.h | 41
 drivers/gpu/msm/kgsl_cmdbatch.h | 4
 drivers/gpu/msm/kgsl_device.h | 1
 drivers/gpu/msm/kgsl_events.c | 25
 drivers/gpu/msm/kgsl_iommu.c | 50
 drivers/gpu/msm/kgsl_mmu.h | 6
 drivers/gpu/msm/kgsl_pwrctrl.c | 7
 drivers/input/touchscreen/it7258_ts_i2c.c | 851
 drivers/iommu/arm-smmu.c | 34
 drivers/media/platform/msm/camera_v2/camera/camera.c | 4
 drivers/media/platform/msm/camera_v2/isp/msm_isp40.c | 96
 drivers/media/platform/msm/camera_v2/isp/msm_isp44.c | 92
 drivers/media/platform/msm/camera_v2/isp/msm_isp46.c | 95
 drivers/media/platform/msm/camera_v2/isp/msm_isp47.c | 167
 drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c | 30
 drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c | 15
 drivers/mfd/wcd9xxx-utils.c | 4
 drivers/misc/qcom/qdsp6v2/q6audio_v2.c | 7
 drivers/of/of_batterydata.c | 23
 drivers/power/qcom-charger/qpnp-fg.c | 17
 drivers/power/qcom-charger/qpnp-smb2.c | 141
 drivers/power/qcom-charger/qpnp-smbcharger.c | 18
 drivers/power/qcom-charger/smb-lib.h | 6
 drivers/power/qcom-charger/smb-reg.h | 9
 drivers/regulator/cpr3-regulator.c | 24
 drivers/regulator/cpr3-regulator.h | 9
 drivers/regulator/cprh-kbss-regulator.c | 73
 drivers/soc/qcom/glink_private.h | 1
 drivers/soc/qcom/glink_smem_native_xprt.c | 2
 drivers/usb/dwc3/gadget.c | 11
 drivers/usb/host/xhci-pci.c | 1
 drivers/usb/host/xhci-plat.c | 1
 drivers/usb/host/xhci-ring.c | 3
 drivers/usb/host/xhci.c | 8
 drivers/usb/host/xhci.h | 1
 drivers/video/fbdev/msm/mdss_mdp.c | 13
 drivers/video/fbdev/msm/mdss_mdp_ctl.c | 3
 include/linux/of_batterydata.h | 11
 include/soc/qcom/smem.h | 1
 include/uapi/linux/msm_kgsl.h | 2
 kernel/drivers/input/touchscreen/msg21xx/msg21xx_ts.c | 1757
 sound/soc/codecs/wcd934x/wcd934x.c | 78
 sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c | 5
 sound/usb/usb_audio_qmi_svc.c | 36
 sound/usb/usb_audio_qmi_v01.c | 368
 sound/usb/usb_audio_qmi_v01.h | 75
 88 files changed, 6149 insertions(+), 2796 deletions(-)
diff --git a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
index 2bd7653af5a3..cee9b942a9e3 100644
--- a/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
+++ b/Documentation/devicetree/bindings/arm/msm/qcom,osm.txt
@@ -182,6 +182,14 @@ Properties:
command register for each of the two clusters managed
by the OSM controller.
+- qcom,apm-threshold-voltage
+ Usage: required
+ Value type: <u32>
+ Definition: Specifies the APM threshold voltage in microvolts. If the
+ VDD_APCC supply voltage is at or above this level, then the
+ APM is switched to use VDD_APCC. If VDD_APCC is below
+ this level, then the APM is switched to use VDD_MX.
+
- qcom,apm-mode-ctl
Usage: required
Value type: <prop-encoded-array>
@@ -392,6 +400,8 @@ Example:
qcom,apm-ctrl-status =
<0x179d000c 0x179d0018>;
+ qcom,apm-threshold-voltage = <832000>;
+
qcom,pwrcl-apcs-mem-acc-cfg =
<0x179d1360 0x179d1364 0x179d1364>;
qcom,perfcl-apcs-mem-acc-cfg =
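
The rule this binding text describes reduces to a single threshold compare. A minimal illustrative sketch in C (the real decision is made by the OSM hardware sequencer, not by driver code):

	/* Illustrative only: the supply-selection rule described by
	 * qcom,apm-threshold-voltage. Voltages are in microvolts.
	 */
	static int apm_selects_vdd_apcc(unsigned int vdd_apcc_uv,
					unsigned int threshold_uv)
	{
		/* at or above the threshold -> VDD_APCC; below -> VDD_MX */
		return vdd_apcc_uv >= threshold_uv;
	}

With the example value of 832000, a VDD_APCC of 836000 uV selects VDD_APCC, while 828000 uV selects VDD_MX.
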
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
index 58db2c2350e4..368f0b8c9525 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
@@ -50,6 +50,12 @@ Charger specific properties:
Value type: <u32>
Definition: Specifies the DC input current limit in micro-amps.
+- qcom,wipower-max-uw
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the DC input power limit in micro-watts.
+ If the value is not present, 8W is used as default.
+
=============================================
Second Level Nodes - SMB2 Charger Peripherals
=============================================
@@ -80,9 +86,7 @@ pmicobalt_charger: qcom,qpnp-smb2 {
#address-cells = <1>;
#size-cells = <1>;
- qcom,pmic-revid = <&pmicobalt_revid>;
qcom,suspend-input;
- qcom,disable-charging;
dpdm-supply = <&qusb_phy0>;
qcom,chgr@1000 {
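
A sketch of how a charger driver might honor the documented 8 W default when parsing this optional property (illustrative; the actual qpnp-smb2 parsing code may differ):

	#include <linux/of.h>

	#define WIPOWER_DEFAULT_UW	8000000	/* 8 W, per the binding */

	static u32 smb2_get_wipower_max_uw(struct device_node *node)
	{
		u32 max_uw = WIPOWER_DEFAULT_UW;

		/* of_property_read_u32() leaves max_uw untouched on
		 * failure, so the default survives when the optional
		 * property is absent.
		 */
		of_property_read_u32(node, "qcom,wipower-max-uw", &max_uw);
		return max_uw;
	}
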
diff --git a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
index b9143cfc2587..833fb645b92a 100644
--- a/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/cprh-kbss-regulator.txt
@@ -42,10 +42,21 @@ KBSS specific properties:
- qcom,apm-threshold-voltage
Usage: optional
Value type: <u32>
- Definition: Specifies the APM threshold voltage in microvolts. If the
- VDD_APCC supply voltage is above this level, then the APM is
- switched to use VDD_APCC. If VDD_APCC is below this level,
- then the APM is switched to use VDD_MX.
+ Definition: Specifies the APM threshold voltage in microvolts. The
+ floor to ceiling range for every corner is adjusted to ensure
+ it does not intersect this voltage. The value of this property
+ must match the APM threshold voltage defined in the OSM
+ device to ensure that if the VDD_APCC supply voltage is above
+ this level, then the APM is switched to use VDD_APCC and if
+ VDD_APCC is below this level, then the APM is switched to use
+ VDD_MX.
+
+- qcom,apm-crossover-voltage
+ Usage: required if qcom,apm-threshold-voltage is specified
+ Value type: <u32>
+ Definition: Specifies the APM crossover voltage in microvolts which
+ corresponds to the voltage the VDD supply must be set at
+ during an APM switch transition.
- qcom,apm-hysteresis-voltage
Usage: optional
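
Because qcom,apm-crossover-voltage is required only when the threshold is present, parse-time validation would look roughly like this (a sketch; the function name is illustrative and not taken from cprh-kbss-regulator.c):

	#include <linux/of.h>

	static int cpr_parse_apm_voltages(struct device_node *node,
					  u32 *thresh_uv, u32 *crossover_uv)
	{
		int rc;

		rc = of_property_read_u32(node, "qcom,apm-threshold-voltage",
					  thresh_uv);
		if (rc)
			return 0;	/* both properties absent is valid */

		/* crossover voltage is required whenever a threshold is given */
		return of_property_read_u32(node, "qcom,apm-crossover-voltage",
					    crossover_uv);
	}
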
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi
index 3c39a61c4328..86decf438430 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-bus.dtsi
@@ -131,6 +131,16 @@
clock-names = "bus_clk", "bus_a_clk";
clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_gcc clk_mmssnoc_axi_a_clk>;
+ clk-mdss-axi-no-rate-supply =
+ <&gdsc_mdss>;
+ clk-mdss-ahb-no-rate-supply =
+ <&gdsc_mdss>;
+ clk-camss-ahb-no-rate-supply =
+ <&gdsc_camss_top>;
+ clk-video-ahb-no-rate-supply =
+ <&gdsc_venus>;
+ clk-video-axi-no-rate-supply =
+ <&gdsc_venus>;
qcom,node-qos-clks {
clock-names =
"clk-noc-cfg-ahb-no-rate",
@@ -141,6 +151,7 @@
"clk-video-ahb-no-rate",
"clk-video-axi-no-rate";
clocks =
+ <&clock_gcc clk_mmssnoc_axi_clk>,
<&clock_gcc clk_gcc_mmss_noc_cfg_ahb_clk>,
<&clock_mmss clk_mmss_mnoc_ahb_clk>,
<&clock_mmss clk_mmss_mdss_ahb_clk>,
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index 12ee61b34d8c..5833b30d1fd1 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -576,6 +576,7 @@
qcom,cpr-voltage-settling-time = <1760>;
qcom,apm-threshold-voltage = <832000>;
+ qcom,apm-crossover-voltage = <880000>;
qcom,apm-hysteresis-voltage = <32000>;
qcom,voltage-step = <4000>;
qcom,voltage-base = <352000>;
@@ -737,6 +738,7 @@
qcom,cpr-voltage-settling-time = <1760>;
qcom,apm-threshold-voltage = <832000>;
+ qcom,apm-crossover-voltage = <880000>;
qcom,apm-hysteresis-voltage = <32000>;
qcom,voltage-step = <4000>;
qcom,voltage-base = <352000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index e748783b0c7d..5dc530ea8494 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -804,21 +804,21 @@
< 576000000 0x0504001e 0x03200020 0x1 >,
< 633600000 0x05040021 0x03200020 0x1 >,
< 710400000 0x05040025 0x03200020 0x1 >,
- < 806400000 0x0504002a 0x04200020 0x2 >,
- < 883200000 0x0404002e 0x04250025 0x2 >,
- < 960000000 0x04040032 0x05280028 0x2 >,
- < 1036800000 0x04040036 0x052b002b 0x3 >,
- < 1113600000 0x0404003a 0x052e002e 0x3 >,
- < 1190400000 0x0404003e 0x06320032 0x3 >,
- < 1248000000 0x04040041 0x06340034 0x3 >,
- < 1324800000 0x04040045 0x06370037 0x3 >,
- < 1401600000 0x04040049 0x073a003a 0x3 >,
- < 1478400000 0x0404004d 0x073e003e 0x3 >,
- < 1574400000 0x04040052 0x08420042 0x4 >,
- < 1651200000 0x04040056 0x08450045 0x4 >,
- < 1728000000 0x0404005a 0x08480048 0x4 >,
- < 1804800000 0x0404005e 0x094b004b 0x4 >,
- < 1881600000 0x04040062 0x094e004e 0x4 >;
+ < 806400000 0x0504002a 0x04200020 0x1 >,
+ < 883200000 0x0404002e 0x04250025 0x1 >,
+ < 960000000 0x04040032 0x05280028 0x1 >,
+ < 1036800000 0x04040036 0x052b002b 0x2 >,
+ < 1113600000 0x0404003a 0x052e002e 0x2 >,
+ < 1190400000 0x0404003e 0x06320032 0x2 >,
+ < 1248000000 0x04040041 0x06340034 0x2 >,
+ < 1324800000 0x04040045 0x06370037 0x2 >,
+ < 1401600000 0x04040049 0x073a003a 0x2 >,
+ < 1478400000 0x0404004d 0x073e003e 0x2 >,
+ < 1574400000 0x04040052 0x08420042 0x3 >,
+ < 1651200000 0x04040056 0x08450045 0x3 >,
+ < 1728000000 0x0404005a 0x08480048 0x3 >,
+ < 1804800000 0x0404005e 0x094b004b 0x3 >,
+ < 1881600000 0x04040062 0x094e004e 0x3 >;
qcom,perfcl-speedbin0-v0 =
< 300000000 0x0004000f 0x01200020 0x1 >,
@@ -888,6 +888,7 @@
<0x8fff0036 0x8fff003a 0x0fff0036>,
<0x8fff003d 0x8fff0041 0x0fff003d>;
+ qcom,apm-threshold-voltage = <832000>;
qcom,boost-fsm-en;
qcom,safe-fsm-en;
qcom,ps-fsm-en;
@@ -900,13 +901,11 @@
qcom,perfcl-apcs-mem-acc-cfg =
<0x179d1368 0x179d136C 0x179d1370>;
qcom,pwrcl-apcs-mem-acc-val =
- <0x00000000 0x10000000 0x10000000>,
- <0x00000000 0x10000000 0x10000000>,
+ <0x00000000 0x80000000 0x80000000>,
<0x00000000 0x00000000 0x00000000>,
<0x00000000 0x00000001 0x00000001>;
qcom,perfcl-apcs-mem-acc-val =
- <0x00000000 0x00000000 0x10000000>,
- <0x00000000 0x00000000 0x10000000>,
+ <0x00000000 0x00000000 0x80000000>,
<0x00000000 0x00000000 0x00000000>,
<0x00000000 0x00000000 0x00000001>;
@@ -966,8 +965,12 @@
qcom,do-not-use-ch-gsi-20;
qcom,ipa-wdi2;
qcom,use-64-bit-dma-mask;
- clock-names = "core_clk";
- clocks = <&clock_gcc clk_ipa_clk>;
+ clocks = <&clock_gcc clk_ipa_clk>,
+ <&clock_gcc clk_aggre2_noc_clk>;
+ clock-names = "core_clk", "smmu_clk";
+ qcom,arm-smmu;
+ qcom,smmu-disable-htw;
+ qcom,smmu-s1-bypass;
qcom,msm-bus,name = "ipa";
qcom,msm-bus,num-cases = <4>;
qcom,msm-bus,num-paths = <3>;
@@ -1074,6 +1077,23 @@
compatible = "qcom,smp2pgpio-map-ipa-1-in";
gpios = <&smp2pgpio_ipa_1_in 0 0>;
};
+
+ ipa_smmu_ap: ipa_smmu_ap {
+ compatible = "qcom,ipa-smmu-ap-cb";
+ iommus = <&anoc2_smmu 0x18e0>;
+ qcom,iova-mapping = <0x10000000 0x40000000>;
+ };
+
+ ipa_smmu_wlan: ipa_smmu_wlan {
+ compatible = "qcom,ipa-smmu-wlan-cb";
+ iommus = <&anoc2_smmu 0x18e1>;
+ };
+
+ ipa_smmu_uc: ipa_smmu_uc {
+ compatible = "qcom,ipa-smmu-uc-cb";
+ iommus = <&anoc2_smmu 0x18e2>;
+ qcom,iova-mapping = <0x40000000 0x20000000>;
+ };
};
qcom,ipa_fws@1e08000 {
diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
index 2b19c74bd3cb..ea60ed90cf4f 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
@@ -313,6 +313,59 @@
compatible = "qcom,dummycc";
#clock-cells = <1>;
};
+
+ qcom,ipc-spinlock@1f40000 {
+ compatible = "qcom,ipc-spinlock-sfpb";
+ reg = <0x1f40000 0x8000>;
+ qcom,num-locks = <8>;
+ };
+
+ qcom,smem@86000000 {
+ compatible = "qcom,smem";
+ reg = <0x86000000 0x200000>,
+ <0x17911008 0x4>,
+ <0x778000 0x7000>,
+ <0x1fd4000 0x8>;
+ reg-names = "smem", "irq-reg-base", "aux-mem1",
+ "smem_targ_info_reg";
+ qcom,mpu-enabled;
+ };
+
+ glink_mpss: qcom,glink-ssr-modem {
+ compatible = "qcom,glink_ssr";
+ label = "modem";
+ qcom,edge = "mpss";
+ qcom,notify-edges = <&glink_lpass>, <&glink_rpm>,
+ <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_lpass: qcom,glink-ssr-adsp {
+ compatible = "qcom,glink_ssr";
+ label = "adsp";
+ qcom,edge = "lpass";
+ qcom,notify-edges = <&glink_mpss>, <&glink_rpm>,
+ <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_rpm: qcom,glink-ssr-rpm {
+ compatible = "qcom,glink_ssr";
+ label = "rpm";
+ qcom,edge = "rpm";
+ qcom,notify-edges = <&glink_lpass>, <&glink_mpss>,
+ <&glink_cdsp>;
+ qcom,xprt = "smem";
+ };
+
+ glink_cdsp: qcom,glink-ssr-cdsp {
+ compatible = "qcom,glink_ssr";
+ label = "cdsp";
+ qcom,edge = "cdsp";
+ qcom,notify-edges = <&glink_lpass>, <&glink_mpss>,
+ <&glink_rpm>;
+ qcom,xprt = "smem";
+ };
};
#include "msmfalcon-ion.dtsi"
diff --git a/arch/arm/mm/dma-mapping.c b/arch/arm/mm/dma-mapping.c
index bf6a92504175..d41957eae6ef 100644
--- a/arch/arm/mm/dma-mapping.c
+++ b/arch/arm/mm/dma-mapping.c
@@ -2284,6 +2284,7 @@ void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
set_dma_ops(dev, dma_ops);
}
+EXPORT_SYMBOL(arch_setup_dma_ops);
void arch_teardown_dma_ops(struct device *dev)
{
diff --git a/arch/arm64/configs/msm-perf_defconfig b/arch/arm64/configs/msm-perf_defconfig
index fc2cce36bff9..f396b0b7f4cc 100644
--- a/arch/arm64/configs/msm-perf_defconfig
+++ b/arch/arm64/configs/msm-perf_defconfig
@@ -408,7 +408,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8996=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
diff --git a/arch/arm64/configs/msm_defconfig b/arch/arm64/configs/msm_defconfig
index 38e489936895..c2902be72848 100644
--- a/arch/arm64/configs/msm_defconfig
+++ b/arch/arm64/configs/msm_defconfig
@@ -397,7 +397,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSM8996=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 72584917f930..be3e4ce1492a 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -390,7 +390,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSMCOBALT=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_XHCI_HCD=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index e708960cf28b..2e9a7908307b 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -395,7 +395,10 @@ CONFIG_SND_SOC=y
CONFIG_SND_SOC_MSMCOBALT=y
CONFIG_UHID=y
CONFIG_HID_APPLE=y
+CONFIG_HID_ELECOM=y
+CONFIG_HID_MAGICMOUSE=y
CONFIG_HID_MICROSOFT=y
+CONFIG_HID_MULTITOUCH=y
CONFIG_USB=y
CONFIG_USB_XHCI_HCD=y
CONFIG_USB_EHCI_HCD=y
diff --git a/drivers/clk/msm/Makefile b/drivers/clk/msm/Makefile
index d414d71c361b..ecf0b09bb49a 100644
--- a/drivers/clk/msm/Makefile
+++ b/drivers/clk/msm/Makefile
@@ -27,4 +27,4 @@ ifeq ($(CONFIG_COMMON_CLK_MSM), y)
endif
obj-$(CONFIG_COMMON_CLK_MSM) += gdsc.o
-obj-$(CONFIG_COMMON_CLK_MSM)-y += mdss/
+obj-$(CONFIG_COMMON_CLK_MSM) += mdss/
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 598e52b54c99..9d605503520f 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -75,8 +75,7 @@ enum clk_osm_trace_packet_id {
#define MEM_ACC_SEQ_REG_CFG_START(n) (SEQ_REG(12 + (n)))
#define MEM_ACC_SEQ_CONST(n) (n)
#define MEM_ACC_INSTR_COMP(n) (0x67 + ((n) * 0x40))
-#define MEM_ACC_SEQ_REG_VAL_START(n) \
- ((n) < 8 ? SEQ_REG(4 + (n)) : SEQ_REG(60 + (n) - 8))
+#define MEM_ACC_SEQ_REG_VAL_START(n) (SEQ_REG(60 + (n)))
#define OSM_TABLE_SIZE 40
#define MAX_CLUSTER_CNT 2
@@ -125,6 +124,7 @@ enum clk_osm_trace_packet_id {
#define SPM_CC_CTRL 0x1028
#define SPM_CC_HYSTERESIS 0x101C
#define SPM_CORE_RET_MAPPING 0x1024
+#define CFG_DELAY_VAL_3 0x12C
#define LLM_FREQ_VOTE_HYSTERESIS 0x102C
#define LLM_VOLT_VOTE_HYSTERESIS 0x1030
@@ -184,11 +184,11 @@ enum clk_osm_trace_packet_id {
#define MAX_INSTRUCTIONS 256
#define MAX_BR_INSTRUCTIONS 49
-#define MAX_MEM_ACC_LEVELS 4
+#define MAX_MEM_ACC_LEVELS 3
#define MAX_MEM_ACC_VAL_PER_LEVEL 3
#define MAX_MEM_ACC_VALUES (MAX_MEM_ACC_LEVELS * \
MAX_MEM_ACC_VAL_PER_LEVEL)
-#define MEM_ACC_READ_MASK 0x7
+#define MEM_ACC_APM_READ_MASK 0xff
#define TRACE_CTRL 0x1F38
#define TRACE_CTRL_EN_MASK BIT(0)
@@ -203,6 +203,11 @@ enum clk_osm_trace_packet_id {
#define PERIODIC_TRACE_MAX_NS 21474836475
#define PERIODIC_TRACE_DEFAULT_NS 1000000
+#define PLL_DD_USER_CTL_LO_ENABLE 0x0f04c408
+#define PLL_DD_USER_CTL_LO_DISABLE 0x1f04c41f
+#define PLL_DD_D0_USER_CTL_LO 0x17916208
+#define PLL_DD_D1_USER_CTL_LO 0x17816208
+
static void __iomem *virt_base;
#define lmh_lite_clk_src_source_val 1
@@ -222,50 +227,48 @@ static void __iomem *virt_base;
static u32 seq_instr[] = {
0xc2005000, 0x2c9e3b21, 0xc0ab2cdc, 0xc2882525, 0x359dc491,
- 0x700a500b, 0x70005001, 0x390938c8, 0xcb44c833, 0xce56cd54,
- 0x341336e0, 0xadba0000, 0x10004000, 0x70005001, 0x1000500c,
- 0xc792c5a1, 0x501625e1, 0x3da335a2, 0x50170006, 0x50150006,
- 0xafb9c633, 0xacb31000, 0xacb41000, 0x1000c422, 0x500baefc,
- 0x5001700a, 0xaefd7000, 0x700b5010, 0x700c5012, 0xadb9ad41,
- 0x181b0000, 0x500f500c, 0x34135011, 0x84b9181b, 0xbd808539,
- 0x2ba40003, 0x0006a001, 0x10007105, 0x1000500e, 0x1c0a500c,
- 0x3b181c01, 0x3b431c06, 0x10001c07, 0x39831c06, 0x500c1c07,
- 0x1c0a1c02, 0x10000000, 0x70015002, 0x10000000, 0x50038103,
- 0x50047002, 0x10007003, 0x39853b44, 0x50038104, 0x40037002,
- 0x70095005, 0xb1c0a146, 0x238b0003, 0x10004005, 0x848b8308,
- 0x1000850c, 0x848e830d, 0x1000850c, 0x3a4c5006, 0x3a8f39cd,
- 0x40063ad0, 0x50071000, 0x2c127006, 0x4007a00f, 0x71050006,
- 0x1000700d, 0x1c1aa964, 0x700d4007, 0x50071000, 0x1c167006,
- 0x50125010, 0x40072411, 0x4007700d, 0xa00f1000, 0x0006a821,
- 0x40077105, 0x500c700d, 0x1c1591ad, 0x5011500f, 0x10000000,
- 0x500c2bd4, 0x0006a00f, 0x10007105, 0xa821a00f, 0x70050006,
- 0x91ad500c, 0x500f1c15, 0x10005011, 0x1c162bce, 0x50125010,
- 0xa82aa022, 0x71050006, 0x1c1591a6, 0x5011500f, 0x5014500c,
- 0x0006a00f, 0x00007105, 0x91a41000, 0x22175013, 0x1c1aa963,
- 0x22171000, 0x1c1aa963, 0x50081000, 0x40087007, 0x1c1aa963,
- 0x70085009, 0x10004009, 0x850c848e, 0x0003b1c0, 0x400d2b99,
- 0x500d1000, 0xabaf1000, 0x853184b0, 0x0003bb80, 0xa0371000,
- 0x71050006, 0x85481000, 0xbf8084c3, 0x2ba80003, 0xbf8084c2,
- 0x2ba70003, 0xbf8084c1, 0x2ba60003, 0x8ec71000, 0xc6dd8dc3,
- 0x8c1625ec, 0x8d498c97, 0x8ec61c00, 0xc6dd8dc2, 0x8c1325ec,
- 0x8d158c94, 0x8ec51c00, 0xc6dd8dc1, 0x8c1025ec, 0x8d128c91,
- 0x8dc01c00, 0x182cc633, 0x84c08548, 0x0003bf80, 0x84c12ba9,
- 0x0003bf80, 0x84c22baa, 0x0003bf80, 0x10002bab, 0x8dc08ec4,
- 0x25ecc6dd, 0x8c948c13, 0x1c008d15, 0x8dc18ec5, 0x25ecc6dd,
- 0x8c978c16, 0x1c008d49, 0x8dc28ec6, 0x25ecc6dd, 0x8ccb8c4a,
- 0x1c008d4c, 0xc6338dc3, 0x1000af9b, 0xa759a79a, 0x1000a718,
+ 0x700a500b, 0x5001aefc, 0xaefd7000, 0x390938c8, 0xcb44c833,
+ 0xce56cd54, 0x341336e0, 0xa4baadba, 0xb480a493, 0x10004000,
+ 0x70005001, 0x1000500c, 0xc792c5a1, 0x501625e1, 0x3da335a2,
+ 0x50170006, 0x50150006, 0x1000c633, 0x1000acb3, 0xc422acb4,
+ 0xaefc1000, 0x700a500b, 0x70005001, 0x5010aefd, 0x5012700b,
+ 0xad41700c, 0x84e5adb9, 0xb3808566, 0x239b0003, 0x856484e3,
+ 0xb9800007, 0x2bad0003, 0xac3aa20b, 0x0003181b, 0x0003bb40,
+ 0xa30d239b, 0x500c181b, 0x5011500f, 0x181b3413, 0x853984b9,
+ 0x0003bd80, 0xa0012ba4, 0x72050803, 0x500e1000, 0x500c1000,
+ 0x1c011c0a, 0x3b181c06, 0x1c073b43, 0x1c061000, 0x1c073983,
+ 0x1c02500c, 0x10001c0a, 0x70015002, 0x81031000, 0x70025003,
+ 0x70035004, 0x3b441000, 0x81553985, 0x70025003, 0x50054003,
+ 0xa1467009, 0x0003b1c0, 0x4005238b, 0x835a1000, 0x855c84db,
+ 0x1000a51f, 0x84de835d, 0xa52c855c, 0x50061000, 0x39cd3a4c,
+ 0x3ad03a8f, 0x10004006, 0x70065007, 0xa00f2c12, 0x08034007,
+ 0xaefc7205, 0xaefd700d, 0xa9641000, 0x40071c1a, 0x700daefc,
+ 0x1000aefd, 0x70065007, 0x50101c16, 0x40075012, 0x700daefc,
+ 0x2411aefd, 0xa8211000, 0x0803a00f, 0x500c7005, 0x1c1591e0,
+ 0x500f5014, 0x10005011, 0x500c2bd4, 0x0803a00f, 0x10007205,
+ 0xa00fa9d1, 0x0803a821, 0xa9d07005, 0x91e0500c, 0x500f1c15,
+ 0x10005011, 0x1c162bce, 0x50125010, 0xa022a82a, 0x70050803,
+ 0x1c1591df, 0x5011500f, 0x5014500c, 0x0803a00f, 0x10007205,
+ 0x501391a4, 0x22172217, 0x70075008, 0xa9634008, 0x1c1a0006,
+ 0x70085009, 0x10004009, 0x00008ed9, 0x3e05c8dd, 0x1c033604,
+ 0xabaf1000, 0x856284e1, 0x0003bb80, 0x1000239f, 0x0803a037,
+ 0x10007205, 0x8dc61000, 0x38a71c2a, 0x1c2a8dc4, 0x100038a6,
+ 0x1c2a8dc5, 0x8dc73867, 0x38681c2a, 0x8c491000, 0x8d4b8cca,
+ 0x10001c00, 0x8ccd8c4c, 0x1c008d4e, 0x8c4f1000, 0x8d518cd0,
+ 0x10001c00, 0xa759a79a, 0x1000a718, 0xbf80af9b, 0x00001000,
};
static u32 seq_br_instr[] = {
- 0x28c, 0x1e6, 0x238, 0xd0, 0xec,
- 0xf4, 0xbc, 0xc4, 0x9c, 0xac,
- 0xfc, 0xe2, 0x154, 0x174, 0x17c,
- 0x10a, 0x126, 0x13a, 0x11c, 0x98,
- 0x160, 0x1a6, 0x19a, 0x1ae, 0x1c0,
- 0x1ce, 0x1d2, 0x30, 0x60, 0x86,
- 0x7c, 0x1d8, 0x34, 0x3c, 0x56,
- 0x5a, 0x1de, 0x2e, 0x222, 0x212,
- 0x202, 0x254, 0x264, 0x274, 0x288,
+ 0x248, 0x20e, 0x21c, 0xf6, 0x112,
+ 0x11c, 0xe4, 0xea, 0xc6, 0xd6,
+ 0x126, 0x108, 0x184, 0x1a8, 0x1b0,
+ 0x134, 0x158, 0x16e, 0x14a, 0xc2,
+ 0x190, 0x1d2, 0x1cc, 0x1d4, 0x1e8,
+ 0x0, 0x1f6, 0x32, 0x66, 0xb0,
+ 0xa6, 0x1fc, 0x3c, 0x44, 0x5c,
+ 0x60, 0x204, 0x30, 0x22a, 0x234,
+ 0x23e, 0x0, 0x250, 0x0, 0x0, 0x9a,
+ 0x20c,
};
DEFINE_EXT_CLK(xo_ao, NULL);
@@ -298,6 +301,7 @@ struct clk_osm {
u32 cluster_num;
u32 irq;
u32 apm_crossover_vc;
+ u32 apm_threshold_vc;
u32 cycle_counter_reads;
u32 cycle_counter_delay;
u32 cycle_counter_factor;
@@ -551,8 +555,8 @@ static void clk_osm_print_osm_table(struct clk_osm *c)
lval,
table[i].spare_data);
}
- pr_debug("APM crossover corner: %d\n",
- c->apm_crossover_vc);
+ pr_debug("APM threshold corner=%d, crossover corner=%d\n",
+ c->apm_threshold_vc, c->apm_crossover_vc);
}
static int clk_osm_get_lut(struct platform_device *pdev,
@@ -1116,10 +1120,21 @@ exit:
static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
struct platform_device *pdev)
{
+ struct regulator *regulator = c->vdd_reg;
struct dev_pm_opp *opp;
unsigned long freq = 0;
- int vc, rc = 0;
+ int vc, i, threshold, rc = 0;
+ u32 corner_volt, data;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,apm-threshold-voltage",
+ &threshold);
+ if (rc) {
+ pr_info("qcom,apm-threshold-voltage property not specified\n");
+ return rc;
+ }
+
+ /* Determine crossover virtual corner */
rcu_read_lock();
opp = dev_pm_opp_find_freq_exact(&c->vdd_dev->dev, freq, true);
if (IS_ERR(opp)) {
@@ -1138,6 +1153,48 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
vc--;
c->apm_crossover_vc = vc;
+ /* Determine threshold virtual corner */
+ for (i = 0; i < OSM_TABLE_SIZE; i++) {
+ freq = c->osm_table[i].frequency;
+ /*
+ * Only frequencies that are supported across all configurations
+ * are present in the OPP table associated with the regulator
+ * device.
+ */
+ data = (c->osm_table[i].freq_data & GENMASK(18, 16)) >> 16;
+ if (data != MAX_CONFIG)
+ continue;
+
+ rcu_read_lock();
+ opp = dev_pm_opp_find_freq_exact(&c->vdd_dev->dev, freq, true);
+ if (IS_ERR(opp)) {
+ rc = PTR_ERR(opp);
+ if (rc == -ERANGE)
+ pr_err("Frequency %lu not found\n", freq);
+ goto exit;
+ }
+
+ vc = dev_pm_opp_get_voltage(opp);
+ if (!vc) {
+ pr_err("No virtual corner found for frequency %lu\n",
+ freq);
+ rc = -ERANGE;
+ goto exit;
+ }
+
+ rcu_read_unlock();
+
+ corner_volt = regulator_list_corner_voltage(regulator, vc);
+
+ /* CPR virtual corners are zero-based numbered */
+ vc--;
+
+ if (corner_volt >= threshold) {
+ c->apm_threshold_vc = vc;
+ break;
+ }
+ }
+
return 0;
exit:
rcu_read_unlock();
@@ -1413,55 +1470,77 @@ static void clk_osm_program_apm_regs(struct clk_osm *c)
*/
clk_osm_write_reg(c, c->apm_mode_ctl, SEQ_REG(2));
- /* Program mode value to switch APM from VDD_APCC to VDD_MX */
- clk_osm_write_reg(c, APM_MX_MODE, SEQ_REG(22));
-
- /* Program mode value to switch APM from VDD_MX to VDD_APCC */
- clk_osm_write_reg(c, APM_APC_MODE, SEQ_REG(25));
-
/* Program address of controller status register */
clk_osm_write_reg(c, c->apm_ctrl_status, SEQ_REG(3));
- /* Program mask used to determine status of APM power supply switch */
- clk_osm_write_reg(c, APM_MODE_SWITCH_MASK, SEQ_REG(24));
+ /* Program mode value to switch APM from VDD_APCC to VDD_MX */
+ clk_osm_write_reg(c, APM_MX_MODE, SEQ_REG(77));
/* Program value used to determine current APM power supply is VDD_MX */
- clk_osm_write_reg(c, APM_MX_MODE_VAL, SEQ_REG(23));
+ clk_osm_write_reg(c, APM_MX_MODE_VAL, SEQ_REG(78));
+
+ /* Program mask used to determine status of APM power supply switch */
+ clk_osm_write_reg(c, APM_MODE_SWITCH_MASK, SEQ_REG(79));
+
+ /* Program mode value to switch APM from VDD_MX to VDD_APCC */
+ clk_osm_write_reg(c, APM_APC_MODE, SEQ_REG(80));
/*
* Program value used to determine current APM power supply
* is VDD_APCC
*/
- clk_osm_write_reg(c, APM_APC_MODE_VAL, SEQ_REG(26));
+ clk_osm_write_reg(c, APM_APC_MODE_VAL, SEQ_REG(81));
}
static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
{
- int i;
+ int i, curr_level, j = 0;
+ int mem_acc_level_map[MAX_MEM_ACC_LEVELS] = {0, 0, 0};
- if (!c->secure_init)
- return;
+ curr_level = c->osm_table[0].spare_data;
+ for (i = 0; i < c->num_entries; i++) {
+ if (curr_level == MAX_MEM_ACC_LEVELS)
+ break;
- clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(50),
- SEQ_REG(49));
- clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(50));
- clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(51));
- clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(2), SEQ_REG(52));
- clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(3), SEQ_REG(53));
- clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(4), SEQ_REG(54));
- clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(0), SEQ_REG(55));
- clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(1), SEQ_REG(56));
- clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(2), SEQ_REG(57));
- clk_osm_write_reg(c, MEM_ACC_INSTR_COMP(3), SEQ_REG(58));
- clk_osm_write_reg(c, MEM_ACC_READ_MASK, SEQ_REG(59));
-
- for (i = 0; i < MAX_MEM_ACC_VALUES; i++)
- clk_osm_write_reg(c, c->apcs_mem_acc_val[i],
- MEM_ACC_SEQ_REG_VAL_START(i));
-
- for (i = 0; i < MAX_MEM_ACC_VAL_PER_LEVEL; i++)
- clk_osm_write_reg(c, c->apcs_mem_acc_cfg[i],
- MEM_ACC_SEQ_REG_CFG_START(i));
+ if (c->osm_table[i].spare_data != curr_level) {
+ mem_acc_level_map[j++] = i - 1;
+ curr_level = c->osm_table[i].spare_data;
+ }
+ }
+
+ if (c->secure_init) {
+ clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(51));
+ clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(2), SEQ_REG(52));
+ clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(3), SEQ_REG(53));
+ clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(4), SEQ_REG(54));
+ clk_osm_write_reg(c, MEM_ACC_APM_READ_MASK, SEQ_REG(59));
+ clk_osm_write_reg(c, mem_acc_level_map[0], SEQ_REG(55));
+ clk_osm_write_reg(c, mem_acc_level_map[0] + 1, SEQ_REG(56));
+ clk_osm_write_reg(c, mem_acc_level_map[1], SEQ_REG(57));
+ clk_osm_write_reg(c, mem_acc_level_map[1] + 1, SEQ_REG(58));
+ clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(28),
+ SEQ_REG(49));
+
+ for (i = 0; i < MAX_MEM_ACC_VALUES; i++)
+ clk_osm_write_reg(c, c->apcs_mem_acc_val[i],
+ MEM_ACC_SEQ_REG_VAL_START(i));
+
+ for (i = 0; i < MAX_MEM_ACC_VAL_PER_LEVEL; i++)
+ clk_osm_write_reg(c, c->apcs_mem_acc_cfg[i],
+ MEM_ACC_SEQ_REG_CFG_START(i));
+ } else {
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(55),
+ mem_acc_level_map[0]);
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(56),
+ mem_acc_level_map[0] + 1);
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(57),
+ mem_acc_level_map[1]);
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(58),
+ mem_acc_level_map[1] + 1);
+ /* SEQ_REG(49) = SEQ_REG(28) init by TZ */
+ }
+
+ return;
}
void clk_osm_setup_sequencer(struct clk_osm *c)
@@ -1500,10 +1579,12 @@ static void clk_osm_setup_cycle_counters(struct clk_osm *c)
static void clk_osm_setup_osm_was(struct clk_osm *c)
{
+ u32 cc_hyst;
u32 val;
val = clk_osm_read_reg(c, PDN_FSM_CTRL_REG);
val |= IGNORE_PLL_LOCK_MASK;
+ cc_hyst = clk_osm_read_reg(c, SPM_CC_HYSTERESIS);
if (c->secure_init) {
clk_osm_write_reg(c, val, SEQ_REG(47));
@@ -1518,10 +1599,51 @@ static void clk_osm_setup_osm_was(struct clk_osm *c)
clk_osm_write_reg(c, 0x0, SEQ_REG(45));
clk_osm_write_reg(c, c->pbases[OSM_BASE] + PDN_FSM_CTRL_REG,
SEQ_REG(46));
+
+ /* C2D/C3 + D2D workaround */
+ clk_osm_write_reg(c, c->pbases[OSM_BASE] + SPM_CC_HYSTERESIS,
+ SEQ_REG(6));
+ clk_osm_write_reg(c, cc_hyst, SEQ_REG(7));
+
+ /* Droop detector PLL lock detect workaround */
+ clk_osm_write_reg(c, PLL_DD_USER_CTL_LO_ENABLE, SEQ_REG(4));
+ clk_osm_write_reg(c, PLL_DD_USER_CTL_LO_DISABLE, SEQ_REG(5));
+ clk_osm_write_reg(c, c->cluster_num == 0 ? PLL_DD_D0_USER_CTL_LO
+ : PLL_DD_D1_USER_CTL_LO, SEQ_REG(21));
+
+ /* PLL lock detect and HMSS AHB clock workaround */
+ clk_osm_write_reg(c, 0x640, CFG_DELAY_VAL_3);
+
+ /* DxFSM workaround */
+ clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17911200 :
+ 0x17811200, SEQ_REG(22));
+ clk_osm_write_reg(c, 0x80800, SEQ_REG(23));
+ clk_osm_write_reg(c, 0x179D1100, SEQ_REG(24));
+ clk_osm_write_reg(c, 0x11f, SEQ_REG(25));
+ clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17912000 :
+ 0x17811290, SEQ_REG(26));
+ clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17911290 :
+ 0x17811290, SEQ_REG(20));
+ clk_osm_write_reg(c, c->cluster_num == 0 ? 0x17811290 :
+ 0x17911290, SEQ_REG(32));
+ clk_osm_write_reg(c, 0x179D4020, SEQ_REG(35));
+ clk_osm_write_reg(c, 0x11f, SEQ_REG(25));
+ clk_osm_write_reg(c, 0xa, SEQ_REG(86));
+ clk_osm_write_reg(c, 0xe, SEQ_REG(87));
+ clk_osm_write_reg(c, 0x00400000, SEQ_REG(88));
+ clk_osm_write_reg(c, 0x00700000, SEQ_REG(89));
} else {
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(47), val);
val &= ~IGNORE_PLL_LOCK_MASK;
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(48), val);
+
+ /* C2D/C3 + D2D workaround */
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(7),
+ cc_hyst);
+
+ /* Droop detector PLL lock detect workaround */
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(4),
+ PLL_DD_USER_CTL_LO_ENABLE);
}
if (c->cluster_num == 0) {
@@ -1671,18 +1793,15 @@ static void clk_osm_do_additional_setup(struct clk_osm *c,
/* APM Programming */
clk_osm_program_apm_regs(c);
- /* MEM-ACC Programming */
- clk_osm_program_mem_acc_regs(c);
-
/* GFMUX Programming */
clk_osm_write_reg(c, c->apcs_cfg_rcgr, SEQ_REG(16));
- clk_osm_write_reg(c, GPLL_SEL, SEQ_REG(17));
- clk_osm_write_reg(c, PLL_EARLY_SEL, SEQ_REG(20));
- clk_osm_write_reg(c, PLL_MAIN_SEL, SEQ_REG(32));
clk_osm_write_reg(c, c->apcs_cmd_rcgr, SEQ_REG(33));
clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(34));
- clk_osm_write_reg(c, RCG_UPDATE_SUCCESS, SEQ_REG(35));
- clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(36));
+ clk_osm_write_reg(c, GPLL_SEL, SEQ_REG(17));
+ clk_osm_write_reg(c, PLL_EARLY_SEL, SEQ_REG(82));
+ clk_osm_write_reg(c, PLL_MAIN_SEL, SEQ_REG(83));
+ clk_osm_write_reg(c, RCG_UPDATE_SUCCESS, SEQ_REG(84));
+ clk_osm_write_reg(c, RCG_UPDATE, SEQ_REG(85));
pr_debug("seq_size: %lu, seqbr_size: %lu\n", ARRAY_SIZE(seq_instr),
ARRAY_SIZE(seq_br_instr));
@@ -1693,17 +1812,43 @@ static void clk_osm_do_additional_setup(struct clk_osm *c,
static void clk_osm_apm_vc_setup(struct clk_osm *c)
{
/*
- * APM crossover virtual corner at which the switch
- * from APC to MX and vice-versa should take place.
+ * The APM crossover virtual corner corresponds to the switching
+ * voltage used during an APM transition. The APM threshold virtual
+ * corner is the first corner which requires the APM switch
+ * sequence from MX to APC.
*/
if (c->secure_init) {
- clk_osm_write_reg(c, c->apm_crossover_vc, SEQ_REG(1));
+ clk_osm_write_reg(c, c->apm_threshold_vc, SEQ_REG(1));
+ clk_osm_write_reg(c, c->apm_crossover_vc, SEQ_REG(72));
+ clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(1),
+ SEQ_REG(8));
+ clk_osm_write_reg(c, c->apm_threshold_vc,
+ SEQ_REG(15));
+ clk_osm_write_reg(c, c->apm_threshold_vc != 0 ?
+ c->apm_threshold_vc - 1 : 0xff,
+ SEQ_REG(31));
+ clk_osm_write_reg(c, 0x3b | c->apm_threshold_vc << 6,
+ SEQ_REG(73));
+ clk_osm_write_reg(c, 0x39 | c->apm_threshold_vc << 6,
+ SEQ_REG(76));
/* Ensure writes complete before returning */
mb();
} else {
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(1),
+ c->apm_threshold_vc);
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(72),
c->apm_crossover_vc);
+ /* SEQ_REG(8) = address of SEQ_REG(1) init by TZ */
+ clk_osm_write_reg(c, c->apm_threshold_vc,
+ SEQ_REG(15));
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31),
+ c->apm_threshold_vc != 0 ?
+ c->apm_threshold_vc - 1 : 0xff);
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(73),
+ 0x3b | c->apm_threshold_vc << 6);
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(76),
+ 0x39 | c->apm_threshold_vc << 6);
}
}
@@ -2412,6 +2557,10 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
clk_osm_do_additional_setup(&pwrcl_clk, pdev);
clk_osm_do_additional_setup(&perfcl_clk, pdev);
+ /* MEM-ACC Programming */
+ clk_osm_program_mem_acc_regs(&pwrcl_clk);
+ clk_osm_program_mem_acc_regs(&perfcl_clk);
+
/* Program APM crossover corners */
clk_osm_apm_vc_setup(&pwrcl_clk);
clk_osm_apm_vc_setup(&perfcl_clk);
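
The threshold-corner search added to clk_osm_resolve_crossover_corners() above amounts to: walk the virtual corners in ascending order and latch the first one whose voltage reaches the DT threshold. Modeled as a standalone function (a simplification; the real code goes through the OPP table and regulator_list_corner_voltage()):

	/* corner_uv[] holds each virtual corner's voltage in microvolts,
	 * in ascending corner order. Returns the zero-based index of the
	 * APM threshold corner, or -1 if no corner reaches the threshold.
	 */
	static int find_apm_threshold_vc(const unsigned int *corner_uv,
					 int num_corners,
					 unsigned int threshold_uv)
	{
		int vc;

		for (vc = 0; vc < num_corners; vc++)
			if (corner_uv[vc] >= threshold_uv)
				return vc;

		return -1;
	}
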
diff --git a/drivers/clk/qcom/Makefile b/drivers/clk/qcom/Makefile
index 94419695cd2e..dc1b66f84af2 100644
--- a/drivers/clk/qcom/Makefile
+++ b/drivers/clk/qcom/Makefile
@@ -12,6 +12,7 @@ clk-qcom-y += clk-regmap-mux.o
clk-qcom-$(CONFIG_KRAIT_CLOCKS) += clk-krait.o
clk-qcom-y += clk-hfpll.o
clk-qcom-y += reset.o
+clk-qcom-y += clk-dummy.o
clk-qcom-$(CONFIG_QCOM_GDSC) += gdsc.o
obj-$(CONFIG_APQ_GCC_8084) += gcc-apq8084.o
diff --git a/drivers/clk/qcom/clk-dummy.c b/drivers/clk/qcom/clk-dummy.c
new file mode 100644
index 000000000000..3205fbc6b8ba
--- /dev/null
+++ b/drivers/clk/qcom/clk-dummy.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
+
+struct clk_dummy {
+ struct clk_hw hw;
+ unsigned long rrate;
+};
+
+#define to_clk_dummy(_hw) container_of(_hw, struct clk_dummy, hw)
+
+static int dummy_clk_set_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long parent_rate)
+{
+ struct clk_dummy *dummy = to_clk_dummy(hw);
+
+ dummy->rrate = rate;
+
+ pr_debug("set rate: %lu\n", rate);
+
+ return 0;
+}
+
+static long dummy_clk_round_rate(struct clk_hw *hw, unsigned long rate,
+ unsigned long *parent_rate)
+{
+ return rate;
+}
+
+static unsigned long dummy_clk_recalc_rate(struct clk_hw *hw,
+ unsigned long parent_rate)
+{
+ struct clk_dummy *dummy = to_clk_dummy(hw);
+
+ pr_debug("clock rate: %lu\n", dummy->rrate);
+
+ return dummy->rrate;
+}
+
+struct clk_ops clk_dummy_ops = {
+ .set_rate = dummy_clk_set_rate,
+ .round_rate = dummy_clk_round_rate,
+ .recalc_rate = dummy_clk_recalc_rate,
+};
+EXPORT_SYMBOL_GPL(clk_dummy_ops);
+
+/**
+ * clk_register_dummy - register dummy clock with the
+ * clock framework
+ * @dev: device that is registering this clock
+ * @name: name of this clock
+ * @flags: framework-specific flags
+ */
+static struct clk *clk_register_dummy(struct device *dev, const char *name,
+ unsigned long flags)
+{
+ struct clk_dummy *dummy;
+ struct clk *clk;
+ struct clk_init_data init = {};
+
+ /* allocate dummy clock */
+ dummy = kzalloc(sizeof(*dummy), GFP_KERNEL);
+ if (!dummy)
+ return ERR_PTR(-ENOMEM);
+
+ init.name = name;
+ init.ops = &clk_dummy_ops;
+ init.flags = flags | CLK_IS_BASIC;
+ init.num_parents = 0;
+ dummy->hw.init = &init;
+
+ /* register the clock */
+ clk = clk_register(dev, &dummy->hw);
+ if (IS_ERR(clk))
+ kfree(dummy);
+
+ return clk;
+}
+
+/**
+ * of_dummy_clk_setup() - Setup function for simple fixed rate clock
+ */
+static void of_dummy_clk_setup(struct device_node *node)
+{
+ struct clk *clk;
+ const char *clk_name = "dummy_clk";
+
+ of_property_read_string(node, "clock-output-names", &clk_name);
+
+ clk = clk_register_dummy(NULL, clk_name, 0);
+ if (!IS_ERR(clk))
+ of_clk_add_provider(node, of_clk_src_simple_get, clk);
+
+ pr_info("%s: Dummy clock registered\n", clk_name);
+}
+CLK_OF_DECLARE(dummy_clk, "qcom,dummycc", of_dummy_clk_setup);
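
Once the provider above is registered (the msmfalcon.dtsi node later in this patch uses the matching "qcom,dummycc" compatible), a consumer exercises it through the ordinary clock API. A hedged usage sketch, assuming a hypothetical consumer probe:

	#include <linux/clk.h>
	#include <linux/platform_device.h>

	static int example_probe(struct platform_device *pdev)
	{
		struct clk *clk;

		/* connection id NULL: take the provider's only clock */
		clk = devm_clk_get(&pdev->dev, NULL);
		if (IS_ERR(clk))
			return PTR_ERR(clk);

		/* the dummy ops simply record and echo back the rate */
		return clk_set_rate(clk, 19200000);
	}
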
diff --git a/drivers/clk/qcom/common.h b/drivers/clk/qcom/common.h
index ae9bdeb21f29..10cabca921be 100644
--- a/drivers/clk/qcom/common.h
+++ b/drivers/clk/qcom/common.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -48,5 +48,5 @@ extern int qcom_cc_really_probe(struct platform_device *pdev,
struct regmap *regmap);
extern int qcom_cc_probe(struct platform_device *pdev,
const struct qcom_cc_desc *desc);
-
+extern struct clk_ops clk_dummy_ops;
#endif
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index db5a9ca28408..90aee3cad5ad 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -33,6 +33,8 @@ msm_adreno-y += \
adreno_a3xx_snapshot.o \
adreno_a4xx_snapshot.o \
adreno_a5xx_snapshot.o \
+ adreno_a4xx_preempt.o \
+ adreno_a5xx_preempt.o \
adreno_sysfs.o \
adreno.o \
adreno_cp_parser.o \
diff --git a/drivers/gpu/msm/a5xx_reg.h b/drivers/gpu/msm/a5xx_reg.h
index 913cedb885ad..207588844931 100644
--- a/drivers/gpu/msm/a5xx_reg.h
+++ b/drivers/gpu/msm/a5xx_reg.h
@@ -60,6 +60,8 @@
#define A5XX_CP_RB_BASE 0x800
#define A5XX_CP_RB_BASE_HI 0x801
#define A5XX_CP_RB_CNTL 0x802
+#define A5XX_CP_RB_RPTR_ADDR_LO 0x804
+#define A5XX_CP_RB_RPTR_ADDR_HI 0x805
#define A5XX_CP_RB_RPTR 0x806
#define A5XX_CP_RB_WPTR 0x807
#define A5XX_CP_PFP_STAT_ADDR 0x808
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 26e341a876e8..918231b73215 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -171,6 +171,30 @@ void adreno_writereg64(struct adreno_device *adreno_dev,
}
/**
+ * adreno_get_rptr() - Get the current ringbuffer read pointer
+ * @rb: Pointer to the ringbuffer to query
+ *
+ * Get the latest rptr
+ */
+unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ unsigned int rptr = 0;
+
+ if (adreno_is_a3xx(adreno_dev))
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
+ &rptr);
+ else {
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ kgsl_sharedmem_readl(&device->scratch, &rptr,
+ SCRATCH_RPTR_OFFSET(rb->id));
+ }
+
+ return rptr;
+}
+
+/**
* adreno_of_read_property() - Adreno read property
* @node: Device node
*
@@ -1290,6 +1314,28 @@ static void _update_threshold_count(struct adreno_device *adreno_dev,
adreno_dev->lm_threshold_cross = adj;
}
+static void _set_secvid(struct kgsl_device *device)
+{
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+
+ /* Program GPU context protection init values */
+ if (device->mmu.secured) {
+ if (adreno_is_a4xx(adreno_dev))
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TRUST_CONFIG, 0x2);
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);
+
+ adreno_writereg64(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
+ ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
+ KGSL_IOMMU_SECURE_BASE);
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
+ KGSL_IOMMU_SECURE_SIZE);
+ }
+}
+
/**
* _adreno_start - Power up the GPU and prepare to accept commands
* @adreno_dev: Pointer to an adreno_device structure
@@ -1332,26 +1378,13 @@ static int _adreno_start(struct adreno_device *adreno_dev)
if (regulator_left_on)
_soft_reset(adreno_dev);
+ adreno_ringbuffer_set_global(adreno_dev, 0);
+
status = kgsl_mmu_start(device);
if (status)
goto error_pwr_off;
- /* Program GPU contect protection init values */
- if (device->mmu.secured) {
- if (adreno_is_a4xx(adreno_dev))
- adreno_writereg(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TRUST_CONFIG, 0x2);
- adreno_writereg(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TSB_CONTROL, 0x0);
-
- adreno_writereg64(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE,
- ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_BASE_HI,
- KGSL_IOMMU_SECURE_BASE);
- adreno_writereg(adreno_dev,
- ADRENO_REG_RBBM_SECVID_TSB_TRUSTED_SIZE,
- KGSL_IOMMU_SECURE_SIZE);
- }
+ _set_secvid(device);
status = adreno_ocmem_malloc(adreno_dev);
if (status) {
@@ -1533,6 +1566,22 @@ static int adreno_vbif_clear_pending_transactions(struct kgsl_device *device)
return ret;
}
+static void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
+{
+ int i;
+ struct adreno_ringbuffer *rb;
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ if (rb->drawctxt_active)
+ kgsl_context_put(&(rb->drawctxt_active->base));
+ rb->drawctxt_active = NULL;
+
+ kgsl_sharedmem_writel(KGSL_DEVICE(adreno_dev),
+ &rb->pagetable_desc, PT_INFO_OFFSET(current_rb_ptname),
+ 0);
+ }
+}
+
static int adreno_stop(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1645,13 +1694,6 @@ int adreno_reset(struct kgsl_device *device, int fault)
else
kgsl_pwrctrl_change_state(device, KGSL_STATE_NAP);
- /* Set the page table back to the default page table */
- kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
- kgsl_sharedmem_writel(device,
- &adreno_dev->ringbuffers[0].pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname), 0);
-
return ret;
}
@@ -2094,9 +2136,15 @@ static int adreno_soft_reset(struct kgsl_device *device)
/* Reset the GPU */
_soft_reset(adreno_dev);
+ /* Set the page table back to the default page table */
+ adreno_ringbuffer_set_global(adreno_dev, 0);
+ kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
+
/* start of new CFF after reset */
kgsl_cffdump_open(device);
+ _set_secvid(device);
+
/* Enable 64 bit gpu addr if feature is set */
if (gpudev->enable_64bit &&
adreno_support_64bit(adreno_dev))
@@ -2149,8 +2197,6 @@ bool adreno_isidle(struct kgsl_device *device)
if (!kgsl_state_is_awake(device))
return true;
- adreno_get_rptr(ADRENO_CURRENT_RINGBUFFER(adreno_dev));
-
/*
* wptr is updated when we add commands to ringbuffer, add a barrier
* to make sure updated wptr is compared to rptr
@@ -2161,15 +2207,13 @@ bool adreno_isidle(struct kgsl_device *device)
* ringbuffer is truly idle when all ringbuffers read and write
* pointers are equal
*/
+
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- if (rb->rptr != rb->wptr)
- break;
+ if (!adreno_rb_empty(rb))
+ return false;
}
- if (i == adreno_dev->num_ringbuffers)
- return adreno_hw_isidle(adreno_dev);
-
- return false;
+ return adreno_hw_isidle(adreno_dev);
}
/**
@@ -2267,25 +2311,11 @@ static int adreno_drain(struct kgsl_device *device)
/* Caller must hold the device mutex. */
static int adreno_suspend_context(struct kgsl_device *device)
{
- int status = 0;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
-
/* process any profiling results that are available */
- adreno_profile_process_results(adreno_dev);
+ adreno_profile_process_results(ADRENO_DEVICE(device));
- status = adreno_idle(device);
- if (status)
- return status;
- /* set the device to default pagetable */
- kgsl_mmu_set_pt(&device->mmu, device->mmu.defaultpagetable);
- kgsl_sharedmem_writel(device,
- &adreno_dev->ringbuffers[0].pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname), 0);
- /* set ringbuffers to NULL ctxt */
- adreno_set_active_ctxs_null(adreno_dev);
-
- return status;
+ /* Wait for the device to go idle */
+ return adreno_idle(device);
}
/**
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index 7ac91f203a70..9f462bca26ce 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -193,6 +193,47 @@ enum adreno_gpurev {
struct adreno_gpudev;
+/* Time to allow preemption to complete (in ms) */
+#define ADRENO_PREEMPT_TIMEOUT 10000
+
+/**
+ * enum adreno_preempt_states
+ * ADRENO_PREEMPT_NONE: No preemption is scheduled
+ * ADRENO_PREEMPT_START: The S/W has started the preemption sequence
+ * ADRENO_PREEMPT_TRIGGERED: A preempt has been triggered in the HW
+ * ADRENO_PREEMPT_FAULTED: The preempt timer has fired
+ * ADRENO_PREEMPT_PENDING: The H/W has signaled preemption complete
+ * ADRENO_PREEMPT_COMPLETE: Preemption could not be finished in the IRQ handler,
+ * worker has been scheduled
+ */
+enum adreno_preempt_states {
+ ADRENO_PREEMPT_NONE = 0,
+ ADRENO_PREEMPT_START,
+ ADRENO_PREEMPT_TRIGGERED,
+ ADRENO_PREEMPT_FAULTED,
+ ADRENO_PREEMPT_PENDING,
+ ADRENO_PREEMPT_COMPLETE,
+};
+
+/**
+ * struct adreno_preemption
+ * @state: The current state of preemption
+ * @counters: Memory descriptor for the memory where the GPU writes the
+ * preemption counters on switch
+ * @timer: A timer to make sure preemption doesn't stall
+ * @work: A work struct for the preemption worker (for 5XX)
+ * @token_submit: Indicates if a preempt token has been submitted in
+ * current ringbuffer (for 4XX)
+ */
+struct adreno_preemption {
+ atomic_t state;
+ struct kgsl_memdesc counters;
+ struct timer_list timer;
+ struct work_struct work;
+ bool token_submit;
+};
+
+
struct adreno_busy_data {
unsigned int gpu_busy;
unsigned int vbif_ram_cycles;
@@ -368,7 +409,7 @@ struct adreno_device {
const struct firmware *lm_fw;
uint32_t *lm_sequence;
uint32_t lm_size;
- struct kgsl_memdesc preemption_counters;
+ struct adreno_preemption preempt;
struct work_struct gpmu_work;
uint32_t lm_leakage;
uint32_t lm_limit;
@@ -458,6 +499,8 @@ enum adreno_regs {
ADRENO_REG_CP_WFI_PEND_CTR,
ADRENO_REG_CP_RB_BASE,
ADRENO_REG_CP_RB_BASE_HI,
+ ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ ADRENO_REG_CP_RB_RPTR_ADDR_HI,
ADRENO_REG_CP_RB_RPTR,
ADRENO_REG_CP_RB_WPTR,
ADRENO_REG_CP_CNTL,
@@ -709,17 +752,12 @@ struct adreno_gpudev {
void (*pwrlevel_change_settings)(struct adreno_device *,
unsigned int prelevel, unsigned int postlevel,
bool post);
- int (*preemption_pre_ibsubmit)(struct adreno_device *,
- struct adreno_ringbuffer *, unsigned int *,
- struct kgsl_context *, uint64_t cond_addr,
- struct kgsl_memobj_node *);
+ unsigned int (*preemption_pre_ibsubmit)(struct adreno_device *,
+ struct adreno_ringbuffer *rb,
+ unsigned int *, struct kgsl_context *);
int (*preemption_yield_enable)(unsigned int *);
- int (*preemption_post_ibsubmit)(struct adreno_device *,
- struct adreno_ringbuffer *, unsigned int *,
- struct kgsl_context *);
- int (*preemption_token)(struct adreno_device *,
- struct adreno_ringbuffer *, unsigned int *,
- uint64_t gpuaddr);
+ unsigned int (*preemption_post_ibsubmit)(struct adreno_device *,
+ unsigned int *);
int (*preemption_init)(struct adreno_device *);
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
@@ -1260,34 +1298,32 @@ static inline int adreno_bootstrap_ucode(struct adreno_device *adreno_dev)
}
/**
- * adreno_preempt_state() - Check if preemption state is equal to given state
+ * adreno_in_preempt_state() - Check if preemption state is equal to given state
* @adreno_dev: Device whose preemption state is checked
* @state: State to compare against
*/
-static inline unsigned int adreno_preempt_state(
- struct adreno_device *adreno_dev,
- enum adreno_dispatcher_preempt_states state)
+static inline bool adreno_in_preempt_state(struct adreno_device *adreno_dev,
+ enum adreno_preempt_states state)
{
- return atomic_read(&adreno_dev->dispatcher.preemption_state) ==
- state;
+ return atomic_read(&adreno_dev->preempt.state) == state;
}
-
/**
- * adreno_get_rptr() - Get the current ringbuffer read pointer
- * @rb: Pointer the ringbuffer to query
- *
- * Get the current read pointer from the GPU register.
+ * adreno_set_preempt_state() - Set the specified preemption state
+ * @adreno_dev: Device to change preemption state
+ * @state: State to set
*/
-static inline unsigned int
-adreno_get_rptr(struct adreno_ringbuffer *rb)
+static inline void adreno_set_preempt_state(struct adreno_device *adreno_dev,
+ enum adreno_preempt_states state)
{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- if (adreno_dev->cur_rb == rb &&
- adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR))
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR, &(rb->rptr));
+ /*
+ * atomic_set doesn't use barriers, so we need to do it ourselves. One
+ * before...
+ */
+ smp_wmb();
+ atomic_set(&adreno_dev->preempt.state, state);
- return rb->rptr;
+ /* ... and one after */
+ smp_wmb();
}
static inline bool adreno_is_preemption_enabled(
@@ -1295,7 +1331,6 @@ static inline bool adreno_is_preemption_enabled(
{
return test_bit(ADRENO_DEVICE_PREEMPTION, &adreno_dev->priv);
}
-
/**
* adreno_ctx_get_rb() - Return the ringbuffer that a context should
* use based on priority
@@ -1332,25 +1367,6 @@ static inline struct adreno_ringbuffer *adreno_ctx_get_rb(
return &(adreno_dev->ringbuffers[
adreno_dev->num_ringbuffers - 1]);
}
-/*
- * adreno_set_active_ctxs_null() - Put back reference to any active context
- * and set the active context to NULL
- * @adreno_dev: The adreno device
- */
-static inline void adreno_set_active_ctxs_null(struct adreno_device *adreno_dev)
-{
- int i;
- struct adreno_ringbuffer *rb;
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- if (rb->drawctxt_active)
- kgsl_context_put(&(rb->drawctxt_active->base));
- rb->drawctxt_active = NULL;
- kgsl_sharedmem_writel(KGSL_DEVICE(adreno_dev),
- &rb->pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname), 0);
- }
-}
/*
* adreno_compare_prio_level() - Compares 2 priority levels based on enum values
@@ -1371,6 +1387,13 @@ void adreno_readreg64(struct adreno_device *adreno_dev,
void adreno_writereg64(struct adreno_device *adreno_dev,
enum adreno_regs lo, enum adreno_regs hi, uint64_t val);
+unsigned int adreno_get_rptr(struct adreno_ringbuffer *rb);
+
+static inline bool adreno_rb_empty(struct adreno_ringbuffer *rb)
+{
+ return (adreno_get_rptr(rb) == rb->wptr);
+}
+
static inline bool adreno_soft_fault_detect(struct adreno_device *adreno_dev)
{
return adreno_dev->fast_hang_detect &&
@@ -1400,4 +1423,36 @@ static inline bool adreno_support_64bit(struct adreno_device *adreno_dev)
}
#endif /*BITS_PER_LONG*/
+static inline void adreno_ringbuffer_set_global(
+ struct adreno_device *adreno_dev, int name)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ kgsl_sharedmem_writel(device,
+ &adreno_dev->ringbuffers[0].pagetable_desc,
+ PT_INFO_OFFSET(current_global_ptname), name);
+}
+
+static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
+ struct kgsl_pagetable *pt)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned long flags;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+
+ kgsl_sharedmem_writel(device, &rb->pagetable_desc,
+ PT_INFO_OFFSET(current_rb_ptname), pt->name);
+
+ kgsl_sharedmem_writeq(device, &rb->pagetable_desc,
+ PT_INFO_OFFSET(ttbr0), kgsl_mmu_pagetable_get_ttbr0(pt));
+
+ kgsl_sharedmem_writel(device, &rb->pagetable_desc,
+ PT_INFO_OFFSET(contextidr),
+ kgsl_mmu_pagetable_get_contextidr(pt));
+
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+}
+
#endif /*__ADRENO_H */
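
An illustrative use of the new state helpers (not lifted from the dispatcher code, which lands elsewhere in this patch): a preempt switch advances NONE -> START -> TRIGGERED and arms the timeout timer declared in struct adreno_preemption:

	/* Illustrative only: kick off a preempt switch when none is pending */
	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_START);

		/* ... program the next ringbuffer, then trigger the CP ... */

		adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
		mod_timer(&adreno_dev->preempt.timer,
			  jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
	}

The paired smp_wmb() calls in adreno_set_preempt_state() make these transitions visible in order to the IRQ handler and the preempt worker, which read the state with atomic_read().
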
diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c
index ea8b75f4c83b..2accbe5c5764 100644
--- a/drivers/gpu/msm/adreno_a3xx.c
+++ b/drivers/gpu/msm/adreno_a3xx.c
@@ -1756,9 +1756,9 @@ static int _ringbuffer_bootstrap_ucode(struct adreno_device *adreno_dev,
*cmds++ = cp_type3_packet(CP_INTERRUPT, 1);
*cmds++ = 0;
- rb->wptr = rb->wptr - 2;
+ rb->_wptr = rb->_wptr - 2;
adreno_ringbuffer_submit(rb, NULL);
- rb->wptr = rb->wptr + 2;
+ rb->_wptr = rb->_wptr + 2;
} else {
for (i = pfp_idx; i < adreno_dev->pfp_fw_size; i++)
*cmds++ = adreno_dev->pfp_fw[i];
diff --git a/drivers/gpu/msm/adreno_a4xx.c b/drivers/gpu/msm/adreno_a4xx.c
index b1196da0cee1..b15d23cfbe0a 100644
--- a/drivers/gpu/msm/adreno_a4xx.c
+++ b/drivers/gpu/msm/adreno_a4xx.c
@@ -178,111 +178,6 @@ static const struct adreno_vbif_platform a4xx_vbif_platforms[] = {
{ adreno_is_a418, a430_vbif },
};
-/* a4xx_preemption_start() - Setup state to start preemption */
-static void a4xx_preemption_start(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- uint32_t val;
-
- /*
- * Setup scratch registers from which the GPU will program the
- * registers required to start execution of new ringbuffer
- * set ringbuffer address
- */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG8,
- rb->buffer_desc.gpuaddr);
- kgsl_regread(device, A4XX_CP_RB_CNTL, &val);
- /* scratch REG9 corresponds to CP_RB_CNTL register */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG9, val);
- /* scratch REG10 corresponds to rptr address */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG10, 0);
- /* scratch REG11 corresponds to rptr */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG11, rb->rptr);
- /* scratch REG12 corresponds to wptr */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG12, rb->wptr);
- /*
- * scratch REG13 corresponds to IB1_BASE,
- * 0 since we do not do switches in between IB's
- */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG13, 0);
- /* scratch REG14 corresponds to IB1_BUFSZ */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG14, 0);
- /* scratch REG15 corresponds to IB2_BASE */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG15, 0);
- /* scratch REG16 corresponds to IB2_BUFSZ */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG16, 0);
- /* scratch REG17 corresponds to GPR11 */
- kgsl_regwrite(device, A4XX_CP_SCRATCH_REG17, rb->gpr11);
-}
-
-/* a4xx_preemption_save() - Save the state after preemption is done */
-static void a4xx_preemption_save(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A4XX_CP_SCRATCH_REG18, &rb->rptr);
- kgsl_regread(device, A4XX_CP_SCRATCH_REG23, &rb->gpr11);
-}
-
-static int a4xx_preemption_token(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- uint64_t gpuaddr)
-{
- unsigned int *cmds_orig = cmds;
-
- /* Turn on preemption flag */
- /* preemption token - fill when pt switch command size is known */
- *cmds++ = cp_type3_packet(CP_PREEMPT_TOKEN, 3);
- *cmds++ = (uint)gpuaddr;
- *cmds++ = 1;
- /* generate interrupt on preemption completion */
- *cmds++ = 1 << CP_PREEMPT_ORDINAL_INTERRUPT;
-
- return cmds - cmds_orig;
-
-}
-
-static int a4xx_preemption_pre_ibsubmit(
- struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- struct kgsl_context *context, uint64_t cond_addr,
- struct kgsl_memobj_node *ib)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int *cmds_orig = cmds;
- int exec_ib = 0;
-
- cmds += a4xx_preemption_token(adreno_dev, rb, cmds,
- device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(context->id, preempted));
-
- if (ib)
- exec_ib = 1;
-
- *cmds++ = cp_type3_packet(CP_COND_EXEC, 4);
- *cmds++ = cond_addr;
- *cmds++ = cond_addr;
- *cmds++ = 1;
- *cmds++ = 7 + exec_ib * 3;
- if (exec_ib) {
- *cmds++ = cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2);
- *cmds++ = ib->gpuaddr;
- *cmds++ = (unsigned int) ib->size >> 2;
- }
- /* clear preemption flag */
- *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
- *cmds++ = cond_addr;
- *cmds++ = 0;
- *cmds++ = cp_type3_packet(CP_WAIT_MEM_WRITES, 1);
- *cmds++ = 0;
- *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
- *cmds++ = 0;
-
- return cmds - cmds_orig;
-}
-
/*
* a4xx_is_sptp_idle() - A430 SP/TP should be off to be considered idle
* @adreno_dev: The adreno device pointer
@@ -723,6 +618,8 @@ static void a4xx_start(struct adreno_device *adreno_dev)
gpudev->vbif_xin_halt_ctrl0_mask =
A405_VBIF_XIN_HALT_CTRL0_MASK;
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
a4xx_protect_init(adreno_dev);
}
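a4xx_start() now arms the preemption state machine at ADRENO_PREEMPT_NONE on every GPU start, so a stale TRIGGERED or COMPLETE state cannot leak across a restart or fault recovery. A sketch of the presumed helper (the real definition belongs to adreno.h in this series; the atomic_t lets the IRQ handler and dispatcher read the state without holding the device mutex):

	static inline void adreno_set_preempt_state(
			struct adreno_device *adreno_dev, int state)
	{
		/* assumed: a thin wrapper over the atomic state word */
		atomic_set(&adreno_dev->preempt.state, state);
	}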
@@ -839,6 +736,7 @@ static unsigned int a4xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A4XX_CP_WFI_PEND_CTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A4XX_CP_RB_BASE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, ADRENO_REG_SKIP),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO, A4XX_CP_RB_RPTR_ADDR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A4XX_CP_RB_RPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A4XX_CP_RB_WPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A4XX_CP_CNTL),
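Mapping ADRENO_REG_CP_RB_RPTR_ADDR_LO lets a4xx_rb_start() below point the CP at a per-ringbuffer scratch slot, so the read pointer is shadowed to memory and adreno_get_rptr() can be served without a register access. A hypothetical sketch of that read path, assuming a scratch memdesc on the device and a SCRATCH_RPTR_OFFSET() companion to the SCRATCH_RPTR_GPU_ADDR() macro used below:

	static unsigned int rptr_from_scratch(struct kgsl_device *device,
			struct adreno_ringbuffer *rb)
	{
		unsigned int rptr;

		/* the CP writes its rptr here via CP_RB_RPTR_ADDR */
		kgsl_sharedmem_readl(&device->scratch, &rptr,
				SCRATCH_RPTR_OFFSET(rb->id));
		return rptr;
	}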
@@ -1634,8 +1532,15 @@ static int a4xx_rb_start(struct adreno_device *adreno_dev,
unsigned int start_type)
{
struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
+ struct kgsl_device *device = &adreno_dev->dev;
+ uint64_t addr;
int ret;
+ addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
+
+ adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);
+
/*
* The size of the ringbuffer in the hardware is the log2
* representation of the size in quadwords (sizedwords / 2).
@@ -1644,8 +1549,8 @@ static int a4xx_rb_start(struct adreno_device *adreno_dev,
*/
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
- (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
- (1 << 27));
+ ((ilog2(4) << 8) & 0x1F00) |
+ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F));
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
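The CP_RB_CNTL rewrite drops bit 27 (which suppressed rptr reporting) and instead programs a report block size alongside the ring size, matching the memory-shadowed rptr set up above. Reading the fields from the masks used here (a sketch, not the hardware spec):

	/* ring size: log2 of the size in quadwords, bits [5:0] */
	#define RB_CNTL_SIZE(dwords)	(ilog2((dwords) >> 1) & 0x3F)
	/* rptr report block size: log2 in quadwords, bits [13:8] */
	#define RB_CNTL_BLKSIZE(qwords)	((ilog2(qwords) << 8) & 0x1F00)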
@@ -1755,6 +1660,19 @@ static struct adreno_coresight a4xx_coresight = {
.groups = a4xx_coresight_groups,
};
+static void a4xx_preempt_callback(struct adreno_device *adreno_dev, int bit)
+{
+ if (atomic_read(&adreno_dev->preempt.state) != ADRENO_PREEMPT_TRIGGERED)
+ return;
+
+ trace_adreno_hw_preempt_trig_to_comp_int(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+
+ adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
+}
+
#define A4XX_INT_MASK \
((1 << A4XX_INT_RBBM_AHB_ERROR) | \
(1 << A4XX_INT_RBBM_REG_TIMEOUT) | \
@@ -1792,7 +1710,7 @@ static struct adreno_irq_funcs a4xx_irq_funcs[32] = {
/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
ADRENO_IRQ_CALLBACK(a4xx_err_callback),
ADRENO_IRQ_CALLBACK(NULL), /* 7 - RBBM_GPC_ERR */
- ADRENO_IRQ_CALLBACK(adreno_dispatcher_preempt_callback), /* 8 - CP_SW */
+ ADRENO_IRQ_CALLBACK(a4xx_preempt_callback), /* 8 - CP_SW */
ADRENO_IRQ_CALLBACK(a4xx_err_callback), /* 9 - CP_OPCODE_ERROR */
/* 10 - CP_RESERVED_BIT_ERROR */
ADRENO_IRQ_CALLBACK(a4xx_err_callback),
@@ -1833,433 +1751,6 @@ static struct adreno_snapshot_data a4xx_snapshot_data = {
.sect_sizes = &a4xx_snap_sizes,
};
-#define ADRENO_RB_PREEMPT_TOKEN_DWORDS 125
-
-static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,
- struct adreno_ringbuffer *incoming_rb)
-{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int *ringcmds, *start;
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- int ptname;
- struct kgsl_pagetable *pt;
- int pt_switch_sizedwords = 0, total_sizedwords = 20;
- unsigned link[ADRENO_RB_PREEMPT_TOKEN_DWORDS];
- uint i;
-
- if (incoming_rb->preempted_midway) {
-
- kgsl_sharedmem_readl(&incoming_rb->pagetable_desc,
- &ptname, offsetof(
- struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname));
- pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
- ptname);
- /*
- * always expect a valid pt, else pt refcounting is
- * messed up or current pt tracking has a bug which
- * could lead to eventual disaster
- */
- BUG_ON(!pt);
- /* set the ringbuffer for incoming RB */
- pt_switch_sizedwords =
- adreno_iommu_set_pt_generate_cmds(incoming_rb,
- &link[0], pt);
- total_sizedwords += pt_switch_sizedwords;
- }
-
- /*
- * Allocate total_sizedwords space in RB, this is the max space
- * required.
- */
- ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
-
- if (IS_ERR(ringcmds))
- return PTR_ERR(ringcmds);
-
- start = ringcmds;
-
- *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
- *ringcmds++ = 0;
-
- if (incoming_rb->preempted_midway) {
- for (i = 0; i < pt_switch_sizedwords; i++)
- *ringcmds++ = link[i];
- }
-
- *ringcmds++ = cp_register(adreno_dev, adreno_getreg(adreno_dev,
- ADRENO_REG_CP_PREEMPT_DISABLE), 1);
- *ringcmds++ = 0;
-
- *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
- *ringcmds++ = 1;
-
- ringcmds += gpudev->preemption_token(adreno_dev, rb, ringcmds,
- device->memstore.gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, preempted));
-
- if ((uint)(ringcmds - start) > total_sizedwords) {
- KGSL_DRV_ERR(device, "Insufficient rb size allocated\n");
- BUG();
- }
-
- /*
- * If we have commands less than the space reserved in RB
- * adjust the wptr accordingly
- */
- rb->wptr = rb->wptr - (total_sizedwords - (uint)(ringcmds - start));
-
- /* submit just the preempt token */
- mb();
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
- return 0;
-}
-
-/**
- * a4xx_preempt_trig_state() - Schedule preemption in TRIGGERRED
- * state
- * @adreno_dev: Device which is in TRIGGERRED state
- */
-static void a4xx_preempt_trig_state(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int rbbase, val;
-
- /*
- * Hardware not yet idle means that preemption interrupt
- * may still occur, nothing to do here until interrupt signals
- * completion of preemption, just return here
- */
- if (!adreno_hw_isidle(adreno_dev))
- return;
-
- /*
- * We just changed states, reschedule dispatcher to change
- * preemption states
- */
- if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED !=
- atomic_read(&dispatcher->preemption_state)) {
- adreno_dispatcher_schedule(device);
- return;
- }
-
- /*
- * H/W is idle and we did not get a preemption interrupt, may
- * be device went idle w/o encountering any preempt token or
- * we already preempted w/o interrupt
- */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
- /* Did preemption occur, if so then change states and return */
- if (rbbase != adreno_dev->cur_rb->buffer_desc.gpuaddr) {
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val);
- if (val && rbbase == adreno_dev->next_rb->buffer_desc.gpuaddr) {
- KGSL_DRV_INFO(device,
- "Preemption completed without interrupt\n");
- trace_adreno_hw_preempt_trig_to_comp(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE);
- adreno_dispatcher_schedule(device);
- return;
- }
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- /* reschedule dispatcher to take care of the fault */
- adreno_dispatcher_schedule(device);
- return;
- }
- /*
- * Check if preempt token was submitted after preemption trigger, if so
- * then preemption should have occurred, since device is already idle it
- * means something went wrong - trigger FT
- */
- if (dispatcher->preempt_token_submit) {
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- /* reschedule dispatcher to take care of the fault */
- adreno_dispatcher_schedule(device);
- return;
- }
- /*
- * Preempt token was not submitted after preemption trigger so device
- * may have gone idle before preemption could occur, if there are
- * commands that got submitted to current RB after triggering preemption
- * then submit them as those commands may have a preempt token in them
- */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &adreno_dev->cur_rb->rptr);
- if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr) {
- /*
- * Memory barrier before informing the
- * hardware of new commands
- */
- mb();
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- adreno_dev->cur_rb->wptr);
- return;
- }
-
- /* Submit preempt token to make preemption happen */
- if (adreno_drawctxt_switch(adreno_dev, adreno_dev->cur_rb, NULL, 0))
- BUG();
- if (a4xx_submit_preempt_token(adreno_dev->cur_rb,
- adreno_dev->next_rb))
- BUG();
- dispatcher->preempt_token_submit = 1;
- adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
- trace_adreno_hw_preempt_token_submit(adreno_dev->cur_rb,
- adreno_dev->next_rb);
-}
-
-/**
- * a4xx_preempt_clear_state() - Schedule preemption in
- * CLEAR state. Preemption can be issued in this state.
- * @adreno_dev: Device which is in CLEAR state
- */
-static void a4xx_preempt_clear_state(
- struct adreno_device *adreno_dev)
-
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_dispatcher_cmdqueue *dispatch_tempq;
- struct kgsl_cmdbatch *cmdbatch;
- struct adreno_ringbuffer *highest_busy_rb;
- int switch_low_to_high;
- int ret;
-
- /* Device not awake means there is nothing to do */
- if (!kgsl_state_is_awake(device))
- return;
-
- /* keep updating the current rptr when preemption is clear */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &(adreno_dev->cur_rb->rptr));
-
- highest_busy_rb = adreno_dispatcher_get_highest_busy_rb(adreno_dev);
- if (!highest_busy_rb)
- return;
-
- switch_low_to_high = adreno_compare_prio_level(
- highest_busy_rb->id,
- adreno_dev->cur_rb->id);
-
- /* already current then return */
- if (!switch_low_to_high)
- return;
-
- if (switch_low_to_high < 0) {
- /*
- * if switching to lower priority make sure that the rptr and
- * wptr are equal, when the lower rb is not starved
- */
- if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr)
- return;
- /*
- * switch to default context because when we switch back
- * to higher context then its not known which pt will
- * be current, so by making it default here the next
- * commands submitted will set the right pt
- */
- ret = adreno_drawctxt_switch(adreno_dev,
- adreno_dev->cur_rb,
- NULL, 0);
- /*
- * lower priority RB has to wait until space opens up in
- * higher RB
- */
- if (ret)
- return;
-
- adreno_writereg(adreno_dev,
- ADRENO_REG_CP_PREEMPT_DISABLE, 1);
- }
-
- /*
- * setup registers to do the switch to highest priority RB
- * which is not empty or may be starving away(poor thing)
- */
- a4xx_preemption_start(adreno_dev, highest_busy_rb);
-
- /* turn on IOMMU as the preemption may trigger pt switch */
- kgsl_mmu_enable_clk(&device->mmu);
-
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_TRIGGERED);
-
- adreno_dev->next_rb = highest_busy_rb;
- mod_timer(&dispatcher->preempt_timer, jiffies +
- msecs_to_jiffies(ADRENO_DISPATCH_PREEMPT_TIMEOUT));
-
- trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- /* issue PREEMPT trigger */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
- /*
- * IOMMU clock can be safely switched off after the timestamp
- * of the first command in the new rb
- */
- dispatch_tempq = &adreno_dev->next_rb->dispatch_q;
- if (dispatch_tempq->head != dispatch_tempq->tail)
- cmdbatch = dispatch_tempq->cmd_q[dispatch_tempq->head];
- else
- cmdbatch = NULL;
- if (cmdbatch)
- adreno_ringbuffer_mmu_disable_clk_on_ts(device,
- adreno_dev->next_rb,
- cmdbatch->global_ts);
- else
- adreno_ringbuffer_mmu_disable_clk_on_ts(device,
- adreno_dev->next_rb, adreno_dev->next_rb->timestamp);
- /* submit preempt token packet to ensure preemption */
- if (switch_low_to_high < 0) {
- ret = a4xx_submit_preempt_token(
- adreno_dev->cur_rb, adreno_dev->next_rb);
- /*
- * unexpected since we are submitting this when rptr = wptr,
- * this was checked above already
- */
- BUG_ON(ret);
- dispatcher->preempt_token_submit = 1;
- adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
- } else {
- dispatcher->preempt_token_submit = 0;
- adreno_dispatcher_schedule(device);
- adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
- }
-}
-
-/**
- * a4xx_preempt_complete_state() - Schedule preemption in
- * COMPLETE state
- * @adreno_dev: Device which is in COMPLETE state
- */
-static void a4xx_preempt_complete_state(
- struct adreno_device *adreno_dev)
-
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_dispatcher_cmdqueue *dispatch_q;
- unsigned int wptr, rbbase;
- unsigned int val, val1;
-
- del_timer_sync(&dispatcher->preempt_timer);
-
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &val);
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val1);
-
- if (val || !val1) {
- KGSL_DRV_ERR(device,
- "Invalid state after preemption CP_PREEMPT: %08x, CP_PREEMPT_DEBUG: %08x\n",
- val, val1);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(device);
- return;
- }
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
- if (rbbase != adreno_dev->next_rb->buffer_desc.gpuaddr) {
- KGSL_DRV_ERR(device,
- "RBBASE incorrect after preemption, expected %x got %016llx\b",
- rbbase,
- adreno_dev->next_rb->buffer_desc.gpuaddr);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(device);
- return;
- }
-
- a4xx_preemption_save(adreno_dev, adreno_dev->cur_rb);
-
- dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
- /* new RB is the current RB */
- trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
- adreno_dev->cur_rb);
- adreno_dev->prev_rb = adreno_dev->cur_rb;
- adreno_dev->cur_rb = adreno_dev->next_rb;
- adreno_dev->cur_rb->preempted_midway = 0;
- adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
- adreno_dev->next_rb = NULL;
- if (adreno_disp_preempt_fair_sched) {
- /* starved rb is now scheduled so unhalt dispatcher */
- if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED ==
- adreno_dev->cur_rb->starve_timer_state)
- adreno_put_gpu_halt(adreno_dev);
- adreno_dev->cur_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED;
- adreno_dev->cur_rb->sched_timer = jiffies;
- /*
- * If the outgoing RB is has commands then set the
- * busy time for it
- */
- if (adreno_dev->prev_rb->rptr != adreno_dev->prev_rb->wptr) {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
- adreno_dev->prev_rb->sched_timer = jiffies;
- } else {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
- }
- }
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
- if (adreno_compare_prio_level(adreno_dev->prev_rb->id,
- adreno_dev->cur_rb->id) < 0) {
- if (adreno_dev->prev_rb->wptr_preempt_end !=
- adreno_dev->prev_rb->rptr)
- adreno_dev->prev_rb->preempted_midway = 1;
- } else if (adreno_dev->prev_rb->wptr_preempt_end !=
- adreno_dev->prev_rb->rptr) {
- BUG();
- }
- /* submit wptr if required for new rb */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
- if (adreno_dev->cur_rb->wptr != wptr) {
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- adreno_dev->cur_rb->wptr);
- }
- /* clear preemption register */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, 0);
- adreno_preempt_process_dispatch_queue(adreno_dev, dispatch_q);
-}
-
-static void a4xx_preemption_schedule(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!adreno_is_preemption_enabled(adreno_dev))
- return;
-
- mutex_lock(&device->mutex);
-
- switch (atomic_read(&dispatcher->preemption_state)) {
- case ADRENO_DISPATCHER_PREEMPT_CLEAR:
- a4xx_preempt_clear_state(adreno_dev);
- break;
- case ADRENO_DISPATCHER_PREEMPT_TRIGGERED:
- a4xx_preempt_trig_state(adreno_dev);
- /*
- * if we transitioned to next state then fall-through
- * processing to next state
- */
- if (!adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE))
- break;
- case ADRENO_DISPATCHER_PREEMPT_COMPLETE:
- a4xx_preempt_complete_state(adreno_dev);
- break;
- default:
- BUG();
- }
-
- mutex_unlock(&device->mutex);
-}
-
struct adreno_gpudev adreno_a4xx_gpudev = {
.reg_offsets = &a4xx_reg_offsets,
.ft_perf_counters = a4xx_ft_perf_counters,
@@ -2284,6 +1775,6 @@ struct adreno_gpudev adreno_a4xx_gpudev = {
.regulator_enable = a4xx_regulator_enable,
.regulator_disable = a4xx_regulator_disable,
.preemption_pre_ibsubmit = a4xx_preemption_pre_ibsubmit,
- .preemption_token = a4xx_preemption_token,
.preemption_schedule = a4xx_preemption_schedule,
+ .preemption_init = a4xx_preemption_init,
};
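The deleted dispatcher-owned preemption machinery resurfaces in the new adreno_a4xx_preempt.c below, keyed off a per-device adreno_dev->preempt block instead of struct adreno_dispatcher. A sketch of the state it needs, inferred from the accesses in this patch (the authoritative definition lives in adreno.h in this series):

	enum adreno_preempt_states {
		ADRENO_PREEMPT_NONE,		/* no preemption in flight */
		ADRENO_PREEMPT_TRIGGERED,	/* CP_PREEMPT written, waiting */
		ADRENO_PREEMPT_COMPLETE,	/* switch done, tidy up */
	};

	struct adreno_preemption {
		atomic_t state;			/* enum adreno_preempt_states */
		struct timer_list timer;	/* preemption watchdog */
		bool token_submit;		/* token queued after trigger */
	};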
diff --git a/drivers/gpu/msm/adreno_a4xx.h b/drivers/gpu/msm/adreno_a4xx.h
index e425dc8e9f7b..5dabc26fd34f 100644
--- a/drivers/gpu/msm/adreno_a4xx.h
+++ b/drivers/gpu/msm/adreno_a4xx.h
@@ -47,6 +47,15 @@
"RBBM_DPM_THERMAL_YELLOW_ERR" }, \
{ BIT(A4XX_INT_RBBM_DPM_THERMAL_RED_ERR), "RBBM_DPM_THERMAL_RED_ERR" }
+unsigned int a4xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds,
+ struct kgsl_context *context);
+
+void a4xx_preemption_schedule(struct adreno_device *adreno_dev);
+
+int a4xx_preemption_init(struct adreno_device *adreno_dev);
+
void a4xx_snapshot(struct adreno_device *adreno_dev,
struct kgsl_snapshot *snapshot);
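The new prototypes drop the cond_addr and kgsl_memobj_node arguments from the pre-ibsubmit hook (the conditional IB replay appears to move out of the generic path) and return the number of dwords emitted, so the ringbuffer code can advance its command pointer directly. A sketch of the assumed call site in adreno_ringbuffer.c:

	static unsigned int reserve_preempt_cmds(
			struct adreno_device *adreno_dev,
			struct adreno_ringbuffer *rb, unsigned int *cmds,
			struct kgsl_context *context)
	{
		struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
		unsigned int *start = cmds;

		/* the hook is optional; only a4xx/a5xx provide it */
		if (gpudev->preemption_pre_ibsubmit &&
			adreno_is_preemption_enabled(adreno_dev))
			cmds += gpudev->preemption_pre_ibsubmit(adreno_dev,
					rb, cmds, context);

		return cmds - start;
	}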
diff --git a/drivers/gpu/msm/adreno_a4xx_preempt.c b/drivers/gpu/msm/adreno_a4xx_preempt.c
new file mode 100644
index 000000000000..4087ac60c89e
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a4xx_preempt.c
@@ -0,0 +1,572 @@
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "adreno.h"
+#include "adreno_a4xx.h"
+#include "adreno_trace.h"
+#include "adreno_pm4types.h"
+
+#define ADRENO_RB_PREEMPT_TOKEN_DWORDS 125
+
+static void a4xx_preemption_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int cur_rptr = adreno_get_rptr(adreno_dev->cur_rb);
+ unsigned int next_rptr = adreno_get_rptr(adreno_dev->next_rb);
+
+ KGSL_DRV_ERR(device,
+ "Preemption timed out. cur_rb rptr/wptr %x/%x id %d, next_rb rptr/wptr %x/%x id %d, disp_state: %d\n",
+ cur_rptr, adreno_dev->cur_rb->wptr, adreno_dev->cur_rb->id,
+ next_rptr, adreno_dev->next_rb->wptr, adreno_dev->next_rb->id,
+ atomic_read(&adreno_dev->preempt.state));
+
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+}
+
+static unsigned int a4xx_preemption_token(struct adreno_device *adreno_dev,
+ unsigned int *cmds, uint64_t gpuaddr)
+{
+ unsigned int *cmds_orig = cmds;
+
+ /* Turn on preemption flag */
+ /* preemption token - fill when pt switch command size is known */
+ *cmds++ = cp_type3_packet(CP_PREEMPT_TOKEN, 3);
+ *cmds++ = (uint)gpuaddr;
+ *cmds++ = 1;
+ /* generate interrupt on preemption completion */
+ *cmds++ = 1 << CP_PREEMPT_ORDINAL_INTERRUPT;
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+unsigned int a4xx_preemption_pre_ibsubmit(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, unsigned int *cmds,
+ struct kgsl_context *context)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int *cmds_orig = cmds;
+ unsigned int cond_addr = device->memstore.gpuaddr +
+ MEMSTORE_ID_GPU_ADDR(device, context->id, preempted);
+
+ cmds += a4xx_preemption_token(adreno_dev, cmds, cond_addr);
+
+ *cmds++ = cp_type3_packet(CP_COND_EXEC, 4);
+ *cmds++ = cond_addr;
+ *cmds++ = cond_addr;
+ *cmds++ = 1;
+ *cmds++ = 7;
+
+ /* clear preemption flag */
+ *cmds++ = cp_type3_packet(CP_MEM_WRITE, 2);
+ *cmds++ = cond_addr;
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_WAIT_MEM_WRITES, 1);
+ *cmds++ = 0;
+ *cmds++ = cp_type3_packet(CP_WAIT_FOR_ME, 1);
+ *cmds++ = 0;
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+
+static void a4xx_preemption_start(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ uint32_t val;
+
+	/*
+	 * Setup scratch registers from which the GPU will program the
+	 * registers required to start execution of the new ringbuffer.
+	 * Set the ringbuffer address first
+	 */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG8,
+ rb->buffer_desc.gpuaddr);
+ kgsl_regread(device, A4XX_CP_RB_CNTL, &val);
+ /* scratch REG9 corresponds to CP_RB_CNTL register */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG9, val);
+ /* scratch REG10 corresponds to rptr address */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG10,
+ SCRATCH_RPTR_GPU_ADDR(device, rb->id));
+ /* scratch REG11 corresponds to rptr */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG11, adreno_get_rptr(rb));
+ /* scratch REG12 corresponds to wptr */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG12, rb->wptr);
+ /*
+ * scratch REG13 corresponds to IB1_BASE,
+	 * 0 since we do not do switches in between IBs
+ */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG13, 0);
+ /* scratch REG14 corresponds to IB1_BUFSZ */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG14, 0);
+ /* scratch REG15 corresponds to IB2_BASE */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG15, 0);
+ /* scratch REG16 corresponds to IB2_BUFSZ */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG16, 0);
+ /* scratch REG17 corresponds to GPR11 */
+ kgsl_regwrite(device, A4XX_CP_SCRATCH_REG17, rb->gpr11);
+}
+
+static void a4xx_preemption_save(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ kgsl_regread(device, A4XX_CP_SCRATCH_REG23, &rb->gpr11);
+}
+
+
+static int a4xx_submit_preempt_token(struct adreno_ringbuffer *rb,
+ struct adreno_ringbuffer *incoming_rb)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int *ringcmds, *start;
+ int ptname;
+ struct kgsl_pagetable *pt;
+ int pt_switch_sizedwords = 0, total_sizedwords = 20;
+ unsigned link[ADRENO_RB_PREEMPT_TOKEN_DWORDS];
+ uint i;
+
+ if (incoming_rb->preempted_midway) {
+
+ kgsl_sharedmem_readl(&incoming_rb->pagetable_desc,
+ &ptname, PT_INFO_OFFSET(current_rb_ptname));
+ pt = kgsl_mmu_get_pt_from_ptname(&(device->mmu),
+ ptname);
+ /* set the ringbuffer for incoming RB */
+ pt_switch_sizedwords =
+ adreno_iommu_set_pt_generate_cmds(incoming_rb,
+ &link[0], pt);
+ total_sizedwords += pt_switch_sizedwords;
+ }
+
+ /*
+ * Allocate total_sizedwords space in RB, this is the max space
+ * required.
+ */
+ ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
+
+ if (IS_ERR(ringcmds))
+ return PTR_ERR(ringcmds);
+
+ start = ringcmds;
+
+ *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
+ *ringcmds++ = 0;
+
+ if (incoming_rb->preempted_midway) {
+ for (i = 0; i < pt_switch_sizedwords; i++)
+ *ringcmds++ = link[i];
+ }
+
+ *ringcmds++ = cp_register(adreno_dev, adreno_getreg(adreno_dev,
+ ADRENO_REG_CP_PREEMPT_DISABLE), 1);
+ *ringcmds++ = 0;
+
+ *ringcmds++ = cp_packet(adreno_dev, CP_SET_PROTECTED_MODE, 1);
+ *ringcmds++ = 1;
+
+ ringcmds += a4xx_preemption_token(adreno_dev, ringcmds,
+ device->memstore.gpuaddr +
+ MEMSTORE_RB_OFFSET(rb, preempted));
+
+ if ((uint)(ringcmds - start) > total_sizedwords)
+ KGSL_DRV_ERR(device, "Insufficient rb size allocated\n");
+
+	/*
+	 * If we used fewer commands than the space reserved in the RB,
+	 * adjust the wptr accordingly
+	 */
+ rb->wptr = rb->wptr - (total_sizedwords - (uint)(ringcmds - start));
+
+ /* submit just the preempt token */
+ mb();
+ kgsl_pwrscale_busy(device);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
+ return 0;
+}
+
+static void a4xx_preempt_trig_state(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int rbbase, val;
+ int ret;
+
+	/*
+	 * Hardware not yet idle means the preemption interrupt may
+	 * still occur; nothing to do here until the interrupt signals
+	 * completion of preemption, so just return
+	 */
+ if (!adreno_hw_isidle(adreno_dev))
+ return;
+
+ /*
+ * We just changed states, reschedule dispatcher to change
+ * preemption states
+ */
+
+ if (atomic_read(&adreno_dev->preempt.state) !=
+ ADRENO_PREEMPT_TRIGGERED) {
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+
+	/*
+	 * H/W is idle and we did not get a preemption interrupt: maybe
+	 * the device went idle without encountering any preempt token,
+	 * or we already preempted without an interrupt
+	 */
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
+ /* Did preemption occur, if so then change states and return */
+ if (rbbase != adreno_dev->cur_rb->buffer_desc.gpuaddr) {
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val);
+ if (val && rbbase == adreno_dev->next_rb->buffer_desc.gpuaddr) {
+ KGSL_DRV_INFO(device,
+ "Preemption completed without interrupt\n");
+ trace_adreno_hw_preempt_trig_to_comp(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+ adreno_set_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_COMPLETE);
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ /* reschedule dispatcher to take care of the fault */
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+	/*
+	 * Check if a preempt token was submitted after the preemption
+	 * trigger; if so, preemption should have occurred. Since the
+	 * device is already idle, something went wrong - trigger FT
+	 */
+ if (adreno_dev->preempt.token_submit) {
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ /* reschedule dispatcher to take care of the fault */
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+	/*
+	 * No preempt token was submitted after the preemption trigger,
+	 * so the device may have gone idle before preemption could occur.
+	 * If commands were submitted to the current RB after triggering
+	 * preemption, submit them now since they may carry a preempt token
+	 */
+ if (!adreno_rb_empty(adreno_dev->cur_rb)) {
+ /*
+ * Memory barrier before informing the
+ * hardware of new commands
+ */
+ mb();
+ kgsl_pwrscale_busy(device);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ adreno_dev->cur_rb->wptr);
+ return;
+ }
+
+ /* Submit preempt token to make preemption happen */
+ ret = adreno_drawctxt_switch(adreno_dev, adreno_dev->cur_rb,
+ NULL, 0);
+ if (ret)
+ KGSL_DRV_ERR(device,
+ "Unable to switch context to NULL: %d\n", ret);
+
+ ret = a4xx_submit_preempt_token(adreno_dev->cur_rb,
+ adreno_dev->next_rb);
+ if (ret)
+ KGSL_DRV_ERR(device,
+ "Unable to submit preempt token: %d\n", ret);
+
+ adreno_dev->preempt.token_submit = true;
+ adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
+ trace_adreno_hw_preempt_token_submit(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+}
+
+static struct adreno_ringbuffer *a4xx_next_ringbuffer(
+ struct adreno_device *adreno_dev)
+{
+ struct adreno_ringbuffer *rb, *next = NULL;
+ int i;
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ if (!adreno_rb_empty(rb) && next == NULL) {
+ next = rb;
+ continue;
+ }
+
+ if (!adreno_disp_preempt_fair_sched)
+ continue;
+
+ switch (rb->starve_timer_state) {
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT:
+ if (!adreno_rb_empty(rb) &&
+ adreno_dev->cur_rb != rb) {
+ rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
+ rb->sched_timer = jiffies;
+ }
+ break;
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT:
+ if (time_after(jiffies, rb->sched_timer +
+ msecs_to_jiffies(
+ adreno_dispatch_starvation_time))) {
+ rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED;
+ /* halt dispatcher to remove starvation */
+ adreno_get_gpu_halt(adreno_dev);
+ }
+ break;
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED:
+ /*
+ * If the RB has not been running for the minimum
+ * time slice then allow it to run
+ */
+ if (!adreno_rb_empty(rb) && time_before(jiffies,
+ adreno_dev->cur_rb->sched_timer +
+ msecs_to_jiffies(adreno_dispatch_time_slice)))
+ next = rb;
+ else
+ rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
+ break;
+ case ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED:
+ default:
+ break;
+ }
+ }
+
+ return next;
+}
+
+static void a4xx_preempt_clear_state(struct adreno_device *adreno_dev)
+
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_ringbuffer *highest_busy_rb;
+ int switch_low_to_high;
+ int ret;
+
+ /* Device not awake means there is nothing to do */
+ if (!kgsl_state_is_awake(device))
+ return;
+
+ highest_busy_rb = a4xx_next_ringbuffer(adreno_dev);
+ if (!highest_busy_rb || highest_busy_rb == adreno_dev->cur_rb)
+ return;
+
+ switch_low_to_high = adreno_compare_prio_level(
+ highest_busy_rb->id,
+ adreno_dev->cur_rb->id);
+
+ if (switch_low_to_high < 0) {
+		/*
+		 * When switching to a lower priority RB (lower RB not
+		 * starved), make sure the current RB is drained: rptr == wptr
+		 */
+ if (!adreno_rb_empty(adreno_dev->cur_rb))
+ return;
+		/*
+		 * Switch to the default context because when we switch back
+		 * to the higher priority context it is not known which pt
+		 * will be current; by making it default here the next
+		 * commands submitted will set the right pt
+		 */
+ ret = adreno_drawctxt_switch(adreno_dev,
+ adreno_dev->cur_rb,
+ NULL, 0);
+ /*
+ * lower priority RB has to wait until space opens up in
+ * higher RB
+ */
+ if (ret) {
+ KGSL_DRV_ERR(device,
+ "Unable to switch context to NULL: %d",
+ ret);
+
+ return;
+ }
+
+ adreno_writereg(adreno_dev,
+ ADRENO_REG_CP_PREEMPT_DISABLE, 1);
+ }
+
+	/*
+	 * setup registers to do the switch to highest priority RB
+	 * which is not empty or may be starving away (poor thing)
+	 */
+ a4xx_preemption_start(adreno_dev, highest_busy_rb);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
+
+ adreno_dev->next_rb = highest_busy_rb;
+ mod_timer(&adreno_dev->preempt.timer, jiffies +
+ msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
+
+ trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
+ adreno_dev->next_rb,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_get_rptr(adreno_dev->next_rb));
+ /* issue PREEMPT trigger */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
+
+ /* submit preempt token packet to ensure preemption */
+	if (switch_low_to_high < 0) {
+		ret = a4xx_submit_preempt_token(
+			adreno_dev->cur_rb, adreno_dev->next_rb);
+		if (ret)
+			KGSL_DRV_ERR(device,
+				"Unable to submit preempt token: %d\n", ret);
+ adreno_dev->preempt.token_submit = true;
+ adreno_dev->cur_rb->wptr_preempt_end = adreno_dev->cur_rb->wptr;
+ } else {
+ adreno_dev->preempt.token_submit = false;
+ adreno_dispatcher_schedule(device);
+ adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
+ }
+}
+
+static void a4xx_preempt_complete_state(struct adreno_device *adreno_dev)
+
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int wptr, rbbase;
+ unsigned int val, val1;
+ unsigned int prevrptr;
+
+ del_timer_sync(&adreno_dev->preempt.timer);
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &val);
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, &val1);
+
+ if (val || !val1) {
+ KGSL_DRV_ERR(device,
+ "Invalid state after preemption CP_PREEMPT: %08x, CP_PREEMPT_DEBUG: %08x\n",
+ val, val1);
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_BASE, &rbbase);
+ if (rbbase != adreno_dev->next_rb->buffer_desc.gpuaddr) {
+ KGSL_DRV_ERR(device,
+			"RBBASE incorrect after preemption, expected %x got %016llx\n",
+ rbbase,
+ adreno_dev->next_rb->buffer_desc.gpuaddr);
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+
+ a4xx_preemption_save(adreno_dev, adreno_dev->cur_rb);
+
+ /* new RB is the current RB */
+ trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
+ adreno_dev->cur_rb,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_get_rptr(adreno_dev->cur_rb));
+
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->cur_rb->preempted_midway = 0;
+ adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
+ adreno_dev->next_rb = NULL;
+
+ if (adreno_disp_preempt_fair_sched) {
+ /* starved rb is now scheduled so unhalt dispatcher */
+ if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED ==
+ adreno_dev->cur_rb->starve_timer_state)
+ adreno_put_gpu_halt(adreno_dev);
+ adreno_dev->cur_rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED;
+ adreno_dev->cur_rb->sched_timer = jiffies;
+		/*
+		 * If the outgoing RB has commands then set the
+		 * busy time for it
+		 */
+ if (!adreno_rb_empty(adreno_dev->prev_rb)) {
+ adreno_dev->prev_rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
+ adreno_dev->prev_rb->sched_timer = jiffies;
+ } else {
+ adreno_dev->prev_rb->starve_timer_state =
+ ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
+ }
+ }
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ prevrptr = adreno_get_rptr(adreno_dev->prev_rb);
+
+ if (adreno_compare_prio_level(adreno_dev->prev_rb->id,
+ adreno_dev->cur_rb->id) < 0) {
+ if (adreno_dev->prev_rb->wptr_preempt_end != prevrptr)
+ adreno_dev->prev_rb->preempted_midway = 1;
+ }
+
+ /* submit wptr if required for new rb */
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
+ if (adreno_dev->cur_rb->wptr != wptr) {
+ kgsl_pwrscale_busy(device);
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ adreno_dev->cur_rb->wptr);
+ }
+ /* clear preemption register */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT_DEBUG, 0);
+}
+
+void a4xx_preemption_schedule(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ mutex_lock(&device->mutex);
+
+ switch (atomic_read(&adreno_dev->preempt.state)) {
+ case ADRENO_PREEMPT_NONE:
+ a4xx_preempt_clear_state(adreno_dev);
+ break;
+ case ADRENO_PREEMPT_TRIGGERED:
+ a4xx_preempt_trig_state(adreno_dev);
+		/*
+		 * if we transitioned to the next state then fall through
+		 * and process the next state
+		 */
+ if (!adreno_in_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_COMPLETE))
+ break;
+ case ADRENO_PREEMPT_COMPLETE:
+ a4xx_preempt_complete_state(adreno_dev);
+ break;
+ default:
+ break;
+ }
+
+ mutex_unlock(&device->mutex);
+}
+
+int a4xx_preemption_init(struct adreno_device *adreno_dev)
+{
+ setup_timer(&adreno_dev->preempt.timer, a4xx_preemption_timer,
+ (unsigned long) adreno_dev);
+
+ return 0;
+}
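Taken together the new file implements a three-state machine: a4xx_preempt_clear_state() picks the next ringbuffer and writes CP_PREEMPT, the CP_SW interrupt (or the idle poll in a4xx_preempt_trig_state()) detects completion, and a4xx_preempt_complete_state() swaps cur_rb/next_rb and clears back to NONE. The dispatcher is the only driver of the machine; a sketch of the assumed hand-off, wired through the gpudev ops table at the end of adreno_a4xx.c above:

	static void dispatcher_preempt_tick(struct adreno_device *adreno_dev)
	{
		struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);

		/* run the state machine before issuing new cmdbatches */
		if (gpudev->preemption_schedule)
			gpudev->preemption_schedule(adreno_dev);
	}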
diff --git a/drivers/gpu/msm/adreno_a4xx_snapshot.c b/drivers/gpu/msm/adreno_a4xx_snapshot.c
index b07e970aae32..6921af5c0ab5 100644
--- a/drivers/gpu/msm/adreno_a4xx_snapshot.c
+++ b/drivers/gpu/msm/adreno_a4xx_snapshot.c
@@ -534,9 +534,6 @@ void a4xx_snapshot(struct adreno_device *adreno_dev,
kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL, 0);
kgsl_regwrite(device, A4XX_RBBM_CLOCK_CTL2, 0);
- /* Turn on MMU clocks since we read MMU registers */
- kgsl_mmu_enable_clk(&device->mmu);
-
/* Master set of (non debug) registers */
SNAPSHOT_REGISTERS(device, snapshot, a4xx_registers);
@@ -554,8 +551,6 @@ void a4xx_snapshot(struct adreno_device *adreno_dev,
a4xx_vbif_snapshot_registers,
ARRAY_SIZE(a4xx_vbif_snapshot_registers));
- kgsl_mmu_disable_clk(&device->mmu);
-
kgsl_snapshot_indexed_registers(device, snapshot,
A4XX_CP_STATE_DEBUG_INDEX, A4XX_CP_STATE_DEBUG_DATA,
0, snap_data->sect_sizes->cp_pfp);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 512dcd483f45..96f72c59e4cd 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -60,19 +60,12 @@ static const struct adreno_vbif_platform a5xx_vbif_platforms[] = {
{ adreno_is_a506, a530_vbif },
};
-#define PREEMPT_RECORD(_field) \
- offsetof(struct a5xx_cp_preemption_record, _field)
-
-#define PREEMPT_SMMU_RECORD(_field) \
- offsetof(struct a5xx_cp_smmu_info, _field)
-
static void a5xx_irq_storm_worker(struct work_struct *work);
static int _read_fw2_block_header(uint32_t *header, uint32_t id,
uint32_t major, uint32_t minor);
static void a5xx_gpmu_reset(struct work_struct *work);
static int a5xx_gpmu_init(struct adreno_device *adreno_dev);
-
/**
* Number of times to check if the regulator enabled before
* giving up and returning failure.
@@ -108,8 +101,9 @@ static void spin_idle_debug(struct kgsl_device *device,
kgsl_regread(device, A5XX_CP_HW_FAULT, &hwfault);
dev_err(device->dev,
- " rb=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
- rptr, wptr, status, status3, intstatus);
+ "rb=%d pos=%X/%X rbbm_status=%8.8X/%8.8X int_0_status=%8.8X\n",
+ adreno_dev->cur_rb->id, rptr, wptr, status, status3, intstatus);
+
dev_err(device->dev, " hwfault=%8.8X\n", hwfault);
kgsl_device_snapshot(device, NULL);
@@ -179,277 +173,6 @@ static void a5xx_check_features(struct adreno_device *adreno_dev)
adreno_efuse_unmap(adreno_dev);
}
-/*
- * a5xx_preemption_start() - Setup state to start preemption
- */
-static void a5xx_preemption_start(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
- uint64_t ttbr0;
- uint32_t contextidr;
- struct kgsl_pagetable *pt;
- bool switch_default_pt = true;
-
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(wptr), rb->wptr);
- kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
- lower_32_bits(rb->preemption_desc.gpuaddr));
- kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
- upper_32_bits(rb->preemption_desc.gpuaddr));
- kgsl_sharedmem_readq(&rb->pagetable_desc, &ttbr0,
- offsetof(struct adreno_ringbuffer_pagetable_info, ttbr0));
- kgsl_sharedmem_readl(&rb->pagetable_desc, &contextidr,
- offsetof(struct adreno_ringbuffer_pagetable_info, contextidr));
-
- spin_lock(&kgsl_driver.ptlock);
- list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
- if (kgsl_mmu_pagetable_get_ttbr0(pt) == ttbr0) {
- switch_default_pt = false;
- break;
- }
- }
- spin_unlock(&kgsl_driver.ptlock);
-
- if (switch_default_pt) {
- ttbr0 = kgsl_mmu_pagetable_get_ttbr0(
- device->mmu.defaultpagetable);
- contextidr = kgsl_mmu_pagetable_get_contextidr(
- device->mmu.defaultpagetable);
- }
-
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- offsetof(struct a5xx_cp_smmu_info, ttbr0), ttbr0);
- kgsl_sharedmem_writel(device, &iommu->smmu_info,
- offsetof(struct a5xx_cp_smmu_info, context_idr), contextidr);
-}
-
-/*
- * a5xx_preemption_save() - Save the state after preemption is done
- */
-static void a5xx_preemption_save(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb)
-{
- /* save the rptr from ctxrecord here */
- kgsl_sharedmem_readl(&rb->preemption_desc, &rb->rptr,
- PREEMPT_RECORD(rptr));
-}
-
-#ifdef CONFIG_QCOM_KGSL_IOMMU
-static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
-
- /* Allocate mem for storing preemption smmu record */
- return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
- KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED);
-}
-#else
-static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
-{
- return -ENODEV;
-}
-#endif
-
-static int a5xx_preemption_init(struct adreno_device *adreno_dev)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_ringbuffer *rb;
- int ret;
- unsigned int i;
- uint64_t addr;
-
- /* We are dependent on IOMMU to make preemption go on the CP side */
- if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
- return -ENODEV;
-
- /* Allocate mem for storing preemption counters */
- ret = kgsl_allocate_global(device, &adreno_dev->preemption_counters,
- adreno_dev->num_ringbuffers *
- A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0);
- if (ret)
- return ret;
-
- addr = adreno_dev->preemption_counters.gpuaddr;
-
- /* Allocate mem for storing preemption switch record */
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- ret = kgsl_allocate_global(device,
- &rb->preemption_desc, A5XX_CP_CTXRECORD_SIZE_IN_BYTES,
- 0, KGSL_MEMDESC_PRIVILEGED);
- if (ret)
- return ret;
-
- /* Initialize the context switch record here */
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(magic), A5XX_CP_CTXRECORD_MAGIC_REF);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(info), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(data), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(cntl), 0x0800000C);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(rptr), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(wptr), 0);
- kgsl_sharedmem_writeq(device, &rb->preemption_desc,
- PREEMPT_RECORD(rbase),
- adreno_dev->ringbuffers[i].buffer_desc.gpuaddr);
- kgsl_sharedmem_writeq(device, &rb->preemption_desc,
- PREEMPT_RECORD(counter), addr);
-
- addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
- }
-
- return a5xx_preemption_iommu_init(adreno_dev);
-}
-
-/*
- * a5xx_preemption_token() - Preempt token on a5xx
- * PM4 commands for preempt token on a5xx. These commands are
- * submitted to ringbuffer to trigger preemption.
- */
-static int a5xx_preemption_token(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- uint64_t gpuaddr)
-{
- unsigned int *cmds_orig = cmds;
-
- *cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
- cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr);
- *cmds++ = 1;
- /* generate interrupt on preemption completion */
- *cmds++ = 1;
-
- return cmds - cmds_orig;
-
-}
-
-/*
- * a5xx_preemption_pre_ibsubmit() - Below PM4 commands are
- * added at the beginning of every cmdbatch submission.
- */
-static int a5xx_preemption_pre_ibsubmit(
- struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- struct kgsl_context *context, uint64_t cond_addr,
- struct kgsl_memobj_node *ib)
-{
- unsigned int *cmds_orig = cmds;
- uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
- unsigned int preempt_style = 0;
-
- if (context) {
- /*
- * Preemption from secure to unsecure needs Zap shader to be
- * run to clear all secure content. CP does not know during
- * preemption if it is switching between secure and unsecure
- * contexts so restrict Secure contexts to be preempted at
- * ringbuffer level.
- */
- if (context->flags & KGSL_CONTEXT_SECURE)
- preempt_style = KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER;
- else
- preempt_style = ADRENO_PREEMPT_STYLE(context->flags);
- }
-
- /*
- * CP_PREEMPT_ENABLE_GLOBAL(global preemption) can only be set by KMD
- * in ringbuffer.
- * 1) set global preemption to 0x0 to disable global preemption.
- * Only RB level preemption is allowed in this mode
- * 2) Set global preemption to defer(0x2) for finegrain preemption.
- * when global preemption is set to defer(0x2),
- * CP_PREEMPT_ENABLE_LOCAL(local preemption) determines the
- * preemption point. Local preemption
- * can be enabled by both UMD(within IB) and KMD.
- */
- *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_GLOBAL, 1);
- *cmds++ = ((preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN)
- ? 2 : 0);
-
- /* Turn CP protection OFF */
- *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
- *cmds++ = 0;
-
- /*
- * CP during context switch will save context switch info to
- * a5xx_cp_preemption_record pointed by CONTEXT_SWITCH_SAVE_ADDR
- */
- *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 1);
- *cmds++ = lower_32_bits(gpuaddr);
- *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI, 1);
- *cmds++ = upper_32_bits(gpuaddr);
-
- /* Turn CP protection ON */
- *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
- *cmds++ = 1;
-
- /*
- * Enable local preemption for finegrain preemption in case of
- * a misbehaving IB
- */
- if (preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN) {
- *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
- *cmds++ = 1;
- } else {
- *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
- *cmds++ = 0;
- }
-
- /* Enable CP_CONTEXT_SWITCH_YIELD packets in the IB2s */
- *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
- *cmds++ = 2;
-
- return cmds - cmds_orig;
-}
-
-/*
- * a5xx_preemption_yield_enable() - Below PM4 commands are
- * added after every cmdbatch submission.
- */
-static int a5xx_preemption_yield_enable(unsigned int *cmds)
-{
- /*
- * SRM -- set render mode (ex binning, direct render etc)
- * SRM is set by UMD usually at start of IB to tell CP the type of
- * preemption.
- * KMD needs to set SRM to NULL to indicate CP that rendering is
- * done by IB.
- */
- *cmds++ = cp_type7_packet(CP_SET_RENDER_MODE, 5);
- *cmds++ = 0;
- *cmds++ = 0;
- *cmds++ = 0;
- *cmds++ = 0;
- *cmds++ = 0;
-
- *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
- *cmds++ = 1;
-
- return 8;
-}
-
-/*
- * a5xx_preemption_post_ibsubmit() - Below PM4 commands are
- * added after every cmdbatch submission.
- */
-static int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
- struct adreno_ringbuffer *rb, unsigned int *cmds,
- struct kgsl_context *context)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int ctx_id = context ? context->id : 0;
-
- return a5xx_preemption_token(adreno_dev, rb, cmds,
- device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(ctx_id, preempted));
-
-}
-
static void a5xx_platform_setup(struct adreno_device *adreno_dev)
{
uint64_t addr;
@@ -1972,12 +1695,8 @@ out:
static void a5xx_start(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
- unsigned int i, bit;
- struct adreno_ringbuffer *rb;
- uint64_t def_ttbr0;
- uint32_t contextidr;
+ unsigned int bit;
adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
ARRAY_SIZE(a5xx_vbif_platforms));
@@ -2178,58 +1897,21 @@ static void a5xx_start(struct adreno_device *adreno_dev)
}
- if (adreno_is_preemption_enabled(adreno_dev)) {
- struct kgsl_pagetable *pt = device->mmu.defaultpagetable;
-
- def_ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
- contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
-
- /* Initialize the context switch record here */
- kgsl_sharedmem_writel(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(magic),
- A5XX_CP_SMMU_INFO_MAGIC_REF);
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(ttbr0), def_ttbr0);
- /*
- * The CP doesn't actually use the asid field, so
- * put a bad value into it until it is removed from
- * the preemption record.
- */
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(asid),
- 0xdecafbad);
- kgsl_sharedmem_writeq(device, &iommu->smmu_info,
- PREEMPT_SMMU_RECORD(context_idr),
- contextidr);
- adreno_writereg64(adreno_dev,
- ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
- ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
- iommu->smmu_info.gpuaddr);
-
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(rptr), 0);
- kgsl_sharedmem_writel(device, &rb->preemption_desc,
- PREEMPT_RECORD(wptr), 0);
- kgsl_sharedmem_writeq(device, &rb->pagetable_desc,
- offsetof(struct adreno_ringbuffer_pagetable_info,
- ttbr0), def_ttbr0);
- }
- }
-
+ a5xx_preemption_start(adreno_dev);
a5xx_protect_init(adreno_dev);
}
+/*
+ * Follow the ME_INIT sequence with a preemption yield to allow the GPU to move
+ * to a different ringbuffer, if desired
+ */
static int _preemption_init(
struct adreno_device *adreno_dev,
struct adreno_ringbuffer *rb, unsigned int *cmds,
struct kgsl_context *context)
{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
unsigned int *cmds_orig = cmds;
uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
- uint64_t gpuaddr_token = device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(0, preempted);
/* Turn CP protection OFF */
*cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
@@ -2258,8 +1940,8 @@ static int _preemption_init(
*cmds++ = 1;
*cmds++ = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
- cmds += cp_gpuaddr(adreno_dev, cmds, gpuaddr_token);
- *cmds++ = 1;
+ cmds += cp_gpuaddr(adreno_dev, cmds, 0x0);
+ *cmds++ = 0;
/* generate interrupt on preemption completion */
*cmds++ = 1;
@@ -2297,7 +1979,7 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
if (adreno_is_preemption_enabled(adreno_dev))
cmds += _preemption_init(adreno_dev, rb, cmds, NULL);
- rb->wptr = rb->wptr - (42 - (cmds - start));
+ rb->_wptr = rb->_wptr - (42 - (cmds - start));
ret = adreno_ringbuffer_submit_spin(rb, NULL, 2000);
if (ret)
@@ -2595,8 +2277,15 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
unsigned int start_type)
{
struct adreno_ringbuffer *rb = ADRENO_CURRENT_RINGBUFFER(adreno_dev);
+ struct kgsl_device *device = &adreno_dev->dev;
+ uint64_t addr;
int ret;
+ addr = SCRATCH_RPTR_GPU_ADDR(device, rb->id);
+
+ adreno_writereg64(adreno_dev, ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ ADRENO_REG_CP_RB_RPTR_ADDR_HI, addr);
+
/*
* The size of the ringbuffer in the hardware is the log2
* representation of the size in quadwords (sizedwords / 2).
@@ -2605,8 +2294,7 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
*/
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_CNTL,
- (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F) |
- (1 << 27));
+ A5XX_CP_RB_CNTL_DEFAULT);
adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_BASE,
rb->buffer_desc.gpuaddr);
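A5XX_CP_RB_CNTL_DEFAULT replaces the open-coded size/bit-27 value with a named constant; it is defined elsewhere in this series (likely a5xx_reg.h) and presumably packs the same fields as the a4xx write earlier in this patch:

	/* presumed definition; mirrors the a4xx encoding: log2 ring size
	 * in quadwords plus a one-quadword rptr report block, with the
	 * no-report bit (27) left clear
	 */
	#define A5XX_CP_RB_CNTL_DEFAULT \
		(((ilog2(4) << 8) & 0x1F00) | \
		 (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))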
@@ -3147,6 +2835,10 @@ static unsigned int a5xx_register_offsets[ADRENO_REG_REGISTER_MAX] = {
ADRENO_REG_DEFINE(ADRENO_REG_CP_WFI_PEND_CTR, A5XX_CP_WFI_PEND_CTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE, A5XX_CP_RB_BASE),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_BASE_HI, A5XX_CP_RB_BASE_HI),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_LO,
+ A5XX_CP_RB_RPTR_ADDR_LO),
+ ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR_ADDR_HI,
+ A5XX_CP_RB_RPTR_ADDR_HI),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_RPTR, A5XX_CP_RB_RPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_RB_WPTR, A5XX_CP_RB_WPTR),
ADRENO_REG_DEFINE(ADRENO_REG_CP_CNTL, A5XX_CP_CNTL),
@@ -3416,6 +3108,8 @@ static void a5xx_cp_callback(struct adreno_device *adreno_dev, int bit)
prev = cur;
}
+ a5xx_preemption_trigger(adreno_dev);
+
kgsl_schedule_work(&device->event_work);
adreno_dispatcher_schedule(device);
}
@@ -3500,9 +3194,6 @@ void a5x_gpc_err_int_callback(struct adreno_device *adreno_dev, int bit)
(1 << A5XX_INT_RBBM_ATB_ASYNC_OVERFLOW) | \
(1 << A5XX_INT_RBBM_GPC_ERROR) | \
(1 << A5XX_INT_CP_HW_ERROR) | \
- (1 << A5XX_INT_CP_IB1) | \
- (1 << A5XX_INT_CP_IB2) | \
- (1 << A5XX_INT_CP_RB) | \
(1 << A5XX_INT_CP_CACHE_FLUSH_TS) | \
(1 << A5XX_INT_RBBM_ATB_BUS_OVERFLOW) | \
(1 << A5XX_INT_UCHE_OOB_ACCESS) | \
@@ -3525,7 +3216,7 @@ static struct adreno_irq_funcs a5xx_irq_funcs[32] = {
/* 6 - RBBM_ATB_ASYNC_OVERFLOW */
ADRENO_IRQ_CALLBACK(a5xx_err_callback),
ADRENO_IRQ_CALLBACK(a5x_gpc_err_int_callback), /* 7 - GPC_ERR */
- ADRENO_IRQ_CALLBACK(adreno_dispatcher_preempt_callback),/* 8 - CP_SW */
+	ADRENO_IRQ_CALLBACK(a5xx_preempt_callback), /* 8 - CP_SW */
ADRENO_IRQ_CALLBACK(a5xx_cp_hw_err_callback), /* 9 - CP_HW_ERROR */
/* 10 - CP_CCU_FLUSH_DEPTH_TS */
ADRENO_IRQ_CALLBACK(NULL),
@@ -3533,9 +3224,9 @@ static struct adreno_irq_funcs a5xx_irq_funcs[32] = {
ADRENO_IRQ_CALLBACK(NULL),
/* 12 - CP_CCU_RESOLVE_TS */
ADRENO_IRQ_CALLBACK(NULL),
- ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 13 - CP_IB2_INT */
- ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 14 - CP_IB1_INT */
- ADRENO_IRQ_CALLBACK(adreno_cp_callback), /* 15 - CP_RB_INT */
+ ADRENO_IRQ_CALLBACK(NULL), /* 13 - CP_IB2_INT */
+ ADRENO_IRQ_CALLBACK(NULL), /* 14 - CP_IB1_INT */
+ ADRENO_IRQ_CALLBACK(NULL), /* 15 - CP_RB_INT */
/* 16 - CCP_UNUSED_1 */
ADRENO_IRQ_CALLBACK(NULL),
ADRENO_IRQ_CALLBACK(NULL), /* 17 - CP_RB_DONE_TS */
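With the IB1/IB2/RB interrupts masked and their slots NULLed, retirement processing rides solely on CP_CACHE_FLUSH_TS, and the CP_SW slot now points at a5xx_preempt_callback, added by this series in adreno_a5xx_preempt.c (not shown here). Judging from the a4xx counterpart earlier in the patch, it presumably validates the TRIGGERED state before handing completion to the dispatcher; a sketch under that assumption:

	static void a5xx_preempt_callback(struct adreno_device *adreno_dev,
			int bit)
	{
		/* assumed to mirror a4xx_preempt_callback above */
		if (atomic_read(&adreno_dev->preempt.state) !=
				ADRENO_PREEMPT_TRIGGERED)
			return;

		adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
	}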
@@ -3772,323 +3463,6 @@ static struct adreno_coresight a5xx_coresight = {
.groups = a5xx_coresight_groups,
};
-/**
- * a5xx_preempt_trig_state() - Schedule preemption in TRIGGERRED
- * state
- * @adreno_dev: Device which is in TRIGGERRED state
- */
-static void a5xx_preempt_trig_state(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- unsigned int preempt_busy;
- uint64_t rbbase;
-
- /*
- * triggered preemption, check for busy bits, if not set go to complete
- * bit 0: When high indicates CP is not done with preemption.
- * bit 4: When high indicates that the CP is actively switching between
- * application contexts.
- * Check both the bits to make sure CP is done with preemption.
- */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &preempt_busy);
- if (!(preempt_busy & 0x11)) {
-
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
- ADRENO_REG_CP_RB_BASE_HI, &rbbase);
- /* Did preemption occur, if so then change states and return */
- if (rbbase != adreno_dev->cur_rb->buffer_desc.gpuaddr) {
- if (rbbase ==
- adreno_dev->next_rb->buffer_desc.gpuaddr) {
- KGSL_DRV_INFO(device,
- "Preemption completed without interrupt\n");
- trace_adreno_hw_preempt_trig_to_comp(
- adreno_dev->cur_rb,
- adreno_dev->next_rb);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE);
- } else {
- /*
- * Something wrong with preemption.
- * Set fault and reschedule dispatcher to take
- * care of fault.
- */
- adreno_set_gpu_fault(adreno_dev,
- ADRENO_PREEMPT_FAULT);
- }
- adreno_dispatcher_schedule(device);
- return;
- }
- }
-
- /*
- * Preemption is still happening.
- * Hardware not yet idle means that preemption interrupt
- * may still occur, nothing to do here until interrupt signals
- * completion of preemption, just return here
- */
- if (!adreno_hw_isidle(adreno_dev))
- return;
-
- /*
- * We just changed states, reschedule dispatcher to change
- * preemption states
- */
- if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED !=
- atomic_read(&dispatcher->preemption_state)) {
- adreno_dispatcher_schedule(device);
- return;
- }
-
-
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
-
- /* reschedule dispatcher to take care of the fault */
- adreno_dispatcher_schedule(device);
-}
-
-/**
- * a5xx_preempt_clear_state() - Schedule preemption in CLEAR
- * state. Preemption can be issued in this state.
- * @adreno_dev: Device which is in CLEAR state
- */
-static void a5xx_preempt_clear_state(
- struct adreno_device *adreno_dev)
-
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_ringbuffer *highest_busy_rb;
- int switch_low_to_high;
- int ret;
-
- /* Device not awake means there is nothing to do */
- if (!kgsl_state_is_awake(device))
- return;
-
- /* keep updating the current rptr when preemption is clear */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &(adreno_dev->cur_rb->rptr));
-
- highest_busy_rb = adreno_dispatcher_get_highest_busy_rb(adreno_dev);
- if (!highest_busy_rb)
- return;
-
- switch_low_to_high = adreno_compare_prio_level(
- highest_busy_rb->id, adreno_dev->cur_rb->id);
-
- /* already current then return */
- if (!switch_low_to_high)
- return;
-
- if (switch_low_to_high < 0) {
-
- if (!adreno_hw_isidle(adreno_dev)) {
- adreno_dispatcher_schedule(device);
- return;
- }
-
- /*
- * if switching to lower priority make sure that the rptr and
- * wptr are equal, when the lower rb is not starved
- */
- if (adreno_dev->cur_rb->rptr != adreno_dev->cur_rb->wptr)
- return;
- /*
- * switch to default context because when we switch back
- * to higher context then its not known which pt will
- * be current, so by making it default here the next
- * commands submitted will set the right pt
- */
- ret = adreno_drawctxt_switch(adreno_dev,
- adreno_dev->cur_rb,
- NULL, 0);
- /*
- * lower priority RB has to wait until space opens up in
- * higher RB
- */
- if (ret)
- return;
- }
-
- /* rptr could be updated in drawctxt switch above, update it here */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &(adreno_dev->cur_rb->rptr));
-
-	/* turn on the IOMMU clocks as the preemption may trigger a pt switch */
- kgsl_mmu_enable_clk(&device->mmu);
-
-	/*
-	 * setup memory to do the switch to highest priority RB
-	 * which is not empty or may be starving away (poor thing)
-	 */
- a5xx_preemption_start(adreno_dev, highest_busy_rb);
-
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_TRIGGERED);
-
- adreno_dev->next_rb = highest_busy_rb;
- mod_timer(&dispatcher->preempt_timer, jiffies +
- msecs_to_jiffies(ADRENO_DISPATCH_PREEMPT_TIMEOUT));
-
- trace_adreno_hw_preempt_clear_to_trig(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- /* issue PREEMPT trigger */
- adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
-
- adreno_dispatcher_schedule(device);
-}
-
-/**
- * a5xx_preempt_complete_state() - Schedule preemption in
- * COMPLETE state
- * @adreno_dev: Device which is in COMPLETE state
- */
-static void a5xx_preempt_complete_state(
-	struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_dispatcher_cmdqueue *dispatch_q;
- uint64_t rbbase;
- unsigned int wptr;
- unsigned int val;
- static unsigned long wait_for_preemption_complete;
-
- del_timer_sync(&dispatcher->preempt_timer);
-
- adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &val);
-
- if (val) {
-		/*
-		 * Wait up to 50 ms for the preemption state to be updated by
-		 * the CP before triggering a hang
-		 */
- if (wait_for_preemption_complete == 0)
- wait_for_preemption_complete = jiffies +
- msecs_to_jiffies(50);
- if (time_after(jiffies, wait_for_preemption_complete)) {
- wait_for_preemption_complete = 0;
- KGSL_DRV_ERR(device,
- "Invalid state after preemption CP_PREEMPT:%08x STOP:%1x BUSY:%1x\n",
- val, (val & 0x1), (val & 0x10)>>4);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- }
- adreno_dispatcher_schedule(device);
- return;
- }
-
- wait_for_preemption_complete = 0;
- adreno_readreg64(adreno_dev, ADRENO_REG_CP_RB_BASE,
- ADRENO_REG_CP_RB_BASE_HI, &rbbase);
- if (rbbase != adreno_dev->next_rb->buffer_desc.gpuaddr) {
-		KGSL_DRV_ERR(device,
-			"RBBASE incorrect after preemption, expected %016llx got %016llx\n",
-			adreno_dev->next_rb->buffer_desc.gpuaddr,
-			rbbase);
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(device);
- return;
- }
-
- a5xx_preemption_save(adreno_dev, adreno_dev->cur_rb);
-
- dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
- /* new RB is the current RB */
- trace_adreno_hw_preempt_comp_to_clear(adreno_dev->next_rb,
- adreno_dev->cur_rb);
- adreno_dev->prev_rb = adreno_dev->cur_rb;
- adreno_dev->cur_rb = adreno_dev->next_rb;
- adreno_dev->cur_rb->preempted_midway = 0;
- adreno_dev->cur_rb->wptr_preempt_end = 0xFFFFFFFF;
- adreno_dev->next_rb = NULL;
-
- if (adreno_disp_preempt_fair_sched) {
- /* starved rb is now scheduled so unhalt dispatcher */
- if (ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED ==
- adreno_dev->cur_rb->starve_timer_state)
- adreno_put_gpu_halt(adreno_dev);
- adreno_dev->cur_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED;
- adreno_dev->cur_rb->sched_timer = jiffies;
-		/*
-		 * If the outgoing RB has commands then set the
-		 * busy time for it
-		 */
- if (adreno_dev->prev_rb->rptr != adreno_dev->prev_rb->wptr) {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
- adreno_dev->prev_rb->sched_timer = jiffies;
- } else {
- adreno_dev->prev_rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
- }
- }
- adreno_ringbuffer_mmu_disable_clk_on_ts(device, adreno_dev->cur_rb,
- adreno_dev->cur_rb->timestamp);
-
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
-
- /* submit wptr if required for new rb */
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
- if (adreno_dev->cur_rb->wptr != wptr) {
- kgsl_pwrscale_busy(device);
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
- adreno_dev->cur_rb->wptr);
- }
-
- adreno_preempt_process_dispatch_queue(adreno_dev, dispatch_q);
-}
-
-static void a5xx_preemption_schedule(
- struct adreno_device *adreno_dev)
-{
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct adreno_ringbuffer *rb;
- int i = 0;
-
- if (!adreno_is_preemption_enabled(adreno_dev))
- return;
-
- mutex_lock(&device->mutex);
-
-	/*
-	 * This barrier is needed so that the most up-to-date value of
-	 * preemption_state is read
-	 */
- smp_mb();
-
- if (KGSL_STATE_ACTIVE == device->state)
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i)
- rb->rptr = adreno_get_rptr(rb);
-
- switch (atomic_read(&dispatcher->preemption_state)) {
- case ADRENO_DISPATCHER_PREEMPT_CLEAR:
- a5xx_preempt_clear_state(adreno_dev);
- break;
- case ADRENO_DISPATCHER_PREEMPT_TRIGGERED:
- a5xx_preempt_trig_state(adreno_dev);
-		/*
-		 * If we transitioned to the next state then fall through and
-		 * process that state
-		 */
- if (!adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE))
- break;
- case ADRENO_DISPATCHER_PREEMPT_COMPLETE:
- a5xx_preempt_complete_state(adreno_dev);
- break;
- default:
- BUG();
- }
-
- mutex_unlock(&device->mutex);
-}
-
struct adreno_gpudev adreno_a5xx_gpudev = {
.reg_offsets = &a5xx_reg_offsets,
.ft_perf_counters = a5xx_ft_perf_counters,
@@ -4116,7 +3490,6 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
a5xx_preemption_yield_enable,
.preemption_post_ibsubmit =
a5xx_preemption_post_ibsubmit,
- .preemption_token = a5xx_preemption_token,
.preemption_init = a5xx_preemption_init,
.preemption_schedule = a5xx_preemption_schedule,
.enable_64bit = a5xx_enable_64bit,
diff --git a/drivers/gpu/msm/adreno_a5xx.h b/drivers/gpu/msm/adreno_a5xx.h
index 6ce95ff7bdbf..7965bb7b5440 100644
--- a/drivers/gpu/msm/adreno_a5xx.h
+++ b/drivers/gpu/msm/adreno_a5xx.h
@@ -112,6 +112,8 @@ void a5xx_crashdump_init(struct adreno_device *adreno_dev);
void a5xx_hwcg_set(struct adreno_device *adreno_dev, bool on);
+#define A5XX_CP_RB_CNTL_DEFAULT (((ilog2(4) << 8) & 0x1F00) | \
+ (ilog2(KGSL_RB_DWORDS >> 1) & 0x3F))
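+/*
+ * Judging by the masks, bits 0-5 appear to hold log2 of the ringbuffer
+ * size in 64-bit units (hence KGSL_RB_DWORDS >> 1) and bits 8-12 the
+ * log2 of the block size
+ */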
/* GPMU interrupt multiplexor */
#define FW_INTR_INFO (0)
#define LLM_ACK_ERR_INTR (1)
@@ -232,4 +234,22 @@ static inline bool lm_on(struct adreno_device *adreno_dev)
return ADRENO_FEATURE(adreno_dev, ADRENO_LM) &&
test_bit(ADRENO_LM_CTRL, &adreno_dev->pwrctrl_flag);
}
+
+/* Preemption functions */
+void a5xx_preemption_trigger(struct adreno_device *adreno_dev);
+void a5xx_preemption_schedule(struct adreno_device *adreno_dev);
+void a5xx_preemption_start(struct adreno_device *adreno_dev);
+int a5xx_preemption_init(struct adreno_device *adreno_dev);
+int a5xx_preemption_yield_enable(unsigned int *cmds);
+
+unsigned int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+ unsigned int *cmds);
+unsigned int a5xx_preemption_pre_ibsubmit(
+ struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds, struct kgsl_context *context);
+
+void a5xx_preempt_callback(struct adreno_device *adreno_dev, int bit);
+
#endif
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
new file mode 100644
index 000000000000..c1463b824c67
--- /dev/null
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -0,0 +1,574 @@
+/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "adreno.h"
+#include "adreno_a5xx.h"
+#include "a5xx_reg.h"
+#include "adreno_trace.h"
+#include "adreno_pm4types.h"
+
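+/*
+ * Rough sketch of the preemption state machine implemented in this file:
+ * NONE -> START when a trigger is claimed, START -> TRIGGERED once the CP
+ * has been programmed, TRIGGERED -> PENDING in the interrupt callback (a
+ * suspicious completion is parked in COMPLETE for the worker to verify),
+ * TRIGGERED -> FAULTED if the preempt timer fires first, and every
+ * successful or recovered path eventually returns the state to NONE
+ */
+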
+#define PREEMPT_RECORD(_field) \
+ offsetof(struct a5xx_cp_preemption_record, _field)
+
+#define PREEMPT_SMMU_RECORD(_field) \
+ offsetof(struct a5xx_cp_smmu_info, _field)
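+
+/*
+ * Example: a5xx_preemption_trigger() below uses these offsets to update
+ * the wptr in the incoming ringbuffer's preemption record:
+ *
+ *	kgsl_sharedmem_writel(device, &next->preemption_desc,
+ *		PREEMPT_RECORD(wptr), next->wptr);
+ */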
+
+static void _update_wptr(struct adreno_device *adreno_dev)
+{
+ struct adreno_ringbuffer *rb = adreno_dev->cur_rb;
+ unsigned int wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+
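+	/*
+	 * The hardware wptr may lag the software wptr if commands were queued
+	 * while a preemption was in flight, so catch it up here
+	 */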
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_WPTR, &wptr);
+
+ if (wptr != rb->wptr) {
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ rb->wptr);
+
+ rb->dispatch_q.expires = jiffies +
+ msecs_to_jiffies(adreno_cmdbatch_timeout);
+ }
+
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+}
+
+static inline bool adreno_move_preempt_state(struct adreno_device *adreno_dev,
+ enum adreno_preempt_states old, enum adreno_preempt_states new)
+{
+ return (atomic_cmpxchg(&adreno_dev->preempt.state, old, new) == old);
+}
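+
+/*
+ * Example: a caller claims the right to trigger a preemption by atomically
+ * moving NONE to START; if another path got there first the cmpxchg fails
+ * and the caller backs off, as in a5xx_preemption_trigger() below:
+ *
+ *	if (!adreno_move_preempt_state(adreno_dev,
+ *		ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
+ *		return;
+ */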
+
+static void _a5xx_preemption_done(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int status;
+
+ /*
+ * In the very unlikely case that the power is off, do nothing - the
+ * state will be reset on power up and everybody will be happy
+ */
+
+ if (!kgsl_state_is_awake(device))
+ return;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status != 0) {
+ KGSL_DRV_ERR(device,
+ "Preemption not complete: status=%X cur=%d R/W=%X/%X next=%d R/W=%X/%X\n",
+ status, adreno_dev->cur_rb->id,
+ adreno_get_rptr(adreno_dev->cur_rb),
+ adreno_dev->cur_rb->wptr, adreno_dev->next_rb->id,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_dev->next_rb->wptr);
+
+ /* Set a fault and restart */
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+
+ return;
+ }
+
+ del_timer_sync(&adreno_dev->preempt.timer);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb, adreno_dev->next_rb);
+
+ /* Clean up all the bits */
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->next_rb = NULL;
+
+ /* Update the wptr for the new command queue */
+ _update_wptr(adreno_dev);
+
+ /* Update the dispatcher timer for the new command queue */
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+
+ /* Clear the preempt state */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+}
+
+static void _a5xx_preemption_fault(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ unsigned int status;
+
+ /*
+ * If the power is on check the preemption status one more time - if it
+ * was successful then just transition to the complete state
+ */
+ if (kgsl_state_is_awake(device)) {
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status == 0) {
+ adreno_set_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_COMPLETE);
+
+ adreno_dispatcher_schedule(device);
+ return;
+ }
+ }
+
+ KGSL_DRV_ERR(device,
+ "Preemption timed out: cur=%d R/W=%X/%X, next=%d R/W=%X/%X\n",
+ adreno_dev->cur_rb->id,
+ adreno_get_rptr(adreno_dev->cur_rb), adreno_dev->cur_rb->wptr,
+ adreno_dev->next_rb->id,
+ adreno_get_rptr(adreno_dev->next_rb),
+ adreno_dev->next_rb->wptr);
+
+ adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
+ adreno_dispatcher_schedule(device);
+}
+
+static void _a5xx_preemption_worker(struct work_struct *work)
+{
+ struct adreno_preemption *preempt = container_of(work,
+ struct adreno_preemption, work);
+ struct adreno_device *adreno_dev = container_of(preempt,
+ struct adreno_device, preempt);
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ /* Need to take the mutex to make sure that the power stays on */
+ mutex_lock(&device->mutex);
+
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_FAULTED))
+ _a5xx_preemption_fault(adreno_dev);
+
+ mutex_unlock(&device->mutex);
+}
+
+static void _a5xx_preemption_timer(unsigned long data)
+{
+ struct adreno_device *adreno_dev = (struct adreno_device *) data;
+
+ /* We should only be here from a triggered state */
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_FAULTED))
+ return;
+
+ /* Schedule the worker to take care of the details */
+ queue_work(system_unbound_wq, &adreno_dev->preempt.work);
+}
+
+/* Find the highest priority active ringbuffer */
+static struct adreno_ringbuffer *a5xx_next_ringbuffer(
+ struct adreno_device *adreno_dev)
+{
+ struct adreno_ringbuffer *rb;
+ unsigned long flags;
+ unsigned int i;
+
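+	/*
+	 * Ringbuffers are assumed to be stored in priority order, so the
+	 * first non-empty one found is the highest priority candidate
+	 */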
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ bool empty;
+
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+ empty = adreno_rb_empty(rb);
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+
+ if (empty == false)
+ return rb;
+ }
+
+ return NULL;
+}
+
+void a5xx_preemption_trigger(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct adreno_ringbuffer *next;
+ uint64_t ttbr0;
+ unsigned int contextidr;
+ unsigned long flags;
+
+ /* Put ourselves into a possible trigger state */
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_NONE, ADRENO_PREEMPT_START))
+ return;
+
+ /* Get the next ringbuffer to preempt in */
+ next = a5xx_next_ringbuffer(adreno_dev);
+
+ /*
+ * Nothing to do if every ringbuffer is empty or if the current
+ * ringbuffer is the only active one
+ */
+ if (next == NULL || next == adreno_dev->cur_rb) {
+ /*
+ * Update any critical things that might have been skipped while
+ * we were looking for a new ringbuffer
+ */
+
+ if (next != NULL) {
+ _update_wptr(adreno_dev);
+
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+ }
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+ return;
+ }
+
+ /* Turn off the dispatcher timer */
+ del_timer(&adreno_dev->dispatcher.timer);
+
+ /*
+ * This is the most critical section - we need to take care not to race
+ * until we have programmed the CP for the switch
+ */
+
+ spin_lock_irqsave(&next->preempt_lock, flags);
+
+ /* Get the pagetable from the pagetable info */
+ kgsl_sharedmem_readq(&next->pagetable_desc, &ttbr0,
+ PT_INFO_OFFSET(ttbr0));
+ kgsl_sharedmem_readl(&next->pagetable_desc, &contextidr,
+ PT_INFO_OFFSET(contextidr));
+
+ kgsl_sharedmem_writel(device, &next->preemption_desc,
+ PREEMPT_RECORD(wptr), next->wptr);
+
+ spin_unlock_irqrestore(&next->preempt_lock, flags);
+
+ /* And write it to the smmu info */
+ kgsl_sharedmem_writeq(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(ttbr0), ttbr0);
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(context_idr), contextidr);
+
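+	/* Point the CP at the incoming ringbuffer's preemption record */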
+ kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ lower_32_bits(next->preemption_desc.gpuaddr));
+ kgsl_regwrite(device, A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ upper_32_bits(next->preemption_desc.gpuaddr));
+
+ adreno_dev->next_rb = next;
+
+ /* Start the timer to detect a stuck preemption */
+ mod_timer(&adreno_dev->preempt.timer,
+ jiffies + msecs_to_jiffies(ADRENO_PREEMPT_TIMEOUT));
+
+ trace_adreno_preempt_trigger(adreno_dev->cur_rb, adreno_dev->next_rb);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_TRIGGERED);
+
+ /* Trigger the preemption */
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_PREEMPT, 1);
+}
+
+void a5xx_preempt_callback(struct adreno_device *adreno_dev, int bit)
+{
+ unsigned int status;
+
+ if (!adreno_move_preempt_state(adreno_dev,
+ ADRENO_PREEMPT_TRIGGERED, ADRENO_PREEMPT_PENDING))
+ return;
+
+ adreno_readreg(adreno_dev, ADRENO_REG_CP_PREEMPT, &status);
+
+ if (status != 0) {
+ KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
+ "preempt interrupt with non-zero status: %X\n", status);
+
+ /*
+ * Under the assumption that this is a race between the
+ * interrupt and the register, schedule the worker to clean up.
+ * If the status still hasn't resolved itself by the time we get
+ * there then we have to assume something bad happened
+ */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE);
+ adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
+ return;
+ }
+
+ del_timer(&adreno_dev->preempt.timer);
+
+ trace_adreno_preempt_done(adreno_dev->cur_rb,
+ adreno_dev->next_rb);
+
+ adreno_dev->prev_rb = adreno_dev->cur_rb;
+ adreno_dev->cur_rb = adreno_dev->next_rb;
+ adreno_dev->next_rb = NULL;
+
+ /* Update the wptr if it changed while preemption was ongoing */
+ _update_wptr(adreno_dev);
+
+ /* Update the dispatcher timer for the new command queue */
+ mod_timer(&adreno_dev->dispatcher.timer,
+ adreno_dev->cur_rb->dispatch_q.expires);
+
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+}
+
+void a5xx_preemption_schedule(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ mutex_lock(&device->mutex);
+
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_COMPLETE))
+ _a5xx_preemption_done(adreno_dev);
+
+ a5xx_preemption_trigger(adreno_dev);
+
+ mutex_unlock(&device->mutex);
+}
+
+unsigned int a5xx_preemption_pre_ibsubmit(
+ struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb,
+ unsigned int *cmds, struct kgsl_context *context)
+{
+ unsigned int *cmds_orig = cmds;
+ uint64_t gpuaddr = rb->preemption_desc.gpuaddr;
+ unsigned int preempt_style = 0;
+
+ if (context) {
+		/*
+		 * Preemption from secure to unsecure needs the Zap shader to
+		 * be run to clear all secure content. The CP does not know
+		 * during preemption if it is switching between secure and
+		 * unsecure contexts, so restrict secure contexts to
+		 * ringbuffer level preemption.
+		 */
+ if (context->flags & KGSL_CONTEXT_SECURE)
+ preempt_style = KGSL_CONTEXT_PREEMPT_STYLE_RINGBUFFER;
+ else
+ preempt_style = ADRENO_PREEMPT_STYLE(context->flags);
+ }
+
+	/*
+	 * CP_PREEMPT_ENABLE_GLOBAL (global preemption) can only be set by
+	 * the KMD in the ringbuffer:
+	 * 1) Set global preemption to 0x0 to disable global preemption.
+	 *    Only RB level preemption is allowed in this mode.
+	 * 2) Set global preemption to defer (0x2) for finegrain preemption.
+	 *    When global preemption is set to defer (0x2),
+	 *    CP_PREEMPT_ENABLE_LOCAL (local preemption) determines the
+	 *    preemption point. Local preemption can be enabled by both the
+	 *    UMD (within an IB) and the KMD.
+	 */
+ *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_GLOBAL, 1);
+ *cmds++ = ((preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN)
+ ? 2 : 0);
+
+ /* Turn CP protection OFF */
+ *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
+ *cmds++ = 0;
+
+	/*
+	 * During a context switch the CP will save context switch info to the
+	 * a5xx_cp_preemption_record pointed to by CONTEXT_SWITCH_SAVE_ADDR
+	 */
+ *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 1);
+ *cmds++ = lower_32_bits(gpuaddr);
+ *cmds++ = cp_type4_packet(A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI, 1);
+ *cmds++ = upper_32_bits(gpuaddr);
+
+ /* Turn CP protection ON */
+ *cmds++ = cp_type7_packet(CP_SET_PROTECTED_MODE, 1);
+ *cmds++ = 1;
+
+ /*
+ * Enable local preemption for finegrain preemption in case of
+ * a misbehaving IB
+ */
+ if (preempt_style == KGSL_CONTEXT_PREEMPT_STYLE_FINEGRAIN) {
+ *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
+ *cmds++ = 1;
+ } else {
+ *cmds++ = cp_type7_packet(CP_PREEMPT_ENABLE_LOCAL, 1);
+ *cmds++ = 0;
+ }
+
+ /* Enable CP_CONTEXT_SWITCH_YIELD packets in the IB2s */
+ *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
+ *cmds++ = 2;
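+
+	/*
+	 * Note: the stream above is seven two-dword packets, so the count
+	 * returned below is always 14
+	 */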
+
+ return (unsigned int) (cmds - cmds_orig);
+}
+
+int a5xx_preemption_yield_enable(unsigned int *cmds)
+{
+	/*
+	 * SRM -- set render mode (e.g. binning, direct render, etc.).
+	 * SRM is usually set by the UMD at the start of an IB to tell the CP
+	 * the type of preemption.
+	 * The KMD needs to set SRM to NULL to indicate to the CP that
+	 * rendering is done by the IB.
+	 */
+ *cmds++ = cp_type7_packet(CP_SET_RENDER_MODE, 5);
+ *cmds++ = 0;
+ *cmds++ = 0;
+ *cmds++ = 0;
+ *cmds++ = 0;
+ *cmds++ = 0;
+
+ *cmds++ = cp_type7_packet(CP_YIELD_ENABLE, 1);
+ *cmds++ = 1;
+
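+	/* 8 dwords total: 6 for CP_SET_RENDER_MODE plus 2 for CP_YIELD_ENABLE */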
+ return 8;
+}
+
+unsigned int a5xx_preemption_post_ibsubmit(struct adreno_device *adreno_dev,
+ unsigned int *cmds)
+{
+ int dwords = 0;
+
+ cmds[dwords++] = cp_type7_packet(CP_CONTEXT_SWITCH_YIELD, 4);
+ /* Write NULL to the address to skip the data write */
+ dwords += cp_gpuaddr(adreno_dev, &cmds[dwords], 0x0);
+ cmds[dwords++] = 1;
+ /* generate interrupt on preemption completion */
+ cmds[dwords++] = 1;
+
+ return dwords;
+}
+
+void a5xx_preemption_start(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+ struct adreno_ringbuffer *rb;
+ unsigned int i;
+
+ if (!adreno_is_preemption_enabled(adreno_dev))
+ return;
+
+ /* Force the state to be clear */
+ adreno_set_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE);
+
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(magic), A5XX_CP_SMMU_INFO_MAGIC_REF);
+ kgsl_sharedmem_writeq(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(ttbr0), MMU_DEFAULT_TTBR0(device));
+
+ /* The CP doesn't use the asid record, so poison it */
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(asid), 0xDECAFBAD);
+ kgsl_sharedmem_writel(device, &iommu->smmu_info,
+ PREEMPT_SMMU_RECORD(context_idr),
+ MMU_DEFAULT_CONTEXTIDR(device));
+
+ adreno_writereg64(adreno_dev,
+ ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ ADRENO_REG_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ iommu->smmu_info.gpuaddr);
+
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(wptr), 0);
+
+ adreno_ringbuffer_set_pagetable(rb,
+ device->mmu.defaultpagetable);
+ }
+
+}
+
+static int a5xx_preemption_ringbuffer_init(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb, uint64_t counteraddr)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ int ret;
+
+ ret = kgsl_allocate_global(device, &rb->preemption_desc,
+ A5XX_CP_CTXRECORD_SIZE_IN_BYTES, 0, KGSL_MEMDESC_PRIVILEGED);
+ if (ret)
+ return ret;
+
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(magic), A5XX_CP_CTXRECORD_MAGIC_REF);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(info), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(data), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(cntl), A5XX_CP_RB_CNTL_DEFAULT);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr), 0);
+ kgsl_sharedmem_writel(device, &rb->preemption_desc,
+ PREEMPT_RECORD(wptr), 0);
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rptr_addr), SCRATCH_RPTR_GPU_ADDR(device,
+ rb->id));
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(rbase), rb->buffer_desc.gpuaddr);
+ kgsl_sharedmem_writeq(device, &rb->preemption_desc,
+ PREEMPT_RECORD(counter), counteraddr);
+
+ return 0;
+}
+
+#ifdef CONFIG_QCOM_KGSL_IOMMU
+static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_iommu *iommu = KGSL_IOMMU_PRIV(device);
+
+ /* Allocate mem for storing preemption smmu record */
+ return kgsl_allocate_global(device, &iommu->smmu_info, PAGE_SIZE,
+ KGSL_MEMFLAGS_GPUREADONLY, KGSL_MEMDESC_PRIVILEGED);
+}
+#else
+static int a5xx_preemption_iommu_init(struct adreno_device *adreno_dev)
+{
+ return -ENODEV;
+}
+#endif
+
+int a5xx_preemption_init(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_preemption *preempt = &adreno_dev->preempt;
+ struct adreno_ringbuffer *rb;
+ int ret;
+ unsigned int i;
+ uint64_t addr;
+
+	/* CP-side preemption depends on having an IOMMU */
+ if (kgsl_mmu_get_mmutype(device) != KGSL_MMU_TYPE_IOMMU)
+ return -ENODEV;
+
+ INIT_WORK(&preempt->work, _a5xx_preemption_worker);
+
+ setup_timer(&preempt->timer, _a5xx_preemption_timer,
+ (unsigned long) adreno_dev);
+
+ /* Allocate mem for storing preemption counters */
+ ret = kgsl_allocate_global(device, &preempt->counters,
+ adreno_dev->num_ringbuffers *
+ A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE, 0, 0);
+ if (ret)
+ return ret;
+
+ addr = preempt->counters.gpuaddr;
+
+ /* Allocate mem for storing preemption switch record */
+ FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
+ ret = a5xx_preemption_ringbuffer_init(adreno_dev, rb, addr);
+ if (ret)
+ return ret;
+
+ addr += A5XX_CP_CTXRECORD_PREEMPTION_COUNTER_SIZE;
+ }
+
+ return a5xx_preemption_iommu_init(adreno_dev);
+}
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 1a1db3ab3dc9..9cbcd06d7658 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -226,8 +226,7 @@ static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
return;
- seq_printf(s, "\t%d: ib: expires: %lu",
- cmdbatch->timestamp, cmdbatch->expires);
+ seq_printf(s, "\t%d: ", cmdbatch->timestamp);
seq_puts(s, " flags: ");
print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 3f36a93ea110..ac3805800691 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -28,10 +28,10 @@
#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
-static unsigned int _dispatch_starvation_time = 2000;
+unsigned int adreno_dispatch_starvation_time = 2000;
/* Amount of time in ms that a starved RB is permitted to execute for */
-static unsigned int _dispatch_time_slice = 25;
+unsigned int adreno_dispatch_time_slice = 25;
/*
* If set then dispatcher tries to schedule lower priority RB's after if they
@@ -78,6 +78,24 @@ unsigned int adreno_cmdbatch_timeout = 2000;
/* Interval for reading and comparing fault detection registers */
static unsigned int _fault_timer_interval = 200;
+#define CMDQUEUE_RB(_cmdqueue) \
+ ((struct adreno_ringbuffer *) \
+ container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+
+#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
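+
+/*
+ * CMDQUEUE_RB() maps a dispatch queue back to its owning ringbuffer via
+ * container_of() and CMDQUEUE() goes the other way; for example,
+ * CMDQUEUE(adreno_dev->cur_rb) is the current ringbuffer's dispatch queue
+ */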
+
+static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue);
+
+static inline bool cmdqueue_is_current(
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+
+ return (adreno_dev->cur_rb == rb);
+}
+
static void _add_context(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
@@ -283,7 +301,8 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
/* Retire pending GPU events for the object */
kgsl_process_event_group(device, &context->events);
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb);
+ trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb));
kgsl_cmdbatch_destroy(cmdbatch);
}
@@ -576,8 +595,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
if (dispatcher->inflight == 1) {
if (ret == 0) {
+
+ /* Stop fault timer before reading fault registers */
+ del_timer_sync(&dispatcher->fault_timer);
+
fault_detect_read(adreno_dev);
+ /* Start the fault timer on first submission */
+ start_fault_timer(adreno_dev);
+
if (!test_and_set_bit(ADRENO_DISPATCHER_ACTIVE,
&dispatcher->priv))
reinit_completion(&dispatcher->idle_gate);
@@ -594,11 +620,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
dispatch_q->inflight--;
/*
+ * Don't log a message in case of:
* -ENOENT means that the context was detached before the
- * command was submitted - don't log a message in that case
+ * command was submitted
+ * -ENOSPC means that there temporarily isn't any room in the
+ * ringbuffer
+	 * -EPROTO means that a fault is currently being worked on
*/
- if (ret != -ENOENT)
+ if (ret != -ENOENT && ret != -ENOSPC && ret != -EPROTO)
KGSL_DRV_ERR(device,
"Unable to submit command to the ringbuffer %d\n",
ret);
@@ -609,7 +639,8 @@ static int sendcmd(struct adreno_device *adreno_dev,
nsecs = do_div(secs, 1000000000);
trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
- time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb);
+ time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb));
cmdbatch->submit_ticks = time.ticks;
@@ -618,28 +649,26 @@ static int sendcmd(struct adreno_device *adreno_dev,
ADRENO_DISPATCH_CMDQUEUE_SIZE;
/*
- * If this is the first command in the pipe then the GPU will
- * immediately start executing it so we can start the expiry timeout on
- * the command batch here. Subsequent command batches will have their
- * timer started when the previous command batch is retired.
- * Set the timer if the cmdbatch was submitted to current
- * active RB else this timer will need to be set when the
- * RB becomes active, also if dispatcher is not is CLEAR
- * state then the cmdbatch it is currently executing is
- * unclear so do not set timer in that case either.
+ * For the first submission in any given command queue update the
+ * expected expire time - this won't actually be used / updated until
+ * the command queue in question goes current, but universally setting
+	 * it here avoids the possibility of some race conditions with preempt
*/
- if (1 == dispatch_q->inflight &&
- (&(adreno_dev->cur_rb->dispatch_q)) == dispatch_q &&
- adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR)) {
- cmdbatch->expires = jiffies +
+
+ if (dispatch_q->inflight == 1)
+ dispatch_q->expires = jiffies +
msecs_to_jiffies(adreno_cmdbatch_timeout);
- mod_timer(&dispatcher->timer, cmdbatch->expires);
+
+ /*
+ * If we believe ourselves to be current and preemption isn't a thing,
+ * then set up the timer. If this misses, then preemption is indeed a
+ * thing and the timer will be set up in due time
+ */
+	if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
+ if (cmdqueue_is_current(dispatch_q))
+ mod_timer(&dispatcher->timer, dispatch_q->expires);
}
- /* Start the fault detection timer on the first submission */
- if (dispatcher->inflight == 1)
- start_fault_timer(adreno_dev);
/*
* we just submitted something, readjust ringbuffer
@@ -924,87 +953,6 @@ static int get_timestamp(struct adreno_context *drawctxt,
}
/**
- * adreno_dispatcher_preempt_timer() - Timer that triggers when preemption has
- * not completed
- * @data: Pointer to the adreno device that did not preempt in a timely manner
- */
-static void adreno_dispatcher_preempt_timer(unsigned long data)
-{
- struct adreno_device *adreno_dev = (struct adreno_device *) data;
- struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
-
- KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
- "Preemption timed out. cur_rb rptr/wptr %x/%x id %d, next_rb rptr/wptr %x/%x id %d, disp_state: %d\n",
- adreno_dev->cur_rb->rptr, adreno_dev->cur_rb->wptr,
- adreno_dev->cur_rb->id, adreno_dev->next_rb->rptr,
- adreno_dev->next_rb->wptr, adreno_dev->next_rb->id,
- atomic_read(&dispatcher->preemption_state));
- adreno_set_gpu_fault(adreno_dev, ADRENO_PREEMPT_FAULT);
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
-}
-
-/**
- * adreno_dispatcher_get_highest_busy_rb() - Returns the highest priority RB
- * which is busy
- * @adreno_dev: Device whose RB is returned
- */
-struct adreno_ringbuffer *adreno_dispatcher_get_highest_busy_rb(
- struct adreno_device *adreno_dev)
-{
- struct adreno_ringbuffer *rb, *highest_busy_rb = NULL;
- int i;
-
- FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- if (rb->rptr != rb->wptr && !highest_busy_rb) {
- highest_busy_rb = rb;
- goto done;
- }
-
- if (!adreno_disp_preempt_fair_sched)
- continue;
-
- switch (rb->starve_timer_state) {
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT:
- if (rb->rptr != rb->wptr &&
- adreno_dev->cur_rb != rb) {
- rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT;
- rb->sched_timer = jiffies;
- }
- break;
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_INIT:
- if (time_after(jiffies, rb->sched_timer +
- msecs_to_jiffies(_dispatch_starvation_time))) {
- rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED;
- /* halt dispatcher to remove starvation */
- adreno_get_gpu_halt(adreno_dev);
- }
- break;
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_SCHEDULED:
- BUG_ON(adreno_dev->cur_rb != rb);
- /*
- * If the RB has not been running for the minimum
- * time slice then allow it to run
- */
- if ((rb->rptr != rb->wptr) && time_before(jiffies,
- adreno_dev->cur_rb->sched_timer +
- msecs_to_jiffies(_dispatch_time_slice)))
- highest_busy_rb = rb;
- else
- rb->starve_timer_state =
- ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
- break;
- case ADRENO_DISPATCHER_RB_STARVE_TIMER_ELAPSED:
- default:
- break;
- }
- }
-done:
- return highest_busy_rb;
-}
-
-/**
* adreno_dispactcher_queue_cmd() - Queue a new command in the context
* @adreno_dev: Pointer to the adreno device struct
* @drawctxt: Pointer to the adreno draw context
@@ -1433,7 +1381,7 @@ static void adreno_fault_header(struct kgsl_device *device,
if (rb != NULL)
pr_fault(device, cmdbatch,
"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
- rb->id, rb->rptr, rb->wptr);
+ rb->id, rptr, rb->wptr);
} else {
int id = (rb != NULL) ? rb->id : -1;
@@ -1444,7 +1392,7 @@ static void adreno_fault_header(struct kgsl_device *device,
if (rb != NULL)
dev_err(device->dev,
"RB[%d] gpu fault rb sw r/w %4.4x/%4.4x\n",
- rb->id, rb->rptr, rb->wptr);
+ rb->id, rptr, rb->wptr);
}
}
@@ -1751,6 +1699,27 @@ replay:
kfree(replay);
}
+static void do_header_and_snapshot(struct kgsl_device *device,
+ struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+{
+ /* Always dump the snapshot on a non-cmdbatch failure */
+ if (cmdbatch == NULL) {
+ adreno_fault_header(device, rb, NULL);
+ kgsl_device_snapshot(device, NULL);
+ return;
+ }
+
+ /* Skip everything if the PMDUMP flag is set */
+ if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy))
+ return;
+
+ /* Print the fault header */
+ adreno_fault_header(device, rb, cmdbatch);
+
+ if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
+ kgsl_device_snapshot(device, cmdbatch->context);
+}
+
static int dispatcher_do_fault(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -1787,7 +1756,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
/* Turn off all the timers */
del_timer_sync(&dispatcher->timer);
del_timer_sync(&dispatcher->fault_timer);
- del_timer_sync(&dispatcher->preempt_timer);
+ del_timer_sync(&adreno_dev->preempt.timer);
mutex_lock(&device->mutex);
@@ -1813,14 +1782,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
* retire cmdbatches from all the dispatch_q's before starting recovery
*/
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- adreno_dispatch_process_cmdqueue(adreno_dev,
- &(rb->dispatch_q), 0);
+ adreno_dispatch_retire_cmdqueue(adreno_dev,
+ &(rb->dispatch_q));
/* Select the active dispatch_q */
if (base == rb->buffer_desc.gpuaddr) {
dispatch_q = &(rb->dispatch_q);
hung_rb = rb;
- adreno_readreg(adreno_dev, ADRENO_REG_CP_RB_RPTR,
- &hung_rb->rptr);
if (adreno_dev->cur_rb != hung_rb) {
adreno_dev->prev_rb = adreno_dev->cur_rb;
adreno_dev->cur_rb = hung_rb;
@@ -1834,7 +1801,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
}
}
- if (dispatch_q && (dispatch_q->tail != dispatch_q->head)) {
+ if (!adreno_cmdqueue_is_empty(dispatch_q)) {
cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
trace_adreno_cmdbatch_fault(cmdbatch, fault);
}
@@ -1842,17 +1809,7 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- /*
- * Dump the snapshot information if this is the first
- * detected fault for the oldest active command batch
- */
-
- if (cmdbatch == NULL ||
- !test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy)) {
- adreno_fault_header(device, hung_rb, cmdbatch);
- kgsl_device_snapshot(device,
- cmdbatch ? cmdbatch->context : NULL);
- }
+ do_header_and_snapshot(device, hung_rb, cmdbatch);
/* Terminate the stalled transaction and resume the IOMMU */
if (fault & ADRENO_IOMMU_PAGE_FAULT)
@@ -1860,8 +1817,6 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
/* Reset the dispatcher queue */
dispatcher->inflight = 0;
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
/* Reset the GPU and make sure halt is not set during recovery */
halt = adreno_gpu_halt(adreno_dev);
@@ -1875,12 +1830,12 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
if (hung_rb != NULL) {
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_MAX + hung_rb->id,
- soptimestamp), hung_rb->timestamp);
+ MEMSTORE_RB_OFFSET(hung_rb, soptimestamp),
+ hung_rb->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_MAX + hung_rb->id,
- eoptimestamp), hung_rb->timestamp);
+ MEMSTORE_RB_OFFSET(hung_rb, eoptimestamp),
+ hung_rb->timestamp);
/* Schedule any pending events to be run */
kgsl_process_event_group(device, &hung_rb->events);
@@ -1953,139 +1908,170 @@ static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev,
*retire = entry->retired;
}
-int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
- int long_ib_detect)
+static void retire_cmdbatch(struct adreno_device *adreno_dev,
+ struct kgsl_cmdbatch *cmdbatch)
{
- struct adreno_dispatcher *dispatcher = &(adreno_dev->dispatcher);
- uint64_t start_ticks = 0, retire_ticks = 0;
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ uint64_t start = 0, end = 0;
- struct adreno_dispatcher_cmdqueue *active_q =
- &(adreno_dev->cur_rb->dispatch_q);
+ if (cmdbatch->fault_recovery != 0) {
+ set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv);
+ _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch);
+ }
+
+ if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
+ cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
+
+ trace_adreno_cmdbatch_retired(cmdbatch, (int) dispatcher->inflight,
+ start, end, ADRENO_CMDBATCH_RB(cmdbatch),
+ adreno_get_rptr(drawctxt->rb));
+
+ drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
+ end - cmdbatch->submit_ticks;
+
+ drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
+ SUBMIT_RETIRE_TICKS_SIZE;
+
+ kgsl_cmdbatch_destroy(cmdbatch);
+}
+
+static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int count = 0;
- while (dispatch_q->head != dispatch_q->tail) {
+ while (!adreno_cmdqueue_is_empty(cmdqueue)) {
struct kgsl_cmdbatch *cmdbatch =
- dispatch_q->cmd_q[dispatch_q->head];
- struct adreno_context *drawctxt;
- BUG_ON(cmdbatch == NULL);
+ cmdqueue->cmd_q[cmdqueue->head];
- drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ if (!kgsl_check_timestamp(device, cmdbatch->context,
+ cmdbatch->timestamp))
+ break;
- /*
- * First try to expire the timestamp. This happens if the
- * context is valid and the timestamp expired normally or if the
- * context was destroyed before the command batch was finished
- * in the GPU. Either way retire the command batch advance the
- * pointers and continue processing the queue
- */
+ retire_cmdbatch(adreno_dev, cmdbatch);
- if (kgsl_check_timestamp(KGSL_DEVICE(adreno_dev),
- cmdbatch->context, cmdbatch->timestamp)) {
+ dispatcher->inflight--;
+ cmdqueue->inflight--;
- /*
- * If the cmdbatch in question had faulted announce its
- * successful completion to the world
- */
+ cmdqueue->cmd_q[cmdqueue->head] = NULL;
- if (cmdbatch->fault_recovery != 0) {
- /* Mark the context as faulted and recovered */
- set_bit(ADRENO_CONTEXT_FAULT,
- &cmdbatch->context->priv);
+ cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head,
+ ADRENO_DISPATCH_CMDQUEUE_SIZE);
- _print_recovery(KGSL_DEVICE(adreno_dev),
- cmdbatch);
- }
+ count++;
+ }
- /* Reduce the number of inflight command batches */
- dispatcher->inflight--;
- dispatch_q->inflight--;
+ return count;
+}
- /*
- * If kernel profiling is enabled get the submit and
- * retired ticks from the buffer
- */
+static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head];
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
- cmdbatch_profile_ticks(adreno_dev, cmdbatch,
- &start_ticks, &retire_ticks);
+ /* Don't timeout if the timer hasn't expired yet (duh) */
+ if (time_is_after_jiffies(cmdqueue->expires))
+ return;
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start_ticks,
- retire_ticks, ADRENO_CMDBATCH_RB(cmdbatch));
+ /* Don't timeout if the IB timeout is disabled globally */
+ if (!adreno_long_ib_detect(adreno_dev))
+ return;
- /* Record the delta between submit and retire ticks */
- drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
- retire_ticks - cmdbatch->submit_ticks;
+ /* Don't time out if the context has disabled it */
+ if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ return;
- drawctxt->ticks_index = (drawctxt->ticks_index + 1)
- % SUBMIT_RETIRE_TICKS_SIZE;
+ pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n",
+ cmdbatch->context->id, cmdbatch->timestamp);
- /* Zero the old entry*/
- dispatch_q->cmd_q[dispatch_q->head] = NULL;
+ adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
+}
- /* Advance the buffer head */
- dispatch_q->head = CMDQUEUE_NEXT(dispatch_q->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
+static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue);
- /* Destroy the retired command batch */
- kgsl_cmdbatch_destroy(cmdbatch);
+ /* Nothing to do if there are no pending commands */
+ if (adreno_cmdqueue_is_empty(cmdqueue))
+ return count;
- /* Update the expire time for the next command batch */
+ /* Don't update the cmdqueue timeout if we are about to preempt out */
+ if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
+ return count;
- if (dispatch_q->inflight > 0 &&
- dispatch_q == active_q) {
- cmdbatch =
- dispatch_q->cmd_q[dispatch_q->head];
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(
- adreno_cmdbatch_timeout);
- }
+ /* Don't update the cmdqueue timeout if it isn't active */
+ if (!cmdqueue_is_current(cmdqueue))
+ return count;
- count++;
- continue;
- }
- /*
- * Break here if fault detection is disabled for the context or
-	 * if the long running IB detection is disabled device wide or
- * if the dispatch q is not active
- * Long running command buffers will be allowed to run to
- * completion - but badly behaving command buffers (infinite
- * shaders etc) can end up running forever.
- */
+ /*
+ * If the current ringbuffer retired any commands then universally
+ * reset the timeout
+ */
- if (!long_ib_detect ||
- drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE
- || dispatch_q != active_q)
- break;
+ if (count) {
+ cmdqueue->expires = jiffies +
+ msecs_to_jiffies(adreno_cmdbatch_timeout);
+ return count;
+ }
- /*
- * The last line of defense is to check if the command batch has
- * timed out. If we get this far but the timeout hasn't expired
- * yet then the GPU is still ticking away
- */
+ /*
+ * If we get here then 1) the ringbuffer is current and 2) we haven't
+	 * retired anything. Check to see if the timeout is valid for the
+ * current cmdbatch and fault if it has expired
+ */
+ _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ return 0;
+}
- if (time_is_after_jiffies(cmdbatch->expires))
- break;
+/* Update the dispatcher timers */
+static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- /* Boom goes the dynamite */
+ /* Kick the idle timer */
+ mutex_lock(&device->mutex);
+ kgsl_pwrscale_update(device);
+ mod_timer(&device->idle_timer,
+ jiffies + device->pwrctrl.interval_timeout);
+ mutex_unlock(&device->mutex);
- pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
- "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ /* Check to see if we need to update the command timer */
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
+ struct adreno_dispatcher_cmdqueue *cmdqueue =
+ CMDQUEUE(adreno_dev->cur_rb);
- adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
- break;
+ if (!adreno_cmdqueue_is_empty(cmdqueue))
+ mod_timer(&dispatcher->timer, cmdqueue->expires);
}
- return count;
}
-/**
- * adreno_dispatcher_work() - Master work handler for the dispatcher
- * @work: Pointer to the work struct for the current work queue
- *
- * Process expired commands and send new ones.
- */
+/* Take down the dispatcher and release any power states */
+static void _dispatcher_power_down(struct adreno_device *adreno_dev)
+{
+ struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
+
+ mutex_lock(&device->mutex);
+
+ if (test_and_clear_bit(ADRENO_DISPATCHER_ACTIVE, &dispatcher->priv))
+ complete_all(&dispatcher->idle_gate);
+
+ del_timer_sync(&dispatcher->fault_timer);
+
+ if (test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
+ kgsl_active_count_put(device);
+ clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
+ }
+
+ mutex_unlock(&device->mutex);
+}
+
static void adreno_dispatcher_work(struct work_struct *work)
{
struct adreno_dispatcher *dispatcher =
@@ -2095,95 +2081,50 @@ static void adreno_dispatcher_work(struct work_struct *work)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int count = 0;
- int cur_rb_id = adreno_dev->cur_rb->id;
+ unsigned int i = 0;
mutex_lock(&dispatcher->mutex);
- if (ADRENO_DISPATCHER_PREEMPT_CLEAR ==
- atomic_read(&dispatcher->preemption_state))
- /* process the active q*/
- count = adreno_dispatch_process_cmdqueue(adreno_dev,
- &(adreno_dev->cur_rb->dispatch_q),
- adreno_long_ib_detect(adreno_dev));
-
- else if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED ==
- atomic_read(&dispatcher->preemption_state))
- count = adreno_dispatch_process_cmdqueue(adreno_dev,
- &(adreno_dev->cur_rb->dispatch_q), 0);
-
- /* Check if gpu fault occurred */
- if (dispatcher_do_fault(adreno_dev))
- goto done;
-
- if (gpudev->preemption_schedule)
- gpudev->preemption_schedule(adreno_dev);
-
- if (cur_rb_id != adreno_dev->cur_rb->id) {
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- &(adreno_dev->cur_rb->dispatch_q);
- /* active level switched, clear new level cmdbatches */
- count = adreno_dispatch_process_cmdqueue(adreno_dev,
- dispatch_q,
- adreno_long_ib_detect(adreno_dev));
- /*
- * If GPU has already completed all the commands in new incoming
- * RB then we may not get another interrupt due to which
- * dispatcher may not run again. Schedule dispatcher here so
- * we can come back and process the other RB's if required
- */
- if (dispatch_q->head == dispatch_q->tail)
- adreno_dispatcher_schedule(device);
- }
/*
- * If inflight went to 0, queue back up the event processor to catch
- * stragglers
+	 * As long as there are inflight commands, process retired commands
+	 * from all cmdqueues
*/
- if (dispatcher->inflight == 0 && count)
- kgsl_schedule_work(&device->event_work);
-
- /* Try to dispatch new commands */
- _adreno_dispatcher_issuecmds(adreno_dev);
-
-done:
- /* Either update the timer for the next command batch or disable it */
- if (dispatcher->inflight) {
- struct kgsl_cmdbatch *cmdbatch =
- adreno_dev->cur_rb->dispatch_q.cmd_q[
- adreno_dev->cur_rb->dispatch_q.head];
- if (cmdbatch && adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR))
- /* Update the timeout timer for the next cmdbatch */
- mod_timer(&dispatcher->timer, cmdbatch->expires);
-
- /* There are still things in flight - update the idle counts */
- mutex_lock(&device->mutex);
- kgsl_pwrscale_update(device);
- mod_timer(&device->idle_timer, jiffies +
- device->pwrctrl.interval_timeout);
- mutex_unlock(&device->mutex);
- } else {
- /* There is nothing left in the pipeline. Shut 'er down boys */
- mutex_lock(&device->mutex);
+ for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
+ struct adreno_dispatcher_cmdqueue *cmdqueue =
+ CMDQUEUE(&adreno_dev->ringbuffers[i]);
- if (test_and_clear_bit(ADRENO_DISPATCHER_ACTIVE,
- &dispatcher->priv))
- complete_all(&dispatcher->idle_gate);
+ count += adreno_dispatch_process_cmdqueue(adreno_dev,
+ cmdqueue);
+ if (dispatcher->inflight == 0)
+ break;
+ }
- /*
- * Stop the fault timer before decrementing the active count to
- * avoid reading the hardware registers while we are trying to
- * turn clocks off
- */
- del_timer_sync(&dispatcher->fault_timer);
+ /*
+ * dispatcher_do_fault() returns 0 if no faults occurred. If that is the
+ * case, then clean up preemption and try to schedule more work
+ */
+ if (dispatcher_do_fault(adreno_dev) == 0) {
+ /* Clean up after preemption */
+ if (gpudev->preemption_schedule)
+ gpudev->preemption_schedule(adreno_dev);
- if (test_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv)) {
- kgsl_active_count_put(device);
- clear_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
- }
+ /* Re-kick the event engine to catch stragglers */
+ if (dispatcher->inflight == 0 && count != 0)
+ kgsl_schedule_work(&device->event_work);
- mutex_unlock(&device->mutex);
+	/* Run the scheduler to dispatch new commands */
+ _adreno_dispatcher_issuecmds(adreno_dev);
}
+ /*
+ * If there are commands pending, update the timers, otherwise release
+ * the power state to prepare for power down
+ */
+ if (dispatcher->inflight > 0)
+ _dispatcher_update_timers(adreno_dev);
+ else
+ _dispatcher_power_down(adreno_dev);
+
mutex_unlock(&dispatcher->mutex);
}
@@ -2305,7 +2246,7 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev)
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
struct adreno_dispatcher_cmdqueue *dispatch_q =
&(rb->dispatch_q);
- while (dispatch_q->head != dispatch_q->tail) {
+ while (!adreno_cmdqueue_is_empty(dispatch_q)) {
kgsl_cmdbatch_destroy(
dispatch_q->cmd_q[dispatch_q->head]);
dispatch_q->head = (dispatch_q->head + 1)
@@ -2395,9 +2336,9 @@ static DISPATCHER_UINT_ATTR(fault_throttle_burst, 0644, 0,
static DISPATCHER_UINT_ATTR(disp_preempt_fair_sched, 0644, 0,
adreno_disp_preempt_fair_sched);
static DISPATCHER_UINT_ATTR(dispatch_time_slice, 0644, 0,
- _dispatch_time_slice);
+ adreno_dispatch_time_slice);
static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0,
- _dispatch_starvation_time);
+ adreno_dispatch_starvation_time);
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
@@ -2474,9 +2415,6 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
setup_timer(&dispatcher->fault_timer, adreno_dispatcher_fault_timer,
(unsigned long) adreno_dev);
- setup_timer(&dispatcher->preempt_timer, adreno_dispatcher_preempt_timer,
- (unsigned long) adreno_dev);
-
INIT_WORK(&dispatcher->work, adreno_dispatcher_work);
init_completion(&dispatcher->idle_gate);
@@ -2485,9 +2423,6 @@ int adreno_dispatcher_init(struct adreno_device *adreno_dev)
plist_head_init(&dispatcher->pending);
spin_lock_init(&dispatcher->plist_lock);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_CLEAR);
-
ret = kobject_init_and_add(&dispatcher->kobj, &ktype_dispatcher,
&device->dev->kobj, "dispatch");
@@ -2544,49 +2479,3 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev)
adreno_dispatcher_schedule(device);
return ret;
}
-
-void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q)
-{
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_cmdbatch *cmdbatch;
-
- if (dispatch_q->head != dispatch_q->tail) {
- /*
-		 * retire cmdbatches from previous q, and don't check for
- * timeout since the cmdbatch may have been preempted
- */
- adreno_dispatch_process_cmdqueue(adreno_dev,
- dispatch_q, 0);
- }
-
- /* set the timer for the first cmdbatch of active dispatch_q */
- dispatch_q = &(adreno_dev->cur_rb->dispatch_q);
- if (dispatch_q->head != dispatch_q->tail) {
- cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
- cmdbatch->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
- }
- kgsl_schedule_work(&device->event_work);
-}
-
-/**
- * adreno_dispatcher_preempt_callback() - Callback function for CP_SW interrupt
- * @adreno_dev: The device on which the interrupt occurred
- * @bit: Interrupt bit in the interrupt status register
- */
-void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
- int bit)
-{
- struct adreno_dispatcher *dispatcher = &(adreno_dev->dispatcher);
-
- if (ADRENO_DISPATCHER_PREEMPT_TRIGGERED !=
- atomic_read(&dispatcher->preemption_state))
- return;
-
- trace_adreno_hw_preempt_trig_to_comp_int(adreno_dev->cur_rb,
- adreno_dev->next_rb);
- atomic_set(&dispatcher->preemption_state,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE);
- adreno_dispatcher_schedule(KGSL_DEVICE(adreno_dev));
-}
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 308d5b936819..699c3e4adb27 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -11,29 +11,13 @@
*
*/
-
#ifndef ____ADRENO_DISPATCHER_H
#define ____ADRENO_DISPATCHER_H
-/* Time to allow preemption to complete (in ms) */
-#define ADRENO_DISPATCH_PREEMPT_TIMEOUT 10000
-
extern unsigned int adreno_disp_preempt_fair_sched;
extern unsigned int adreno_cmdbatch_timeout;
-
-/**
- * enum adreno_dispatcher_preempt_states - States of dispatcher for ringbuffer
- * preemption
- * @ADRENO_DISPATCHER_PREEMPT_CLEAR: No preemption is underway,
- * only 1 preemption can be underway at any point
- * @ADRENO_DISPATCHER_PREEMPT_TRIGGERED: A preemption is underway
- * @ADRENO_DISPATCHER_PREEMPT_COMPLETE: A preemption has just completed
- */
-enum adreno_dispatcher_preempt_states {
- ADRENO_DISPATCHER_PREEMPT_CLEAR = 0,
- ADRENO_DISPATCHER_PREEMPT_TRIGGERED,
- ADRENO_DISPATCHER_PREEMPT_COMPLETE,
-};
+extern unsigned int adreno_dispatch_starvation_time;
+extern unsigned int adreno_dispatch_time_slice;
/**
* enum adreno_dispatcher_starve_timer_states - Starvation control states of
@@ -71,6 +55,7 @@ enum adreno_dispatcher_starve_timer_states {
* @head: Head pointer to the q
 * @tail: Queue's tail pointer
* @active_context_count: Number of active contexts seen in this rb cmdqueue
+ * @expires: The jiffies value at which this cmdqueue has run too long
*/
struct adreno_dispatcher_cmdqueue {
struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE];
@@ -78,6 +63,7 @@ struct adreno_dispatcher_cmdqueue {
unsigned int head;
unsigned int tail;
int active_context_count;
+ unsigned long expires;
};
/**
@@ -92,11 +78,6 @@ struct adreno_dispatcher_cmdqueue {
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
* @idle_gate: Gate to wait on for dispatcher to idle
- * @preemption_state: Indicated what state the dispatcher is in, states are
- * defined by enum adreno_dispatcher_preempt_states
- * @preempt_token_submit: Indicates if a preempt token has been submitted in
- * the current ringbuffer.
- * @preempt_timer: Timer to track if preemption occurred within specified time
* @disp_preempt_fair_sched: If set then dispatcher will try to be fair to
* starving RB's by scheduling them in and enforcing a minimum time slice
* for every RB that is scheduled to run on the device
@@ -113,9 +94,6 @@ struct adreno_dispatcher {
struct work_struct work;
struct kobject kobj;
struct completion idle_gate;
- atomic_t preemption_state;
- int preempt_token_submit;
- struct timer_list preempt_timer;
unsigned int disp_preempt_fair_sched;
};
@@ -141,12 +119,12 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
struct adreno_context *drawctxt);
void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
int bit);
-struct adreno_ringbuffer *adreno_dispatcher_get_highest_busy_rb(
- struct adreno_device *adreno_dev);
-int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
- int long_ib_detect);
void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
struct adreno_dispatcher_cmdqueue *dispatch_q);
+static inline bool adreno_cmdqueue_is_empty(
+ struct adreno_dispatcher_cmdqueue *cmdqueue)
+{
+ return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail);
+}
#endif /* __ADRENO_DISPATCHER_H */
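The new adreno_cmdqueue_is_empty() helper above encodes the usual circular-buffer convention where head == tail means empty. A minimal user-space model of that convention follows (queue size and names are illustrative, not the driver's):

#include <stdbool.h>
#include <stdio.h>

#define QUEUE_SIZE 128	/* stands in for ADRENO_DISPATCH_CMDQUEUE_SIZE */

struct cmdqueue {
	unsigned int head;	/* next entry to retire */
	unsigned int tail;	/* next free slot */
};

/* Mirrors adreno_cmdqueue_is_empty(): empty when both indices meet */
static bool queue_is_empty(const struct cmdqueue *q)
{
	return q != NULL && q->head == q->tail;
}

int main(void)
{
	struct cmdqueue q = { .head = 0, .tail = 0 };

	printf("empty: %d\n", queue_is_empty(&q));	/* 1 */
	q.tail = (q.tail + 1) % QUEUE_SIZE;		/* one batch queued */
	printf("empty: %d\n", queue_is_empty(&q));	/* 0 */
	return 0;
}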
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index d8498d938b6a..fb95f6108fb8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -346,7 +346,8 @@ adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
KGSL_CONTEXT_PWR_CONSTRAINT |
KGSL_CONTEXT_IFH_NOP |
KGSL_CONTEXT_SECURE |
- KGSL_CONTEXT_PREEMPT_STYLE_MASK);
+ KGSL_CONTEXT_PREEMPT_STYLE_MASK |
+ KGSL_CONTEXT_NO_SNAPSHOT);
/* Check for errors before trying to initialize */
@@ -466,20 +467,6 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
list_del_init(&drawctxt->active_node);
spin_unlock(&adreno_dev->active_list_lock);
- /* deactivate context */
- mutex_lock(&device->mutex);
- if (rb->drawctxt_active == drawctxt) {
- if (adreno_dev->cur_rb == rb) {
- if (!kgsl_active_count_get(device)) {
- adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);
- kgsl_active_count_put(device);
- } else
- BUG();
- } else
- adreno_drawctxt_switch(adreno_dev, rb, NULL, 0);
- }
- mutex_unlock(&device->mutex);
-
spin_lock(&drawctxt->lock);
count = drawctxt_detach_cmdbatches(drawctxt, list);
spin_unlock(&drawctxt->lock);
@@ -548,12 +535,21 @@ void adreno_drawctxt_destroy(struct kgsl_context *context)
kfree(drawctxt);
}
+static void _drawctxt_switch_wait_callback(struct kgsl_device *device,
+ struct kgsl_event_group *group,
+ void *priv, int result)
+{
+ struct adreno_context *drawctxt = (struct adreno_context *) priv;
+
+ kgsl_context_put(&drawctxt->base);
+}
+
/**
* adreno_drawctxt_switch - switch the current draw context in a given RB
* @adreno_dev - The 3D device that owns the context
 * @rb: The ringbuffer pointer on which the current context is being changed
* @drawctxt - the 3D context to switch to
- * @flags - Flags to accompany the switch (from user space)
+ * @flags: Control flags for the switch
*
* Switch the current draw context in given RB
*/
@@ -583,8 +579,7 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base))
return -ENOENT;
- trace_adreno_drawctxt_switch(rb,
- drawctxt, flags);
+ trace_adreno_drawctxt_switch(rb, drawctxt);
/* Get a refcount to the new instance */
if (drawctxt) {
@@ -596,16 +591,18 @@ int adreno_drawctxt_switch(struct adreno_device *adreno_dev,
/* No context - set the default pagetable and thats it. */
new_pt = device->mmu.defaultpagetable;
}
- ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt);
- if (ret) {
- KGSL_DRV_ERR(device,
- "Failed to set pagetable on rb %d\n", rb->id);
+ ret = adreno_ringbuffer_set_pt_ctx(rb, new_pt, drawctxt, flags);
+ if (ret)
return ret;
- }
- /* Put the old instance of the active drawctxt */
- if (rb->drawctxt_active)
- kgsl_context_put(&rb->drawctxt_active->base);
+ if (rb->drawctxt_active) {
+ /* Wait for the timestamp to expire */
+ if (kgsl_add_event(device, &rb->events, rb->timestamp,
+ _drawctxt_switch_wait_callback,
+ rb->drawctxt_active)) {
+ kgsl_context_put(&rb->drawctxt_active->base);
+ }
+ }
rb->drawctxt_active = drawctxt;
return 0;
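With this change adreno_drawctxt_switch() no longer puts the outgoing context immediately: the reference is dropped from an event callback once rb->timestamp retires, falling back to an immediate put if the event cannot be added. A user-space sketch of that deferred-put pattern, with every name invented for illustration:

#include <stdio.h>

struct ctx { int refcount; };

struct release_event {
	unsigned int ts;	/* timestamp the event waits for */
	struct ctx *victim;	/* reference dropped when ts retires */
};

static void ctx_put(struct ctx *c)
{
	if (--c->refcount == 0)
		printf("context freed\n");
}

/* Called as timestamps retire, like the rb->events group processing */
static void process_events(struct release_event *ev, unsigned int retired)
{
	if (ev->victim && (int)(retired - ev->ts) >= 0) {
		ctx_put(ev->victim);	/* the _drawctxt_switch_wait_callback step */
		ev->victim = NULL;
	}
}

int main(void)
{
	struct ctx old = { .refcount = 1 };
	struct release_event ev = { .ts = 42, .victim = &old };

	process_events(&ev, 41);	/* not retired yet: reference kept */
	process_events(&ev, 42);	/* retired: reference dropped here */
	return 0;
}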
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 7e80247e9322..5ea911954991 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -104,6 +104,9 @@ enum adreno_context_priv {
ADRENO_CONTEXT_SKIP_CMD,
};
+/* Flags for adreno_drawctxt_switch() */
+#define ADRENO_CONTEXT_SWITCH_FORCE_GPU BIT(0)
+
struct kgsl_context *adreno_drawctxt_create(struct kgsl_device_private *,
uint32_t *flags);
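ADRENO_CONTEXT_SWITCH_FORCE_GPU is consumed in adreno_iommu_set_pt_ctx() below, where it vetoes the CPU fast path for the switch. A tiny model of that gating; the helper here is a made-up stand-in:

#include <stdbool.h>
#include <stdio.h>

#define BIT(n) (1UL << (n))
#define ADRENO_CONTEXT_SWITCH_FORCE_GPU BIT(0)

static bool would_use_cpu_path(void)
{
	return true;	/* pretend the RB is idle on the default pagetable */
}

int main(void)
{
	unsigned long flags = ADRENO_CONTEXT_SWITCH_FORCE_GPU;

	/* Mirrors: cpu_path = !(flags & FORCE_GPU) && _ctx_switch_use_cpu_path() */
	bool cpu_path = !(flags & ADRENO_CONTEXT_SWITCH_FORCE_GPU) &&
			would_use_cpu_path();

	printf("cpu_path = %d\n", cpu_path);	/* 0: forced onto the GPU */
	return 0;
}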
diff --git a/drivers/gpu/msm/adreno_ioctl.c b/drivers/gpu/msm/adreno_ioctl.c
index 519087a77b83..0d5e3e094c36 100644
--- a/drivers/gpu/msm/adreno_ioctl.c
+++ b/drivers/gpu/msm/adreno_ioctl.c
@@ -103,7 +103,7 @@ static long adreno_ioctl_preemption_counters_query(
levels_to_copy = gpudev->num_prio_levels;
if (copy_to_user((void __user *) (uintptr_t) read->counters,
- adreno_dev->preemption_counters.hostptr,
+ adreno_dev->preempt.counters.hostptr,
levels_to_copy * size_level))
return -EFAULT;
diff --git a/drivers/gpu/msm/adreno_iommu.c b/drivers/gpu/msm/adreno_iommu.c
index 2eeda01b3c4d..aa00dcb84185 100644
--- a/drivers/gpu/msm/adreno_iommu.c
+++ b/drivers/gpu/msm/adreno_iommu.c
@@ -275,6 +275,7 @@ static bool _ctx_switch_use_cpu_path(
struct adreno_ringbuffer *rb)
{
struct kgsl_mmu *mmu = KGSL_MMU(adreno_dev);
+
/*
* If rb is current, we can use cpu path when GPU is
* idle and we are switching to default pt.
@@ -284,7 +285,7 @@ static bool _ctx_switch_use_cpu_path(
if (adreno_dev->cur_rb == rb)
return adreno_isidle(KGSL_DEVICE(adreno_dev)) &&
(new_pt == mmu->defaultpagetable);
- else if ((rb->wptr == rb->rptr) &&
+ else if (adreno_rb_empty(rb) &&
(new_pt == mmu->defaultpagetable))
return true;
@@ -360,8 +361,7 @@ static unsigned int _adreno_mmu_set_pt_update_condition(
*/
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 1;
*cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
*cmds++ = 0;
@@ -375,14 +375,11 @@ static unsigned int _adreno_mmu_set_pt_update_condition(
*cmds++ = (1 << 8) | (1 << 4) | 3;
cmds += cp_gpuaddr(adreno_dev, cmds,
(adreno_dev->ringbuffers[0].pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname)));
+ PT_INFO_OFFSET(current_global_ptname)));
*cmds++ = ptname;
*cmds++ = 0xFFFFFFFF;
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 0;
*cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
*cmds++ = 0;
@@ -406,23 +403,18 @@ static unsigned int _adreno_iommu_pt_update_pid_to_mem(
unsigned int *cmds_orig = cmds;
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(current_rb_ptname)));
*cmds++ = ptname;
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
cmds += cp_gpuaddr(adreno_dev, cmds,
- (adreno_dev->ringbuffers[0].pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- current_global_ptname)));
+ (adreno_dev->ringbuffers[0].pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(current_global_ptname)));
*cmds++ = ptname;
/* pagetable switch done, Housekeeping: set the switch_pt_enable to 0 */
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 0;
*cmds++ = cp_packet(adreno_dev, CP_WAIT_MEM_WRITES, 1);
*cmds++ = 0;
@@ -444,14 +436,10 @@ static unsigned int _adreno_iommu_set_pt_v1(struct adreno_ringbuffer *rb,
/* set flag that indicates whether pt switch is required*/
cmds += _adreno_mmu_set_pt_update_condition(rb, cmds, ptname);
*cmds++ = cp_mem_packet(adreno_dev, CP_COND_EXEC, 4, 2);
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
- cmds += cp_gpuaddr(adreno_dev, cmds,
- (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info,
- switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
+ cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
+ PT_INFO_OFFSET(switch_pt_enable)));
*cmds++ = 1;
/* Exec count to be filled later */
cond_exec_ptr = cmds;
@@ -566,7 +554,7 @@ static unsigned int _adreno_iommu_set_pt_v2_a5xx(struct kgsl_device *device,
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 4, 1);
cmds += cp_gpuaddr(adreno_dev, cmds, (rb->pagetable_desc.gpuaddr +
- offsetof(struct adreno_ringbuffer_pagetable_info, ttbr0)));
+ PT_INFO_OFFSET(ttbr0)));
*cmds++ = lower_32_bits(ttbr0);
*cmds++ = upper_32_bits(ttbr0);
*cmds++ = contextidr;
@@ -651,14 +639,14 @@ static unsigned int __add_curr_ctxt_cmds(struct adreno_ringbuffer *rb,
*cmds++ = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds, device->memstore.gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, current_context));
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, current_context));
*cmds++ = (drawctxt ? drawctxt->base.id : 0);
*cmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- cmds += cp_gpuaddr(adreno_dev, cmds, device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- current_context));
+ cmds += cp_gpuaddr(adreno_dev, cmds,
+ MEMSTORE_ID_GPU_ADDR(device,
+ KGSL_MEMSTORE_GLOBAL, current_context));
*cmds++ = (drawctxt ? drawctxt->base.id : 0);
/* Invalidate UCHE for new context */
@@ -706,7 +694,7 @@ static void _set_ctxt_cpu(struct adreno_ringbuffer *rb,
}
/* Update rb memstore with current context */
kgsl_sharedmem_writel(device, &device->memstore,
- KGSL_MEMSTORE_RB_OFFSET(rb, current_context),
+ MEMSTORE_RB_OFFSET(rb, current_context),
drawctxt ? drawctxt->base.id : 0);
}
@@ -746,26 +734,11 @@ static int _set_pagetable_cpu(struct adreno_ringbuffer *rb,
if (result)
return result;
/* write the new pt set to memory var */
- kgsl_sharedmem_writel(device,
- &adreno_dev->ringbuffers[0].pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- current_global_ptname), new_pt->name);
+ adreno_ringbuffer_set_global(adreno_dev, new_pt->name);
}
/* Update the RB pagetable info here */
- kgsl_sharedmem_writel(device, &rb->pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- current_rb_ptname), new_pt->name);
- kgsl_sharedmem_writeq(device, &rb->pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- ttbr0), kgsl_mmu_pagetable_get_ttbr0(new_pt));
- kgsl_sharedmem_writel(device, &rb->pagetable_desc,
- offsetof(
- struct adreno_ringbuffer_pagetable_info,
- contextidr), kgsl_mmu_pagetable_get_contextidr(new_pt));
+ adreno_ringbuffer_set_pagetable(rb, new_pt);
return 0;
}
@@ -795,8 +768,6 @@ static int _set_pagetable_gpu(struct adreno_ringbuffer *rb,
return 0;
}
- kgsl_mmu_enable_clk(KGSL_MMU(adreno_dev));
-
cmds += adreno_iommu_set_pt_generate_cmds(rb, cmds, new_pt);
if ((unsigned int) (cmds - link) > (PAGE_SIZE / sizeof(unsigned int))) {
@@ -812,16 +783,6 @@ static int _set_pagetable_gpu(struct adreno_ringbuffer *rb,
KGSL_CMD_FLAGS_PMODE, link,
(unsigned int)(cmds - link));
- /*
- * On error disable the IOMMU clock right away otherwise turn it off
- * after the command has been retired
- */
- if (result)
- kgsl_mmu_disable_clk(KGSL_MMU(adreno_dev));
- else
- adreno_ringbuffer_mmu_disable_clk_on_ts(KGSL_DEVICE(adreno_dev),
- rb, rb->timestamp);
-
kfree(link);
return result;
}
@@ -886,7 +847,8 @@ int adreno_iommu_init(struct adreno_device *adreno_dev)
*/
int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
struct kgsl_pagetable *new_pt,
- struct adreno_context *drawctxt)
+ struct adreno_context *drawctxt,
+ unsigned long flags)
{
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -897,7 +859,8 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
if (rb->drawctxt_active)
cur_pt = rb->drawctxt_active->base.proc_priv->pagetable;
- cpu_path = _ctx_switch_use_cpu_path(adreno_dev, new_pt, rb);
+ cpu_path = !(flags & ADRENO_CONTEXT_SWITCH_FORCE_GPU) &&
+ _ctx_switch_use_cpu_path(adreno_dev, new_pt, rb);
/* Pagetable switch */
if (new_pt != cur_pt) {
@@ -907,10 +870,8 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
result = _set_pagetable_gpu(rb, new_pt);
}
- if (result) {
- KGSL_DRV_ERR(device, "Error switching pagetable %d\n", result);
+ if (result)
return result;
- }
/* Context switch */
if (cpu_path)
@@ -918,8 +879,5 @@ int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
else
result = _set_ctxt_gpu(rb, drawctxt);
- if (result)
- KGSL_DRV_ERR(device, "Error switching context %d\n", result);
-
return result;
}
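The repeated offsetof() expressions above collapse into PT_INFO_OFFSET(), defined in adreno_ringbuffer.h later in this patch. A standalone model of the pattern; the struct below is a trimmed stand-in for adreno_ringbuffer_pagetable_info:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct pagetable_info {
	unsigned int current_global_ptname;
	unsigned int current_rb_ptname;
	unsigned int switch_pt_enable;
	uint64_t ttbr0;
	unsigned int contextidr;
};

#define PT_INFO_OFFSET(_field) offsetof(struct pagetable_info, _field)

int main(void)
{
	uint64_t desc_gpuaddr = 0x100000;	/* hypothetical descriptor base */

	/* What the rewritten command stream computes per field */
	printf("switch_pt_enable at 0x%llx\n",
	       (unsigned long long)(desc_gpuaddr +
				    PT_INFO_OFFSET(switch_pt_enable)));
	printf("ttbr0 at 0x%llx\n",
	       (unsigned long long)(desc_gpuaddr + PT_INFO_OFFSET(ttbr0)));
	return 0;
}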
diff --git a/drivers/gpu/msm/adreno_iommu.h b/drivers/gpu/msm/adreno_iommu.h
index c557c65bb4c9..5a6c2c549370 100644
--- a/drivers/gpu/msm/adreno_iommu.h
+++ b/drivers/gpu/msm/adreno_iommu.h
@@ -17,7 +17,8 @@
#ifdef CONFIG_QCOM_KGSL_IOMMU
int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
struct kgsl_pagetable *new_pt,
- struct adreno_context *drawctxt);
+ struct adreno_context *drawctxt,
+ unsigned long flags);
int adreno_iommu_init(struct adreno_device *adreno_dev);
@@ -33,7 +34,8 @@ static inline int adreno_iommu_init(struct adreno_device *adreno_dev)
static inline int adreno_iommu_set_pt_ctx(struct adreno_ringbuffer *rb,
struct kgsl_pagetable *new_pt,
- struct adreno_context *drawctxt)
+ struct adreno_context *drawctxt,
+ unsigned long flags)
{
return 0;
}
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index dceb8fb93461..0160939e97f9 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -30,8 +30,6 @@
#include "a3xx_reg.h"
#include "adreno_a5xx.h"
-#define GSL_RB_NOP_SIZEDWORDS 2
-
#define RB_HOSTPTR(_rb, _pos) \
((unsigned int *) ((_rb)->buffer_desc.hostptr + \
((_pos) * sizeof(unsigned int))))
@@ -50,86 +48,89 @@ static void _cff_write_ringbuffer(struct adreno_ringbuffer *rb)
if (device->cff_dump_enable == 0)
return;
- /*
- * This code is predicated on the fact that we write a full block of
- * stuff without wrapping
- */
- BUG_ON(rb->wptr < rb->last_wptr);
-
- size = (rb->wptr - rb->last_wptr) * sizeof(unsigned int);
+ size = (rb->_wptr - rb->last_wptr) * sizeof(unsigned int);
hostptr = RB_HOSTPTR(rb, rb->last_wptr);
gpuaddr = RB_GPUADDR(rb, rb->last_wptr);
kgsl_cffdump_memcpy(device, gpuaddr, hostptr, size);
+ rb->last_wptr = rb->_wptr;
}
-void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
+static void adreno_get_submit_time(struct adreno_device *adreno_dev,
struct adreno_submit_time *time)
{
- struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
- BUG_ON(rb->wptr == 0);
-
- /* Write the changes to CFF if so enabled */
- _cff_write_ringbuffer(rb);
-
+ unsigned long flags;
/*
- * Read the current GPU ticks and wallclock for most accurate
- * profiling
+ * Here we are attempting to create a mapping between the
+ * GPU time domain (alwayson counter) and the CPU time domain
+ * (local_clock) by sampling both values as close together as
+ * possible. This is useful for many types of debugging and
+ * profiling. In order to make this mapping as accurate as
+ * possible, we must turn off interrupts to avoid running
+ * interrupt handlers between the two samples.
*/
- if (time != NULL) {
- /*
- * Here we are attempting to create a mapping between the
- * GPU time domain (alwayson counter) and the CPU time domain
- * (local_clock) by sampling both values as close together as
- * possible. This is useful for many types of debugging and
- * profiling. In order to make this mapping as accurate as
- * possible, we must turn off interrupts to avoid running
- * interrupt handlers between the two samples.
- */
- unsigned long flags;
- local_irq_save(flags);
+ local_irq_save(flags);
- /* Read always on registers */
- if (!adreno_is_a3xx(adreno_dev)) {
- adreno_readreg64(adreno_dev,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
- ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
- &time->ticks);
+ /* Read always on registers */
+ if (!adreno_is_a3xx(adreno_dev)) {
+ adreno_readreg64(adreno_dev,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_LO,
+ ADRENO_REG_RBBM_ALWAYSON_COUNTER_HI,
+ &time->ticks);
- /*
- * Mask hi bits as they may be incorrect on
- * a4x and some a5x
- */
- if (ADRENO_GPUREV(adreno_dev) >= 400 &&
+ /* Mask hi bits as they may be incorrect on some targets */
+ if (ADRENO_GPUREV(adreno_dev) >= 400 &&
ADRENO_GPUREV(adreno_dev) <= ADRENO_REV_A530)
- time->ticks &= 0xFFFFFFFF;
- }
- else
- time->ticks = 0;
+ time->ticks &= 0xFFFFFFFF;
+ } else
+ time->ticks = 0;
- /* Get the kernel clock for time since boot */
- time->ktime = local_clock();
+ /* Get the kernel clock for time since boot */
+ time->ktime = local_clock();
- /* Get the timeofday for the wall time (for the user) */
- getnstimeofday(&time->utime);
+ /* Get the timeofday for the wall time (for the user) */
+ getnstimeofday(&time->utime);
- local_irq_restore(flags);
- }
+ local_irq_restore(flags);
+}
+
+void adreno_ringbuffer_wptr(struct adreno_device *adreno_dev,
+ struct adreno_ringbuffer *rb)
+{
+ unsigned long flags;
- /* Memory barrier before informing the hardware of new commands */
- mb();
+ spin_lock_irqsave(&rb->preempt_lock, flags);
+ if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- if (adreno_preempt_state(adreno_dev, ADRENO_DISPATCHER_PREEMPT_CLEAR) &&
- (adreno_dev->cur_rb == rb)) {
- /*
- * Let the pwrscale policy know that new commands have
- * been submitted.
- */
- kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
- adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR, rb->wptr);
+ if (adreno_dev->cur_rb == rb) {
+ /*
+ * Let the pwrscale policy know that new commands have
+ * been submitted.
+ */
+ kgsl_pwrscale_busy(KGSL_DEVICE(adreno_dev));
+ adreno_writereg(adreno_dev, ADRENO_REG_CP_RB_WPTR,
+ rb->_wptr);
+ }
}
+
+ rb->wptr = rb->_wptr;
+ spin_unlock_irqrestore(&rb->preempt_lock, flags);
+}
+
+void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb,
+ struct adreno_submit_time *time)
+{
+ struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+
+ /* Write the changes to CFF if so enabled */
+ _cff_write_ringbuffer(rb);
+
+ if (time != NULL)
+ adreno_get_submit_time(adreno_dev, time);
+
+ adreno_ringbuffer_wptr(adreno_dev, rb);
}
int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
@@ -141,125 +142,36 @@ int adreno_ringbuffer_submit_spin(struct adreno_ringbuffer *rb,
return adreno_spin_idle(adreno_dev, timeout);
}
-static int
-adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb,
- unsigned int numcmds, int wptr_ahead)
+unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
+ unsigned int dwords)
{
- int nopcount = 0;
- unsigned int freecmds;
- unsigned int wptr = rb->wptr;
- unsigned int *cmds = NULL;
- uint64_t gpuaddr;
- unsigned long wait_time;
- unsigned long wait_timeout = msecs_to_jiffies(ADRENO_IDLE_TIMEOUT);
- unsigned int rptr;
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
+ unsigned int rptr = adreno_get_rptr(rb);
+ unsigned int ret;
- /* if wptr ahead, fill the remaining with NOPs */
- if (wptr_ahead) {
- /* -1 for header */
- nopcount = KGSL_RB_DWORDS - rb->wptr - 1;
-
- cmds = RB_HOSTPTR(rb, rb->wptr);
- gpuaddr = RB_GPUADDR(rb, rb->wptr);
-
- rptr = adreno_get_rptr(rb);
- /* For non current rb we don't expect the rptr to move */
- if ((adreno_dev->cur_rb != rb ||
- !adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR)) &&
- !rptr)
- return -ENOSPC;
-
- /* Make sure that rptr is not 0 before submitting
- * commands at the end of ringbuffer. We do not
- * want the rptr and wptr to become equal when
- * the ringbuffer is not empty */
- wait_time = jiffies + wait_timeout;
- while (!rptr) {
- rptr = adreno_get_rptr(rb);
- if (time_after(jiffies, wait_time))
- return -ETIMEDOUT;
- }
-
- rb->wptr = 0;
- }
-
- rptr = adreno_get_rptr(rb);
- freecmds = rptr - rb->wptr;
- if (freecmds == 0 || freecmds > numcmds)
- goto done;
+ if (rptr <= rb->_wptr) {
+ unsigned int *cmds;
- /* non current rptr will not advance anyway or if preemption underway */
- if (adreno_dev->cur_rb != rb ||
- !adreno_preempt_state(adreno_dev,
- ADRENO_DISPATCHER_PREEMPT_CLEAR)) {
- rb->wptr = wptr;
- return -ENOSPC;
- }
-
- wait_time = jiffies + wait_timeout;
- /* wait for space in ringbuffer */
- while (1) {
- rptr = adreno_get_rptr(rb);
-
- freecmds = rptr - rb->wptr;
-
- if (freecmds == 0 || freecmds > numcmds)
- break;
-
- if (time_after(jiffies, wait_time)) {
- KGSL_DRV_ERR(KGSL_DEVICE(adreno_dev),
- "Timed out waiting for freespace in RB rptr: 0x%x, wptr: 0x%x, rb id %d\n",
- rptr, wptr, rb->id);
- return -ETIMEDOUT;
+ if (rb->_wptr + dwords <= (KGSL_RB_DWORDS - 2)) {
+ ret = rb->_wptr;
+ rb->_wptr = (rb->_wptr + dwords) % KGSL_RB_DWORDS;
+ return RB_HOSTPTR(rb, ret);
}
- }
-done:
- if (wptr_ahead) {
- *cmds = cp_packet(adreno_dev, CP_NOP, nopcount);
- kgsl_cffdump_write(KGSL_DEVICE(adreno_dev), gpuaddr, *cmds);
- }
- return 0;
-}
+ cmds = RB_HOSTPTR(rb, rb->_wptr);
+ *cmds = cp_packet(adreno_dev, CP_NOP,
+ KGSL_RB_DWORDS - rb->_wptr - 1);
-unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
- unsigned int numcmds)
-{
- unsigned int *ptr = NULL;
- int ret = 0;
- unsigned int rptr;
- BUG_ON(numcmds >= KGSL_RB_DWORDS);
-
- rptr = adreno_get_rptr(rb);
- /* check for available space */
- if (rb->wptr >= rptr) {
- /* wptr ahead or equal to rptr */
- /* reserve dwords for nop packet */
- if ((rb->wptr + numcmds) > (KGSL_RB_DWORDS -
- GSL_RB_NOP_SIZEDWORDS))
- ret = adreno_ringbuffer_waitspace(rb, numcmds, 1);
- } else {
- /* wptr behind rptr */
- if ((rb->wptr + numcmds) >= rptr)
- ret = adreno_ringbuffer_waitspace(rb, numcmds, 0);
- /* check for remaining space */
- /* reserve dwords for nop packet */
- if (!ret && (rb->wptr + numcmds) > (KGSL_RB_DWORDS -
- GSL_RB_NOP_SIZEDWORDS))
- ret = adreno_ringbuffer_waitspace(rb, numcmds, 1);
+ rb->_wptr = 0;
}
- if (!ret) {
- rb->last_wptr = rb->wptr;
-
- ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
- rb->wptr += numcmds;
- } else
- ptr = ERR_PTR(ret);
+ if (rb->_wptr + dwords < rptr) {
+ ret = rb->_wptr;
+ rb->_wptr = (rb->_wptr + dwords) % KGSL_RB_DWORDS;
+ return RB_HOSTPTR(rb, ret);
+ }
- return ptr;
+ return ERR_PTR(-ENOSPC);
}
/**
@@ -279,8 +191,10 @@ int adreno_ringbuffer_start(struct adreno_device *adreno_dev,
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
kgsl_sharedmem_set(device, &(rb->buffer_desc),
0, 0xAA, KGSL_RB_SIZE);
+ kgsl_sharedmem_writel(device, &device->scratch,
+ SCRATCH_RPTR_OFFSET(rb->id), 0);
rb->wptr = 0;
- rb->rptr = 0;
+ rb->_wptr = 0;
rb->wptr_preempt_end = 0xFFFFFFFF;
rb->starve_timer_state =
ADRENO_DISPATCHER_RB_STARVE_TIMER_UNINIT;
@@ -322,6 +236,8 @@ static int _adreno_ringbuffer_probe(struct adreno_device *adreno_dev,
rb->timestamp = 0;
init_waitqueue_head(&rb->ts_expire_waitq);
+ spin_lock_init(&rb->preempt_lock);
+
/*
* Allocate mem for storing RB pagetables and commands to
* switch pagetable
@@ -433,6 +349,18 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds,
return cmds - start;
}
+static inline int cp_mem_write(struct adreno_device *adreno_dev,
+ unsigned int *cmds, uint64_t gpuaddr, unsigned int value)
+{
+ int dwords = 0;
+
+ cmds[dwords++] = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
+ dwords += cp_gpuaddr(adreno_dev, &cmds[dwords], gpuaddr);
+ cmds[dwords++] = value;
+
+ return dwords;
+}
+
static int
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
unsigned int flags, unsigned int *cmds,
@@ -446,18 +374,20 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
unsigned int total_sizedwords = sizedwords;
unsigned int i;
unsigned int context_id = 0;
- uint64_t gpuaddr = device->memstore.gpuaddr;
bool profile_ready;
struct adreno_context *drawctxt = rb->drawctxt_active;
struct kgsl_context *context = NULL;
bool secured_ctxt = false;
- uint64_t cond_addr;
static unsigned int _seq_cnt;
if (drawctxt != NULL && kgsl_context_detached(&drawctxt->base) &&
!(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
return -ENOENT;
+ /* On fault return error so that we don't keep submitting */
+ if (adreno_gpu_fault(adreno_dev) != 0)
+ return -EPROTO;
+
rb->timestamp++;
/* If this is a internal IB, use the global timestamp for it */
@@ -529,7 +459,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
* required in ringbuffer and adjust the write pointer depending on
* gpucore at the end of this function.
*/
- total_sizedwords += 4; /* sop timestamp */
+ total_sizedwords += 8; /* sop timestamp */
total_sizedwords += 5; /* eop timestamp */
if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
@@ -564,14 +494,9 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
*ringcmds++ = KGSL_CMD_IDENTIFIER;
if (adreno_is_preemption_enabled(adreno_dev) &&
- gpudev->preemption_pre_ibsubmit) {
- cond_addr = device->memstore.gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id,
- preempted);
+ gpudev->preemption_pre_ibsubmit)
ringcmds += gpudev->preemption_pre_ibsubmit(
- adreno_dev, rb, ringcmds, context,
- cond_addr, NULL);
- }
+ adreno_dev, rb, ringcmds, context);
if (flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE) {
*ringcmds++ = cp_packet(adreno_dev, CP_NOP, 1);
@@ -601,16 +526,15 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
adreno_profile_preib_processing(adreno_dev, drawctxt,
&flags, &ringcmds);
- /* start-of-pipeline timestamp */
- *ringcmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
+ /* start-of-pipeline timestamp for the context */
if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE))
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
- gpuaddr + KGSL_MEMSTORE_OFFSET(context_id,
- soptimestamp));
- else
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
- gpuaddr + KGSL_MEMSTORE_RB_OFFSET(rb, soptimestamp));
- *ringcmds++ = timestamp;
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_ID_GPU_ADDR(device, context_id, soptimestamp),
+ timestamp);
+
+ /* start-of-pipeline timestamp for the ringbuffer */
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, soptimestamp), rb->timestamp);
if (secured_ctxt)
ringcmds += cp_secure_mode(adreno_dev, ringcmds, 1);
@@ -659,11 +583,9 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
* early detection of timestamp interrupt storms to stave
* off system collapse.
*/
- *ringcmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
- ref_wait_ts));
- *ringcmds++ = ++_seq_cnt;
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_ID_GPU_ADDR(device, KGSL_MEMSTORE_GLOBAL,
+ ref_wait_ts), ++_seq_cnt);
/*
* end-of-pipeline timestamp. If per context timestamps is not
@@ -677,16 +599,17 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
*ringcmds++ = CACHE_FLUSH_TS;
if (drawctxt && !(flags & KGSL_CMD_FLAGS_INTERNAL_ISSUE)) {
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp));
+ ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
+ MEMSTORE_ID_GPU_ADDR(device, context_id, eoptimestamp));
*ringcmds++ = timestamp;
- *ringcmds++ = cp_mem_packet(adreno_dev, CP_MEM_WRITE, 2, 1);
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, eoptimestamp));
- *ringcmds++ = rb->timestamp;
+
+ /* Write the end of pipeline timestamp to the ringbuffer too */
+ ringcmds += cp_mem_write(adreno_dev, ringcmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp),
+ rb->timestamp);
} else {
- ringcmds += cp_gpuaddr(adreno_dev, ringcmds, gpuaddr +
- KGSL_MEMSTORE_RB_OFFSET(rb, eoptimestamp));
+ ringcmds += cp_gpuaddr(adreno_dev, ringcmds,
+ MEMSTORE_RB_GPU_ADDR(device, rb, eoptimestamp));
*ringcmds++ = timestamp;
}
@@ -707,8 +630,8 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (gpudev->preemption_post_ibsubmit &&
adreno_is_preemption_enabled(adreno_dev))
- ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev, rb,
- ringcmds, &drawctxt->base);
+ ringcmds += gpudev->preemption_post_ibsubmit(adreno_dev,
+ ringcmds);
/*
* If we have more ringbuffer commands than space reserved
@@ -722,7 +645,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
* required. If we have commands less than the space reserved in RB
* adjust the wptr accordingly.
*/
- rb->wptr = rb->wptr - (total_sizedwords - (ringcmds - start));
+ rb->_wptr = rb->_wptr - (total_sizedwords - (ringcmds - start));
adreno_ringbuffer_submit(rb, time);
@@ -1063,14 +986,24 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*cmds++ = KGSL_END_OF_IB_IDENTIFIER;
- ret = adreno_drawctxt_switch(adreno_dev, rb, drawctxt, cmdbatch->flags);
+ /* Context switches commands should *always* be on the GPU */
+ ret = adreno_drawctxt_switch(adreno_dev, rb, drawctxt,
+ ADRENO_CONTEXT_SWITCH_FORCE_GPU);
/*
* In the unlikely event of an error in the drawctxt switch,
* treat it like a hang
*/
- if (ret)
+ if (ret) {
+ /*
+ * It is "normal" to get a -ENOSPC or a -ENOENT. Don't log it,
+ * the upper layers know how to handle it
+ */
+ if (ret != -ENOSPC && ret != -ENOENT)
+ KGSL_DRV_ERR(device,
+ "Unable to switch draw context: %d\n", ret);
goto done;
+ }
if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
flags = KGSL_CMD_FLAGS_WFI;
@@ -1138,44 +1071,6 @@ done:
}
/**
- * adreno_ringbuffer_mmu_clk_disable_event() - Callback function that
- * disables the MMU clocks.
- * @device: Device pointer
- * @context: The ringbuffer context pointer
- * @data: Pointer containing the adreno_mmu_disable_clk_param structure
- * @type: The event call type (RETIRED or CANCELLED)
- */
-static void adreno_ringbuffer_mmu_clk_disable_event(struct kgsl_device *device,
- struct kgsl_event_group *group, void *data, int type)
-{
- kgsl_mmu_disable_clk(&device->mmu);
-}
-
-/*
- * adreno_ringbuffer_mmu_disable_clk_on_ts() - Sets up event to disable MMU
- * clocks
- * @device - The kgsl device pointer
- * @rb: The ringbuffer in whose event list the event is added
- * @timestamp: The timestamp on which the event should trigger
- *
- * Creates an event to disable the MMU clocks on timestamp and if event
- * already exists then updates the timestamp of disabling the MMU clocks
- * with the passed in ts if it is greater than the current value at which
- * the clocks will be disabled
- * Return - void
- */
-void
-adreno_ringbuffer_mmu_disable_clk_on_ts(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, unsigned int timestamp)
-{
- if (kgsl_add_event(device, &(rb->events), timestamp,
- adreno_ringbuffer_mmu_clk_disable_event, NULL)) {
- KGSL_DRV_ERR(device,
- "Failed to add IOMMU disable clk event\n");
- }
-}
-
-/**
* adreno_ringbuffer_wait_callback() - Callback function for event registered
* on a ringbuffer timestamp
 * @device: Device for which the callback is valid
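The rewritten adreno_ringbuffer_allocspace() above no longer busy-waits for the read pointer: it either carves the requested dwords out of the ring, pads the tail with a NOP and wraps, or returns -ENOSPC to the caller. A runnable user-space model of that logic (ring size and NOP encoding are illustrative only):

#include <errno.h>
#include <stdio.h>

#define RB_DWORDS 16

static unsigned int ring[RB_DWORDS];
static unsigned int _wptr;	/* shadow wptr, as in the patch */

static int allocspace(unsigned int rptr, unsigned int dwords,
		      unsigned int **out)
{
	if (rptr <= _wptr) {
		/* leave 2 dwords so wptr never catches rptr exactly */
		if (_wptr + dwords <= RB_DWORDS - 2) {
			*out = &ring[_wptr];
			_wptr = (_wptr + dwords) % RB_DWORDS;
			return 0;
		}
		/* pad the remainder with a NOP and wrap to the start */
		ring[_wptr] = 0x70000000 | (RB_DWORDS - _wptr - 1);
		_wptr = 0;
	}

	if (_wptr + dwords < rptr) {
		*out = &ring[_wptr];
		_wptr = (_wptr + dwords) % RB_DWORDS;
		return 0;
	}

	return -ENOSPC;	/* caller retries later; no busy-wait here */
}

int main(void)
{
	unsigned int *cmds;
	int ret;

	_wptr = 12;
	ret = allocspace(8, 6, &cmds);	/* tail padded, wrapped alloc at 0 */
	printf("ret=%d new wptr=%u alloc at dword %ld\n",
	       ret, _wptr, (long)(cmds - ring));
	return 0;
}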
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index f1980fd92961..b126f710b5e6 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -73,13 +73,16 @@ struct adreno_ringbuffer_pagetable_info {
unsigned int contextidr;
};
+#define PT_INFO_OFFSET(_field) \
+ offsetof(struct adreno_ringbuffer_pagetable_info, _field)
+
/**
* struct adreno_ringbuffer - Definition for an adreno ringbuffer object
* @flags: Internal control flags for the ringbuffer
- * @buffer_desc: Pointer to the ringbuffer memory descriptor
- * @wptr: Local copy of the wptr offset
- * @rptr: Read pointer offset in dwords from baseaddr
- * @last_wptr: offset of the last H/W committed wptr
+ * @buffer_desc: Pointer to the ringbuffer memory descriptor
+ * @_wptr: The next value of wptr to be written to the hardware on submit
+ * @wptr: Local copy of the wptr offset last written to hardware
+ * @last_wptr: offset of the last wptr that was written to CFF
* @rb_ctx: The context that represents a ringbuffer
* @id: Priority level of the ringbuffer, also used as an ID
* @fault_detect_ts: The last retired global timestamp read during fault detect
@@ -101,12 +104,13 @@ struct adreno_ringbuffer_pagetable_info {
* @sched_timer: Timer that tracks how long RB has been waiting to be scheduled
* or how long it has been scheduled for after preempting in
* @starve_timer_state: Indicates the state of the wait.
+ * @preempt_lock: Lock to protect the wptr pointer while it is being updated
*/
struct adreno_ringbuffer {
uint32_t flags;
struct kgsl_memdesc buffer_desc;
+ unsigned int _wptr;
unsigned int wptr;
- unsigned int rptr;
unsigned int last_wptr;
int id;
unsigned int fault_detect_ts;
@@ -122,14 +126,12 @@ struct adreno_ringbuffer {
int preempted_midway;
unsigned long sched_timer;
enum adreno_dispatcher_starve_timer_states starve_timer_state;
+ spinlock_t preempt_lock;
};
/* Returns the current ringbuffer */
#define ADRENO_CURRENT_RINGBUFFER(a) ((a)->cur_rb)
-#define KGSL_MEMSTORE_RB_OFFSET(rb, field) \
- KGSL_MEMSTORE_OFFSET((rb->id + KGSL_MEMSTORE_MAX), field)
-
int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
@@ -170,9 +172,6 @@ void adreno_ringbuffer_read_pfp_ucode(struct kgsl_device *device);
void adreno_ringbuffer_read_pm4_ucode(struct kgsl_device *device);
-void adreno_ringbuffer_mmu_disable_clk_on_ts(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, unsigned int ts);
-
int adreno_ringbuffer_waittimestamp(struct adreno_ringbuffer *rb,
unsigned int timestamp,
unsigned int msecs);
@@ -204,9 +203,10 @@ static inline unsigned int adreno_ringbuffer_dec_wrapped(unsigned int val,
}
static inline int adreno_ringbuffer_set_pt_ctx(struct adreno_ringbuffer *rb,
- struct kgsl_pagetable *pt, struct adreno_context *context)
+ struct kgsl_pagetable *pt, struct adreno_context *context,
+ unsigned long flags)
{
- return adreno_iommu_set_pt_ctx(rb, pt, context);
+ return adreno_iommu_set_pt_ctx(rb, pt, context, flags);
}
#endif /* __ADRENO_RINGBUFFER_H */
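The _wptr/wptr split documented above pairs with adreno_ringbuffer_wptr(): commands advance the shadow _wptr freely, and the committed wptr only moves, and only reaches the hardware register, under preempt_lock while no preemption is in flight. A sketch of that commit step, using a pthread mutex as a stand-in for the spinlock:

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>

struct rb {
	unsigned int _wptr;	/* where new commands were built */
	unsigned int wptr;	/* last value committed to "hardware" */
	pthread_mutex_t preempt_lock;
};

static bool preempt_idle = true;	/* stands in for ADRENO_PREEMPT_NONE */

/* Mirrors adreno_ringbuffer_wptr(): publish the shadow pointer, but only
 * poke the hardware register when no preemption is in flight. */
static void rb_commit_wptr(struct rb *rb, bool is_current_rb)
{
	pthread_mutex_lock(&rb->preempt_lock);
	if (preempt_idle && is_current_rb)
		printf("write CP_RB_WPTR = %u\n", rb->_wptr);
	rb->wptr = rb->_wptr;
	pthread_mutex_unlock(&rb->preempt_lock);
}

int main(void)
{
	struct rb rb = { ._wptr = 8, .wptr = 0,
			 .preempt_lock = PTHREAD_MUTEX_INITIALIZER };

	rb_commit_wptr(&rb, true);
	return 0;
}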
diff --git a/drivers/gpu/msm/adreno_snapshot.c b/drivers/gpu/msm/adreno_snapshot.c
index ca61d36a1384..b069b16c75ef 100644
--- a/drivers/gpu/msm/adreno_snapshot.c
+++ b/drivers/gpu/msm/adreno_snapshot.c
@@ -467,7 +467,7 @@ static size_t snapshot_rb(struct kgsl_device *device, u8 *buf,
header->start = 0;
header->end = KGSL_RB_DWORDS;
header->wptr = rb->wptr;
- header->rptr = rb->rptr;
+ header->rptr = adreno_get_rptr(rb);
header->rbsize = KGSL_RB_DWORDS;
header->count = KGSL_RB_DWORDS;
adreno_rb_readtimestamp(adreno_dev, rb, KGSL_TIMESTAMP_QUEUED,
@@ -741,8 +741,7 @@ static size_t snapshot_global(struct kgsl_device *device, u8 *buf,
header->size = memdesc->size >> 2;
header->gpuaddr = memdesc->gpuaddr;
- header->ptbase =
- kgsl_mmu_pagetable_get_ttbr0(device->mmu.defaultpagetable);
+ header->ptbase = MMU_DEFAULT_TTBR0(device);
header->type = SNAPSHOT_GPU_OBJECT_GLOBAL;
memcpy(ptr, memdesc->hostptr, memdesc->size);
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index 5f1bbb9a83b3..f52ddfa894d5 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -55,8 +55,8 @@ TRACE_EVENT(adreno_cmdbatch_queued,
TRACE_EVENT(adreno_cmdbatch_submitted,
TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks,
unsigned long secs, unsigned long usecs,
- struct adreno_ringbuffer *rb),
- TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb),
+ struct adreno_ringbuffer *rb, unsigned int rptr),
+ TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -81,7 +81,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__entry->usecs = usecs;
__entry->prio = cmdbatch->context->priority;
__entry->rb_id = rb->id;
- __entry->rptr = rb->rptr;
+ __entry->rptr = rptr;
__entry->wptr = rb->wptr;
__entry->q_inflight = rb->dispatch_q.inflight;
),
@@ -100,8 +100,8 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
TRACE_EVENT(adreno_cmdbatch_retired,
TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight,
uint64_t start, uint64_t retire,
- struct adreno_ringbuffer *rb),
- TP_ARGS(cmdbatch, inflight, start, retire, rb),
+ struct adreno_ringbuffer *rb, unsigned int rptr),
+ TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -126,7 +126,7 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__entry->retire = retire;
__entry->prio = cmdbatch->context->priority;
__entry->rb_id = rb->id;
- __entry->rptr = rb->rptr;
+ __entry->rptr = rptr;
__entry->wptr = rb->wptr;
__entry->q_inflight = rb->dispatch_q.inflight;
),
@@ -267,9 +267,8 @@ TRACE_EVENT(adreno_drawctxt_wait_done,
TRACE_EVENT(adreno_drawctxt_switch,
TP_PROTO(struct adreno_ringbuffer *rb,
- struct adreno_context *newctx,
- unsigned int flags),
- TP_ARGS(rb, newctx, flags),
+ struct adreno_context *newctx),
+ TP_ARGS(rb, newctx),
TP_STRUCT__entry(
__field(int, rb_level)
__field(unsigned int, oldctx)
@@ -283,8 +282,8 @@ TRACE_EVENT(adreno_drawctxt_switch,
__entry->newctx = newctx ? newctx->base.id : 0;
),
TP_printk(
- "rb level=%d oldctx=%u newctx=%u flags=%X",
- __entry->rb_level, __entry->oldctx, __entry->newctx, flags
+ "rb level=%d oldctx=%u newctx=%u",
+ __entry->rb_level, __entry->oldctx, __entry->newctx
)
);
@@ -427,8 +426,9 @@ TRACE_EVENT(kgsl_a5xx_irq_status,
DECLARE_EVENT_CLASS(adreno_hw_preempt_template,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb),
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr),
TP_STRUCT__entry(__field(int, cur_level)
__field(int, new_level)
__field(unsigned int, cur_rptr)
@@ -440,8 +440,8 @@ DECLARE_EVENT_CLASS(adreno_hw_preempt_template,
),
TP_fast_assign(__entry->cur_level = cur_rb->id;
__entry->new_level = new_rb->id;
- __entry->cur_rptr = cur_rb->rptr;
- __entry->new_rptr = new_rb->rptr;
+ __entry->cur_rptr = cur_rptr;
+ __entry->new_rptr = new_rptr;
__entry->cur_wptr = cur_rb->wptr;
__entry->new_wptr = new_rb->wptr;
__entry->cur_rbbase = cur_rb->buffer_desc.gpuaddr;
@@ -458,26 +458,30 @@ DECLARE_EVENT_CLASS(adreno_hw_preempt_template,
DEFINE_EVENT(adreno_hw_preempt_template, adreno_hw_preempt_clear_to_trig,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb)
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr)
);
DEFINE_EVENT(adreno_hw_preempt_template, adreno_hw_preempt_trig_to_comp,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb)
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr)
);
DEFINE_EVENT(adreno_hw_preempt_template, adreno_hw_preempt_trig_to_comp_int,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb)
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr)
);
TRACE_EVENT(adreno_hw_preempt_comp_to_clear,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb),
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr),
TP_STRUCT__entry(__field(int, cur_level)
__field(int, new_level)
__field(unsigned int, cur_rptr)
@@ -490,8 +494,8 @@ TRACE_EVENT(adreno_hw_preempt_comp_to_clear,
),
TP_fast_assign(__entry->cur_level = cur_rb->id;
__entry->new_level = new_rb->id;
- __entry->cur_rptr = cur_rb->rptr;
- __entry->new_rptr = new_rb->rptr;
+ __entry->cur_rptr = cur_rptr;
+ __entry->new_rptr = new_rptr;
__entry->cur_wptr = cur_rb->wptr;
__entry->new_wptr_end = new_rb->wptr_preempt_end;
__entry->new_wptr = new_rb->wptr;
@@ -509,8 +513,9 @@ TRACE_EVENT(adreno_hw_preempt_comp_to_clear,
TRACE_EVENT(adreno_hw_preempt_token_submit,
TP_PROTO(struct adreno_ringbuffer *cur_rb,
- struct adreno_ringbuffer *new_rb),
- TP_ARGS(cur_rb, new_rb),
+ struct adreno_ringbuffer *new_rb,
+ unsigned int cur_rptr, unsigned int new_rptr),
+ TP_ARGS(cur_rb, new_rb, cur_rptr, new_rptr),
TP_STRUCT__entry(__field(int, cur_level)
__field(int, new_level)
__field(unsigned int, cur_rptr)
@@ -523,8 +528,8 @@ TRACE_EVENT(adreno_hw_preempt_token_submit,
),
TP_fast_assign(__entry->cur_level = cur_rb->id;
__entry->new_level = new_rb->id;
- __entry->cur_rptr = cur_rb->rptr;
- __entry->new_rptr = new_rb->rptr;
+ __entry->cur_rptr = cur_rptr;
+ __entry->new_rptr = new_rptr;
__entry->cur_wptr = cur_rb->wptr;
__entry->cur_wptr_end = cur_rb->wptr_preempt_end;
__entry->new_wptr = new_rb->wptr;
@@ -541,23 +546,37 @@ TRACE_EVENT(adreno_hw_preempt_token_submit,
)
);
-TRACE_EVENT(adreno_rb_starve,
- TP_PROTO(struct adreno_ringbuffer *rb),
- TP_ARGS(rb),
- TP_STRUCT__entry(__field(int, id)
- __field(unsigned int, rptr)
- __field(unsigned int, wptr)
+TRACE_EVENT(adreno_preempt_trigger,
+ TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next),
+ TP_ARGS(cur, next),
+ TP_STRUCT__entry(
+ __field(struct adreno_ringbuffer *, cur)
+ __field(struct adreno_ringbuffer *, next)
),
- TP_fast_assign(__entry->id = rb->id;
- __entry->rptr = rb->rptr;
- __entry->wptr = rb->wptr;
+ TP_fast_assign(
+ __entry->cur = cur;
+ __entry->next = next;
),
- TP_printk(
- "rb %d r/w %x/%x starved", __entry->id, __entry->rptr,
- __entry->wptr
+ TP_printk("trigger from id=%d to id=%d",
+ __entry->cur->id, __entry->next->id
)
);
+TRACE_EVENT(adreno_preempt_done,
+ TP_PROTO(struct adreno_ringbuffer *cur, struct adreno_ringbuffer *next),
+ TP_ARGS(cur, next),
+ TP_STRUCT__entry(
+ __field(struct adreno_ringbuffer *, cur)
+ __field(struct adreno_ringbuffer *, next)
+ ),
+ TP_fast_assign(
+ __entry->cur = cur;
+ __entry->next = next;
+ ),
+ TP_printk("done switch to id=%d from id=%d",
+ __entry->next->id, __entry->cur->id
+ )
+);
#endif /* _ADRENO_TRACE_H */
/* This part must be outside protection */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2563591f376e..f77dbb7f20af 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -233,6 +233,8 @@ int kgsl_readtimestamp(struct kgsl_device *device, void *priv,
}
EXPORT_SYMBOL(kgsl_readtimestamp);
+static long gpumem_free_entry(struct kgsl_mem_entry *entry);
+
/* Scheduled by kgsl_mem_entry_put_deferred() */
static void _deferred_put(struct work_struct *work)
{
@@ -247,10 +249,8 @@ kgsl_mem_entry_create(void)
{
struct kgsl_mem_entry *entry = kzalloc(sizeof(*entry), GFP_KERNEL);
- if (entry != NULL) {
+ if (entry != NULL)
kref_init(&entry->refcount);
- INIT_WORK(&entry->work, _deferred_put);
- }
return entry;
}
@@ -1150,6 +1150,8 @@ static int kgsl_open_device(struct kgsl_device *device)
atomic_inc(&device->active_cnt);
kgsl_sharedmem_set(device, &device->memstore, 0, 0,
device->memstore.size);
+ kgsl_sharedmem_set(device, &device->scratch, 0, 0,
+ device->scratch.size);
result = device->ftbl->init(device);
if (result)
@@ -1855,7 +1857,10 @@ static long gpuobj_free_on_timestamp(struct kgsl_device_private *dev_priv,
static void gpuobj_free_fence_func(void *priv)
{
- kgsl_mem_entry_put_deferred((struct kgsl_mem_entry *) priv);
+ struct kgsl_mem_entry *entry = priv;
+
+ INIT_WORK(&entry->work, _deferred_put);
+ queue_work(kgsl_driver.mem_workqueue, &entry->work);
}
static long gpuobj_free_on_fence(struct kgsl_device_private *dev_priv,
@@ -3910,11 +3915,13 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
status = kgsl_allocate_global(device, &device->memstore,
KGSL_MEMSTORE_SIZE, 0, 0);
- if (status != 0) {
- KGSL_DRV_ERR(device, "kgsl_allocate_global failed %d\n",
- status);
+ if (status != 0)
goto error_close_mmu;
- }
+
+ status = kgsl_allocate_global(device, &device->scratch,
+ PAGE_SIZE, 0, 0);
+ if (status != 0)
+ goto error_free_memstore;
/*
* The default request type PM_QOS_REQ_ALL_CORES is
@@ -3964,6 +3971,8 @@ int kgsl_device_platform_probe(struct kgsl_device *device)
return 0;
+error_free_memstore:
+ kgsl_free_global(device, &device->memstore);
error_close_mmu:
kgsl_mmu_close(device);
error_pwrctrl_close:
@@ -3990,6 +3999,8 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
idr_destroy(&device->context_idr);
+ kgsl_free_global(device, &device->scratch);
+
kgsl_free_global(device, &device->memstore);
kgsl_mmu_close(device);
@@ -4091,8 +4102,9 @@ static int __init kgsl_core_init(void)
INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
kgsl_driver.workqueue = create_singlethread_workqueue("kgsl-workqueue");
- kgsl_driver.mem_workqueue =
- create_singlethread_workqueue("kgsl-mementry");
+
+ kgsl_driver.mem_workqueue = alloc_workqueue("kgsl-mementry",
+ WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
kgsl_events_init();
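The switch to alloc_workqueue() above, together with moving INIT_WORK() to queue time in gpuobj_free_fence_func(), is the stock workqueue pattern for pushing a free out of atomic context. A minimal self-contained module sketch of the same pattern; every name here is illustrative:

#include <linux/module.h>
#include <linux/workqueue.h>
#include <linux/slab.h>

static struct workqueue_struct *mem_wq;

struct entry {
	struct work_struct work;
};

static void entry_free_worker(struct work_struct *work)
{
	struct entry *e = container_of(work, struct entry, work);

	kfree(e);	/* the deferred "put" happens in process context */
}

/* Like gpuobj_free_fence_func(): safe from atomic context because the
 * actual free is handed off to the workqueue. */
static void entry_put_deferred(struct entry *e)
{
	INIT_WORK(&e->work, entry_free_worker);
	queue_work(mem_wq, &e->work);
}

static int __init demo_init(void)
{
	struct entry *e;

	mem_wq = alloc_workqueue("demo-mementry",
				 WQ_UNBOUND | WQ_MEM_RECLAIM, 0);
	if (!mem_wq)
		return -ENOMEM;

	e = kzalloc(sizeof(*e), GFP_KERNEL);
	if (e)
		entry_put_deferred(e);
	return 0;
}

static void __exit demo_exit(void)
{
	destroy_workqueue(mem_wq);	/* drains pending work first */
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");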
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index dfe83be799b3..c172021c8944 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -37,6 +37,32 @@
#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
sizeof(struct kgsl_devmemstore) - 1 - KGSL_PRIORITY_MAX_RB_LEVELS)
+#define MEMSTORE_RB_OFFSET(rb, field) \
+ KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field)
+
+#define MEMSTORE_ID_GPU_ADDR(dev, iter, field) \
+ ((dev)->memstore.gpuaddr + KGSL_MEMSTORE_OFFSET(iter, field))
+
+#define MEMSTORE_RB_GPU_ADDR(dev, rb, field) \
+ ((dev)->memstore.gpuaddr + \
+ KGSL_MEMSTORE_OFFSET(((rb)->id + KGSL_MEMSTORE_MAX), field))
+
+/*
+ * SCRATCH MEMORY: The scratch memory is one page worth of data that
+ * is mapped into the GPU. This allows for some 'shared' data between
+ * the GPU and CPU. For example, it will be used by the GPU to write
+ * each updated RPTR for each RB.
+ *
+ * Used Data:
+ * Offset: Length(bytes): What
+ * 0x0: 4 * KGSL_PRIORITY_MAX_RB_LEVELS: one RPTR per RB, RB0 first
+ */
+
+/* Shadow global helpers */
+#define SCRATCH_RPTR_OFFSET(id) ((id) * sizeof(unsigned int))
+#define SCRATCH_RPTR_GPU_ADDR(dev, id) \
+ ((dev)->scratch.gpuaddr + SCRATCH_RPTR_OFFSET(id))
+
/* Timestamp window used to detect rollovers (half of integer range) */
#define KGSL_TIMESTAMP_WINDOW 0x80000000
@@ -447,21 +473,6 @@ kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
-/**
- * kgsl_mem_entry_put_deferred() - Schedule a task to put the memory entry
- * @entry: Mem entry to put
- *
- * This function is for atomic contexts where a normal kgsl_mem_entry_put()
- * would result in the memory entry getting destroyed and possibly taking
- * mutexes along the way. Schedule the work to happen outside of the atomic
- * context.
- */
-static inline void kgsl_mem_entry_put_deferred(struct kgsl_mem_entry *entry)
-{
- if (entry != NULL)
- queue_work(kgsl_driver.mem_workqueue, &entry->work);
-}
-
/*
* kgsl_addr_range_overlap() - Checks if 2 ranges overlap
* @gpuaddr1: Start of first address range
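The scratch helpers above give each ringbuffer a 32-bit RPTR slot at the start of the scratch page. A quick model of the address math; the base address is made up:

#include <stdio.h>

#define SCRATCH_RPTR_OFFSET(id) ((id) * sizeof(unsigned int))

int main(void)
{
	unsigned long long scratch_gpuaddr = 0x40000000ULL;	/* hypothetical */
	int id;

	for (id = 0; id < 4; id++)	/* e.g. KGSL_PRIORITY_MAX_RB_LEVELS */
		printf("RB%d rptr shadow at 0x%llx\n", id,
		       scratch_gpuaddr + SCRATCH_RPTR_OFFSET(id));
	return 0;
}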
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h
index 1547ac02fdbf..d5cbf375b5d3 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.h
+++ b/drivers/gpu/msm/kgsl_cmdbatch.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -31,7 +31,6 @@
* @fault_policy: Internal policy describing how to handle this command in case
* of a fault
* @fault_recovery: recovery actions actually tried for this batch
- * @expires: Point in time when the cmdbatch is considered to be hung
* @refcount: kref structure to maintain the reference count
* @cmdlist: List of IBs to issue
* @memlist: List of all memory used in this command batch
@@ -61,7 +60,6 @@ struct kgsl_cmdbatch {
unsigned long priv;
unsigned long fault_policy;
unsigned long fault_recovery;
- unsigned long expires;
struct kref refcount;
struct list_head cmdlist;
struct list_head memlist;
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index c3fb2b81fcbd..4159a5fe375f 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -227,6 +227,7 @@ struct kgsl_device {
/* GPU shader memory size */
unsigned int shader_mem_len;
struct kgsl_memdesc memstore;
+ struct kgsl_memdesc scratch;
const char *iomemname;
const char *shadermemname;
diff --git a/drivers/gpu/msm/kgsl_events.c b/drivers/gpu/msm/kgsl_events.c
index e1f9ad17d0ff..6f70b9ddd376 100644
--- a/drivers/gpu/msm/kgsl_events.c
+++ b/drivers/gpu/msm/kgsl_events.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -56,6 +56,23 @@ static void _kgsl_event_worker(struct work_struct *work)
kmem_cache_free(events_cache, event);
}
+/* return true if the group needs to be processed */
+static bool _do_process_group(unsigned int processed, unsigned int cur)
+{
+ if (processed == cur)
+ return false;
+
+ /*
+	 * This ensures that the timestamp didn't slip back accidentally, maybe
+ * due to a memory barrier issue. This is highly unlikely but we've
+ * been burned here in the past.
+ */
+ if ((cur < processed) && ((processed - cur) < KGSL_TIMESTAMP_WINDOW))
+ return false;
+
+ return true;
+}
+
static void _process_event_group(struct kgsl_device *device,
struct kgsl_event_group *group, bool flush)
{
@@ -80,11 +97,7 @@ static void _process_event_group(struct kgsl_device *device,
group->readtimestamp(device, group->priv, KGSL_TIMESTAMP_RETIRED,
&timestamp);
- /*
- * If no timestamps have been retired since the last time we were here
- * then we can avoid going through this loop
- */
- if (!flush && timestamp_cmp(timestamp, group->processed) <= 0)
+ if (!flush && _do_process_group(group->processed, timestamp) == false)
goto out;
list_for_each_entry_safe(event, tmp, &group->events, node) {
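_do_process_group() above replaces the plain timestamp_cmp() check with one that also tolerates an apparent backwards slip of less than half the 32-bit range. A runnable model of that comparison:

#include <stdbool.h>
#include <stdio.h>

#define TIMESTAMP_WINDOW 0x80000000u

static bool do_process_group(unsigned int processed, unsigned int cur)
{
	if (processed == cur)
		return false;

	/* cur behind processed, but not by enough to be a real rollover */
	if (cur < processed && (processed - cur) < TIMESTAMP_WINDOW)
		return false;

	return true;
}

int main(void)
{
	printf("%d\n", do_process_group(100, 100));	/* 0: nothing retired */
	printf("%d\n", do_process_group(100, 99));	/* 0: slipped back */
	printf("%d\n", do_process_group(0xFFFFFFF0, 5));/* 1: rollover */
	printf("%d\n", do_process_group(100, 101));	/* 1: retired */
	return 0;
}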
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index a338559ac0bb..103d290eb681 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -220,9 +220,6 @@ static int _attach_pt(struct kgsl_iommu_pt *iommu_pt,
if (ret == 0)
iommu_pt->attached = true;
- else
- KGSL_CORE_ERR("iommu_attach_device(%s) failed: %d\n",
- ctx->name, ret);
return ret;
}
@@ -1452,25 +1449,25 @@ done:
return ret;
}
+static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt);
+
static int kgsl_iommu_start(struct kgsl_mmu *mmu)
{
int status;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
- struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
status = _setup_user_context(mmu);
if (status)
return status;
status = _setup_secure_context(mmu);
- if (status)
+ if (status) {
_detach_context(&iommu->ctx[KGSL_IOMMU_CONTEXT_USER]);
- else {
- kgsl_iommu_enable_clk(mmu);
- KGSL_IOMMU_SET_CTX_REG(ctx, TLBIALL, 1);
- kgsl_iommu_disable_clk(mmu);
+ return status;
}
- return status;
+
+ /* Make sure the hardware is programmed to the default pagetable */
+ return kgsl_iommu_set_pt(mmu, mmu->defaultpagetable);
}
static int
@@ -1707,23 +1704,15 @@ kgsl_iommu_get_current_ttbr0(struct kgsl_mmu *mmu)
*
 * Return - 0 on success
*/
-static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
- struct kgsl_pagetable *pt)
+static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
- struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
- int ret = 0;
uint64_t ttbr0, temp;
unsigned int contextidr;
unsigned long wait_for_flush;
- /*
- * If using a global pagetable, we can skip all this
- * because the pagetable will be set up by the iommu
- * driver and never changed at runtime.
- */
- if (!kgsl_mmu_is_perprocess(mmu))
+ if ((pt != mmu->defaultpagetable) && !kgsl_mmu_is_perprocess(mmu))
return 0;
kgsl_iommu_enable_clk(mmu);
@@ -1731,14 +1720,6 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
ttbr0 = kgsl_mmu_pagetable_get_ttbr0(pt);
contextidr = kgsl_mmu_pagetable_get_contextidr(pt);
- /*
- * Taking the liberty to spin idle since this codepath
- * is invoked when we can spin safely for it to be idle
- */
- ret = adreno_spin_idle(ADRENO_DEVICE(device), ADRENO_IDLE_TIMEOUT);
- if (ret)
- return ret;
-
KGSL_IOMMU_SET_CTX_REG_Q(ctx, TTBR0, ttbr0);
KGSL_IOMMU_SET_CTX_REG(ctx, CONTEXTIDR, contextidr);
@@ -1767,10 +1748,8 @@ static int kgsl_iommu_set_pt(struct kgsl_mmu *mmu,
cpu_relax();
}
- /* Disable smmu clock */
kgsl_iommu_disable_clk(mmu);
-
- return ret;
+ return 0;
}
/*
@@ -1788,8 +1767,6 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
struct kgsl_device *device = KGSL_MMU_DEVICE(mmu);
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- int ret = 0;
- unsigned int sctlr_val;
if ((adreno_dev->ft_pf_policy &
BIT(KGSL_FT_PAGEFAULT_GPUHALT_ENABLE)) ==
@@ -1798,10 +1775,7 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
/* If not attached, policy will be updated during the next attach */
if (ctx->default_pt != NULL) {
- /* Need to idle device before changing options */
- ret = device->ftbl->idle(device);
- if (ret)
- return ret;
+ unsigned int sctlr_val;
kgsl_iommu_enable_clk(mmu);
@@ -1820,7 +1794,7 @@ static int kgsl_iommu_set_pf_policy(struct kgsl_mmu *mmu,
kgsl_iommu_disable_clk(mmu);
}
- return ret;
+ return 0;
}
static struct kgsl_protected_registers *
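kgsl_iommu_set_pt() above bounds its TLB-flush wait with a jiffies deadline and cpu_relax(), then carries on either way. A user-space model of that bounded poll, with a fake status register and a made-up timeout:

#include <stdbool.h>
#include <stdio.h>
#include <time.h>

static unsigned int tlbstatus = 1;	/* fake TLBSTATUS "active" bit */
static int polls;

static unsigned int read_tlbstatus(void)
{
	if (++polls == 3)
		tlbstatus = 0;	/* pretend the flush completes */
	return tlbstatus;
}

static bool wait_for_flush(long timeout_ms)
{
	struct timespec start, now;

	clock_gettime(CLOCK_MONOTONIC, &start);
	while (read_tlbstatus() & 1) {
		clock_gettime(CLOCK_MONOTONIC, &now);
		if ((now.tv_sec - start.tv_sec) * 1000 +
		    (now.tv_nsec - start.tv_nsec) / 1000000 > timeout_ms)
			return false;	/* give up and proceed, as the driver does */
	}
	return true;
}

int main(void)
{
	printf("flushed: %d\n", wait_for_flush(10));
	return 0;
}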
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index 3652aa2e6ec4..5339917911b1 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -21,6 +21,12 @@
#define KGSL_MMU_GLOBAL_PT 0
#define KGSL_MMU_SECURE_PT 1
+#define MMU_DEFAULT_TTBR0(_d) \
+ (kgsl_mmu_pagetable_get_ttbr0((_d)->mmu.defaultpagetable))
+
+#define MMU_DEFAULT_CONTEXTIDR(_d) \
+ (kgsl_mmu_pagetable_get_contextidr((_d)->mmu.defaultpagetable))
+
struct kgsl_device;
enum kgsl_mmutype {
diff --git a/drivers/gpu/msm/kgsl_pwrctrl.c b/drivers/gpu/msm/kgsl_pwrctrl.c
index da8c8585d31e..2b9eef8b6351 100644
--- a/drivers/gpu/msm/kgsl_pwrctrl.c
+++ b/drivers/gpu/msm/kgsl_pwrctrl.c
@@ -1381,6 +1381,9 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
_isense_clk_set_rate(pwr,
pwr->num_pwrlevels - 1);
}
+
+ /* Turn off the IOMMU clocks */
+ kgsl_mmu_disable_clk(&device->mmu);
} else if (requested_state == KGSL_STATE_SLEEP) {
/* High latency clock maintenance. */
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
@@ -1428,7 +1431,11 @@ static void kgsl_pwrctrl_clk(struct kgsl_device *device, int state,
pwr->gpu_bimc_interface_enabled = 1;
}
}
+
+ /* Turn on the IOMMU clocks */
+ kgsl_mmu_enable_clk(&device->mmu);
}
+
}
}
diff --git a/drivers/input/touchscreen/it7258_ts_i2c.c b/drivers/input/touchscreen/it7258_ts_i2c.c
new file mode 100644
index 000000000000..048358e2ef9d
--- /dev/null
+++ b/drivers/input/touchscreen/it7258_ts_i2c.c
@@ -0,0 +1,851 @@
+/* drivers/input/touchscreen/it7258_ts_i2c.c
+ *
+ * Copyright (C) 2014 ITE Tech. Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/i2c.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/firmware.h>
+#include <linux/gpio.h>
+#include <linux/slab.h>
+#include <linux/wakelock.h>
+
+#define MAX_BUFFER_SIZE 144
+#define DEVICE_NAME "IT7260"
+#define SCREEN_X_RESOLUTION 320
+#define SCREEN_Y_RESOLUTION 320
+
+#define BUF_COMMAND 0x20 /* all commands writes go to this idx */
+#define BUF_SYS_COMMAND 0x40
+#define BUF_QUERY 0x80 /* "device ready?" and "wake up please" and "read touch data" reads go to this idx */
+#define BUF_RESPONSE 0xA0 /* most command response reads go to this idx */
+#define BUF_SYS_RESPONSE 0xC0
+#define BUF_POINT_INFO 0xE0 /* reads of "point" go through here and produce 14 bytes of data */
+
+/* Commands and their subcommands. When no subcommands exist, a zero is sent as the second byte */
+#define CMD_IDENT_CHIP 0x00
+#define CMD_READ_VERSIONS 0x01 /* VERSION_LENGTH bytes of data in response */
+# define VER_FIRMWARE 0x00
+# define VER_CONFIG 0x06
+# define VERSION_LENGTH 10
+#define CMD_PWR_CTL 0x04 /* subcommand is zero, next byte is power mode */
+# define PWR_CTL_LOW_POWER_MODE 0x01 /* idle mode */
+# define PWR_CTL_SLEEP_MODE 0x02 /* sleep mode */
+#define CMD_UNKNOWN_7 0x07 /* command is not documented in the datasheet v1.0.0.7 */
+#define CMD_FIRMWARE_REINIT_C 0x0C
+#define CMD_CALIBRATE 0x13 /* needs to be followed by 4 bytes of zeroes */
+#define CMD_FIRMWARE_UPGRADE 0x60
+# define FIRMWARE_MODE_ENTER 0x00
+# define FIRMWARE_MODE_EXIT 0x80
+#define CMD_SET_START_OFFSET 0x61 /* address for FW read/write */
+#define CMD_FW_WRITE 0x62 /* subcommand is number of bytes to write */
+#define CMD_FW_READ 0x63 /* subcommand is number of bytes to read */
+#define CMD_FIRMWARE_REINIT_6F 0x6F
+
+#define FW_WRITE_CHUNK_SIZE 128
+#define FW_WRITE_RETRY_COUNT 4
+#define CHIP_FLASH_SIZE 0x8000
+#define SYSFS_FW_UPLOAD_MODE_MANUAL 2
+#define SYSFS_RESULT_FAIL (-1)
+#define SYSFS_RESULT_NOT_DONE 0
+#define SYSFS_RESULT_SUCCESS 1
+#define DEVICE_READY_MAX_WAIT 500
+
+/* result of reading with BUF_QUERY bits */
+#define CMD_STATUS_BITS 0x07
+#define CMD_STATUS_DONE 0x00
+#define CMD_STATUS_BUSY 0x01
+#define CMD_STATUS_ERROR 0x02
+#define PT_INFO_BITS 0xF8
+#define BT_INFO_NONE 0x00
+#define PT_INFO_YES 0x80
+#define BT_INFO_NONE_BUT_DOWN 0x08 /* no new data but finger(s) still down */
+
+/* use this to include integers in commands */
+#define CMD_UINT16(v) ((uint8_t)(v)), ((uint8_t)((v) >> 8))
+
+
+struct FingerData {
+ uint8_t xLo;
+ uint8_t hi;
+ uint8_t yLo;
+ uint8_t pressure;
+} __attribute__((packed));
+
+struct PointData {
+ uint8_t flags;
+ uint8_t palm;
+ struct FingerData fd[3];
+} __attribute__((packed));
+
+#define PD_FLAGS_DATA_TYPE_BITS 0xF0
+/* other types (like chip-detected gestures) exist but we do not care */
+#define PD_FLAGS_DATA_TYPE_TOUCH 0x00
+#define PD_FLAGS_NOT_PEN 0x08 /* set if pen touched, clear if finger(s) */
+#define PD_FLAGS_HAVE_FINGERS 0x07 /* a bit for each finger data that is valid (from lsb to msb) */
+#define PD_PALM_FLAG_BIT 0x01
+#define FD_PRESSURE_BITS 0x0F
+#define FD_PRESSURE_NONE 0x00
+#define FD_PRESSURE_HOVER 0x01
+#define FD_PRESSURE_LIGHT 0x02
+#define FD_PRESSURE_NORMAL 0x04
+#define FD_PRESSURE_HIGH 0x08
+#define FD_PRESSURE_HEAVY 0x0F
+
+struct IT7260_ts_data {
+ struct i2c_client *client;
+ struct input_dev *input_dev;
+};
+
+static int8_t fwUploadResult = SYSFS_RESULT_NOT_DONE;
+static int8_t calibrationWasSuccessful = SYSFS_RESULT_NOT_DONE;
+static bool devicePresent = false;
+static DEFINE_MUTEX(sleepModeMutex);
+static bool chipAwake = true;
+static bool hadFingerDown = false;
+static bool isDeviceSleeping = false;
+static bool isDeviceSuspend = false;
+static struct input_dev *input_dev;
+static struct IT7260_ts_data *gl_ts;
+
+#define LOGE(...) pr_err(DEVICE_NAME ": " __VA_ARGS__)
+#define LOGI(...) printk(DEVICE_NAME ": " __VA_ARGS__)
+
+/* internal-use helper - does not ensure the chip is ready before reading */
+static bool i2cReadNoReadyCheck(uint8_t bufferIndex, uint8_t *dataBuffer, uint16_t dataLength)
+{
+ struct i2c_msg msgs[2] = {
+ {
+ .addr = gl_ts->client->addr,
+ .flags = I2C_M_NOSTART,
+ .len = 1,
+ .buf = &bufferIndex
+ },
+ {
+ .addr = gl_ts->client->addr,
+ .flags = I2C_M_RD,
+ .len = dataLength,
+ .buf = dataBuffer
+ }
+ };
+
+ memset(dataBuffer, 0xFF, dataLength);
+
+ return i2c_transfer(gl_ts->client->adapter, msgs, 2);
+}
+
+static bool i2cWriteNoReadyCheck(uint8_t bufferIndex, const uint8_t *dataBuffer, uint16_t dataLength)
+{
+ uint8_t txbuf[257];
+ struct i2c_msg msg = {
+ .addr = gl_ts->client->addr,
+ .flags = 0,
+ .len = dataLength + 1,
+ .buf = txbuf
+ };
+
+ /* just to be careful */
+ BUG_ON(dataLength > sizeof(txbuf) - 1);
+
+ txbuf[0] = bufferIndex;
+ memcpy(txbuf + 1, dataBuffer, dataLength);
+
+ return i2c_transfer(gl_ts->client->adapter, &msg, 1);
+}
+
+/*
+ * The device is apparently always ready for i2c, but not for actual register
+ * reads/writes. This function ascertains that it is ready for those too. The
+ * results of this call are often ignored by callers.
+ */
+static bool waitDeviceReady(bool forever, bool slowly)
+{
+ uint8_t ucQuery;
+ uint32_t count = DEVICE_READY_MAX_WAIT;
+
+	do {
+ if (!i2cReadNoReadyCheck(BUF_QUERY, &ucQuery, sizeof(ucQuery)))
+ ucQuery = CMD_STATUS_BUSY;
+
+ if (slowly)
+ mdelay(1000);
+ if (!forever)
+ count--;
+
+	} while ((ucQuery & CMD_STATUS_BUSY) && count);
+
+ return !ucQuery;
+}
+
+static bool i2cRead(uint8_t bufferIndex, uint8_t *dataBuffer, uint16_t dataLength)
+{
+ waitDeviceReady(false, false);
+ return i2cReadNoReadyCheck(bufferIndex, dataBuffer, dataLength);
+}
+
+static bool i2cWrite(uint8_t bufferIndex, const uint8_t *dataBuffer, uint16_t dataLength)
+{
+ waitDeviceReady(false, false);
+ return i2cWriteNoReadyCheck(bufferIndex, dataBuffer, dataLength);
+}
+
+static bool chipFirmwareReinitialize(uint8_t cmdOfChoice)
+{
+ uint8_t cmd[] = {cmdOfChoice};
+ uint8_t rsp[2];
+
+ if (!i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
+ return false;
+
+ if (!i2cRead(BUF_RESPONSE, rsp, sizeof(rsp)))
+ return false;
+
+ /* a reply of two zero bytes signifies success */
+ return !rsp[0] && !rsp[1];
+}
+
+static bool chipFirmwareUpgradeModeEnterExit(bool enter)
+{
+ uint8_t cmd[] = {CMD_FIRMWARE_UPGRADE, 0, 'I', 'T', '7', '2', '6', '0', 0x55, 0xAA};
+ uint8_t resp[2];
+
+ cmd[1] = enter ? FIRMWARE_MODE_ENTER : FIRMWARE_MODE_EXIT;
+ if (!i2cWrite(BUF_COMMAND, cmd, sizeof(cmd)))
+ return false;
+
+ if (!i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
+ return false;
+
+ /* a reply of two zero bytes signifies success */
+ return !resp[0] && !resp[1];
+}
+
+static bool chipSetStartOffset(uint16_t offset)
+{
+ uint8_t cmd[] = {CMD_SET_START_OFFSET, 0, CMD_UINT16(offset)};
+ uint8_t resp[2];
+
+ if (!i2cWrite(BUF_COMMAND, cmd, 4))
+ return false;
+
+
+ if (!i2cRead(BUF_RESPONSE, resp, sizeof(resp)))
+ return false;
+
+
+ /* a reply of two zero bytes signifies success */
+ return !resp[0] && !resp[1];
+}
+
+
+/* write fwLength bytes from fwData at chip offset writeStartOffset */
+static bool chipFlashWriteAndVerify(unsigned int fwLength, const uint8_t *fwData, uint16_t writeStartOffset)
+{
+ uint32_t curDataOfst;
+
+ for (curDataOfst = 0; curDataOfst < fwLength; curDataOfst += FW_WRITE_CHUNK_SIZE) {
+
+ uint8_t cmdWrite[2 + FW_WRITE_CHUNK_SIZE] = {CMD_FW_WRITE};
+ uint8_t bufRead[FW_WRITE_CHUNK_SIZE];
+ uint8_t cmdRead[2] = {CMD_FW_READ};
+ unsigned i, nRetries;
+ uint32_t curWriteSz;
+
+ /* figure out how much to write */
+ curWriteSz = fwLength - curDataOfst;
+ if (curWriteSz > FW_WRITE_CHUNK_SIZE)
+ curWriteSz = FW_WRITE_CHUNK_SIZE;
+
+ /* prepare the write command */
+ cmdWrite[1] = curWriteSz;
+ for (i = 0; i < curWriteSz; i++)
+ cmdWrite[i + 2] = fwData[curDataOfst + i];
+
+ /* prepare the read command */
+ cmdRead[1] = curWriteSz;
+
+ for (nRetries = 0; nRetries < FW_WRITE_RETRY_COUNT; nRetries++) {
+
+			/* set write offset and write the data */
+ chipSetStartOffset(writeStartOffset + curDataOfst);
+ i2cWrite(BUF_COMMAND, cmdWrite, 2 + curWriteSz);
+
+ /* set offset and read the data back */
+ chipSetStartOffset(writeStartOffset + curDataOfst);
+ i2cWrite(BUF_COMMAND, cmdRead, sizeof(cmdRead));
+ i2cRead(BUF_RESPONSE, bufRead, curWriteSz);
+
+ /* verify. If success break out of retry loop */
+ for (i = 0; i < curWriteSz && bufRead[i] == cmdWrite[i + 2]; i++);
+ if (i == curWriteSz)
+ break;
+ LOGE("write of data offset %u failed on try %u at byte %u/%u\n", curDataOfst, nRetries, i, curWriteSz);
+ }
+ /* if we've failed after all the retries, tell the caller */
+ if (nRetries == FW_WRITE_RETRY_COUNT)
+ return false;
+ }
+
+ return true;
+}
+
+static bool chipFirmwareUpload(uint32_t fwLen, const uint8_t *fwData, uint32_t cfgLen, const uint8_t *cfgData)
+{
+ bool success = false;
+
+ /* enter fw upload mode */
+ if (!chipFirmwareUpgradeModeEnterExit(true))
+ return false;
+
+ /* flash the firmware if requested */
+ if (fwLen && fwData && !chipFlashWriteAndVerify(fwLen, fwData, 0)) {
+ LOGE("failed to upload touch firmware\n");
+ goto out;
+ }
+
+ /* flash config data if requested */
+	if (cfgLen && cfgData && !chipFlashWriteAndVerify(cfgLen, cfgData, CHIP_FLASH_SIZE - cfgLen)) {
+ LOGE("failed to upload touch cfg data\n");
+ goto out;
+ }
+
+ success = true;
+
+out:
+ return chipFirmwareUpgradeModeEnterExit(false) && chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_6F) && success;
+}
+
+/*
+ * both buffers should be VERSION_LENGTH in size,
+ * but only a part of them is significant
+ */
+static bool chipGetVersions(uint8_t *verFw, uint8_t *verCfg, bool logIt)
+{
+	/* this code to get versions is reproduced as originally written, but it does not make sense; something here is probably wrong */
+ static const uint8_t cmdReadFwVer[] = {CMD_READ_VERSIONS, VER_FIRMWARE};
+ static const uint8_t cmdReadCfgVer[] = {CMD_READ_VERSIONS, VER_CONFIG};
+ bool ret = true;
+
+ /* this structure is so that we definitely do all the calls, but still return a status in case anyone cares */
+ ret = i2cWrite(BUF_COMMAND, cmdReadFwVer, sizeof(cmdReadFwVer)) && ret;
+ ret = i2cRead(BUF_RESPONSE, verFw, VERSION_LENGTH) && ret;
+ ret = i2cWrite(BUF_COMMAND, cmdReadCfgVer, sizeof(cmdReadCfgVer)) && ret;
+ ret = i2cRead(BUF_RESPONSE, verCfg, VERSION_LENGTH) && ret;
+
+ if (logIt)
+ LOGI("current versions: fw@{%X,%X,%X,%X}, cfg@{%X,%X,%X,%X}\n",
+ verFw[5], verFw[6], verFw[7], verFw[8],
+ verCfg[1], verCfg[2], verCfg[3], verCfg[4]);
+
+ return ret;
+}
+
+static ssize_t sysfsUpgradeStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ const struct firmware *fw, *cfg;
+ uint8_t verFw[10], verCfg[10];
+ unsigned fwLen = 0, cfgLen = 0;
+ bool manualUpgrade, success;
+ int mode = 0;
+
+ if (request_firmware(&fw, "it7260.fw", dev))
+ LOGE("failed to get firmware for it7260\n");
+ else
+ fwLen = fw->size;
+
+
+ if (request_firmware(&cfg, "it7260.cfg", dev))
+ LOGE("failed to get config data for it7260\n");
+ else
+ cfgLen = cfg->size;
+
+ sscanf(buf, "%d", &mode);
+ manualUpgrade = mode == SYSFS_FW_UPLOAD_MODE_MANUAL;
+ LOGI("firmware found %ub of fw and %ub of config in %s mode\n",
+ fwLen, cfgLen, manualUpgrade ? "manual" : "normal");
+
+ chipGetVersions(verFw, verCfg, true);
+
+ fwUploadResult = SYSFS_RESULT_NOT_DONE;
+ if (fwLen && cfgLen) {
+ if (manualUpgrade || (verFw[5] < fw->data[8] || verFw[6] <
+ fw->data[9] || verFw[7] < fw->data[10] || verFw[8] <
+ fw->data[11]) || (verCfg[1] < cfg->data[cfgLen - 8]
+ || verCfg[2] < cfg->data[cfgLen - 7] || verCfg[3] <
+ cfg->data[cfgLen - 6] ||
+ verCfg[4] < cfg->data[cfgLen - 5])){
+ LOGI("firmware/config will be upgraded\n");
+ disable_irq(gl_ts->client->irq);
+ success = chipFirmwareUpload(fwLen, fw->data, cfgLen, cfg->data);
+ enable_irq(gl_ts->client->irq);
+
+ fwUploadResult = success ? SYSFS_RESULT_SUCCESS : SYSFS_RESULT_FAIL;
+ LOGI("upload %s\n", success ? "success" : "failed");
+ }
+ else {
+ LOGI("firmware/config upgrade not needed\n");
+ }
+ }
+
+ if (fwLen)
+ release_firmware(fw);
+
+ if (cfgLen)
+ release_firmware(cfg);
+
+ return count;
+}
+
+static ssize_t sysfsUpgradeShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", fwUploadResult);
+}
+
+static ssize_t sysfsCalibrationShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d", calibrationWasSuccessful);
+}
+
+static bool chipSendCalibrationCmd(bool autoTuneOn)
+{
+ uint8_t cmdCalibrate[] = {CMD_CALIBRATE, 0, autoTuneOn ? 1 : 0, 0, 0};
+ return i2cWrite(BUF_COMMAND, cmdCalibrate, sizeof(cmdCalibrate));
+}
+
+static ssize_t sysfsCalibrationStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ uint8_t resp;
+
+ if (!chipSendCalibrationCmd(false))
+ LOGE("failed to send calibration command\n");
+ else {
+ calibrationWasSuccessful = i2cRead(BUF_RESPONSE, &resp, sizeof(resp)) ? SYSFS_RESULT_SUCCESS : SYSFS_RESULT_FAIL;
+
+		/* the previous logic here never called chipFirmwareReinitialize() because it compared a guaranteed-non-NULL value against NULL. We now call it. Hopefully this is OK */
+ if (!resp)
+ LOGI("chipFirmwareReinitialize -> %s\n", chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_6F) ? "success" : "fail");
+ }
+
+ return count;
+}
+
+static ssize_t sysfsPointShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ uint8_t pointData[sizeof(struct PointData)];
+ bool readSuccess;
+ ssize_t ret;
+
+ readSuccess = i2cReadNoReadyCheck(BUF_POINT_INFO, pointData, sizeof(pointData));
+	ret = sprintf(buf, "point_show read ret[%d]--point[%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x][%x]=\n",
+		readSuccess, pointData[0], pointData[1], pointData[2], pointData[3],
+		pointData[4], pointData[5], pointData[6], pointData[7], pointData[8],
+		pointData[9], pointData[10], pointData[11], pointData[12], pointData[13]);
+
+
+ LOGI("%s", buf);
+
+ return ret;
+}
+
+static ssize_t sysfsPointStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ return count;
+}
+
+static ssize_t sysfsStatusShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ return sprintf(buf, "%d\n", devicePresent ? 1 : 0);
+}
+
+static ssize_t sysfsStatusStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ uint8_t verFw[10], verCfg[10];
+
+ chipGetVersions(verFw, verCfg, true);
+
+ return count;
+}
+
+static ssize_t sysfsVersionShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+ uint8_t verFw[10], verCfg[10];
+
+ chipGetVersions(verFw, verCfg, false);
+ return sprintf(buf, "%x,%x,%x,%x # %x,%x,%x,%x\n",verFw[5], verFw[6], verFw[7], verFw[8], verCfg[1], verCfg[2], verCfg[3], verCfg[4]);
+}
+
+static ssize_t sysfsVersionStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ return count;
+}
+
+static ssize_t sysfsSleepShow(struct device *dev, struct device_attribute *attr, char *buf)
+{
+	/*
+	 * The usefulness of this was questionable at best - we were at least leaking
+	 * a byte of kernel data (by claiming to return a byte while never writing to
+	 * buf). To fix this, we now actually return the sleep status.
+	 */
+ if (!mutex_lock_interruptible(&sleepModeMutex)) {
+ *buf = chipAwake ? '1' : '0';
+ mutex_unlock(&sleepModeMutex);
+ return 1;
+ }
+ else
+ return -EINTR;
+}
+
+static ssize_t sysfsSleepStore(struct device *dev, struct device_attribute *attr, const char *buf, size_t count)
+{
+ static const uint8_t cmdGoSleep[] = {CMD_PWR_CTL, 0x00, PWR_CTL_SLEEP_MODE};
+ int goToSleepVal;
+ bool goToWake;
+ uint8_t dummy;
+
+ sscanf(buf, "%d", &goToSleepVal);
+ goToWake = !goToSleepVal; /* convert to bool of proper polarity */
+
+ if (!mutex_lock_interruptible(&sleepModeMutex)) {
+ if ((chipAwake && goToWake) || (!chipAwake && !goToWake))
+ LOGE("duplicate request to %s chip\n", goToWake ? "wake" : "sleep");
+ else if (goToWake) {
+ i2cReadNoReadyCheck(BUF_QUERY, &dummy, sizeof(dummy));
+ enable_irq(gl_ts->client->irq);
+ LOGI("touch is going to wake!\n\n");
+ } else {
+ disable_irq(gl_ts->client->irq);
+ i2cWriteNoReadyCheck(BUF_COMMAND, cmdGoSleep, sizeof(cmdGoSleep));
+ LOGI("touch is going to sleep...\n\n");
+ }
+ chipAwake = goToWake;
+ mutex_unlock(&sleepModeMutex);
+ return count;
+ } else
+ return -EINTR;
+}
+
+
+static DEVICE_ATTR(status, S_IRUGO|S_IWUSR|S_IWGRP, sysfsStatusShow, sysfsStatusStore);
+static DEVICE_ATTR(version, S_IRUGO|S_IWUSR|S_IWGRP, sysfsVersionShow, sysfsVersionStore);
+static DEVICE_ATTR(sleep, S_IRUGO|S_IWUSR|S_IWGRP, sysfsSleepShow, sysfsSleepStore);
+
+static struct attribute *it7260_attrstatus[] = {
+ &dev_attr_status.attr,
+ &dev_attr_version.attr,
+ &dev_attr_sleep.attr,
+ NULL
+};
+
+static const struct attribute_group it7260_attrstatus_group = {
+ .attrs = it7260_attrstatus,
+};
+
+static DEVICE_ATTR(calibration, S_IRUGO|S_IWUSR|S_IWGRP, sysfsCalibrationShow, sysfsCalibrationStore);
+static DEVICE_ATTR(upgrade, S_IRUGO|S_IWUSR|S_IWGRP, sysfsUpgradeShow, sysfsUpgradeStore);
+static DEVICE_ATTR(point, S_IRUGO|S_IWUSR|S_IWGRP, sysfsPointShow, sysfsPointStore);
+
+
+static struct attribute *it7260_attributes[] = {
+ &dev_attr_calibration.attr,
+ &dev_attr_upgrade.attr,
+ &dev_attr_point.attr,
+ NULL
+};
+
+static const struct attribute_group it7260_attr_group = {
+ .attrs = it7260_attributes,
+};
+
+static void chipExternalCalibration(bool autoTuneEnabled)
+{
+ uint8_t resp[2];
+
+ LOGI("sent calibration command -> %d\n", chipSendCalibrationCmd(autoTuneEnabled));
+ waitDeviceReady(true, true);
+ i2cReadNoReadyCheck(BUF_RESPONSE, resp, sizeof(resp));
+ chipFirmwareReinitialize(CMD_FIRMWARE_REINIT_C);
+}
+
+void sendCalibrationCmd(void)
+{
+ chipExternalCalibration(false);
+}
+EXPORT_SYMBOL(sendCalibrationCmd);
+
+static void readFingerData(uint16_t *xP, uint16_t *yP, uint8_t *pressureP, const struct FingerData *fd)
+{
+ uint16_t x = fd->xLo;
+ uint16_t y = fd->yLo;
+
+ x += ((uint16_t)(fd->hi & 0x0F)) << 8;
+ y += ((uint16_t)(fd->hi & 0xF0)) << 4;
+
+ if (xP)
+ *xP = x;
+ if (yP)
+ *yP = y;
+ if (pressureP)
+ *pressureP = fd->pressure & FD_PRESSURE_BITS;
+}
+
+static void readTouchDataPoint(void)
+{
+ struct PointData pointData;
+ uint8_t devStatus;
+ uint8_t pressure = FD_PRESSURE_NONE;
+ uint16_t x, y;
+
+ /* verify there is point data to read & it is readable and valid */
+ i2cReadNoReadyCheck(BUF_QUERY, &devStatus, sizeof(devStatus));
+ if (!((devStatus & PT_INFO_BITS) & PT_INFO_YES)) {
+ LOGE("readTouchDataPoint() called when no data available (0x%02X)\n", devStatus);
+ return;
+ }
+ if (!i2cReadNoReadyCheck(BUF_POINT_INFO, (void*)&pointData, sizeof(pointData))) {
+ LOGE("readTouchDataPoint() failed to read point data buffer\n");
+ return;
+ }
+ if ((pointData.flags & PD_FLAGS_DATA_TYPE_BITS) != PD_FLAGS_DATA_TYPE_TOUCH) {
+ LOGE("readTouchDataPoint() dropping non-point data of type 0x%02X\n", pointData.flags);
+ return;
+ }
+
+ if ((pointData.flags & PD_FLAGS_HAVE_FINGERS) & 1)
+ readFingerData(&x, &y, &pressure, pointData.fd);
+
+ if (pressure >= FD_PRESSURE_LIGHT) {
+
+ if (!hadFingerDown)
+ hadFingerDown = true;
+
+ readFingerData(&x, &y, &pressure, pointData.fd);
+
+ input_report_abs(gl_ts->input_dev, ABS_X, x);
+ input_report_abs(gl_ts->input_dev, ABS_Y, y);
+ input_report_key(gl_ts->input_dev, BTN_TOUCH, 1);
+ input_sync(gl_ts->input_dev);
+
+ } else if (hadFingerDown) {
+ hadFingerDown = false;
+
+ input_report_key(gl_ts->input_dev, BTN_TOUCH, 0);
+ input_sync(gl_ts->input_dev);
+ }
+
+}
+
+static irqreturn_t IT7260_ts_threaded_handler(int irq, void *devid)
+{
+ smp_rmb();
+ if (isDeviceSleeping) {
+ smp_wmb();
+ } else {
+ readTouchDataPoint();
+ }
+
+ return IRQ_HANDLED;
+}
+
+static bool chipIdentifyIT7260(void)
+{
+ static const uint8_t cmdIdent[] = {CMD_IDENT_CHIP};
+ static const uint8_t expectedID[] = {0x0A, 'I', 'T', 'E', '7', '2', '6', '0'};
+ uint8_t chipID[10] = {0,};
+
+ waitDeviceReady(true, false);
+
+ if (!i2cWriteNoReadyCheck(BUF_COMMAND, cmdIdent, sizeof(cmdIdent))) {
+ LOGE("i2cWrite() failed\n");
+ return false;
+ }
+
+ waitDeviceReady(true, false);
+
+ if (!i2cReadNoReadyCheck(BUF_RESPONSE, chipID, sizeof(chipID))) {
+ LOGE("i2cRead() failed\n");
+ return false;
+ }
+ pr_info("chipIdentifyIT7260 read id: %02X %02X %02X %02X %02X %02X %02X %02X %02X %02X\n",
+ chipID[0], chipID[1], chipID[2], chipID[3], chipID[4],
+ chipID[5], chipID[6], chipID[7], chipID[8], chipID[9]);
+
+ if (memcmp(chipID, expectedID, sizeof(expectedID)))
+ return false;
+
+ if (chipID[8] == '5' && chipID[9] == '6')
+ LOGI("rev BX3 found\n");
+ else if (chipID[8] == '6' && chipID[9] == '6')
+ LOGI("rev BX4 found\n");
+ else
+ LOGI("unknown revision (0x%02X 0x%02X) found\n", chipID[8], chipID[9]);
+
+ return true;
+}
+
+static int IT7260_ts_probe(struct i2c_client *client, const struct i2c_device_id *id)
+{
+ static const uint8_t cmdStart[] = {CMD_UNKNOWN_7};
+ struct IT7260_i2c_platform_data *pdata;
+ uint8_t rsp[2];
+ int ret = -1;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ LOGE("need I2C_FUNC_I2C\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+
+ if (!client->irq) {
+ LOGE("need IRQ\n");
+ ret = -ENODEV;
+ goto err_out;
+ }
+ gl_ts = kzalloc(sizeof(*gl_ts), GFP_KERNEL);
+ if (!gl_ts) {
+ ret = -ENOMEM;
+ goto err_out;
+ }
+
+ gl_ts->client = client;
+ i2c_set_clientdata(client, gl_ts);
+ pdata = client->dev.platform_data;
+
+ if (sysfs_create_group(&(client->dev.kobj), &it7260_attrstatus_group)) {
+ dev_err(&client->dev, "failed to register sysfs #1\n");
+ goto err_sysfs_grp_create_1;
+ }
+
+ if (!chipIdentifyIT7260()) {
+ LOGI ("chipIdentifyIT7260 FAIL");
+ goto err_ident_fail_or_input_alloc;
+ }
+
+ input_dev = input_allocate_device();
+ if (!input_dev) {
+ LOGE("failed to allocate input device\n");
+ ret = -ENOMEM;
+ goto err_ident_fail_or_input_alloc;
+ }
+ gl_ts->input_dev = input_dev;
+
+ input_dev->name = DEVICE_NAME;
+ input_dev->phys = "I2C";
+ input_dev->id.bustype = BUS_I2C;
+ input_dev->id.vendor = 0x0001;
+ input_dev->id.product = 0x7260;
+ set_bit(EV_SYN, input_dev->evbit);
+ set_bit(EV_KEY, input_dev->evbit);
+ set_bit(EV_ABS, input_dev->evbit);
+	set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+ set_bit(BTN_TOUCH, input_dev->keybit);
+	set_bit(KEY_SLEEP, input_dev->keybit);
+	set_bit(KEY_WAKEUP, input_dev->keybit);
+	set_bit(KEY_POWER, input_dev->keybit);
+ input_set_abs_params(input_dev, ABS_X, 0, SCREEN_X_RESOLUTION, 0, 0);
+ input_set_abs_params(input_dev, ABS_Y, 0, SCREEN_Y_RESOLUTION, 0, 0);
+
+ if (input_register_device(input_dev)) {
+ LOGE("failed to register input device\n");
+ goto err_input_register;
+ }
+
+ if (request_threaded_irq(client->irq, NULL, IT7260_ts_threaded_handler, IRQF_TRIGGER_LOW | IRQF_ONESHOT, client->name, gl_ts)) {
+ dev_err(&client->dev, "request_irq failed\n");
+ goto err_irq_reg;
+ }
+
+ if (sysfs_create_group(&(client->dev.kobj), &it7260_attr_group)) {
+ dev_err(&client->dev, "failed to register sysfs #2\n");
+ goto err_sysfs_grp_create_2;
+ }
+
+ devicePresent = true;
+
+ i2cWriteNoReadyCheck(BUF_COMMAND, cmdStart, sizeof(cmdStart));
+ mdelay(10);
+ i2cReadNoReadyCheck(BUF_RESPONSE, rsp, sizeof(rsp));
+ mdelay(10);
+
+ return 0;
+
+err_sysfs_grp_create_2:
+ free_irq(client->irq, gl_ts);
+
+err_irq_reg:
+ input_unregister_device(input_dev);
+ input_dev = NULL;
+
+err_input_register:
+ if (input_dev)
+ input_free_device(input_dev);
+
+err_ident_fail_or_input_alloc:
+ sysfs_remove_group(&(client->dev.kobj), &it7260_attrstatus_group);
+
+err_sysfs_grp_create_1:
+ kfree(gl_ts);
+
+err_out:
+ return ret;
+}
+
+static int IT7260_ts_remove(struct i2c_client *client)
+{
+ devicePresent = false;
+ return 0;
+}
+
+static const struct i2c_device_id IT7260_ts_id[] = {
+ { DEVICE_NAME, 0},
+ {}
+};
+
+MODULE_DEVICE_TABLE(i2c, IT7260_ts_id);
+
+static const struct of_device_id IT7260_match_table[] = {
+ { .compatible = "ITE,IT7260_ts",},
+ {},
+};
+
+static int IT7260_ts_resume(struct i2c_client *i2cdev)
+{
+ isDeviceSuspend = false;
+ return 0;
+}
+
+static int IT7260_ts_suspend(struct i2c_client *i2cdev, pm_message_t pmesg)
+{
+ isDeviceSuspend = true;
+ return 0;
+}
+
+static struct i2c_driver IT7260_ts_driver = {
+ .driver = {
+ .owner = THIS_MODULE,
+ .name = DEVICE_NAME,
+ .of_match_table = IT7260_match_table,
+ },
+ .probe = IT7260_ts_probe,
+ .remove = IT7260_ts_remove,
+ .id_table = IT7260_ts_id,
+ .resume = IT7260_ts_resume,
+ .suspend = IT7260_ts_suspend,
+};
+
+module_i2c_driver(IT7260_ts_driver);
+
+MODULE_DESCRIPTION("IT7260 Touchscreen Driver");
+MODULE_LICENSE("GPL v2");
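
For reference, readFingerData() in the driver above unpacks two 12-bit
coordinates from three bytes: the shared hi byte carries bits 8..11 of X in
its low nibble and bits 8..11 of Y in its high nibble. A standalone check of
that unpacking (the sample bytes are made up):

#include <assert.h>
#include <stdint.h>

int main(void)
{
	/* xLo = 0x34, yLo = 0x21, hi = 0x21 -> X = 0x134, Y = 0x221 */
	uint8_t xLo = 0x34, yLo = 0x21, hi = 0x21;

	uint16_t x = xLo | (uint16_t)(hi & 0x0F) << 8; /* low nibble: X bits 8..11 */
	uint16_t y = yLo | (uint16_t)(hi & 0xF0) << 4; /* high nibble: Y bits 8..11 */

	assert(x == 0x134 && y == 0x221);
	return 0;
}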
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ce3f8713df3e..db4b66bb18ed 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -568,6 +568,28 @@ static struct arm_smmu_master *find_smmu_master(struct arm_smmu_device *smmu,
return NULL;
}
+static struct arm_smmu_master *find_smmu_master_by_sid(
+ struct arm_smmu_device *smmu, u32 sid)
+{
+ struct rb_node *next;
+ struct arm_smmu_master *master;
+ struct arm_smmu_master_cfg *cfg;
+ int i;
+
+ next = rb_first(&smmu->masters);
+ for (; next; next = rb_next(next)) {
+ master = container_of(next, struct arm_smmu_master, node);
+ cfg = &master->cfg;
+
+ for (i = 0; i < cfg->num_streamids; i++) {
+ if (cfg->streamids[i] == sid)
+ return master;
+ }
+ }
+
+ return NULL;
+}
+
static struct arm_smmu_master_cfg *
find_smmu_master_cfg(struct device *dev)
{
@@ -1175,8 +1197,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
bool fatal_asf;
void __iomem *gr1_base;
phys_addr_t phys_soft;
- u32 frsynra;
+ u32 sid;
bool non_fatal_fault = smmu_domain->non_fatal_faults;
+ struct arm_smmu_master *master;
static DEFINE_RATELIMIT_STATE(_rs,
DEFAULT_RATELIMIT_INTERVAL,
@@ -1231,7 +1254,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
iova = far;
phys_soft = arm_smmu_iova_to_phys(domain, iova);
- frsynra = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
+ sid = readl_relaxed(gr1_base + ARM_SMMU_GR1_CBFRSYNRA(cfg->cbndx));
+ sid &= 0xffff;
+ master = find_smmu_master_by_sid(smmu, sid);
tmp = report_iommu_fault(domain, smmu->dev, iova, flags);
if (!tmp || (tmp == -EBUSY)) {
dev_dbg(smmu->dev,
@@ -1246,6 +1271,9 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
fsr);
if (__ratelimit(&_rs)) {
+ dev_err(smmu->dev, "Context Fault for %s\n",
+ master ? master->of_node->name : "Unknown SID");
+
dev_err(smmu->dev,
"Unhandled context fault: iova=0x%08lx, fsr=0x%x, fsynr=0x%x, cb=%d\n",
iova, fsr, fsynr, cfg->cbndx);
@@ -1271,7 +1299,7 @@ static irqreturn_t arm_smmu_context_fault(int irq, void *dev)
dev_name(smmu->dev));
dev_err(smmu->dev,
"hard iova-to-phys (ATOS)=%pa\n", &phys_atos);
- dev_err(smmu->dev, "SID=0x%x\n", frsynra & 0xffff);
+ dev_err(smmu->dev, "SID=0x%x\n", sid);
}
ret = IRQ_NONE;
resume = RESUME_TERMINATE;
diff --git a/drivers/media/platform/msm/camera_v2/camera/camera.c b/drivers/media/platform/msm/camera_v2/camera/camera.c
index c1aeb8c43e81..3985df780216 100644
--- a/drivers/media/platform/msm/camera_v2/camera/camera.c
+++ b/drivers/media/platform/msm/camera_v2/camera/camera.c
@@ -538,7 +538,7 @@ static int camera_v4l2_fh_open(struct file *filep)
{
struct msm_video_device *pvdev = video_drvdata(filep);
struct camera_v4l2_private *sp;
- unsigned int stream_id;
+ unsigned long stream_id;
sp = kzalloc(sizeof(*sp), GFP_KERNEL);
if (!sp) {
@@ -617,7 +617,7 @@ static int camera_v4l2_open(struct file *filep)
int rc = 0;
struct v4l2_event event;
struct msm_video_device *pvdev = video_drvdata(filep);
- unsigned int opn_idx, idx;
+ unsigned long opn_idx, idx;
BUG_ON(!pvdev);
rc = camera_v4l2_fh_open(filep);
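
The camera.c hunks widen stream_id and opn_idx from unsigned int to unsigned
long. Assuming these values are used as bitmaps with the generic kernel
bitops (the callers are not shown in this hunk), the width matters because
helpers such as find_first_zero_bit() read whole unsigned long words from
the address they are given; feeding them a 4-byte unsigned int through a
cast would read past the object on a 64-bit kernel. A userspace
approximation of the word-sized access:

#include <stdio.h>

/* Minimal stand-in for the kernel's find_first_zero_bit(), which
 * reads whole unsigned long words from the bitmap it is handed. */
static unsigned long find_first_zero_bit_ul(const unsigned long *addr,
					    unsigned long size)
{
	unsigned long i;

	for (i = 0; i < size; i++)
		if (!(addr[i / (8 * sizeof(unsigned long))] &
		      (1UL << (i % (8 * sizeof(unsigned long))))))
			return i;
	return size;
}

int main(void)
{
	/* Because opn_idx really is an unsigned long, the helper reads
	 * exactly the object we declared; an unsigned int cast to
	 * unsigned long * would not be wide enough. */
	unsigned long opn_idx = 0x7;

	printf("first zero bit: %lu\n",
	       find_first_zero_bit_ul(&opn_idx, 8 * sizeof(opn_idx)));
	return 0;
}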
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
index a76ccc06c9e1..d42ada769380 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp40.c
@@ -101,29 +101,21 @@ static void msm_vfe40_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x28);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
}
+ msm_camera_io_w_mb(vfe_dev->irq0_mask, vfe_dev->vfe_base + 0x28);
+ msm_camera_io_w_mb(vfe_dev->irq1_mask, vfe_dev->vfe_base + 0x2C);
}
static int32_t msm_vfe40_init_qos_parms(struct vfe_device *vfe_dev,
@@ -335,10 +327,8 @@ static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
msm_vfe40_init_vbif_parms(vfe_dev, &vbif_parms);
/* BUS_CFG */
msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask = 0xE00000F1;
- vfe_dev->irq1_mask = 0xFEFFFFFF;
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 0x800000E0, 0xFEFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFEFFFFFF, vfe_dev->vfe_base + 0x34);
msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
@@ -346,15 +336,13 @@ static void msm_vfe40_init_hardware_reg(struct vfe_device *vfe_dev)
msm_camera_io_w(0, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0, vfe_dev->vfe_base + 0x34);
msm_camera_io_w(1, vfe_dev->vfe_base + 0x24);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
}
static void msm_vfe40_clear_status_reg(struct vfe_device *vfe_dev)
{
vfe_dev->irq0_mask = (1 << 31);
vfe_dev->irq1_mask = 0;
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ msm_vfe40_config_irq(vfe_dev, (1 << 31), 0,
MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
@@ -589,7 +577,6 @@ static void msm_vfe40_read_irq_status(struct vfe_device *vfe_dev,
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe40_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -812,11 +799,9 @@ static void msm_vfe40_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
comp_mask |= (axi_data->composite_info[comp_mask_index].
stream_composite_mask << (comp_mask_index * 8));
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
-
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -828,27 +813,24 @@ static void msm_vfe40_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask = msm_camera_io_r(vfe_dev->vfe_base + 0x40);
comp_mask &= ~(0x7F << (comp_mask_index * 8));
- vfe_dev->irq0_mask &= ~(1 << (comp_mask_index + 25));
-
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_cfg_framedrop(void __iomem *vfe_base,
@@ -1088,10 +1070,8 @@ static void msm_vfe40_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, (1 << 24), 0,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w((fe_cfg->fetch_height - 1),
vfe_dev->vfe_base + 0x238);
@@ -1382,13 +1362,11 @@ static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
return;
if (update_state == ENABLE_CAMIF) {
- msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
- msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0x81, vfe_dev->vfe_base + 0x34);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
- vfe_dev->irq0_mask |= 0xF7;
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 0xF7, 0x81,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
bus_en =
@@ -1413,8 +1391,8 @@ static void msm_vfe40_update_camif_state(struct vfe_device *vfe_dev,
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
- msm_vfe40_config_irq(vfe_dev, 0, 0,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev, 0, 0x81,
+ MSM_ISP_IRQ_DISABLE);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
/* disable danger signal */
msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0x464);
@@ -1897,6 +1875,9 @@ static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe40_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1904,6 +1885,9 @@ static void msm_vfe40_stats_cfg_comp_mask(struct vfe_device *vfe_dev,
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(mask_bf_scale <<
(16 + request_comp_index * 8));
+ msm_vfe40_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x44);
@@ -1919,20 +1903,18 @@ static void msm_vfe40_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask |=
- 1 << (STATS_IDX(stream_info->stream_handle) + 16);
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 16), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe40_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask &=
- ~(1 << (STATS_IDX(stream_info->stream_handle) + 16));
- msm_vfe40_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe40_config_irq(vfe_dev,
+ (1 << (STATS_IDX(stream_info->stream_handle) + 16)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe40_stats_cfg_wm_reg(
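
All four VFE variants (isp40/44/46/47) get the same rework in this patch:
instead of read-modify-write cycles on the IRQ mask registers, config_irq()
keeps shadow copies in vfe_dev->irq0_mask/irq1_mask, updates them per the
ENABLE/DISABLE/SET operation, and writes the shadows back unconditionally.
A condensed standalone sketch of the pattern, with the register write
stubbed out:

#include <stdint.h>
#include <stdio.h>

enum irq_op { IRQ_ENABLE, IRQ_DISABLE, IRQ_SET };

static uint32_t shadow_mask; /* mirrors vfe_dev->irq0_mask */

static void write_mask_reg(uint32_t val) /* stub for msm_camera_io_w_mb() */
{
	printf("IRQ_MASK <- 0x%08x\n", (unsigned int)val);
}

static void config_irq(uint32_t mask, enum irq_op op)
{
	switch (op) {
	case IRQ_ENABLE:
		shadow_mask |= mask;
		break;
	case IRQ_DISABLE:
		shadow_mask &= ~mask;
		break;
	case IRQ_SET:
		shadow_mask = mask;
		break;
	}
	/* always write the shadow back - no register read needed */
	write_mask_reg(shadow_mask);
}

int main(void)
{
	config_irq(0x800000E0, IRQ_ENABLE);
	config_irq(1u << 24, IRQ_ENABLE);
	config_irq(1u << 24, IRQ_DISABLE);
	return 0;
}

Because every caller now passes only the bits it owns with ENABLE/DISABLE,
the per-callsite bookkeeping of vfe_dev->irq0_mask that the old code did by
hand disappears, which is why so many hunks in these files shrink to a
single config_irq() call.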
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
index 08b20395813c..388656b9ca30 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp44.c
@@ -70,30 +70,22 @@ static void msm_vfe44_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x28);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x28);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x2C);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x28);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x2C);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
break;
}
+	msm_camera_io_w_mb(vfe_dev->irq0_mask, vfe_dev->vfe_base + 0x28);
+	msm_camera_io_w_mb(vfe_dev->irq1_mask, vfe_dev->vfe_base + 0x2C);
}
static int32_t msm_vfe44_init_dt_parms(struct vfe_device *vfe_dev,
@@ -181,10 +173,8 @@ static void msm_vfe44_init_hardware_reg(struct vfe_device *vfe_dev)
/* BUS_CFG */
msm_camera_io_w(0x10000001, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask = 0xE00000F1;
- vfe_dev->irq1_mask = 0xFFFFFFFF;
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 0x800000E0, 0xFFFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
@@ -193,9 +183,7 @@ static void msm_vfe44_init_hardware_reg(struct vfe_device *vfe_dev)
static void msm_vfe44_clear_status_reg(struct vfe_device *vfe_dev)
{
- vfe_dev->irq0_mask = 0x80000000;
- vfe_dev->irq1_mask = 0;
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ msm_vfe44_config_irq(vfe_dev, 0x80000000, 0,
MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
@@ -419,7 +407,6 @@ static void msm_vfe44_read_irq_status(struct vfe_device *vfe_dev,
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x31C);
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe44_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -650,9 +637,8 @@ static void msm_vfe44_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
stream_composite_mask << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -664,25 +650,22 @@ static void msm_vfe44_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x40);
- vfe_dev->irq0_mask &= ~(1 << (comp_mask_index + 25));
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_cfg_framedrop(void __iomem *vfe_base,
@@ -918,10 +901,8 @@ static void msm_vfe44_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x50);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, (1 << 24), 0,
+		MSM_ISP_IRQ_ENABLE);
msm_camera_io_w((fe_cfg->fetch_height - 1) & 0xFFF,
vfe_dev->vfe_base + 0x238);
@@ -1045,13 +1026,12 @@ static void msm_vfe44_update_camif_state(struct vfe_device *vfe_dev,
return;
if (update_state == ENABLE_CAMIF) {
- msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x30);
- msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x34);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x30);
+ msm_camera_io_w_mb(0x81, vfe_dev->vfe_base + 0x34);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x24);
- vfe_dev->irq0_mask |= 0xF7;
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev, 0xF7, 0x81,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w_mb(0x140000, vfe_dev->vfe_base + 0x318);
bus_en =
@@ -1075,7 +1055,7 @@ static void msm_vfe44_update_camif_state(struct vfe_device *vfe_dev,
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
msm_vfe44_config_irq(vfe_dev, 0,
- 0, MSM_ISP_IRQ_SET);
+ 0x81, MSM_ISP_IRQ_DISABLE);
val = msm_camera_io_r(vfe_dev->vfe_base + 0xC18);
/* disable danger signal */
msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0xC18);
@@ -1526,6 +1506,9 @@ static void msm_vfe44_stats_cfg_comp_mask(
comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe44_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1540,6 +1523,9 @@ static void msm_vfe44_stats_cfg_comp_mask(
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(mask_bf_scale <<
(16 + request_comp_index * 8));
+ msm_vfe44_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x44);
@@ -1555,20 +1541,18 @@ static void msm_vfe44_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask |=
- 1 << (STATS_IDX(stream_info->stream_handle) + 15);
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe44_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask &=
- ~(1 << (STATS_IDX(stream_info->stream_handle) + 15));
- msm_vfe44_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe44_config_irq(vfe_dev,
+ (1 << (STATS_IDX(stream_info->stream_handle) + 15)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe44_stats_cfg_wm_reg(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
index 9f815e65edc8..40bb044fde47 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp46.c
@@ -92,30 +92,24 @@ static void msm_vfe46_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x5C);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
break;
}
+ msm_camera_io_w_mb(vfe_dev->irq0_mask,
+ vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(vfe_dev->irq1_mask,
+ vfe_dev->vfe_base + 0x60);
}
static int32_t msm_vfe46_init_dt_parms(struct vfe_device *vfe_dev,
@@ -208,20 +202,16 @@ static void msm_vfe46_init_hardware_reg(struct vfe_device *vfe_dev)
/* BUS_CFG */
msm_camera_io_w(0x00000001, vfe_dev->vfe_base + 0x84);
/* IRQ_MASK/CLEAR */
- vfe_dev->irq0_mask = 0xE00000F1;
- vfe_dev->irq1_mask = 0xE1FFFFFF;
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
}
static void msm_vfe46_clear_status_reg(struct vfe_device *vfe_dev)
{
- vfe_dev->irq0_mask = 0x80000000;
- vfe_dev->irq1_mask = 0x0;
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 0x80000000, 0, MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
@@ -355,7 +345,6 @@ static void msm_vfe46_read_irq_status(struct vfe_device *vfe_dev,
if (*irq_status1 & (1 << 0)) {
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x3D0);
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe46_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -587,9 +576,8 @@ static void msm_vfe46_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
stream_composite_mask << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -601,25 +589,22 @@ static void msm_vfe46_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_cfg_framedrop(void __iomem *vfe_base,
@@ -857,10 +842,8 @@ static void msm_vfe46_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 1 << 24, 0,
+ MSM_ISP_IRQ_ENABLE);
temp = fe_cfg->fetch_height - 1;
msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x278);
@@ -1120,9 +1103,11 @@ static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev,
return;
if (update_state == ENABLE_CAMIF) {
- vfe_dev->irq0_mask |= 0xF5;
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0x81, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ msm_vfe46_config_irq(vfe_dev, 0x15, 0x81,
+ MSM_ISP_IRQ_ENABLE);
bus_en =
((vfe_dev->axi_data.
@@ -1148,7 +1133,8 @@ static void msm_vfe46_update_camif_state(struct vfe_device *vfe_dev,
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
- msm_vfe46_config_irq(vfe_dev, 0, 0, MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev, 0, 0x81,
+ MSM_ISP_IRQ_DISABLE);
/* disable danger signal */
val = msm_camera_io_r(vfe_dev->vfe_base + 0xC18);
val &= ~(1 << 8);
@@ -1611,6 +1597,9 @@ static void msm_vfe46_stats_cfg_comp_mask(
comp_mask_reg |= mask_bf_scale << (16 + request_comp_index * 8);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1625,6 +1614,9 @@ static void msm_vfe46_stats_cfg_comp_mask(
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(mask_bf_scale <<
(16 + request_comp_index * 8));
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (request_comp_index + 29), 0,
+ MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
@@ -1640,19 +1632,18 @@ static void msm_vfe46_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (STATS_IDX(stream_info->stream_handle) + 15);
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
+ MSM_ISP_IRQ_ENABLE);
}
static void msm_vfe46_stats_clear_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- vfe_dev->irq0_mask &=
- ~(1 << (STATS_IDX(stream_info->stream_handle) + 15));
- msm_vfe46_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe46_config_irq(vfe_dev,
+ 1 << (STATS_IDX(stream_info->stream_handle) + 15), 0,
+ MSM_ISP_IRQ_DISABLE);
}
static void msm_vfe46_stats_cfg_wm_reg(
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index 20aa69f322db..290f100ffeba 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -149,30 +149,24 @@ void msm_vfe47_config_irq(struct vfe_device *vfe_dev,
uint32_t irq0_mask, uint32_t irq1_mask,
enum msm_isp_irq_operation oper)
{
- uint32_t val;
-
switch (oper) {
case MSM_ISP_IRQ_ENABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val |= irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val |= irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask |= irq0_mask;
+ vfe_dev->irq1_mask |= irq1_mask;
break;
case MSM_ISP_IRQ_DISABLE:
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x5C);
- val &= ~irq0_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x5C);
- val = msm_camera_io_r(vfe_dev->vfe_base + 0x60);
- val &= ~irq1_mask;
- msm_camera_io_w_mb(val, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask &= ~irq0_mask;
+ vfe_dev->irq1_mask &= ~irq1_mask;
break;
case MSM_ISP_IRQ_SET:
- msm_camera_io_w_mb(irq0_mask, vfe_dev->vfe_base + 0x5C);
- msm_camera_io_w_mb(irq1_mask, vfe_dev->vfe_base + 0x60);
+ vfe_dev->irq0_mask = irq0_mask;
+ vfe_dev->irq1_mask = irq1_mask;
break;
}
+ msm_camera_io_w_mb(vfe_dev->irq0_mask,
+ vfe_dev->vfe_base + 0x5C);
+ msm_camera_io_w_mb(vfe_dev->irq1_mask,
+ vfe_dev->vfe_base + 0x60);
}
static int32_t msm_vfe47_init_dt_parms(struct vfe_device *vfe_dev,
@@ -285,13 +279,6 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
else
id = CAM_AHB_CLIENT_VFE1;
- rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
- if (rc < 0) {
- pr_err("%s: failed to vote for AHB\n", __func__);
- goto ahb_vote_fail;
- }
- vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
-
rc = vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(
vfe_dev, 1);
if (rc)
@@ -302,6 +289,13 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
if (rc)
goto clk_enable_failed;
+ rc = cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SVS_VOTE);
+ if (rc < 0) {
+ pr_err("%s: failed to vote for AHB\n", __func__);
+ goto ahb_vote_fail;
+ }
+ vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
+
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
vfe_dev->vfe_base;
@@ -312,14 +306,14 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
return rc;
irq_enable_fail:
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
-clk_enable_failed:
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
-enable_regulators_failed:
if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
pr_err("%s: failed to remove vote for AHB\n", __func__);
vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
ahb_vote_fail:
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(vfe_dev, 0);
+clk_enable_failed:
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
+enable_regulators_failed:
return rc;
}
@@ -338,9 +332,6 @@ void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
msm_isp_flush_tasklet(vfe_dev);
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] = NULL;
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
- vfe_dev, 0);
- vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
msm_isp_update_bandwidth(ISP_VFE0 + vfe_dev->pdev->id, 0, 0);
@@ -351,7 +342,12 @@ void msm_vfe47_release_hardware(struct vfe_device *vfe_dev)
if (cam_config_ahb_clk(NULL, 0, id, CAM_AHB_SUSPEND_VOTE) < 0)
pr_err("%s: failed to vote for AHB\n", __func__);
- vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
+
+ vfe_dev->ahb_vote = CAM_AHB_SUSPEND_VOTE;
+
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_clks(
+ vfe_dev, 0);
+ vfe_dev->hw_info->vfe_ops.platform_ops.enable_regulators(vfe_dev, 0);
}
void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
@@ -382,19 +378,16 @@ void msm_vfe47_init_hardware_reg(struct vfe_device *vfe_dev)
/* BUS_CFG */
msm_camera_io_w(0x00000101, vfe_dev->vfe_base + 0x84);
/* IRQ_MASK/CLEAR */
- vfe_dev->irq0_mask = 0xE00000F3;
- vfe_dev->irq1_mask = 0xFFFFFFFF;
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, 0x810000E0, 0xFFFFFF7E,
+ MSM_ISP_IRQ_ENABLE);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w_mb(0x1, vfe_dev->vfe_base + 0x58);
}
void msm_vfe47_clear_status_reg(struct vfe_device *vfe_dev)
{
- vfe_dev->irq0_mask = 0x80000000;
- vfe_dev->irq1_mask = 0x0;
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
+ msm_vfe47_config_irq(vfe_dev, 0x80000000, 0x0,
MSM_ISP_IRQ_SET);
msm_camera_io_w(0xFFFFFFFF, vfe_dev->vfe_base + 0x64);
msm_camera_io_w_mb(0xFFFFFFFF, vfe_dev->vfe_base + 0x68);
@@ -543,7 +536,6 @@ void msm_vfe47_read_irq_status(struct vfe_device *vfe_dev,
vfe_dev->error_info.camif_status =
msm_camera_io_r(vfe_dev->vfe_base + 0x4A4);
/* mask off camif error after first occurrance */
- vfe_dev->irq1_mask &= ~(1 << 0);
msm_vfe47_config_irq(vfe_dev, 0, (1 << 0), MSM_ISP_IRQ_DISABLE);
}
@@ -785,9 +777,8 @@ void msm_vfe47_axi_cfg_comp_mask(struct vfe_device *vfe_dev,
stream_composite_mask << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask |= 1 << (comp_mask_index + 25);
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, 1 << (comp_mask_index + 25), 0,
+ MSM_ISP_IRQ_ENABLE);
}
void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
@@ -799,25 +790,22 @@ void msm_vfe47_axi_clear_comp_mask(struct vfe_device *vfe_dev,
comp_mask &= ~(0x7F << (comp_mask_index * 8));
msm_camera_io_w(comp_mask, vfe_dev->vfe_base + 0x74);
- vfe_dev->irq0_mask &= ~(1 << (comp_mask_index + 25));
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, (1 << (comp_mask_index + 25)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
void msm_vfe47_axi_cfg_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask |= 1 << (stream_info->wm[0] + 8);
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, 1 << (stream_info->wm[0] + 8), 0,
+ MSM_ISP_IRQ_ENABLE);
}
void msm_vfe47_axi_clear_wm_irq_mask(struct vfe_device *vfe_dev,
struct msm_vfe_axi_stream *stream_info)
{
- vfe_dev->irq0_mask &= ~(1 << (stream_info->wm[0] + 8));
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask, vfe_dev->irq1_mask,
- MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, (1 << (stream_info->wm[0] + 8)), 0,
+ MSM_ISP_IRQ_DISABLE);
}
void msm_vfe47_cfg_framedrop(void __iomem *vfe_base,
@@ -1065,10 +1053,8 @@ void msm_vfe47_cfg_fetch_engine(struct vfe_device *vfe_dev,
temp |= (1 << 1);
msm_camera_io_w(temp, vfe_dev->vfe_base + 0x84);
- vfe_dev->irq0_mask &= 0xFEFFFFFF;
- vfe_dev->irq0_mask |= (1 << 24);
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_vfe47_config_irq(vfe_dev, (1 << 24), 0,
+ MSM_ISP_IRQ_ENABLE);
temp = fe_cfg->fetch_height - 1;
msm_camera_io_w(temp & 0x3FFF, vfe_dev->vfe_base + 0x308);
@@ -1394,9 +1380,11 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
val = msm_camera_io_r(vfe_dev->vfe_base + 0x47C);
if (update_state == ENABLE_CAMIF) {
- vfe_dev->irq0_mask |= 0xF5;
- msm_vfe47_config_irq(vfe_dev, vfe_dev->irq0_mask,
- vfe_dev->irq1_mask, MSM_ISP_IRQ_SET);
+ msm_camera_io_w(0x0, vfe_dev->vfe_base + 0x64);
+ msm_camera_io_w(0x81, vfe_dev->vfe_base + 0x68);
+ msm_camera_io_w(0x1, vfe_dev->vfe_base + 0x58);
+ msm_vfe47_config_irq(vfe_dev, 0x15, 0x81,
+ MSM_ISP_IRQ_ENABLE);
if ((vfe_dev->hvx_cmd > HVX_DISABLE) &&
(vfe_dev->hvx_cmd <= HVX_ROUND_TRIP))
@@ -1427,8 +1415,9 @@ void msm_vfe47_update_camif_state(struct vfe_device *vfe_dev,
/* For testgen always halt on camif boundary */
if (vfe_dev->axi_data.src_info[VFE_PIX_0].input_mux == TESTGEN)
update_state = DISABLE_CAMIF;
- /* turn off all irq before camif disable */
- msm_vfe47_config_irq(vfe_dev, 0, 0, MSM_ISP_IRQ_SET);
+ /* turn off camif violation and error irqs */
+ msm_vfe47_config_irq(vfe_dev, 0, 0x81,
+ MSM_ISP_IRQ_DISABLE);
val = msm_camera_io_r(vfe_dev->vfe_base + 0x464);
/* disable danger signal */
msm_camera_io_w_mb(val & ~(1 << 8), vfe_dev->vfe_base + 0x464);
@@ -1896,6 +1885,8 @@ void msm_vfe47_stats_cfg_comp_mask(
comp_mask_reg |= stats_mask << (request_comp_index * 16);
atomic_set(stats_comp_mask, stats_mask |
atomic_read(stats_comp_mask));
+ msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ 0, MSM_ISP_IRQ_ENABLE);
} else {
if (!(atomic_read(stats_comp_mask) & stats_mask))
return;
@@ -1903,6 +1894,8 @@ void msm_vfe47_stats_cfg_comp_mask(
atomic_set(stats_comp_mask,
~stats_mask & atomic_read(stats_comp_mask));
comp_mask_reg &= ~(stats_mask << (request_comp_index * 16));
+ msm_vfe47_config_irq(vfe_dev, 1 << (29 + request_comp_index),
+ 0, MSM_ISP_IRQ_DISABLE);
}
msm_camera_io_w(comp_mask_reg, vfe_dev->vfe_base + 0x78);
@@ -1919,49 +1912,39 @@ void msm_vfe47_stats_cfg_wm_irq_mask(
struct vfe_device *vfe_dev,
struct msm_vfe_stats_stream *stream_info)
{
- uint32_t irq_mask;
- uint32_t irq_mask_1;
-
- irq_mask = vfe_dev->irq0_mask;
- irq_mask_1 = vfe_dev->irq1_mask;
-
switch (STATS_IDX(stream_info->stream_handle)) {
case STATS_COMP_IDX_AEC_BG:
- irq_mask |= 1 << 15;
+ msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_HDR_BE:
- irq_mask |= 1 << 16;
+ msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BG:
- irq_mask |= 1 << 17;
+ msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BF:
- irq_mask |= 1 << 18;
- irq_mask_1 |= 1 << 26;
+ msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_HDR_BHIST:
- irq_mask |= 1 << 19;
+ msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_RS:
- irq_mask |= 1 << 20;
+ msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_CS:
- irq_mask |= 1 << 21;
+ msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_IHIST:
- irq_mask |= 1 << 22;
+ msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_ENABLE);
break;
case STATS_COMP_IDX_BHIST:
- irq_mask |= 1 << 23;
+ msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_ENABLE);
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
STATS_IDX(stream_info->stream_handle));
}
-
- msm_vfe47_config_irq(vfe_dev, irq_mask, irq_mask_1, MSM_ISP_IRQ_SET);
- vfe_dev->irq0_mask = irq_mask;
- vfe_dev->irq1_mask = irq_mask_1;
}
void msm_vfe47_stats_clear_wm_irq_mask(
@@ -1975,41 +1958,37 @@ void msm_vfe47_stats_clear_wm_irq_mask(
switch (STATS_IDX(stream_info->stream_handle)) {
case STATS_COMP_IDX_AEC_BG:
- irq_mask &= ~(1 << 15);
+ msm_vfe47_config_irq(vfe_dev, 1 << 15, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_HDR_BE:
- irq_mask &= ~(1 << 16);
+ msm_vfe47_config_irq(vfe_dev, 1 << 16, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BG:
- irq_mask &= ~(1 << 17);
+ msm_vfe47_config_irq(vfe_dev, 1 << 17, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BF:
- irq_mask &= ~(1 << 18);
- irq_mask_1 &= ~(1 << 26);
+ msm_vfe47_config_irq(vfe_dev, 1 << 18, 1 << 26,
+ MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_HDR_BHIST:
- irq_mask &= ~(1 << 19);
+ msm_vfe47_config_irq(vfe_dev, 1 << 19, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_RS:
- irq_mask &= ~(1 << 20);
+ msm_vfe47_config_irq(vfe_dev, 1 << 20, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_CS:
- irq_mask &= ~(1 << 21);
+ msm_vfe47_config_irq(vfe_dev, 1 << 21, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_IHIST:
- irq_mask &= ~(1 << 22);
+ msm_vfe47_config_irq(vfe_dev, 1 << 22, 0, MSM_ISP_IRQ_DISABLE);
break;
case STATS_COMP_IDX_BHIST:
- irq_mask &= ~(1 << 23);
+ msm_vfe47_config_irq(vfe_dev, 1 << 23, 0, MSM_ISP_IRQ_DISABLE);
break;
default:
pr_err("%s: Invalid stats idx %d\n", __func__,
STATS_IDX(stream_info->stream_handle));
}
-
- msm_vfe47_config_irq(vfe_dev, irq_mask, irq_mask_1, MSM_ISP_IRQ_SET);
- vfe_dev->irq0_mask = irq_mask;
- vfe_dev->irq1_mask = irq_mask_1;
}
void msm_vfe47_stats_cfg_wm_reg(
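The msm_isp47.c hunks above all share one refactor: callers stop mutating vfe_dev->irq0_mask/irq1_mask by hand and instead pass msm_vfe47_config_irq() only the bits of interest plus an operation (MSM_ISP_IRQ_ENABLE, MSM_ISP_IRQ_DISABLE, or MSM_ISP_IRQ_SET). The helper's body is not part of this diff; the sketch below is one plausible shape for it, assuming it owns the cached masks, that the operation enum is named as shown, and that the VFE47 IRQ_MASK_0/1 registers sit at offsets 0x5C/0x60 (inferred from the 0x64/0x68 clear registers used above, not confirmed by this patch):

    static void msm_vfe47_config_irq(struct vfe_device *vfe_dev,
    	uint32_t irq0_mask, uint32_t irq1_mask,
    	enum msm_isp_irq_operation oper)
    {
    	switch (oper) {
    	case MSM_ISP_IRQ_ENABLE:
    		vfe_dev->irq0_mask |= irq0_mask;
    		vfe_dev->irq1_mask |= irq1_mask;
    		break;
    	case MSM_ISP_IRQ_DISABLE:
    		vfe_dev->irq0_mask &= ~irq0_mask;
    		vfe_dev->irq1_mask &= ~irq1_mask;
    		break;
    	case MSM_ISP_IRQ_SET:
    		vfe_dev->irq0_mask = irq0_mask;
    		vfe_dev->irq1_mask = irq1_mask;
    		break;
    	}
    	/* program the cached masks into IRQ_MASK_0/IRQ_MASK_1 */
    	msm_camera_io_w(vfe_dev->irq0_mask, vfe_dev->vfe_base + 0x5C);
    	msm_camera_io_w_mb(vfe_dev->irq1_mask, vfe_dev->vfe_base + 0x60);
    }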
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
index 3dd55e02826d..8721fc18eaa8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_axi_util.c
@@ -1188,15 +1188,9 @@ int msm_isp_request_axi_stream(struct vfe_device *vfe_dev, void *arg)
vfe_dev->vt_enable = stream_cfg_cmd->vt_enable;
msm_isp_start_avtimer();
}
- if (stream_info->num_planes > 1) {
+ if (stream_info->num_planes > 1)
msm_isp_axi_reserve_comp_mask(
&vfe_dev->axi_data, stream_info);
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_comp_mask(vfe_dev, stream_info);
- } else {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
- }
for (i = 0; i < stream_info->num_planes; i++) {
vfe_dev->hw_info->vfe_ops.axi_ops.
@@ -1252,14 +1246,8 @@ int msm_isp_release_axi_stream(struct vfe_device *vfe_dev, void *arg)
clear_wm_xbar_reg(vfe_dev, stream_info, i);
}
- if (stream_info->num_planes > 1) {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_comp_mask(vfe_dev, stream_info);
+ if (stream_info->num_planes > 1)
msm_isp_axi_free_comp_mask(&vfe_dev->axi_data, stream_info);
- } else {
- vfe_dev->hw_info->vfe_ops.axi_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
- }
vfe_dev->hw_info->vfe_ops.axi_ops.clear_framedrop(vfe_dev, stream_info);
msm_isp_axi_free_wm(axi_data, stream_info);
@@ -2617,6 +2605,13 @@ static int msm_isp_start_axi_stream(struct vfe_device *vfe_dev,
return rc;
}
spin_unlock_irqrestore(&stream_info->lock, flags);
+ if (stream_info->num_planes > 1) {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_comp_mask(vfe_dev, stream_info);
+ } else {
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
+ }
stream_info->state = START_PENDING;
@@ -2733,6 +2728,13 @@ static int msm_isp_stop_axi_stream(struct vfe_device *vfe_dev,
spin_unlock_irqrestore(&stream_info->lock, flags);
wait_for_complete_for_this_stream = 0;
+ if (stream_info->num_planes > 1)
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_comp_mask(vfe_dev, stream_info);
+ else
+ vfe_dev->hw_info->vfe_ops.axi_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
stream_info->state = STOP_PENDING;
if (!halt && !ext_read &&
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
index e98c99fcb62d..4aef6b5c7f38 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp_stats_util.c
@@ -444,10 +444,6 @@ int msm_isp_request_stats_stream(struct vfe_device *vfe_dev, void *arg)
stream_info->framedrop_pattern = 0x1;
stream_info->framedrop_period = framedrop_period - 1;
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- cfg_wm_irq_mask(vfe_dev, stream_info);
-
if (stream_info->init_stats_frame_drop == 0)
vfe_dev->hw_info->vfe_ops.stats_ops.cfg_wm_reg(vfe_dev,
stream_info);
@@ -485,10 +481,6 @@ int msm_isp_release_stats_stream(struct vfe_device *vfe_dev, void *arg)
rc = msm_isp_cfg_stats_stream(vfe_dev, &stream_cfg_cmd);
}
- if (!stream_info->composite_flag)
- vfe_dev->hw_info->vfe_ops.stats_ops.
- clear_wm_irq_mask(vfe_dev, stream_info);
-
vfe_dev->hw_info->vfe_ops.stats_ops.clear_wm_reg(vfe_dev, stream_info);
memset(stream_info, 0, sizeof(struct msm_vfe_stats_stream));
return 0;
@@ -711,6 +703,9 @@ static int msm_isp_start_stats_stream(struct vfe_device *vfe_dev,
pr_err("%s: No buffer for stream%d\n", __func__, idx);
return rc;
}
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ cfg_wm_irq_mask(vfe_dev, stream_info);
if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
stream_info->state = STATS_START_PENDING;
@@ -784,6 +779,10 @@ static int msm_isp_stop_stats_stream(struct vfe_device *vfe_dev,
return -EINVAL;
}
+ if (!stream_info->composite_flag)
+ vfe_dev->hw_info->vfe_ops.stats_ops.
+ clear_wm_irq_mask(vfe_dev, stream_info);
+
if (vfe_dev->axi_data.src_info[VFE_PIX_0].active)
stream_info->state = STATS_STOP_PENDING;
else
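Taken together, the msm_isp_axi_util.c and msm_isp_stats_util.c hunks move IRQ-mask programming out of the request/release paths and into start/stop, so the masks are live only while a stream actually runs. The resulting pairing, in the names used by this patch:

    /* request: reserve WM/comp-mask bookkeeping only, no IRQ writes    */
    /* start:   cfg_comp_mask() for composite streams, else
     *          cfg_wm_irq_mask(); likewise cfg_wm_irq_mask() for
     *          non-composite stats streams                             */
    /* stop:    clear_comp_mask() / clear_wm_irq_mask() before the
     *          stream transitions to STOP_PENDING                      */
    /* release: free the WM/comp-mask bookkeeping                       */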
diff --git a/drivers/mfd/wcd9xxx-utils.c b/drivers/mfd/wcd9xxx-utils.c
index 2160dfd063b1..22d61d96a11d 100644
--- a/drivers/mfd/wcd9xxx-utils.c
+++ b/drivers/mfd/wcd9xxx-utils.c
@@ -39,6 +39,10 @@ static enum wcd9xxx_intf_status wcd9xxx_intf = -1;
static struct mfd_cell tavil_devs[] = {
{
+ .name = "qcom-wcd-pinctrl",
+ .of_compatible = "qcom,wcd-pinctrl",
+ },
+ {
.name = "tavil_codec",
},
};
diff --git a/drivers/misc/qcom/qdsp6v2/q6audio_v2.c b/drivers/misc/qcom/qdsp6v2/q6audio_v2.c
index 7eb6629b1e57..51ba23da1270 100644
--- a/drivers/misc/qcom/qdsp6v2/q6audio_v2.c
+++ b/drivers/misc/qcom/qdsp6v2/q6audio_v2.c
@@ -85,6 +85,13 @@ void audio_in_get_dsp_frames(void *priv,
	pr_debug("%s: session id %d: enc_frames total_size=0x%8x\n", __func__,
audio->ac->session, payload[4]);
+ /* Ensure the index is within max array size: FRAME_NUM */
+ if (index >= FRAME_NUM) {
+ pr_err("%s: Invalid index %d\n",
+ __func__, index);
+ return;
+ }
+
audio->out_frame_info[index][0] = payload[9];
audio->out_frame_info[index][1] = payload[5];
diff --git a/drivers/of/of_batterydata.c b/drivers/of/of_batterydata.c
index 5f140cd0c2a6..4410f270f557 100644
--- a/drivers/of/of_batterydata.c
+++ b/drivers/of/of_batterydata.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -312,32 +312,15 @@ static int64_t of_batterydata_convert_battery_id_kohm(int batt_id_uv,
struct device_node *of_batterydata_get_best_profile(
const struct device_node *batterydata_container_node,
- const char *psy_name, const char *batt_type)
+ int batt_id_kohm, const char *batt_type)
{
struct batt_ids batt_ids;
struct device_node *node, *best_node = NULL;
- struct power_supply *psy;
const char *battery_type = NULL;
- union power_supply_propval ret = {0, };
int delta = 0, best_delta = 0, best_id_kohm = 0, id_range_pct,
- batt_id_kohm = 0, i = 0, rc = 0, limit = 0;
+ i = 0, rc = 0, limit = 0;
bool in_range = false;
- psy = power_supply_get_by_name(psy_name);
- if (!psy) {
- pr_err("%s supply not found. defer\n", psy_name);
- return ERR_PTR(-EPROBE_DEFER);
- }
-
- rc = power_supply_get_property(psy, POWER_SUPPLY_PROP_RESISTANCE_ID,
- &ret);
- if (rc) {
- pr_err("failed to retrieve resistance value rc=%d\n", rc);
- return ERR_PTR(-ENOSYS);
- }
-
- batt_id_kohm = ret.intval / 1000;
-
/* read battery id range percentage for best profile */
rc = of_property_read_u32(batterydata_container_node,
"qcom,batt-id-range-pct", &id_range_pct);
diff --git a/drivers/power/qcom-charger/qpnp-fg.c b/drivers/power/qcom-charger/qpnp-fg.c
index 8660c1f8c3f5..0658f0d3b1eb 100644
--- a/drivers/power/qcom-charger/qpnp-fg.c
+++ b/drivers/power/qcom-charger/qpnp-fg.c
@@ -4772,8 +4772,7 @@ fail:
#define BATTERY_PSY_WAIT_MS 2000
static int fg_batt_profile_init(struct fg_chip *chip)
{
- int rc = 0, ret;
- int len;
+ int rc = 0, ret, len, batt_id;
struct device_node *node = chip->pdev->dev.of_node;
struct device_node *batt_node, *profile_node;
const char *data, *batt_type_str;
@@ -4802,14 +4801,16 @@ wait:
goto no_profile;
}
+ batt_id = get_sram_prop_now(chip, FG_DATA_BATT_ID);
+ batt_id /= 1000;
if (fg_debug_mask & FG_STATUS)
- pr_info("battery id = %d\n",
- get_sram_prop_now(chip, FG_DATA_BATT_ID));
- profile_node = of_batterydata_get_best_profile(batt_node, "bms",
+ pr_info("battery id = %dKOhms\n", batt_id);
+
+ profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
fg_batt_type);
- if (!profile_node) {
- pr_err("couldn't find profile handle\n");
- rc = -ENODATA;
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
goto no_profile;
}
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 7810ecb9b15b..ad00a987ae41 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -25,54 +25,98 @@
#include "smb-lib.h"
#include "pmic-voter.h"
-#define SMB2_DEFAULT_FCC_UA 3000000
-#define SMB2_DEFAULT_FV_UV 4350000
-#define SMB2_DEFAULT_ICL_UA 3000000
+#define SMB2_DEFAULT_FCC_UA 3000000
+#define SMB2_DEFAULT_FV_UV 4350000
+#define SMB2_DEFAULT_ICL_UA 3000000
+#define SMB2_DEFAULT_WPWR_UW 8000000
static struct smb_params v1_params = {
- .fcc = {
+ .fcc = {
.name = "fast charge current",
.reg = FAST_CHARGE_CURRENT_CFG_REG,
.min_u = 0,
.max_u = 4500000,
.step_u = 25000,
},
- .fv = {
+ .fv = {
.name = "float voltage",
.reg = FLOAT_VOLTAGE_CFG_REG,
- .min_u = 3487500,
- .max_u = 4920000,
- .step_u = 7500,
+ .min_u = 2500000,
+ .max_u = 5000000,
+ .step_u = 10000,
},
- .usb_icl = {
+ .usb_icl = {
.name = "usb input current limit",
.reg = USBIN_CURRENT_LIMIT_CFG_REG,
.min_u = 0,
- .max_u = 4800000,
+ .max_u = 6000000,
.step_u = 25000,
},
- .icl_stat = {
+ .icl_stat = {
.name = "input current limit status",
.reg = ICL_STATUS_REG,
.min_u = 0,
.max_u = 4800000,
.step_u = 25000,
},
- .dc_icl = {
+ .dc_icl = {
.name = "dc input current limit",
.reg = DCIN_CURRENT_LIMIT_CFG_REG,
.min_u = 0,
+ .max_u = 6000000,
+ .step_u = 25000,
+ },
+ .dc_icl_pt_lv = {
+ .name = "dc icl PT <8V",
+ .reg = ZIN_ICL_PT_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 25000,
+ },
+ .dc_icl_pt_hv = {
+ .name = "dc icl PT >8V",
+ .reg = ZIN_ICL_PT_HV_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 25000,
+ },
+ .dc_icl_div2_lv = {
+ .name = "dc icl div2 <5.5V",
+ .reg = ZIN_ICL_LV_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 25000,
+ },
+ .dc_icl_div2_mid_lv = {
+ .name = "dc icl div2 5.5-6.5V",
+ .reg = ZIN_ICL_MID_LV_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 25000,
+ },
+ .dc_icl_div2_mid_hv = {
+ .name = "dc icl div2 6.5-8.0V",
+ .reg = ZIN_ICL_MID_HV_REG,
+ .min_u = 0,
+ .max_u = 3000000,
+ .step_u = 25000,
+ },
+ .dc_icl_div2_hv = {
+ .name = "dc icl div2 >8.0V",
+ .reg = ZIN_ICL_HV_REG,
+ .min_u = 0,
.max_u = 3000000,
.step_u = 25000,
},
};
struct smb_dt_props {
- bool suspend_input;
- int fcc_ua;
- int usb_icl_ua;
- int dc_icl_ua;
- int fv_uv;
+ bool suspend_input;
+ int fcc_ua;
+ int usb_icl_ua;
+ int dc_icl_ua;
+ int fv_uv;
+ int wipower_max_uw;
};
struct smb2 {
@@ -125,6 +169,11 @@ static int smb2_parse_dt(struct smb2 *chip)
if (rc < 0)
chip->dt.dc_icl_ua = SMB2_DEFAULT_ICL_UA;
+ rc = of_property_read_u32(node,
+ "qcom,wipower-max-uw", &chip->dt.wipower_max_uw);
+ if (rc < 0)
+ chip->dt.wipower_max_uw = SMB2_DEFAULT_WPWR_UW;
+
return 0;
}
@@ -486,6 +535,57 @@ static int smb2_init_vconn_regulator(struct smb2 *chip)
/***************************
* HARDWARE INITIALIZATION *
***************************/
+static int smb2_config_wipower_input_power(struct smb2 *chip, int uw)
+{
+ int rc;
+ int ua;
+ struct smb_charger *chg = &chip->chg;
+ s64 nw = (s64)uw * 1000;
+
+ ua = div_s64(nw, ZIN_ICL_PT_MAX_MV);
+ rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_lv, ua);
+ if (rc < 0) {
+ pr_err("Couldn't configure dc_icl_pt_lv rc = %d\n", rc);
+ return rc;
+ }
+
+ ua = div_s64(nw, ZIN_ICL_PT_HV_MAX_MV);
+ rc = smblib_set_charge_param(chg, &chg->param.dc_icl_pt_hv, ua);
+ if (rc < 0) {
+ pr_err("Couldn't configure dc_icl_pt_hv rc = %d\n", rc);
+ return rc;
+ }
+
+ ua = div_s64(nw, ZIN_ICL_LV_MAX_MV);
+ rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_lv, ua);
+ if (rc < 0) {
+ pr_err("Couldn't configure dc_icl_div2_lv rc = %d\n", rc);
+ return rc;
+ }
+
+ ua = div_s64(nw, ZIN_ICL_MID_LV_MAX_MV);
+ rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_lv, ua);
+ if (rc < 0) {
+ pr_err("Couldn't configure dc_icl_div2_mid_lv rc = %d\n", rc);
+ return rc;
+ }
+
+ ua = div_s64(nw, ZIN_ICL_MID_HV_MAX_MV);
+ rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_mid_hv, ua);
+ if (rc < 0) {
+ pr_err("Couldn't configure dc_icl_div2_mid_hv rc = %d\n", rc);
+ return rc;
+ }
+
+ ua = div_s64(nw, ZIN_ICL_HV_MAX_MV);
+ rc = smblib_set_charge_param(chg, &chg->param.dc_icl_div2_hv, ua);
+ if (rc < 0) {
+ pr_err("Couldn't configure dc_icl_div2_hv rc = %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
static int smb2_init_hw(struct smb2 *chip)
{
@@ -582,6 +682,13 @@ static int smb2_init_hw(struct smb2 *chip)
return rc;
}
+ /* configure wipower watts */
+ rc = smb2_config_wipower_input_power(chip, chip->dt.wipower_max_uw);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't configure wipower rc=%d\n", rc);
+ return rc;
+ }
+
return rc;
}
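smb2_config_wipower_input_power() derives each tier's current limit from the single DT power budget as I = P / V. Scaling the budget from uW to nW (x1000) lets a divide by the tier ceiling in mV yield uA directly, since nW / mV = uA. A worked example with the defaults from this patch, for the <5.5 V div2 tier:

    s64 nw = (s64)SMB2_DEFAULT_WPWR_UW * 1000; /* 8,000,000 uW -> 8e9 nW    */
    int ua = div_s64(nw, ZIN_ICL_LV_MAX_MV);   /* 8e9 / 5500 = 1,454,545 uA */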
diff --git a/drivers/power/qcom-charger/qpnp-smbcharger.c b/drivers/power/qcom-charger/qpnp-smbcharger.c
index 2536f4ec5c15..6c1e58d046e8 100644
--- a/drivers/power/qcom-charger/qpnp-smbcharger.c
+++ b/drivers/power/qcom-charger/qpnp-smbcharger.c
@@ -3507,19 +3507,27 @@ static int smbchg_config_chg_battery_type(struct smbchg_chip *chip)
if (chip->battery_type && !strcmp(prop.strval, chip->battery_type))
return 0;
+ chip->battery_type = prop.strval;
batt_node = of_parse_phandle(node, "qcom,battery-data", 0);
if (!batt_node) {
pr_smb(PR_MISC, "No batterydata available\n");
return 0;
}
+ rc = power_supply_get_property(chip->bms_psy,
+ POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
+ if (rc < 0) {
+ pr_smb(PR_STATUS, "Unable to read battery-id rc=%d\n", rc);
+ return 0;
+ }
+
profile_node = of_batterydata_get_best_profile(batt_node,
- "bms", NULL);
- if (!profile_node) {
- pr_err("couldn't find profile handle\n");
- return -EINVAL;
+ prop.intval / 1000, NULL);
+ if (IS_ERR_OR_NULL(profile_node)) {
+ rc = PTR_ERR(profile_node);
+ pr_err("couldn't find profile handle %d\n", rc);
+ return rc;
}
- chip->battery_type = prop.strval;
/* change vfloat */
rc = of_property_read_u32(profile_node, "qcom,max-voltage-uv",
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index 8b3d00b6a5c1..1ee4945b625f 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -62,6 +62,12 @@ struct smb_params {
struct smb_chg_param usb_icl;
struct smb_chg_param icl_stat;
struct smb_chg_param dc_icl;
+ struct smb_chg_param dc_icl_pt_lv;
+ struct smb_chg_param dc_icl_pt_hv;
+ struct smb_chg_param dc_icl_div2_lv;
+ struct smb_chg_param dc_icl_div2_mid_lv;
+ struct smb_chg_param dc_icl_div2_mid_hv;
+ struct smb_chg_param dc_icl_div2_hv;
};
struct parallel_params {
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index f63e983c595c..5af01c229f01 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -719,6 +719,15 @@ enum {
#define ZIN_ICL_MID_HV_REG (DCIN_BASE + 0x98)
#define ZIN_ICL_MID_HV_MASK GENMASK(7, 0)
+enum {
+ ZIN_ICL_PT_MAX_MV = 8000,
+ ZIN_ICL_PT_HV_MAX_MV = 9000,
+ ZIN_ICL_LV_MAX_MV = 5500,
+ ZIN_ICL_MID_LV_MAX_MV = 6500,
+ ZIN_ICL_MID_HV_MAX_MV = 8000,
+ ZIN_ICL_HV_MAX_MV = 11000,
+};
+
/* MISC Peripheral Registers */
#define REVISION1_REG (MISC_BASE + 0x00)
#define DIG_MINOR_MASK GENMASK(7, 0)
diff --git a/drivers/regulator/cpr3-regulator.c b/drivers/regulator/cpr3-regulator.c
index c13e811a5d71..78c5c47e4e8b 100644
--- a/drivers/regulator/cpr3-regulator.c
+++ b/drivers/regulator/cpr3-regulator.c
@@ -1109,10 +1109,18 @@ static int cpr3_regulator_init_cprh_corners(struct cpr3_regulator *vreg)
}
if (ro_sel == INT_MAX) {
- cpr3_err(vreg, "corner=%d has invalid RO select value\n",
- i);
- rc = -EINVAL;
- goto free_base_quots;
+ if (!corner->proc_freq) {
+ /*
+			 * Corner is not used as an active DCVS set point;
+			 * select RO 0 arbitrarily.
+ */
+ ro_sel = 0;
+ } else {
+ cpr3_err(vreg, "corner=%d has invalid RO select value\n",
+ i);
+ rc = -EINVAL;
+ goto free_base_quots;
+ }
}
open_loop_volt_steps = DIV_ROUND_UP(corner->open_loop_volt -
@@ -1121,9 +1129,11 @@ static int cpr3_regulator_init_cprh_corners(struct cpr3_regulator *vreg)
floor_volt_steps = DIV_ROUND_UP(corner->floor_volt -
ctrl->base_volt,
ctrl->step_volt);
- delta_quot_steps = DIV_ROUND_UP(corner->target_quot[ro_sel] -
- base_quots[ro_sel],
- CPRH_DELTA_QUOT_STEP_FACTOR);
+ delta_quot_steps = corner->proc_freq ?
+ DIV_ROUND_UP(corner->target_quot[ro_sel] -
+ base_quots[ro_sel],
+ CPRH_DELTA_QUOT_STEP_FACTOR) :
+ 0;
if (open_loop_volt_steps > CPRH_CORNER_INIT_VOLTAGE_MAX_VALUE ||
floor_volt_steps > CPRH_CORNER_FLOOR_VOLTAGE_MAX_VALUE ||
diff --git a/drivers/regulator/cpr3-regulator.h b/drivers/regulator/cpr3-regulator.h
index d750b70519d1..3ddc1dc3c982 100644
--- a/drivers/regulator/cpr3-regulator.h
+++ b/drivers/regulator/cpr3-regulator.h
@@ -572,7 +572,13 @@ struct cpr3_panic_regs_info {
* when hardware closed-loop attempts to exceed the ceiling
* voltage
* @apm: Handle to the array power mux (APM)
- * @apm_threshold_volt: APM threshold voltage in microvolts
+ * @apm_threshold_volt: Voltage in microvolts which defines the threshold
+ * voltage to determine the APM supply selection for
+ * each corner
+ * @apm_crossover_volt: Voltage in microvolts corresponding to the voltage that
+ * the VDD supply must be set to while an APM switch is in
+ * progress. This element must be initialized for CPRh
+ * controllers when an APM threshold voltage is defined
* @apm_adj_volt: Minimum difference between APM threshold voltage and
* open-loop voltage which allows the APM threshold voltage
* to be used as a ceiling
@@ -736,6 +742,7 @@ struct cpr3_controller {
int ceiling_irq;
struct msm_apm_ctrl_dev *apm;
int apm_threshold_volt;
+ int apm_crossover_volt;
int apm_adj_volt;
enum msm_apm_supply apm_high_supply;
enum msm_apm_supply apm_low_supply;
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index ffd3db1a6dff..dfdd6921fed5 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -697,61 +697,38 @@ free_temp:
}
/**
- * cprh_kbss_apm_threshold_as_corner() - introduce a corner whose floor, open-loop,
- * and ceiling voltages correspond to the APM threshold voltage.
+ * cprh_kbss_apm_crossover_as_corner() - introduce a corner whose floor,
+ * open-loop, and ceiling voltages correspond to the APM
+ * crossover voltage.
* @vreg: Pointer to the CPR3 regulator
*
* The APM corner is utilized as a crossover corner by OSM and CPRh
- * hardware to determine the correct APM supply selection for the
- * rest of the corners. This function must be called after all other
- * functions which load per-corner values.
+ * hardware to set the VDD supply voltage during the APM switch
+ * routine.
*
* Return: 0 on success, errno on failure
*/
-static int cprh_kbss_apm_threshold_as_corner(struct cpr3_regulator *vreg)
+static int cprh_kbss_apm_crossover_as_corner(struct cpr3_regulator *vreg)
{
struct cpr3_controller *ctrl = vreg->thread->ctrl;
struct cpr3_corner *corner;
- struct cprh_corner_band *corner_band;
- int i, threshold, apm_corner = 0;
- if (!ctrl->apm_threshold_volt) {
- /* APM voltage threshold corner not required. */
+ if (!ctrl->apm_crossover_volt) {
+ /* APM voltage crossover corner not required. */
return 0;
}
- threshold = ctrl->apm_threshold_volt;
- vreg->corner_count++;
-
- for (i = vreg->corner_count - 1; i >= 1; i--) {
- corner = &vreg->corner[i];
-
- if (threshold >= vreg->corner[i - 1].open_loop_volt) {
- apm_corner = i;
- break;
- }
-
- memcpy(corner, &vreg->corner[i - 1], sizeof(*corner));
- }
-
- corner = &vreg->corner[apm_corner];
- corner->proc_freq = 0;
- corner->floor_volt = threshold;
- corner->ceiling_volt = threshold;
- corner->open_loop_volt = threshold;
- corner->use_open_loop = true;
- cpr3_info(vreg, "APM threshold corner=%d, open-loop=%d\n",
- apm_corner, threshold);
-
+ corner = &vreg->corner[vreg->corner_count];
/*
- * Update corner band mappings to account for the inserted
- * APM crossover corner.
+	 * A proc_freq of 0 MHz indicates this corner is not to be
+	 * used as an active DCVS set point.
*/
- for (i = 0; i < vreg->corner_band_count; i++) {
- corner_band = &vreg->corner_band[i];
- if (corner_band->corner >= apm_corner)
- corner_band->corner++;
- }
+ corner->proc_freq = 0;
+ corner->floor_volt = ctrl->apm_crossover_volt;
+ corner->ceiling_volt = ctrl->apm_crossover_volt;
+ corner->open_loop_volt = ctrl->apm_crossover_volt;
+ corner->use_open_loop = true;
+ vreg->corner_count++;
return 0;
}
@@ -1203,9 +1180,9 @@ static int cprh_kbss_init_regulator(struct cpr3_regulator *vreg)
return -EINVAL;
}
- rc = cprh_kbss_apm_threshold_as_corner(vreg);
+ rc = cprh_kbss_apm_crossover_as_corner(vreg);
if (rc) {
- cpr3_err(vreg, "unable to introduce APM voltage threshold corner\n, rc=%d\n",
+ cpr3_err(vreg, "unable to introduce APM voltage crossover corner, rc=%d\n",
rc);
return rc;
}
@@ -1288,8 +1265,18 @@ static int cprh_kbss_init_controller(struct cpr3_controller *ctrl)
rc = of_property_read_u32(ctrl->dev->of_node,
"qcom,apm-threshold-voltage",
&ctrl->apm_threshold_volt);
- if (rc)
+ if (rc) {
cpr3_debug(ctrl, "qcom,apm-threshold-voltage not specified\n");
+ } else {
+ rc = of_property_read_u32(ctrl->dev->of_node,
+ "qcom,apm-crossover-voltage",
+ &ctrl->apm_crossover_volt);
+ if (rc) {
+ cpr3_err(ctrl, "error reading property qcom,apm-crossover-voltage, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
of_property_read_u32(ctrl->dev->of_node, "qcom,apm-hysteresis-voltage",
&ctrl->apm_adj_volt);
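With the cprh-kbss-regulator.c change above, specifying qcom,apm-threshold-voltage now also requires qcom,apm-crossover-voltage, and the controller fails probe when the pairing is incomplete. A hypothetical device-tree fragment (node name and values are illustrative only, not taken from this patch):

    apc0_cpr: cprh-ctrl {
    	qcom,apm-threshold-voltage = <800000>;
    	qcom,apm-crossover-voltage = <880000>;
    };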
diff --git a/drivers/soc/qcom/glink_private.h b/drivers/soc/qcom/glink_private.h
index 2f064e546f48..cdd6988418f7 100644
--- a/drivers/soc/qcom/glink_private.h
+++ b/drivers/soc/qcom/glink_private.h
@@ -19,6 +19,7 @@
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/ratelimit.h>
+#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/drivers/soc/qcom/glink_smem_native_xprt.c b/drivers/soc/qcom/glink_smem_native_xprt.c
index d7d08dc588e5..84f346385f18 100644
--- a/drivers/soc/qcom/glink_smem_native_xprt.c
+++ b/drivers/soc/qcom/glink_smem_native_xprt.c
@@ -2191,6 +2191,8 @@ static int subsys_name_to_id(const char *name)
return SMEM_WCNSS;
if (!strcmp(name, "spss"))
return SMEM_SPSS;
+ if (!strcmp(name, "cdsp"))
+ return SMEM_CDSP;
return -ENODEV;
}
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 7087b5744eef..35994b827549 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1787,6 +1787,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
reg = dwc3_readl(dwc->regs, DWC3_DCTL);
if (is_on) {
+ dbg_event(0xFF, "Pullup_enable", is_on);
if (dwc->revision <= DWC3_REVISION_187A) {
reg &= ~DWC3_DCTL_TRGTULST_MASK;
reg |= DWC3_DCTL_TRGTULST_RX_DET;
@@ -1824,6 +1825,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
dwc->pullups_connected = true;
} else {
+ dbg_event(0xFF, "Pullup_disable", is_on);
dwc3_gadget_disable_irq(dwc);
__dwc3_gadget_ep_disable(dwc->eps[0]);
__dwc3_gadget_ep_disable(dwc->eps[1]);
@@ -1849,8 +1851,15 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
break;
}
timeout--;
- if (!timeout)
+ if (!timeout) {
+ dev_err(dwc->dev, "failed to %s controller\n",
+ is_on ? "start" : "stop");
+ if (is_on)
+ dbg_event(0xFF, "STARTTOUT", reg);
+ else
+ dbg_event(0xFF, "STOPTOUT", reg);
return -ETIMEDOUT;
+ }
udelay(1);
} while (1);
diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c
index c2d65206ec6c..6c47c26b5df7 100644
--- a/drivers/usb/host/xhci-pci.c
+++ b/drivers/usb/host/xhci-pci.c
@@ -299,6 +299,7 @@ static void xhci_pci_remove(struct pci_dev *dev)
struct xhci_hcd *xhci;
xhci = hcd_to_xhci(pci_get_drvdata(dev));
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
if (xhci->shared_hcd) {
usb_remove_hcd(xhci->shared_hcd);
usb_put_hcd(xhci->shared_hcd);
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 8125a8f96311..3e49861a09a2 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -284,6 +284,7 @@ static int xhci_plat_remove(struct platform_device *dev)
pm_runtime_disable(&dev->dev);
device_remove_file(&dev->dev, &dev_attr_config_imod);
+ xhci->xhc_state |= XHCI_STATE_REMOVING;
usb_remove_hcd(xhci->shared_hcd);
usb_phy_shutdown(hcd->usb_phy);
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index db0f0831b94f..2b63969c2bbf 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -4008,7 +4008,8 @@ static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
int reserved_trbs = xhci->cmd_ring_reserved_trbs;
int ret;
- if (xhci->xhc_state) {
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_HALTED)) {
xhci_dbg(xhci, "xHCI dying or halted, can't queue_command\n");
return -ESHUTDOWN;
}
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 42f74d55e3bd..dd7669331d00 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -155,7 +155,8 @@ static int xhci_start(struct xhci_hcd *xhci)
"waited %u microseconds.\n",
XHCI_MAX_HALT_USEC);
if (!ret)
- xhci->xhc_state &= ~(XHCI_STATE_HALTED | XHCI_STATE_DYING);
+		/* clear all state flags: dying, halted, and removing */
+ xhci->xhc_state = 0;
return ret;
}
@@ -2762,7 +2763,8 @@ int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
if (ret <= 0)
return ret;
xhci = hcd_to_xhci(hcd);
- if (xhci->xhc_state & XHCI_STATE_DYING)
+ if ((xhci->xhc_state & XHCI_STATE_DYING) ||
+ (xhci->xhc_state & XHCI_STATE_REMOVING))
return -ENODEV;
xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
@@ -3809,7 +3811,7 @@ static int xhci_setup_device(struct usb_hcd *hcd, struct usb_device *udev,
mutex_lock(&xhci->mutex);
- if (xhci->xhc_state) /* dying or halted */
+ if (xhci->xhc_state) /* dying, removing or halted */
goto out;
if (!udev->slot_id) {
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index a6e94e029a10..dc03aac4f88a 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -1604,6 +1604,7 @@ struct xhci_hcd {
*/
#define XHCI_STATE_DYING (1 << 0)
#define XHCI_STATE_HALTED (1 << 1)
+#define XHCI_STATE_REMOVING (1 << 2)
/* Statistics */
int error_bitmask;
unsigned int quirks;
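The xHCI hunks add XHCI_STATE_REMOVING so that host removal is distinguishable from a dying or halted controller. Note that queue_command() now tests the DYING and HALTED bits explicitly instead of rejecting on any non-zero xhc_state: usb_remove_hcd() still needs to queue teardown commands while REMOVING is set, so the new flag must not trip the -ESHUTDOWN path. Equivalently:

    /* REMOVING alone must still allow teardown commands to be queued */
    if (xhci->xhc_state & (XHCI_STATE_DYING | XHCI_STATE_HALTED))
    	return -ESHUTDOWN;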
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 164bf0273597..5a355f226179 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1638,9 +1638,16 @@ static int mdss_mdp_gdsc_notifier_call(struct notifier_block *self,
if (event & REGULATOR_EVENT_ENABLE) {
__mdss_restore_sec_cfg(mdata);
} else if (event & REGULATOR_EVENT_PRE_DISABLE) {
- pr_debug("mdss gdsc is getting disabled\n");
- /* halt the vbif transactions */
- mdss_mdp_vbif_axi_halt(mdata);
+ int active_cnt = atomic_read(&mdata->active_intf_cnt);
+
+ pr_debug("mdss gdsc is getting disabled, active_cnt=%d\n",
+ active_cnt);
+ /*
+ * halt the vbif transactions only if we have any active
+ * overlay session
+ */
+ if (active_cnt)
+ mdss_mdp_vbif_axi_halt(mdata);
}
return NOTIFY_OK;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index f97a9f9a9adc..cd8df78bc8c0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -561,7 +561,8 @@ static u32 __calc_qseed3_mdp_clk_rate(struct mdss_mdp_pipe *pipe,
u64 active_line;
u64 backfill_line;
- ver_dwnscale = ((u64)src_h << PHASE_STEP_SHIFT) / dst.h;
+ ver_dwnscale = (u64)src_h << PHASE_STEP_SHIFT;
+ do_div(ver_dwnscale, dst.h);
if (ver_dwnscale > (MDSS_MDP_QSEED3_VER_DOWNSCALE_LIM
<< PHASE_STEP_SHIFT)) {
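The mdss_mdp_ctl.c hunk swaps a plain `/` for do_div() because the dividend is 64-bit: on 32-bit ARM kernels, a u64 division by a runtime divisor compiles to a libgcc helper (__aeabi_uldivmod) that the kernel does not provide, whereas do_div() divides a 64-bit dividend by a 32-bit divisor in place and returns the remainder. A minimal sketch of the idiom (src_h and dst_h stand in for the values used above):

    #include <asm/div64.h>

    u64 scale = (u64)src_h << PHASE_STEP_SHIFT; /* 64-bit dividend             */
    u32 rem = do_div(scale, dst_h);             /* scale /= dst_h; rem is the  */
                                                /* remainder of the division   */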
diff --git a/include/linux/of_batterydata.h b/include/linux/of_batterydata.h
index fe2c996de264..5505371488d0 100644
--- a/include/linux/of_batterydata.h
+++ b/include/linux/of_batterydata.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2014, 2016 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -39,10 +39,7 @@ int of_batterydata_read_data(struct device_node *container_node,
* of_batterydata_get_best_profile() - Find matching battery data device node
* @batterydata_container_node: pointer to the battery-data container device
* node containing the profile nodes.
- * @psy_name: Name of the power supply which holds the
- * POWER_SUPPLY_RESISTANCE_ID value to be used to match
- * against the id resistances specified in the corresponding
- * battery data profiles.
+ * @batt_id_kohm: Battery ID in KOhms for which we want to find the profile.
* @batt_type: Battery type which we want to force load the profile.
*
* This routine returns a device_node pointer to the closest match battery data
@@ -50,7 +47,7 @@ int of_batterydata_read_data(struct device_node *container_node,
*/
struct device_node *of_batterydata_get_best_profile(
struct device_node *batterydata_container_node,
- const char *psy_name, const char *batt_type);
+ int batt_id_kohm, const char *batt_type);
#else
static inline int of_batterydata_read_data(struct device_node *container_node,
struct bms_battery_data *batt_data,
@@ -60,7 +57,7 @@ static inline int of_batterydata_read_data(struct device_node *container_node,
}
static inline struct device_node *of_batterydata_get_best_profile(
struct device_node *batterydata_container_node,
- struct device_node *best_node, const char *psy_name)
+ int batt_id_kohm, const char *batt_type)
{
	return ERR_PTR(-ENXIO);
}
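With the new signature, callers resolve the battery id resistance themselves and must treat the return value as a possible ERR_PTR rather than a bare NULL, as the qpnp-fg and qpnp-smbcharger hunks above do. A minimal caller sketch, assuming bms_psy and batt_node have already been looked up:

    union power_supply_propval prop = {0, };
    struct device_node *profile;
    int rc;

    rc = power_supply_get_property(bms_psy,
    		POWER_SUPPLY_PROP_RESISTANCE_ID, &prop);
    if (rc < 0)
    	return rc;

    profile = of_batterydata_get_best_profile(batt_node,
    		prop.intval / 1000 /* ohm -> kohm */, NULL);
    if (IS_ERR_OR_NULL(profile))
    	return profile ? PTR_ERR(profile) : -ENODATA;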
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
index 9295532dec8a..79bcc1b31cf8 100644
--- a/include/soc/qcom/smem.h
+++ b/include/soc/qcom/smem.h
@@ -26,6 +26,7 @@ enum {
SMEM_TZ,
SMEM_SPSS,
SMEM_HYP,
+ SMEM_CDSP,
NUM_SMEM_SUBSYSTEMS,
};
diff --git a/include/uapi/linux/msm_kgsl.h b/include/uapi/linux/msm_kgsl.h
index 34503420c882..dbba773cd49d 100644
--- a/include/uapi/linux/msm_kgsl.h
+++ b/include/uapi/linux/msm_kgsl.h
@@ -43,13 +43,13 @@
/* This is a cmdbatch exclusive flag - use the CMDBATCH equivalent instead */
#define KGSL_CONTEXT_SYNC 0x00000400
#define KGSL_CONTEXT_PWR_CONSTRAINT 0x00000800
-
#define KGSL_CONTEXT_PRIORITY_MASK 0x0000F000
#define KGSL_CONTEXT_PRIORITY_SHIFT 12
#define KGSL_CONTEXT_PRIORITY_UNDEF 0
#define KGSL_CONTEXT_IFH_NOP 0x00010000
#define KGSL_CONTEXT_SECURE 0x00020000
+#define KGSL_CONTEXT_NO_SNAPSHOT 0x00040000
#define KGSL_CONTEXT_PREEMPT_STYLE_MASK 0x0E000000
#define KGSL_CONTEXT_PREEMPT_STYLE_SHIFT 25
diff --git a/kernel/drivers/input/touchscreen/msg21xx/msg21xx_ts.c b/kernel/drivers/input/touchscreen/msg21xx/msg21xx_ts.c
new file mode 100644
index 000000000000..4eb7fd4b1cc9
--- /dev/null
+++ b/kernel/drivers/input/touchscreen/msg21xx/msg21xx_ts.c
@@ -0,0 +1,1757 @@
+/*
+ * MStar MSG21XX touchscreen driver
+ *
+ * Copyright (c) 2006-2012 MStar Semiconductor, Inc.
+ *
+ * Copyright (C) 2012 Bruce Ding <bruce.ding@mstarsemi.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/input.h>
+#include <linux/interrupt.h>
+#include <linux/i2c.h>
+#include <linux/timer.h>
+#include <linux/gpio.h>
+
+#include <linux/sysfs.h>
+#include <linux/init.h>
+#include <linux/mutex.h>
+#include <mach/gpio.h>
+#include <linux/spinlock.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <linux/syscalls.h>
+#include <linux/file.h>
+#include <linux/fs.h>
+#include <linux/fcntl.h>
+#include <linux/string.h>
+#include <asm/unistd.h>
+#include <linux/cdev.h>
+#include <asm/uaccess.h>
+#if defined(CONFIG_HAS_EARLYSUSPEND)
+#include <linux/earlysuspend.h>
+#endif
+#include <linux/input.h>
+#if defined(CONFIG_FB)
+#include <linux/notifier.h>
+#include <linux/fb.h>
+#endif
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+#include <linux/input/vir_ps.h>
+#endif
+
+/*=============================================================*/
+// Macro Definition
+/*=============================================================*/
+
+#define TOUCH_DRIVER_DEBUG 0
+#if (TOUCH_DRIVER_DEBUG == 1)
+#define DBG(fmt, arg...) pr_err(fmt, ##arg) //pr_info(fmt, ##arg)
+#else
+#define DBG(fmt, arg...)
+#endif
+
+/*=============================================================*/
+// Constant Value & Variable Definition
+/*=============================================================*/
+
+#define U8 unsigned char
+#define U16 unsigned short
+#define U32 unsigned int
+#define S8 signed char
+#define S16 signed short
+#define S32 signed int
+
+#define TOUCH_SCREEN_X_MIN (0)
+#define TOUCH_SCREEN_Y_MIN (0)
+/*
+ * Note.
+ * Please change the below touch screen resolution according to the touch panel that you are using.
+ */
+#define TOUCH_SCREEN_X_MAX (480)
+#define TOUCH_SCREEN_Y_MAX (800)
+/*
+ * Note.
+ * Please do not change the below setting.
+ */
+#define TPD_WIDTH (2048)
+#define TPD_HEIGHT (2048)
+
+/*
+ * Note.
+ * Please change the GPIO pin settings below to match the platform that you are using.
+ */
+static int int_gpio = 1;
+static int reset_gpio = 0;
+#define MS_TS_MSG21XX_GPIO_RST reset_gpio
+#define MS_TS_MSG21XX_GPIO_INT int_gpio
+//---------------------------------------------------------------------//
+
+//#define SYSFS_AUTHORITY_CHANGE_FOR_CTS_TEST
+
+#ifdef SYSFS_AUTHORITY_CHANGE_FOR_CTS_TEST
+#define SYSFS_AUTHORITY (0644)
+#else
+#define SYSFS_AUTHORITY (0777)
+#endif
+
+#define FIRMWARE_AUTOUPDATE
+#ifdef FIRMWARE_AUTOUPDATE
+typedef enum {
+ SWID_START = 1,
+ SWID_TRULY = SWID_START,
+ SWID_NULL,
+} SWID_ENUM;
+
+unsigned char MSG_FIRMWARE[1][33*1024] =
+{
+ {
+ #include "msg21xx_truly_update_bin.h"
+ }
+};
+#endif
+
+#define CONFIG_TP_HAVE_KEY
+
+/*
+ * Note.
+ * If the virtual key values defined below are not consistent with those defined in the platform's key layout file,
+ * please change them to match the platform that you are using.
+ */
+#ifdef CONFIG_TP_HAVE_KEY
+#define TOUCH_KEY_MENU (139) //229
+#define TOUCH_KEY_HOME (172) //102
+#define TOUCH_KEY_BACK (158)
+#define TOUCH_KEY_SEARCH (217)
+
+const U16 tp_key_array[] = {TOUCH_KEY_MENU, TOUCH_KEY_HOME, TOUCH_KEY_BACK, TOUCH_KEY_SEARCH};
+#define MAX_KEY_NUM (sizeof(tp_key_array)/sizeof(tp_key_array[0]))
+#endif
+
+#define SLAVE_I2C_ID_DBBUS (0xC4>>1)
+#define SLAVE_I2C_ID_DWI2C (0x4C>>1)
+
+#define DEMO_MODE_PACKET_LENGTH (8)
+#define MAX_TOUCH_NUM (2) //5
+
+#define TP_PRINT
+#ifdef TP_PRINT
+static int tp_print_proc_read(void);
+static void tp_print_create_entry(void);
+#endif
+
+static char *fw_version = NULL; // customer firmware version
+static U16 fw_version_major = 0;
+static U16 fw_version_minor = 0;
+static U8 temp[94][1024];
+static U32 crc32_table[256];
+static int FwDataCnt = 0;
+static U8 bFwUpdating = 0;
+static struct class *firmware_class = NULL;
+static struct device *firmware_cmd_dev = NULL;
+
+static struct i2c_client *i2c_client = NULL;
+
+#if defined(CONFIG_FB)
+static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data);
+static struct notifier_block msg21xx_fb_notif;
+#elif defined (CONFIG_HAS_EARLYSUSPEND)
+static struct early_suspend mstar_ts_early_suspend;
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+static U8 bEnableTpProximity = 0;
+static U8 bFaceClosingTp = 0;
+#endif
+static U8 bTpInSuspend = 0;
+
+static int irq_msg21xx = -1;
+static struct work_struct msg21xx_wk;
+static struct mutex msg21xx_mutex;
+static struct input_dev *input_dev = NULL;
+
+/*=============================================================*/
+// Data Type Definition
+/*=============================================================*/
+
+typedef struct
+{
+ U16 x;
+ U16 y;
+} touchPoint_t;
+
+/// max (MAX_TOUCH_NUM * sizeof(touchPoint_t)) + 1 + 1 bytes
+typedef struct
+{
+ touchPoint_t point[MAX_TOUCH_NUM];
+ U8 count;
+ U8 keycode;
+} touchInfo_t;
+
+enum i2c_speed
+{
+ I2C_SLOW = 0,
+	I2C_NORMAL = 1,
+	I2C_FAST = 2,
+};
+
+typedef enum
+{
+ EMEM_ALL = 0,
+ EMEM_MAIN,
+ EMEM_INFO,
+} EMEM_TYPE_t;
+
+/*=============================================================*/
+// Function Definition
+/*=============================================================*/
+
+/// CRC
+static U32 _CRC_doReflect(U32 ref, S8 ch)
+{
+ U32 value = 0;
+ U32 i = 0;
+
+ for (i = 1; i < (ch + 1); i ++)
+ {
+ if (ref & 1)
+ {
+ value |= 1 << (ch - i);
+ }
+ ref >>= 1;
+ }
+
+ return value;
+}
+
+U32 _CRC_getValue(U32 text, U32 prevCRC)
+{
+ U32 ulCRC = prevCRC;
+
+ ulCRC = (ulCRC >> 8) ^ crc32_table[(ulCRC & 0xFF) ^ text];
+
+ return ulCRC;
+}
+
+static void _CRC_initTable(void)
+{
+ U32 magic_number = 0x04c11db7;
+ U32 i, j;
+
+ for (i = 0; i <= 0xFF; i ++)
+ {
+ crc32_table[i] = _CRC_doReflect (i, 8) << 24;
+ for (j = 0; j < 8; j ++)
+ {
+ crc32_table[i] = (crc32_table[i] << 1) ^ (crc32_table[i] & (0x80000000L) ? magic_number : 0);
+ }
+ crc32_table[i] = _CRC_doReflect(crc32_table[i], 32);
+ }
+}
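+/*
+ * The three helpers above implement a table-driven reflected CRC-32
+ * (polynomial 0x04C11DB7). Callers seed the running value with
+ * 0xFFFFFFFF, fold in one byte at a time via
+ *   crc = (crc >> 8) ^ crc32_table[(crc & 0xFF) ^ byte];
+ * and XOR the result with 0xFFFFFFFF at the end (see
+ * firmware_update_c33()) before comparing it against the CRC read
+ * back from the touch IC.
+ */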
+
+static void reset_hw(void)
+{
+ DBG("reset_hw()\n");
+
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+	mdelay(100); /* RST must be held low for at least 10 ms */
+	gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+	mdelay(100); /* wait at least 50 ms before servicing the INT interrupt */
+}
+
+static int read_i2c_seq(U8 addr, U8* buf, U16 size)
+{
+ int rc = 0;
+ struct i2c_msg msgs[] =
+ {
+ {
+ .addr = addr,
+ .flags = I2C_M_RD, // read flag
+ .len = size,
+ .buf = buf,
+ },
+ };
+
+	/* i2c_transfer() returns the number of messages transferred
+	   (1 on success here), else a negative error code. */
+ if (i2c_client != NULL)
+ {
+ rc = i2c_transfer(i2c_client->adapter, msgs, 1);
+ if (rc < 0)
+ {
+ DBG("read_i2c_seq() error %d\n", rc);
+ }
+ }
+ else
+ {
+ DBG("i2c_client is NULL\n");
+ }
+
+ return rc;
+}
+
+static int write_i2c_seq(U8 addr, U8* buf, U16 size)
+{
+ int rc = 0;
+ struct i2c_msg msgs[] =
+ {
+ {
+ .addr = addr,
+			.flags = 0, // no I2C_M_RD flag, so this message is a write
+ .len = size,
+ .buf = buf,
+ },
+ };
+
+	/* i2c_transfer() returns the number of messages transferred
+	   (1 on success here), else a negative error code. */
+ if (i2c_client != NULL)
+ {
+ rc = i2c_transfer(i2c_client->adapter, msgs, 1);
+ if ( rc < 0 )
+ {
+ DBG("write_i2c_seq() error %d\n", rc);
+ }
+ }
+ else
+ {
+ DBG("i2c_client is NULL\n");
+ }
+
+ return rc;
+}
+
+static U16 read_reg(U8 bank, U8 addr)
+{
+ U8 tx_data[3] = {0x10, bank, addr};
+ U8 rx_data[2] = {0};
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DBBUS, &rx_data[0], 2);
+
+ return (rx_data[1] << 8 | rx_data[0]);
+}
+
+static void write_reg(U8 bank, U8 addr, U16 data)
+{
+ U8 tx_data[5] = {0x10, bank, addr, data & 0xFF, data >> 8};
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 5);
+}
+
+static void write_reg_8bit(U8 bank, U8 addr, U8 data)
+{
+ U8 tx_data[4] = {0x10, bank, addr, data};
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 4);
+}
+
+void dbbusDWIICEnterSerialDebugMode(void)
+{
+ U8 data[5];
+
+ // Enter the Serial Debug Mode
+ data[0] = 0x53;
+ data[1] = 0x45;
+ data[2] = 0x52;
+ data[3] = 0x44;
+ data[4] = 0x42;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 5);
+}
+
+void dbbusDWIICStopMCU(void)
+{
+ U8 data[1];
+
+ // Stop the MCU
+ data[0] = 0x37;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICIICUseBus(void)
+{
+ U8 data[1];
+
+ // IIC Use Bus
+ data[0] = 0x35;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICIICReshape(void)
+{
+ U8 data[1];
+
+ // IIC Re-shape
+ data[0] = 0x71;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICIICNotUseBus(void)
+{
+ U8 data[1];
+
+ // IIC Not Use Bus
+ data[0] = 0x34;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICNotStopMCU(void)
+{
+ U8 data[1];
+
+ // Not Stop the MCU
+ data[0] = 0x36;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+}
+
+void dbbusDWIICExitSerialDebugMode(void)
+{
+ U8 data[1];
+
+ // Exit the Serial Debug Mode
+ data[0] = 0x45;
+
+ write_i2c_seq(SLAVE_I2C_ID_DBBUS, data, 1);
+
+ // Delay some interval to guard the next transaction
+ //udelay ( 200 ); // delay about 0.2ms
+}
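+/*
+ * The DBBUS commands above are single-byte opcodes, and the
+ * serial-debug entry sequence is the ASCII string "SERDB"
+ * (0x53 0x45 0x52 0x44 0x42). The typical bring-up order, as used in
+ * get_ic_type() below, is: enter serial debug mode, stop the MCU,
+ * claim the IIC bus, then re-shape before issuing register accesses.
+ */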
+
+//---------------------------------------------------------------------//
+
+static U8 get_ic_type(void)
+{
+ U8 ic_type = 0;
+
+ reset_hw();
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ mdelay ( 300 );
+
+ // stop mcu
+ write_reg_8bit ( 0x0F, 0xE6, 0x01 );
+	// disable watchdog
+ write_reg ( 0x3C, 0x60, 0xAA55 );
+ // get ic type
+ ic_type = (0xff)&(read_reg(0x1E, 0xCC));
+
+ if (ic_type != 1 //msg2133
+ && ic_type != 2 //msg21xxA
+ && ic_type != 3) //msg26xxM
+ {
+ ic_type = 0;
+ }
+
+ reset_hw();
+
+ return ic_type;
+}
+
+static int get_customer_firmware_version(void)
+{
+ U8 dbbus_tx_data[3] = {0};
+ U8 dbbus_rx_data[4] = {0};
+ int ret = 0;
+
+ DBG("get_customer_firmware_version()\n");
+
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = 0x00;
+ dbbus_tx_data[2] = 0x2A;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+ fw_version_major = (dbbus_rx_data[1]<<8) + dbbus_rx_data[0];
+ fw_version_minor = (dbbus_rx_data[3]<<8) + dbbus_rx_data[2];
+
+ DBG("*** major = %d ***\n", fw_version_major);
+ DBG("*** minor = %d ***\n", fw_version_minor);
+
+	if (fw_version == NULL)
+	{
+		/* "%03d%03d" may need up to 11 bytes (two 5-digit values + NUL) */
+		fw_version = kzalloc(16, GFP_KERNEL);
+	}
+
+	if (fw_version != NULL)
+	{
+		sprintf(fw_version, "%03d%03d", fw_version_major, fw_version_minor);
+	}
+
+ return ret;
+}
+
+static int firmware_erase_c33 ( EMEM_TYPE_t emem_type )
+{
+ // stop mcu
+ write_reg ( 0x0F, 0xE6, 0x0001 );
+
+	// disable watchdog
+ write_reg_8bit ( 0x3C, 0x60, 0x55 );
+ write_reg_8bit ( 0x3C, 0x61, 0xAA );
+
+ // set PROGRAM password
+ write_reg_8bit ( 0x16, 0x1A, 0xBA );
+ write_reg_8bit ( 0x16, 0x1B, 0xAB );
+
+ write_reg_8bit ( 0x16, 0x18, 0x80 );
+
+ if ( emem_type == EMEM_ALL )
+ {
+ write_reg_8bit ( 0x16, 0x08, 0x10 ); //mark
+ }
+
+ write_reg_8bit ( 0x16, 0x18, 0x40 );
+ mdelay ( 10 );
+
+ // clear pce
+ write_reg_8bit ( 0x16, 0x18, 0x80 );
+
+ // erase trigger
+ if ( emem_type == EMEM_MAIN )
+ {
+ write_reg_8bit ( 0x16, 0x0E, 0x04 ); //erase main
+ }
+ else
+ {
+ write_reg_8bit ( 0x16, 0x0E, 0x08 ); //erase all block
+ }
+
+ return ( 1 );
+}
+
+static ssize_t firmware_update_c33 ( struct device *dev, struct device_attribute *attr,
+ const char *buf, size_t size, EMEM_TYPE_t emem_type )
+{
+ U32 i, j;
+ U32 crc_main, crc_main_tp;
+ U32 crc_info, crc_info_tp;
+ U16 reg_data = 0;
+ int update_pass = 1;
+
+ crc_main = 0xffffffff;
+ crc_info = 0xffffffff;
+
+ reset_hw();
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ mdelay ( 300 );
+
+ //erase main
+ firmware_erase_c33 ( EMEM_MAIN );
+ mdelay ( 1000 );
+
+ reset_hw();
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ mdelay ( 300 );
+
+ /////////////////////////
+ // Program
+ /////////////////////////
+
+ //polling 0x3CE4 is 0x1C70
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0x1C70 );
+ }
+
+ switch ( emem_type )
+ {
+ case EMEM_ALL:
+ write_reg ( 0x3C, 0xE4, 0xE38F ); // for all-blocks
+ break;
+ case EMEM_MAIN:
+ write_reg ( 0x3C, 0xE4, 0x7731 ); // for main block
+ break;
+ case EMEM_INFO:
+ write_reg ( 0x3C, 0xE4, 0x7731 ); // for info block
+
+ write_reg_8bit ( 0x0F, 0xE6, 0x01 );
+
+ write_reg_8bit ( 0x3C, 0xE4, 0xC5 );
+ write_reg_8bit ( 0x3C, 0xE5, 0x78 );
+
+ write_reg_8bit ( 0x1E, 0x04, 0x9F );
+ write_reg_8bit ( 0x1E, 0x05, 0x82 );
+
+ write_reg_8bit ( 0x0F, 0xE6, 0x00 );
+ mdelay ( 100 );
+ break;
+ }
+
+ // polling 0x3CE4 is 0x2F43
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0x2F43 );
+
+ // calculate CRC 32
+ _CRC_initTable ();
+
+	for ( i = 0; i < 32; i++ ) // total 32 KB : 1 KB per pass
+ {
+ if ( i == 31 )
+ {
+ temp[i][1014] = 0x5A;
+ temp[i][1015] = 0xA5;
+
+ for ( j = 0; j < 1016; j++ )
+ {
+ crc_main = _CRC_getValue ( temp[i][j], crc_main);
+ }
+ }
+ else
+ {
+ for ( j = 0; j < 1024; j++ )
+ {
+ crc_main = _CRC_getValue ( temp[i][j], crc_main);
+ }
+ }
+
+ //write_i2c_seq(SLAVE_I2C_ID_DWI2C, temp[i], 1024);
+ for (j = 0; j < 8; j++)
+ {
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &temp[i][j*128], 128 );
+ }
+ msleep (100);
+
+ // polling 0x3CE4 is 0xD0BC
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0xD0BC );
+
+ write_reg ( 0x3C, 0xE4, 0x2F43 );
+ }
+
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ // write file done and check crc
+ write_reg ( 0x3C, 0xE4, 0x1380 );
+ }
+ mdelay ( 10 );
+
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ // polling 0x3CE4 is 0x9432
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }while ( reg_data != 0x9432 );
+ }
+
+ crc_main = crc_main ^ 0xffffffff;
+ crc_info = crc_info ^ 0xffffffff;
+
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ // CRC Main from TP
+ crc_main_tp = read_reg ( 0x3C, 0x80 );
+ crc_main_tp = ( crc_main_tp << 16 ) | read_reg ( 0x3C, 0x82 );
+
+ // CRC Info from TP
+ crc_info_tp = read_reg ( 0x3C, 0xA0 );
+ crc_info_tp = ( crc_info_tp << 16 ) | read_reg ( 0x3C, 0xA2 );
+ }
+
+ update_pass = 1;
+ if ( ( emem_type == EMEM_ALL ) || ( emem_type == EMEM_MAIN ) )
+ {
+ if ( crc_main_tp != crc_main )
+ update_pass = 0;
+
+ /*
+ if ( crc_info_tp != crc_info )
+ update_pass = 0;
+ */
+ }
+
+ if ( !update_pass )
+ {
+ DBG( "update_C33 failed\n" );
+ reset_hw();
+ FwDataCnt = 0;
+ return 0;
+ }
+
+ DBG( "update_C33 OK\n" );
+ reset_hw();
+ FwDataCnt = 0;
+ return size;
+}
+
+#ifdef FIRMWARE_AUTOUPDATE
+unsigned short main_sw_id = 0x7FF, info_sw_id = 0x7FF;
+U32 bin_conf_crc32 = 0;
+
+static U32 _CalMainCRC32(void)
+{
+ U32 ret=0;
+ U16 reg_data=0;
+
+ reset_hw();
+
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ msleep ( 100 );
+
+ //Stop MCU
+ write_reg ( 0x0F, 0xE6, 0x0001 );
+
+ // Stop Watchdog
+ write_reg_8bit ( 0x3C, 0x60, 0x55 );
+ write_reg_8bit ( 0x3C, 0x61, 0xAA );
+
+ //cmd
+ write_reg ( 0x3C, 0xE4, 0xDF4C );
+ write_reg ( 0x1E, 0x04, 0x7d60 );
+ // TP SW reset
+ write_reg ( 0x1E, 0x04, 0x829F );
+
+ //MCU run
+ write_reg ( 0x0F, 0xE6, 0x0000 );
+
+ //polling 0x3CE4
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }while ( reg_data != 0x9432 );
+
+ // Cal CRC Main from TP
+ ret = read_reg ( 0x3C, 0x80 );
+ ret = ( ret << 16 ) | read_reg ( 0x3C, 0x82 );
+
+ DBG("[21xxA]:Current main crc32=0x%x\n",ret);
+ return (ret);
+}
+
+static void _ReadBinConfig ( void )
+{
+ U8 dbbus_tx_data[5]={0};
+ U8 dbbus_rx_data[4]={0};
+ U16 reg_data=0;
+
+ reset_hw();
+
+ dbbusDWIICEnterSerialDebugMode();
+ dbbusDWIICStopMCU();
+ dbbusDWIICIICUseBus();
+ dbbusDWIICIICReshape();
+ msleep ( 100 );
+
+ //Stop MCU
+ write_reg ( 0x0F, 0xE6, 0x0001 );
+
+ // Stop Watchdog
+ write_reg_8bit ( 0x3C, 0x60, 0x55 );
+ write_reg_8bit ( 0x3C, 0x61, 0xAA );
+
+ //cmd
+ write_reg ( 0x3C, 0xE4, 0xA4AB );
+ write_reg ( 0x1E, 0x04, 0x7d60 );
+
+ // TP SW reset
+ write_reg ( 0x1E, 0x04, 0x829F );
+
+ //MCU run
+ write_reg ( 0x0F, 0xE6, 0x0000 );
+
+ //polling 0x3CE4
+ do
+ {
+ reg_data = read_reg ( 0x3C, 0xE4 );
+ }
+ while ( reg_data != 0x5B58 );
+
+ dbbus_tx_data[0] = 0x72;
+ dbbus_tx_data[1] = 0x7F;
+ dbbus_tx_data[2] = 0x55;
+ dbbus_tx_data[3] = 0x00;
+ dbbus_tx_data[4] = 0x04;
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 5 );
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4 );
+ if ((dbbus_rx_data[0]>=0x30 && dbbus_rx_data[0]<=0x39)
+ &&(dbbus_rx_data[1]>=0x30 && dbbus_rx_data[1]<=0x39)
+ &&(dbbus_rx_data[2]>=0x31 && dbbus_rx_data[2]<=0x39))
+ {
+ main_sw_id = (dbbus_rx_data[0]-0x30)*100+(dbbus_rx_data[1]-0x30)*10+(dbbus_rx_data[2]-0x30);
+ }
+
+ dbbus_tx_data[0] = 0x72;
+ dbbus_tx_data[1] = 0x7F;
+ dbbus_tx_data[2] = 0xFC;
+ dbbus_tx_data[3] = 0x00;
+ dbbus_tx_data[4] = 0x04;
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 5 );
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4 );
+ bin_conf_crc32 = dbbus_rx_data[0];
+ bin_conf_crc32 = (bin_conf_crc32<<8)|dbbus_rx_data[1];
+ bin_conf_crc32 = (bin_conf_crc32<<8)|dbbus_rx_data[2];
+ bin_conf_crc32 = (bin_conf_crc32<<8)|dbbus_rx_data[3];
+
+ dbbus_tx_data[0] = 0x72;
+ dbbus_tx_data[1] = 0x83;
+ dbbus_tx_data[2] = 0x00;
+ dbbus_tx_data[3] = 0x00;
+ dbbus_tx_data[4] = 0x04;
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 5 );
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4 );
+ if ((dbbus_rx_data[0]>=0x30 && dbbus_rx_data[0]<=0x39)
+ &&(dbbus_rx_data[1]>=0x30 && dbbus_rx_data[1]<=0x39)
+ &&(dbbus_rx_data[2]>=0x31 && dbbus_rx_data[2]<=0x39))
+ {
+ info_sw_id = (dbbus_rx_data[0]-0x30)*100+(dbbus_rx_data[1]-0x30)*10+(dbbus_rx_data[2]-0x30);
+ }
+
+ DBG("[21xxA]:main_sw_id = %d, info_sw_id = %d, bin_conf_crc32=0x%x\n", main_sw_id, info_sw_id, bin_conf_crc32);
+}
+
+static int fwAutoUpdate(void *unused)
+{
+ int time = 0;
+ ssize_t ret = 0;
+
+ for (time = 0; time < 5; time++)
+ {
+ DBG("fwAutoUpdate time = %d\n",time);
+ ret = firmware_update_c33(NULL, NULL, NULL, 1, EMEM_MAIN);
+ if (ret == 1)
+ {
+ DBG("AUTO_UPDATE OK!!!");
+ break;
+ }
+ }
+ if (time == 5)
+ {
+ DBG("AUTO_UPDATE failed!!!");
+ }
+ enable_irq(irq_msg21xx);
+ return 0;
+}
+#endif
+
+//------------------------------------------------------------------------------//
+static ssize_t firmware_update_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ DBG("*** firmware_update_show() fw_version = %s ***\n", fw_version);
+
+ return sprintf(buf, "%s\n", fw_version);
+}
+
+static ssize_t firmware_update_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ bFwUpdating = 1;
+ disable_irq(irq_msg21xx);
+
+ DBG("*** update fw size = %d ***\n", FwDataCnt);
+ size = firmware_update_c33 (dev, attr, buf, size, EMEM_MAIN);
+
+ enable_irq(irq_msg21xx);
+ bFwUpdating = 0;
+
+ return size;
+}
+
+static DEVICE_ATTR(update, SYSFS_AUTHORITY, firmware_update_show, firmware_update_store);
+
+static ssize_t firmware_version_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ DBG("*** firmware_version_show() fw_version = %s ***\n", fw_version);
+
+ return sprintf(buf, "%s\n", fw_version);
+}
+
+static ssize_t firmware_version_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ get_customer_firmware_version();
+
+ DBG("*** firmware_version_store() fw_version = %s ***\n", fw_version);
+
+ return size;
+}
+
+static DEVICE_ATTR(version, SYSFS_AUTHORITY, firmware_version_show, firmware_version_store);
+
+static ssize_t firmware_data_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ DBG("*** firmware_data_show() FwDataCnt = %d ***\n", FwDataCnt);
+
+	return sprintf(buf, "%d\n", FwDataCnt);
+}
+
+static ssize_t firmware_data_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ int count = size / 1024;
+ int i;
+
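+ /* firmware arrives in 1 KB chunks; temp[] is assumed (from the 33 KB auto-update path below) to hold at least 33 such chunks, and no bounds check is done here */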
+ for (i = 0; i < count; i++)
+ {
+ memcpy(temp[FwDataCnt], buf+(i*1024), 1024);
+
+ FwDataCnt ++;
+ }
+
+ DBG("***FwDataCnt = %d ***\n", FwDataCnt);
+
+ if (buf != NULL)
+ {
+ DBG("*** buf[0] = %c ***\n", buf[0]);
+ }
+
+ return size;
+}
+
+static DEVICE_ATTR(data, SYSFS_AUTHORITY, firmware_data_show, firmware_data_store);
+
+#ifdef TP_PRINT
+static ssize_t tp_print_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ tp_print_proc_read();
+
+ return sprintf(buf, "%d\n", bTpInSuspend);
+}
+
+static ssize_t tp_print_store(struct device *dev,
+ struct device_attribute *attr, const char *buf, size_t size)
+{
+ DBG("*** tp_print_store() ***\n");
+
+ return size;
+}
+
+static DEVICE_ATTR(tpp, SYSFS_AUTHORITY, tp_print_show, tp_print_store);
+#endif
+
+//------------------------------------------------------------------------------//
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
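+/* 0x52 0x00 0x47 0xa0/0xa1 appear to be the vendor commands that switch the controller's proximity detection on and off */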
+static void _msg_enable_proximity(void)
+{
+ U8 tx_data[4] = {0};
+
+ DBG("_msg_enable_proximity!");
+ tx_data[0] = 0x52;
+ tx_data[1] = 0x00;
+ tx_data[2] = 0x47;
+ tx_data[3] = 0xa0;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &tx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+
+ bEnableTpProximity = 1;
+}
+
+static void _msg_disable_proximity(void)
+{
+ U8 tx_data[4] = {0};
+
+ DBG("_msg_disable_proximity!");
+ tx_data[0] = 0x52;
+ tx_data[1] = 0x00;
+ tx_data[2] = 0x47;
+ tx_data[3] = 0xa1;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &tx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+
+ bEnableTpProximity = 0;
+ bFaceClosingTp = 0;
+}
+
+void tsps_msg21xx_enable(int en)
+{
+ if (en)
+ {
+ _msg_enable_proximity();
+ }
+ else
+ {
+ _msg_disable_proximity();
+ }
+}
+
+int tsps_msg21xx_data(void)
+{
+ return bFaceClosingTp;
+}
+#endif
+
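+/* two's-complement checksum: the sum of all payload bytes plus the returned byte is 0 modulo 256 */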
+static U8 calculate_checksum(U8 *msg, S32 length)
+{
+ S32 Checksum = 0;
+ S32 i;
+
+ for (i = 0; i < length; i++)
+ {
+ Checksum += msg[i];
+ }
+
+ return (U8)((-Checksum) & 0xFF);
+}
+
+static S32 parse_info(touchInfo_t *info)
+{
+ U8 data[DEMO_MODE_PACKET_LENGTH] = {0};
+ U8 checksum = 0;
+ U32 x = 0, y = 0;
+ U32 x2 = 0, y2 = 0;
+ U32 delta_x = 0, delta_y = 0;
+
+ mutex_lock(&msg21xx_mutex);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &data[0], DEMO_MODE_PACKET_LENGTH);
+ mutex_unlock(&msg21xx_mutex);
+ checksum = calculate_checksum(&data[0], (DEMO_MODE_PACKET_LENGTH-1));
+ DBG("check sum: [%x] == [%x]? \n", data[DEMO_MODE_PACKET_LENGTH-1], checksum);
+
+ if (data[DEMO_MODE_PACKET_LENGTH-1] != checksum)
+ {
+ DBG("WRONG CHECKSUM\n");
+ return -1;
+ }
+
+ if (data[0] != 0x52)
+ {
+ DBG("WRONG HEADER\n");
+ return -1;
+ }
+
+ info->keycode = 0xFF;
+ if ((data[1] == 0xFF) && (data[2] == 0xFF) && (data[3] == 0xFF) && (data[4] == 0xFF) && (data[6] == 0xFF))
+ {
+ if ((data[5] == 0xFF) || (data[5] == 0))
+ {
+ info->keycode = 0xFF;
+ }
+ else if ((data[5] == 1) || (data[5] == 2) || (data[5] == 4) || (data[5] == 8))
+ {
+ if (data[5] == 1)
+ {
+ info->keycode = 0;
+ }
+ else if (data[5] == 2)
+ {
+ info->keycode = 1;
+ }
+ else if (data[5] == 4)
+ {
+ info->keycode = 2;
+ }
+ else if (data[5] == 8)
+ {
+ info->keycode = 3;
+ }
+ }
+ #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ else if (bEnableTpProximity && ((data[5] == 0x80) || (data[5] == 0x40)))
+ {
+ if (data[5] == 0x80)
+ {
+ bFaceClosingTp = 1;
+ }
+ else if (data[5] == 0x40)
+ {
+ bFaceClosingTp = 0;
+ }
+ DBG("bEnableTpProximity=%d; bFaceClosingTp=%d; data[5]=%x;\n", bEnableTpProximity, bFaceClosingTp, data[5]);
+ return -1;
+ }
+ #endif
+ else
+ {
+ DBG("WRONG KEY\n");
+ return -1;
+ }
+ }
+ else
+ {
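+ /* demo-mode packet: x/y are 12-bit coordinates with the high nibbles in data[1] and the low bytes in data[2]/data[3]; delta_x/delta_y are packed the same way in data[4]..data[6] and locate the second finger relative to the first */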
+ x = ((data[1] & 0xF0) << 4) | data[2];
+ y = ((data[1] & 0x0F) << 8) | data[3];
+ delta_x = ((data[4] & 0xF0) << 4) | data[5];
+ delta_y = ((data[4] & 0x0F) << 8) | data[6];
+
+ if ((delta_x == 0) && (delta_y == 0))
+ {
+ info->point[0].x = x * TOUCH_SCREEN_X_MAX / TPD_WIDTH;
+ info->point[0].y = y * TOUCH_SCREEN_Y_MAX / TPD_HEIGHT;
+ info->count = 1;
+ }
+ else
+ {
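+ /* the deltas are 12-bit two's complement, so values above 2048 wrap negative */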
+ if (delta_x > 2048)
+ {
+ delta_x -= 4096;
+ }
+ if (delta_y > 2048)
+ {
+ delta_y -= 4096;
+ }
+ x2 = (U32)((S16)x + (S16)delta_x);
+ y2 = (U32)((S16)y + (S16)delta_y);
+ info->point[0].x = x * TOUCH_SCREEN_X_MAX / TPD_WIDTH;
+ info->point[0].y = y * TOUCH_SCREEN_Y_MAX / TPD_HEIGHT;
+ info->point[1].x = x2 * TOUCH_SCREEN_X_MAX / TPD_WIDTH;
+ info->point[1].y = y2 * TOUCH_SCREEN_Y_MAX / TPD_HEIGHT;
+ info->count = 2;
+ }
+ }
+
+ return 0;
+}
+
+static void touch_driver_touch_pressed(int x, int y)
+{
+ DBG("point touch pressed");
+
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_report_abs(input_dev, ABS_MT_TOUCH_MAJOR, 1);
+ input_report_abs(input_dev, ABS_MT_POSITION_X, x);
+ input_report_abs(input_dev, ABS_MT_POSITION_Y, y);
+ input_mt_sync(input_dev);
+}
+
+static void touch_driver_touch_released(void)
+{
+ DBG("point touch released");
+
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_mt_sync(input_dev);
+}
+
+/* read data over I2C and report it to the input subsystem when an interrupt has occurred */
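+/* the ISR disables the interrupt and schedules this work item; the interrupt is re-enabled once the event has been consumed */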
+void touch_driver_do_work(struct work_struct *work)
+{
+ touchInfo_t info;
+ int i = 0;
+ static int last_keycode = 0xFF;
+ static int last_count = 0;
+
+ DBG("touch_driver_do_work()\n");
+
+ memset(&info, 0x0, sizeof(info));
+ if (0 == parse_info(&info))
+ {
+ #ifdef CONFIG_TP_HAVE_KEY
+ if (info.keycode != 0xFF) //key touch pressed
+ {
+ DBG("touch_driver_do_work() info.keycode=%x, last_keycode=%x, tp_key_array[%d]=%d\n", info.keycode, last_keycode, info.keycode, tp_key_array[info.keycode]);
+ if (info.keycode < MAX_KEY_NUM)
+ {
+ if (info.keycode != last_keycode)
+ {
+ DBG("key touch pressed");
+
+ input_report_key(input_dev, BTN_TOUCH, 1);
+ input_report_key(input_dev, tp_key_array[info.keycode], 1);
+
+ last_keycode = info.keycode;
+ }
+ else
+ {
+ /* ignore the repeated key press */
+ DBG("REPEATED KEY\n");
+ }
+ }
+ else
+ {
+ DBG("WRONG KEY\n");
+ }
+ }
+ else //key touch released
+ {
+ if (last_keycode != 0xFF)
+ {
+ DBG("key touch released");
+
+ input_report_key(input_dev, BTN_TOUCH, 0);
+ input_report_key(input_dev, tp_key_array[last_keycode], 0);
+
+ last_keycode = 0xFF;
+ }
+ }
+ #endif //CONFIG_TP_HAVE_KEY
+
+ if (info.count > 0) //point touch pressed
+ {
+ for (i = 0; i < info.count; i++)
+ {
+ touch_driver_touch_pressed(info.point[i].x, info.point[i].y);
+ }
+ last_count = info.count;
+ }
+ else if (last_count > 0) //point touch released
+ {
+ touch_driver_touch_released();
+ last_count = 0;
+ }
+
+ input_sync(input_dev);
+ }
+
+ enable_irq(irq_msg21xx);
+}
+
+/* interrupt service routine, triggered when the touch controller raises an interrupt */
+irqreturn_t touch_driver_isr(int irq, void *dev_id)
+{
+ DBG("touch_driver_isr()\n");
+
+ disable_irq_nosync(irq_msg21xx);
+ schedule_work(&msg21xx_wk);
+
+ return IRQ_HANDLED;
+}
+
+#if defined(CONFIG_FB)
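+/* framebuffer blank events drive suspend/resume: on unblank, pulse the reset line, report a touch release and re-enable the irq; on powerdown, disable the irq and hold the controller in reset */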
+static int fb_notifier_callback(struct notifier_block *self, unsigned long event, void *data)
+{
+ struct fb_event *evdata = data;
+ int *blank;
+
+ if (evdata && evdata->data && event == FB_EVENT_BLANK)
+ {
+ blank = evdata->data;
+ if (*blank == FB_BLANK_UNBLANK)
+ {
+ if (bTpInSuspend)
+ {
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(200);
+
+ touch_driver_touch_released();
+ input_sync(input_dev);
+
+ enable_irq(irq_msg21xx);
+ }
+ bTpInSuspend = 0;
+ }
+ else if (*blank == FB_BLANK_POWERDOWN)
+ {
+ if (bFwUpdating)
+ {
+ DBG("suspend bFwUpdating=%d\n", bFwUpdating);
+ return 0;
+ }
+
+ #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ if (bEnableTpProximity)
+ {
+ DBG("suspend bEnableTpProximity=%d\n", bEnableTpProximity);
+ return 0;
+ }
+ #endif
+
+ if (bTpInSuspend == 0)
+ {
+ disable_irq(irq_msg21xx);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ }
+ bTpInSuspend = 1;
+ }
+ }
+
+ return 0;
+}
+#endif
+
+#ifdef CONFIG_HAS_EARLYSUSPEND
+void touch_driver_early_suspend(struct early_suspend *p)
+{
+ DBG("touch_driver_early_suspend()\n");
+
+ if (bFwUpdating)
+ {
+ DBG("suspend bFwUpdating=%d\n", bFwUpdating);
+ return;
+ }
+
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ if (bEnableTpProximity)
+ {
+ DBG("suspend bEnableTpProximity=%d\n", bEnableTpProximity);
+ return;
+ }
+#endif
+
+ if (bTpInSuspend == 0)
+ {
+ disable_irq(irq_msg21xx);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ }
+ bTpInSuspend = 1;
+}
+
+void touch_driver_early_resume(struct early_suspend *p)
+{
+ DBG("touch_driver_early_resume() bTpInSuspend=%d\n", bTpInSuspend);
+
+ if (bTpInSuspend)
+ {
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(200);
+
+ touch_driver_touch_released();
+ input_sync(input_dev);
+
+ enable_irq(irq_msg21xx);
+ }
+ bTpInSuspend = 0;
+}
+#endif
+
+/* probe: called on an I2C device match to initialize and register the input device */
+static int touch_driver_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+#ifdef FIRMWARE_AUTOUPDATE
+ unsigned short update_bin_major = 0, update_bin_minor = 0;
+ int i, update_flag = 0;
+#endif
+ int ret = 0;
+
+ if (input_dev != NULL)
+ {
+ DBG("input device has found\n");
+ return -1;
+ }
+
+ DBG("*** %s ***\n", __FUNCTION__);
+
+ i2c_client = client;
+
+ ret = gpio_request(MS_TS_MSG21XX_GPIO_RST, "reset");
+ if (ret < 0)
+ {
+ pr_err("*** Failed to request GPIO %d, error %d ***\n", MS_TS_MSG21XX_GPIO_RST, ret);
+ return ret; /* the GPIO was never acquired, so nothing to free */
+ }
+
+ // power on the TP: pulse the reset line, then allow 200 ms for the controller to boot
+ gpio_direction_output(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(100);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 0);
+ mdelay(10);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_RST, 1);
+ mdelay(200);
+ if (0 == get_ic_type())
+ {
+ pr_err("the currnet ic is not Mstar\n");
+ ret = -1;
+ goto err0;
+ }
+
+ mutex_init(&msg21xx_mutex);
+
+ /* allocate an input device */
+ input_dev = input_allocate_device();
+ if (!input_dev)
+ {
+ ret = -ENOMEM;
+ pr_err("*** input device allocation failed ***\n");
+ goto err1;
+ }
+
+ input_dev->name = client->name;
+ input_dev->phys = "I2C";
+ input_dev->dev.parent = &client->dev;
+ input_dev->id.bustype = BUS_I2C;
+
+ /* set the supported event type for input device */
+ set_bit(EV_ABS, input_dev->evbit);
+ set_bit(EV_SYN, input_dev->evbit);
+ set_bit(EV_KEY, input_dev->evbit);
+ set_bit(BTN_TOUCH, input_dev->keybit);
+ set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
+
+#ifdef CONFIG_TP_HAVE_KEY
+ {
+ int i;
+ for (i = 0; i < MAX_KEY_NUM; i++)
+ {
+ input_set_capability(input_dev, EV_KEY, tp_key_array[i]);
+ }
+ }
+#endif
+
+ input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 2, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_X, TOUCH_SCREEN_X_MIN, TOUCH_SCREEN_X_MAX, 0, 0);
+ input_set_abs_params(input_dev, ABS_MT_POSITION_Y, TOUCH_SCREEN_Y_MIN, TOUCH_SCREEN_Y_MAX, 0, 0);
+
+ /* register the input device to input sub-system */
+ ret = input_register_device(input_dev);
+ if (ret < 0)
+ {
+ pr_err("*** Unable to register ms-touchscreen input device ***\n");
+ goto err1;
+ }
+
+ /* set sysfs for firmware */
+ firmware_class = class_create(THIS_MODULE, "ms-touchscreen-msg20xx"); //client->name
+ if (IS_ERR(firmware_class))
+ pr_err("Failed to create class(firmware)!\n");
+
+ firmware_cmd_dev = device_create(firmware_class, NULL, 0, NULL, "device");
+ if (IS_ERR(firmware_cmd_dev))
+ pr_err("Failed to create device(firmware_cmd_dev)!\n");
+
+ // version
+ if (device_create_file(firmware_cmd_dev, &dev_attr_version) < 0)
+ pr_err("Failed to create device file(%s)!\n", dev_attr_version.attr.name);
+ // update
+ if (device_create_file(firmware_cmd_dev, &dev_attr_update) < 0)
+ pr_err("Failed to create device file(%s)!\n", dev_attr_update.attr.name);
+ // data
+ if (device_create_file(firmware_cmd_dev, &dev_attr_data) < 0)
+ pr_err("Failed to create device file(%s)!\n", dev_attr_data.attr.name);
+
+#ifdef TP_PRINT
+ tp_print_create_entry();
+#endif
+
+ dev_set_drvdata(firmware_cmd_dev, NULL);
+
+ /* initialize the work queue */
+ INIT_WORK(&msg21xx_wk, touch_driver_do_work);
+
+ ret = gpio_request(MS_TS_MSG21XX_GPIO_INT, "interrupt");
+ if (ret < 0)
+ {
+ pr_err("*** Failed to request GPIO %d, error %d ***\n", MS_TS_MSG21XX_GPIO_INT, ret);
+ goto err2;
+ }
+ gpio_direction_input(MS_TS_MSG21XX_GPIO_INT);
+ gpio_set_value(MS_TS_MSG21XX_GPIO_INT, 1);
+
+ irq_msg21xx = gpio_to_irq(MS_TS_MSG21XX_GPIO_INT);
+
+ /* request an irq and register the isr */
+ ret = request_irq(irq_msg21xx, touch_driver_isr, IRQF_TRIGGER_RISING, "msg21xx", NULL);
+ if (ret != 0)
+ {
+ pr_err("*** Unable to claim irq %d; error %d ***\n", MS_TS_MSG21XX_GPIO_INT, ret);
+ goto err3;
+ }
+
+ disable_irq(irq_msg21xx);
+
+#if defined(CONFIG_FB)
+ msg21xx_fb_notif.notifier_call = fb_notifier_callback;
+ ret = fb_register_client(&msg21xx_fb_notif);
+#elif defined (CONFIG_HAS_EARLYSUSPEND)
+ mstar_ts_early_suspend.level = EARLY_SUSPEND_LEVEL_BLANK_SCREEN;
+ mstar_ts_early_suspend.suspend = touch_driver_early_suspend;
+ mstar_ts_early_suspend.resume = touch_driver_early_resume;
+ register_early_suspend(&mstar_ts_early_suspend);
+#endif
+
+#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
+ tsps_assist_register_callback("msg21xx", &tsps_msg21xx_enable, &tsps_msg21xx_data);
+#endif
+
+#ifdef FIRMWARE_AUTOUPDATE
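+ /* pick the update image by software ID: if the main block's CRC matches the stored config CRC, trust main_sw_id and update only to a newer minor of the same major; otherwise fall back to info_sw_id and force an update. The image version words sit at offsets 0x7f4e/0x7f50 of each 33 KB image. */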
+ get_customer_firmware_version();
+ _ReadBinConfig();
+
+ if (main_sw_id == info_sw_id)
+ {
+ if (_CalMainCRC32() == bin_conf_crc32)
+ {
+ if ((main_sw_id >= SWID_START) && (main_sw_id < SWID_NULL))
+ {
+ update_bin_major = (MSG_FIRMWARE[main_sw_id - SWID_START][0x7f4f] << 8) + MSG_FIRMWARE[main_sw_id - SWID_START][0x7f4e];
+ update_bin_minor = (MSG_FIRMWARE[main_sw_id - SWID_START][0x7f51] << 8) + MSG_FIRMWARE[main_sw_id - SWID_START][0x7f50];
+
+ //check upgrading
+ if ((update_bin_major == fw_version_major) && (update_bin_minor > fw_version_minor))
+ {
+ update_flag = 1;
+ }
+ }
+ DBG("MAIN sw_id=%d,update_flag=%d,update_bin_major=%d,update_bin_minor=%d\n",main_sw_id,update_flag,update_bin_major,update_bin_minor);
+ }
+ else
+ {
+ if ((info_sw_id >= SWID_START) && (info_sw_id < SWID_NULL))
+ {
+ update_bin_major = (MSG_FIRMWARE[info_sw_id - SWID_START][0x7f4f] << 8) + MSG_FIRMWARE[info_sw_id - SWID_START][0x7f4e];
+ update_bin_minor = (MSG_FIRMWARE[info_sw_id - SWID_START][0x7f51] << 8) + MSG_FIRMWARE[info_sw_id - SWID_START][0x7f50];
+ update_flag = 1;
+ }
+ DBG("INFO1 sw_id=%d,update_flag=%d,update_bin_major=%d,update_bin_minor=%d\n",info_sw_id,update_flag,update_bin_major,update_bin_minor);
+ }
+ }
+ else
+ {
+ if ((info_sw_id >= SWID_START) && (info_sw_id < SWID_NULL))
+ {
+ update_bin_major = (MSG_FIRMWARE[info_sw_id - SWID_START][0x7f4f] << 8) + MSG_FIRMWARE[info_sw_id - SWID_START][0x7f4e];
+ update_bin_minor = (MSG_FIRMWARE[info_sw_id - SWID_START][0x7f51] << 8) + MSG_FIRMWARE[info_sw_id - SWID_START][0x7f50];
+ update_flag = 1;
+ }
+ DBG("INFO2 sw_id=%d,update_flag=%d,update_bin_major=%d,update_bin_minor=%d\n",info_sw_id,update_flag,update_bin_major,update_bin_minor);
+ }
+
+ if (update_flag == 1)
+ {
+ DBG("MSG21XX_fw_auto_update begin....\n");
+ //transfer data
+ for (i = 0; i < 33; i++)
+ {
+ firmware_data_store(NULL, NULL, &(MSG_FIRMWARE[info_sw_id-SWID_START][i*1024]), 1024);
+ }
+
+ kthread_run(fwAutoUpdate, NULL, "MSG21XX_fw_auto_update");
+ DBG("*** mstar touch screen registered ***\n");
+ return 0;
+ }
+
+ reset_hw();
+#endif
+
+ DBG("*** mstar touch screen registered ***\n");
+ enable_irq(irq_msg21xx);
+ return 0;
+
+err3:
+ gpio_free(MS_TS_MSG21XX_GPIO_INT);
+
+err2:
+ input_unregister_device(input_dev); /* unregistering also frees the device */
+ input_dev = NULL;
+ mutex_destroy(&msg21xx_mutex);
+ gpio_free(MS_TS_MSG21XX_GPIO_RST);
+
+ return ret;
+
+err1:
+ mutex_destroy(&msg21xx_mutex);
+ if (input_dev) /* allocated but never registered */
+ input_free_device(input_dev);
+ input_dev = NULL;
+
+err0:
+ gpio_free(MS_TS_MSG21XX_GPIO_RST);
+
+ return ret;
+}
+
+/* remove: called when the I2C client is unbound from this driver */
+static int touch_driver_remove(struct i2c_client *client)
+{
+ DBG("touch_driver_remove()\n");
+
+ free_irq(irq_msg21xx, input_dev);
+ gpio_free(MS_TS_MSG21XX_GPIO_INT);
+ gpio_free(MS_TS_MSG21XX_GPIO_RST);
+ input_unregister_device(input_dev);
+ mutex_destroy(&msg21xx_mutex);
+
+ return 0;
+}
+
+/* I2C device ID table used to match devices against this driver */
+static const struct i2c_device_id touch_device_id[] =
+{
+ {"msg21xx", 0},
+ {}, /* must not be omitted */
+};
+
+MODULE_DEVICE_TABLE(i2c, touch_device_id);
+
+static struct i2c_driver touch_device_driver =
+{
+ .driver = {
+ .name = "msg21xx",
+ .owner = THIS_MODULE,
+ },
+ .probe = touch_driver_probe,
+ .remove = touch_driver_remove,
+ .id_table = touch_device_id,
+};
+
+static int __init touch_driver_init(void)
+{
+ int ret;
+
+ /* register driver */
+ ret = i2c_add_driver(&touch_device_driver);
+ if (ret < 0)
+ {
+ DBG("add touch_device_driver i2c driver failed.\n");
+ return -ENODEV;
+ }
+ DBG("add touch_device_driver i2c driver.\n");
+
+ return ret;
+}
+
+static void __exit touch_driver_exit(void)
+{
+ DBG("remove touch_device_driver i2c driver.\n");
+
+ i2c_del_driver(&touch_device_driver);
+}
+
+#ifdef TP_PRINT
+#include <linux/proc_fs.h>
+
+static U16 InfoAddr = 0x0F, PoolAddr = 0x10, TransLen = 256;
+static U8 row, units, cnt;
+
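+/* dump cnt rows of row values (units bytes each) from the firmware's debug pool, reading at most TransLen bytes per I2C transfer */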
+static int tp_print_proc_read(void)
+{
+ U16 i, j;
+ U16 left, offset = 0;
+ U8 dbbus_tx_data[3] = {0};
+ U8 u8Data;
+ S16 s16Data;
+ S32 s32Data;
+ char *buf = NULL;
+
+ left = cnt*row*units;
+ if ((bTpInSuspend == 0) && (InfoAddr != 0x0F) && (PoolAddr != 0x10) && (left > 0))
+ {
+ buf = kmalloc(left, GFP_KERNEL);
+ if (buf != NULL)
+ {
+ printk("tpp: \n");
+
+ while (left > 0)
+ {
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = ((PoolAddr + offset) >> 8) & 0xFF;
+ dbbus_tx_data[2] = (PoolAddr + offset) & 0xFF;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &buf[offset], left > TransLen ? TransLen : left);
+ mutex_unlock(&msg21xx_mutex);
+
+ if (left > TransLen)
+ {
+ left -= TransLen;
+ offset += TransLen;
+ }
+ else
+ {
+ left = 0;
+ }
+ }
+
+ for (i = 0; i < cnt; i++)
+ {
+ printk("tpp: ");
+ for (j = 0; j < row; j++)
+ {
+ if (units == 1)
+ {
+ u8Data = buf[i*row*units + j*units];
+ printk("%d\t", u8Data);
+ }
+ else if (units == 2)
+ {
+ s16Data = buf[i*row*units + j*units] + (buf[i*row*units + j*units + 1] << 8);
+ printk("%d\t", s16Data);
+ }
+ else if (units == 4)
+ {
+ s32Data = buf[i*row*units + j*units] + (buf[i*row*units + j*units + 1] << 8) + (buf[i*row*units + j*units + 2] << 16) + (buf[i*row*units + j*units + 3] << 24);
+ printk("%d\t", s32Data);
+ }
+ }
+ printk("\n");
+ }
+
+ kfree(buf);
+ }
+ }
+
+ return 0;
+}
+
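+/* query the debug info/pool addresses at register 0x53 0x0058; if the firmware exposes them, read the dump geometry and create the tpp attribute */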
+static void tp_print_create_entry(void)
+{
+ U8 dbbus_tx_data[3] = {0};
+ U8 dbbus_rx_data[8] = {0};
+
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = 0x00;
+ dbbus_tx_data[2] = 0x58;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 4);
+ mutex_unlock(&msg21xx_mutex);
+ InfoAddr = (dbbus_rx_data[1] << 8) + dbbus_rx_data[0];
+ PoolAddr = (dbbus_rx_data[3] << 8) + dbbus_rx_data[2];
+ printk("InfoAddr=0x%X\n", InfoAddr);
+ printk("PoolAddr=0x%X\n", PoolAddr);
+
+ if ((InfoAddr != 0x0F) && (PoolAddr != 0x10))
+ {
+ msleep(10);
+ dbbus_tx_data[0] = 0x53;
+ dbbus_tx_data[1] = (InfoAddr >> 8) & 0xFF;
+ dbbus_tx_data[2] = InfoAddr & 0xFF;
+ mutex_lock(&msg21xx_mutex);
+ write_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_tx_data[0], 3);
+ read_i2c_seq(SLAVE_I2C_ID_DWI2C, &dbbus_rx_data[0], 8);
+ mutex_unlock(&msg21xx_mutex);
+
+ units = dbbus_rx_data[0];
+ row = dbbus_rx_data[1];
+ cnt = dbbus_rx_data[2];
+ TransLen = (dbbus_rx_data[7] << 8) + dbbus_rx_data[6];
+ printk("tpp: row=%d, units=%d\n", row, units);
+ printk("tpp: cnt=%d, TransLen=%d\n", cnt, TransLen);
+
+ // tpp
+ if (device_create_file(firmware_cmd_dev, &dev_attr_tpp) < 0)
+ {
+ pr_err("Failed to create device file(%s)!\n", dev_attr_tpp.attr.name);
+ }
+ }
+}
+#endif
+
+module_init(touch_driver_init);
+module_exit(touch_driver_exit);
+MODULE_AUTHOR("MStar Semiconductor, Inc.");
+MODULE_LICENSE("GPL v2");
+
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 6b2ef88c7163..75387b7c2069 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -108,6 +108,7 @@ static const struct snd_kcontrol_new name##_mux = \
#define WCD934X_DEC_PWR_LVL_LP 0x02
#define WCD934X_DEC_PWR_LVL_HP 0x04
#define WCD934X_DEC_PWR_LVL_DF 0x00
+#define WCD934X_STRING_LEN 100
#define WCD934X_MAX_MICBIAS 4
#define DAPM_MICBIAS1_STANDALONE "MIC BIAS1 Standalone"
@@ -470,7 +471,7 @@ struct tavil_priv {
struct clk *wcd_ext_clk;
struct mutex codec_mutex;
- struct work_struct wcd_add_child_devices_work;
+ struct work_struct tavil_add_child_devices_work;
struct hpf_work tx_hpf_work[WCD934X_NUM_DECIMATORS];
struct tx_mute_work tx_mute_dwork[WCD934X_NUM_DECIMATORS];
};
@@ -5551,7 +5552,7 @@ static int tavil_swrm_handle_irq(void *handle,
return ret;
}
-static void wcd_add_child_devices(struct work_struct *work)
+static void tavil_add_child_devices(struct work_struct *work)
{
struct tavil_priv *tavil;
struct platform_device *pdev;
@@ -5560,9 +5561,10 @@ static void wcd_add_child_devices(struct work_struct *work)
struct tavil_swr_ctrl_data *swr_ctrl_data = NULL, *temp;
int ret, ctrl_num = 0;
struct wcd_swr_ctrl_platform_data *platdata;
+ char plat_dev_name[WCD934X_STRING_LEN];
tavil = container_of(work, struct tavil_priv,
- wcd_add_child_devices_work);
+ tavil_add_child_devices_work);
if (!tavil) {
pr_err("%s: Memory for WCD934X does not exist\n",
__func__);
@@ -5583,17 +5585,17 @@ static void wcd_add_child_devices(struct work_struct *work)
platdata = &tavil->swr.plat_data;
for_each_child_of_node(wcd9xxx->dev->of_node, node) {
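+ /* only "swr_master" and msm_cdc_pinctrl children get platform devices; other nodes are skipped */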
- temp = krealloc(swr_ctrl_data,
- (ctrl_num + 1) * sizeof(struct tavil_swr_ctrl_data),
- GFP_KERNEL);
- if (!temp) {
- dev_err(wcd9xxx->dev, "out of memory\n");
- ret = -ENOMEM;
- goto err_mem;
- }
- swr_ctrl_data = temp;
- swr_ctrl_data[ctrl_num].swr_pdev = NULL;
- pdev = platform_device_alloc("tavil_swr_ctrl", -1);
+ if (!strcmp(node->name, "swr_master"))
+ strlcpy(plat_dev_name, "tavil_swr_ctrl",
+ (WCD934X_STRING_LEN - 1));
+ else if (strnstr(node->name, "msm_cdc_pinctrl",
+ strlen("msm_cdc_pinctrl")) != NULL)
+ strlcpy(plat_dev_name, node->name,
+ (WCD934X_STRING_LEN - 1));
+ else
+ continue;
+
+ pdev = platform_device_alloc(plat_dev_name, -1);
if (!pdev) {
dev_err(wcd9xxx->dev, "%s: pdev memory alloc failed\n",
__func__);
@@ -5603,34 +5605,51 @@ static void wcd_add_child_devices(struct work_struct *work)
pdev->dev.parent = tavil->dev;
pdev->dev.of_node = node;
- ret = platform_device_add_data(pdev, platdata,
- sizeof(*platdata));
- if (ret) {
- dev_err(&pdev->dev, "%s: cannot add plat data for ctrl:%d\n",
- __func__, ctrl_num);
- goto err_pdev_add;
+ if (strcmp(node->name, "swr_master") == 0) {
+ ret = platform_device_add_data(pdev, platdata,
+ sizeof(*platdata));
+ if (ret) {
+ dev_err(&pdev->dev,
+ "%s: cannot add plat data ctrl:%d\n",
+ __func__, ctrl_num);
+ goto err_pdev_add;
+ }
}
ret = platform_device_add(pdev);
if (ret) {
- dev_err(&pdev->dev, "%s: Cannot add swr platform device\n",
+ dev_err(&pdev->dev,
+ "%s: Cannot add platform device\n",
__func__);
goto err_pdev_add;
}
- swr_ctrl_data[ctrl_num].swr_pdev = pdev;
- ctrl_num++;
- dev_dbg(&pdev->dev, "%s: Added soundwire ctrl device(s)\n",
- __func__);
+ if (strcmp(node->name, "swr_master") == 0) {
+ temp = krealloc(swr_ctrl_data,
+ (ctrl_num + 1) * sizeof(
+ struct tavil_swr_ctrl_data),
+ GFP_KERNEL);
+ if (!temp) {
+ dev_err(wcd9xxx->dev, "out of memory\n");
+ ret = -ENOMEM;
+ goto err_pdev_add;
+ }
+ swr_ctrl_data = temp;
+ swr_ctrl_data[ctrl_num].swr_pdev = pdev;
+ ctrl_num++;
+ dev_dbg(&pdev->dev,
+ "%s: Added soundwire ctrl device(s)\n",
+ __func__);
+ tavil->swr.ctrl_data = swr_ctrl_data;
+ }
}
- tavil->swr.ctrl_data = swr_ctrl_data;
return;
err_pdev_add:
platform_device_put(pdev);
err_mem:
- kfree(swr_ctrl_data);
+ return;
}
static int __tavil_enable_efuse_sensing(struct tavil_priv *tavil)
@@ -5676,7 +5695,8 @@ static int tavil_probe(struct platform_device *pdev)
tavil->wcd9xxx = dev_get_drvdata(pdev->dev.parent);
tavil->dev = &pdev->dev;
- INIT_WORK(&tavil->wcd_add_child_devices_work, wcd_add_child_devices);
+ INIT_WORK(&tavil->tavil_add_child_devices_work,
+ tavil_add_child_devices);
mutex_init(&tavil->swr.read_mutex);
mutex_init(&tavil->swr.write_mutex);
mutex_init(&tavil->swr.clk_mutex);
@@ -5733,7 +5753,7 @@ static int tavil_probe(struct platform_device *pdev)
__func__);
goto err_cdc_reg;
}
- schedule_work(&tavil->wcd_add_child_devices_work);
+ schedule_work(&tavil->tavil_add_child_devices_work);
return ret;
diff --git a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
index 48180cf5e337..ad2f2e9865c3 100644
--- a/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
+++ b/sound/soc/msm/qdsp6v2/msm-ds2-dap-config.c
@@ -1523,8 +1523,9 @@ static int msm_ds2_dap_get_param(u32 cmd, void *arg)
}
/* Return if invalid length */
- if (dolby_data->length >
- (DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM - DOLBY_PARAM_PAYLOAD_SIZE)) {
+ if ((dolby_data->length >
+ (DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM - DOLBY_PARAM_PAYLOAD_SIZE)) ||
+ (dolby_data->length <= 0)) {
pr_err("Invalid length %d", dolby_data->length);
rc = -EINVAL;
goto end;
diff --git a/sound/usb/usb_audio_qmi_svc.c b/sound/usb/usb_audio_qmi_svc.c
index 9d2b75876bfe..94c051773fd9 100644
--- a/sound/usb/usb_audio_qmi_svc.c
+++ b/sound/usb/usb_audio_qmi_svc.c
@@ -405,12 +405,15 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
subs->interface, subs->altset_idx);
goto err;
}
- resp->bDelay = as->bDelay;
+ resp->data_path_delay = as->bDelay;
+ resp->data_path_delay_valid = 1;
fmt_v1 = (struct uac_format_type_i_discrete_descriptor *)fmt;
- resp->bSubslotSize = fmt_v1->bSubframeSize;
+ resp->usb_audio_subslot_size = fmt_v1->bSubframeSize;
+ resp->usb_audio_subslot_size_valid = 1;
} else if (protocol == UAC_VERSION_2) {
fmt_v2 = (struct uac_format_type_i_ext_descriptor *)fmt;
- resp->bSubslotSize = fmt_v2->bSubslotSize;
+ resp->usb_audio_subslot_size = fmt_v2->bSubslotSize;
+ resp->usb_audio_subslot_size_valid = 1;
} else {
pr_err("%s: unknown protocol version %x\n", __func__, protocol);
goto err;
@@ -424,11 +427,14 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
subs->interface, subs->altset_idx);
goto err;
}
- resp->bcdADC = ac->bcdADC;
+ resp->usb_audio_spec_revision = ac->bcdADC;
+ resp->usb_audio_spec_revision_valid = 1;
resp->slot_id = subs->dev->slot_id;
+ resp->slot_id_valid = 1;
memcpy(&resp->std_as_opr_intf_desc, &alts->desc, sizeof(alts->desc));
+ resp->std_as_opr_intf_desc_valid = 1;
ep = usb_pipe_endpoint(subs->dev, subs->data_endpoint->pipe);
if (!ep) {
@@ -437,6 +443,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
goto err;
}
memcpy(&resp->std_as_data_ep_desc, &ep->desc, sizeof(ep->desc));
+ resp->std_as_data_ep_desc_valid = 1;
xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
if (!xhci_pa) {
@@ -454,6 +461,8 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
goto err;
}
memcpy(&resp->std_as_sync_ep_desc, &ep->desc, sizeof(ep->desc));
+ resp->std_as_sync_ep_desc_valid = 1;
+
xhci_pa = usb_get_xfer_ring_dma_addr(subs->dev, ep);
if (!xhci_pa) {
pr_err("%s:failed to get sync ep ring dma address\n",
@@ -464,6 +473,7 @@ static int prepare_qmi_response(struct snd_usb_substream *subs,
}
resp->interrupter_num = uaudio_qdev->intr_num;
+ resp->interrupter_num_valid = 1;
/* map xhci data structures PA memory to iova */
@@ -570,6 +580,8 @@ skip_sync:
resp->xhci_mem_info.xfer_buff.va = PREPEND_SID_TO_IOVA(va,
uaudio_qdev->sid);
+ resp->xhci_mem_info_valid = 1;
+
if (!atomic_read(&uadev[card_num].in_use)) {
kref_init(&uadev[card_num].kref);
init_waitqueue_head(&uadev[card_num].disconnect_wq);
@@ -734,7 +746,7 @@ static void uaudio_dev_release(struct kref *kref)
static int handle_uaudio_stream_req(void *req_h, void *req)
{
struct qmi_uaudio_stream_req_msg_v01 *req_msg;
- struct qmi_uaudio_stream_resp_msg_v01 resp = {0};
+ struct qmi_uaudio_stream_resp_msg_v01 resp = {{0}, 0};
struct snd_usb_substream *subs;
struct snd_usb_audio *chip = NULL;
struct uaudio_qmi_svc *svc = uaudio_svc;
@@ -744,6 +756,13 @@ static int handle_uaudio_stream_req(void *req_h, void *req)
req_msg = (struct qmi_uaudio_stream_req_msg_v01 *)req;
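+ /* these four fields are optional TLVs in this IDL revision; treat a request that omits any of them as invalid */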
+ if (!req_msg->audio_format_valid || !req_msg->bit_rate_valid ||
+ !req_msg->number_of_ch_valid || !req_msg->xfer_buff_size_valid) {
+ pr_err("%s: invalid request msg\n", __func__);
+ ret = -EINVAL;
+ goto response;
+ }
+
direction = req_msg->usb_token & SND_PCM_STREAM_DIRECTION;
pcm_dev_num = (req_msg->usb_token & SND_PCM_DEV_NUM_MASK) >> 8;
pcm_card_num = (req_msg->usb_token & SND_PCM_CARD_NUM_MASK) >> 16;
@@ -828,7 +847,12 @@ response:
uaudio_dev_release);
}
- resp.status = ret;
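+ /* echo the caller's token and report both the raw return code and the mapped QMI status */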
+ resp.usb_token = req_msg->usb_token;
+ resp.usb_token_valid = 1;
+ resp.internal_status = ret;
+ resp.internal_status_valid = 1;
+ resp.status = ret ? USB_AUDIO_STREAM_REQ_FAILURE_V01 : ret;
+ resp.status_valid = 1;
ret = qmi_send_resp_from_cb(svc->uaudio_svc_hdl, svc->curr_conn, req_h,
&uaudio_stream_resp_desc, &resp, sizeof(resp));
diff --git a/sound/usb/usb_audio_qmi_v01.c b/sound/usb/usb_audio_qmi_v01.c
index 31b1ba74d5c7..6f6f194e89fb 100644
--- a/sound/usb/usb_audio_qmi_v01.c
+++ b/sound/usb/usb_audio_qmi_v01.c
@@ -280,65 +280,92 @@ static struct elem_info usb_interface_descriptor_v01_ei[] = {
struct elem_info qmi_uaudio_stream_req_msg_v01_ei[] = {
{
- .data_type = QMI_UNSIGNED_4_BYTE,
+ .data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
- .elem_size = sizeof(uint32_t),
+ .elem_size = sizeof(uint8_t),
.is_array = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
- priv_data),
+ enable),
},
{
- .data_type = QMI_UNSIGNED_1_BYTE,
+ .data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
- .elem_size = sizeof(uint8_t),
+ .elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
- enable),
+ usb_token),
},
{
- .data_type = QMI_UNSIGNED_4_BYTE,
+ .data_type = QMI_OPT_FLAG,
.elem_len = 1,
- .elem_size = sizeof(uint32_t),
+ .elem_size = sizeof(uint8_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x03,
+ .tlv_type = 0x10,
.offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
- usb_token),
+ audio_format_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x04,
+ .tlv_type = 0x10,
.offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
audio_format),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+ number_of_ch_valid),
+ },
+ {
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x05,
+ .tlv_type = 0x11,
.offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
number_of_ch),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+ bit_rate_valid),
+ },
+ {
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x06,
+ .tlv_type = 0x12,
.offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
bit_rate),
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
+ xfer_buff_size_valid),
+ },
+ {
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x07,
+ .tlv_type = 0x13,
.offset = offsetof(struct qmi_uaudio_stream_req_msg_v01,
xfer_buff_size),
},
@@ -351,115 +378,256 @@ struct elem_info qmi_uaudio_stream_req_msg_v01_ei[] = {
struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = {
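+ /* each optional member is encoded as a QMI_OPT_FLAG/value pair sharing one tlv_type; the mandatory resp struct keeps its own tlv_type (0x02) */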
{
+ .data_type = QMI_STRUCT,
+ .elem_len = 1,
+ .elem_size = sizeof(struct qmi_response_type_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x02,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ resp),
+ .ei_array = get_qmi_response_type_v01_ei(),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ status_valid),
+ },
+ {
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
+ .elem_len = 1,
+ .elem_size = sizeof(enum usb_audio_stream_status_enum_v01),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x10,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ status),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ internal_status_valid),
+ },
+ {
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x01,
+ .tlv_type = 0x11,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ internal_status),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
- priv_data),
+ slot_id_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x02,
+ .tlv_type = 0x12,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
- status),
+ slot_id),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ usb_token_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x03,
+ .tlv_type = 0x13,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
- slot_id),
+ usb_token),
},
{
- .data_type = QMI_UNSIGNED_1_BYTE,
+ .data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x04,
+ .tlv_type = 0x14,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
- bSubslotSize),
+ std_as_opr_intf_desc_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct usb_interface_descriptor_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x05,
+ .tlv_type = 0x14,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
std_as_opr_intf_desc),
.ei_array = usb_interface_descriptor_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ std_as_data_ep_desc_valid),
+ },
+ {
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct usb_endpoint_descriptor_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x06,
+ .tlv_type = 0x15,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
std_as_data_ep_desc),
.ei_array = usb_endpoint_descriptor_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ std_as_sync_ep_desc_valid),
+ },
+ {
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct usb_endpoint_descriptor_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x07,
+ .tlv_type = 0x16,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
std_as_sync_ep_desc),
.ei_array = usb_endpoint_descriptor_v01_ei,
},
{
- .data_type = QMI_UNSIGNED_1_BYTE,
+ .data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x08,
+ .tlv_type = 0x17,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
- bDelay),
+ usb_audio_spec_revision_valid),
},
{
.data_type = QMI_UNSIGNED_2_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint16_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x09,
+ .tlv_type = 0x17,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
- bcdADC),
+ usb_audio_spec_revision),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ data_path_delay_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ data_path_delay),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ usb_audio_subslot_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x19,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ usb_audio_subslot_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1A,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ xhci_mem_info_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct apps_mem_info_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x0A,
+ .tlv_type = 0x1A,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
xhci_mem_info),
.ei_array = apps_mem_info_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x1B,
+ .offset = offsetof(
+ struct qmi_uaudio_stream_resp_msg_v01,
+ interrupter_num_valid),
+ },
+ {
.data_type = QMI_UNSIGNED_1_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x0B,
+ .tlv_type = 0x1B,
.offset = offsetof(
struct qmi_uaudio_stream_resp_msg_v01,
interrupter_num),
@@ -473,13 +641,14 @@ struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[] = {
struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = {
{
- .data_type = QMI_UNSIGNED_4_BYTE,
+ .data_type = QMI_SIGNED_4_BYTE_ENUM,
.elem_len = 1,
- .elem_size = sizeof(uint32_t),
+ .elem_size = sizeof(
+ enum usb_audio_device_indication_enum_v01),
.is_array = NO_ARRAY,
.tlv_type = 0x01,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
- usb_token),
+ dev_event),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
@@ -488,76 +657,175 @@ struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[] = {
.is_array = NO_ARRAY,
.tlv_type = 0x02,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
- priv_data),
+ slot_id),
},
{
- .data_type = QMI_UNSIGNED_4_BYTE,
+ .data_type = QMI_OPT_FLAG,
.elem_len = 1,
- .elem_size = sizeof(uint32_t),
+ .elem_size = sizeof(uint8_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x03,
+ .tlv_type = 0x10,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
- status),
+ usb_token_valid),
},
{
.data_type = QMI_UNSIGNED_4_BYTE,
.elem_len = 1,
.elem_size = sizeof(uint32_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x04,
+ .tlv_type = 0x10,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
- slot_id),
+ usb_token),
},
{
- .data_type = QMI_UNSIGNED_1_BYTE,
+ .data_type = QMI_OPT_FLAG,
.elem_len = 1,
.elem_size = sizeof(uint8_t),
.is_array = NO_ARRAY,
- .tlv_type = 0x05,
+ .tlv_type = 0x11,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
- bSubslotSize),
+ std_as_opr_intf_desc_valid),
},
{
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct usb_interface_descriptor_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x06,
+ .tlv_type = 0x11,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
std_as_opr_intf_desc),
.ei_array = usb_interface_descriptor_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x12,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ std_as_data_ep_desc_valid),
+ },
+ {
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct usb_endpoint_descriptor_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x07,
+ .tlv_type = 0x12,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
std_as_data_ep_desc),
.ei_array = usb_endpoint_descriptor_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x13,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ std_as_sync_ep_desc_valid),
+ },
+ {
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct usb_endpoint_descriptor_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x08,
+ .tlv_type = 0x13,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
std_as_sync_ep_desc),
.ei_array = usb_endpoint_descriptor_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ usb_audio_spec_revision_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_2_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint16_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x14,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ usb_audio_spec_revision),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ data_path_delay_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x15,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ data_path_delay),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ usb_audio_subslot_size_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x16,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ usb_audio_subslot_size),
+ },
+ {
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x17,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ xhci_mem_info_valid),
+ },
+ {
.data_type = QMI_STRUCT,
.elem_len = 1,
.elem_size = sizeof(struct apps_mem_info_v01),
.is_array = NO_ARRAY,
- .tlv_type = 0x09,
+ .tlv_type = 0x17,
.offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
xhci_mem_info),
.ei_array = apps_mem_info_v01_ei,
},
{
+ .data_type = QMI_OPT_FLAG,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ interrupter_num_valid),
+ },
+ {
+ .data_type = QMI_UNSIGNED_1_BYTE,
+ .elem_len = 1,
+ .elem_size = sizeof(uint8_t),
+ .is_array = NO_ARRAY,
+ .tlv_type = 0x18,
+ .offset = offsetof(struct qmi_uaudio_stream_ind_msg_v01,
+ interrupter_num),
+ },
+ {
.data_type = QMI_EOTI,
.is_array = NO_ARRAY,
.tlv_type = QMI_COMMON_TLV_TYPE,
diff --git a/sound/usb/usb_audio_qmi_v01.h b/sound/usb/usb_audio_qmi_v01.h
index 7ad1ab8a61a9..aa1018a22105 100644
--- a/sound/usb/usb_audio_qmi_v01.h
+++ b/sound/usb/usb_audio_qmi_v01.h
@@ -13,7 +13,7 @@
#ifndef USB_QMI_V01_H
#define USB_QMI_V01_H
-#define UAUDIO_STREAM_SERVICE_ID_V01 0x41C
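+/* the service ID is bumped, presumably because the optional-TLV layout below is not wire-compatible with the old 0x41C encoding */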
+#define UAUDIO_STREAM_SERVICE_ID_V01 0x41D
#define UAUDIO_STREAM_SERVICE_VERS_V01 0x01
#define QMI_UAUDIO_STREAM_RESP_V01 0x0001
@@ -58,46 +58,93 @@ struct usb_interface_descriptor_v01 {
uint8_t iInterface;
};
+enum usb_audio_stream_status_enum_v01 {
+ USB_AUDIO_STREAM_STATUS_ENUM_MIN_VAL_V01 = INT_MIN,
+ USB_AUDIO_STREAM_REQ_SUCCESS_V01 = 0,
+ USB_AUDIO_STREAM_REQ_FAILURE_V01 = 1,
+ USB_AUDIO_STREAM_REQ_FAILURE_NOT_FOUND_V01 = 2,
+ USB_AUDIO_STREAM_REQ_FAILURE_INVALID_PARAM_V01 = 3,
+ USB_AUDIO_STREAM_REQ_FAILURE_MEMALLOC_V01 = 4,
+ USB_AUDIO_STREAM_STATUS_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
+enum usb_audio_device_indication_enum_v01 {
+ USB_AUDIO_DEVICE_INDICATION_ENUM_MIN_VAL_V01 = INT_MIN,
+ USB_AUDIO_DEV_CONNECT_V01 = 0,
+ USB_AUDIO_DEV_DISCONNECT_V01 = 1,
+ USB_AUDIO_DEV_SUSPEND_V01 = 2,
+ USB_AUDIO_DEV_RESUME_V01 = 3,
+ USB_AUDIO_DEVICE_INDICATION_ENUM_MAX_VAL_V01 = INT_MAX,
+};
+
struct qmi_uaudio_stream_req_msg_v01 {
- uint32_t priv_data;
uint8_t enable;
uint32_t usb_token;
+ uint8_t audio_format_valid;
uint32_t audio_format;
+ uint8_t number_of_ch_valid;
uint32_t number_of_ch;
+ uint8_t bit_rate_valid;
uint32_t bit_rate;
+ uint8_t xfer_buff_size_valid;
uint32_t xfer_buff_size;
};
-#define QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN 46
+#define QMI_UAUDIO_STREAM_REQ_MSG_V01_MAX_MSG_LEN 39
extern struct elem_info qmi_uaudio_stream_req_msg_v01_ei[];
struct qmi_uaudio_stream_resp_msg_v01 {
- uint32_t priv_data;
- uint32_t status;
+ struct qmi_response_type_v01 resp;
+ uint8_t status_valid;
+ enum usb_audio_stream_status_enum_v01 status;
+ uint8_t internal_status_valid;
+ uint32_t internal_status;
+ uint8_t slot_id_valid;
uint32_t slot_id;
- uint8_t bSubslotSize;
+ uint8_t usb_token_valid;
+ uint32_t usb_token;
+ uint8_t std_as_opr_intf_desc_valid;
struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+ uint8_t std_as_data_ep_desc_valid;
struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+ uint8_t std_as_sync_ep_desc_valid;
struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
- uint8_t bDelay;
- uint16_t bcdADC;
+ uint8_t usb_audio_spec_revision_valid;
+ uint16_t usb_audio_spec_revision;
+ uint8_t data_path_delay_valid;
+ uint8_t data_path_delay;
+ uint8_t usb_audio_subslot_size_valid;
+ uint8_t usb_audio_subslot_size;
+ uint8_t xhci_mem_info_valid;
struct apps_mem_info_v01 xhci_mem_info;
+ uint8_t interrupter_num_valid;
uint8_t interrupter_num;
};
-#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 177
+#define QMI_UAUDIO_STREAM_RESP_MSG_V01_MAX_MSG_LEN 191
extern struct elem_info qmi_uaudio_stream_resp_msg_v01_ei[];
struct qmi_uaudio_stream_ind_msg_v01 {
- uint32_t usb_token;
- uint32_t priv_data;
- uint32_t status;
+ enum usb_audio_device_indication_enum_v01 dev_event;
uint32_t slot_id;
- uint8_t bSubslotSize;
+ uint8_t usb_token_valid;
+ uint32_t usb_token;
+ uint8_t std_as_opr_intf_desc_valid;
struct usb_interface_descriptor_v01 std_as_opr_intf_desc;
+ uint8_t std_as_data_ep_desc_valid;
struct usb_endpoint_descriptor_v01 std_as_data_ep_desc;
+ uint8_t std_as_sync_ep_desc_valid;
struct usb_endpoint_descriptor_v01 std_as_sync_ep_desc;
+ uint8_t usb_audio_spec_revision_valid;
+ uint16_t usb_audio_spec_revision;
+ uint8_t data_path_delay_valid;
+ uint8_t data_path_delay;
+ uint8_t usb_audio_subslot_size_valid;
+ uint8_t usb_audio_subslot_size;
+ uint8_t xhci_mem_info_valid;
struct apps_mem_info_v01 xhci_mem_info;
+ uint8_t interrupter_num_valid;
+ uint8_t interrupter_num;
};
-#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 171
+#define QMI_UAUDIO_STREAM_IND_MSG_V01_MAX_MSG_LEN 177
extern struct elem_info qmi_uaudio_stream_ind_msg_v01_ei[];
#endif