-rw-r--r--  Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt  31
-rw-r--r--  Documentation/devicetree/bindings/gpu/adreno-iommu.txt  2
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt  4
-rw-r--r--  Documentation/devicetree/bindings/platform/msm/ipa.txt  1
-rw-r--r--  Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt  73
-rw-r--r--  Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt  6
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-qpnp.txt  1
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi  2
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi  2
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi  54
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi  50
-rw-r--r--  arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi  1
-rw-r--r--  arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi  1
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi  7
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi  2
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi  2
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi  2
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi  4
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi  42
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi  32
-rw-r--r--  arch/arm64/configs/msmfalcon_defconfig  1
-rw-r--r--  drivers/char/adsprpc.c  8
-rw-r--r--  drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c  83
-rw-r--r--  drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h  1
-rw-r--r--  drivers/cpuidle/lpm-levels.c  321
-rw-r--r--  drivers/cpuidle/lpm-levels.h  17
-rw-r--r--  drivers/gpu/msm/Makefile  2
-rw-r--r--  drivers/gpu/msm/adreno.c  26
-rw-r--r--  drivers/gpu/msm/adreno.h  43
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c  116
-rw-r--r--  drivers/gpu/msm/adreno_a5xx_preempt.c  2
-rw-r--r--  drivers/gpu/msm/adreno_debugfs.c  76
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c  1244
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.h  38
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c  54
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.h  27
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c  178
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.h  6
-rw-r--r--  drivers/gpu/msm/adreno_trace.h  64
-rw-r--r--  drivers/gpu/msm/kgsl.c  268
-rw-r--r--  drivers/gpu/msm/kgsl.h  19
-rw-r--r--  drivers/gpu/msm/kgsl_cffdump.c  4
-rw-r--r--  drivers/gpu/msm/kgsl_cffdump.h  6
-rw-r--r--  drivers/gpu/msm/kgsl_cmdbatch.h  168
-rw-r--r--  drivers/gpu/msm/kgsl_compat.h  8
-rw-r--r--  drivers/gpu/msm/kgsl_device.h  14
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.c (renamed from drivers/gpu/msm/kgsl_cmdbatch.c)  642
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.h  198
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c  13
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.h  2
-rw-r--r--  drivers/gpu/msm/kgsl_trace.h  44
-rw-r--r--  drivers/iio/adc/qcom-rradc.c  100
-rw-r--r--  drivers/input/misc/ots_pat9125/pat9125_linux_driver.c  478
-rw-r--r--  drivers/input/misc/ots_pat9125/pixart_ots.c  72
-rw-r--r--  drivers/input/misc/ots_pat9125/pixart_ots.h  41
-rw-r--r--  drivers/input/misc/ots_pat9125/pixart_platform.h  18
-rw-r--r--  drivers/iommu/arm-smmu.c  39
-rw-r--r--  drivers/iommu/dma-mapping-fast.c  6
-rw-r--r--  drivers/iommu/io-pgtable-arm.c  4
-rw-r--r--  drivers/iommu/iommu-debug.c  22
-rw-r--r--  drivers/iommu/msm_dma_iommu_mapping.c  12
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c  15
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c  131
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c  8
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_res_parse.c  9
-rw-r--r--  drivers/media/platform/msm/vidc/venus_boot.c  13
-rw-r--r--  drivers/net/wireless/cnss/cnss_pci.c  10
-rw-r--r--  drivers/platform/msm/gsi/gsi.c  215
-rw-r--r--  drivers/platform/msm/gsi/gsi.h  10
-rw-r--r--  drivers/platform/msm/gsi/gsi_dbg.c  53
-rw-r--r--  drivers/platform/msm/gsi/gsi_reg.h  217
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c  42
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c  68
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h  2
-rw-r--r--  drivers/platform/msm/msm_11ad/msm_11ad.c  12
-rw-r--r--  drivers/power/qcom-charger/fg-core.h  46
-rw-r--r--  drivers/power/qcom-charger/fg-reg.h  1
-rw-r--r--  drivers/power/qcom-charger/fg-util.c  6
-rw-r--r--  drivers/power/qcom-charger/qpnp-fg-gen3.c  957
-rw-r--r--  drivers/power/qcom-charger/qpnp-smb2.c  16
-rw-r--r--  drivers/power/qcom-charger/smb-lib.c  69
-rw-r--r--  drivers/power/qcom-charger/smb-lib.h  3
-rw-r--r--  drivers/power/qcom-charger/smb-reg.h  10
-rw-r--r--  drivers/pwm/pwm-qpnp.c  11
-rw-r--r--  drivers/soc/qcom/icnss.c  10
-rw-r--r--  drivers/soc/qcom/qdsp6v2/msm_audio_ion.c  5
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c  82
-rw-r--r--  drivers/video/fbdev/msm/mdss.h  1
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c  138
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.h  2
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c  34
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_util.c  67
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_util.h  61
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_panel.c  44
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.c  22
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.h  2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c  2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h  7
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c  11
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_hwio.h  4
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c  84
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c  30
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c  10
-rw-r--r--  drivers/video/fbdev/msm/mdss_panel.h  4
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.c  8
-rw-r--r--  drivers/video/fbdev/msm/msm_ext_display.c  18
-rw-r--r--  fs/ext4/inode.c  7
-rw-r--r--  include/linux/cgroup_subsys.h  4
-rw-r--r--  include/linux/iommu.h  1
-rw-r--r--  include/linux/msm_ext_display.h  1
-rw-r--r--  include/linux/msm_gsi.h  10
-rw-r--r--  include/trace/events/trace_msm_low_power.h  58
-rw-r--r--  init/Kconfig  17
-rw-r--r--  kernel/sched/tune.c  224
-rw-r--r--  kernel/sysctl.c  4
-rw-r--r--  lib/asn1_decoder.c  16
-rw-r--r--  net/ipv4/tcp_input.c  15
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c  287
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h  14
119 files changed, 5282 insertions, 2750 deletions
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 90abf0305319..68b8f09238e0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -249,6 +249,35 @@ Optional properties:
60 = 60 frames per second (default)
- qcom,mdss-dsi-panel-clockrate: A 64 bit value specifies the panel clock speed in Hz.
0 = default value.
+- qcom,mdss-mdp-kickoff-threshold: This property can be used to define a region
+ (in terms of scanlines) where the hardware is allowed
+ to trigger a data transfer from MDP to DSI.
+ If this property is used, the region must be defined by setting
+ two values, the low and the high thresholds:
+ <low_threshold high_threshold>
+ where the following condition must be met:
+ low_threshold < high_threshold
+ These values are used by the driver in such a way that if
+ the driver receives a request to kick off a transfer (MDP to DSI),
+ the transfer will be triggered only if the following condition
+ is satisfied:
+ low_threshold < scanline < high_threshold
+ If the condition is not met, then the driver will delay the
+ transfer by the time defined in the following property:
+ "qcom,mdss-mdp-kickoff-delay".
+ So in order to use this property, the delay property must
+ be defined as well and must be greater than 0.
+- qcom,mdss-mdp-kickoff-delay: This property defines the delay in microseconds
+ that the driver will wait before triggering an MDP transfer if the
+ thresholds defined by the following property are not met:
+ "qcom,mdss-mdp-kickoff-threshold".
+ So in order to use this property, the threshold property must
+ be defined as well. Note that this delay cannot be zero
+ and also should not be greater than the frame window,
+ i.e. for 60 fps the value should not exceed 16666 us.
- qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode
panels in microseconds. Driver uses this number to adjust
the clock rate according to the expected transfer time.
@@ -568,6 +597,8 @@ Example:
qcom,mdss-dsi-dma-trigger = <0>;
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-panel-clockrate = <424000000>;
+ qcom,mdss-mdp-kickoff-threshold = <11 2430>;
+ qcom,mdss-mdp-kickoff-delay = <1000>;
qcom,mdss-mdp-transfer-time-us = <12500>;
qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
22 27 1e 03 04 00];
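A quick sanity check of the example values above, using the rules from the new binding text: with qcom,mdss-mdp-kickoff-threshold = <11 2430>, a kickoff request is honored only while 11 < scanline < 2430; otherwise the driver waits the qcom,mdss-mdp-kickoff-delay of 1000 us and retries. At 60 fps the frame window is 1000000 / 60 ≈ 16666 us, so the constraint 0 < delay <= frame window is satisfied.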
diff --git a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
index de88a6eba7a5..b399145ea8a2 100644
--- a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
@@ -36,8 +36,6 @@ Optional properties:
for secure buffer allocation
- qcom,secure_align_mask: A mask for determining how secure buffers need to
be aligned
-- qcom,coherent-htw: A boolean specifying if coherent hardware table walks should
- be enabled.
- List of sub nodes, one for each of the translation context banks supported.
The driver uses the names of these nodes to determine how they are used,
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
index 721a4f72563e..1ab49edfe30c 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
@@ -41,6 +41,10 @@ The channel list supported by the RRADC driver is available in the enum rradc_ch
located at drivers/iio/adc/qcom-rradc.c. Clients can use this index from the enum
as the channel number while requesting ADC reads.
+Optional property:
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+ PMIC fabrication ID for applying the appropriate temperature
+ compensation parameters.
Example:
/* RRADC node */
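As a minimal sketch of the new optional property (the node layout and compatible string are illustrative, not taken from this patch; the phandle matches the msm-pmicobalt.dtsi hunk further down):

	rradc@4500 {
		compatible = "qcom,rradc";
		reg = <0x4500 0x100>;
		#io-channel-cells = <1>;
		/* illustrative: lets the driver query the PMIC
		 * fabrication ID for temperature compensation
		 */
		qcom,pmic-revid = <&pmicobalt_revid>;
	};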
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index a8db893f6709..80f2d8f43e35 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -36,7 +36,6 @@ Optional:
compatible "qcom,ipa-smmu-wlan-cb"
- ipa_smmu_uc: uc SMMU device
compatible "qcom,ipa-smmu-uc-cb"
-- qcom,smmu-disable-htw: boolean value to turn off SMMU page table caching
- qcom,use-a2-service: determine if A2 service will be used
- qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used
- qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
index bd358593fcb3..7841251c67fe 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
@@ -87,7 +87,8 @@ First Level Node - FG Gen3 device
Value type: <u32>
Definition: Percentage of monotonic SOC increase upon which the delta
SOC interrupt will be triggered. If this property is not
- specified, then the default value will be 1.
+ specified, then the default value will be 1. Possible
+ values are in the range of 0 to 12.
- qcom,fg-recharge-soc-thr
Usage: optional
@@ -145,6 +146,76 @@ First Level Node - FG Gen3 device
Value type: <bool>
Definition: Enables the cycle counter feature.
+- qcom,fg-force-load-profile
+ Usage: optional
+ Value type: <bool>
+ Definition: If set, the battery profile will be force loaded if the
+ profile loaded earlier by the bootloader doesn't match the
+ profile available in the device tree.
+
+- qcom,cl-start-capacity
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery SOC threshold to start the capacity learning.
+ If this is not specified, then the default value used
+ will be 15.
+
+- qcom,cl-min-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Lower limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 150. Unit is in decidegC.
+
+- qcom,cl-max-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Upper limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 450 (45C). Unit is in decidegC.
+
+- qcom,cl-max-increment
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity increment allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 5 (0.5%). Unit is in decipercentage.
+
+- qcom,cl-max-decrement
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity decrement allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 100 (10%). Unit is in decipercentage.
+
+- qcom,cl-min-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum limit below which the capacity cannot drop in a
+ capacity learning cycle. If this is not specified, then
+ the default value is 0. Unit is in decipercentage.
+
+- qcom,cl-max-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum limit above which the capacity cannot rise in a
+ capacity learning cycle. If this is not specified, then
+ the default value is 0. Unit is in decipercentage.
+
+- qcom,fg-jeita-hyst-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Hysteresis applied to Jeita temperature comparison.
+ Possible values are:
+ 0 - No hysteresis
+ 1,2,3 - Value in Celsius.
+
+- qcom,fg-batt-temp-delta
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery temperature delta interrupt threshold. Possible
+ values are: 2, 4, 6 and 10. Unit is in Kelvin.
+
==========================================================
Second Level Nodes - Peripherals managed by FG Gen3 driver
==========================================================
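Taken together, a sketch of an FG node using the capacity-learning and temperature properties documented above (the node name follows the msm-pmicobalt.dtsi hunk below; the values shown are the documented defaults or legal examples, picked for illustration):

	pmicobalt_fg: qpnp,fg {
		/* ... existing FG properties ... */
		qcom,cl-start-capacity = <15>;	/* start learning at 15% SOC */
		qcom,cl-min-temp = <150>;	/* 15.0 degC */
		qcom,cl-max-temp = <450>;	/* 45.0 degC */
		qcom,cl-max-increment = <5>;	/* 0.5% per cycle */
		qcom,cl-max-decrement = <100>;	/* 10% per cycle */
		qcom,fg-jeita-hyst-temp = <3>;	/* 3 degC hysteresis */
		qcom,fg-batt-temp-delta = <2>;	/* 2 K interrupt delta */
	};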
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
index 21404dfc4b7b..12ac75a8608c 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
@@ -53,6 +53,12 @@ Charger specific properties:
Definition: Specifies the USB input current limit in micro-amps.
If the value is not present, 1.5Amps is used as default.
+- qcom,usb-ocl-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the OTG output current limit in micro-amps.
+ If the value is not present, 1.5Amps is used as default.
+
- qcom,dc-icl-ua
Usage: optional
Value type: <u32>
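For example (illustrative node, not from this patch), stating the 1.5 A OTG default explicitly:

	qcom,qpnp-smb2 {
		/* ... */
		qcom,usb-ocl-ua = <1500000>;	/* 1.5 A OTG current limit */
	};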
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
index c784a01d6411..8cb513b5605f 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
@@ -15,6 +15,7 @@ Required device bindings:
- reg-names: Name for the above register.
"qpnp-lpg-channel-base" = physical base address of the
controller's LPG channel register.
+- qcom,lpg-lut-size: LPG LUT size.
- qcom,channel-id: channel Id for the PWM.
- qcom,supported-sizes: Supported PWM sizes.
Following three pwm sizes lists are supported by PWM/LPG controllers.
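For reference, the msm-pmicobalt.dtsi hunks later in this patch set qcom,lpg-lut-size = <0x7e> on every LPG channel, matching the 0x7e-byte qpnp-lpg-lut-base register region those nodes already declare (reg = <0xb042 0x7e>).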
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
index 95a8e80ccdbd..9ad9e4adce00 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
@@ -43,7 +43,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-te-pin-select = <1>;
qcom,mdss-dsi-wr-mem-start = <0x2c>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
index fd11be721dbb..6b549a4af6eb 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
@@ -76,7 +76,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,compression-mode = "dsc";
qcom,config-select = <&dsi_nt35597_dsc_video_config0>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
index b6f19b78ea70..1e42d0846acf 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,33 +59,33 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 10
- 15 01 00 00 10 00 02 b0 03
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 24
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 c6 06
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 10
+ 15 01 00 00 00 00 02 b0 03
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 c6 06
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
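Decoding the arrays above with the usual MDSS DSI command header layout (an assumption; the patch itself doesn't spell it out): byte 0 is the DSI data type, byte 1 the last-in-batch flag, bytes 2-3 the virtual channel and ACK flags, byte 4 the post-command wait in ms, bytes 5-6 the payload length, then the payload. Read that way, the change removes the blanket 0x10 (16 ms) wait after every register write and keeps delays only where the panel needs them: 0x78 (120 ms) after sleep-out (0x11) and 0x32 (50 ms) after display-on (0x29) on the on path, and 0x0a (10 ms) after display-off (0x28) and 0x3c (60 ms) after sleep-in (0x10) on the off path. The video-mode variant of this panel gets the same rework in the next file.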
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
index 367384a8c3e5..82413bfbca89 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
@@ -29,30 +29,30 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 03
- 15 01 00 00 10 00 02 b0 03
- 39 01 00 00 10 00 06 3b 03 08 08 64 9a
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 03
+ 15 01 00 00 00 00 02 b0 03
+ 39 01 00 00 00 00 06 3b 03 08 08 64 9a
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
@@ -69,7 +69,7 @@
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-min-refresh-rate = <55>;
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
index 90df1d0c1ac0..69067f5f1cc7 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -13,6 +13,7 @@
qcom,ascent_3450mah {
/* #Ascent_860_82209_0000_3450mAh_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3450>;
qcom,batt-id-kohm = <60>;
qcom,battery-beta = <3435>;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
index 2c1edde56d6a..c3f23b75fa9c 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
@@ -13,6 +13,7 @@
qcom,itech_3000mah {
/* #Itech_B00826LF_3000mAh_ver1660_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3000>;
qcom,batt-id-kohm = <100>;
qcom,battery-beta = <3450>;
diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
index 4f76276b2790..28d230dfb6bf 100644
--- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
@@ -311,6 +311,7 @@
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
+ qcom,pmic-revid = <&pmicobalt_revid>;
};
pmicobalt_fg: qpnp,fg {
@@ -386,6 +387,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <1>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <0>;
@@ -399,6 +401,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <2>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <1>;
@@ -412,6 +415,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <3>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <2>;
@@ -424,6 +428,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <4>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <3>;
@@ -436,6 +441,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <5>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <4>;
@@ -448,6 +454,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <6>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <5>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index 085ca0187ee6..fcceac6e2469 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -21,6 +21,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
index d973bc5ed84f..0278cbde90ce 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
@@ -82,6 +82,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
@@ -90,6 +91,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
};
&dsi_dual_nt35597_truly_video {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index 99402e3033ed..f9bb6e512d33 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -22,6 +22,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
index 6018124caf68..a20d80fda72b 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
@@ -24,7 +24,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -40,7 +40,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
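The replacement value packs the same number into both halfwords: 0x458 = 1112, which reads naturally as a 1112 mV AVS upper/lower limit and lines up with the 1112000 uV ceiling (qcom,cpr-aging-ref-voltage and the pmcobalt_s10/s13 maximums) added in msmcobalt-regulator.dtsi below; the v2 override later in this patch likewise uses 0x4200420, i.e. 0x420 = 1056 mV against its 1056000 uV ceiling. The halfword interpretation is an inference from the matching numbers, not something this patch states.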
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index c2d45ec3ef07..2a61cccad273 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -537,6 +537,28 @@
qcom,enable-time = <500>;
};
};
+
+ qcom,pmcobalt@1 {
+ pmcobalt_s10: regulator@2f00 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x2f00 0x100>;
+ regulator-name = "pmcobalt_s10";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+
+ pmcobalt_s13: regulator@3800 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x3800 0x100>;
+ regulator-name = "pmcobalt_s13";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+ };
};
/* Stub regulators */
@@ -590,6 +612,9 @@
qcom,cpr-panic-reg-name-list =
"PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s10>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -712,6 +737,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
@@ -752,6 +784,9 @@
qcom,cpr-panic-reg-name-list =
"PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s13>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -894,6 +929,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <25>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index 74aae4051ad6..9b791d6b7fb0 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -227,9 +227,20 @@
qcom,max-bandwidth-per-pipe-kbps = <4700000>;
};
+&pmcobalt_s10 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
+&pmcobalt_s13 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
&apc0_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc0_pwrcl_vreg {
@@ -371,11 +382,16 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <22 22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
};
&apc1_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc1_perfcl_vreg {
@@ -527,6 +543,10 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <30 26>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
};
&pm8005_s1 {
@@ -693,6 +713,18 @@
};
+&soc {
+ /* Gold L2 SAW */
+ qcom,spm@178120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
+
+ /* Silver L2 SAW */
+ qcom,spm@179120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
+};
+
/* GPU overrides */
&msm_gpu {
/* Updated chip ID */
diff --git a/arch/arm64/configs/msmfalcon_defconfig b/arch/arm64/configs/msmfalcon_defconfig
index 34f0da3c37a4..348c34a94119 100644
--- a/arch/arm64/configs/msmfalcon_defconfig
+++ b/arch/arm64/configs/msmfalcon_defconfig
@@ -519,6 +519,7 @@ CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 13116f010e89..63dc23387133 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -2275,7 +2275,6 @@ static int fastrpc_cb_probe(struct device *dev)
const char *name;
unsigned int start = 0x80000000;
int err = 0, i;
- int disable_htw = 1;
int secure_vmid = VMID_CP_PIXEL;
VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
@@ -2311,9 +2310,6 @@ static int fastrpc_cb_probe(struct device *dev)
start, 0x7fffffff)));
if (err)
goto bail;
- iommu_domain_set_attr(sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
iommu_set_fault_handler(sess->smmu.mapping->domain,
fastrpc_smmu_fault_handler, sess);
if (sess->smmu.secure)
@@ -2341,7 +2337,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
unsigned int *range = 0, range_size = 0;
unsigned int *sids = 0, sids_size = 0;
int err = 0, ret = 0, i;
- int disable_htw = 1;
VERIFY(err, 0 != (domains_child_node = of_get_child_by_name(
dev->of_node,
@@ -2395,9 +2390,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
range[0], range[1])));
if (err)
goto bail;
- iommu_domain_set_attr(first_sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
VERIFY(err, !arm_iommu_attach_device(first_sess->dev,
first_sess->smmu.mapping));
if (err)
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
index 9a080e4ee39b..a574a9cd2b5a 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/clk/msm-clock-generic.h>
+#include <linux/usb/usbpd.h>
#include "mdss-pll.h"
#include "mdss-dp-pll.h"
@@ -172,9 +173,27 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
{
u32 res = 0;
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
+ pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+ __func__, spare_value, ln_cnt, orientation);
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x2d);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x35);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x3d);
+ }
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x3d);
/* Make sure the PHY register writes are done */
wmb();
MDSS_PLL_REG_W(dp_res->pll_base,
@@ -314,8 +333,13 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
/* Make sure the PLL register writes are done */
wmb();
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, 0x58);
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x48);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x58);
+
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_TX0_TX1_LANE_CTL, 0x05);
MDSS_PLL_REG_W(dp_res->phy_base,
@@ -427,6 +451,12 @@ static int dp_pll_enable(struct clk *c)
u32 status;
struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value, bias_en, drvr_en;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x01);
@@ -474,18 +504,45 @@ static int dp_pll_enable(struct clk *c)
pr_debug("%s: PLL is locked\n", __func__);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ if (ln_cnt == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC1) {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ drvr_en);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
+ drvr_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
0x0a);
@@ -615,7 +672,7 @@ int dp_vco_prepare(struct clk *c)
rc = dp_pll_enable(c);
if (rc) {
mdss_pll_resource_enable(dp_pll_res, false);
- pr_err("ndx=%d failed to enable dsi pll\n",
+ pr_err("ndx=%d failed to enable dp pll\n",
dp_pll_res->index);
goto error;
}
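As the new code reads it, DP_PHY_SPARE0 carries the Type-C link state handed over from the USB PD stack: bits [3:0] hold the lane count and bits [7:4] the plug orientation (the ORIENTATION_CC1/CC2 constants come from the newly included linux/usb/usbpd.h). With fewer than four lanes, only the TX pair on the active orientation is powered (DP_PHY_PD_CTL 0x2d for CC2, 0x35 for CC1) and DP_PHY_MODE becomes 0x48 for CC2 instead of 0x58; the four-lane case keeps the original 0x3d/0x58 programming and biases both TX blocks.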
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
index d89545b38e64..28f21ed1fe0d 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
@@ -41,6 +41,7 @@
#define DP_PHY_TX0_TX1_LANE_CTL 0x0068
#define DP_PHY_TX2_TX3_LANE_CTL 0x0084
+#define DP_PHY_SPARE0 0x00A8
#define DP_PHY_STATUS 0x00BC
/* Tx registers */
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index ced95aa2b649..37e504381313 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -85,8 +85,6 @@ struct lpm_debug {
struct lpm_cluster *lpm_root_node;
-#define MAXSAMPLES 5
-
static bool lpm_prediction;
module_param_named(lpm_prediction,
lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP);
@@ -108,6 +106,7 @@ struct lpm_history {
uint32_t hptr;
uint32_t hinvalid;
uint32_t htmr_wkup;
+ int64_t stime;
};
static DEFINE_PER_CPU(struct lpm_history, hist);
@@ -359,9 +358,6 @@ static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
static void histtimer_cancel(void)
{
- if (!lpm_prediction)
- return;
-
hrtimer_try_to_cancel(&histtimer);
}
@@ -383,6 +379,51 @@ static void histtimer_start(uint32_t time_us)
hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}
+static void cluster_timer_init(struct lpm_cluster *cluster)
+{
+ struct list_head *list;
+
+ if (!cluster)
+ return;
+
+ hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ cluster_timer_init(n);
+ }
+}
+
+static void clusttimer_cancel(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+ hrtimer_try_to_cancel(&cluster->histtimer);
+ hrtimer_try_to_cancel(&cluster->parent->histtimer);
+}
+
+static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
+{
+ struct lpm_cluster *cluster = container_of(h,
+ struct lpm_cluster, histtimer);
+
+ cluster->history.hinvalid = 1;
+ return HRTIMER_NORESTART;
+}
+
+static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
+{
+ uint64_t time_ns = time_us * NSEC_PER_USEC;
+ ktime_t clust_ktime = ns_to_ktime(time_ns);
+
+ cluster->histtimer.function = clusttimer_fn;
+ hrtimer_start(&cluster->histtimer, clust_ktime,
+ HRTIMER_MODE_REL_PINNED);
+}
+
static void msm_pm_set_timer(uint32_t modified_time_us)
{
u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
@@ -492,14 +533,17 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
if (history->hinvalid) {
history->hinvalid = 0;
history->htmr_wkup = 1;
+ history->stime = 0;
return 0;
}
/*
* Predict only when all the samples are collected.
*/
- if (history->nsamp < MAXSAMPLES)
+ if (history->nsamp < MAXSAMPLES) {
+ history->stime = 0;
return 0;
+ }
/*
* Check if the samples are not much deviated, if so use the
@@ -540,6 +584,7 @@ again:
*/
if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
|| stddev <= ref_stddev) {
+ history->stime = ktime_to_us(ktime_get()) + avg;
return avg;
} else if (divisor > (MAXSAMPLES - 1)) {
thresh = max - 1;
@@ -567,6 +612,8 @@ again:
*idx_restrict = j;
do_div(total, failed);
*idx_restrict_time = total;
+ history->stime = ktime_to_us(ktime_get())
+ + *idx_restrict_time;
break;
}
}
@@ -584,6 +631,7 @@ static inline void invalidate_predict_history(struct cpuidle_device *dev)
if (history->hinvalid) {
history->hinvalid = 0;
history->htmr_wkup = 1;
+ history->stime = 0;
}
}
@@ -603,6 +651,7 @@ static void clear_predict_history(void)
history->mode[i] = -1;
history->hptr = 0;
history->nsamp = 0;
+ history->stime = 0;
}
}
}
@@ -724,12 +773,14 @@ static int cpu_power_select(struct cpuidle_device *dev,
}
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
- struct cpumask *mask, bool from_idle)
+ struct cpumask *mask, bool from_idle, uint32_t *pred_time)
{
int cpu;
int next_cpu = raw_smp_processor_id();
ktime_t next_event;
struct cpumask online_cpus_in_cluster;
+ struct lpm_history *history;
+ int64_t prediction = LONG_MAX;
next_event.tv64 = KTIME_MAX;
if (!suspend_wake_time)
@@ -754,11 +805,21 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
next_event.tv64 = next_event_c->tv64;
next_cpu = cpu;
}
+
+ if (from_idle && lpm_prediction) {
+ history = &per_cpu(hist, cpu);
+ if (history->stime && (history->stime < prediction))
+ prediction = history->stime;
+ }
}
if (mask)
cpumask_copy(mask, cpumask_of(next_cpu));
+ if (from_idle && lpm_prediction) {
+ if (prediction > ktime_to_us(ktime_get()))
+ *pred_time = prediction - ktime_to_us(ktime_get());
+ }
if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
return ktime_to_us(ktime_sub(next_event, ktime_get()));
@@ -766,18 +827,193 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
return 0;
}
-static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
+static int cluster_predict(struct lpm_cluster *cluster,
+ uint32_t *pred_us)
+{
+ int i, j;
+ int ret = 0;
+ struct cluster_history *history = &cluster->history;
+ int64_t cur_time = ktime_to_us(ktime_get());
+
+ if (!lpm_prediction)
+ return 0;
+
+ if (history->hinvalid) {
+ history->hinvalid = 0;
+ history->htmr_wkup = 1;
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->nsamp == MAXSAMPLES) {
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((cur_time - history->stime[i])
+ > CLUST_SMPL_INVLD_TIME)
+ history->nsamp--;
+ }
+ }
+
+ if (history->nsamp < MAXSAMPLES) {
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->flag == 2)
+ history->flag = 0;
+
+ if (history->htmr_wkup != 1) {
+ uint64_t total = 0;
+
+ if (history->flag == 1) {
+ for (i = 0; i < MAXSAMPLES; i++)
+ total += history->resi[i];
+ do_div(total, MAXSAMPLES);
+ *pred_us = total;
+ return 2;
+ }
+
+ for (j = 1; j < cluster->nlevels; j++) {
+ uint32_t failed = 0;
+
+ total = 0;
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((history->mode[i] == j) && (history->resi[i]
+ < cluster->levels[j].pwr.min_residency)) {
+ failed++;
+ total += history->resi[i];
+ }
+ }
+
+ if (failed > (MAXSAMPLES-2)) {
+ do_div(total, failed);
+ *pred_us = total;
+ history->flag = 1;
+ return 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void update_cluster_history_time(struct cluster_history *history,
+ int idx, uint64_t start)
+{
+ history->entry_idx = idx;
+ history->entry_time = start;
+}
+
+static void update_cluster_history(struct cluster_history *history, int idx)
+{
+ uint32_t tmr = 0;
+ uint32_t residency = 0;
+ struct lpm_cluster *cluster =
+ container_of(history, struct lpm_cluster, history);
+
+ if (!lpm_prediction)
+ return;
+
+ if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
+ residency = ktime_to_us(ktime_get()) - history->entry_time;
+ history->stime[history->hptr] = history->entry_time;
+ } else
+ return;
+
+ if (history->htmr_wkup) {
+ if (!history->hptr)
+ history->hptr = MAXSAMPLES-1;
+ else
+ history->hptr--;
+
+ history->resi[history->hptr] += residency;
+
+ history->htmr_wkup = 0;
+ tmr = 1;
+ } else {
+ history->resi[history->hptr] = residency;
+ }
+
+ history->mode[history->hptr] = idx;
+
+ history->entry_idx = INT_MIN;
+ history->entry_time = 0;
+
+ if (history->nsamp < MAXSAMPLES)
+ history->nsamp++;
+
+ trace_cluster_pred_hist(cluster->cluster_name,
+ history->mode[history->hptr], history->resi[history->hptr],
+ history->hptr, tmr);
+
+ (history->hptr)++;
+
+ if (history->hptr >= MAXSAMPLES)
+ history->hptr = 0;
+}
+
+static void clear_cl_history_each(struct cluster_history *history)
+{
+ int i;
+
+ for (i = 0; i < MAXSAMPLES; i++) {
+ history->resi[i] = 0;
+ history->mode[i] = -1;
+ history->stime[i] = 0;
+ }
+ history->hptr = 0;
+ history->nsamp = 0;
+ history->flag = 0;
+ history->hinvalid = 0;
+ history->htmr_wkup = 0;
+}
+
+static void clear_cl_predict_history(void)
+{
+ struct lpm_cluster *cluster = lpm_root_node;
+ struct list_head *list;
+
+ if (!lpm_prediction)
+ return;
+
+ clear_cl_history_each(&cluster->history);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ clear_cl_history_each(&n->history);
+ }
+}
+
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
+ int *ispred)
{
int best_level = -1;
int i;
struct cpumask mask;
uint32_t latency_us = ~0U;
uint32_t sleep_us;
+ uint32_t cpupred_us = 0, pred_us = 0;
+ int pred_mode = 0, predicted = 0;
if (!cluster)
return -EINVAL;
- sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
+ sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
+ from_idle, &cpupred_us);
+
+ if (from_idle) {
+ pred_mode = cluster_predict(cluster, &pred_us);
+
+ if (cpupred_us && pred_mode && (cpupred_us < pred_us))
+ pred_us = cpupred_us;
+
+ if (pred_us && pred_mode && (pred_us < sleep_us))
+ predicted = 1;
+
+ if (predicted && (pred_us == cpupred_us))
+ predicted = 2;
+ }
if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
@@ -823,10 +1059,19 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
best_level = i;
- if (sleep_us <= pwr_params->max_residency)
+ if (predicted ? (pred_us <= pwr_params->max_residency)
+ : (sleep_us <= pwr_params->max_residency))
break;
}
+ if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
+ cluster->history.flag = 2;
+
+ *ispred = predicted;
+
+ trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
+ latency_us, predicted, pred_us);
+
return best_level;
}
@@ -840,7 +1085,7 @@ static void cluster_notify(struct lpm_cluster *cluster,
}
static int cluster_configure(struct lpm_cluster *cluster, int idx,
- bool from_idle)
+ bool from_idle, int predicted)
{
struct lpm_cluster_level *level = &cluster->levels[idx];
int ret, i;
@@ -858,6 +1103,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
lpm_stats_cluster_enter(cluster->stats, idx);
+
+ if (from_idle && lpm_prediction)
+ update_cluster_history_time(&cluster->history, idx,
+ ktime_to_us(ktime_get()));
}
for (i = 0; i < cluster->ndevices; i++) {
@@ -869,8 +1118,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
if (level->notify_rpm) {
struct cpumask nextcpu, *cpumask;
uint64_t us;
+ uint32_t pred_us;
- us = get_cluster_sleep_time(cluster, &nextcpu, from_idle);
+ us = get_cluster_sleep_time(cluster, &nextcpu,
+ from_idle, &pred_us);
cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
ret = msm_rpm_enter_sleep(0, cpumask);
@@ -881,6 +1132,8 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
us = us + 1;
clear_predict_history();
+ clear_cl_predict_history();
+
do_div(us, USEC_PER_SEC/SCLK_HZ);
msm_mpm_enter_sleep(us, from_idle, cpumask);
}
@@ -891,6 +1144,15 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0);
cluster->last_level = idx;
+
+ if (predicted && (idx < (cluster->nlevels - 1))) {
+ struct power_params *pwr_params = &cluster->levels[idx].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+
return 0;
failed_set_mode:
@@ -909,6 +1171,7 @@ static void cluster_prepare(struct lpm_cluster *cluster,
int64_t start_time)
{
int i;
+ int predicted = 0;
if (!cluster)
return;
@@ -939,12 +1202,28 @@ static void cluster_prepare(struct lpm_cluster *cluster,
&cluster->child_cpus))
goto failed;
- i = cluster_select(cluster, from_idle);
+ i = cluster_select(cluster, from_idle, &predicted);
+
+ if (((i < 0) || (i == cluster->default_level))
+ && predicted && from_idle) {
+ update_cluster_history_time(&cluster->history,
+ -1, ktime_to_us(ktime_get()));
+
+ if (i < 0) {
+ struct power_params *pwr_params =
+ &cluster->levels[0].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster,
+ pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+ }
if (i < 0)
goto failed;
- if (cluster_configure(cluster, i, from_idle))
+ if (cluster_configure(cluster, i, from_idle, predicted))
goto failed;
cluster->stats->sleep_time = start_time;
@@ -988,6 +1267,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
&lvl->num_cpu_votes, cpu);
}
+ if (from_idle && first_cpu &&
+ (cluster->last_level == cluster->default_level))
+ update_cluster_history(&cluster->history, cluster->last_level);
+
if (!first_cpu || cluster->last_level == cluster->default_level)
goto unlock_return;
@@ -1029,6 +1312,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0);
cluster_notify(cluster, &cluster->levels[last_level], false);
+
+ if (from_idle)
+ update_cluster_history(&cluster->history, last_level);
+
cluster_unprepare(cluster->parent, &cluster->child_cpus,
last_level, from_idle, end_time);
unlock_return:
@@ -1288,7 +1575,10 @@ exit:
update_history(dev, idx);
trace_cpu_idle_exit(idx, success);
local_irq_enable();
- histtimer_cancel();
+ if (lpm_prediction) {
+ histtimer_cancel();
+ clusttimer_cancel();
+ }
return idx;
}
@@ -1561,6 +1851,7 @@ static int lpm_probe(struct platform_device *pdev)
suspend_set_ops(&lpm_suspend_ops);
hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cluster_timer_init(lpm_root_node);
ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
if (ret) {
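A worked pass through the new cluster-level predictor, as far as this hunk shows it: with MAXSAMPLES = 5, cluster_predict() first invalidates samples older than CLUST_SMPL_INVLD_TIME (40000 us = 40 ms). Once five valid samples exist, if more than MAXSAMPLES - 2 = 3 of them entered some level j but slept less than that level's min_residency, the average of those short residencies becomes the prediction, and cluster_select() then chooses a level whose max_residency covers the prediction rather than trusting the longer next-event estimate. A per-cluster hrtimer (clusttimer) armed for max_residency + tmr_add re-evaluates the decision if the predicted wakeup never arrives, so a misprediction costs one shallow sleep instead of sticking.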
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 63fe0a0fbc08..3c9665ea8981 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -14,6 +14,8 @@
#include <soc/qcom/spm.h>
#define NR_LPM_LEVELS 8
+#define MAXSAMPLES 5
+#define CLUST_SMPL_INVLD_TIME 40000
extern bool use_psci;
@@ -85,6 +87,19 @@ struct low_power_ops {
enum msm_pm_l2_scm_flag tz_flag;
};
+struct cluster_history {
+ uint32_t resi[MAXSAMPLES];
+ int mode[MAXSAMPLES];
+ int64_t stime[MAXSAMPLES];
+ uint32_t hptr;
+ uint32_t hinvalid;
+ uint32_t htmr_wkup;
+ uint64_t entry_time;
+ int entry_idx;
+ int nsamp;
+ int flag;
+};
+
struct lpm_cluster {
struct list_head list;
struct list_head child;
@@ -109,6 +124,8 @@ struct lpm_cluster {
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
bool no_saw_devices;
+ struct cluster_history history;
+ struct hrtimer histtimer;
};
int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 90aee3cad5ad..625a2640b4c4 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -3,7 +3,7 @@ ccflags-y := -Idrivers/staging/android
msm_kgsl_core-y = \
kgsl.o \
kgsl_trace.o \
- kgsl_cmdbatch.o \
+ kgsl_drawobj.o \
kgsl_ioctl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 1356835d0e93..e9d16426d4a5 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -40,6 +40,7 @@
/* Include the master list of GPU cores that are supported */
#include "adreno-gpulist.h"
+#include "adreno_dispatch.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "adreno."
@@ -1015,8 +1016,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv))
- kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer);
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
+ kgsl_free_global(device, &adreno_dev->profile_buffer);
/* Free local copies of firmware and other command streams */
kfree(adreno_dev->pfp_fw);
@@ -1187,22 +1188,22 @@ static int adreno_init(struct kgsl_device *device)
}
/*
- * Allocate a small chunk of memory for precise cmdbatch profiling for
+ * Allocate a small chunk of memory for precise drawobj profiling for
* those targets that have the always on timer
*/
if (!adreno_is_a3xx(adreno_dev)) {
int r = kgsl_allocate_global(device,
- &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE,
+ &adreno_dev->profile_buffer, PAGE_SIZE,
0, 0, "alwayson");
- adreno_dev->cmdbatch_profile_index = 0;
+ adreno_dev->profile_index = 0;
if (r == 0) {
- set_bit(ADRENO_DEVICE_CMDBATCH_PROFILE,
+ set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
&adreno_dev->priv);
kgsl_sharedmem_set(device,
- &adreno_dev->cmdbatch_profile_buffer, 0, 0,
+ &adreno_dev->profile_buffer, 0, 0,
PAGE_SIZE);
}
@@ -1653,14 +1654,9 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
int adreno_reset(struct kgsl_device *device, int fault)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret = -EINVAL;
int i = 0;
- /* broadcast to HW - reset is coming */
- if (gpudev->pre_reset)
- gpudev->pre_reset(adreno_dev);
-
/* Try soft reset first */
if (adreno_try_soft_reset(device, fault)) {
/* Make sure VBIF is cleared before resetting */
@@ -2340,12 +2336,12 @@ int adreno_idle(struct kgsl_device *device)
* adreno_drain() - Drain the dispatch queue
* @device: Pointer to the KGSL device structure for the GPU
*
- * Drain the dispatcher of existing command batches. This halts
+ * Drain the dispatcher of existing drawobjs. This halts
* additional commands from being issued until the gate is completed.
*/
static int adreno_drain(struct kgsl_device *device)
{
- reinit_completion(&device->cmdbatch_gate);
+ reinit_completion(&device->halt_gate);
return 0;
}
@@ -2825,7 +2821,7 @@ static const struct kgsl_functable adreno_functable = {
.getproperty_compat = adreno_getproperty_compat,
.waittimestamp = adreno_waittimestamp,
.readtimestamp = adreno_readtimestamp,
- .issueibcmds = adreno_ringbuffer_issueibcmds,
+ .queue_cmds = adreno_dispatcher_queue_cmds,
.ioctl = adreno_ioctl,
.compat_ioctl = adreno_compat_ioctl,
.power_stats = adreno_power_stats,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index d4858f3f818e..295a3d80d476 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -76,13 +76,13 @@
KGSL_CONTEXT_PREEMPT_STYLE_SHIFT)
/*
- * return the dispatcher cmdqueue in which the given cmdbatch should
+ * return the dispatcher drawqueue in which the given drawobj should
* be submitted
*/
-#define ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(c) \
+#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \
(&((ADRENO_CONTEXT(c->context))->rb->dispatch_q))
-#define ADRENO_CMDBATCH_RB(c) \
+#define ADRENO_DRAWOBJ_RB(c) \
((ADRENO_CONTEXT(c->context))->rb)
/* Adreno core features */
@@ -346,8 +346,8 @@ struct adreno_gpu_core {
* @halt: Atomic variable to check whether the GPU is currently halted
* @ctx_d_debugfs: Context debugfs node
* @pwrctrl_flag: Flag to hold adreno specific power attributes
- * @cmdbatch_profile_buffer: Memdesc holding the cmdbatch profiling buffer
- * @cmdbatch_profile_index: Index to store the start/stop ticks in the profiling
+ * @profile_buffer: Memdesc holding the drawobj profiling buffer
+ * @profile_index: Index to store the start/stop ticks in the profiling
* buffer
* @sp_local_gpuaddr: Base GPU virtual address for SP local memory
* @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory
@@ -404,8 +404,8 @@ struct adreno_device {
struct dentry *ctx_d_debugfs;
unsigned long pwrctrl_flag;
- struct kgsl_memdesc cmdbatch_profile_buffer;
- unsigned int cmdbatch_profile_index;
+ struct kgsl_memdesc profile_buffer;
+ unsigned int profile_index;
uint64_t sp_local_gpuaddr;
uint64_t sp_pvt_gpuaddr;
const struct firmware *lm_fw;
@@ -441,7 +441,7 @@ struct adreno_device {
* @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
* @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
* send any more commands to the ringbuffer)
- * @ADRENO_DEVICE_CMDBATCH_PROFILE - Set if the device supports command batch
+ * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
* profiling via the ALWAYSON counter
* @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
* @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
@@ -459,7 +459,7 @@ enum adreno_device_flags {
ADRENO_DEVICE_HANG_INTR = 4,
ADRENO_DEVICE_STARTED = 5,
ADRENO_DEVICE_FAULT = 6,
- ADRENO_DEVICE_CMDBATCH_PROFILE = 7,
+ ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
ADRENO_DEVICE_PREEMPTION = 9,
ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
@@ -469,22 +469,22 @@ enum adreno_device_flags {
};
/**
- * struct adreno_cmdbatch_profile_entry - a single command batch entry in the
+ * struct adreno_drawobj_profile_entry - a single drawobj entry in the
* kernel profiling buffer
- * @started: Number of GPU ticks at start of the command batch
- * @retired: Number of GPU ticks at the end of the command batch
+ * @started: Number of GPU ticks at start of the drawobj
+ * @retired: Number of GPU ticks at the end of the drawobj
*/
-struct adreno_cmdbatch_profile_entry {
+struct adreno_drawobj_profile_entry {
uint64_t started;
uint64_t retired;
};
-#define ADRENO_CMDBATCH_PROFILE_COUNT \
- (PAGE_SIZE / sizeof(struct adreno_cmdbatch_profile_entry))
+#define ADRENO_DRAWOBJ_PROFILE_COUNT \
+ (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))
-#define ADRENO_CMDBATCH_PROFILE_OFFSET(_index, _member) \
- ((_index) * sizeof(struct adreno_cmdbatch_profile_entry) \
- + offsetof(struct adreno_cmdbatch_profile_entry, _member))
+#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
+ ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
+ + offsetof(struct adreno_drawobj_profile_entry, _member))
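
Each profiling entry is a pair of 64-bit tick counts, so the renamed macros stay plain byte arithmetic over a page-sized array (256 entries on a 4 KB page). A standalone check of that math, with PROFILE_OFFSET as a local stand-in for the kernel macro:

#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

struct adreno_drawobj_profile_entry {
	uint64_t started;
	uint64_t retired;
};

/* Local stand-in for ADRENO_DRAWOBJ_PROFILE_OFFSET() */
#define PROFILE_OFFSET(_index, _member) \
	((_index) * sizeof(struct adreno_drawobj_profile_entry) + \
	 offsetof(struct adreno_drawobj_profile_entry, _member))

int main(void)
{
	/* Entry 3 begins at byte 48; "retired" sits 8 bytes into it. */
	printf("%zu %zu\n", PROFILE_OFFSET(3, started),
			PROFILE_OFFSET(3, retired));	/* prints: 48 56 */
	return 0;
}
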
/**
@@ -765,7 +765,6 @@ struct adreno_gpudev {
int (*preemption_init)(struct adreno_device *);
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
- void (*pre_reset)(struct adreno_device *);
void (*clk_set_options)(struct adreno_device *,
const char *, struct clk *);
};
@@ -776,7 +775,7 @@ struct adreno_gpudev {
* @KGSL_FT_REPLAY: Replay the faulting command
* @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
* @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
- * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command batch
+ * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj
* @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
* @KGSL_FT_THROTTLE: Disable the context if it faults too often
* @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
@@ -793,7 +792,7 @@ enum kgsl_ft_policy_bits {
/* KGSL_FT_MAX_BITS is used to calculate the mask */
KGSL_FT_MAX_BITS,
/* Internal bits - set during GFT */
- /* Skip the PM dump on replayed command batches */
+	/* Skip the PM dump on replayed command objs */
KGSL_FT_SKIP_PMDUMP = 31,
};
@@ -882,7 +881,7 @@ int adreno_reset(struct kgsl_device *device, int fault);
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj *drawobj);
int adreno_coresight_init(struct adreno_device *adreno_dev);
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index d52981d10ff5..e67bb92c0c28 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -1406,105 +1406,10 @@ static void a530_lm_enable(struct adreno_device *adreno_dev)
adreno_is_a530v2(adreno_dev) ? 0x00060011 : 0x00000011);
}
-static bool llm_is_enabled(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- return r & (GPMU_BCL_ENABLED | GPMU_LLM_ENABLED);
-}
-
-
-static void sleep_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, &r);
-
- if ((r & STATE_OF_CHILD) == 0) {
- /* If both children are on, sleep CHILD_O1 first */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01 | IDLE_FULL_LM_SLEEP);
- /* Wait for IDLE_FULL_ACK before continuing */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device,
- A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if (r & IDLE_FULL_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to idle: 0x%X\n", r);
- }
-
- /* Now turn off both children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- 0, STATE_OF_CHILD | IDLE_FULL_LM_SLEEP);
-
- /* wait for WAKEUP_ACK to be zero */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & WAKEUP_ACK) == 0)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to sleep: 0x%X\n", r);
-}
-
-static void wake_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01);
-
- if (((device->pwrctrl.num_pwrlevels - 2) -
- device->pwrctrl.active_pwrlevel) <= LM_DCVS_LIMIT)
- return;
-
- udelay(1);
-
- /* Turn on all children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD | IDLE_FULL_LM_SLEEP, 0);
-
- /* Wait for IDLE_FULL_ACK to be zero and WAKEUP_ACK to be set */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & (WAKEUP_ACK | IDLE_FULL_ACK)) == WAKEUP_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to wake: 0x%X\n", r);
-}
-
-static bool llm_is_awake(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- return r & WAKEUP_ACK;
-}
-
static void a540_lm_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- uint32_t agc_lm_config =
+ uint32_t agc_lm_config = AGC_BCL_DISABLED |
((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3)
<< AGC_GPU_VERSION_SHIFT);
unsigned int r;
@@ -1518,11 +1423,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
AGC_LM_CONFIG_ISENSE_ENABLE;
kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- if (!(r & GPMU_BCL_ENABLED))
- agc_lm_config |= AGC_BCL_DISABLED;
-
- if (r & GPMU_LLM_ENABLED)
- agc_lm_config |= AGC_LLM_ENABLED;
if ((r & GPMU_ISENSE_STATUS) == GPMU_ISENSE_END_POINT_CAL_ERR) {
KGSL_CORE_ERR(
@@ -1551,9 +1451,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK,
VOLTAGE_INTR_EN);
-
- if (lm_on(adreno_dev))
- wake_llm(adreno_dev);
}
@@ -1665,14 +1562,6 @@ static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
-static void a5xx_pre_reset(struct adreno_device *adreno_dev)
-{
- if (adreno_is_a540(adreno_dev) && lm_on(adreno_dev)) {
- if (llm_is_awake(adreno_dev))
- sleep_llm(adreno_dev);
- }
-}
-
/*
* a5xx_gpmu_reset() - Re-enable GPMU based power features and restart GPMU
* @work: Pointer to the work struct for gpmu reset
@@ -1707,8 +1596,6 @@ static void a5xx_gpmu_reset(struct work_struct *work)
if (a5xx_regulator_enable(adreno_dev))
goto out;
- a5xx_pre_reset(adreno_dev);
-
/* Soft reset of the GPMU block */
kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16));
@@ -3542,6 +3429,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.preemption_init = a5xx_preemption_init,
.preemption_schedule = a5xx_preemption_schedule,
.enable_64bit = a5xx_enable_64bit,
- .pre_reset = a5xx_pre_reset,
.clk_set_options = a5xx_clk_set_options,
};
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 4baee4a5c0b1..09c550c9f58c 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -37,7 +37,7 @@ static void _update_wptr(struct adreno_device *adreno_dev)
rb->wptr);
rb->dispatch_q.expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
}
spin_unlock_irqrestore(&rb->preempt_lock, flags);
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 680827e5b848..fffe08038bcd 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -129,7 +129,7 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
static void sync_event_print(struct seq_file *s,
- struct kgsl_cmdbatch_sync_event *sync_event)
+ struct kgsl_drawobj_sync_event *sync_event)
{
switch (sync_event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
@@ -153,12 +153,12 @@ struct flag_entry {
const char *str;
};
-static const struct flag_entry cmdbatch_flags[] = {KGSL_CMDBATCH_FLAGS};
+static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS};
-static const struct flag_entry cmdbatch_priv[] = {
- { CMDBATCH_FLAG_SKIP, "skip"},
- { CMDBATCH_FLAG_FORCE_PREAMBLE, "force_preamble"},
- { CMDBATCH_FLAG_WFI, "wait_for_idle" },
+static const struct flag_entry cmdobj_priv[] = {
+ { CMDOBJ_SKIP, "skip"},
+ { CMDOBJ_FORCE_PREAMBLE, "force_preamble"},
+ { CMDOBJ_WFI, "wait_for_idle" },
};
static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
@@ -199,42 +199,54 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table,
seq_puts(s, "None");
}
-static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
+static void syncobj_print(struct seq_file *s,
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- /* print fences first, since they block this cmdbatch */
+ seq_puts(s, " syncobj ");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
- /*
- * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways
- * so that it is clear if the fence was a separate submit
- * or part of an IB submit.
- */
- seq_printf(s, "\t%d ", cmdbatch->timestamp);
sync_event_print(s, event);
seq_puts(s, "\n");
}
+}
- /* if this flag is set, there won't be an IB */
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
- return;
+static void cmdobj_print(struct seq_file *s,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- seq_printf(s, "\t%d: ", cmdbatch->timestamp);
+ if (drawobj->type == CMDOBJ_TYPE)
+ seq_puts(s, " cmdobj ");
+ else
+ seq_puts(s, " markerobj ");
- seq_puts(s, " flags: ");
- print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
- cmdbatch->flags);
+ seq_printf(s, "\t %d ", drawobj->timestamp);
seq_puts(s, " priv: ");
- print_flags(s, cmdbatch_priv, ARRAY_SIZE(cmdbatch_priv),
- cmdbatch->priv);
+ print_flags(s, cmdobj_priv, ARRAY_SIZE(cmdobj_priv),
+ cmdobj->priv);
+}
+
+static void drawobj_print(struct seq_file *s,
+ struct kgsl_drawobj *drawobj)
+{
+ if (drawobj->type == SYNCOBJ_TYPE)
+ syncobj_print(s, SYNCOBJ(drawobj));
+ else if ((drawobj->type == CMDOBJ_TYPE) ||
+ (drawobj->type == MARKEROBJ_TYPE))
+ cmdobj_print(s, CMDOBJ(drawobj));
+
+ seq_puts(s, " flags: ");
+ print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
+ drawobj->flags);
seq_puts(s, "\n");
}
@@ -285,13 +297,13 @@ static int ctx_print(struct seq_file *s, void *unused)
queued, consumed, retired,
drawctxt->internal_timestamp);
- seq_puts(s, "cmdqueue:\n");
+ seq_puts(s, "drawqueue:\n");
spin_lock(&drawctxt->lock);
- for (i = drawctxt->cmdqueue_head;
- i != drawctxt->cmdqueue_tail;
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE))
- cmdbatch_print(s, drawctxt->cmdqueue[i]);
+ for (i = drawctxt->drawqueue_head;
+ i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE))
+ drawobj_print(s, drawctxt->drawqueue[i]);
spin_unlock(&drawctxt->lock);
seq_puts(s, "events:\n");
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 522c32743d3d..cb4108b4e1f9 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -25,7 +25,7 @@
#include "adreno_trace.h"
#include "kgsl_sharedmem.h"
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
unsigned int adreno_dispatch_starvation_time = 2000;
@@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25;
unsigned int adreno_disp_preempt_fair_sched;
/* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
+static unsigned int _context_drawqueue_size = 50;
/* Number of milliseconds to wait for the context queue to clear */
static unsigned int _context_queue_wait = 10000;
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
+/* Number of drawobjs sent at a time from a single context */
+static unsigned int _context_drawobj_burst = 5;
/*
* GFT throttle parameters. If GFT recovered more than
@@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15;
static unsigned int _dispatcher_q_inflight_lo = 4;
/* Command batch timeout (in milliseconds) */
-unsigned int adreno_cmdbatch_timeout = 2000;
+unsigned int adreno_drawobj_timeout = 2000;
/* Interval for reading and comparing fault detection registers */
static unsigned int _fault_timer_interval = 200;
-#define CMDQUEUE_RB(_cmdqueue) \
+#define DRAWQUEUE_RB(_drawqueue) \
((struct adreno_ringbuffer *) \
- container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+ container_of((_drawqueue),\
+ struct adreno_ringbuffer, dispatch_q))
-#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
+#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue);
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue);
-static inline bool cmdqueue_is_current(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool drawqueue_is_current(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+ struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue);
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
return (adreno_dev->cur_rb == rb);
@@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data)
return time_after(jiffies, expires) ? 0 : 1;
}
-static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
+static int __count_drawqueue_context(struct adreno_context *drawctxt,
+ void *data)
{
unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);
@@ -122,7 +124,7 @@ static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
return 0;
return (&drawctxt->rb->dispatch_q ==
- (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0;
+ (struct adreno_dispatcher_drawqueue *) data) ? 1 : 0;
}
static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
@@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
}
static void _track_context(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue,
+ struct adreno_dispatcher_drawqueue *drawqueue,
struct adreno_context *drawctxt)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev,
device->active_context_count =
_adreno_count_active_contexts(adreno_dev,
__count_context, NULL);
- cmdqueue->active_context_count =
+ drawqueue->active_context_count =
_adreno_count_active_contexts(adreno_dev,
- __count_cmdqueue_context, cmdqueue);
+ __count_drawqueue_context, drawqueue);
spin_unlock(&adreno_dev->active_list_lock);
}
@@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev,
*/
static inline int
-_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue)
+_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue->active_context_count > 1)
+ return (drawqueue->active_context_count > 1)
? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi;
}
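
These watermarks are the fairness knob: once more than one context has been recently active on a drawqueue, the inflight cap drops from _dispatcher_q_inflight_hi (15) to _dispatcher_q_inflight_lo (4) so a single context cannot fill the ringbuffer. In miniature, with the struct as an illustrative stand-in:

struct drawqueue_model {
	int active_context_count;
};

/* Mirrors _drawqueue_inflight(): shared queues get the low cap. */
static int inflight_limit(const struct drawqueue_model *q)
{
	return (q->active_context_count > 1) ? 4 : 15;
}
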
@@ -271,20 +273,20 @@ static void start_fault_timer(struct adreno_device *adreno_dev)
}
/**
- * _retire_marker() - Retire a marker command batch without sending it to the
- * hardware
- * @cmdbatch: Pointer to the cmdbatch to retire
+ * _retire_timestamp() - Retire an object without sending it
+ * to the hardware
+ * @drawobj: Pointer to the object to retire
*
- * In some cases marker commands can be retired by the software without going to
- * the GPU. In those cases, update the memstore from the CPU, kick off the
- * event engine to handle expired events and destroy the command batch.
+ * In some cases IBs can be retired by the software
+ * without going to the GPU. In those cases, update the
+ * memstore from the CPU, kick off the event engine to handle
+ * expired events and destroy the IB.
*/
-static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
+static void _retire_timestamp(struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_context *context = drawobj->context;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
struct kgsl_device *device = context->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/*
* Write the start and end timestamp to the memstore to keep the
@@ -292,11 +294,11 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
*/
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
/* Retire pending GPU events for the object */
@@ -307,13 +309,13 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
* rptr scratch out address. At this point GPU clocks turned off.
* So avoid reading GPU register directly for A3xx.
*/
- if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- 0);
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)))
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ 0, 0);
else
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- adreno_get_rptr(drawctxt->rb));
- kgsl_cmdbatch_destroy(cmdbatch);
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb), 0);
+ kgsl_drawobj_destroy(drawobj);
}
static int _check_context_queue(struct adreno_context *drawctxt)
@@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt)
if (kgsl_context_invalid(&drawctxt->base))
ret = 1;
else
- ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+ ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0;
spin_unlock(&drawctxt->lock);
@@ -341,176 +343,151 @@ static int _check_context_queue(struct adreno_context *drawctxt)
* return true if this is a marker command and the dependent timestamp has
* retired
*/
-static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
-{
- return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
- cmdbatch->marker_timestamp);
-}
-
-static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
+static bool _marker_expired(struct kgsl_drawobj_cmd *markerobj)
{
- drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
- ADRENO_CONTEXT_CMDQUEUE_SIZE);
- drawctxt->queued--;
-}
-/**
- * Removes all expired marker and sync cmdbatches from
- * the context queue when marker command and dependent
- * timestamp are retired. This function is recursive.
- * returns cmdbatch if context has command, NULL otherwise.
- */
-static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
-{
- struct kgsl_cmdbatch *cmdbatch;
-
- if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
- return NULL;
-
- cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- if (cmdbatch == NULL)
- return NULL;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
- /* Check to see if this is a marker we can skip over */
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- _marker_expired(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- _retire_marker(cmdbatch);
- return _expire_markers(drawctxt);
- }
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- if (!kgsl_cmdbatch_events_pending(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- kgsl_cmdbatch_destroy(cmdbatch);
- return _expire_markers(drawctxt);
- }
- }
-
- return cmdbatch;
+ return (drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+ kgsl_check_timestamp(drawobj->device, drawobj->context,
+ markerobj->marker_timestamp);
}
-static void expire_markers(struct adreno_context *drawctxt)
+static inline void _pop_drawobj(struct adreno_context *drawctxt)
{
- spin_lock(&drawctxt->lock);
- _expire_markers(drawctxt);
- spin_unlock(&drawctxt->lock);
+ drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE);
+ drawctxt->queued--;
}
-static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
+static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
- bool pending = false;
-
- cmdbatch = _expire_markers(drawctxt);
-
- if (cmdbatch == NULL)
- return NULL;
+ if (_marker_expired(cmdobj)) {
+ _pop_drawobj(drawctxt);
+ _retire_timestamp(DRAWOBJ(cmdobj));
+ return 0;
+ }
/*
- * If the marker isn't expired but the SKIP bit is set
- * then there are real commands following this one in
- * the queue. This means that we need to dispatch the
- * command so that we can keep the timestamp accounting
- * correct. If skip isn't set then we block this queue
+ * If the marker isn't expired but the SKIP bit
+ * is set then there are real commands following
+ * this one in the queue. This means that we
+ * need to dispatch the command so that we can
+ * keep the timestamp accounting correct. If
+ * skip isn't set then we block this queue
* until the dependent timestamp expires
*/
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
- pending = true;
+ return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN;
+}
- if (kgsl_cmdbatch_events_pending(cmdbatch))
- pending = true;
+static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj,
+ struct adreno_context *drawctxt)
+{
+ if (!kgsl_drawobj_events_pending(syncobj)) {
+ _pop_drawobj(drawctxt);
+ kgsl_drawobj_destroy(DRAWOBJ(syncobj));
+ return 0;
+ }
/*
- * If changes are pending and the canary timer hasn't been
- * started yet, start it
+	 * If we got here, there are pending events for the sync object.
+	 * Start the canary timer if it hasn't been started already.
*/
- if (pending) {
- /*
- * If syncpoints are pending start the canary timer if
- * it hasn't already been started
- */
- if (!cmdbatch->timeout_jiffies) {
- cmdbatch->timeout_jiffies =
- jiffies + msecs_to_jiffies(5000);
- mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies);
- }
-
- return ERR_PTR(-EAGAIN);
+ if (!syncobj->timeout_jiffies) {
+ syncobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000);
+ mod_timer(&syncobj->timer, syncobj->timeout_jiffies);
}
- _pop_cmdbatch(drawctxt);
- return cmdbatch;
+ return -EAGAIN;
}
-/**
- * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
- * @drawctxt: Pointer to the adreno draw context
- *
- * Dequeue a new command batch from the context list
+/*
+ * Retires all expired marker and sync objs from the context
+ * queue and returns one of the following:
+ * a) next drawobj that needs to be sent to ringbuffer
+ * b) -EAGAIN for syncobj with syncpoints pending.
+ * c) -EAGAIN for markerobj whose marker timestamp has not expired yet.
+ * d) NULL for no commands remaining in the drawqueue.
*/
-static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
- struct adreno_context *drawctxt)
+static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj(
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ unsigned int i = drawctxt->drawqueue_head;
+ int ret = 0;
- spin_lock(&drawctxt->lock);
- cmdbatch = _get_cmdbatch(drawctxt);
- spin_unlock(&drawctxt->lock);
+ if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
+ return NULL;
- /*
- * Delete the timer and wait for timer handler to finish executing
- * on another core before queueing the buffer. We must do this
- * without holding any spin lock that the timer handler might be using
- */
- if (!IS_ERR_OR_NULL(cmdbatch))
- del_timer_sync(&cmdbatch->timer);
+ for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
+
+ drawobj = drawctxt->drawqueue[i];
+
+ if (drawobj == NULL)
+ return NULL;
+
+ if (drawobj->type == CMDOBJ_TYPE)
+ return drawobj;
+ else if (drawobj->type == MARKEROBJ_TYPE) {
+ ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt);
+ /* Special case where marker needs to be sent to GPU */
+ if (ret == 1)
+ return drawobj;
+ } else if (drawobj->type == SYNCOBJ_TYPE)
+ ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+
+ if (ret == -EAGAIN)
+ return ERR_PTR(-EAGAIN);
+
+ continue;
+ }
- return cmdbatch;
+ return NULL;
}
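
The loop above is a small per-type state machine: command objects dispatch, expired markers and drained sync objects retire in place, and a still-pending object at the head parks the whole queue with -EAGAIN. A compact user-space model of that contract (type and field names are illustrative; the retirement side effects are elided):

#include <errno.h>
#include <stddef.h>

enum obj_type { CMD_OBJ, MARKER_OBJ, SYNC_OBJ };

struct obj {
	enum obj_type type;
	int marker_expired;	/* marker: dependent timestamp retired */
	int skip;		/* marker: real commands queued behind it */
	int sync_pending;	/* syncobj: syncpoints still outstanding */
};

/*
 * Returns 0 with *out set for a dispatchable object, 0 with *out NULL
 * for an empty or fully drained queue, or -EAGAIN when the queue must
 * block on its head object.
 */
static int next_dispatchable(struct obj **q, unsigned int head,
		unsigned int tail, unsigned int size, struct obj **out)
{
	unsigned int i;

	*out = NULL;
	for (i = head; i != tail; i = (i + 1) % size) {
		struct obj *o = q[i];

		if (o->type == CMD_OBJ) {
			*out = o;
			return 0;
		}
		if (o->type == MARKER_OBJ) {
			if (o->marker_expired)
				continue;	/* retire in place */
			if (o->skip) {
				*out = o;	/* send to keep ts linear */
				return 0;
			}
			return -EAGAIN;		/* block on the timestamp */
		}
		if (o->sync_pending)
			return -EAGAIN;		/* block on syncpoints */
		/* drained syncobj: retire in place and keep walking */
	}
	return 0;
}
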
/**
- * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * adreno_dispatcher_requeue_cmdobj() - Put a command back on the context
* queue
* @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ * @cmdobj: Pointer to the KGSL command object to requeue
*
* Failure to submit a command to the ringbuffer isn't the fault of the command
 * being submitted so if a failure happens, push it back on the head of the
* context queue to be reconsidered again unless the context got detached.
*/
-static inline int adreno_dispatcher_requeue_cmdbatch(
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+static inline int adreno_dispatcher_requeue_cmdobj(
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
unsigned int prev;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
spin_lock(&drawctxt->lock);
if (kgsl_context_detached(&drawctxt->base) ||
kgsl_context_invalid(&drawctxt->base)) {
spin_unlock(&drawctxt->lock);
- /* get rid of this cmdbatch since the context is bad */
- kgsl_cmdbatch_destroy(cmdbatch);
+ /* get rid of this drawobj since the context is bad */
+ kgsl_drawobj_destroy(drawobj);
return -ENOENT;
}
- prev = drawctxt->cmdqueue_head == 0 ?
- (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) :
- (drawctxt->cmdqueue_head - 1);
+ prev = drawctxt->drawqueue_head == 0 ?
+ (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) :
+ (drawctxt->drawqueue_head - 1);
/*
 * The maximum queue size always needs to be one less than the size of
- * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ * the ringbuffer queue so there is "room" to put the drawobj back in
*/
- BUG_ON(prev == drawctxt->cmdqueue_tail);
+ WARN_ON(prev == drawctxt->drawqueue_tail);
- drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->drawqueue[prev] = drawobj;
drawctxt->queued++;
/* Reset the command queue head to reflect the newly requeued change */
- drawctxt->cmdqueue_head = prev;
+ drawctxt->drawqueue_head = prev;
spin_unlock(&drawctxt->lock);
return 0;
}
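
Requeueing reuses the slot immediately before the current head, which is why the context queue must always keep one slot free (the WARN_ON above). The wraparound arithmetic in isolation, with QSIZE as a stand-in for ADRENO_CONTEXT_DRAWQUEUE_SIZE:

#include <assert.h>

#define QSIZE 128	/* stand-in for ADRENO_CONTEXT_DRAWQUEUE_SIZE */

static unsigned int ring_next(unsigned int i) { return (i + 1) % QSIZE; }
static unsigned int ring_prev(unsigned int i) { return i ? i - 1 : QSIZE - 1; }

int main(void)
{
	assert(ring_prev(0) == QSIZE - 1);	/* head wraps backwards */
	assert(ring_next(QSIZE - 1) == 0);	/* tail wraps forwards */
	return 0;
}
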
@@ -545,21 +522,22 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev,
}
/**
- * sendcmd() - Send a command batch to the GPU hardware
+ * sendcmd() - Send a drawobj to the GPU hardware
* @dispatcher: Pointer to the adreno dispatcher struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ * @cmdobj: Pointer to the KGSL command obj being sent
*
- * Send a KGSL command batch to the GPU hardware
+ * Send a KGSL drawobj to the GPU hardware
*/
static int sendcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
+ struct adreno_dispatcher_drawqueue *dispatch_q =
+ ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
struct adreno_submit_time time;
uint64_t secs = 0;
unsigned long nsecs = 0;
@@ -588,15 +566,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
}
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) {
- set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv);
- cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index;
- adreno_dev->cmdbatch_profile_index =
- (adreno_dev->cmdbatch_profile_index + 1) %
- ADRENO_CMDBATCH_PROFILE_COUNT;
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) {
+ set_bit(CMDOBJ_PROFILE, &cmdobj->priv);
+ cmdobj->profile_index = adreno_dev->profile_index;
+ adreno_dev->profile_index =
+ (adreno_dev->profile_index + 1) %
+ ADRENO_DRAWOBJ_PROFILE_COUNT;
}
- ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time);
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time);
/*
* On the first command, if the submission was successful, then read the
@@ -649,17 +627,17 @@ static int sendcmd(struct adreno_device *adreno_dev,
secs = time.ktime;
nsecs = do_div(secs, 1000000000);
- trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
+ trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
mutex_unlock(&device->mutex);
- cmdbatch->submit_ticks = time.ticks;
+ cmdobj->submit_ticks = time.ticks;
- dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
+ dispatch_q->cmd_q[dispatch_q->tail] = cmdobj;
dispatch_q->tail = (dispatch_q->tail + 1) %
- ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE;
/*
* For the first submission in any given command queue update the
@@ -670,7 +648,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
if (dispatch_q->inflight == 1)
dispatch_q->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
/*
* If we believe ourselves to be current and preemption isn't a thing,
@@ -678,7 +656,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
* thing and the timer will be set up in due time
*/
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- if (cmdqueue_is_current(dispatch_q))
+ if (drawqueue_is_current(dispatch_q))
mod_timer(&dispatcher->timer, dispatch_q->expires);
}
@@ -704,75 +682,70 @@ static int sendcmd(struct adreno_device *adreno_dev,
static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(drawctxt->rb->dispatch_q);
int count = 0;
int ret = 0;
- int inflight = _cmdqueue_inflight(dispatch_q);
+ int inflight = _drawqueue_inflight(dispatch_q);
unsigned int timestamp;
if (dispatch_q->inflight >= inflight) {
- expire_markers(drawctxt);
+ spin_lock(&drawctxt->lock);
+ _process_drawqueue_get_next_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
return -EBUSY;
}
/*
- * Each context can send a specific number of command batches per cycle
+ * Each context can send a specific number of drawobjs per cycle
*/
- while ((count < _context_cmdbatch_burst) &&
+ while ((count < _context_drawobj_burst) &&
(dispatch_q->inflight < inflight)) {
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
if (adreno_gpu_fault(adreno_dev) != 0)
break;
- cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
+ spin_lock(&drawctxt->lock);
+ drawobj = _process_drawqueue_get_next_drawobj(drawctxt);
/*
- * adreno_context_get_cmdbatch returns -EAGAIN if the current
- * cmdbatch has pending sync points so no more to do here.
+	 * _process_drawqueue_get_next_drawobj returns -EAGAIN if the current
+ * drawobj has pending sync points so no more to do here.
* When the sync points are satisfied then the context will get
 * requeued
*/
- if (IS_ERR_OR_NULL(cmdbatch)) {
- if (IS_ERR(cmdbatch))
- ret = PTR_ERR(cmdbatch);
+ if (IS_ERR_OR_NULL(drawobj)) {
+ if (IS_ERR(drawobj))
+ ret = PTR_ERR(drawobj);
+ spin_unlock(&drawctxt->lock);
break;
}
+ _pop_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
- /*
- * If this is a synchronization submission then there are no
- * commands to submit. Discard it and get the next item from
- * the queue. Decrement count so this packet doesn't count
- * against the burst for the context
- */
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- kgsl_cmdbatch_destroy(cmdbatch);
- continue;
- }
-
- timestamp = cmdbatch->timestamp;
-
- ret = sendcmd(adreno_dev, cmdbatch);
+ timestamp = drawobj->timestamp;
+ cmdobj = CMDOBJ(drawobj);
+ ret = sendcmd(adreno_dev, cmdobj);
/*
- * On error from sendcmd() try to requeue the command batch
+ * On error from sendcmd() try to requeue the cmdobj
* unless we got back -ENOENT which means that the context has
* been detached and there will be no more deliveries from here
*/
if (ret != 0) {
- /* Destroy the cmdbatch on -ENOENT */
+ /* Destroy the cmdobj on -ENOENT */
if (ret == -ENOENT)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
else {
/*
* If the requeue returns an error, return that
* instead of whatever sendcmd() sent us
*/
- int r = adreno_dispatcher_requeue_cmdbatch(
- drawctxt, cmdbatch);
+ int r = adreno_dispatcher_requeue_cmdobj(
+ drawctxt, cmdobj);
if (r)
ret = r;
}
@@ -934,99 +907,87 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
/**
* get_timestamp() - Return the next timestamp for the context
* @drawctxt - Pointer to an adreno draw context struct
- * @cmdbatch - Pointer to a command batch
+ * @drawobj - Pointer to a drawobj
* @timestamp - Pointer to a timestamp value possibly passed from the user
+ * @user_ts - user-generated timestamp
*
* Assign a timestamp based on the settings of the draw context and the command
* batch.
*/
static int get_timestamp(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+ struct kgsl_drawobj *drawobj, unsigned int *timestamp,
+ unsigned int user_ts)
{
- /* Synchronization commands don't get a timestamp */
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- *timestamp = 0;
- return 0;
- }
if (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS) {
/*
* User specified timestamps need to be greater than the last
* issued timestamp in the context
*/
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+ if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0)
return -ERANGE;
- drawctxt->timestamp = *timestamp;
+ drawctxt->timestamp = user_ts;
} else
drawctxt->timestamp++;
*timestamp = drawctxt->timestamp;
+ drawobj->timestamp = *timestamp;
return 0;
}
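
The -ERANGE check relies on timestamp_cmp() being wraparound-safe: "newer" cannot be a plain integer compare once the 32-bit counter rolls over. A common serial-number-arithmetic sketch of the idea (the actual KGSL helper may bound its wraparound window differently):

#include <stdint.h>

/* >0 if a is newer than b, 0 if equal, <0 if older, treating the
 * 32-bit space as circular (RFC 1982-style serial arithmetic). */
static int ts_cmp(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b);
}

/* A user timestamp is accepted only if strictly newer than the last
 * one issued on the context, mirroring the -ERANGE test above. */
static int ts_acceptable(uint32_t last_issued, uint32_t user_ts)
{
	return ts_cmp(user_ts, last_issued) > 0;
}
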
-/**
- * adreno_dispactcher_queue_cmd() - Queue a new command in the context
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
- * @timestamp: Pointer to the requested timestamp
- *
- * Queue a command in the context - if there isn't any room in the queue, then
- * block until there is
- */
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
+static void _set_ft_policy(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
- int ret;
-
- spin_lock(&drawctxt->lock);
-
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+ /*
+ * Set the fault tolerance policy for the command batch - assuming the
+ * context hasn't disabled FT use the current device policy
+ */
+ if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+ else
+ cmdobj->fault_policy = adreno_dev->ft_policy;
+}
+static void _cmdobj_set_flags(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
/*
* Force the preamble for this submission only - this is usually
* requested by the dispatcher as part of fault recovery
*/
-
if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE,
&drawctxt->base.priv))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/*
- * Force the premable if set from userspace in the context or cmdbatch
- * flags
+	 * Force the preamble if set from userspace in the context or
+	 * command obj flags
*/
-
if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) ||
- (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ (cmdobj->base.flags & KGSL_DRAWOBJ_CTX_SWITCH))
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
- /* Skip this cmdbatch commands if IFH_NOP is enabled */
+ /* Skip this ib if IFH_NOP is enabled */
if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP)
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
* If we are waiting for the end of frame and it hasn't appeared yet,
- * then mark the command batch as skipped. It will still progress
+ * then mark the command obj as skipped. It will still progress
* through the pipeline but it won't actually send any commands
*/
if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) {
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
- * If this command batch represents the EOF then clear the way
+ * If this command obj represents the EOF then clear the way
* for the dispatcher to continue submitting
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) {
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_END_OF_FRAME) {
clear_bit(ADRENO_CONTEXT_SKIP_EOF,
&drawctxt->base.priv);
@@ -1038,10 +999,84 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
&drawctxt->base.priv);
}
}
+}
- /* Wait for room in the context queue */
+static inline int _check_context_state(struct kgsl_context *context)
+{
+ if (kgsl_context_invalid(context))
+ return -EDEADLK;
+
+ if (kgsl_context_detached(context))
+ return -ENOENT;
+
+ return 0;
+}
+
+static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_memobj_node *ib)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
+ if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
+ pr_context(device, context, "ctxt %d invalid ib size %lld\n",
+ context->id, ib->size);
+ return false;
+ }
+
+ /* Make sure that the address is mapped */
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
+ pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
+ context->id, ib->gpuaddr);
+ return false;
+ }
+
+ return true;
+}
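
ib->size is in bytes while the CP limit is expressed in dwords, hence the shift by two before comparing against 0xFFFFF. The size test as a standalone predicate:

#include <stdbool.h>
#include <stdint.h>

/* Mirrors the size check in _verify_ib(): nonzero, and at most
 * 0xFFFFF dwords (4 bytes each) once converted from bytes. */
static bool ib_size_ok(uint64_t size_bytes)
{
	return size_bytes != 0 && (size_bytes >> 2) <= 0xFFFFF;
}
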
+
+static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_memobj_node *ib;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ /* Verify the IBs before they get queued */
+ if (drawobj[i]->type == CMDOBJ_TYPE) {
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]);
+
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
+ if (_verify_ib(dev_priv,
+ &ADRENO_CONTEXT(context)->base, ib)
+ == false)
+ return -EINVAL;
+ /*
+ * Clear the wake on touch bit to indicate an IB has
+ * been submitted since the last time we set it.
+ * But only clear it when we have rendering commands.
+ */
+ device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
+ }
+
+ /* A3XX does not have support for drawobj profiling */
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)) &&
+ (drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING))
+ return -EOPNOTSUPP;
+ }
- while (drawctxt->queued >= _context_cmdqueue_size) {
+ return 0;
+}
+
+static inline int _wait_for_room_in_context_queue(
+ struct adreno_context *drawctxt)
+{
+ int ret = 0;
+
+ /* Wait for room in the context queue */
+ while (drawctxt->queued >= _context_drawqueue_size) {
trace_adreno_drawctxt_sleep(drawctxt);
spin_unlock(&drawctxt->lock);
@@ -1052,98 +1087,210 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
spin_lock(&drawctxt->lock);
trace_adreno_drawctxt_wake(drawctxt);
- if (ret <= 0) {
- spin_unlock(&drawctxt->lock);
+ if (ret <= 0)
return (ret == 0) ? -ETIMEDOUT : (int) ret;
- }
}
+
+ return 0;
+}
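
Because the context spinlock is dropped for the duration of the wait, nothing read before sleeping can be trusted afterwards; that is why _check_context_state_to_queue_cmds() below brackets this wait with a second state check. The same shape in user-space terms (a sketch; the timeout and -ETIMEDOUT path are elided):

#include <pthread.h>
#include <stdbool.h>

struct ctx_model {
	pthread_mutex_t lock;
	pthread_cond_t room;
	unsigned int queued, limit;
	bool invalid;
};

static int queue_one(struct ctx_model *c)
{
	int ret = 0;

	pthread_mutex_lock(&c->lock);
	/* the lock is dropped inside pthread_cond_wait() */
	while (!c->invalid && c->queued >= c->limit)
		pthread_cond_wait(&c->room, &c->lock);
	if (c->invalid)
		ret = -1;	/* state changed while we slept */
	else
		c->queued++;
	pthread_mutex_unlock(&c->lock);
	return ret;
}
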
+
+static int _check_context_state_to_queue_cmds(
+ struct adreno_context *drawctxt)
+{
+ int ret = _check_context_state(&drawctxt->base);
+
+ if (ret)
+ return ret;
+
+ ret = _wait_for_room_in_context_queue(drawctxt);
+ if (ret)
+ return ret;
+
/*
 * Account for the possibility that the context got invalidated
* while we were sleeping
*/
+ return _check_context_state(&drawctxt->base);
+}
- if (kgsl_context_invalid(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -EDEADLK;
- }
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+static void _queue_drawobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj *drawobj)
+{
+ /* Put the command into the queue */
+ drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj;
+ drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
+ drawctxt->queued++;
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+}
- ret = get_timestamp(drawctxt, cmdbatch, timestamp);
- if (ret) {
- spin_unlock(&drawctxt->lock);
+static int _queue_markerobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
+ int ret;
+
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
return ret;
+
+ /*
+ * See if we can fastpath this thing - if nothing is queued
+ * and nothing is inflight retire without bothering the GPU
+	 * and nothing is inflight, retire without bothering the GPU
+ if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
+ drawobj->context, drawctxt->queued_timestamp)) {
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+ _retire_timestamp(drawobj);
+ return 1;
}
- cmdbatch->timestamp = *timestamp;
+ /*
+ * Remember the last queued timestamp - the marker will block
+ * until that timestamp is expired (unless another command
+ * comes along and forces the marker to execute)
+ */
- if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
+ markerobj->marker_timestamp = drawctxt->queued_timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, markerobj);
+ _cmdobj_set_flags(drawctxt, markerobj);
- /*
- * See if we can fastpath this thing - if nothing is queued
- * and nothing is inflight retire without bothering the GPU
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
- cmdbatch->context, drawctxt->queued_timestamp)) {
- trace_adreno_cmdbatch_queued(cmdbatch,
- drawctxt->queued);
+ return 0;
+}
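
A marker carries no commands of its own, so when the context queue is empty and the timestamp it would order against has already retired, it can be completed entirely on the CPU; that is the "return 1" fastpath above. The predicate in isolation, with both inputs as stand-ins for drawctxt->queued and the memstore timestamp check:

#include <stdbool.h>

static bool marker_can_retire_now(unsigned int queued,
		bool queued_ts_retired)
{
	return queued == 0 && queued_ts_retired;
}
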
- _retire_marker(cmdbatch);
- spin_unlock(&drawctxt->lock);
- return 0;
- }
+static int _queue_cmdobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ unsigned int j;
+ int ret;
- /*
- * Remember the last queued timestamp - the marker will block
- * until that timestamp is expired (unless another command
- * comes along and forces the marker to execute)
- */
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
+ return ret;
+
+ /*
+ * If this is a real command then we need to force any markers
+ * queued before it to dispatch to keep time linear - set the
+ * skip bit so the commands get NOPed.
+ */
+ j = drawctxt->drawqueue_head;
+
+ while (j != drawctxt->drawqueue_tail) {
+ if (drawctxt->drawqueue[j]->type == MARKEROBJ_TYPE) {
+ struct kgsl_drawobj_cmd *markerobj =
+ CMDOBJ(drawctxt->drawqueue[j]);
+ set_bit(CMDOBJ_SKIP, &markerobj->priv);
+ }
- cmdbatch->marker_timestamp = drawctxt->queued_timestamp;
+ j = DRAWQUEUE_NEXT(j, ADRENO_CONTEXT_DRAWQUEUE_SIZE);
}
- /* SYNC commands have timestamp 0 and will get optimized out anyway */
- if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
- drawctxt->queued_timestamp = *timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, cmdobj);
+ _cmdobj_set_flags(drawctxt, cmdobj);
- /*
- * Set the fault tolerance policy for the command batch - assuming the
- * context hasn't disabled FT use the current device policy
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
- set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
- else
- cmdbatch->fault_policy = adreno_dev->ft_policy;
+ return 0;
+}
- /* Put the command into the queue */
- drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
- drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+static void _queue_syncobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
- /*
- * If this is a real command then we need to force any markers queued
- * before it to dispatch to keep time linear - set the skip bit so
- * the commands get NOPed.
- */
+ *timestamp = 0;
+ drawobj->timestamp = 0;
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
- unsigned int i = drawctxt->cmdqueue_head;
+ _queue_drawobj(drawctxt, drawobj);
+}
- while (i != drawctxt->cmdqueue_tail) {
- if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
- set_bit(CMDBATCH_FLAG_SKIP,
- &drawctxt->cmdqueue[i]->priv);
+/**
+ * adreno_dispatcher_queue_cmds() - Queue new draw objects in the context queue
+ * @dev_priv: Pointer to the device private struct
+ * @context: Pointer to the kgsl draw context
+ * @drawobj: Pointer to the array of drawobjs being submitted
+ * @count: Number of drawobjs being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue a command in the context - if there isn't any room in the queue, then
+ * block until there is
+ */
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp)
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ struct adreno_dispatcher_drawqueue *dispatch_q;
+ int ret;
+ unsigned int i, user_ts;
+
+ ret = _check_context_state(&drawctxt->base);
+ if (ret)
+ return ret;
+
+ ret = _verify_cmdobj(dev_priv, context, drawobj, count);
+ if (ret)
+ return ret;
+
+ /* wait for the suspend gate */
+ wait_for_completion(&device->halt_gate);
+
+ spin_lock(&drawctxt->lock);
+
+ ret = _check_context_state_to_queue_cmds(drawctxt);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+
+ user_ts = *timestamp;
+
+ for (i = 0; i < count; i++) {
+
+ switch (drawobj[i]->type) {
+ case MARKEROBJ_TYPE:
+ ret = _queue_markerobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret == 1) {
+ spin_unlock(&drawctxt->lock);
+ goto done;
+ } else if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case CMDOBJ_TYPE:
+ ret = _queue_cmdobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case SYNCOBJ_TYPE:
+ _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
+ timestamp);
+ break;
+ default:
+ spin_unlock(&drawctxt->lock);
+ return -EINVAL;
}
+
}
- drawctxt->queued++;
- trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+ dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj[0]);
_track_context(adreno_dev, dispatch_q, drawctxt);
@@ -1163,8 +1310,11 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
* queue will try to schedule new commands anyway.
*/
- if (dispatch_q->inflight < _context_cmdbatch_burst)
+ if (dispatch_q->inflight < _context_drawobj_burst)
adreno_dispatcher_issuecmds(adreno_dev);
+done:
+ if (test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
+ return -EPROTO;
return 0;
}
@@ -1208,15 +1358,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id)
}
/*
- * If an IB inside of the command batch has a gpuaddr that matches the base
+ * If an IB inside of the drawobj has a gpuaddr that matches the base
* passed in then zero the size which effectively skips it when it is submitted
* in the ringbuffer.
*/
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
+static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base)
{
struct kgsl_memobj_node *ib;
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
if (ib->gpuaddr == base) {
ib->priv |= MEMOBJ_SKIP;
if (base)
@@ -1225,10 +1375,11 @@ static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
}
}
-static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int i;
/*
@@ -1243,9 +1394,9 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
* b) force preamble for next commandbatch
*/
for (i = 1; i < count; i++) {
- if (replay[i]->context->id == cmdbatch->context->id) {
+ if (DRAWOBJ(replay[i])->context->id == drawobj->context->id) {
replay[i]->fault_policy = replay[0]->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery);
break;
}
@@ -1262,41 +1413,44 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
drawctxt->fault_policy = replay[0]->fault_policy;
}
- /* set the flags to skip this cmdbatch */
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
- cmdbatch->fault_recovery = 0;
+ /* set the flags to skip this cmdobj */
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
+ cmdobj->fault_recovery = 0;
}
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int skip = 1;
int i;
for (i = 0; i < count; i++) {
+ struct kgsl_drawobj *replay_obj = DRAWOBJ(replay[i]);
+
/*
- * Only operate on command batches that belong to the
+ * Only operate on drawobj's that belong to the
* faulting context
*/
- if (replay[i]->context->id != cmdbatch->context->id)
+ if (replay_obj->context->id != drawobj->context->id)
continue;
/*
- * Skip all the command batches in this context until
+ * Skip all the drawobjs in this context until
* the EOF flag is seen. If the EOF flag is seen then
* force the preamble for the next command.
*/
if (skip) {
- set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+ set_bit(CMDOBJ_SKIP, &replay[i]->priv);
- if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME)
+ if (replay_obj->flags & KGSL_DRAWOBJ_END_OF_FRAME)
skip = 0;
} else {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
return;
}
}
@@ -1318,26 +1472,28 @@ static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv);
}
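
Frame skipping walks the replay list for the faulting context, NOPing everything up to and including the end-of-frame object and then forcing a preamble on the first object after it. The walk in miniature, with illustrative boolean flags in place of the real bit operations:

struct replay_model {
	int ctx_id;
	int eof;		/* ~ KGSL_DRAWOBJ_END_OF_FRAME */
	int skip;		/* ~ CMDOBJ_SKIP */
	int force_preamble;	/* ~ CMDOBJ_FORCE_PREAMBLE */
};

static void skip_frame(struct replay_model *replay, int count,
		int faulting_ctx)
{
	int skipping = 1;

	for (int i = 0; i < count; i++) {
		if (replay[i].ctx_id != faulting_ctx)
			continue;
		if (skipping) {
			replay[i].skip = 1;	/* NOP until EOF is seen */
			if (replay[i].eof)
				skipping = 0;
		} else {
			replay[i].force_preamble = 1;
			return;		/* only the first post-EOF object */
		}
	}
}
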
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count)
+static void remove_invalidated_cmdobjs(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count)
{
int i;
for (i = 0; i < count; i++) {
- struct kgsl_cmdbatch *cmd = replay[i];
- if (cmd == NULL)
+ struct kgsl_drawobj_cmd *cmdobj = replay[i];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ if (cmdobj == NULL)
continue;
- if (kgsl_context_detached(cmd->context) ||
- kgsl_context_invalid(cmd->context)) {
+ if (kgsl_context_detached(drawobj->context) ||
+ kgsl_context_invalid(drawobj->context)) {
replay[i] = NULL;
mutex_lock(&device->mutex);
kgsl_cancel_events_timestamp(device,
- &cmd->context->events, cmd->timestamp);
+ &drawobj->context->events, drawobj->timestamp);
mutex_unlock(&device->mutex);
- kgsl_cmdbatch_destroy(cmd);
+ kgsl_drawobj_destroy(drawobj);
}
}
}
@@ -1361,9 +1517,10 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
static void adreno_fault_header(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
unsigned int status, rptr, wptr, ib1sz, ib2sz;
uint64_t ib1base, ib2base;
@@ -1377,22 +1534,22 @@ static void adreno_fault_header(struct kgsl_device *device,
ADRENO_REG_CP_IB2_BASE_HI, &ib2base);
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
- if (cmdbatch != NULL) {
+ if (drawobj != NULL) {
struct adreno_context *drawctxt =
- ADRENO_CONTEXT(cmdbatch->context);
+ ADRENO_CONTEXT(drawobj->context);
- trace_adreno_gpu_fault(cmdbatch->context->id,
- cmdbatch->timestamp,
+ trace_adreno_gpu_fault(drawobj->context->id,
+ drawobj->timestamp,
status, rptr, wptr, ib1base, ib1sz,
ib2base, ib2sz, drawctxt->rb->id);
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- cmdbatch->context->id, cmdbatch->timestamp, status,
+ drawobj->context->id, drawobj->timestamp, status,
rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
if (rb != NULL)
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
rb->id, rptr, rb->wptr);
} else {
@@ -1411,33 +1568,34 @@ static void adreno_fault_header(struct kgsl_device *device,
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
kgsl_context_detached(&drawctxt->base)) {
- pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
- "gpu detached context %d\n", cmdbatch->context->id);
+ pr_context(KGSL_DEVICE(adreno_dev), drawobj->context,
+ "gpu detached context %d\n", drawobj->context->id);
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
}
}
/**
- * process_cmdbatch_fault() - Process a cmdbatch for fault policies
- * @device: Device on which the cmdbatch caused a fault
- * @replay: List of cmdbatches that are to be replayed on the device. The
- * faulting cmdbatch is the first command in the replay list and the remaining
- * cmdbatches in the list are commands that were submitted to the same queue
+ * process_cmdobj_fault() - Process a cmdobj for fault policies
+ * @device: Device on which the cmdobj caused a fault
+ * @replay: List of cmdobjs that are to be replayed on the device. The
+ * first command in the replay list is the faulting command and the remaining
+ * cmdobjs in the list are commands that were submitted to the same queue
 * as the faulting one.
- * @count: Number of cmdbatches in replay
+ * @count: Number of cmdobjs in replay
* @base: The IB1 base at the time of fault
* @fault: The fault type
*/
-static void process_cmdbatch_fault(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count,
+static void process_cmdobj_fault(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count,
unsigned int base,
int fault)
{
- struct kgsl_cmdbatch *cmdbatch = replay[0];
+ struct kgsl_drawobj_cmd *cmdobj = replay[0];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
int i;
char *state = "failed";
@@ -1451,18 +1609,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* where 1st and 4th gpu hang are more than 3 seconds apart we
* won't disable GFT and invalidate the context.
*/
- if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) {
- if (time_after(jiffies, (cmdbatch->context->fault_time
+ if (test_bit(KGSL_FT_THROTTLE, &cmdobj->fault_policy)) {
+ if (time_after(jiffies, (drawobj->context->fault_time
+ msecs_to_jiffies(_fault_throttle_time)))) {
- cmdbatch->context->fault_time = jiffies;
- cmdbatch->context->fault_count = 1;
+ drawobj->context->fault_time = jiffies;
+ drawobj->context->fault_count = 1;
} else {
- cmdbatch->context->fault_count++;
- if (cmdbatch->context->fault_count >
+ drawobj->context->fault_count++;
+ if (drawobj->context->fault_count >
_fault_throttle_burst) {
set_bit(KGSL_FT_DISABLE,
- &cmdbatch->fault_policy);
- pr_context(device, cmdbatch->context,
+ &cmdobj->fault_policy);
+ pr_context(device, drawobj->context,
"gpu fault threshold exceeded %d faults in %d msecs\n",
_fault_throttle_burst,
_fault_throttle_time);
@@ -1471,45 +1629,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
}
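/*
 * Editor's sketch, not part of the patch: the throttle above is a burst
 * counter inside a sliding time window. With jiffies replaced by a plain
 * millisecond clock and hypothetical names (struct fault_state,
 * throttle_fault), the logic reduces to the following; a nonzero return
 * corresponds to setting KGSL_FT_DISABLE.
 */
struct fault_state {
	unsigned long fault_time_ms;
	unsigned int fault_count;
};

static int throttle_fault(struct fault_state *s, unsigned long now_ms,
		unsigned long window_ms, unsigned int burst)
{
	if (now_ms > s->fault_time_ms + window_ms) {
		/* Window expired: this fault starts a fresh burst */
		s->fault_time_ms = now_ms;
		s->fault_count = 1;
		return 0;
	}
	/* Still inside the window: count the fault against the burst */
	return ++s->fault_count > burst;
}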
/*
- * If FT is disabled for this cmdbatch invalidate immediately
+ * If FT is disabled for this cmdobj, invalidate immediately
*/
- if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
- test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+ if (test_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy) ||
+ test_bit(KGSL_FT_TEMP_DISABLE, &cmdobj->fault_policy)) {
state = "skipped";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/* If the context is detached do not run FT on context */
- if (kgsl_context_detached(cmdbatch->context)) {
+ if (kgsl_context_detached(drawobj->context)) {
state = "detached";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/*
- * Set a flag so we don't print another PM dump if the cmdbatch fails
+ * Set a flag so we don't print another PM dump if the cmdobj fails
* again on replay
*/
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+ set_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy);
/*
* A hardware fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it
+ * wrong with the cmdobj - no point in trying to replay it
* Clear the replay bit and move on to the next policy level
*/
if (fault & ADRENO_HARD_FAULT)
- clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+ clear_bit(KGSL_FT_REPLAY, &(cmdobj->fault_policy));
/*
* A timeout fault means the IB timed out - clear the policy and
* invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
- * because we won't see this cmdbatch again
+ * because we won't see this cmdobj again
*/
if (fault & ADRENO_TIMEOUT_FAULT)
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
/*
* If the context had a GPU page fault then it is likely it would fault
@@ -1517,83 +1675,84 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
*/
if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv)) {
+ &drawobj->context->priv)) {
/* we'll need to resume the mmu later... */
- clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+ clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv);
+ &drawobj->context->priv);
}
/*
- * Execute the fault tolerance policy. Each command batch stores the
+ * Execute the fault tolerance policy. Each cmdobj stores the
* current fault policy that was set when it was queued.
* As the options are tried in descending priority
* (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next thing can be tried if the
+ * from the cmdobj policy so the next thing can be tried if the
* change comes around again
*/
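/*
 * Editor's sketch, not part of the patch: the cascade below amounts to a
 * test-and-clear walk over the policy bitmap, highest priority first
 * (helper name ft_next_action is illustrative; the KGSL_FT_* bits are the
 * real ones used in this file). Each fault consumes one level, so repeated
 * faults on the same cmdobj degrade step by step toward invalidation.
 */
static const char *ft_next_action(unsigned long *policy)
{
	if (test_and_clear_bit(KGSL_FT_REPLAY, policy))
		return "replay";
	if (test_and_clear_bit(KGSL_FT_SKIPIB, policy))
		return "skip-ib";
	if (test_and_clear_bit(KGSL_FT_SKIPCMD, policy))
		return "skip-cmd";
	if (test_and_clear_bit(KGSL_FT_SKIPFRAME, policy))
		return "skip-frame";
	return "invalidate";	/* all policies exhausted */
}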
- /* Replay the hanging command batch again */
- if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
- set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+ /* Replay the hanging cmdobj again */
+ if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_REPLAY));
+ set_bit(KGSL_FT_REPLAY, &cmdobj->fault_recovery);
return;
}
/*
* Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
+ * Note that the last IB1 might not be in the "hung" cmdobj
* because the CP may have caused a page-fault while it was prefetching
* the next IB1/IB2. Walk all outstanding commands and zap the
* supposedly bad IB1 wherever it lurks.
*/
- if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
- set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+ if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPIB));
+ set_bit(KGSL_FT_SKIPIB, &cmdobj->fault_recovery);
for (i = 0; i < count; i++) {
if (replay[i] != NULL &&
- replay[i]->context->id == cmdbatch->context->id)
- cmdbatch_skip_ib(replay[i], base);
+ DRAWOBJ(replay[i])->context->id ==
+ drawobj->context->id)
+ _skip_ib(replay[i], base);
}
return;
}
- /* Skip the faulted command batch submission */
- if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD));
+ /* Skip the faulted cmdobj submission */
+ if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPCMD));
- /* Skip faulting command batch */
- cmdbatch_skip_cmd(cmdbatch, replay, count);
+ /* Skip faulting cmdobj */
+ _skip_cmd(cmdobj, replay, count);
return;
}
- if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch,
+ if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj,
BIT(KGSL_FT_SKIPFRAME));
- set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+ set_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_recovery);
/*
- * Skip all the pending command batches for this context until
+ * Skip all the pending cmdobjs for this context until
* the EOF frame is seen
*/
- cmdbatch_skip_frame(cmdbatch, replay, count);
+ _skip_frame(cmdobj, replay, count);
return;
}
/* If we get here then all the policies failed */
- pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n",
- state, cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n",
+ state, drawobj->context->id, drawobj->timestamp);
/* Mark the context as failed */
- mark_guilty_context(device, cmdbatch->context->id);
+ mark_guilty_context(device, drawobj->context->id);
/* Invalidate the context */
- adreno_drawctxt_invalidate(device, cmdbatch->context);
+ adreno_drawctxt_invalidate(device, drawobj->context);
}
/**
@@ -1605,12 +1764,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* @base: The IB1 base during the fault
*/
static void recover_dispatch_q(struct kgsl_device *device,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
+ struct adreno_dispatcher_drawqueue *dispatch_q,
int fault,
unsigned int base)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_cmdbatch **replay = NULL;
+ struct kgsl_drawobj_cmd **replay;
unsigned int ptr;
int first = 0;
int count = 0;
@@ -1624,14 +1783,16 @@ static void recover_dispatch_q(struct kgsl_device *device,
/* Recovery failed - mark everybody on this q guilty */
while (ptr != dispatch_q->tail) {
- struct kgsl_context *context =
- dispatch_q->cmd_q[ptr]->context;
+ struct kgsl_drawobj_cmd *cmdobj =
+ dispatch_q->cmd_q[ptr];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- mark_guilty_context(device, context->id);
- adreno_drawctxt_invalidate(device, context);
- kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]);
+ mark_guilty_context(device, drawobj->context->id);
+ adreno_drawctxt_invalidate(device, drawobj->context);
+ kgsl_drawobj_destroy(drawobj);
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
/*
@@ -1643,22 +1804,22 @@ static void recover_dispatch_q(struct kgsl_device *device,
goto replay;
}
- /* Copy the inflight command batches into the temporary storage */
+ /* Copy the inflight cmdobjs into the temporary storage */
ptr = dispatch_q->head;
while (ptr != dispatch_q->tail) {
replay[count++] = dispatch_q->cmd_q[ptr];
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
if (fault && count)
- process_cmdbatch_fault(device, replay,
+ process_cmdobj_fault(device, replay,
count, base, fault);
replay:
dispatch_q->inflight = 0;
dispatch_q->head = dispatch_q->tail = 0;
- /* Remove any pending command batches that have been invalidated */
- remove_invalidated_cmdbatches(device, replay, count);
+ /* Remove any pending cmdobjs that have been invalidated */
+ remove_invalidated_cmdobjs(device, replay, count);
/* Replay the pending command buffers */
for (i = 0; i < count; i++) {
@@ -1674,16 +1835,16 @@ replay:
*/
if (first == 0) {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
first = 1;
}
/*
- * Force each command batch to wait for idle - this avoids weird
+ * Force each cmdobj to wait for idle - this avoids weird
* CP parse issues
*/
- set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+ set_bit(CMDOBJ_WFI, &replay[i]->priv);
ret = sendcmd(adreno_dev, replay[i]);
@@ -1693,15 +1854,18 @@ replay:
*/
if (ret) {
- pr_context(device, replay[i]->context,
+ pr_context(device, replay[i]->base.context,
"gpu reset failed ctx %d ts %d\n",
- replay[i]->context->id, replay[i]->timestamp);
+ replay[i]->base.context->id,
+ replay[i]->base.timestamp);
/* Mark this context as guilty (failed recovery) */
- mark_guilty_context(device, replay[i]->context->id);
+ mark_guilty_context(device,
+ replay[i]->base.context->id);
- adreno_drawctxt_invalidate(device, replay[i]->context);
- remove_invalidated_cmdbatches(device, &replay[i],
+ adreno_drawctxt_invalidate(device,
+ replay[i]->base.context);
+ remove_invalidated_cmdobjs(device, &replay[i],
count - i);
}
}
@@ -1713,36 +1877,38 @@ replay:
}
static void do_header_and_snapshot(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
- /* Always dump the snapshot on a non-cmdbatch failure */
- if (cmdbatch == NULL) {
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ /* Always dump the snapshot on a non-drawobj failure */
+ if (cmdobj == NULL) {
adreno_fault_header(device, rb, NULL);
kgsl_device_snapshot(device, NULL);
return;
}
/* Skip everything if the PMDUMP flag is set */
- if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy))
+ if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy))
return;
/* Print the fault header */
- adreno_fault_header(device, rb, cmdbatch);
+ adreno_fault_header(device, rb, cmdobj);
- if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
- kgsl_device_snapshot(device, cmdbatch->context);
+ if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
+ kgsl_device_snapshot(device, drawobj->context);
}
static int dispatcher_do_fault(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp;
+ struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp;
struct adreno_ringbuffer *rb;
struct adreno_ringbuffer *hung_rb = NULL;
unsigned int reg;
uint64_t base;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj_cmd *cmdobj = NULL;
int ret, i;
int fault;
int halt;
@@ -1792,10 +1958,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
}
/*
- * retire cmdbatches from all the dispatch_q's before starting recovery
+ * retire cmdobjs from all the dispatch queues before starting recovery
*/
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- adreno_dispatch_retire_cmdqueue(adreno_dev,
+ adreno_dispatch_retire_drawqueue(adreno_dev,
&(rb->dispatch_q));
/* Select the active dispatch_q */
if (base == rb->buffer_desc.gpuaddr) {
@@ -1814,15 +1980,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
}
}
- if (dispatch_q && !adreno_cmdqueue_is_empty(dispatch_q)) {
- cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
- trace_adreno_cmdbatch_fault(cmdbatch, fault);
+ if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) {
+ cmdobj = dispatch_q->cmd_q[dispatch_q->head];
+ trace_adreno_cmdbatch_fault(cmdobj, fault);
}
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- do_header_and_snapshot(device, hung_rb, cmdbatch);
+ do_header_and_snapshot(device, hung_rb, cmdobj);
/* Terminate the stalled transaction and resume the IOMMU */
if (fault & ADRENO_IOMMU_PAGE_FAULT)
@@ -1876,23 +2042,24 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
return 1;
}
-static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+static inline int drawobj_consumed(struct kgsl_drawobj *drawobj,
unsigned int consumed, unsigned int retired)
{
- return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
- (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+ return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, drawobj->timestamp) < 0));
}
static void _print_recovery(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
static struct {
unsigned int mask;
const char *str;
} flags[] = { ADRENO_FT_TYPES };
- int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+ int i, nr = find_first_bit(&cmdobj->fault_recovery, BITS_PER_LONG);
char *result = "unknown";
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
for (i = 0; i < ARRAY_SIZE(flags); i++) {
if (flags[i].mask == BIT(nr)) {
@@ -1901,40 +2068,41 @@ static void _print_recovery(struct kgsl_device *device,
}
}
- pr_context(device, cmdbatch->context,
+ pr_context(device, drawobj->context,
"gpu %s ctx %d ts %d policy %lX\n",
- result, cmdbatch->context->id, cmdbatch->timestamp,
- cmdbatch->fault_recovery);
+ result, drawobj->context->id, drawobj->timestamp,
+ cmdobj->fault_recovery);
}
-static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire)
+static void cmdobj_profile_ticks(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj, uint64_t *start, uint64_t *retire)
{
- void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr;
- struct adreno_cmdbatch_profile_entry *entry;
+ void *ptr = adreno_dev->profile_buffer.hostptr;
+ struct adreno_drawobj_profile_entry *entry;
- entry = (struct adreno_cmdbatch_profile_entry *)
- (ptr + (cmdbatch->profile_index * sizeof(*entry)));
+ entry = (struct adreno_drawobj_profile_entry *)
+ (ptr + (cmdobj->profile_index * sizeof(*entry)));
rmb();
*start = entry->started;
*retire = entry->retired;
}
-static void retire_cmdbatch(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+static void retire_cmdobj(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
uint64_t start = 0, end = 0;
- if (cmdbatch->fault_recovery != 0) {
- set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv);
- _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch);
+ if (cmdobj->fault_recovery != 0) {
+ set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv);
+ _print_recovery(KGSL_DEVICE(adreno_dev), cmdobj);
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
- cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv))
+ cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end);
/*
* For A3xx we still get the rptr from the CP_RB_RPTR instead of
@@ -1942,48 +2110,49 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev,
* So avoid reading GPU register directly for A3xx.
*/
if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch), 0);
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery);
else
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch),
- adreno_get_rptr(drawctxt->rb));
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj),
+ adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery);
drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
- end - cmdbatch->submit_ticks;
+ end - cmdobj->submit_ticks;
drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
SUBMIT_RETIRE_TICKS_SIZE;
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
}
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int count = 0;
- while (!adreno_cmdqueue_is_empty(cmdqueue)) {
- struct kgsl_cmdbatch *cmdbatch =
- cmdqueue->cmd_q[cmdqueue->head];
+ while (!adreno_drawqueue_is_empty(drawqueue)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ drawqueue->cmd_q[drawqueue->head];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!kgsl_check_timestamp(device, cmdbatch->context,
- cmdbatch->timestamp))
+ if (!kgsl_check_timestamp(device, drawobj->context,
+ drawobj->timestamp))
break;
- retire_cmdbatch(adreno_dev, cmdbatch);
+ retire_cmdobj(adreno_dev, cmdobj);
dispatcher->inflight--;
- cmdqueue->inflight--;
+ drawqueue->inflight--;
- cmdqueue->cmd_q[cmdqueue->head] = NULL;
+ drawqueue->cmd_q[drawqueue->head] = NULL;
- cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
count++;
}
@@ -1992,13 +2161,14 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
}
static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head];
+ struct kgsl_drawobj *drawobj =
+ DRAWOBJ(drawqueue->cmd_q[drawqueue->head]);
/* Don't timeout if the timer hasn't expired yet (duh) */
- if (time_is_after_jiffies(cmdqueue->expires))
+ if (time_is_after_jiffies(drawqueue->expires))
return;
/* Don't timeout if the IB timeout is disabled globally */
@@ -2006,30 +2176,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
return;
/* Don't time out if the context has disabled it */
- if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
return;
- pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n",
+ drawobj->context->id, drawobj->timestamp);
adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
}
-static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue);
+ int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue);
/* Nothing to do if there are no pending commands */
- if (adreno_cmdqueue_is_empty(cmdqueue))
+ if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the cmdqueue timeout if we are about to preempt out */
+ /* Don't update the drawqueue timeout if we are about to preempt out */
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
return count;
- /* Don't update the cmdqueue timeout if it isn't active */
- if (!cmdqueue_is_current(cmdqueue))
+ /* Don't update the drawqueue timeout if it isn't active */
+ if (!drawqueue_is_current(drawqueue))
return count;
/*
@@ -2038,17 +2208,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
*/
if (count) {
- cmdqueue->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ drawqueue->expires = jiffies +
+ msecs_to_jiffies(adreno_drawobj_timeout);
return count;
}
/*
* If we get here then 1) the ringbuffer is current and 2) we haven't
* retired anything. Check to see if the timeout is valid for the
- * current cmdbatch and fault if it has expired
+ * current drawobj and fault if it has expired
*/
- _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ _adreno_dispatch_check_timeout(adreno_dev, drawqueue);
return 0;
}
@@ -2067,11 +2237,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
/* Check to see if we need to update the command timer */
if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(adreno_dev->cur_rb);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(adreno_dev->cur_rb);
- if (!adreno_cmdqueue_is_empty(cmdqueue))
- mod_timer(&dispatcher->timer, cmdqueue->expires);
+ if (!adreno_drawqueue_is_empty(drawqueue))
+ mod_timer(&dispatcher->timer, drawqueue->expires);
}
}
@@ -2111,14 +2281,14 @@ static void adreno_dispatcher_work(struct work_struct *work)
/*
* As long as there are inflight commands, process retired commands from
- * all cmdqueues
+ * all drawqueues
*/
for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(&adreno_dev->ringbuffers[i]);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(&adreno_dev->ringbuffers[i]);
- count += adreno_dispatch_process_cmdqueue(adreno_dev,
- cmdqueue);
+ count += adreno_dispatch_process_drawqueue(adreno_dev,
+ drawqueue);
if (dispatcher->inflight == 0)
break;
}
@@ -2178,7 +2348,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
}
/*
- * This is called on a regular basis while command batches are inflight. Fault
+ * This is called on a regular basis while cmdobjs are inflight. Fault
* detection registers are read and compared to the existing values - if they
* changed then the GPU is still running. If they are the same between
* subsequent calls then the GPU may have faulted
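/*
 * Editor's sketch of the comparison described above (helper name is
 * illustrative): snapshot a set of progress registers on each timer tick
 * and report whether any of them moved since the previous tick.
 */
static bool gpu_made_progress(unsigned int *prev, const unsigned int *cur,
		int count)
{
	bool moved = false;
	int i;

	for (i = 0; i < count; i++) {
		if (prev[i] != cur[i])
			moved = true;
		prev[i] = cur[i];
	}
	return moved;	/* false on two identical samples => possible hang */
}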
@@ -2230,7 +2400,7 @@ static void adreno_dispatcher_timer(unsigned long data)
*/
void adreno_dispatcher_start(struct kgsl_device *device)
{
- complete_all(&device->cmdbatch_gate);
+ complete_all(&device->halt_gate);
/* Schedule the work loop to get things going */
adreno_dispatcher_schedule(device);
@@ -2267,13 +2437,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev)
del_timer_sync(&dispatcher->fault_timer);
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(rb->dispatch_q);
- while (!adreno_cmdqueue_is_empty(dispatch_q)) {
- kgsl_cmdbatch_destroy(
- dispatch_q->cmd_q[dispatch_q->head]);
+ while (!adreno_drawqueue_is_empty(dispatch_q)) {
+ kgsl_drawobj_destroy(
+ DRAWOBJ(dispatch_q->cmd_q[dispatch_q->head]));
dispatch_q->head = (dispatch_q->head + 1)
- % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ % ADRENO_DISPATCH_DRAWQUEUE_SIZE;
}
}
@@ -2332,23 +2502,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
*((unsigned int *) attr->value));
}
-static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE,
_dispatcher_q_inflight_hi);
static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644,
- ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo);
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo);
/*
* Our code that "puts back" a command from the context is much cleaner
* if we are sure that there will always be enough room in the
* ringbuffer, so restrict the maximum size of the context queue to
- * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1
*/
-static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
- ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size);
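/*
 * Editor's note, not part of the patch: the SIZE - 1 cap follows from the
 * "head == tail means empty" convention these rings use. A ring holding
 * all SIZE entries would wrap its tail back onto head and become
 * indistinguishable from an empty one, so one slot always stays unused.
 * A hypothetical fullness check under that convention:
 */
static bool context_drawqueue_is_full(struct adreno_context *drawctxt)
{
	return ((drawctxt->drawqueue_tail + 1) %
		ADRENO_CONTEXT_DRAWQUEUE_SIZE) == drawctxt->drawqueue_head;
}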
static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
- _context_cmdbatch_burst);
-static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0,
- adreno_cmdbatch_timeout);
+ _context_drawobj_burst);
+static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0,
+ adreno_drawobj_timeout);
static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
_fault_timer_interval);
@@ -2366,9 +2536,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0,
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
&dispatcher_attr_inflight_low_latency.attr,
- &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_drawqueue_size.attr,
&dispatcher_attr_context_burst_count.attr,
- &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_drawobj_timeout.attr,
&dispatcher_attr_context_queue_wait.attr,
&dispatcher_attr_fault_detect_interval.attr,
&dispatcher_attr_fault_throttle_time.attr,
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 699c3e4adb27..cb9106fedc82 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -15,7 +15,7 @@
#define ____ADRENO_DISPATCHER_H
extern unsigned int adreno_disp_preempt_fair_sched;
-extern unsigned int adreno_cmdbatch_timeout;
+extern unsigned int adreno_drawobj_timeout;
extern unsigned int adreno_dispatch_starvation_time;
extern unsigned int adreno_dispatch_time_slice;
@@ -44,21 +44,21 @@ enum adreno_dispatcher_starve_timer_states {
* sizes that can be chosen at runtime
*/
-#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+#define ADRENO_DISPATCH_DRAWQUEUE_SIZE 128
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
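/*
 * Editor's note: DRAWQUEUE_NEXT is a plain modular increment, so with a
 * size of 128, DRAWQUEUE_NEXT(127, 128) == 0. Draining a queue is an index
 * walk until head catches up with tail (sketch; visit() is illustrative):
 *
 *	for (i = q->head; i != q->tail;
 *			i = DRAWQUEUE_NEXT(i, ADRENO_DISPATCH_DRAWQUEUE_SIZE))
 *		visit(q->cmd_q[i]);
 */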
/**
- * struct adreno_dispatcher_cmdqueue - List of commands for a RB level
- * @cmd_q: List of command batches submitted to dispatcher
+ * struct adreno_dispatcher_drawqueue - List of commands for an RB level
+ * @cmd_q: List of command objs submitted to the dispatcher
* @inflight: Number of commands inflight in this q
* @head: Head pointer to the q
* @tail: Queue's tail pointer
- * @active_context_count: Number of active contexts seen in this rb cmdqueue
- * @expires: The jiffies value at which this cmdqueue has run too long
+ * @active_context_count: Number of active contexts seen in this rb drawqueue
+ * @expires: The jiffies value at which this drawqueue has run too long
*/
-struct adreno_dispatcher_cmdqueue {
- struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+struct adreno_dispatcher_drawqueue {
+ struct kgsl_drawobj_cmd *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE];
unsigned int inflight;
unsigned int head;
unsigned int tail;
@@ -70,10 +70,10 @@ struct adreno_dispatcher_cmdqueue {
* struct adreno_dispatcher - container for the adreno GPU dispatcher
* @mutex: Mutex to protect the structure
* @state: Current state of the dispatcher (active or paused)
- * @timer: Timer to monitor the progress of the command batches
- * @inflight: Number of command batch operations pending in the ringbuffer
+ * @timer: Timer to monitor the progress of the drawobjs
+ * @inflight: Number of drawobj operations pending in the ringbuffer
* @fault: Non-zero if a fault was detected.
- * @pending: Priority list of contexts waiting to submit command batches
+ * @pending: Priority list of contexts waiting to submit drawobjs
* @plist_lock: Spin lock to protect the pending queue
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
@@ -109,9 +109,9 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp);
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void adreno_dispatcher_schedule(struct kgsl_device *device);
void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
@@ -120,11 +120,11 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
int bit);
void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q);
+ struct adreno_dispatcher_drawqueue *dispatch_q);
-static inline bool adreno_cmdqueue_is_empty(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool adreno_drawqueue_is_empty(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail);
+ return (drawqueue != NULL && drawqueue->head == drawqueue->tail);
}
#endif /* __ADRENO_DISPATCHER_H */
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index d9ebe37d0cf0..3a110ed221a8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);
/*
- * We may have cmdbatch timer running, which also uses same
+ * We may have a kgsl sync obj timer running, which also uses the same
* lock, so take the lock with software interrupts disabled (bh)
* to avoid spin lock recursion.
*
* Use spin_trylock because the dispatcher can acquire drawctxt->lock
* if context is pending and the fence it is waiting on just got
* signalled. Dispatcher acquires drawctxt->lock and tries to
- * delete the cmdbatch timer using del_timer_sync().
+ * delete the sync obj timer using del_timer_sync().
* del_timer_sync() waits till timer and its pending handlers
* are deleted. But if the timer expires at the same time,
* timer handler could be waiting on drawctxt->lock leading to a
@@ -83,23 +83,27 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
context->id, queue, drawctxt->submitted_timestamp,
start, retire);
- if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) {
+ if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
dev_err(device->dev,
" possible deadlock. Context %d might be blocked for itself\n",
context->id);
goto stats;
}
- if (kgsl_cmdbatch_events_pending(cmdbatch)) {
- dev_err(device->dev,
- " context[%d] (ts=%d) Active sync points:\n",
- context->id, cmdbatch->timestamp);
+ if (drawobj->type == SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
+
+ if (kgsl_drawobj_events_pending(syncobj)) {
+ dev_err(device->dev,
+ " context[%d] (ts=%d) Active sync points:\n",
+ context->id, drawobj->timestamp);
- kgsl_dump_syncpoints(device, cmdbatch);
+ kgsl_dump_syncpoints(device, syncobj);
+ }
}
}
@@ -229,19 +233,19 @@ done:
return ret;
}
-static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch **list)
+static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
+ struct kgsl_drawobj **list)
{
int count = 0;
- while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+ drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
- list[count++] = cmdbatch;
+ list[count++] = drawobj;
}
return count;
@@ -259,7 +263,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
int i, count;
trace_adreno_drawctxt_invalidate(drawctxt);
@@ -280,13 +284,13 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
drawctxt->timestamp);
/* Get rid of commands still waiting in the queue */
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
kgsl_cancel_events_timestamp(device, &context->events,
list[i]->timestamp);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/* Make sure all pending events are processed or cancelled */
@@ -453,7 +457,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
struct adreno_context *drawctxt;
struct adreno_ringbuffer *rb;
int ret, count, i;
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
if (context == NULL)
return;
@@ -468,7 +472,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
spin_unlock(&adreno_dev->active_list_lock);
spin_lock(&drawctxt->lock);
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
@@ -478,7 +482,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
* detached status here.
*/
adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/*
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 5ea911954991..0578f16ae9e1 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -18,7 +18,7 @@ struct adreno_context_type {
const char *str;
};
-#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128
#define SUBMIT_RETIRE_TICKS_SIZE 7
struct kgsl_device;
@@ -32,20 +32,21 @@ struct kgsl_context;
* @internal_timestamp: Global timestamp of the last issued command
* NOTE: guarded by device->mutex, not drawctxt->mutex!
* @type: Context type (GL, CL, RS)
- * @mutex: Mutex to protect the cmdqueue
- * @cmdqueue: Queue of command batches waiting to be dispatched for this context
- * @cmdqueue_head: Head of the cmdqueue queue
- * @cmdqueue_tail: Tail of the cmdqueue queue
+ * @mutex: Mutex to protect the drawqueue
+ * @drawqueue: Queue of drawobjs waiting to be dispatched for this
+ * context
+ * @drawqueue_head: Head of the drawqueue
+ * @drawqueue_tail: Tail of the drawqueue
* @pending: Priority list node for the dispatcher list of pending contexts
* @wq: Workqueue structure for contexts to sleep pending room in the queue
* @waiting: Workqueue structure for contexts waiting for a timestamp or event
- * @queued: Number of commands queued in the cmdqueue
- * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd();
+ * @queued: Number of commands queued in the drawqueue
+ * @fault_policy: GFT fault policy set in _skip_cmd();
* @debug_root: debugfs entry for this context.
* @queued_timestamp: The last timestamp that was queued on this context
* @rb: The ringbuffer in which this context submits commands.
* @submitted_timestamp: The last timestamp that was submitted for this context
- * @submit_retire_ticks: Array to hold cmdbatch execution times from submit
+ * @submit_retire_ticks: Array to hold command obj execution times from submit
* to retire
* @ticks_index: The index into submit_retire_ticks[] where the new delta will
* be written.
@@ -60,9 +61,9 @@ struct adreno_context {
spinlock_t lock;
/* Dispatcher */
- struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
- unsigned int cmdqueue_head;
- unsigned int cmdqueue_tail;
+ struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
+ unsigned int drawqueue_head;
+ unsigned int drawqueue_tail;
struct plist_node pending;
wait_queue_head_t wq;
@@ -92,8 +93,9 @@ struct adreno_context {
* @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame
* marker.
* @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission.
- * @ADRENO_CONTEXT_SKIP_CMD - Context's command batch is skipped during
+ * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobjs are skipped during
fault tolerance.
+ * @ADRENO_CONTEXT_FENCE_LOG - Dump fences on this context.
*/
enum adreno_context_priv {
ADRENO_CONTEXT_FAULT = KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC,
@@ -102,6 +104,7 @@ enum adreno_context_priv {
ADRENO_CONTEXT_SKIP_EOF,
ADRENO_CONTEXT_FORCE_PREAMBLE,
ADRENO_CONTEXT_SKIP_CMD,
+ ADRENO_CONTEXT_FENCE_LOG,
};
/* Flags for adreno_drawctxt_switch() */
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 07ef09034d7c..fc0602a60ac1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -671,96 +671,17 @@ adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
sizedwords, 0, NULL);
}
-/**
- * _ringbuffer_verify_ib() - Check if an IB's size is within a permitted limit
- * @device: The kgsl device pointer
- * @ibdesc: Pointer to the IB descriptor
- */
-static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_memobj_node *ib)
-{
- struct kgsl_device *device = dev_priv->device;
- struct kgsl_process_private *private = dev_priv->process_priv;
-
- /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
- if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
- pr_context(device, context, "ctxt %d invalid ib size %lld\n",
- context->id, ib->size);
- return false;
- }
-
- /* Make sure that the address is mapped */
- if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
- pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
- context->id, ib->gpuaddr);
- return false;
- }
-
- return true;
-}
-
-int
-adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
-{
- struct kgsl_device *device = dev_priv->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_memobj_node *ib;
- int ret;
-
- if (kgsl_context_invalid(context))
- return -EDEADLK;
-
- /* Verify the IBs before they get queued */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
- if (_ringbuffer_verify_ib(dev_priv, context, ib) == false)
- return -EINVAL;
-
- /* wait for the suspend gate */
- wait_for_completion(&device->cmdbatch_gate);
-
- /*
- * Clear the wake on touch bit to indicate an IB has been
- * submitted since the last time we set it. But only clear
- * it when we have rendering commands.
- */
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)
- && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC))
- device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
-
- /* A3XX does not have support for command batch profiling */
- if (adreno_is_a3xx(adreno_dev) &&
- (cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
- return -EOPNOTSUPP;
-
- /* Queue the command in the ringbuffer */
- ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
- timestamp);
-
- /*
- * Return -EPROTO if the device has faulted since the last time we
- * checked - userspace uses this to perform post-fault activities
- */
- if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
- ret = -EPROTO;
-
- return ret;
-}
-
static void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
+ struct kgsl_context *context = drawobj->context;
/*
* Check if the context has a constraint and constraint flags are
* set.
*/
if (context->pwr_constraint.type &&
((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) ||
- (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
+ (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint,
context->id);
}
@@ -792,10 +713,12 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
+ struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_submit_time *time)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct kgsl_memobj_node *ib;
unsigned int numibs = 0;
unsigned int *link;
@@ -803,25 +726,25 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_context *context;
struct adreno_context *drawctxt;
bool use_preamble = true;
- bool cmdbatch_user_profiling = false;
- bool cmdbatch_kernel_profiling = false;
+ bool user_profiling = false;
+ bool kernel_profiling = false;
int flags = KGSL_CMD_FLAGS_NONE;
int ret;
struct adreno_ringbuffer *rb;
- struct kgsl_cmdbatch_profiling_buffer *profile_buffer = NULL;
+ struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
unsigned int dwords = 0;
struct adreno_submit_time local;
- struct kgsl_mem_entry *entry = cmdbatch->profiling_buf_entry;
+ struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry;
if (entry)
profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
- cmdbatch->profiling_buffer_gpuaddr);
+ cmdobj->profiling_buffer_gpuaddr);
- context = cmdbatch->context;
+ context = drawobj->context;
drawctxt = ADRENO_CONTEXT(context);
/* Get the total IBs in the list */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
numibs++;
rb = drawctxt->rb;
@@ -838,14 +761,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* c) force preamble for commandbatch
*/
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) {
+ (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) {
- set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery);
- cmdbatch->fault_policy = drawctxt->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery);
+ cmdobj->fault_policy = drawctxt->fault_policy;
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/* if context is detached print fault recovery */
- adreno_fault_skipcmd_detached(adreno_dev, drawctxt, cmdbatch);
+ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj);
/* clear the drawctxt flags */
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
@@ -857,7 +780,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
if a context switch hasn't occurred */
if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) &&
- !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+ !test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) &&
(rb->drawctxt_active == drawctxt))
use_preamble = false;
@@ -867,7 +790,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* the accounting sane. Set start_index and numibs to 0 to just
* generate the start and end markers and skip everything else
*/
- if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+ if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) {
use_preamble = false;
numibs = 0;
}
@@ -884,9 +807,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/* Each IB takes up 30 dwords in worst case */
dwords += (numibs * 30);
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING &&
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING &&
!adreno_is_a3xx(adreno_dev) && profile_buffer) {
- cmdbatch_user_profiling = true;
+ user_profiling = true;
dwords += 6;
/*
@@ -907,8 +830,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
time = &local;
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) {
- cmdbatch_kernel_profiling = true;
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
+ kernel_profiling = true;
dwords += 6;
if (adreno_is_a5xx(adreno_dev))
dwords += 2;
@@ -929,26 +852,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
started));
}
/*
- * Add cmds to read the GPU ticks at the start of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the start of the command obj and
+ * write them into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_submitted));
}
if (numibs) {
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
/*
* Skip 0 sized IBs - these are presumed to have been
* removed from consideration by the FT policy
@@ -972,21 +895,21 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
adreno_is_preemption_enabled(adreno_dev))
cmds += gpudev->preemption_yield_enable(cmds);
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
retired));
}
/*
- * Add cmds to read the GPU ticks at the end of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the end of the command obj and
+ * write them into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_retired));
}
@@ -1012,7 +935,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
goto done;
}
- if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+ if (test_bit(CMDOBJ_WFI, &cmdobj->priv))
flags = KGSL_CMD_FLAGS_WFI;
/*
@@ -1025,26 +948,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
/* Set the constraints before adding to ringbuffer */
- adreno_ringbuffer_set_constraint(device, cmdbatch);
+ adreno_ringbuffer_set_constraint(device, drawobj);
/* CFF stuff executed only if CFF is enabled */
- kgsl_cffdump_capture_ib_desc(device, context, cmdbatch);
+ kgsl_cffdump_capture_ib_desc(device, context, cmdobj);
ret = adreno_ringbuffer_addcmds(rb, flags,
&link[0], (cmds - link),
- cmdbatch->timestamp, time);
+ drawobj->timestamp, time);
if (!ret) {
- cmdbatch->global_ts = drawctxt->internal_timestamp;
+ cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
/*
* Return kernel clock time to the client
* if requested
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING_KTIME) {
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) {
uint64_t secs = time->ktime;
profile_buffer->wall_clock_ns =
@@ -1069,9 +992,8 @@ done:
kgsl_memdesc_unmap(&entry->memdesc);
- trace_kgsl_issueibcmds(device, context->id, cmdbatch,
- numibs, cmdbatch->timestamp,
- cmdbatch->flags, ret, drawctxt->type);
+ trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp,
+ drawobj->flags, ret, drawctxt->type);
kfree(link);
return ret;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index b126f710b5e6..63374af1e3f7 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -119,7 +119,7 @@ struct adreno_ringbuffer {
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
struct kgsl_memdesc pagetable_desc;
- struct adreno_dispatcher_cmdqueue dispatch_q;
+ struct adreno_dispatcher_drawqueue dispatch_q;
wait_queue_head_t ts_expire_waitq;
unsigned int wptr_preempt_end;
unsigned int gpr11;
@@ -136,11 +136,11 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
uint32_t *timestamp);
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj_cmd *cmdobj,
struct adreno_submit_time *time);
int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt);
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index f52ddfa894d5..16ca0980cfbe 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -27,8 +27,8 @@
#include "adreno_a5xx.h"
TRACE_EVENT(adreno_cmdbatch_queued,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
- TP_ARGS(cmdbatch, queued),
+ TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int queued),
+ TP_ARGS(drawobj, queued),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -37,26 +37,26 @@ TRACE_EVENT(adreno_cmdbatch_queued,
__field(unsigned int, prio)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->queued = queued;
- __entry->flags = cmdbatch->flags;
- __entry->prio = cmdbatch->context->priority;
+ __entry->flags = drawobj->flags;
+ __entry->prio = drawobj->context->priority;
),
TP_printk(
"ctx=%u ctx_prio=%u ts=%u queued=%u flags=%s",
__entry->id, __entry->prio,
__entry->timestamp, __entry->queued,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none"
+ KGSL_DRAWOBJ_FLAGS) : "none"
)
);
TRACE_EVENT(adreno_cmdbatch_submitted,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks,
unsigned long secs, unsigned long usecs,
struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr),
+ TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -72,14 +72,14 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__field(int, q_inflight)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->flags = cmdbatch->flags;
+ __entry->flags = drawobj->flags;
__entry->ticks = ticks;
__entry->secs = secs;
__entry->usecs = usecs;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -90,7 +90,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__entry->id, __entry->prio, __entry->timestamp,
__entry->inflight,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->ticks, __entry->secs, __entry->usecs,
__entry->rb_id, __entry->rptr, __entry->wptr,
__entry->q_inflight
@@ -98,10 +98,11 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
);
TRACE_EVENT(adreno_cmdbatch_retired,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight,
uint64_t start, uint64_t retire,
- struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr),
+ struct adreno_ringbuffer *rb, unsigned int rptr,
+ unsigned long fault_recovery),
+ TP_ARGS(drawobj, inflight, start, retire, rb, rptr, fault_recovery),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -115,16 +116,17 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__field(unsigned int, rptr)
__field(unsigned int, wptr)
__field(int, q_inflight)
+ __field(unsigned long, fault_recovery)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->recovery = cmdbatch->fault_recovery;
- __entry->flags = cmdbatch->flags;
+ __entry->recovery = fault_recovery;
+ __entry->flags = drawobj->flags;
__entry->start = start;
__entry->retire = retire;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -138,7 +140,7 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__print_flags(__entry->recovery, "|",
ADRENO_FT_TYPES) : "none",
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->start,
__entry->retire,
__entry->rb_id, __entry->rptr, __entry->wptr,
@@ -147,16 +149,16 @@ TRACE_EVENT(adreno_cmdbatch_retired,
);
TRACE_EVENT(adreno_cmdbatch_fault,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
- TP_ARGS(cmdbatch, fault),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault),
+ TP_ARGS(cmdobj, fault),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, fault)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->fault = fault;
),
TP_printk(
@@ -171,16 +173,16 @@ TRACE_EVENT(adreno_cmdbatch_fault,
);
TRACE_EVENT(adreno_cmdbatch_recovery,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
- TP_ARGS(cmdbatch, action),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int action),
+ TP_ARGS(cmdobj, action),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, action)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->action = action;
),
TP_printk(
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 88581b079246..add4590bbb90 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -36,7 +36,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"
@@ -1497,11 +1497,17 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_ringbuffer_issueibcmds *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
long result = -EINVAL;
/* The legacy functions don't support synchronization commands */
- if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)))
+ if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
+ return -EINVAL;
+
+ /* Sanity check the number of IBs */
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
+ (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
return -EINVAL;
/* Get the context */
@@ -1509,23 +1515,20 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
+ CMDOBJ_TYPE);
+ if (IS_ERR(cmdobj)) {
+ kgsl_context_put(context);
+ return PTR_ERR(cmdobj);
}
- if (param->flags & KGSL_CMDBATCH_SUBMIT_IB_LIST) {
- /* Sanity check the number of IBs */
- if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) {
- result = -EINVAL;
- goto done;
- }
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
(void __user *) param->ibdesc_addr,
param->numibs);
- } else {
+ else {
struct kgsl_ibdesc ibdesc;
/* Ultra legacy path */
@@ -1533,83 +1536,119 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
ibdesc.sizedwords = param->numibs;
ibdesc.ctrl = 0;
- result = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
}
- if (result)
- goto done;
-
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ if (result == 0)
+ result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
+ &drawobj, 1, &param->timestamp);
-done:
/*
* -EPROTO is a "success" error - it just tells the user that the
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
kgsl_context_put(context);
return result;
}
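
A minimal standalone C sketch of the create-then-unwind pattern the rewritten ioctls below rely on: build up to two draw objects, then destroy whatever was created if anything fails. Names and values here are illustrative, not the driver's; the real code also exempts -EPROTO, which signals "queued, but the context had previously faulted".

    #include <stdlib.h>

    int main(void)
    {
            void *objs[2];
            unsigned int i = 0;
            int result = -1;        /* pretend the queue step failed */

            objs[i++] = malloc(16); /* stands in for the sync obj */
            objs[i++] = malloc(16); /* stands in for the cmd obj */

            if (result != 0)        /* the real code also lets -EPROTO skip this */
                    while (i--)
                            free(objs[i]);  /* destroy in reverse creation order */

            return 0;
    }
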
+/* Returns 0 on failure. Returns command type(s) on success */
+static unsigned int _process_command_input(struct kgsl_device *device,
+ unsigned int flags, unsigned int numcmds,
+ unsigned int numobjs, unsigned int numsyncs)
+{
+ if (numcmds > KGSL_MAX_NUMIBS ||
+ numobjs > KGSL_MAX_NUMIBS ||
+ numsyncs > KGSL_MAX_SYNCPOINTS)
+ return 0;
+
+ /*
+ * The SYNC bit identifies a dummy sync object, so any IBs
+ * specified along with it are ignored. A MARKER command can
+ * either have IBs or not, but if the command has 0 IBs it is
+ * automatically assumed to be a marker.
+ */
+
+ /* If they specify the flag, go with what they say */
+ if (flags & KGSL_DRAWOBJ_MARKER)
+ return MARKEROBJ_TYPE;
+ else if (flags & KGSL_DRAWOBJ_SYNC)
+ return SYNCOBJ_TYPE;
+
+ /* If not, deduce what they meant */
+ if (numsyncs && numcmds)
+ return SYNCOBJ_TYPE | CMDOBJ_TYPE;
+ else if (numsyncs)
+ return SYNCOBJ_TYPE;
+ else if (numcmds)
+ return CMDOBJ_TYPE;
+ else if (numcmds == 0)
+ return MARKEROBJ_TYPE;
+
+ return 0;
+}
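
A standalone sketch of the type-deduction table that _process_command_input() implements above, leaving out the bounds checks; the flag and type bit values below are stand-ins, not the kernel's definitions:

    #include <stdio.h>

    #define FLAG_MARKER  0x1
    #define FLAG_SYNC    0x2

    #define TYPE_CMD     0x1
    #define TYPE_MARKER  0x2
    #define TYPE_SYNC    0x4

    static unsigned int deduce_type(unsigned int flags,
                    unsigned int numcmds, unsigned int numsyncs)
    {
            if (flags & FLAG_MARKER)        /* an explicit flag wins */
                    return TYPE_MARKER;
            if (flags & FLAG_SYNC)
                    return TYPE_SYNC;
            if (numsyncs && numcmds)        /* both lists -> two objects */
                    return TYPE_SYNC | TYPE_CMD;
            if (numsyncs)
                    return TYPE_SYNC;
            if (numcmds)
                    return TYPE_CMD;
            return TYPE_MARKER;             /* zero IBs -> implicit marker */
    }

    int main(void)
    {
            /* e.g. a command list plus a sync list yields both types */
            printf("%x\n", deduce_type(0, 4, 2)); /* prints 5 */
            return 0;
    }
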
+
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_submit_commands *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
- long result = -EINVAL;
-
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
-
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (param->numcmds > KGSL_MAX_NUMIBS)
- return -EINVAL;
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /* Make sure that we don't have too many syncpoints */
- if (param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds, 0,
+ param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_syncpoints(device, syncobj,
+ param->synclist, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
- param->cmdlist, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_syncpoints(device, cmdbatch,
- param->synclist, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
+ param->cmdlist, param->numcmds);
+ if (result)
+ goto done;
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
+
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1617,7 +1656,9 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
+
kgsl_context_put(context);
return result;
@@ -1629,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
struct kgsl_gpu_command *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
-
- long result = -EINVAL;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
-
- /* Make sure that the memobj and syncpoint count isn't too big */
- if (param->numcmds > KGSL_MAX_NUMIBS ||
- param->numobjs > KGSL_MAX_NUMIBS ||
- param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds,
+ param->numobjs, param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_synclist(device, syncobj,
+ to_user_ptr(param->synclist),
+ param->syncsize, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_cmdlist(device, cmdbatch,
- to_user_ptr(param->cmdlist),
- param->cmdsize, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
- result = kgsl_cmdbatch_add_memlist(device, cmdbatch,
- to_user_ptr(param->objlist),
- param->objsize, param->numobjs);
- if (result)
- goto done;
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_synclist(device, cmdbatch,
- to_user_ptr(param->synclist),
- param->syncsize, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
+
+ result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
+ to_user_ptr(param->cmdlist),
+ param->cmdsize, param->numcmds);
+ if (result)
+ goto done;
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
+ to_user_ptr(param->objlist),
+ param->objsize, param->numobjs);
+ if (result)
+ goto done;
+
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1693,7 +1740,8 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
kgsl_context_put(context);
return result;
@@ -4600,7 +4648,7 @@ static void kgsl_core_exit(void)
kgsl_driver.class = NULL;
}
- kgsl_cmdbatch_exit();
+ kgsl_drawobj_exit();
kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@@ -4676,7 +4724,7 @@ static int __init kgsl_core_init(void)
kgsl_events_init();
- result = kgsl_cmdbatch_init();
+ result = kgsl_drawobj_init();
if (result)
goto err;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 7ac84b777051..826c4edb3582 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -28,6 +28,25 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+/*
+ * --- kgsl drawobj flags ---
+ * These flags are the same as the --- cmdbatch flags ---
+ * but renamed to reflect that cmdbatch has been renamed to drawobj.
+ */
+#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST
+#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER
+#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST
+#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH
+#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING
+#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME
+#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME
+#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC
+#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT
+#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE
+
+#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer
+
+
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, update multiple for MEMSTORE_SIZE
*/
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
index 8e783f8ce017..3337570477f9 100644
--- a/drivers/gpu/msm/kgsl_cffdump.c
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device,
*/
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
int ret = 0;
struct kgsl_memobj_node *ib;
@@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
if (!device->cff_dump_enable)
return 0;
/* Dump CFF for IB and all objects in it */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
ret = kgsl_cffdump_capture_adreno_ib_cff(
device, context->proc_priv, ib->gpuaddr,
ib->size >> 2);
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
index 315a097ba817..14bc397cb570 100644
--- a/drivers/gpu/msm/kgsl_cffdump.h
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011,2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011,2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val);
int kgsl_cff_dump_enable_get(void *data, u64 *val);
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj_cmd *cmdobj);
void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5);
@@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device,
static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
return 0;
}
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h
deleted file mode 100644
index d5cbf375b5d3..000000000000
--- a/drivers/gpu/msm/kgsl_cmdbatch.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __KGSL_CMDBATCH_H
-#define __KGSL_CMDBATCH_H
-
-#define KGSL_CMDBATCH_FLAGS \
- { KGSL_CMDBATCH_MARKER, "MARKER" }, \
- { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \
- { KGSL_CMDBATCH_SYNC, "SYNC" }, \
- { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \
- { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
- { KGSL_CMDBATCH_SUBMIT_IB_LIST, "IB_LIST" }
-
-/**
- * struct kgsl_cmdbatch - KGSl command descriptor
- * @device: KGSL GPU device that the command was created for
- * @context: KGSL context that created the command
- * @timestamp: Timestamp assigned to the command
- * @flags: flags
- * @priv: Internal flags
- * @fault_policy: Internal policy describing how to handle this command in case
- * of a fault
- * @fault_recovery: recovery actions actually tried for this batch
- * @refcount: kref structure to maintain the reference count
- * @cmdlist: List of IBs to issue
- * @memlist: List of all memory used in this command batch
- * @synclist: Array of context/timestamp tuples to wait for before issuing
- * @numsyncs: Number of sync entries in the array
- * @pending: Bitmask of sync events that are active
- * @timer: a timer used to track possible sync timeouts for this cmdbatch
- * @marker_timestamp: For markers, the timestamp of the last "real" command that
- * was queued
- * @profiling_buf_entry: Mem entry containing the profiling buffer
- * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
- * for easy access
- * @profile_index: Index to store the start/stop ticks in the kernel profiling
- * buffer
- * @submit_ticks: Variable to hold ticks at the time of cmdbatch submit.
- * @global_ts: The ringbuffer timestamp corresponding to this cmdbatch
- * @timeout_jiffies: For a syncpoint cmdbatch the jiffies at which the
- * timer will expire
- * This structure defines an atomic batch of command buffers issued from
- * userspace.
- */
-struct kgsl_cmdbatch {
- struct kgsl_device *device;
- struct kgsl_context *context;
- uint32_t timestamp;
- uint32_t flags;
- unsigned long priv;
- unsigned long fault_policy;
- unsigned long fault_recovery;
- struct kref refcount;
- struct list_head cmdlist;
- struct list_head memlist;
- struct kgsl_cmdbatch_sync_event *synclist;
- unsigned int numsyncs;
- unsigned long pending;
- struct timer_list timer;
- unsigned int marker_timestamp;
- struct kgsl_mem_entry *profiling_buf_entry;
- uint64_t profiling_buffer_gpuaddr;
- unsigned int profile_index;
- uint64_t submit_ticks;
- unsigned int global_ts;
- unsigned long timeout_jiffies;
-};
-
-/**
- * struct kgsl_cmdbatch_sync_event
- * @id: identifer (positiion within the pending bitmap)
- * @type: Syncpoint type
- * @cmdbatch: Pointer to the cmdbatch that owns the sync event
- * @context: Pointer to the KGSL context that owns the cmdbatch
- * @timestamp: Pending timestamp for the event
- * @handle: Pointer to a sync fence handle
- * @device: Pointer to the KGSL device
- */
-struct kgsl_cmdbatch_sync_event {
- unsigned int id;
- int type;
- struct kgsl_cmdbatch *cmdbatch;
- struct kgsl_context *context;
- unsigned int timestamp;
- struct kgsl_sync_fence_waiter *handle;
- struct kgsl_device *device;
-};
-
-/**
- * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
- * @CMDBATCH_FLAG_SKIP - skip the entire command batch
- * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
- * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
- * @CMDBATCH_FLAG_PROFILE - store the start / retire ticks for the command batch
- * in the profiling buffer
- * @CMDBATCH_FLAG_FENCE_LOG - Set if the cmdbatch is dumping fence logs via the
- * cmdbatch timer - this is used to avoid recursion
- */
-
-enum kgsl_cmdbatch_priv {
- CMDBATCH_FLAG_SKIP = 0,
- CMDBATCH_FLAG_FORCE_PREAMBLE,
- CMDBATCH_FLAG_WFI,
- CMDBATCH_FLAG_PROFILE,
- CMDBATCH_FLAG_FENCE_LOG,
-};
-
-
-int kgsl_cmdbatch_add_memobj(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_ibdesc *ibdesc);
-
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmd_syncpoint *sync);
-
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags);
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc);
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-
-int kgsl_cmdbatch_init(void);
-void kgsl_cmdbatch_exit(void);
-
-void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy_object(struct kref *kref);
-
-static inline bool kgsl_cmdbatch_events_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- return !bitmap_empty(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
-}
-
-static inline bool kgsl_cmdbatch_event_pending(struct kgsl_cmdbatch *cmdbatch,
- unsigned int bit)
-{
- if (bit >= KGSL_MAX_SYNCPOINTS)
- return false;
-
- return test_bit(bit, &cmdbatch->pending);
-}
-
-#endif /* __KGSL_CMDBATCH_H */
diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h
index ca1685e5fcf5..7681d74fb108 100644
--- a/drivers/gpu/msm/kgsl_compat.h
+++ b/drivers/gpu/msm/kgsl_compat.h
@@ -236,8 +236,8 @@ static inline compat_size_t sizet_to_compat(size_t size)
return (compat_size_t)size;
}
-int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags,
- struct kgsl_cmdbatch *cmdbatch, void __user *cmdlist,
+int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags,
+ struct kgsl_drawobj *drawobj, void __user *cmdlist,
unsigned int numcmds, void __user *synclist,
unsigned int numsyncs);
@@ -245,8 +245,8 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg);
#else
-static inline int kgsl_cmdbatch_create_compat(struct kgsl_device *device,
- unsigned int flags, struct kgsl_cmdbatch *cmdbatch,
+static inline int kgsl_drawobj_create_compat(struct kgsl_device *device,
+ unsigned int flags, struct kgsl_drawobj *drawobj,
void __user *cmdlist, unsigned int numcmds,
void __user *synclist, unsigned int numsyncs)
{
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 24511a4de6f1..04935e8d0019 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,7 +25,7 @@
#include "kgsl_pwrscale.h"
#include "kgsl_snapshot.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#define KGSL_IOCTL_FUNC(_cmd, _func) \
[_IOC_NR((_cmd))] = \
@@ -127,9 +127,9 @@ struct kgsl_functable {
unsigned int msecs);
int (*readtimestamp) (struct kgsl_device *device, void *priv,
enum kgsl_timestamp_type type, unsigned int *timestamp);
- int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamps);
+ int (*queue_cmds)(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void (*power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
@@ -186,7 +186,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
/**
* struct kgsl_memobj_node - Memory object descriptor
- * @node: Local list node for the cmdbatch
+ * @node: Local list node for the object
* @id: GPU memory ID for the object
* @offset: Offset within the object
* @gpuaddr: GPU address for the object
@@ -235,7 +235,7 @@ struct kgsl_device {
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
- struct completion cmdbatch_gate;
+ struct completion halt_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
@@ -292,7 +292,7 @@ struct kgsl_device {
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
- .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
+ .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
.context_idr = IDR_INIT((_dev).context_idr),\
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_drawobj.c
index 6272410ce544..7840daa6a3e2 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,17 +11,17 @@
*/
/*
- * KGSL command batch management
- * A command batch is a single submission from userland. The cmdbatch
+ * KGSL drawobj management
+ * A drawobj is a single submission from userland. The drawobj
* encapsulates everything about the submission: command buffers, flags and
* sync points.
*
* Sync points are events that need to expire before the
- * cmdbatch can be queued to the hardware. All synpoints are contained in an
- * array of kgsl_cmdbatch_sync_event structs in the command batch. There can be
+ * drawobj can be queued to the hardware. All syncpoints are contained in an
+ * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
* multiple types of events both internal ones (GPU events) and external
* triggers. As the events expire bits are cleared in a pending bitmap stored
- * in the command batch. The GPU will submit the command as soon as the bitmap
+ * in the drawobj. The GPU will submit the command as soon as the bitmap
* goes to zero indicating no more pending events.
*/
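
A minimal userspace sketch of the pending-bitmap scheme this comment describes: each sync point owns one bit, and the draw object becomes eligible for submission when the map reaches zero. All names here are illustrative.

    #include <stdio.h>

    static unsigned long pending;

    static void arm_syncpoint(unsigned int id)
    {
            pending |= 1UL << id;
    }

    static void expire_syncpoint(unsigned int id)
    {
            pending &= ~(1UL << id);
            if (pending == 0)
                    printf("all sync points expired - submit to hardware\n");
    }

    int main(void)
    {
            arm_syncpoint(0);       /* e.g. a GPU timestamp event */
            arm_syncpoint(1);       /* e.g. an external fence */
            expire_syncpoint(1);
            expire_syncpoint(0);    /* last bit clears -> submit */
            return 0;
    }
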
@@ -31,7 +31,7 @@
#include "kgsl.h"
#include "kgsl_device.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"
#include "kgsl_compat.h"
@@ -42,26 +42,43 @@
*/
static struct kmem_cache *memobjs_cache;
-/**
- * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
- * @cmdbatch: Pointer to the command batch object
- */
-static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_object(struct kref *kref)
{
- if (cmdbatch)
- kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+ struct kgsl_drawobj *drawobj = container_of(kref,
+ struct kgsl_drawobj, refcount);
+ struct kgsl_drawobj_sync *syncobj;
+
+ kgsl_context_put(drawobj->context);
+
+ switch (drawobj->type) {
+ case SYNCOBJ_TYPE:
+ syncobj = SYNCOBJ(drawobj);
+ kfree(syncobj->synclist);
+ kfree(syncobj);
+ break;
+ case CMDOBJ_TYPE:
+ case MARKEROBJ_TYPE:
+ kfree(CMDOBJ(drawobj));
+ break;
+ }
+}
+
+static inline void drawobj_put(struct kgsl_drawobj *drawobj)
+{
+ if (drawobj)
+ kref_put(&drawobj->refcount, drawobj_destroy_object);
}
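
A standalone sketch of the put/destroy pattern above, with a plain integer standing in for struct kref: whoever drops the last reference frees the object, and the free routine is where the per-type teardown (as in drawobj_destroy_object()) would branch.

    #include <stdio.h>
    #include <stdlib.h>

    struct obj {
            int refcount;           /* stands in for struct kref */
            int type;
    };

    static void obj_destroy(struct obj *o)
    {
            printf("freeing object of type %d\n", o->type);
            free(o);                /* per-type teardown would branch here */
    }

    static void obj_put(struct obj *o)
    {
            if (o && --o->refcount == 0)
                    obj_destroy(o);
    }

    int main(void)
    {
            struct obj *o = malloc(sizeof(*o));

            if (o == NULL)
                    return 1;
            o->refcount = 2;        /* creator plus one pending event */
            o->type = 1;
            obj_put(o);             /* the event's reference */
            obj_put(o);             /* the creator's reference frees it */
            return 0;
    }
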
void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -90,32 +107,33 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
}
}
-static void _kgsl_cmdbatch_timer(unsigned long data)
+static void syncobj_timer(unsigned long data)
{
struct kgsl_device *device;
- struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- if (cmdbatch == NULL || cmdbatch->context == NULL)
+ if (syncobj == NULL || drawobj->context == NULL)
return;
- device = cmdbatch->context->device;
+ device = drawobj->context->device;
dev_err(device->dev,
"kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ drawobj->context->id, drawobj->timestamp);
- set_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
- kgsl_context_dump(cmdbatch->context);
- clear_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
+ set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
+ kgsl_context_dump(drawobj->context);
+ clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
dev_err(device->dev, " pending events:\n");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -137,48 +155,31 @@ static void _kgsl_cmdbatch_timer(unsigned long data)
dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}
-/**
- * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
- * @kref: Pointer to the kref structure for this object
- *
- * Actually destroy a command batch object. Called from kgsl_cmdbatch_put
- */
-void kgsl_cmdbatch_destroy_object(struct kref *kref)
-{
- struct kgsl_cmdbatch *cmdbatch = container_of(kref,
- struct kgsl_cmdbatch, refcount);
-
- kgsl_context_put(cmdbatch->context);
-
- kfree(cmdbatch->synclist);
- kfree(cmdbatch);
-}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
-
/*
* a generic function to retire a pending sync event and (possibly)
* kick the dispatcher
*/
-static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
- struct kgsl_cmdbatch_sync_event *event)
+static void drawobj_sync_expire(struct kgsl_device *device,
+ struct kgsl_drawobj_sync_event *event)
{
+ struct kgsl_drawobj_sync *syncobj = event->syncobj;
/*
* Clear the event from the pending mask - if it is already clear, then
* leave without doing anything useful
*/
- if (!test_and_clear_bit(event->id, &event->cmdbatch->pending))
+ if (!test_and_clear_bit(event->id, &syncobj->pending))
return;
/*
* If no more pending events, delete the timer and schedule the command
* for dispatch
*/
- if (!kgsl_cmdbatch_events_pending(event->cmdbatch)) {
- del_timer_sync(&event->cmdbatch->timer);
+ if (!kgsl_drawobj_events_pending(event->syncobj)) {
+ del_timer_sync(&syncobj->timer);
if (device->ftbl->drawctxt_sched)
device->ftbl->drawctxt_sched(device,
- event->cmdbatch->context);
+ event->syncobj->base.context);
}
}
@@ -186,20 +187,20 @@ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
* This function is called by the GPU event when the sync event timestamp
* expires
*/
-static void kgsl_cmdbatch_sync_func(struct kgsl_device *device,
+static void drawobj_sync_func(struct kgsl_device *device,
struct kgsl_event_group *group, void *priv, int result)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_timestamp_expire(event->cmdbatch,
+ trace_syncpoint_timestamp_expire(event->syncobj,
event->context, event->timestamp);
- kgsl_cmdbatch_sync_expire(device, event);
+ drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-static inline void _free_memobj_list(struct list_head *list)
+static inline void memobj_list_free(struct list_head *list)
{
struct kgsl_memobj_node *mem, *tmpmem;
@@ -210,39 +211,28 @@ static inline void _free_memobj_list(struct list_head *list)
}
}
-/**
- * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
- * @cmdbatch: Pointer to the command batch object to destroy
- *
- * Start the process of destroying a command batch. Cancel any pending events
- * and decrement the refcount. Asynchronous events can still signal after
- * kgsl_cmdbatch_destroy has returned.
- */
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
- unsigned int i;
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
unsigned long pending;
-
- if (IS_ERR_OR_NULL(cmdbatch))
- return;
+ unsigned int i;
/* Zap the canary timer */
- del_timer_sync(&cmdbatch->timer);
+ del_timer_sync(&syncobj->timer);
/*
* Copy off the pending list and clear all pending events - this will
* render any subsequent asynchronous callback harmless
*/
- bitmap_copy(&pending, &cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
- bitmap_zero(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
/*
* Clear all pending events - this will render any subsequent async
* callbacks harmless
*/
-
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- struct kgsl_cmdbatch_sync_event *event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
/* Don't do anything if the event has already expired */
if (!test_bit(i, &pending))
@@ -250,127 +240,152 @@ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
switch (event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
- kgsl_cancel_event(cmdbatch->device,
+ kgsl_cancel_event(drawobj->device,
&event->context->events, event->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (kgsl_sync_fence_async_cancel(event->handle))
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
break;
}
}
/*
- * Release the the refcount on the mem entry associated with the
- * cmdbatch profiling buffer
+ * If we cancelled an event, there's a good chance that the context is
+ * on a dispatcher queue, so schedule to get it removed.
+ */
+ if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
+ drawobj->device->ftbl->drawctxt_sched)
+ drawobj->device->ftbl->drawctxt_sched(drawobj->device,
+ drawobj->context);
+
+}
+
+static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj)
+{
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);
+
+ /*
+ * Release the refcount on the mem entry associated with the
+ * ib profiling buffer
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)
- kgsl_mem_entry_put(cmdbatch->profiling_buf_entry);
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING)
+ kgsl_mem_entry_put(cmdobj->profiling_buf_entry);
/* Destroy the cmdlist we created */
- _free_memobj_list(&cmdbatch->cmdlist);
+ memobj_list_free(&cmdobj->cmdlist);
/* Destroy the memlist we created */
- _free_memobj_list(&cmdbatch->memlist);
+ memobj_list_free(&cmdobj->memlist);
+}
- /*
- * If we cancelled an event, there's a good chance that the context is
- * on a dispatcher queue, so schedule to get it removed.
+/**
+ * kgsl_drawobj_destroy() - Destroy a kgsl drawobj structure
+ * @drawobj: Pointer to the drawobj to destroy
+ *
+ * Start the process of destroying a drawobj. Cancel any pending events
+ * and decrement the refcount. Asynchronous events can still signal after
+ * kgsl_drawobj_destroy has returned.
*/
- if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
- cmdbatch->device->ftbl->drawctxt_sched)
- cmdbatch->device->ftbl->drawctxt_sched(cmdbatch->device,
- cmdbatch->context);
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
+{
+ if (!drawobj)
+ return;
+
+ if (drawobj->type & SYNCOBJ_TYPE)
+ drawobj_destroy_sync(drawobj);
+ else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
+ drawobj_destroy_cmd(drawobj);
+ else
+ return;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+EXPORT_SYMBOL(kgsl_drawobj_destroy);
-/*
- * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
- * when a fence is expired
- */
-static void kgsl_cmdbatch_sync_fence_func(void *priv)
+static void drawobj_sync_fence_func(void *priv)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_fence_expire(event->cmdbatch,
+ trace_syncpoint_fence_expire(event->syncobj,
event->handle ? event->handle->name : "unknown");
- kgsl_cmdbatch_sync_expire(event->device, event);
+ drawobj_sync_expire(event->device, event);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new fence sync syncpoint to the cmdbatch.
+ * Add a new fence sync syncpoint to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_fence(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_fence *sync = priv;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int id;
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->device = device;
event->context = NULL;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
event->handle = kgsl_sync_fence_async_wait(sync->fd,
- kgsl_cmdbatch_sync_fence_func, event);
+ drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
- clear_bit(event->id, &cmdbatch->pending);
+ clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
/*
* If ret == 0 the fence was already signaled - print a trace
* message so we can track that
*/
if (ret == 0)
- trace_syncpoint_fence_expire(cmdbatch, "signaled");
+ trace_syncpoint_fence_expire(syncobj, "signaled");
return ret;
}
- trace_syncpoint_fence(cmdbatch, event->handle->name);
+ trace_syncpoint_fence(syncobj, event->handle->name);
return 0;
}
-/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new sync point timestamp event to the cmdbatch.
+ * Add a new sync point timestamp event to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_timestamp(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_timestamp *sync = priv;
- struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_context *context = kgsl_context_get(device,
sync->context_id);
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
int ret = -EINVAL;
unsigned int id;
@@ -384,8 +399,9 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
* create a sync point on a future timestamp.
*/
- if (context == cmdbatch->context) {
+ if (context == drawobj->context) {
unsigned int queued;
+
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
&queued);
@@ -397,29 +413,29 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
}
}
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->context = context;
event->timestamp = sync->timestamp;
event->device = device;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
ret = kgsl_add_event(device, &context->events, sync->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
if (ret) {
- clear_bit(event->id, &cmdbatch->pending);
- kgsl_cmdbatch_put(cmdbatch);
+ clear_bit(event->id, &syncobj->pending);
+ drawobj_put(drawobj);
} else {
- trace_syncpoint_timestamp(cmdbatch, context, sync->timestamp);
+ trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
}
done:
@@ -430,43 +446,46 @@ done:
}
/**
- * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * kgsl_drawobj_sync_add_sync() - Add a sync point to a command
+ * batch
* @device: Pointer to the KGSL device struct for the GPU
- * @cmdbatch: Pointer to the cmdbatch
+ * @syncobj: Pointer to the sync obj
* @sync: Pointer to the user-specified struct defining the syncpoint
*
- * Create a new sync point in the cmdbatch based on the user specified
- * parameters
+ * Create a new sync point in the sync obj based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
struct kgsl_cmd_syncpoint *sync)
{
void *priv;
int ret, psize;
- int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ int (*func)(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
void *priv);
switch (sync->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
- func = kgsl_cmdbatch_add_sync_timestamp;
+ func = drawobj_add_sync_timestamp;
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
psize = sizeof(struct kgsl_cmd_syncpoint_fence);
- func = kgsl_cmdbatch_add_sync_fence;
+ func = drawobj_add_sync_fence;
break;
default:
KGSL_DRV_ERR(device,
"bad syncpoint type ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
if (sync->size != psize) {
KGSL_DRV_ERR(device,
"bad syncpoint size ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
@@ -479,30 +498,32 @@ int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
return -EFAULT;
}
- ret = func(device, cmdbatch, priv);
+ ret = func(device, syncobj, priv);
kfree(priv);
return ret;
}
static void add_profiling_buffer(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, uint64_t gpuaddr, uint64_t size,
+ struct kgsl_drawobj_cmd *cmdobj,
+ uint64_t gpuaddr, uint64_t size,
unsigned int id, uint64_t offset)
{
struct kgsl_mem_entry *entry;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!(cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
+ if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
return;
/* Only the first buffer entry counts - ignore the rest */
- if (cmdbatch->profiling_buf_entry != NULL)
+ if (cmdobj->profiling_buf_entry != NULL)
return;
if (id != 0)
- entry = kgsl_sharedmem_find_id(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
id);
else
- entry = kgsl_sharedmem_find(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
gpuaddr);
if (entry != NULL) {
@@ -515,47 +536,50 @@ static void add_profiling_buffer(struct kgsl_device *device,
if (entry == NULL) {
KGSL_DRV_ERR(device,
"ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
- cmdbatch->context->id, id, offset, gpuaddr, size);
+ drawobj->context->id, id, offset, gpuaddr, size);
return;
}
- cmdbatch->profiling_buf_entry = entry;
+ cmdobj->profiling_buf_entry = entry;
if (id != 0)
- cmdbatch->profiling_buffer_gpuaddr =
+ cmdobj->profiling_buffer_gpuaddr =
entry->memdesc.gpuaddr + offset;
else
- cmdbatch->profiling_buffer_gpuaddr = gpuaddr;
+ cmdobj->profiling_buffer_gpuaddr = gpuaddr;
}
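
A small sketch of the two addressing modes add_profiling_buffer() handles above: a non-zero mem id selects base-plus-offset resolution, while id == 0 uses the raw GPU address as-is. Types and values are illustrative only.

    #include <stdio.h>

    struct entry {
            unsigned long long base;        /* entry->memdesc.gpuaddr stand-in */
    };

    static unsigned long long resolve(const struct entry *e, unsigned int id,
                    unsigned long long gpuaddr, unsigned long long offset)
    {
            /* a non-zero id means "find the entry by id, then add offset" */
            return id ? e->base + offset : gpuaddr;
    }

    int main(void)
    {
            struct entry e = { .base = 0x1000 };

            printf("0x%llx\n", resolve(&e, 7, 0, 0x40));    /* 0x1040 */
            printf("0x%llx\n", resolve(&e, 0, 0x2000, 0));  /* 0x2000 */
            return 0;
    }
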
/**
- * kgsl_cmdbatch_add_ibdesc() - Add a legacy ibdesc to a command batch
- * @cmdbatch: Pointer to the cmdbatch
+ * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command
+ * batch
+ * @cmdobj: Pointer to the ib
* @ibdesc: Pointer to the user-specified struct defining the memory or IB
*
- * Create a new memory entry in the cmdbatch based on the user specified
- * parameters
+ * Create a new memory entry in the ib based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc)
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc)
{
uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
struct kgsl_memobj_node *mem;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
/* sanitize the ibdesc ctrl flags */
ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
- add_profiling_buffer(device, cmdbatch,
+ add_profiling_buffer(device, cmdobj,
gpuaddr, size, 0, 0);
return 0;
}
}
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
+ /* Ignore if SYNC or MARKER is specified */
+ if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE))
return 0;
mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
@@ -569,74 +593,121 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
mem->offset = 0;
mem->flags = 0;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
- ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
+ ibdesc->ctrl & KGSL_IBDESC_MEMLIST)
/* add to the memlist */
- list_add_tail(&mem->node, &cmdbatch->memlist);
- } else {
+ list_add_tail(&mem->node, &cmdobj->memlist);
+ else {
/* set the preamble flag if directed to */
- if (cmdbatch->context->flags & KGSL_CONTEXT_PREAMBLE &&
- list_empty(&cmdbatch->cmdlist))
+ if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
+ list_empty(&cmdobj->cmdlist))
mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;
/* add to the cmd list */
- list_add_tail(&mem->node, &cmdbatch->cmdlist);
+ list_add_tail(&mem->node, &cmdobj->cmdlist);
}
return 0;
}
+static inline int drawobj_init(struct kgsl_device *device,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj,
+ unsigned int type)
+{
+ /*
+ * Increase the reference count on the context so it doesn't disappear
+ * during the lifetime of this object
+ */
+ if (!_kgsl_context_get(context))
+ return -ENOENT;
+
+ kref_init(&drawobj->refcount);
+
+ drawobj->device = device;
+ drawobj->context = context;
+ drawobj->type = type;
+
+ return 0;
+}
+
/**
- * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * kgsl_drawobj_sync_create() - Create a new sync obj
+ * structure
* @device: Pointer to a KGSL device struct
* @context: Pointer to a KGSL context struct
- * @flags: Flags for the cmdbatch
*
- * Allocate an new cmdbatch structure
+ * Allocate a new kgsl_drawobj_sync structure
*/
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags)
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context)
{
- struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
- if (cmdbatch == NULL)
+ struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj),
+ GFP_KERNEL);
+ if (syncobj == NULL)
return ERR_PTR(-ENOMEM);
- /*
- * Increase the reference count on the context so it doesn't disappear
- * during the lifetime of this command batch
- */
+ if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) {
+ kfree(syncobj);
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* Add a timer to help debug sync deadlocks */
+ setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj);
+
+ return syncobj;
+}
+
+/**
+ * kgsl_drawobj_cmd_create() - Create a new command obj
+ * structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @flags: Flags for the command obj
+ * @type: Type of the cmdobj (MARKER or CMD)
+ *
+ * Allocate a new kgsl_drawobj_cmd structure
+ */
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type)
+{
+ struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+ struct kgsl_drawobj *drawobj;
+
+ if (cmdobj == NULL)
+ return ERR_PTR(-ENOMEM);
- if (!_kgsl_context_get(context)) {
- kfree(cmdbatch);
+ type &= CMDOBJ_TYPE | MARKEROBJ_TYPE;
+ if (type == 0) {
+ kfree(cmdobj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (drawobj_init(device, context, drawobj, type)) {
+ kfree(cmdobj);
return ERR_PTR(-ENOENT);
}
- kref_init(&cmdbatch->refcount);
- INIT_LIST_HEAD(&cmdbatch->cmdlist);
- INIT_LIST_HEAD(&cmdbatch->memlist);
-
- cmdbatch->device = device;
- cmdbatch->context = context;
- /* sanitize our flags for cmdbatches */
- cmdbatch->flags = flags & (KGSL_CMDBATCH_CTX_SWITCH
- | KGSL_CMDBATCH_MARKER
- | KGSL_CMDBATCH_END_OF_FRAME
- | KGSL_CMDBATCH_SYNC
- | KGSL_CMDBATCH_PWR_CONSTRAINT
- | KGSL_CMDBATCH_MEMLIST
- | KGSL_CMDBATCH_PROFILING
- | KGSL_CMDBATCH_PROFILING_KTIME);
+ /* sanitize our flags for drawobjs */
+ drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
+ | KGSL_DRAWOBJ_MARKER
+ | KGSL_DRAWOBJ_END_OF_FRAME
+ | KGSL_DRAWOBJ_PWR_CONSTRAINT
+ | KGSL_DRAWOBJ_MEMLIST
+ | KGSL_DRAWOBJ_PROFILING
+ | KGSL_DRAWOBJ_PROFILING_KTIME);
- /* Add a timer to help debug sync deadlocks */
- setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer,
- (unsigned long) cmdbatch);
+ INIT_LIST_HEAD(&cmdobj->cmdlist);
+ INIT_LIST_HEAD(&cmdobj->memlist);
- return cmdbatch;
+ return cmdobj;
}
#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
int i, ret = 0;
struct kgsl_ibdesc_compat ibdesc32;
@@ -654,7 +725,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
break;
@@ -665,7 +736,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint_compat sync32;
struct kgsl_cmd_syncpoint sync;
@@ -683,7 +754,7 @@ static int add_syncpoints_compat(struct kgsl_device *device,
sync.priv = compat_ptr(sync32.priv);
sync.size = (size_t) sync32.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
break;
@@ -694,26 +765,54 @@ static int add_syncpoints_compat(struct kgsl_device *device,
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
return -EINVAL;
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
return -EINVAL;
}
#endif
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+/* Returns:
+ * -EINVAL: Bad data
+ * 0: All data fields are empty (nothing to do)
+ * 1: All list information is valid
+ */
+static int _verify_input_list(unsigned int count, void __user *ptr,
+ unsigned int size)
+{
+ /* Return early if nothing going on */
+ if (count == 0 && ptr == NULL && size == 0)
+ return 0;
+
+ /* Sanity check inputs */
+ if (count == 0 || ptr == NULL || size == 0)
+ return -EINVAL;
+
+ return 1;
+}
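
A standalone sketch of this tri-state contract and how the callers below consume it: 0 propagates as "nothing to do" where a list is optional, and is promoted to -EINVAL where a payload is mandatory. Names are illustrative, not the kernel's.

    #include <errno.h>
    #include <stdio.h>

    static int verify_input_list(unsigned int count, const void *ptr,
                    unsigned int size)
    {
            if (count == 0 && ptr == NULL && size == 0)
                    return 0;               /* empty list: nothing to do */
            if (count == 0 || ptr == NULL || size == 0)
                    return -EINVAL;         /* partially supplied: reject */
            return 1;                       /* usable list */
    }

    int main(void)
    {
            int list[4];

            printf("%d\n", verify_input_list(0, NULL, 0));            /* 0 */
            printf("%d\n", verify_input_list(4, list, 0));            /* -EINVAL */
            printf("%d\n", verify_input_list(4, list, sizeof(list))); /* 1 */
            return 0;
    }
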
+
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
struct kgsl_ibdesc ibdesc;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
int i, ret;
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
+ return 0;
+
+ ret = _verify_input_list(count, ptr, sizeof(ibdesc));
+ if (ret <= 0)
+ return -EINVAL;
+
if (is_compat_task())
- return add_ibdesc_list_compat(device, cmdbatch, ptr, count);
+ return add_ibdesc_list_compat(device, cmdobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&ibdesc, 0, sizeof(ibdesc));
@@ -721,7 +820,7 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
return ret;
@@ -731,8 +830,8 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint sync;
int i, ret;
@@ -740,17 +839,14 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (count == 0)
return 0;
- if (count > KGSL_MAX_SYNCPOINTS)
- return -EINVAL;
-
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
if (is_compat_task())
- return add_syncpoints_compat(device, cmdbatch, ptr, count);
+ return add_syncpoints_compat(device, syncobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&sync, 0, sizeof(sync));
@@ -758,7 +854,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (copy_from_user(&sync, ptr, sizeof(sync)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -768,7 +864,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
return 0;
}
-static int kgsl_cmdbatch_add_object(struct list_head *head,
+static int drawobj_add_object(struct list_head *head,
struct kgsl_command_object *obj)
{
struct kgsl_memobj_node *mem;
@@ -793,24 +889,22 @@ static int kgsl_cmdbatch_add_object(struct list_head *head,
KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
KGSL_CMDLIST_IB_PREAMBLE)
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
-
- /* Ignore all if SYNC or MARKER is specified */
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
- return 0;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -823,12 +917,12 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
if (!(obj.flags & CMDLIST_FLAGS)) {
KGSL_DRV_ERR(device,
"invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
+ baseobj->context->id, obj.flags, obj.id,
obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
- ret = kgsl_cmdbatch_add_object(&cmdbatch->cmdlist, &obj);
+ ret = drawobj_add_object(&cmdobj->cmdlist, &obj);
if (ret)
return ret;
@@ -838,20 +932,21 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -863,17 +958,16 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
KGSL_DRV_ERR(device,
"invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
- obj.offset, obj.gpuaddr, obj.size);
+ DRAWOBJ(cmdobj)->context->id, obj.flags,
+ obj.id, obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
if (obj.flags & KGSL_OBJLIST_PROFILE)
- add_profiling_buffer(device, cmdbatch, obj.gpuaddr,
+ add_profiling_buffer(device, cmdobj, obj.gpuaddr,
obj.size, obj.id, obj.offset);
else {
- ret = kgsl_cmdbatch_add_object(&cmdbatch->memlist,
- &obj);
+ ret = drawobj_add_object(&cmdobj->memlist, &obj);
if (ret)
return ret;
}
@@ -884,29 +978,23 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_syncpoint syncpoint;
struct kgsl_cmd_syncpoint sync;
- int i, ret = 0;
-
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
- return 0;
-
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ int i, ret;
- if (count > KGSL_MAX_SYNCPOINTS)
+	/* When creating a sync obj, missing or malformed input is an error */
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
return -EINVAL;
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
for (i = 0; i < count; i++) {
@@ -920,7 +1008,7 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
sync.priv = to_user_ptr(syncpoint.priv);
sync.size = syncpoint.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -930,13 +1018,13 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
return 0;
}
-void kgsl_cmdbatch_exit(void)
+void kgsl_drawobj_exit(void)
{
if (memobjs_cache != NULL)
kmem_cache_destroy(memobjs_cache);
}
-int kgsl_cmdbatch_init(void)
+int kgsl_drawobj_init(void)
{
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
if (memobjs_cache == NULL) {
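
The sanity checks dropped in the hunks above are consolidated into a shared
_verify_input_list() helper whose definition is outside this hunk. Judging
from the call sites, it returns 0 when there is nothing to process, a
negative errno for partially-specified input, and a positive value when the
list should be walked; the sync path additionally treats "nothing to
process" as -EINVAL, and the real helper may also enforce the
KGSL_MAX_SYNCPOINTS cap the old code checked inline. A minimal sketch of
that assumed contract, not the patch's actual definition:

	/*
	 * Hypothetical reconstruction of _verify_input_list(), inferred
	 * from its call sites; the real helper is not shown in this hunk
	 * and may differ (e.g. it may also cap count).
	 */
	static int _verify_input_list(unsigned int count, void __user *ptr,
			unsigned int size)
	{
		/* Nothing to process: callers return 0 early */
		if (count == 0 && ptr == NULL && size == 0)
			return 0;

		/* Partially-specified input is an error */
		if (count == 0 || ptr == NULL || size == 0)
			return -EINVAL;

		/* Positive return tells the caller to walk the list */
		return 1;
	}
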
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
new file mode 100644
index 000000000000..89ed944c539a
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KGSL_DRAWOBJ_H
+#define __KGSL_DRAWOBJ_H
+
+#define DRAWOBJ(obj) (&obj->base)
+#define SYNCOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_sync, base)
+#define CMDOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_cmd, base)
+
+#define CMDOBJ_TYPE BIT(0)
+#define MARKEROBJ_TYPE BIT(1)
+#define SYNCOBJ_TYPE BIT(2)
+
+/**
+ * struct kgsl_drawobj - KGSL drawobj descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @type: Object type
+ * @timestamp: Timestamp assigned to the command
+ * @flags: flags
+ * @refcount: kref structure to maintain the reference count
+ */
+struct kgsl_drawobj {
+ struct kgsl_device *device;
+ struct kgsl_context *context;
+ uint32_t type;
+ uint32_t timestamp;
+ unsigned long flags;
+ struct kref refcount;
+};
+
+/**
+ * struct kgsl_drawobj_cmd - KGSL command object. This also covers
+ * markers, a special form of command that does not need its
+ * commands to be executed.
+ * @base: Base kgsl_drawobj
+ * @priv: Internal flags
+ * @global_ts: The ringbuffer timestamp corresponding to this
+ * command obj
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: Recovery actions actually tried for this command obj
+ * @cmdlist: List of IBs to issue
+ * @memlist: List of all memory used in this command batch
+ * @marker_timestamp: For markers, the timestamp of the last "real" command that
+ * was queued
+ * @profiling_buf_entry: Mem entry containing the profiling buffer
+ * @profiling_buffer_gpuaddr: GPU virtual address of the profiling
+ * buffer, cached here for easy access
+ * @profile_index: Index to store the start/stop ticks in the kernel profiling
+ * buffer
+ * @submit_ticks: Ticks recorded at the time of command obj submission
+ */
+struct kgsl_drawobj_cmd {
+ struct kgsl_drawobj base;
+ unsigned long priv;
+ unsigned int global_ts;
+ unsigned long fault_policy;
+ unsigned long fault_recovery;
+ struct list_head cmdlist;
+ struct list_head memlist;
+ unsigned int marker_timestamp;
+ struct kgsl_mem_entry *profiling_buf_entry;
+ uint64_t profiling_buffer_gpuaddr;
+ unsigned int profile_index;
+ uint64_t submit_ticks;
+};
+
+/**
+ * struct kgsl_drawobj_sync - KGSL sync object
+ * @base: Base kgsl_drawobj, this needs to be the first entry
+ * @synclist: Array of context/timestamp tuples to wait for before issuing
+ * @numsyncs: Number of sync entries in the array
+ * @pending: Bitmask of sync events that are active
+ * @timer: a timer used to track possible sync timeouts for this
+ * sync obj
+ * @timeout_jiffies: For a sync obj the jiffies at
+ * which the timer will expire
+ */
+struct kgsl_drawobj_sync {
+ struct kgsl_drawobj base;
+ struct kgsl_drawobj_sync_event *synclist;
+ unsigned int numsyncs;
+ unsigned long pending;
+ struct timer_list timer;
+ unsigned long timeout_jiffies;
+};
+
+/**
+ * struct kgsl_drawobj_sync_event
+ * @id: Identifier (position within the pending bitmap)
+ * @type: Syncpoint type
+ * @syncobj: Pointer to the syncobj that owns the sync event
+ * @context: KGSL context for whose timestamp we want to
+ * register this event
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ */
+struct kgsl_drawobj_sync_event {
+ unsigned int id;
+ int type;
+ struct kgsl_drawobj_sync *syncobj;
+ struct kgsl_context *context;
+ unsigned int timestamp;
+ struct kgsl_sync_fence_waiter *handle;
+ struct kgsl_device *device;
+};
+
+#define KGSL_DRAWOBJ_FLAGS \
+ { KGSL_DRAWOBJ_MARKER, "MARKER" }, \
+ { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
+ { KGSL_DRAWOBJ_SYNC, "SYNC" }, \
+ { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \
+ { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
+ { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" }
+
+/**
+ * enum kgsl_drawobj_cmd_priv - Internal command obj flags
+ * @CMDOBJ_SKIP - skip the entire command obj
+ * @CMDOBJ_FORCE_PREAMBLE - Force the preamble on for
+ * command obj
+ * @CMDOBJ_WFI - Force wait-for-idle for the submission
+ * @CMDOBJ_PROFILE - store the start / retire ticks for
+ * the command obj in the profiling buffer
+ */
+enum kgsl_drawobj_cmd_priv {
+ CMDOBJ_SKIP = 0,
+ CMDOBJ_FORCE_PREAMBLE,
+ CMDOBJ_WFI,
+ CMDOBJ_PROFILE,
+};
+
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type);
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc);
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count);
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context);
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ int count);
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_cmd_syncpoint *sync);
+
+int kgsl_drawobj_init(void);
+void kgsl_drawobj_exit(void);
+
+void kgsl_dump_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj);
+
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj);
+
+static inline bool kgsl_drawobj_events_pending(
+ struct kgsl_drawobj_sync *syncobj)
+{
+ return !bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
+}
+
+static inline bool kgsl_drawobj_event_pending(
+ struct kgsl_drawobj_sync *syncobj, unsigned int bit)
+{
+ if (bit >= KGSL_MAX_SYNCPOINTS)
+ return false;
+
+ return test_bit(bit, &syncobj->pending);
+}
+#endif /* __KGSL_DRAWOBJ_H */
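
The header encodes a simple base/derived scheme: every object embeds a
struct kgsl_drawobj as its first member, DRAWOBJ() upcasts to it, and
CMDOBJ()/SYNCOBJ() downcast via container_of() guarded by the type bits.
An illustration of the intended pattern; handle_drawobj() is hypothetical
and not part of this patch:

	/* Sketch of the up/downcast pattern (illustrative only) */
	static void handle_drawobj(struct kgsl_drawobj *drawobj)
	{
		if (drawobj->type & SYNCOBJ_TYPE) {
			struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);

			if (kgsl_drawobj_events_pending(syncobj))
				return; /* still waiting on syncpoints */
		} else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
			struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);

			/* DRAWOBJ() goes the other way: &cmdobj->base */
			WARN_ON(DRAWOBJ(cmdobj) != drawobj);
		}
	}
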
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 71b6086423d6..9f35a3197a4c 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1118,7 +1118,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
unsigned int cb_num;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
@@ -1128,9 +1127,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
if (kgsl_mmu_is_perprocess(mmu)) {
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_PROCID, &pt->name);
@@ -1189,7 +1185,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
int secure_vmid = VMID_CP_PIXEL;
unsigned int cb_num;
@@ -1207,9 +1202,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
if (ret) {
@@ -1251,7 +1243,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int dynamic = 1;
unsigned int cb_num = ctx->cb_num;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
@@ -1278,9 +1269,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
goto done;
}
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = _attach_pt(iommu_pt, ctx);
if (ret)
goto done;
@@ -2492,7 +2480,6 @@ static const struct {
{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
{ "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
{ "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
- { "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW },
};
static int _kgsl_iommu_probe(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index acbc0e784cf2..3e32c25b3dbe 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -130,8 +130,6 @@ struct kgsl_mmu_pt_ops {
#define KGSL_MMU_FORCE_32BIT BIT(5)
/* 64 bit address is live */
#define KGSL_MMU_64BIT BIT(6)
-/* MMU can do coherent hardware table walks */
-#define KGSL_MMU_COHERENT_HTW BIT(7)
/* The MMU supports non-contigious pages */
#define KGSL_MMU_PAGED BIT(8)
/* The device requires a guard page */
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 4ef9f80177d6..6438c6e65b97 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -36,14 +36,13 @@ TRACE_EVENT(kgsl_issueibcmds,
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_cmdbatch *cmdbatch,
unsigned int numibs,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, cmdbatch, numibs, timestamp,
+ TP_ARGS(device, drawctxt_id, numibs, timestamp,
flags, result, type),
TP_STRUCT__entry(
@@ -74,7 +73,7 @@ TRACE_EVENT(kgsl_issueibcmds,
__entry->numibs,
__entry->timestamp,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "None",
+ KGSL_DRAWOBJ_FLAGS) : "None",
__entry->result,
__print_symbolic(__entry->drawctxt_type, KGSL_CONTEXT_TYPES)
)
@@ -1028,59 +1027,62 @@ TRACE_EVENT(kgsl_pagetable_destroy,
);
DECLARE_EVENT_CLASS(syncpoint_timestamp_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp),
+ TP_ARGS(syncobj, context, timestamp),
TP_STRUCT__entry(
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
__field(unsigned int, context_id)
__field(unsigned int, timestamp)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__entry->context_id = context->id;
__entry->timestamp = timestamp;
),
TP_printk("ctx=%d sync ctx=%d ts=%d",
- __entry->cmdbatch_context_id, __entry->context_id,
+ __entry->syncobj_context_id, __entry->context_id,
__entry->timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DECLARE_EVENT_CLASS(syncpoint_fence_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name),
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name),
TP_STRUCT__entry(
__string(fence_name, name)
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__assign_str(fence_name, name);
),
TP_printk("ctx=%d fence=%s",
- __entry->cmdbatch_context_id, __get_str(fence_name))
+ __entry->syncobj_context_id, __get_str(fence_name))
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
TRACE_EVENT(kgsl_msg,
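
Because both syncpoint events are stamped out of one DECLARE_EVENT_CLASS()
template, the rename only has to touch the shared prototype and field
assignments; the DEFINE_EVENT() instances inherit them. Hypothetical call
sites, mirroring the new prototypes rather than any code shown here:

	/* illustrative only -- real call sites live in the KGSL core */
	trace_syncpoint_timestamp(syncobj, context, timestamp);
	trace_syncpoint_fence_expire(syncobj, fence_name);
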
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index bb44b6d82ccd..ebb49230d4d7 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/qpnp/qpnp-revid.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -150,13 +151,18 @@
#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM 5000000
#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN 3
-#define FG_ADC_RR_DIE_TEMP_OFFSET 600000
+#define FG_ADC_RR_DIE_TEMP_OFFSET 601400
#define FG_ADC_RR_DIE_TEMP_SLOPE 2
#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
-#define FG_ADC_RR_CHG_TEMP_OFFSET 1288000
-#define FG_ADC_RR_CHG_TEMP_SLOPE 4
-#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 27000
+#define FAB_ID_GF 0x30
+#define FAB_ID_SMIC 0x11
+#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1296794
+#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3858
+#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1339518
+#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3598
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 25000
+#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
#define FG_ADC_RR_CURR_INPUT_FACTOR 2
@@ -201,6 +207,8 @@ struct rradc_chip {
struct iio_chan_spec *iio_chans;
unsigned int nchannels;
struct rradc_chan_prop *chan_props;
+ struct device_node *revid_dev_node;
+ struct pmic_revid_data *pmic_fab_id;
};
struct rradc_channels {
@@ -347,16 +355,34 @@ static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = (int64_t) adc_code * 4;
- temp = temp * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = (int64_t) adc_code * FG_ADC_RR_CHG_THRESHOLD_SCALE;
+ uv = uv * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv = uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
@@ -380,15 +406,33 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv += FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
@@ -516,7 +560,7 @@ static int rradc_do_conversion(struct rradc_chip *chip,
buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
- pr_warn("%s is not ready; nothing to read\n",
+ pr_debug("%s is not ready; nothing to read\n",
rradc_chans[prop->channel].datasheet_name);
rc = -ENODATA;
goto fail;
@@ -653,6 +697,22 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
}
chip->base = base;
+ chip->revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+ if (chip->revid_dev_node) {
+ chip->pmic_fab_id = get_revid_data(chip->revid_dev_node);
+ if (IS_ERR(chip->pmic_fab_id)) {
+ rc = PTR_ERR(chip->pmic_fab_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->pmic_fab_id->fab_id == -EINVAL) {
+ rc = chip->pmic_fab_id->fab_id;
+ pr_debug("Unable to read fabid rc=%d\n", rc);
+ }
+ }
+
iio_chan = chip->iio_chans;
for (i = 0; i < RR_ADC_MAX; i++) {
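
The two conversion routines now share the same math: scale the raw code to
microvolts against the full-scale voltage, subtract from the fab-specific
offset, and divide by the fab-specific slope to get milli-degrees C (the
hot-threshold path first multiplies the code by
FG_ADC_RR_CHG_THRESHOLD_SCALE). A condensed sketch of the non-hot path;
FG_MAX_ADC_READINGS and FG_ADC_SCALE_MILLI_FACTOR are defined outside this
hunk, and the latter is assumed to be the usual factor of 1000:

	/* Sketch of the shared scaling, not the driver's literal code */
	static int64_t chg_temp_mdegc(u16 adc_code, int64_t offset_uv,
			int64_t slope_uv_per_c)
	{
		int64_t uv = (int64_t)adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;

		uv = div64_s64(uv, FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
				FG_MAX_ADC_READINGS);

		/* (offset - uv) / slope is degC; scale to milli-degC */
		return div64_s64((offset_uv - uv) * FG_ADC_SCALE_MILLI_FACTOR,
				slope_uv_per_c) +
				FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
	}
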
diff --git a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
index 0a93f11e2b7e..e5edaf5f908d 100644
--- a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
+++ b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
@@ -4,378 +4,268 @@
*
*/
-#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/of_gpio.h>
-#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/miscdevice.h>
-
#include "pixart_ots.h"
-#include "pixart_platform.h"
-
-static int pat9125_init_input_data(void);
-
-#define pat9125_name "pixart_pat9125"
-
-#define pat9125_DEV_NAME pat9125_name
-
-static struct pat9125_linux_data_t pat9125data;
-static int pat9125_i2c_write(u8 reg, u8 *data, int len);
-static int pat9125_i2c_read(u8 reg, u8 *data);
+struct pixart_pat9125_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ int irq_gpio;
+ u32 irq_flags;
+};
-extern unsigned char ReadData(unsigned char addr)
+static int pat9125_i2c_write(struct i2c_client *client, u8 reg, u8 *data,
+ int len)
{
- u8 data = 0xff;
-
- pat9125_i2c_read(addr, &data);
- return data;
-}
-extern void WriteData(unsigned char addr, unsigned char data)
-{
- pat9125_i2c_write(addr, &data, 1);
-}
-extern void delay_ms(int ms)
-{
- msleep(ms);
-}
-static int pat9125_i2c_write(u8 reg, u8 *data, int len)
-{
- u8 buf[20];
- int rc;
- int ret = 0;
- int i;
+ u8 buf[MAX_BUF_SIZE];
+ int ret = 0, i;
+ struct device *dev = &client->dev;
buf[0] = reg;
- if (len >= 20) {
- pr_debug(
- "%s (%d) : FAILED: buffer size is limitted(20) %d\n",
- __func__, __LINE__, len);
- dev_err(&pat9125data.client->dev, "pat9125_i2c_write FAILED: buffer size is limitted(20)\n");
+ if (len >= MAX_BUF_SIZE) {
+ dev_err(dev, "%s Failed: buffer size is %d [Max Limit is %d]\n",
+ __func__, len, MAX_BUF_SIZE);
return -ENODEV;
}
-
for (i = 0 ; i < len; i++)
buf[i+1] = data[i];
-
/* Returns negative errno, or else the number of bytes written. */
- rc = i2c_master_send(pat9125data.client, buf, len+1);
-
- if (rc != len+1) {
- pr_debug(
- "%s (%d) : FAILED: writing to reg 0x%x\n",
- __func__, __LINE__, reg);
-
- ret = -ENODEV;
- }
+ ret = i2c_master_send(client, buf, len+1);
+ if (ret != len+1)
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
return ret;
}
-static int pat9125_i2c_read(u8 reg, u8 *data)
+static int pat9125_i2c_read(struct i2c_client *client, u8 reg, u8 *data)
{
-
- u8 buf[20];
- int rc;
+ u8 buf[MAX_BUF_SIZE];
+ int ret;
+ struct device *dev = &client->dev;
buf[0] = reg;
-
/*
- * If everything went ok (i.e. 1 msg transmitted),
- *return #bytes transmitted, else error code.
- * thus if transmit is ok return value 1
+	 * If everything went ok (i.e. 1 msg transmitted), return the number
+	 * of bytes transmitted, else an error code. Thus a successful
+	 * transmit returns 1.
*/
- rc = i2c_master_send(pat9125data.client, buf, 1);
- if (rc != 1) {
- pr_debug(
- "%s (%d) : FAILED: writing to address 0x%x\n",
- __func__, __LINE__, reg);
- return -ENODEV;
+ ret = i2c_master_send(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
+ return ret;
}
-
/* returns negative errno, or else the number of bytes read */
- rc = i2c_master_recv(pat9125data.client, buf, 1);
- if (rc != 1) {
- pr_debug(
- "%s (%d) : FAILED: reading data\n",
- __func__, __LINE__);
- return -ENODEV;
+ ret = i2c_master_recv(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: reading reg 0x%x\n", __func__, reg);
+ return ret;
}
-
*data = buf[0];
- return 0;
-}
-
-void pixart_pat9125_ist(void)
-{
-
-}
-static irqreturn_t pixart_pat9125_irq(int irq, void *handle)
-{
- pixart_pat9125_ist();
- return IRQ_HANDLED;
-}
-
-static int pat9125_start(void)
-{
- int err = (-1);
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
-
- err = request_threaded_irq(pat9125data.irq, NULL, pixart_pat9125_irq,
- pat9125data.irq_flags,
- "pixart_pat9125_irq",
- &pat9125data);
- if (err)
- pr_debug("irq %d busy?\n", pat9125data.irq);
-
- pat9125data.last_jiffies = jiffies_64;
-
- return err;
-}
-
-static void pat9125_stop(void)
-{
- free_irq(pat9125data.irq, &pat9125data);
-}
-
-static ssize_t pat9125_fops_read(struct file *filp,
- char *buf, size_t count, loff_t *l)
-{
- return 0;
+ return ret;
}
-static ssize_t pat9125_fops_write(struct file *filp,
- const char *buf, size_t count, loff_t *f_ops)
+unsigned char read_data(struct i2c_client *client, u8 addr)
{
- return 0;
-}
+ u8 data = 0xff;
-static long pat9125_fops_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return 0;
+ pat9125_i2c_read(client, addr, &data);
+ return data;
}
-static int pat9125_fops_open(struct inode *inode, struct file *filp)
+void write_data(struct i2c_client *client, u8 addr, u8 data)
{
- return 0;
+ pat9125_i2c_write(client, addr, &data, 1);
}
-static int pat9125_fops_release(struct inode *inode, struct file *filp)
+static irqreturn_t pixart_pat9125_irq(int irq, void *data)
{
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
- return 0;
+ return IRQ_HANDLED;
}
-static const struct file_operations pat9125_fops = {
-owner: THIS_MODULE,
- read : pat9125_fops_read,
- write : pat9125_fops_write,
- /* ioctl : pat9125_fops_ioctl, */
- unlocked_ioctl : pat9125_fops_ioctl,
- open : pat9125_fops_open,
- release : pat9125_fops_release,
-};
-/*----------------------------------------------------------------------------*/
-struct miscdevice pat9125_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = pat9125_name,
- .fops = &pat9125_fops,
-};
static ssize_t pat9125_test_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- char s[256];
- char *p = s;
-
- pr_debug("%s (%d) : write_reg_store\n", __func__, __LINE__);
-
- memcpy(s, buf, sizeof(s));
-
+ char s[256], *p = s;
+ int reg_data = 0, i;
+ long rd_addr, wr_addr, wr_data;
+ struct pixart_pat9125_data *data =
+ (struct pixart_pat9125_data *)dev->driver_data;
+ struct i2c_client *client = data->client;
+
+ for (i = 0; i < sizeof(s); i++)
+ s[i] = buf[i];
*(s+1) = '\0';
*(s+4) = '\0';
*(s+7) = '\0';
/* example(in console): echo w 12 34 > rw_reg */
if (*p == 'w') {
- long write_addr, write_data;
-
p += 2;
- if (!kstrtol(p, 16, &write_addr)) {
+ if (!kstrtol(p, 16, &wr_addr)) {
p += 3;
- if (!kstrtol(p, 16, &write_data)) {
- pr_debug(
- "w 0x%x 0x%x\n",
- (u8)write_addr, (u8)write_data);
- WriteData((u8)write_addr, (u8)write_data);
+ if (!kstrtol(p, 16, &wr_data)) {
+ dev_dbg(dev, "w 0x%x 0x%x\n",
+ (u8)wr_addr, (u8)wr_data);
+ write_data(client, (u8)wr_addr, (u8)wr_data);
}
}
- /* example(in console): echo r 12 > rw_reg */
- } else if (*p == 'r') {
- long read_addr;
-
+	} else if (*p == 'r') {
+		/* example(in console): echo r 12 > rw_reg */
p += 2;
- if (!kstrtol(p, 16, &read_addr)) {
- int data = 0;
-
- data = ReadData((u8)read_addr);
- pr_debug(
- "r 0x%x 0x%x\n",
- (unsigned int)read_addr, data);
+ if (!kstrtol(p, 16, &rd_addr)) {
+ reg_data = read_data(client, (u8)rd_addr);
+ dev_dbg(dev, "r 0x%x 0x%x\n",
+ (unsigned int)rd_addr, reg_data);
}
}
return count;
}
-static ssize_t pat9125_test_show(
- struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pat9125_test_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
-
- /* cat */
- pr_debug("%s (%d) :\n", __func__, __LINE__);
-
return 0;
}
-static DEVICE_ATTR(
- test,
- S_IRUGO | S_IWUSR | S_IWGRP, pat9125_test_show, pat9125_test_store);
-static struct device_attribute *pat9125_attr_list[] = {
- &dev_attr_test,
-};
+static DEVICE_ATTR(test, S_IRUGO | S_IWUSR | S_IWGRP,
+ pat9125_test_show, pat9125_test_store);
-static int pat9125_create_attr(struct device *dev)
-{
- int idx, err = 0;
- int num = ARRAY_SIZE(pat9125_attr_list);
+static struct attribute *pat9125_attr_list[] = {
+ &dev_attr_test.attr,
+ NULL,
+};
- if (!dev)
- return -EINVAL;
- for (idx = 0; idx < num; idx++) {
- err = device_create_file(dev, pat9125_attr_list[idx]);
- if (err) {
- pr_debug(
- "device_create_file (%s) = %d\n",
- pat9125_attr_list[idx]->attr.name, err);
- break;
- }
- }
- return err;
-}
+static struct attribute_group pat9125_attr_grp = {
+ .attrs = pat9125_attr_list,
+};
-static int pat9125_i2c_probe(
- struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pat9125_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
int err = 0;
+ struct pixart_pat9125_data *data;
+ struct input_dev *input;
struct device_node *np;
+ struct device *dev = &client->dev;
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-
- pr_debug("%s (%d) : probe module....\n", __func__, __LINE__);
-
- memset(&pat9125data, 0, sizeof(pat9125data));
- err = i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE);
- if (err < 0)
- goto error_return;
-
- pat9125data.client = client;
- err = misc_register(&pat9125_device);
- if (err) {
- pr_debug("pat9125_device register failed\n");
- goto error_return;
+ err = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE);
+ if (err < 0) {
+ dev_err(dev, "I2C not supported\n");
+ return -ENXIO;
}
- pat9125data.pat9125_device = pat9125_device.this_device;
- err = pat9125_create_attr(pat9125data.pat9125_device);
- if (err) {
- pr_debug("create attribute err = %d\n", err);
- goto error_return;
+ if (client->dev.of_node) {
+ data = devm_kzalloc(dev, sizeof(struct pixart_pat9125_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ } else {
+ data = client->dev.platform_data;
+ if (!data) {
+ dev_err(dev, "Invalid pat9125 data\n");
+ return -EINVAL;
+ }
}
+ data->client = client;
- if (pat9125_init_input_data() < 0)
- goto error_return;
-
- /* interrupt initialization */
- pat9125data.i2c_dev = &client->dev;
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "Failed to alloc input device\n");
+ return -ENOMEM;
+ }
- np = pat9125data.i2c_dev->of_node;
- pat9125data.irq_gpio = of_get_named_gpio_flags(np,
- "pixart_pat9125,irq-gpio", 0, &pat9125data.irq_flags);
+ i2c_set_clientdata(client, data);
+ input_set_drvdata(input, data);
+ input->name = PAT9125_DEV_NAME;
- pr_debug(
- "irq_gpio: %d, irq_flags: 0x%x\n",
- pat9125data.irq_gpio, pat9125data.irq_flags);
+ data->input = input;
+ err = input_register_device(data->input);
+ if (err < 0) {
+ dev_err(dev, "Failed to register input device\n");
+ goto err_register_input_device;
+ }
- if (!gpio_is_valid(pat9125data.irq_gpio)) {
- err = (-1);
- pr_debug(
- "invalid irq_gpio: %d\n",
- pat9125data.irq_gpio);
- goto error_return;
+ if (!gpio_is_valid(data->irq_gpio)) {
+ dev_err(dev, "invalid irq_gpio: %d\n", data->irq_gpio);
+ return -EINVAL;
}
- err = gpio_request(pat9125data.irq_gpio, "pixart_pat9125_irq_gpio");
+ err = gpio_request(data->irq_gpio, "pixart_pat9125_irq_gpio");
if (err) {
- pr_debug(
- "unable to request gpio [%d], [%d]\n",
- pat9125data.irq_gpio, err);
- goto error_return;
+ dev_err(dev, "unable to request gpio %d\n", data->irq_gpio);
+ return err;
}
- err = gpio_direction_input(pat9125data.irq_gpio);
- if (err) {
- pr_debug("unable to set dir for gpio[%d], [%d]\n",
- pat9125data.irq_gpio, err);
- goto error_return;
+ err = gpio_direction_input(data->irq_gpio);
+ if (err) {
+ dev_err(dev, "unable to set dir for gpio %d\n", data->irq_gpio);
+ goto free_gpio;
}
- pat9125data.irq = gpio_to_irq(pat9125data.irq_gpio);
+ if (!ots_sensor_init(client)) {
+ err = -ENODEV;
+ goto err_sensor_init;
+ }
- if (!OTS_Sensor_Init())
- goto error_return;
+ err = devm_request_threaded_irq(dev, client->irq, NULL,
+ pixart_pat9125_irq, (unsigned long)data->irq_flags,
+ "pixart_pat9125_irq", data);
+ if (err) {
+ dev_err(dev, "Req irq %d failed, errno:%d\n", client->irq, err);
+ goto err_request_threaded_irq;
+ }
- if (!pat9125_start())
- goto error_return;
+ err = sysfs_create_group(&(input->dev.kobj), &pat9125_attr_grp);
+ if (err) {
+ dev_err(dev, "Failed to create sysfs group, errno:%d\n", err);
+ goto err_sysfs_create;
+ }
return 0;
-error_return:
-
+err_sysfs_create:
+err_request_threaded_irq:
+err_sensor_init:
+free_gpio:
+ gpio_free(data->irq_gpio);
+err_register_input_device:
+ input_free_device(data->input);
return err;
-
}
-
static int pat9125_i2c_remove(struct i2c_client *client)
{
-
+ struct pixart_pat9125_data *data = i2c_get_clientdata(client);
+
+ devm_free_irq(&client->dev, client->irq, data);
+ if (gpio_is_valid(data->irq_gpio))
+ gpio_free(data->irq_gpio);
+ input_unregister_device(data->input);
+ devm_kfree(&client->dev, data);
+ data = NULL;
return 0;
}
static int pat9125_suspend(struct device *dev)
-{ pr_debug("%s (%d) : pat9125 suspend\n", __func__, __LINE__);
+{
return 0;
}
static int pat9125_resume(struct device *dev)
{
- pr_debug("%s (%d) : pat9125 resume\n", __func__, __LINE__);
return 0;
}
static const struct i2c_device_id pat9125_device_id[] = {
- {pat9125_DEV_NAME, 0},
+ {PAT9125_DEV_NAME, 0},
{}
};
-
MODULE_DEVICE_TABLE(i2c, pat9125_device_id);
static const struct dev_pm_ops pat9125_pm_ops = {
@@ -390,7 +280,7 @@ static const struct of_device_id pixart_pat9125_match_table[] = {
static struct i2c_driver pat9125_i2c_driver = {
.driver = {
- .name = pat9125_DEV_NAME,
+ .name = PAT9125_DEV_NAME,
.owner = THIS_MODULE,
.pm = &pat9125_pm_ops,
.of_match_table = pixart_pat9125_match_table,
@@ -399,72 +289,8 @@ static struct i2c_driver pat9125_i2c_driver = {
.remove = pat9125_i2c_remove,
.id_table = pat9125_device_id,
};
-static int pat9125_open(struct input_dev *dev)
-{
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
- return 0;
-}
-
-static void pat9125_close(struct input_dev *dev)
-{
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
-}
-
-static int pat9125_init_input_data(void)
-{
- int ret = 0;
-
- pr_debug("%s (%d) : initialize data\n", __func__, __LINE__);
-
- pat9125data.pat9125_input_dev = input_allocate_device();
-
- if (!pat9125data.pat9125_input_dev) {
- pr_debug(
- "%s (%d) : could not allocate mouse input device\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- input_set_drvdata(pat9125data.pat9125_input_dev, &pat9125data);
- pat9125data.pat9125_input_dev->name = "Pixart pat9125";
-
- pat9125data.pat9125_input_dev->open = pat9125_open;
- pat9125data.pat9125_input_dev->close = pat9125_close;
-
- ret = input_register_device(pat9125data.pat9125_input_dev);
- if (ret < 0) {
- input_free_device(pat9125data.pat9125_input_dev);
- pr_debug(
- "%s (%d) : could not register input device\n",
- __func__, __LINE__);
- return ret;
- }
-
- return 0;
-}
-
-static int __init pat9125_linux_init(void)
-{
- return i2c_add_driver(&pat9125_i2c_driver);
-}
-
-
+module_i2c_driver(pat9125_i2c_driver);
-
-static void __exit pat9125_linux_exit(void)
-{
- pr_debug("%s (%d) : exit module\n", __func__, __LINE__);
- pat9125_stop();
- misc_register(&pat9125_device);
- i2c_del_driver(&pat9125_i2c_driver);
-}
-
-
-module_init(pat9125_linux_init);
-module_exit(pat9125_linux_exit);
MODULE_AUTHOR("pixart");
MODULE_DESCRIPTION("pixart pat9125 driver");
MODULE_LICENSE("GPL");
-
-
-
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.c b/drivers/input/misc/ots_pat9125/pixart_ots.c
index 70736197de3c..fa73ffe40985 100644
--- a/drivers/input/misc/ots_pat9125/pixart_ots.c
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.c
@@ -4,68 +4,74 @@
*
*/
+#include "pixart_platform.h"
#include "pixart_ots.h"
-static void OTS_WriteRead(uint8_t address, uint8_t wdata);
+static void ots_write_read(struct i2c_client *client, u8 address, u8 wdata)
+{
+ u8 read_value;
+
+ do {
+ write_data(client, address, wdata);
+ read_value = read_data(client, address);
+ } while (read_value != wdata);
+}
-bool OTS_Sensor_Init(void)
+bool ots_sensor_init(struct i2c_client *client)
{
unsigned char sensor_pid = 0, read_id_ok = 0;
/*
* Read sensor_pid in address 0x00 to check if the
- * serial link is valid, read value should be 0x31.
+ * serial link is valid, read value should be 0x31.
*/
- sensor_pid = ReadData(0x00);
+ sensor_pid = read_data(client, PIXART_PAT9125_PRODUCT_ID1_REG);
- if (sensor_pid == 0x31) {
+ if (sensor_pid == PIXART_PAT9125_SENSOR_ID) {
read_id_ok = 1;
/*
- *PAT9125 sensor recommended settings:
- * switch to bank0, not allowed to perform OTS_RegWriteRead
+ * PAT9125 sensor recommended settings:
+ * switch to bank0, not allowed to perform ots_write_read
*/
- WriteData(0x7F, 0x00);
+ write_data(client, PIXART_PAT9125_SELECT_BANK_REG,
+ PIXART_PAT9125_BANK0);
/*
* software reset (i.e. set bit7 to 1).
* It will reset to 0 automatically
* so perform OTS_RegWriteRead is not allowed.
*/
- WriteData(0x06, 0x97);
+ write_data(client, PIXART_PAT9125_CONFIG_REG,
+ PIXART_PAT9125_RESET);
/* delay 1ms */
- delay_ms(1);
+ usleep_range(RESET_DELAY_US, RESET_DELAY_US + 1);
/* disable write protect */
- OTS_WriteRead(0x09, 0x5A);
+ ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+ PIXART_PAT9125_DISABLE_WRITE_PROTECT);
/* set X-axis resolution (depends on application) */
- OTS_WriteRead(0x0D, 0x65);
+ ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_X_REG,
+ PIXART_PAT9125_CPI_RESOLUTION_X);
/* set Y-axis resolution (depends on application) */
- OTS_WriteRead(0x0E, 0xFF);
+ ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_Y_REG,
+ PIXART_PAT9125_CPI_RESOLUTION_Y);
/* set 12-bit X/Y data format (depends on application) */
- OTS_WriteRead(0x19, 0x04);
+ ots_write_read(client, PIXART_PAT9125_ORIENTATION_REG,
+ PIXART_PAT9125_MOTION_DATA_LENGTH);
/* ONLY for VDD=VDDA=1.7~1.9V: for power saving */
- OTS_WriteRead(0x4B, 0x04);
+ ots_write_read(client, PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG,
+ PIXART_PAT9125_LOW_VOLTAGE_SEGMENT);
- if (ReadData(0x5E) == 0x04) {
- OTS_WriteRead(0x5E, 0x08);
- if (ReadData(0x5D) == 0x10)
- OTS_WriteRead(0x5D, 0x19);
+ if (read_data(client, PIXART_PAT9125_MISC2_REG) == 0x04) {
+ ots_write_read(client, PIXART_PAT9125_MISC2_REG, 0x08);
+ if (read_data(client, PIXART_PAT9125_MISC1_REG) == 0x10)
+ ots_write_read(client, PIXART_PAT9125_MISC1_REG,
+ 0x19);
}
- OTS_WriteRead(0x09, 0x00);/* enable write protect */
+ /* enable write protect */
+ ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+ PIXART_PAT9125_ENABLE_WRITE_PROTECT);
}
return read_id_ok;
}
-
-static void OTS_WriteRead(uint8_t address, uint8_t wdata)
-{
- uint8_t read_value;
-
- do {
- /* Write data to specified address */
- WriteData(address, wdata);
- /* Read back previous written data */
- read_value = ReadData(address);
- /* Check if the data is correctly written */
- } while (read_value != wdata);
-}
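
The relocated ots_write_read() busy-polls until the readback matches the
written value, with no retry cap; if a register stays write-protected or
the bus wedges, the loop never terminates. A bounded variant, offered as a
sketch under that assumption rather than as the driver's code:

	/* Defensive variant of ots_write_read() -- not part of this patch */
	static int ots_write_read_bounded(struct i2c_client *client, u8 addr,
			u8 wdata)
	{
		int retries = 5;

		do {
			write_data(client, addr, wdata);
			if (read_data(client, addr) == wdata)
				return 0;
		} while (--retries);

		return -EIO;
	}
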
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.h b/drivers/input/misc/ots_pat9125/pixart_ots.h
index ba1da1396ad1..a66ded5c9d08 100644
--- a/drivers/input/misc/ots_pat9125/pixart_ots.h
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.h
@@ -4,13 +4,42 @@
*
*/
-#ifndef _PIXART_OTS_H_
-#define _PIXART_OTS_H_
+#ifndef __PIXART_OTS_H_
+#define __PIXART_OTS_H_
-#include "pixart_platform.h"
+#define PAT9125_DEV_NAME "pixart_pat9125"
+#define MAX_BUF_SIZE 20
+#define RESET_DELAY_US 1000
-/* export funtions */
-bool OTS_Sensor_Init(void);
-void OTS_Sensor_ReadMotion(int16_t *dx, int16_t *dy);
+/* Register addresses */
+#define PIXART_PAT9125_PRODUCT_ID1_REG 0x00
+#define PIXART_PAT9125_PRODUCT_ID2_REG 0x01
+#define PIXART_PAT9125_MOTION_STATUS_REG 0x02
+#define PIXART_PAT9125_DELTA_X_LO_REG 0x03
+#define PIXART_PAT9125_DELTA_Y_LO_REG 0x04
+#define PIXART_PAT9125_CONFIG_REG 0x06
+#define PIXART_PAT9125_WRITE_PROTECT_REG 0x09
+#define PIXART_PAT9125_SET_CPI_RES_X_REG 0x0D
+#define PIXART_PAT9125_SET_CPI_RES_Y_REG 0x0E
+#define PIXART_PAT9125_DELTA_XY_HI_REG 0x12
+#define PIXART_PAT9125_ORIENTATION_REG 0x19
+#define PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG 0x4B
+#define PIXART_PAT9125_SELECT_BANK_REG 0x7F
+#define PIXART_PAT9125_MISC1_REG 0x5D
+#define PIXART_PAT9125_MISC2_REG 0x5E
+/* Register configuration data */
+#define PIXART_PAT9125_SENSOR_ID 0x31
+#define PIXART_PAT9125_RESET 0x97
+#define PIXART_PAT9125_MOTION_DATA_LENGTH 0x04
+#define PIXART_PAT9125_BANK0 0x00
+#define PIXART_PAT9125_DISABLE_WRITE_PROTECT 0x5A
+#define PIXART_PAT9125_ENABLE_WRITE_PROTECT 0x00
+#define PIXART_PAT9125_CPI_RESOLUTION_X 0x65
+#define PIXART_PAT9125_CPI_RESOLUTION_Y 0xFF
+#define PIXART_PAT9125_LOW_VOLTAGE_SEGMENT 0x04
+#define PIXART_PAT9125_VALID_MOTION_DATA 0x80
+
+/* Export functions */
+bool ots_sensor_init(struct i2c_client *);
#endif
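
The new register map covers a motion read path even though this patch only
wires up initialization. A hedged sketch of a 12-bit delta read built on
the defines above; the nibble packing of PIXART_PAT9125_DELTA_XY_HI_REG
(X high nibble in bits 7:4, Y in bits 3:0) is an assumption about the part,
not something this patch establishes:

	/* Illustrative motion read; sign_extend32() is from <linux/bitops.h> */
	static void pat9125_read_motion(struct i2c_client *client,
			s16 *dx, s16 *dy)
	{
		u8 xl, yl, xyh;

		if (!(read_data(client, PIXART_PAT9125_MOTION_STATUS_REG) &
				PIXART_PAT9125_VALID_MOTION_DATA)) {
			*dx = *dy = 0;
			return;
		}

		xl = read_data(client, PIXART_PAT9125_DELTA_X_LO_REG);
		yl = read_data(client, PIXART_PAT9125_DELTA_Y_LO_REG);
		xyh = read_data(client, PIXART_PAT9125_DELTA_XY_HI_REG);

		/* sign-extend the 12-bit two's-complement values */
		*dx = sign_extend32(((xyh & 0xF0) << 4) | xl, 11);
		*dy = sign_extend32(((xyh & 0x0F) << 8) | yl, 11);
	}
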
diff --git a/drivers/input/misc/ots_pat9125/pixart_platform.h b/drivers/input/misc/ots_pat9125/pixart_platform.h
index a025fd06343e..1fe448fdc2cb 100644
--- a/drivers/input/misc/ots_pat9125/pixart_platform.h
+++ b/drivers/input/misc/ots_pat9125/pixart_platform.h
@@ -4,22 +4,14 @@
*
*/
-#ifndef _PIXART_PLATFORM_
-#define _PIXART_PLATFORM_
+#ifndef __PIXART_PLATFORM_H_
+#define __PIXART_PLATFORM_H_
-#include <linux/input.h>
-#include <linux/pm.h>
-#include <linux/spi/spi.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/i2c.h>
#include <linux/delay.h>
-#include <linux/types.h>
/* extern functions */
-extern unsigned char ReadData(unsigned char addr);
-extern void WriteData(unsigned char addr, unsigned char data);
+extern unsigned char read_data(struct i2c_client *, u8 addr);
+extern void write_data(struct i2c_client *, u8 addr, u8 data);
#endif
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c69927bd4ff2..afa519aa8203 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1888,8 +1888,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
return NULL;
smmu_domain->secure_vmid = VMID_INVAL;
- /* disable coherent htw by default */
- smmu_domain->attributes = (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
INIT_LIST_HEAD(&smmu_domain->pte_info_list);
INIT_LIST_HEAD(&smmu_domain->unassign_list);
INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
@@ -2263,15 +2261,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto err_destroy_domain_context;
}
- if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE))
- && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) {
- dev_err(dev,
- "Can't attach: this domain wants coherent htw but %s doesn't support it\n",
- dev_name(smmu_domain->smmu->dev));
- ret = -EINVAL;
- goto err_destroy_domain_context;
- }
-
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (ret)
@@ -2977,11 +2966,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
ret = 0;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- *((int *)data) = !!(smmu_domain->attributes &
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE));
- ret = 0;
- break;
case DOMAIN_ATTR_SECURE_VMID:
*((int *)data) = smmu_domain->secure_vmid;
ret = 0;
@@ -3083,29 +3067,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- {
- struct arm_smmu_device *smmu;
- int htw_disable = *((int *)data);
-
- smmu = smmu_domain->smmu;
-
- if (smmu && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- && !htw_disable) {
- dev_err(smmu->dev,
- "Can't enable coherent htw on this domain: this SMMU doesn't support it\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (htw_disable)
- smmu_domain->attributes |=
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- else
- smmu_domain->attributes &=
- ~(1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- break;
- }
case DOMAIN_ATTR_SECURE_VMID:
BUG_ON(smmu_domain->secure_vmid != VMID_INVAL);
smmu_domain->secure_vmid = *((int *)data);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ea8db1a431d0..266f7065fca4 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -649,7 +649,7 @@ err:
int fast_smmu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
- int htw_disable = 1, atomic_domain = 1;
+ int atomic_domain = 1;
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
size_t size = mapping->bits << PAGE_SHIFT;
@@ -657,10 +657,6 @@ int fast_smmu_attach_device(struct device *dev,
if (mapping->base + size > (SZ_1G * 4ULL))
return -EINVAL;
- if (iommu_domain_set_attr(domain, DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable))
- return -EINVAL;
-
if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
&atomic_domain))
return -EINVAL;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4036997f49c7..5e47e2481300 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -481,9 +481,11 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
pte |= (prot & IOMMU_PRIV) ? ARM_LPAE_PTE_AP_PRIV_RO
: ARM_LPAE_PTE_AP_RO;
- if (prot & IOMMU_CACHE)
+ if (prot & IOMMU_CACHE) {
pte |= (ARM_LPAE_MAIR_ATTR_IDX_CACHE
<< ARM_LPAE_PTE_ATTRINDX_SHIFT);
+ pte |= ARM_LPAE_PTE_SH_OS;
+ }
if (prot & IOMMU_DEVICE)
pte |= (ARM_LPAE_MAIR_ATTR_IDX_DEV <<
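
With this change, any page mapped with IOMMU_CACHE is also marked
outer-shareable, which is what lets the coherent-HTW attribute (removed
throughout this patch) give way to the dmb(ish) added in
msm_dma_iommu_mapping.c. From the caller's side nothing changes beyond the
prot flags already in use; an illustrative call through the standard API:

	/* a cacheable mapping now also gets ARM_LPAE_PTE_SH_OS */
	ret = iommu_map(domain, iova, paddr, SZ_4K,
			IOMMU_READ | IOMMU_WRITE | IOMMU_CACHE);
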
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index a0227fd05939..3b54fd4a77e6 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -48,8 +48,6 @@ static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
return "DOMAIN_ATTR_FSL_PAMUV1";
case DOMAIN_ATTR_NESTING:
return "DOMAIN_ATTR_NESTING";
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- return "DOMAIN_ATTR_COHERENT_HTW_DISABLE";
case DOMAIN_ATTR_PT_BASE_ADDR:
return "DOMAIN_ATTR_PT_BASE_ADDR";
case DOMAIN_ATTR_SECURE_VMID:
@@ -96,7 +94,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
{
struct iommu_debug_attachment *attach = s->private;
phys_addr_t pt_phys;
- int coherent_htw_disable;
int secure_vmid;
seq_printf(s, "Domain: 0x%p\n", attach->domain);
@@ -110,14 +107,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
pt_virt, &pt_phys);
}
- seq_puts(s, "COHERENT_HTW_DISABLE: ");
- if (iommu_domain_get_attr(attach->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &coherent_htw_disable))
- seq_puts(s, "(Unknown)\n");
- else
- seq_printf(s, "%d\n", coherent_htw_disable);
-
seq_puts(s, "SECURE_VMID: ");
if (iommu_domain_get_attr(attach->domain,
DOMAIN_ATTR_SECURE_VMID,
@@ -733,7 +722,6 @@ static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int htw_disable = 1, atomic = 1;
@@ -764,7 +752,6 @@ static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
DOMAIN_ATTR_SECURE_VMID,
};
@@ -797,7 +784,6 @@ static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
enum iommu_attr attrs[] = {
DOMAIN_ATTR_FAST,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int one = 1;
@@ -1507,7 +1493,6 @@ static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
int val, bool is_secure)
{
- int htw_disable = 1;
struct bus_type *bus;
bus = msm_iommu_get_bus(ddev->dev);
@@ -1520,13 +1505,6 @@ static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
return -ENOMEM;
}
- if (iommu_domain_set_attr(ddev->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable)) {
- pr_err("Couldn't disable coherent htw\n");
- goto out_domain_free;
- }
-
if (is_secure && iommu_domain_set_attr(ddev->domain,
DOMAIN_ATTR_SECURE_VMID,
&val)) {
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 0a8728ce36dc..25fe36ab6339 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -17,6 +17,7 @@
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <asm/barrier.h>
#include <linux/msm_dma_iommu_mapping.h>
@@ -216,10 +217,13 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
sg->dma_length = iommu_map->sgl.dma_length;
kref_get(&iommu_map->ref);
- /*
- * Need to do cache operations here based on "dir" in the
- * future if we go with coherent mappings.
- */
+ if (is_device_dma_coherent(dev))
+ /*
+ * Ensure all outstanding changes for coherent
+ * buffers are applied to the cache before any
+ * DMA occurs.
+ */
+ dmb(ish);
ret = nents;
}
mutex_unlock(&iommu_meta->lock);
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 03a61407aef8..feede3a14e07 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -1427,7 +1427,6 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
if (!cb || !dev) {
pr_err("Error: invalid input params\n");
@@ -1465,21 +1464,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
goto end;
}
- /*
- * Set the domain attributes
- * disable L2 redirect since it decreases
- * performance
- */
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- pr_err("Error: couldn't disable coherent HTW\n");
- rc = -ENODEV;
- goto err_set_attr;
- }
return 0;
-err_set_attr:
- arm_iommu_release_mapping(cb->mapping);
end:
return rc;
}
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 3ac4c3af3208..258e08c1b34f 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -114,6 +114,13 @@ static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
bool status);
static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
uint32_t buff_mgr_ops, uint32_t id, void *arg);
+static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
+ uint8_t enable, uint32_t irq_mask);
+static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
+ int queue_len);
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info);
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info);
#if CONFIG_MSM_CPP_DBG
#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
@@ -636,6 +643,127 @@ static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base)
return rc;
}
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info)
+{
+ int32_t s_base, p_base;
+ uint32_t rd_off, wr0_off, wr1_off, wr2_off, wr3_off;
+ uint32_t wr0_mdata_off, wr1_mdata_off, wr2_mdata_off, wr3_mdata_off;
+ uint32_t rd_ref_off, wr_ref_off;
+ uint32_t s_size, p_size;
+ uint8_t tnr_enabled, ubwc_enabled, cds_en;
+ int32_t i = 0;
+ uint32_t *cpp_frame_msg;
+
+ cpp_frame_msg = frame_info->cpp_cmd_msg;
+
+ /* Update stripe/plane size and base offsets */
+ s_base = cpp_dev->payload_params.stripe_base;
+ s_size = cpp_dev->payload_params.stripe_size;
+ p_base = cpp_dev->payload_params.plane_base;
+ p_size = cpp_dev->payload_params.plane_size;
+
+ /* Fetch engine Offset */
+ rd_off = cpp_dev->payload_params.rd_pntr_off;
+ /* Write engine offsets */
+ wr0_off = cpp_dev->payload_params.wr_0_pntr_off;
+ wr1_off = wr0_off + 1;
+ wr2_off = wr1_off + 1;
+ wr3_off = wr2_off + 1;
+ /* Reference engine offsets */
+ rd_ref_off = cpp_dev->payload_params.rd_ref_pntr_off;
+ wr_ref_off = cpp_dev->payload_params.wr_ref_pntr_off;
+ /* Meta data offsets */
+ wr0_mdata_off =
+ cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
+ wr1_mdata_off = (wr0_mdata_off + 1);
+ wr2_mdata_off = (wr1_mdata_off + 1);
+ wr3_mdata_off = (wr2_mdata_off + 1);
+
+ tnr_enabled = ((frame_info->feature_mask & TNR_MASK) >> 2);
+ ubwc_enabled = ((frame_info->feature_mask & UBWC_MASK) >> 5);
+ cds_en = ((frame_info->feature_mask & CDS_MASK) >> 6);
+
+ for (i = 0; i < frame_info->num_strips; i++) {
+ pr_err("stripe %d: in %x, out1 %x out2 %x, out3 %x, out4 %x\n",
+ i, cpp_frame_msg[s_base + rd_off + i * s_size],
+ cpp_frame_msg[s_base + wr0_off + i * s_size],
+ cpp_frame_msg[s_base + wr1_off + i * s_size],
+ cpp_frame_msg[s_base + wr2_off + i * s_size],
+ cpp_frame_msg[s_base + wr3_off + i * s_size]);
+
+ if (tnr_enabled) {
+ pr_err("stripe %d: read_ref %x, write_ref %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size],
+ cpp_frame_msg[s_base + wr_ref_off + i * s_size]
+ );
+ }
+
+ if (cds_en) {
+ pr_err("stripe %d:, dsdn_off %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size]
+ );
+ }
+
+ if (ubwc_enabled) {
+ pr_err("stripe %d: metadata %x, %x, %x, %x\n", i,
+ cpp_frame_msg[s_base + wr0_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr1_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr2_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr3_mdata_off +
+ i * s_size]
+ );
+ }
+
+ }
+ return 0;
+}
+
+static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ int32_t i = 0, queue_len = 0;
+ struct msm_device_queue *queue = NULL;
+
+ if (token) {
+ cpp_dev = token;
+ disable_irq(cpp_dev->irq->start);
+ if (atomic_read(&cpp_timer.used)) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer_sync(&cpp_timer.cpp_timer);
+ }
+ mutex_lock(&cpp_dev->mutex);
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+ cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ queue = &cpp_timer.data.cpp_dev->processing_q;
+ queue_len = queue->len;
+ if (!queue_len) {
+ pr_err("%s:%d: Invalid queuelen\n", __func__, __LINE__);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ return;
+ }
+ for (i = 0; i < queue_len; i++) {
+ if (cpp_timer.data.processed_frame[i]) {
+ processed_frame[i] =
+ cpp_timer.data.processed_frame[i];
+ pr_err("Fault on identity=0x%x, frame_id=%03d\n",
+ processed_frame[i]->identity,
+ processed_frame[i]->frame_id);
+ msm_cpp_dump_addr(cpp_dev, processed_frame[i]);
+ msm_cpp_dump_frame_cmd(processed_frame[i]);
+ }
+ }
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ }
+}
static int cpp_init_mem(struct cpp_device *cpp_dev)
{
@@ -652,6 +780,9 @@ static int cpp_init_mem(struct cpp_device *cpp_dev)
return -ENODEV;
cpp_dev->iommu_hdl = iommu_hdl;
+ cam_smmu_reg_client_page_fault_handler(
+ cpp_dev->iommu_hdl,
+ msm_cpp_iommu_fault_handler, cpp_dev);
return 0;
}
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 7bbd8aa53342..c11c4b61d832 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -448,7 +448,6 @@ int sde_smmu_probe(struct platform_device *pdev)
struct sde_smmu_domain smmu_domain;
const struct of_device_id *match;
struct sde_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
if (!mdata) {
@@ -535,13 +534,6 @@ int sde_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- SDEROT_ERR("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 25fccab99fb3..a3080be8cd7a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1166,7 +1166,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
int secure_vmid = VMID_INVAL;
struct bus_type *bus;
@@ -1192,14 +1191,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
goto remove_cb;
}
- rc = iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR, "%s - disable coherent HTW failed: %s %d\n",
- __func__, dev_name(dev), rc);
- goto release_mapping;
- }
-
if (cb->is_secure) {
secure_vmid = get_secure_vmid(cb);
rc = iommu_domain_set_attr(cb->mapping->domain,
diff --git a/drivers/media/platform/msm/vidc/venus_boot.c b/drivers/media/platform/msm/vidc/venus_boot.c
index 925c97a5b6e8..85c3e15edded 100644
--- a/drivers/media/platform/msm/vidc/venus_boot.c
+++ b/drivers/media/platform/msm/vidc/venus_boot.c
@@ -190,8 +190,6 @@ static int pil_venus_auth_and_reset(void)
{
int rc;
- /* Need to enable this for new SMMU to set the device attribute */
- bool disable_htw = true;
phys_addr_t fw_bias = venus_data->resources->firmware_base;
void __iomem *reg_base = venus_data->reg_base;
u32 ver;
@@ -278,17 +276,6 @@ static int pil_venus_auth_and_reset(void)
if (iommu_present) {
phys_addr_t pa = fw_bias;
- /* Enable this for new SMMU to set the device attribute */
- rc = iommu_domain_set_attr(venus_data->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed to disable COHERENT_HTW: %s\n",
- __func__, dev_name(dev));
- goto release_mapping;
- }
-
rc = arm_iommu_attach_device(dev, venus_data->mapping);
if (rc) {
dprintk(VIDC_ERR,
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index ec6955452391..1e56d445c6e1 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -1404,7 +1404,6 @@ static int cnss_wlan_is_codeswap_supported(u16 revision)
static int cnss_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
@@ -1418,15 +1417,6 @@ static int cnss_smmu_init(struct device *dev)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- pr_err("%s: set disable_htw attribute failed, err = %d\n",
- __func__, ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret) {
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index af1e5a70d585..352defe6204b 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -170,7 +170,12 @@ static void gsi_handle_glob_err(uint32_t err)
gsi_ctx->per.notify_cb(&per_notify);
break;
case GSI_ERR_TYPE_CHAN:
- BUG_ON(log->virt_idx >= GSI_MAX_CHAN);
+ if (log->virt_idx >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ch = &gsi_ctx->chan[log->virt_idx];
chan_notify.chan_user_data = ch->props.chan_user_data;
chan_notify.err_desc = err & 0xFFFF;
@@ -213,7 +218,12 @@ static void gsi_handle_glob_err(uint32_t err)
WARN_ON(1);
break;
case GSI_ERR_TYPE_EVT:
- BUG_ON(log->virt_idx >= GSI_MAX_EVT_RING);
+ if (log->virt_idx >= gsi_ctx->max_ev) {
+ GSIERR("Unexpected ev %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ev = &gsi_ctx->evtr[log->virt_idx];
evt_notify.user_data = ev->props.user_data;
evt_notify.err_desc = err & 0xFFFF;
@@ -257,6 +267,9 @@ static void gsi_handle_glob_ee(int ee)
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
err = gsi_readl(gsi_ctx->base +
GSI_EE_n_ERROR_LOG_OFFS(ee));
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(ee));
gsi_writel(clr, gsi_ctx->base +
GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
gsi_handle_glob_err(err);
@@ -311,7 +324,12 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
uint64_t rp;
ch_id = evt->chid;
- BUG_ON(ch_id >= GSI_MAX_CHAN);
+ if (ch_id >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", ch_id);
+ WARN_ON(1);
+ return;
+ }
+
ch_ctx = &gsi_ctx->chan[ch_id];
BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI);
rp = evt->xfer_ptr;
@@ -567,6 +585,75 @@ static irqreturn_t gsi_isr(int irq, void *ctxt)
return IRQ_HANDLED;
}
+static uint32_t gsi_get_max_channels(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max channels %d\n", reg);
+
+ return reg;
+}
+
+static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max event rings %d\n", reg);
+
+ return reg;
+}
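Both helpers above read a version-specific HW_PARAM register and pull a field out with the same mask-and-shift idiom. A runnable userspace reduction of that idiom, using the v1.0 GSI_CH_NUM field layout from gsi_reg.h and an invented register value:

#include <stdint.h>
#include <stdio.h>

#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00u
#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8u

int main(void)
{
	uint32_t reg = 0x00171f17u;	/* invented readl() result */
	uint32_t max_ch;

	max_ch = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
		 GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
	printf("max channels %u\n", max_ch);	/* prints 31 */
	return 0;
}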
int gsi_complete_clk_grant(unsigned long dev_hdl)
{
unsigned long flags;
@@ -611,6 +698,11 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
return -GSI_STATUS_INVALID_PARAMS;
}
+ if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
+ GSIERR("bad params gsi_ver=%d\n", props->ver);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
if (!props->notify_cb) {
GSIERR("notify callback must be provided\n");
return -GSI_STATUS_INVALID_PARAMS;
@@ -668,8 +760,25 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
mutex_init(&gsi_ctx->mlock);
atomic_set(&gsi_ctx->num_chan, 0);
atomic_set(&gsi_ctx->num_evt_ring, 0);
- /* only support 16 un-reserved + 7 reserved event virtual IDs */
- gsi_ctx->evt_bmap = ~0x7E03FF;
+ gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ch == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max channels\n");
+ return -GSI_STATUS_ERROR;
+ }
+ gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ev == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max event rings\n");
+ return -GSI_STATUS_ERROR;
+ }
+
+	/* bitmap covers max_ev events, minus the reserved event rings */
+ gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
+ gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
+ ((1 << GSI_MHI_ER_START) - 1);
/*
* enable all interrupts but GSI_BREAK_POINT.
@@ -693,6 +802,10 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
else
GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
+
*dev_hdl = (uintptr_t)gsi_ctx;
return GSI_STATUS_SUCCESS;
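The computed evt_bmap replaces the old hard-coded ~0x7E03FF ("16 un-reserved + 7 reserved"). Assuming GSI_MHI_ER_START = 10 and GSI_MHI_ER_END = 16, which is what the old constant implies but this hunk does not show, the new arithmetic reproduces the old value exactly for max_ev = 23. A runnable check:

#include <stdio.h>

#define GSI_MHI_ER_START 10	/* assumed; implied by old ~0x7E03FF */
#define GSI_MHI_ER_END   16	/* assumed */

int main(void)
{
	unsigned long max_ev = 23;
	unsigned long bmap;

	/* mark every event ring id >= max_ev as taken */
	bmap = ~((1UL << max_ev) - 1);
	/* also reserve the MHI window START..END */
	bmap |= ((1UL << (GSI_MHI_ER_END + 1)) - 1) ^
		((1UL << GSI_MHI_ER_START) - 1);

	/* both lines print the same value */
	printf("new bmap      0x%lx\n", bmap);
	printf("old constant  0x%lx\n", ~0x7E03FFUL);
	return 0;
}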
@@ -1059,7 +1172,7 @@ int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1093,7 +1206,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1160,7 +1273,7 @@ int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1194,7 +1307,7 @@ int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1255,7 +1368,7 @@ int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1291,7 +1404,7 @@ int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1382,7 +1495,7 @@ static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
uint64_t ra;
- if (props->ch_id >= GSI_MAX_CHAN) {
+ if (props->ch_id >= gsi_ctx->max_ch) {
GSIERR("ch_id %u invalid\n", props->ch_id);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1573,7 +1686,7 @@ int gsi_write_channel_scratch(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1610,7 +1723,7 @@ int gsi_query_channel_db_addr(unsigned long chan_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1642,7 +1755,7 @@ int gsi_start_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1694,7 +1807,7 @@ int gsi_stop_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1763,7 +1876,7 @@ int gsi_stop_db_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1832,7 +1945,7 @@ int gsi_reset_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1898,7 +2011,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2021,7 +2134,7 @@ int gsi_query_channel_info(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !info) {
+ if (chan_hdl >= gsi_ctx->max_ch || !info) {
GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2091,7 +2204,7 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !is_empty) {
+ if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
chan_hdl, is_empty);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2155,7 +2268,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !num_xfers || !xfer) {
+ if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n",
chan_hdl, num_xfers, xfer);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2242,7 +2355,7 @@ int gsi_start_xfer(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2278,7 +2391,7 @@ int gsi_poll_channel(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !notify) {
+ if (chan_hdl >= gsi_ctx->max_ch || !notify) {
GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2327,7 +2440,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2390,7 +2503,7 @@ int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2426,7 +2539,7 @@ int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2471,9 +2584,9 @@ static void gsi_configure_ieps(void *base)
gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
- gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS);
- gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS);
- gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS);
+ gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+ gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+ gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
@@ -2502,9 +2615,9 @@ static void gsi_configure_bck_prs_matrix(void *base)
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
gsi_writel(0x00000000,
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
- gsi_writel(0x00ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
- gsi_writel(0xfdffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
@@ -2551,15 +2664,35 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
}
/* Enable the MCS and set to x2 clocks */
- value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
- GSI_GSI_CFG_GSI_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
- GSI_GSI_CFG_MCS_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
- GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
- ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
- GSI_GSI_CFG_UC_IS_MCS_BMSK));
- gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ if (gsi_ctx->per.ver >= GSI_VER_1_2) {
+ value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
+ gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
+
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK) |
+ ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
+ GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
+ ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
+ GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ } else {
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ }
iounmap(gsi_base);
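On v1.2+ the MCS enable moves into the dedicated GSI_GSI_MCS_CFG register and the GSI_CFG write deliberately leaves MCS_ENABLE at zero; every field in both branches is still composed with the same (bit << SHFT) & BMSK idiom. A runnable reduction of that compose step, using the GSI_CFG bit positions from gsi_reg.h:

#include <stdint.h>
#include <stdio.h>

#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1u
#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0u
#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4u
#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT 0x2u

int main(void)
{
	uint32_t value;

	/* each field: place the bit at its shift, then clamp to its mask */
	value = ((1u << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
		 GSI_GSI_CFG_GSI_ENABLE_BMSK) |
		((1u << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
		 GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK);

	printf("GSI_CFG value 0x%x\n", value);	/* prints 0x5 */
	return 0;
}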
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 1d438ffb8b76..0b94ed2d3a92 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -19,8 +19,8 @@
#include <linux/spinlock.h>
#include <linux/msm_gsi.h>
-#define GSI_MAX_CHAN 31
-#define GSI_MAX_EVT_RING 23
+#define GSI_CHAN_MAX 31
+#define GSI_EVT_RING_MAX 23
#define GSI_NO_EVT_ERINDEX 31
#define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
@@ -130,8 +130,8 @@ struct gsi_ctx {
struct device *dev;
struct gsi_per_props per;
bool per_registered;
- struct gsi_chan_ctx chan[GSI_MAX_CHAN];
- struct gsi_evt_ctx evtr[GSI_MAX_EVT_RING];
+ struct gsi_chan_ctx chan[GSI_CHAN_MAX];
+ struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
@@ -141,6 +141,8 @@ struct gsi_ctx {
struct gsi_ee_scratch scratch;
int num_ch_dp_stats;
struct workqueue_struct *dp_stat_wq;
+ u32 max_ch;
+ u32 max_ev;
};
enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 2ab8b79acc6d..5eb9084292a4 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -71,7 +71,7 @@ static ssize_t gsi_dump_evt(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_EVT_RING) {
+ if (arg1 >= gsi_ctx->max_ev) {
TERR("invalid evt ring id %u\n", arg1);
return -EFAULT;
}
@@ -184,7 +184,7 @@ static ssize_t gsi_dump_ch(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_CHAN) {
+ if (arg1 >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", arg1);
return -EFAULT;
}
@@ -271,9 +271,30 @@ static ssize_t gsi_dump_ee(struct file *file,
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ if (gsi_ctx->per.ver == GSI_VER_1_0) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_2) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_3) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
+ } else {
+ WARN_ON(1);
+ }
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee));
TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val);
@@ -329,7 +350,7 @@ static ssize_t gsi_dump_map(struct file *file,
int i;
TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap);
- for (i = 0; i < GSI_MAX_CHAN; i++) {
+ for (i = 0; i < gsi_ctx->max_ch; i++) {
ctx = &gsi_ctx->chan[i];
if (ctx->allocated) {
@@ -402,8 +423,8 @@ static ssize_t gsi_dump_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -464,7 +485,7 @@ static ssize_t gsi_enable_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
@@ -540,7 +561,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
/* get */
if (kstrtou32(dbg_buff, 0, &ch_id))
goto error;
- if (ch_id >= GSI_MAX_CHAN)
+ if (ch_id >= gsi_ctx->max_ch)
goto error;
PRT_STAT("ch %d: max_re_expected=%d\n", ch_id,
gsi_ctx->chan[ch_id].props.max_re_expected);
@@ -553,7 +574,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem);
- if (ch_id >= GSI_MAX_CHAN) {
+ if (ch_id >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", ch_id);
goto error;
}
@@ -572,7 +593,7 @@ static void gsi_wq_print_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].print_dp_stats)
gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
}
@@ -618,7 +639,7 @@ static void gsi_wq_update_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].allocated &&
gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
gsi_ctx->chan[ch_id].enable_dp_stats)
@@ -649,8 +670,8 @@ static ssize_t gsi_rst_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -691,7 +712,7 @@ static ssize_t gsi_print_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 36a74105b490..fa1e84896f73 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,10 @@
#define GSI_GSI_CFG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000000)
#define GSI_GSI_CFG_RMSK 0xf
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8
#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3
#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
@@ -26,6 +30,11 @@
#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1
#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0
+#define GSI_GSI_MCS_CFG_OFFS \
+ (GSI_GSI_REG_BASE_OFFS + 0x0000B000)
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0
+
#define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000008)
#define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff
@@ -99,8 +108,20 @@
#define GSI_GSI_CGC_CTRL_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000060)
#define GSI_GSI_CGC_CTRL_RMSK 0x3f
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_BMSK 0x20
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_SHFT 0x5
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK 0x400
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK 0x200
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4
#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8
@@ -619,23 +640,23 @@
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000430)
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000434)
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000438)
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0
#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x0000043c)
@@ -701,7 +722,9 @@
#define GSI_GSI_DEBUG_BUSY_REG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00001010)
-#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0x7f
+#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
@@ -1345,22 +1368,150 @@
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
-#define GSI_EE_n_GSI_HW_PARAM_OFFS(n) \
+/* v1.0 */
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+
+/* v1.2 */
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+/* v1.3 */
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
-#define GSI_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
-#define GSI_EE_n_GSI_HW_PARAM_MAXn 3
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n))
@@ -1662,7 +1813,7 @@
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x00003fff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \
@@ -1670,7 +1821,7 @@
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x000003ff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 9cb0b1f3c379..804c89dc9533 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -207,7 +207,6 @@ struct platform_device *ipa_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
u32 ipa_base;
@@ -4313,9 +4312,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
-
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
&ipa_drv_res->ipa_hw_type);
@@ -4502,7 +4498,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4519,17 +4514,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -4589,7 +4573,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
int fast = 1;
@@ -4628,18 +4611,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -4694,7 +4665,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4731,18 +4701,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 24fbc5c738d8..ab62dbcddd22 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -251,7 +251,6 @@ struct platform_device *ipa3_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
bool use_64_bit_dma_mask;
@@ -3791,6 +3790,32 @@ static int ipa3_gsi_pre_fw_load_init(void)
return 0;
}
+static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
+{
+ enum gsi_ver gsi_ver;
+
+ switch (ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ gsi_ver = GSI_VER_1_0;
+ break;
+ case IPA_HW_v3_5:
+ gsi_ver = GSI_VER_1_2;
+ break;
+ case IPA_HW_v3_5_1:
+ gsi_ver = GSI_VER_1_3;
+ break;
+ default:
+ IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
+ WARN_ON(1);
+ gsi_ver = GSI_VER_ERR;
+ }
+
+ IPADBG("GSI version %d\n", gsi_ver);
+
+ return gsi_ver;
+}
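This helper is what feeds gsi_props.ver, and gsi_register_device() now rejects anything outside (GSI_VER_ERR, GSI_VER_MAX). A small runnable sketch of that reject path; the concrete enumerator values are assumed here, since the diff only shows the symbolic names and the range check:

#include <stdio.h>

enum gsi_ver {
	GSI_VER_ERR = 0,	/* assumed ordering for illustration */
	GSI_VER_1_0,
	GSI_VER_1_2,
	GSI_VER_1_3,
	GSI_VER_MAX,
};

/* illustrative caller-side check, mirroring gsi_register_device() */
static int check_ver(enum gsi_ver ver)
{
	if (ver <= GSI_VER_ERR || ver >= GSI_VER_MAX)
		return -1;
	return 0;
}

int main(void)
{
	printf("%d %d\n", check_ver(GSI_VER_ERR), check_ver(GSI_VER_1_2));
	/* prints "-1 0": unmapped IPA HW types are rejected at registration */
	return 0;
}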
+
/**
* ipa3_post_init() - Initialize the IPA Driver (Part II).
* This part contains all initialization which requires interaction with
@@ -3820,6 +3845,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
+ gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
gsi_props.ee = resource_p->ee;
gsi_props.intr = GSI_INTR_IRQ;
gsi_props.irq = resource_p->transport_irq;
@@ -4695,9 +4721,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->ipa_tz_unlock_reg_num = 0;
ipa_drv_res->ipa_tz_unlock_reg = NULL;
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
-
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
&ipa_drv_res->ipa_hw_type);
@@ -4953,7 +4976,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4973,17 +4995,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -5056,7 +5067,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int bypass = 1;
int fast = 1;
@@ -5102,18 +5112,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -5168,7 +5166,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -5216,17 +5213,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6f86448319db..8e85822d9719 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -481,7 +481,7 @@ struct ipa_gsi_ep_mem_info {
struct ipa3_status_stats {
struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
- int curr;
+ unsigned int curr;
};
/**
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 6d826590cabc..45fedfa72bda 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -569,7 +569,6 @@ err_disable_vregs:
static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
{
- int disable_htw = 1;
int atomic_ctx = 1;
int rc;
int bypass_enable = 1;
@@ -587,17 +586,6 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
dev_info(ctx->dev, "IOMMU mapping created: %p\n", ctx->mapping);
rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- /* This error can be ignored and not considered fatal,
- * but let the users know this happened
- */
- dev_err(ctx->dev, "Warning: disable coherent HTW failed (%d)\n",
- rc);
- }
-
- rc = iommu_domain_set_attr(ctx->mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (rc) {
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index 515f31a44ce7..7e08b71e3b6a 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -54,6 +54,8 @@
CHARS_PER_ITEM) + 1) \
#define FG_SRAM_ADDRESS_MAX 255
+#define PROFILE_LEN 224
+#define PROFILE_COMP_LEN 148
#define BUCKET_COUNT 8
#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
@@ -66,6 +68,7 @@ enum fg_debug_flag {
FG_SRAM_READ = BIT(4), /* Show SRAM reads */
FG_BUS_WRITE = BIT(5), /* Show REGMAP writes */
FG_BUS_READ = BIT(6), /* Show REGMAP reads */
+ FG_CAP_LEARN = BIT(7), /* Show capacity learning */
};
/* SRAM access */
@@ -117,10 +120,15 @@ enum fg_sram_param_id {
FG_SRAM_OCV,
FG_SRAM_RSLOW,
FG_SRAM_ALG_FLAGS,
+ FG_SRAM_CC_SOC,
+ FG_SRAM_CC_SOC_SW,
+ FG_SRAM_ACT_BATT_CAP,
/* Entries below here are configurable during initialization */
FG_SRAM_CUTOFF_VOLT,
FG_SRAM_EMPTY_VOLT,
FG_SRAM_VBATT_LOW,
+ FG_SRAM_FLOAT_VOLT,
+ FG_SRAM_VBATT_FULL,
FG_SRAM_ESR_TIMER_DISCHG_MAX,
FG_SRAM_ESR_TIMER_DISCHG_INIT,
FG_SRAM_ESR_TIMER_CHG_MAX,
@@ -177,6 +185,16 @@ struct fg_dt_props {
int esr_timer_charging;
int esr_timer_awake;
int esr_timer_asleep;
+ bool force_load_profile;
+ int cl_start_soc;
+ int cl_max_temp;
+ int cl_min_temp;
+ int cl_max_cap_inc;
+ int cl_max_cap_dec;
+ int cl_max_cap_limit;
+ int cl_min_cap_limit;
+ int jeita_hyst_temp;
+ int batt_temp_delta;
};
/* parameters from battery profile */
@@ -184,6 +202,7 @@ struct fg_batt_props {
const char *batt_type_str;
char *batt_profile;
int float_volt_uv;
+ int vbatt_full_mv;
int fastchg_curr_ma;
int batt_id_kohm;
};
@@ -197,11 +216,21 @@ struct fg_cyc_ctr_data {
struct mutex lock;
};
+struct fg_cap_learning {
+ bool active;
+ int init_cc_soc_sw;
+ int64_t nom_cap_uah;
+ int64_t init_cc_uah;
+ int64_t final_cc_uah;
+ int64_t learned_cc_uah;
+ struct mutex lock;
+};
+
struct fg_irq_info {
const char *name;
const irq_handler_t handler;
- int irq;
bool wakeable;
+ int irq;
};
struct fg_chip {
@@ -216,29 +245,34 @@ struct fg_chip {
struct fg_irq_info *irqs;
struct votable *awake_votable;
struct fg_sram_param *sp;
+ struct fg_alg_flag *alg_flags;
int *debug_mask;
- char *batt_profile;
+ char batt_profile[PROFILE_LEN];
struct fg_dt_props dt;
struct fg_batt_props bp;
struct fg_cyc_ctr_data cyc_ctr;
struct notifier_block nb;
+ struct fg_cap_learning cl;
struct mutex bus_lock;
struct mutex sram_rw_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
- int nom_cap_uah;
+ int batt_id;
int status;
- int prev_status;
- bool batt_id_avail;
+ int charge_done;
+ int last_soc;
+ int last_batt_temp;
+ int health;
+ bool profile_available;
bool profile_loaded;
bool battery_missing;
+ bool fg_restarting;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct work_struct cycle_count_work;
- struct fg_alg_flag *alg_flags;
};
/* Debugfs data structures are below */
diff --git a/drivers/power/qcom-charger/fg-reg.h b/drivers/power/qcom-charger/fg-reg.h
index 9d5874340a8e..431e28a7eb1f 100644
--- a/drivers/power/qcom-charger/fg-reg.h
+++ b/drivers/power/qcom-charger/fg-reg.h
@@ -126,6 +126,7 @@
/* BATT_INFO_BATT_TEMP_CFG */
#define JEITA_TEMP_HYST_MASK GENMASK(5, 4)
+#define JEITA_TEMP_HYST_SHIFT 4
#define JEITA_TEMP_NO_HYST 0x0
#define JEITA_TEMP_HYST_1C 0x1
#define JEITA_TEMP_HYST_2C 0x2
diff --git a/drivers/power/qcom-charger/fg-util.c b/drivers/power/qcom-charger/fg-util.c
index bf5a446452a4..790e56bd3dae 100644
--- a/drivers/power/qcom-charger/fg-util.c
+++ b/drivers/power/qcom-charger/fg-util.c
@@ -83,6 +83,9 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
@@ -147,6 +150,9 @@ int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index 7739952f3254..f8c1ad5963af 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -35,6 +35,8 @@
#define CUTOFF_VOLT_OFFSET 0
#define SYS_TERM_CURR_WORD 6
#define SYS_TERM_CURR_OFFSET 0
+#define VBATT_FULL_WORD 7
+#define VBATT_FULL_OFFSET 0
#define DELTA_SOC_THR_WORD 12
#define DELTA_SOC_THR_OFFSET 3
#define RECHARGE_SOC_THR_WORD 14
@@ -65,12 +67,18 @@
#define BATT_SOC_OFFSET 0
#define MONOTONIC_SOC_WORD 94
#define MONOTONIC_SOC_OFFSET 2
+#define CC_SOC_WORD 95
+#define CC_SOC_OFFSET 0
+#define CC_SOC_SW_WORD 96
+#define CC_SOC_SW_OFFSET 0
#define VOLTAGE_PRED_WORD 97
#define VOLTAGE_PRED_OFFSET 0
#define OCV_WORD 97
#define OCV_OFFSET 2
#define RSLOW_WORD 101
#define RSLOW_OFFSET 0
+#define ACT_BATT_CAP_WORD 117
+#define ACT_BATT_CAP_OFFSET 0
#define LAST_BATT_SOC_WORD 119
#define LAST_BATT_SOC_OFFSET 0
#define LAST_MONOTONIC_SOC_WORD 119
@@ -89,17 +97,23 @@
#define EMPTY_VOLT_v2_OFFSET 3
#define VBATT_LOW_v2_WORD 16
#define VBATT_LOW_v2_OFFSET 0
+#define FLOAT_VOLT_v2_WORD 16
+#define FLOAT_VOLT_v2_OFFSET 2
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int val);
static int fg_decode_value_16b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
static int fg_decode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
static int fg_decode_batt_soc(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value);
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_mv, u8 *buf);
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_ma, u8 *buf);
static void fg_encode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val, u8 *buf);
@@ -120,13 +134,19 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_batt_soc),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
+ NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -134,11 +154,13 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
-2500, fg_encode_voltage, NULL),
PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 100000, 390625,
-2500, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
- PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 256,
+ PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 2048,
100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
1, 256, 100, 0, fg_encode_default, NULL),
@@ -158,13 +180,19 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
fg_decode_batt_soc),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
+ NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -172,12 +200,16 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
15625, -2000, fg_encode_voltage, NULL),
PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 1000,
15625, -2000, fg_encode_voltage, NULL),
+ PARAM(FLOAT_VOLT, FLOAT_VOLT_v2_WORD, FLOAT_VOLT_v2_OFFSET, 1, 1000,
+ 15625, -2000, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
PARAM(DELTA_SOC_THR, DELTA_SOC_THR_v2_WORD, DELTA_SOC_THR_v2_OFFSET, 1,
- 256, 100, 0, fg_encode_default, NULL),
+ 2048, 100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
NULL),
@@ -264,8 +296,36 @@ module_param_named(
sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
);
+static bool fg_sram_dump;
+module_param_named(
+ sram_dump, fg_sram_dump, bool, S_IRUSR | S_IWUSR
+);
+
+static int fg_restart;
+
/* All getters HERE */
+#define VOLTAGE_15BIT_MASK GENMASK(14, 0)
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value)
+{
+ value &= VOLTAGE_15BIT_MASK;
+ sp[id].value = div_u64((u64)value * sp[id].numrtr, sp[id].denmtr);
+ pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+ sp[id].value);
+ return sp[id].value;
+}
+
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value)
+{
+ sp[id].value = div_s64((s64)value * sp[id].numrtr, sp[id].denmtr);
+ sp[id].value = sign_extend32(sp[id].value, 31);
+ pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+ sp[id].value);
+ return sp[id].value;
+}
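
Both decoders above are plain fixed-point scalers: the PARAM table supplies a
numerator/denominator pair per register, and the v2 voltage registers
additionally mask the raw word down to 15 bits. A standalone sketch using the
constants from the VOLTAGE_PRED entry shown earlier (numrtr 244141,
denmtr 1000); illustrative only, not driver code:

    #include <stdint.h>

    #define V15_MASK 0x7fffu /* GENMASK(14, 0) */

    /* Decode a raw 15-bit SRAM word to microvolts. */
    static uint32_t decode_voltage_15b_uv(uint32_t raw)
    {
        raw &= V15_MASK;
        return (uint32_t)(((uint64_t)raw * 244141) / 1000);
    }

    /* decode_voltage_15b_uv(0x4599) == 4349860, i.e. about 4.35 V */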
+
static int fg_decode_value_16b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int value)
{
@@ -302,14 +362,14 @@ static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
}
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_mv, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
- val += sp[id].offset;
- temp = (int64_t)div_u64((u64)val * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ val_mv += sp[id].offset;
+ temp = (int64_t)div_u64((u64)val_mv * sp[id].numrtr, sp[id].denmtr);
+ pr_debug("temp: %llx id: %d, val_mv: %d, buf: [ ", temp, id, val_mv);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
@@ -319,15 +379,15 @@ static void fg_encode_voltage(struct fg_sram_param *sp,
}
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_ma, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
s64 current_ma;
- current_ma = val;
+ current_ma = val_ma;
temp = (int64_t)div_s64(current_ma * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val_ma);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
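
fg_encode_voltage() and fg_encode_current() are the mirror image: scale by
numrtr/denmtr (plus an offset in the voltage case), then emit sp[id].len bytes
least-significant first. A minimal sketch for the VBATT_FULL entry (len 2,
numrtr 1000000, denmtr 244141, offset 0); illustrative only:

    #include <stdint.h>

    /* Encode a millivolt value into a 2-byte little-endian SRAM word. */
    static void encode_voltage_mv(int val_mv, uint8_t buf[2])
    {
        uint64_t raw = ((uint64_t)val_mv * 1000000) / 244141;

        buf[0] = raw & 0xff;        /* LSB first, as in the loop above */
        buf[1] = (raw >> 8) & 0xff;
    }

    /* encode_voltage_mv(4350, buf) -> raw 0x4599, buf = { 0x99, 0x45 } */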
@@ -378,6 +438,9 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
if (id < 0 || id > FG_SRAM_MAX || chip->sp[id].len > sizeof(buf))
return -EINVAL;
+ if (chip->battery_missing)
+ return -ENODATA;
+
rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
buf, chip->sp[id].len, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -393,6 +456,35 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
return 0;
}
+#define CC_SOC_30BIT GENMASK(29, 0)
+static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.nom_cap_uah, CC_SOC_30BIT);
+ return 0;
+}
+
+static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.learned_cc_uah, CC_SOC_30BIT);
+ return 0;
+}
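
CC_SOC and CC_SOC_SW are signed coulomb-counter fractions on a 2^30 full
scale, so scaling by a capacity in uAh yields a charge in uAh. A standalone
sketch of the conversion (cap_uah stands in for cl.nom_cap_uah or
cl.learned_cc_uah):

    #include <stdint.h>

    #define CC_SOC_FULL ((1 << 30) - 1) /* GENMASK(29, 0) */

    /* Convert a raw CC_SOC fraction to microamp-hours. */
    static int64_t cc_soc_to_uah(int32_t cc_soc, int64_t cap_uah)
    {
        return ((int64_t)cc_soc * cap_uah) / CC_SOC_FULL;
    }

    /* cc_soc_to_uah(CC_SOC_FULL / 2, 3000000) -> ~1500000 uAh,
     * half of a 3000 mAh pack */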
+
#define BATT_TEMP_NUMR 1
#define BATT_TEMP_DENR 1
static int fg_get_battery_temp(struct fg_chip *chip, int *val)
@@ -543,7 +635,6 @@ static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
}
fg_dbg(chip, FG_POWER_SUPPLY, "raw: 0x%02x\n", cap[0]);
-
*val = cap[0];
return 0;
}
@@ -593,14 +684,12 @@ static int fg_get_batt_id(struct fg_chip *chip, int *val)
return rc;
}
- chip->batt_id_avail = true;
fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
*val = batt_id;
return 0;
}
-#define PROFILE_LEN 224
static int fg_get_batt_profile(struct fg_chip *chip)
{
struct device_node *node = chip->dev->of_node;
@@ -614,13 +703,14 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return rc;
}
+ batt_id /= 1000;
+ chip->batt_id = batt_id;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_err("Batterydata not available\n");
return -ENXIO;
}
- batt_id /= 1000;
profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
NULL);
if (IS_ERR(profile_node))
@@ -652,6 +742,13 @@ static int fg_get_batt_profile(struct fg_chip *chip)
chip->bp.fastchg_curr_ma = -EINVAL;
}
+ rc = of_property_read_u32(profile_node, "qcom,fg-cc-cv-threshold-mv",
+ &chip->bp.vbatt_full_mv);
+ if (rc < 0) {
+ pr_err("battery cc_cv threshold unavailable, rc:%d\n", rc);
+ chip->bp.vbatt_full_mv = -EINVAL;
+ }
+
data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
if (!data) {
pr_err("No profile data available\n");
@@ -663,6 +760,7 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return -EINVAL;
}
+ chip->profile_available = true;
memcpy(chip->batt_profile, data, len);
return 0;
}
@@ -673,6 +771,27 @@ static inline void get_temp_setpoint(int threshold, u8 *val)
*val = DIV_ROUND_CLOSEST((threshold + 30) * 10, 5);
}
+static inline void get_batt_temp_delta(int delta, u8 *val)
+{
+ switch (delta) {
+ case 2:
+ *val = BTEMP_DELTA_2K;
+ break;
+ case 4:
+ *val = BTEMP_DELTA_4K;
+ break;
+ case 6:
+ *val = BTEMP_DELTA_6K;
+ break;
+ case 10:
+ *val = BTEMP_DELTA_10K;
+ break;
+ default:
+ *val = BTEMP_DELTA_2K;
+ break;
+	}
+}
+
static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
int flags)
{
@@ -739,38 +858,313 @@ static bool is_charger_available(struct fg_chip *chip)
return true;
}
+static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ if (chip->battery_missing || !chip->cl.learned_cc_uah)
+ return -EPERM;
+
+ cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned capacity %llduah/%dmah stored\n",
+ chip->cl.learned_cc_uah, cc_mah);
+ return 0;
+}
+
+#define CAPACITY_DELTA_DECIPCT 500
+static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
+{
+ int rc, act_cap_mah;
+ int64_t delta_cc_uah, pct_nom_cap_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+ if (rc < 0) {
+ pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.learned_cc_uah = act_cap_mah * 1000;
+ if (chip->cl.learned_cc_uah == 0)
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
+ if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+ delta_cc_uah = abs(chip->cl.learned_cc_uah -
+ chip->cl.nom_cap_uah);
+ pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
+ CAPACITY_DELTA_DECIPCT, 1000);
+ /*
+ * If the learned capacity is out of range by 50% from the
+ * nominal capacity, then overwrite the learned capacity with
+ * the nominal capacity.
+ */
+ if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected\n",
+ chip->cl.learned_cc_uah);
+ fg_dbg(chip, FG_CAP_LEARN, "Capping it to nominal:%lld\n",
+ chip->cl.nom_cap_uah);
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n",
+ rc);
+ }
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
+ chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+ return 0;
+}
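
CAPACITY_DELTA_DECIPCT is in deci-percent, so 500 means the stored learned
value may sit at most 50% either side of nominal before it is discarded.
Worked check as a standalone sketch: a 3,000,000 uAh nominal pack accepts
learned values in [1,500,000, 4,500,000] uAh.

    #include <stdbool.h>
    #include <stdint.h>

    /* Mirror of the sanity window applied above; illustrative only. */
    static bool learned_cap_in_range(int64_t learned_uah, int64_t nom_uah)
    {
        int64_t band = (nom_uah * 500) / 1000; /* 50% of nominal */
        int64_t delta = learned_uah > nom_uah ?
                        learned_uah - nom_uah : nom_uah - learned_uah;

        return delta <= band;
    }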
+
+static bool is_temp_valid_cap_learning(struct fg_chip *chip)
+{
+ int rc, batt_temp;
+
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return false;
+ }
+
+ if (batt_temp > chip->dt.cl_max_temp ||
+ batt_temp < chip->dt.cl_min_temp) {
+ fg_dbg(chip, FG_CAP_LEARN, "batt temp %d out of range [%d %d]\n",
+ batt_temp, chip->dt.cl_min_temp, chip->dt.cl_max_temp);
+ return false;
+ }
+
+ return true;
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+ int rc;
+
+ max_inc_val = chip->cl.learned_cc_uah
+ * (1000 + chip->dt.cl_max_cap_inc);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->cl.learned_cc_uah
+ * (1000 - chip->dt.cl_max_cap_dec);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->cl.learned_cc_uah;
+ if (chip->cl.final_cc_uah > max_inc_val)
+ chip->cl.learned_cc_uah = max_inc_val;
+ else if (chip->cl.final_cc_uah < min_dec_val)
+ chip->cl.learned_cc_uah = min_dec_val;
+ else
+ chip->cl.learned_cc_uah =
+ chip->cl.final_cc_uah;
+
+ if (chip->dt.cl_max_cap_limit) {
+ max_inc_val = (int64_t)chip->cl.nom_cap_uah * (1000 +
+ chip->dt.cl_max_cap_limit);
+ do_div(max_inc_val, 1000);
+ if (chip->cl.final_cc_uah > max_inc_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes above max limit %lld\n",
+ chip->cl.final_cc_uah, max_inc_val);
+ chip->cl.learned_cc_uah = max_inc_val;
+ }
+ }
+
+ if (chip->dt.cl_min_cap_limit) {
+ min_dec_val = (int64_t)chip->cl.nom_cap_uah * (1000 -
+ chip->dt.cl_min_cap_limit);
+ do_div(min_dec_val, 1000);
+ if (chip->cl.final_cc_uah < min_dec_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes below min limit %lld\n",
+ chip->cl.final_cc_uah, min_dec_val);
+ chip->cl.learned_cc_uah = min_dec_val;
+ }
+ }
+
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+
+ fg_dbg(chip, FG_CAP_LEARN, "final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw, cc_soc_delta_pct;
+ int64_t delta_cc_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ cc_soc_delta_pct = DIV_ROUND_CLOSEST(
+ abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+ CC_SOC_30BIT);
+ delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
+ 100);
+ chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
+ fg_dbg(chip, FG_CAP_LEARN, "Current cc_soc=%d cc_soc_delta_pct=%d total_cc_uah=%lld\n",
+ cc_soc_sw, cc_soc_delta_pct, chip->cl.final_cc_uah);
+ return 0;
+}
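
The charged-in amount is the CC_SOC_SW movement since learning began,
expressed as a percentage of the 2^30 scale and applied to the current
learned capacity. A standalone sketch of that arithmetic:

    #include <stdint.h>

    /* How much charge (uAh) a CC_SOC_SW delta represents. */
    static int64_t learned_delta_uah(int32_t cc_soc_sw,
                                     int32_t init_cc_soc_sw,
                                     int64_t learned_uah)
    {
        int64_t delta = (int64_t)cc_soc_sw - init_cc_soc_sw;
        int32_t pct;

        if (delta < 0)
            delta = -delta;
        /* rounded divide by the 2^30 full scale, cf. DIV_ROUND_CLOSEST */
        pct = (int32_t)((delta * 100 + (1 << 29)) >> 30);
        return (learned_uah * pct) / 100;
    }

    /* a delta of 1 << 29 (half scale) gives pct = 50, i.e. half of the
     * learned capacity */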
+
+#define FULL_SOC_RAW	255
+static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+{
+ int rc, cc_soc_sw;
+
+ if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+ chip->dt.cl_start_soc) {
+		fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high, not starting\n",
+ batt_soc);
+ return -EINVAL;
+ }
+
+ chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+ FULL_SOC_RAW);
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.init_cc_soc_sw = cc_soc_sw;
+ chip->cl.active = true;
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
+ batt_soc, chip->cl.init_cc_soc_sw);
+ return 0;
+}
+
+static int fg_cap_learning_done(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw;
+
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc < 0) {
+ pr_err("Error in processing cap learning full data, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /* Write a FULL value to cc_soc_sw */
+ cc_soc_sw = CC_SOC_30BIT;
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ goto out;
+ }
+
+ fg_cap_learning_post_process(chip);
+out:
+ return rc;
+}
+
+static void fg_cap_learning_update(struct fg_chip *chip)
+{
+ int rc, batt_soc;
+
+ mutex_lock(&chip->cl.lock);
+
+ if (!is_temp_valid_cap_learning(chip) || !chip->cl.learned_cc_uah ||
+ chip->battery_missing) {
+ fg_dbg(chip, FG_CAP_LEARN, "Aborting cap_learning %lld\n",
+ chip->cl.learned_cc_uah);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ goto out;
+ }
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+		pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ goto out;
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
+ chip->status, chip->cl.active, batt_soc);
+
+ /* Initialize the starting point of learning capacity */
+ if (!chip->cl.active) {
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_cap_learning_begin(chip, batt_soc);
+ chip->cl.active = (rc == 0);
+ }
+
+ } else {
+ if (chip->status == POWER_SUPPLY_STATUS_FULL &&
+ chip->charge_done) {
+ rc = fg_cap_learning_done(chip);
+ if (rc < 0)
+ pr_err("Error in completing capacity learning, rc=%d\n",
+ rc);
+
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+ batt_soc);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+ }
+
+out:
+ mutex_unlock(&chip->cl.lock);
+}
+
static void status_change_work(struct work_struct *work)
{
struct fg_chip *chip = container_of(work,
struct fg_chip, status_change_work);
union power_supply_propval prop = {0, };
+ int prev_status, rc;
if (!is_charger_available(chip)) {
fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
- return;
+ goto out;
}
- power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+ prev_status = chip->status;
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
&prop);
- chip->prev_status = chip->status;
- chip->status = prop.intval;
+ if (rc < 0) {
+ pr_err("Error in getting charging status, rc=%d\n", rc);
+ goto out;
+ }
- if (chip->cyc_ctr.en && chip->prev_status != chip->status)
- schedule_work(&chip->cycle_count_work);
+ chip->status = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge_done, rc=%d\n", rc);
+ goto out;
+ }
- switch (prop.intval) {
- case POWER_SUPPLY_STATUS_CHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Charging\n");
- break;
- case POWER_SUPPLY_STATUS_DISCHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Discharging\n");
- break;
- case POWER_SUPPLY_STATUS_FULL:
- fg_dbg(chip, FG_POWER_SUPPLY, "Full\n");
- break;
- default:
- break;
+ chip->charge_done = prop.intval;
+	fg_dbg(chip, FG_POWER_SUPPLY, "prev_status: %d curr_status: %d charge_done: %d\n",
+ prev_status, chip->status, chip->charge_done);
+ if (prev_status != chip->status) {
+ if (chip->cyc_ctr.en)
+ schedule_work(&chip->cycle_count_work);
+ fg_cap_learning_update(chip);
}
+
+out:
+ pm_relax(chip->dev);
}
static void restore_cycle_counter(struct fg_chip *chip)
@@ -912,64 +1306,84 @@ static int fg_get_cycle_count(struct fg_chip *chip)
return count;
}
-#define PROFILE_COMP_LEN 32
-#define SOC_READY_WAIT_MS 2000
-static void profile_load_work(struct work_struct *work)
+static void dump_sram(u8 *buf, int len)
{
- struct fg_chip *chip = container_of(work,
- struct fg_chip,
- profile_load_work.work);
- int rc;
- u8 buf[PROFILE_COMP_LEN], val;
- bool tried_again = false, profiles_same = false;
+ int i;
+ char str[16];
- if (!chip->batt_id_avail) {
- pr_err("batt_id not available\n");
- return;
+ for (i = 0; i < len; i += 4) {
+ str[0] = '\0';
+ fill_string(str, sizeof(str), buf + i, 4);
+ pr_info("%03d %s\n", PROFILE_LOAD_WORD + (i / 4), str);
}
+}
+
+static bool is_profile_load_required(struct fg_chip *chip)
+{
+ u8 buf[PROFILE_COMP_LEN], val;
+ bool profiles_same = false;
+ int rc;
rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("failed to read profile integrity rc=%d\n", rc);
- return;
+ return false;
}
- vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+ /* Check if integrity bit is set */
if (val == 0x01) {
fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in reading battery profile, rc:%d\n", rc);
- goto out;
+ return false;
}
profiles_same = memcmp(chip->batt_profile, buf,
PROFILE_COMP_LEN) == 0;
if (profiles_same) {
- fg_dbg(chip, FG_STATUS, "Battery profile is same\n");
- goto done;
+			fg_dbg(chip, FG_STATUS, "Battery profile is the same, not loading it\n");
+ return false;
}
- fg_dbg(chip, FG_STATUS, "profiles are different?\n");
- }
- clear_cycle_counter(chip);
- fg_dbg(chip, FG_STATUS, "profile loading started\n");
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0) {
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
- goto out;
+ if (!chip->dt.force_load_profile) {
+		pr_warn("Profiles don't match; skipping load since force_load_profile is disabled\n");
+ if (fg_sram_dump) {
+ pr_info("FG: loaded profile:\n");
+ dump_sram(buf, PROFILE_COMP_LEN);
+ pr_info("FG: available profile:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
+ return false;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Profiles are different, loading the correct one\n");
+ } else {
+ fg_dbg(chip, FG_STATUS, "Profile integrity bit is not set\n");
+ if (fg_sram_dump) {
+ pr_info("FG: profile to be loaded:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
}
+ return true;
+}
- /* load battery profile */
- rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
- chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+#define SOC_READY_WAIT_MS 2000
+static int __fg_restart(struct fg_chip *chip)
+{
+ int rc, msoc;
+ bool tried_again = false;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
if (rc < 0) {
- pr_err("Error in writing battery profile, rc:%d\n", rc);
- goto out;
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
}
+ chip->last_soc = msoc;
+ chip->fg_restarting = true;
+ reinit_completion(&chip->soc_ready);
rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
RESTART_GO_BIT);
if (rc < 0) {
@@ -991,6 +1405,57 @@ wait:
goto out;
}
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+out:
+ chip->fg_restarting = false;
+ return rc;
+}
+
+static void profile_load_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ profile_load_work.work);
+ u8 buf[2], val;
+ int rc;
+
+ vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+ if (!is_profile_load_required(chip))
+ goto done;
+
+ clear_cycle_counter(chip);
+ mutex_lock(&chip->cl.lock);
+ chip->cl.learned_cc_uah = 0;
+ chip->cl.active = false;
+ mutex_unlock(&chip->cl.lock);
+
+ fg_dbg(chip, FG_STATUS, "profile loading started\n");
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+
+ /* load battery profile */
+ rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+ chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("Error in writing battery profile, rc:%d\n", rc);
+ goto out;
+ }
+
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ goto out;
+ }
+
fg_dbg(chip, FG_STATUS, "SOC is ready\n");
/* Set the profile integrity bit */
@@ -1002,26 +1467,67 @@ wait:
goto out;
}
- fg_dbg(chip, FG_STATUS, "profile loaded successfully");
done:
rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
NOM_CAP_OFFSET, rc);
- goto out;
+ } else {
+ chip->cl.nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+ rc = fg_load_learned_cap_from_sram(chip);
+ if (rc < 0)
+ pr_err("Error in loading capacity learning data, rc:%d\n",
+ rc);
}
- chip->nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
chip->profile_loaded = true;
+ fg_dbg(chip, FG_STATUS, "profile loaded successfully");
out:
vote(chip->awake_votable, PROFILE_LOAD, false, 0);
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0)
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
}
+static int fg_restart_sysfs(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_restart: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_restart != 1) {
+ pr_err("Bad value %d\n", fg_restart);
+ return -EINVAL;
+ }
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_info("FG restart done\n");
+ return rc;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_sysfs,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
+
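module_param_cb() exposes the hook as a writable parameter, normally
/sys/module/qpnp_fg_gen3/parameters/restart (assuming the default
KBUILD_MODNAME for this file); writing 1 looks up the "bms" power supply and
re-runs __fg_restart(), while any other value is rejected with -EINVAL by the
check above.
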
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -1033,7 +1539,10 @@ static int fg_psy_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY:
- rc = fg_get_prop_capacity(chip, &pval->intval);
+ if (chip->fg_restarting)
+ pval->intval = chip->last_soc;
+ else
+ rc = fg_get_prop_capacity(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = fg_get_battery_voltage(chip, &pval->intval);
@@ -1051,7 +1560,7 @@ static int fg_psy_get_property(struct power_supply *psy,
rc = fg_get_sram_prop(chip, FG_SRAM_OCV, &pval->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- pval->intval = chip->nom_cap_uah;
+ pval->intval = chip->cl.nom_cap_uah;
break;
case POWER_SUPPLY_PROP_RESISTANCE_ID:
rc = fg_get_batt_id(chip, &pval->intval);
@@ -1067,6 +1576,18 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
pval->intval = chip->cyc_ctr.id;
break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ rc = fg_get_cc_soc(chip, &pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ pval->intval = chip->cl.init_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ pval->intval = chip->cl.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ rc = fg_get_cc_soc_sw(chip, &pval->intval);
+ break;
default:
break;
}
@@ -1125,8 +1646,14 @@ static int fg_notifier_cb(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "battery") == 0)
- || (strcmp(psy->desc->name, "usb") == 0))
+ || (strcmp(psy->desc->name, "usb") == 0)) {
+ /*
+ * We cannot vote for awake votable here as that takes
+ * a mutex lock and this is executed in an atomic context.
+ */
+ pm_stay_awake(chip->dev);
schedule_work(&chip->status_change_work);
+ }
return NOTIFY_OK;
}
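
The comment above is the key constraint: fg_notifier_cb() runs in atomic
context, so it cannot take the awake votable's mutex and instead pins the
device with pm_stay_awake(), which status_change_work() releases via
pm_relax() on every exit path. A minimal sketch of the pairing (pattern only,
not driver code):

    #include <linux/pm_wakeup.h>
    #include <linux/workqueue.h>

    static void example_notifier_path(struct device *dev,
                                      struct work_struct *w)
    {
        pm_stay_awake(dev);  /* safe in atomic context, takes no mutex */
        schedule_work(w);    /* the work must pm_relax() when done */
    }

    static void example_work_body(struct device *dev)
    {
        /* ... handle the event ... */
        pm_relax(dev);       /* matches pm_stay_awake() above */
    }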
@@ -1144,6 +1671,10 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
};
static const struct power_supply_desc fg_psy_desc = {
@@ -1182,6 +1713,32 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
+ /* This SRAM register is only present in v2.0 */
+ if (chip->pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4 &&
+ chip->bp.float_volt_uv > 0) {
+ fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+ chip->bp.float_volt_uv / 1000, buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+ chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
+ chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing float_volt, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (chip->bp.vbatt_full_mv > 0) {
+ fg_encode(chip->sp, FG_SRAM_VBATT_FULL, chip->bp.vbatt_full_mv,
+ buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_FULL].addr_word,
+ chip->sp[FG_SRAM_VBATT_FULL].addr_byte, buf,
+ chip->sp[FG_SRAM_VBATT_FULL].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing vbatt_full, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
buf);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
@@ -1303,36 +1860,32 @@ static int fg_hw_init(struct fg_chip *chip)
if (chip->cyc_ctr.en)
restore_cycle_counter(chip);
- return 0;
-}
-
-static int fg_memif_init(struct fg_chip *chip)
-{
- return fg_ima_init(chip);
-}
-
-static int fg_batt_profile_init(struct fg_chip *chip)
-{
- int rc;
-
- if (!chip->batt_profile) {
- chip->batt_profile = devm_kcalloc(chip->dev, PROFILE_LEN,
- sizeof(*chip->batt_profile),
- GFP_KERNEL);
- if (!chip->batt_profile)
- return -ENOMEM;
+ if (chip->dt.jeita_hyst_temp >= 0) {
+ val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+ JEITA_TEMP_HYST_MASK, val);
+ if (rc < 0) {
+ pr_err("Error in writing batt_temp_cfg, rc=%d\n", rc);
+ return rc;
+ }
}
- rc = fg_get_batt_profile(chip);
+ get_batt_temp_delta(chip->dt.batt_temp_delta, &val);
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TMPR_INTR(chip),
+ CHANGE_THOLD_MASK, val);
if (rc < 0) {
- pr_err("Error in getting battery profile, rc:%d\n", rc);
+ pr_err("Error in writing batt_temp_delta, rc=%d\n", rc);
return rc;
}
- schedule_delayed_work(&chip->profile_load_work, msecs_to_jiffies(0));
return 0;
}
+static int fg_memif_init(struct fg_chip *chip)
+{
+ return fg_ima_init(chip);
+}
+
/* INTERRUPT HANDLERS STAY HERE */
static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
@@ -1360,16 +1913,16 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
chip->battery_missing = (status & BT_MISS_BIT);
if (chip->battery_missing) {
- chip->batt_id_avail = false;
+ chip->profile_available = false;
chip->profile_loaded = false;
clear_cycle_counter(chip);
} else {
- rc = fg_batt_profile_init(chip);
+ rc = fg_get_batt_profile(chip);
if (rc < 0) {
- pr_err("Error in initializing battery profile, rc=%d\n",
- rc);
+ pr_err("Error in getting battery profile, rc:%d\n", rc);
return IRQ_HANDLED;
}
+ schedule_delayed_work(&chip->profile_load_work, 0);
}
return IRQ_HANDLED;
@@ -1378,8 +1931,33 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ union power_supply_propval prop = {0, };
+ int rc, batt_temp;
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!is_charger_available(chip)) {
+ chip->last_batt_temp = batt_temp;
+ return IRQ_HANDLED;
+ }
+
+ power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ chip->health = prop.intval;
+
+ if (chip->last_batt_temp != batt_temp) {
+ chip->last_batt_temp = batt_temp;
+ power_supply_changed(chip->batt_psy);
+ }
+
+ if (abs(chip->last_batt_temp - batt_temp) > 30)
+ pr_warn("Battery temperature last:%d current: %d\n",
+ chip->last_batt_temp, batt_temp);
return IRQ_HANDLED;
}
@@ -1412,6 +1990,10 @@ static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
power_supply_changed(chip->batt_psy);
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+
+ if (chip->cl.active)
+ fg_cap_learning_update(chip);
+
return IRQ_HANDLED;
}
@@ -1445,39 +2027,79 @@ static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
/* BATT_SOC irqs */
[MSOC_FULL_IRQ] = {
- "msoc-full", fg_soc_irq_handler, true },
+ .name = "msoc-full",
+ .handler = fg_soc_irq_handler,
+ },
[MSOC_HIGH_IRQ] = {
- "msoc-high", fg_soc_irq_handler, true },
+ .name = "msoc-high",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_EMPTY_IRQ] = {
- "msoc-empty", fg_empty_soc_irq_handler, true },
+ .name = "msoc-empty",
+ .handler = fg_empty_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_LOW_IRQ] = {
- "msoc-low", fg_soc_irq_handler },
+ .name = "msoc-low",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_DELTA_IRQ] = {
- "msoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "msoc-delta",
+ .handler = fg_delta_soc_irq_handler,
+ .wakeable = true,
+ },
[BSOC_DELTA_IRQ] = {
- "bsoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "bsoc-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[SOC_READY_IRQ] = {
- "soc-ready", fg_first_est_irq_handler, true },
+ .name = "soc-ready",
+ .handler = fg_first_est_irq_handler,
+ .wakeable = true,
+ },
[SOC_UPDATE_IRQ] = {
- "soc-update", fg_soc_update_irq_handler },
+ .name = "soc-update",
+ .handler = fg_soc_update_irq_handler,
+ },
/* BATT_INFO irqs */
[BATT_TEMP_DELTA_IRQ] = {
- "batt-temp-delta", fg_delta_batt_temp_irq_handler },
+ .name = "batt-temp-delta",
+ .handler = fg_delta_batt_temp_irq_handler,
+ .wakeable = true,
+ },
[BATT_MISSING_IRQ] = {
- "batt-missing", fg_batt_missing_irq_handler, true },
+ .name = "batt-missing",
+ .handler = fg_batt_missing_irq_handler,
+ .wakeable = true,
+ },
[ESR_DELTA_IRQ] = {
- "esr-delta", fg_dummy_irq_handler },
+ .name = "esr-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[VBATT_LOW_IRQ] = {
- "vbatt-low", fg_vbatt_low_irq_handler, true },
+ .name = "vbatt-low",
+ .handler = fg_vbatt_low_irq_handler,
+ .wakeable = true,
+ },
[VBATT_PRED_DELTA_IRQ] = {
- "vbatt-pred-delta", fg_dummy_irq_handler },
+ .name = "vbatt-pred-delta",
+ .handler = fg_dummy_irq_handler,
+ },
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
- "dma-grant", fg_dummy_irq_handler },
+ .name = "dma-grant",
+ .handler = fg_dummy_irq_handler,
+ },
[MEM_XCP_IRQ] = {
- "mem-xcp", fg_dummy_irq_handler },
+ .name = "mem-xcp",
+ .handler = fg_dummy_irq_handler,
+ },
[IMA_RDY_IRQ] = {
- "ima-rdy", fg_dummy_irq_handler },
+ .name = "ima-rdy",
+ .handler = fg_dummy_irq_handler,
+ },
};
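
Rewriting the table with designated initializers makes the wakeup flag
explicit and lets every unnamed field default to zero, which is how the dummy
handlers quietly drop their wakeable status. A sketch, assuming struct
fg_irq_info carries roughly these fields (the real definition lives elsewhere
in the driver):

    #include <linux/interrupt.h>

    struct fg_irq_info_sketch {
        const char	*name;
        irqreturn_t	(*handler)(int irq, void *data);
        bool		wakeable;
        int		irq;
    };

    static const struct fg_irq_info_sketch sketch_entry = {
        .name		= "esr-delta",
        .handler	= fg_dummy_irq_handler,
        /* .wakeable and .irq are implicitly false/0 */
    };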
static int fg_get_irq_index_byname(const char *name)
@@ -1542,12 +2164,21 @@ static int fg_register_interrupts(struct fg_chip *chip)
#define DEFAULT_BATT_TEMP_COOL 5
#define DEFAULT_BATT_TEMP_WARM 45
#define DEFAULT_BATT_TEMP_HOT 50
+#define DEFAULT_CL_START_SOC 15
+#define DEFAULT_CL_MIN_TEMP_DECIDEGC 150
+#define DEFAULT_CL_MAX_TEMP_DECIDEGC 450
+#define DEFAULT_CL_MAX_INC_DECIPERC 5
+#define DEFAULT_CL_MAX_DEC_DECIPERC 100
+#define DEFAULT_CL_MIN_LIM_DECIPERC 0
+#define DEFAULT_CL_MAX_LIM_DECIPERC 0
+#define BTEMP_DELTA_LOW 2
+#define BTEMP_DELTA_HIGH 10
static int fg_parse_dt(struct fg_chip *chip)
{
struct device_node *child, *revid_node, *node = chip->dev->of_node;
u32 base, temp;
u8 subtype;
- int rc, len;
+ int rc;
if (!node) {
dev_err(chip->dev, "device tree node missing\n");
@@ -1638,6 +2269,11 @@ static int fg_parse_dt(struct fg_chip *chip)
}
}
+ rc = fg_get_batt_profile(chip);
+ if (rc < 0)
+		pr_warn("profile for batt_id=%d KOhms not found... using OTP, rc:%d\n",
+ chip->batt_id, rc);
+
/* Read all the optional properties below */
rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
if (rc < 0)
@@ -1691,15 +2327,14 @@ static int fg_parse_dt(struct fg_chip *chip)
chip->dt.jeita_thresholds[JEITA_COOL] = DEFAULT_BATT_TEMP_COOL;
chip->dt.jeita_thresholds[JEITA_WARM] = DEFAULT_BATT_TEMP_WARM;
chip->dt.jeita_thresholds[JEITA_HOT] = DEFAULT_BATT_TEMP_HOT;
- if (of_find_property(node, "qcom,fg-jeita-thresholds", &len)) {
- if (len == NUM_JEITA_LEVELS) {
- rc = of_property_read_u32_array(node,
- "qcom,fg-jeita-thresholds",
- chip->dt.jeita_thresholds, len);
- if (rc < 0)
- pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
- rc);
- }
+ if (of_property_count_elems_of_size(node, "qcom,fg-jeita-thresholds",
+ sizeof(u32)) == NUM_JEITA_LEVELS) {
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-jeita-thresholds",
+ chip->dt.jeita_thresholds, NUM_JEITA_LEVELS);
+ if (rc < 0)
+ pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
+ rc);
}
rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
@@ -1724,6 +2359,63 @@ static int fg_parse_dt(struct fg_chip *chip)
if (chip->cyc_ctr.en)
chip->cyc_ctr.id = 1;
+ chip->dt.force_load_profile = of_property_read_bool(node,
+ "qcom,fg-force-load-profile");
+
+ rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp);
+ if (rc < 0)
+ chip->dt.cl_start_soc = DEFAULT_CL_START_SOC;
+ else
+ chip->dt.cl_start_soc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_min_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_max_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_inc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_dec = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_cap_limit = DEFAULT_CL_MIN_LIM_DECIPERC;
+ else
+ chip->dt.cl_min_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_limit = DEFAULT_CL_MAX_LIM_DECIPERC;
+ else
+ chip->dt.cl_max_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-jeita-hyst-temp", &temp);
+ if (rc < 0)
+ chip->dt.jeita_hyst_temp = -EINVAL;
+ else
+ chip->dt.jeita_hyst_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-batt-temp-delta", &temp);
+ if (rc < 0)
+ chip->dt.batt_temp_delta = -EINVAL;
+	else if (temp >= BTEMP_DELTA_LOW && temp <= BTEMP_DELTA_HIGH)
+ chip->dt.batt_temp_delta = temp;
+
return 0;
}
@@ -1776,6 +2468,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->bus_lock);
mutex_init(&chip->sram_rw_lock);
mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->cl.lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
@@ -1836,10 +2529,8 @@ static int fg_gen3_probe(struct platform_device *pdev)
goto exit;
}
- rc = fg_batt_profile_init(chip);
- if (rc < 0)
- dev_warn(chip->dev, "Error in initializing battery profile, rc:%d\n",
- rc);
+ if (chip->profile_available)
+ schedule_delayed_work(&chip->profile_load_work, 0);
device_init_wakeup(chip->dev, true);
pr_debug("FG GEN3 driver successfully probed\n");
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 1b63f51088ee..ee576d300054 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -58,6 +58,13 @@ static struct smb_params v1_params = {
.max_u = 4800000,
.step_u = 25000,
},
+ .otg_cl = {
+ .name = "usb otg current limit",
+ .reg = OTG_CURRENT_LIMIT_CFG_REG,
+ .min_u = 250000,
+ .max_u = 2000000,
+ .step_u = 250000,
+ },
.dc_icl = {
.name = "dc input current limit",
.reg = DCIN_CURRENT_LIMIT_CFG_REG,
@@ -202,6 +209,7 @@ struct smb_dt_props {
bool no_battery;
int fcc_ua;
int usb_icl_ua;
+ int otg_cl_ua;
int dc_icl_ua;
int fv_uv;
int wipower_max_uw;
@@ -226,6 +234,7 @@ module_param_named(
pl_master_percent, __pl_master_percent, int, S_IRUSR | S_IWUSR
);
+#define MICRO_1P5A 1500000
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -278,6 +287,11 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.usb_icl_ua = -EINVAL;
rc = of_property_read_u32(node,
+ "qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+ if (rc < 0)
+ chip->dt.otg_cl_ua = MICRO_1P5A;
+
+ rc = of_property_read_u32(node,
"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
if (rc < 0)
chip->dt.dc_icl_ua = -EINVAL;
@@ -981,6 +995,8 @@ static int smb2_init_hw(struct smb2 *chip)
smblib_get_charge_param(chg, &chg->param.dc_icl,
&chip->dt.dc_icl_ua);
+ chg->otg_cl_ua = chip->dt.otg_cl_ua;
+
/* votes must be cast before configuring software control */
vote(chg->pl_disable_votable,
PL_INDIRECT_VOTER, true, 0);
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index e9c189ae17e7..0067ec5c2ca2 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include <linux/iio/consumer.h>
#include <linux/power_supply.h>
#include <linux/regulator/driver.h>
@@ -642,6 +643,31 @@ suspend:
return rc;
}
+#define MICRO_250MA 250000
+static int smblib_otg_cl_config(struct smb_charger *chg, int otg_cl_ua)
+{
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl, otg_cl_ua);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set otg current limit rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* configure PFM/PWM mode for OTG regulator */
+ rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG3_REG,
+ ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT,
+			otg_cl_ua > MICRO_250MA ?
+				ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT : 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't write DC_ENG_SSUPPLY_CFG3_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
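
The otg_cl bounds (min 250 mA, step 250 mA, max 2 A) imply a small linear
register range; whether smblib_set_charge_param() subtracts min_u before
dividing by step_u is an assumption here, not something this hunk shows. A
hedged sketch of that encoding:

    #include <stdint.h>

    /* Assumed (val - min_u) / step_u encoding; verify against
     * smblib_set_charge_param() before relying on it. */
    static uint8_t otg_cl_ua_to_raw(int ua)
    {
        const int min_u = 250000, max_u = 2000000, step_u = 250000;

        if (ua < min_u)
            ua = min_u;
        if (ua > max_u)
            ua = max_u;
        return (uint8_t)((ua - min_u) / step_u);
    }

    /* otg_cl_ua_to_raw(1500000) == 5 under this assumed encoding */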
+
static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
@@ -746,14 +772,36 @@ static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
* OTG REGULATOR *
*****************/
+#define OTG_SOFT_START_DELAY_MS 20
int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
+ u8 stat;
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, OTG_EN_BIT);
- if (rc < 0)
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0) {
dev_err(chg->dev, "Couldn't enable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ msleep(OTG_SOFT_START_DELAY_MS);
+ rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read OTG_STATUS_REG rc=%d\n", rc);
+ return rc;
+ }
+ if (stat & BOOST_SOFTSTART_DONE_BIT)
+ smblib_otg_cl_config(chg, chg->otg_cl_ua);
return rc;
}
@@ -763,9 +811,22 @@ int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, 0);
- if (rc < 0)
+ rc = smblib_write(chg, CMD_OTG_REG, 0);
+ if (rc < 0) {
dev_err(chg->dev, "Couldn't disable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ smblib_otg_cl_config(chg, MICRO_250MA);
+
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
return rc;
}
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index c9732c25dfcd..5b4c2016adc8 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -77,6 +77,7 @@ struct smb_params {
struct smb_chg_param fv;
struct smb_chg_param usb_icl;
struct smb_chg_param icl_stat;
+ struct smb_chg_param otg_cl;
struct smb_chg_param dc_icl;
struct smb_chg_param dc_icl_pt_lv;
struct smb_chg_param dc_icl_pt_hv;
@@ -167,6 +168,8 @@ struct smb_charger {
int thermal_levels;
int *thermal_mitigation;
+ int otg_cl_ua;
+
int fake_capacity;
bool step_chg_enabled;
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index c4ad72e254f9..8a49a8fb38ba 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -366,6 +366,9 @@ enum {
#define OTG_EN_SRC_CFG_BIT BIT(1)
#define CONCURRENT_MODE_CFG_BIT BIT(0)
+#define OTG_ENG_OTG_CFG_REG (OTG_BASE + 0xC0)
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT BIT(0)
+
/* BATIF Peripheral Registers */
/* BATIF Interrupt Bits */
#define BAT_7_RT_STS_BIT BIT(7)
@@ -766,6 +769,13 @@ enum {
ZIN_ICL_HV_MAX_MV = 11000,
};
+#define DC_ENG_SSUPPLY_CFG3_REG (DCIN_BASE + 0xC2)
+#define ENG_SSUPPLY_HI_CAP_BIT BIT(6)
+#define ENG_SSUPPLY_HI_RES_BIT BIT(5)
+#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT BIT(3)
+#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT BIT(2)
+#define ENG_SSUPPLY_5V_OV_OPT_BIT BIT(0)
+
/* MISC Peripheral Registers */
#define REVISION1_REG (MISC_BASE + 0x00)
#define DIG_MINOR_MASK GENMASK(7, 0)
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index ac71f2c75472..6d0c1fbe566b 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1879,7 +1879,7 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
int rc, enable, lut_entry_size, list_size, i;
const char *lable;
const __be32 *prop;
- u64 size;
+ u32 size;
struct device_node *node;
int found_pwm_subnode = 0;
int found_lpg_subnode = 0;
@@ -1968,11 +1968,18 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
return rc;
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_LPG_LUT_BASE,
- &size, 0);
+ 0, 0);
if (!prop) {
chip->flags |= QPNP_PWM_LUT_NOT_SUPPORTED;
} else {
lpg_config->lut_base_addr = be32_to_cpu(*prop);
+ rc = of_property_read_u32(of_node, "qcom,lpg-lut-size", &size);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error reading qcom,lpg-lut-size, rc=%d\n",
+ rc);
+ return rc;
+ }
+
/*
* Each entry of LUT is of 2 bytes for generic LUT and of 1 byte
* for KPDBL/GLED LUT.
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 25b522806c3e..2aa588ba610b 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -3469,7 +3469,6 @@ static void icnss_bw_deinit(struct icnss_priv *priv)
static int icnss_smmu_init(struct icnss_priv *priv)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int s1_bypass = 1;
int ret = 0;
@@ -3486,15 +3485,6 @@ static int icnss_smmu_init(struct icnss_priv *priv)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret < 0) {
- icnss_pr_err("Set disable_htw attribute failed, err = %d\n",
- ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret < 0) {
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 9b44fb03cf94..83e3775ed533 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -741,7 +741,6 @@ static int msm_audio_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
int ret;
- int disable_htw = 1;
mapping = arm_iommu_create_mapping(
msm_iommu_get_bus(dev),
@@ -750,10 +749,6 @@ static int msm_audio_smmu_init(struct device *dev)
if (IS_ERR(mapping))
return PTR_ERR(mapping);
- iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
-
ret = arm_iommu_attach_device(dev, mapping);
if (ret) {
dev_err(dev, "%s: Attach failed, err = %d\n",
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 7ad798ace1e5..4e35ed9654b7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -333,6 +333,77 @@ static struct usb_descriptor_header *ncm_hs_function[] = {
NULL,
};
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ncm_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = {
+ .bLength = sizeof(ncm_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ncm_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = {
+ .bLength = sizeof(ncm_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ncm_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = {
+ .bLength = sizeof(ncm_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ncm_ss_function[] = {
+ (struct usb_descriptor_header *) &ncm_iad_desc,
+ /* CDC NCM control descriptors */
+ (struct usb_descriptor_header *) &ncm_control_intf,
+ (struct usb_descriptor_header *) &ncm_header_desc,
+ (struct usb_descriptor_header *) &ncm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+ (struct usb_descriptor_header *) &ncm_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ncm_data_nop_intf,
+ (struct usb_descriptor_header *) &ncm_data_intf,
+ (struct usb_descriptor_header *) &ncm_ss_in_desc,
+ (struct usb_descriptor_header *) &ncm_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
#define STRING_CTRL_IDX 0
@@ -1431,8 +1502,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hs_ncm_notify_desc.bEndpointAddress =
fs_ncm_notify_desc.bEndpointAddress;
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ncm_ss_in_desc.bEndpointAddress =
+ fs_ncm_in_desc.bEndpointAddress;
+ ncm_ss_out_desc.bEndpointAddress =
+ fs_ncm_out_desc.bEndpointAddress;
+ ncm_ss_notify_desc.bEndpointAddress =
+ fs_ncm_notify_desc.bEndpointAddress;
+ }
+
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL);
+ ncm_ss_function);
if (status)
goto fail;
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index a5368cdf2254..55918d47a21a 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -193,6 +193,7 @@ enum mdss_qos_settings {
MDSS_QOS_REMAPPER,
MDSS_QOS_IB_NOCR,
MDSS_QOS_WB2_WRITE_GATHER_EN,
+ MDSS_QOS_WB_QOS,
MDSS_QOS_MAX,
};
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 57e18a7dc5e1..72b262e8171a 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -45,6 +45,18 @@
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+static u32 supported_modes[] = {
+ HDMI_VFRMT_640x480p60_4_3,
+ HDMI_VFRMT_720x480p60_4_3, HDMI_VFRMT_720x480p60_16_9,
+ HDMI_VFRMT_1280x720p60_16_9,
+ HDMI_VFRMT_1920x1080p60_16_9,
+ HDMI_VFRMT_3840x2160p24_16_9, HDMI_VFRMT_3840x2160p30_16_9,
+ HDMI_VFRMT_3840x2160p60_16_9,
+ HDMI_VFRMT_4096x2160p24_256_135, HDMI_VFRMT_4096x2160p30_256_135,
+ HDMI_VFRMT_4096x2160p60_256_135, HDMI_EVFRMT_4096x2160p24_16_9
+};
+
static void mdss_dp_put_dt_clk_data(struct device *dev,
struct dss_module_power *module_power)
{
@@ -789,17 +801,34 @@ void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *dp)
cap = &dp->dpcd;
- data = dp->lane_cnt - 1;
- data <<= 4;
+	data |= (2 << 13); /* Default: LSCLK DIV 1/4 LCLK */
+
+ /* Color Format */
+ switch (dp->panel_data.panel_info.out_format) {
+ case MDP_Y_CBCR_H2V2:
+ data |= (1 << 11); /* YUV420 */
+ break;
+ case MDP_Y_CBCR_H2V1:
+ data |= (2 << 11); /* YUV422 */
+ break;
+ default:
+ data |= (0 << 11); /* RGB */
+ break;
+ }
+
+ /* Scrambler reset enable */
+ if (cap->scrambler_reset)
+ data |= (1 << 10);
+
+ if (dp->edid.color_depth != 6)
+ data |= 0x100; /* Default: 8 bits */
+
+ /* Num of Lanes */
+ data |= ((dp->lane_cnt - 1) << 4);
if (cap->enhanced_frame)
data |= 0x40;
- if (dp->edid.color_depth == 8) {
- /* 0 == 6 bits, 1 == 8 bits */
- data |= 0x100; /* bit 8 */
- }
-
if (!timing->interlaced) /* progressive */
data |= 0x04;
@@ -863,6 +892,8 @@ static int dp_audio_info_setup(struct platform_device *pdev,
mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt);
mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true);
+ dp_ctrl->wait_for_audio_comp = true;
+
return rc;
} /* dp_audio_info_setup */
@@ -885,6 +916,17 @@ static int dp_get_audio_edid_blk(struct platform_device *pdev,
return rc;
} /* dp_get_audio_edid_blk */
+static void dp_audio_codec_teardown_done(struct platform_device *pdev)
+{
+ struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev);
+
+	if (!dp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+ pr_debug("audio codec teardown done\n");
+ complete_all(&dp->audio_comp);
+}
+
static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
@@ -906,6 +948,8 @@ static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
dp_get_audio_edid_blk;
dp->ext_audio_data.codec_ops.cable_status =
dp_get_cable_status;
+ dp->ext_audio_data.codec_ops.teardown_done =
+ dp_audio_codec_teardown_done;
if (!dp->pdev->dev.of_node) {
pr_err("%s cannot find dp dev.of_node\n", __func__);
@@ -936,8 +980,6 @@ end:
return ret;
}
-#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
-
static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
{
struct mdss_panel_info *pinfo;
@@ -949,7 +991,6 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
return -EINVAL;
}
- dp_drv->ds_data.ds_registered = false;
ret = hdmi_get_supported_mode(&timing, &dp_drv->ds_data, vic);
pinfo = &dp_drv->panel_data.panel_info;
@@ -987,6 +1028,13 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
return 0;
} /* dp_init_panel_info */
+static inline void mdss_dp_set_audio_switch_node(
+ struct mdss_dp_drv_pdata *dp, int val)
+{
+ if (dp && dp->ext_audio_data.intf_ops.notify)
+ dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
+ val);
+}
int mdss_dp_on(struct mdss_panel_data *pdata)
{
@@ -1054,6 +1102,9 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
goto exit;
}
+ mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
+ orientation, dp_drv->dpcd.max_lane_count);
+
pr_debug("link_rate = 0x%x\n", dp_drv->link_rate);
dp_drv->power_data[DP_CTRL_PM].clk_config[0].rate =
@@ -1096,6 +1147,7 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
pr_debug("mainlink ready\n");
dp_drv->power_on = true;
+ mdss_dp_set_audio_switch_node(dp_drv, true);
pr_debug("End-\n");
exit:
@@ -1119,14 +1171,15 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
mutex_lock(&dp_drv->train_mutex);
reinit_completion(&dp_drv->idle_comp);
-
- mdss_dp_state_ctrl(&dp_drv->ctrl_io, 0);
+ mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
if (dp_drv->link_clks_on)
mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false);
+ mdss_dp_audio_enable(&dp_drv->ctrl_io, false);
+
mdss_dp_irq_disable(dp_drv);
mdss_dp_config_gpios(dp_drv, false);
@@ -1147,14 +1200,6 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
return 0;
}
-static inline void mdss_dp_set_audio_switch_node(
- struct mdss_dp_drv_pdata *dp, int val)
-{
- if (dp && dp->ext_audio_data.intf_ops.notify)
- dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
- val);
-}
-
static void mdss_dp_send_cable_notification(
struct mdss_dp_drv_pdata *dp, int val)
{
@@ -1169,6 +1214,38 @@ static void mdss_dp_send_cable_notification(
dp->ext_audio_data.type, val);
}
+static void mdss_dp_audio_codec_wait(struct mdss_dp_drv_pdata *dp)
+{
+	const unsigned long audio_completion_timeout = 3 * HZ; /* jiffies */
+ int ret = 0;
+
+ if (!dp->wait_for_audio_comp)
+ return;
+
+ reinit_completion(&dp->audio_comp);
+	ret = wait_for_completion_timeout(&dp->audio_comp,
+		audio_completion_timeout);
+ if (ret <= 0)
+ pr_warn("audio codec teardown timed out\n");
+
+ dp->wait_for_audio_comp = false;
+}
+
+static void mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, bool enable)
+{
+ if (enable) {
+ mdss_dp_send_cable_notification(dp, enable);
+ } else {
+ mdss_dp_set_audio_switch_node(dp, enable);
+ mdss_dp_audio_codec_wait(dp);
+ mdss_dp_send_cable_notification(dp, enable);
+ }
+
+ pr_debug("notify state %s done\n",
+ enable ? "ENABLE" : "DISABLE");
+}
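
The disable ordering here is deliberate: the audio switch is dropped first so
the codec starts tearing down, the driver then blocks (bounded by the
3-second completion timeout) until dp_audio_codec_teardown_done() fires, and
only then is the cable notification sent, so the codec never touches the link
after it goes down. That rationale is inferred from the call order; the patch
does not state it explicitly.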
+
static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
@@ -1183,6 +1260,10 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
+ dp_drv->ds_data.ds_registered = true;
+ dp_drv->ds_data.modes_num = ARRAY_SIZE(supported_modes);
+ dp_drv->ds_data.modes = supported_modes;
+
dp_drv->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
edid_init_data.kobj = dp_drv->kobj;
edid_init_data.ds_data = dp_drv->ds_data;
@@ -1236,15 +1317,19 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
mdss_dp_aux_init(dp_drv);
+ mdss_dp_phy_initialize(dp_drv);
+ mdss_dp_ctrl_reset(&dp_drv->ctrl_io);
mdss_dp_phy_reset(&dp_drv->ctrl_io);
mdss_dp_aux_reset(&dp_drv->ctrl_io);
- mdss_dp_phy_initialize(dp_drv);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true);
pr_debug("Ctrl_hw_rev =0x%x, phy hw_rev =0x%x\n",
mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io),
mdss_dp_get_phy_hw_version(&dp_drv->phy_io));
+ pr_debug("plug Orientation = %d\n",
+ usbpd_get_plug_orientation(dp_drv->pd));
+
mdss_dp_phy_aux_setup(&dp_drv->phy_io);
mdss_dp_irq_enable(dp_drv);
@@ -1264,8 +1349,7 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
goto edid_error;
}
- mdss_dp_send_cable_notification(dp_drv, true);
- mdss_dp_set_audio_switch_node(dp_drv, true);
+ mdss_dp_notify_clients(dp_drv, true);
dp_drv->dp_initialized = true;
return ret;
@@ -1771,8 +1855,7 @@ static void dp_send_events(struct mdss_dp_drv_pdata *dp, u32 events)
{
spin_lock(&dp->event_lock);
dp->current_event = events;
- queue_delayed_work(dp->workq,
- &dp->dwork, HZ);
+ queue_delayed_work(dp->workq, &dp->dwork, HZ / 100);
spin_unlock(&dp->event_lock);
}
@@ -1883,8 +1966,7 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr)
mutex_lock(&dp_drv->pd_msg_mutex);
dp_drv->cable_connected = false;
mutex_unlock(&dp_drv->pd_msg_mutex);
- mdss_dp_send_cable_notification(dp_drv, false);
- mdss_dp_set_audio_switch_node(dp_drv, false);
+ mdss_dp_notify_clients(dp_drv, false);
}
static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
@@ -2135,6 +2217,8 @@ static int mdss_dp_probe(struct platform_device *pdev)
mdss_dp_device_register(dp_drv);
dp_drv->inited = true;
+ dp_drv->wait_for_audio_comp = false;
+ init_completion(&dp_drv->audio_comp);
pr_debug("done\n");
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 4710cf7a98e2..ddadb7b6709c 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -399,6 +399,7 @@ struct mdss_dp_drv_pdata {
struct completion train_comp;
struct completion idle_comp;
struct completion video_comp;
+ struct completion audio_comp;
struct mutex aux_mutex;
struct mutex train_mutex;
struct mutex pd_msg_mutex;
@@ -423,6 +424,7 @@ struct mdss_dp_drv_pdata {
char delay_start;
u32 bpp;
struct dp_statistic dp_stat;
+ bool wait_for_audio_comp;
/* event */
struct workqueue_struct *workq;
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index d9297a7af764..27e982437961 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -510,11 +510,20 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt)
pr_debug("clk_rate=%llu, bpp= %d, lane_cnt=%d\n",
pinfo->clk_rate, pinfo->bpp, lane_cnt);
- min_link_rate = (u32)div_u64((pinfo->clk_rate * 10),
- (lane_cnt * encoding_factx10));
- min_link_rate = (min_link_rate * pinfo->bpp)
- / (DP_LINK_RATE_MULTIPLIER);
+
+ /*
+ * The max pixel clock supported is 675Mhz. The
+ * current calculations below will make sure
+ * the min_link_rate is within 32 bit limits.
+ * Any changes in the section of code should
+ * consider this limitation.
+ */
+ min_link_rate = pinfo->clk_rate
+ / (lane_cnt * encoding_factx10);
min_link_rate /= ln_to_link_ratio;
+ min_link_rate = (min_link_rate * pinfo->bpp);
+ min_link_rate = (u32)div_u64(min_link_rate * 10,
+ DP_LINK_RATE_MULTIPLIER);
pr_debug("min_link_rate = %d\n", min_link_rate);
@@ -1113,17 +1122,17 @@ static void dp_host_train_set(struct mdss_dp_drv_pdata *ep, int train)
}
char vm_pre_emphasis[4][4] = {
- {0x00, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x00, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
+	{0x00, 0x09, 0x11, 0x0C}, /* pe0, 0 dB */
+	{0x00, 0x0A, 0x10, 0xFF}, /* pe1, 3.5 dB */
+	{0x00, 0x0C, 0xFF, 0xFF}, /* pe2, 6.0 dB */
+	{0x00, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 dB */
};
/* voltage swing, 0.2v and 1.0v are not supported */
char vm_voltage_swing[4][4] = {
- {0x0a, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
- {0x07, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
- {0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */
+ {0x07, 0x0f, 0x12, 0x1E}, /* sw0, 0.4v */
+ {0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */
+ {0x18, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */
{0x1E, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 v, optional */
};
@@ -1375,7 +1384,8 @@ train_start:
clear:
dp_clear_training_pattern(dp);
if (ret != -1) {
- mdss_dp_setup_tr_unit(&dp->ctrl_io);
+ mdss_dp_setup_tr_unit(&dp->ctrl_io, dp->link_rate,
+ dp->lane_cnt, dp->vic);
mdss_dp_state_ctrl(&dp->ctrl_io, ST_SEND_VIDEO);
pr_debug("State_ctrl set to SEND_VIDEO\n");
}
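
The reordered arithmetic above divides before multiplying, so every intermediate value stays within 32 bits for pixel clocks up to 675 MHz. Below is a standalone sketch of the same ordering; the 148.5 MHz pixel clock, the encoding factor, the lane-to-link ratio and the 27 MHz DP_LINK_RATE_MULTIPLIER are all assumed example values, not confirmed by this hunk.

#include <stdint.h>
#include <stdio.h>

#define DP_LINK_RATE_MULTIPLIER 27000000ULL	/* assumed 27 MHz unit */

int main(void)
{
	uint64_t clk_rate = 148500000ULL;	/* hypothetical 1080p60 */
	uint32_t lane_cnt = 2, bpp = 24;
	uint32_t encoding_factx10 = 8;	/* assumed: 0.8 encoding factor x10 */
	uint32_t ln_to_link_ratio = 10;	/* assumed, mirrors the driver */
	uint32_t min_link_rate;

	/* divide first so the later multiplies stay within 32 bits */
	min_link_rate = (uint32_t)(clk_rate / (lane_cnt * encoding_factx10));
	min_link_rate /= ln_to_link_ratio;
	min_link_rate *= bpp;
	min_link_rate = (uint32_t)(((uint64_t)min_link_rate * 10) /
				   DP_LINK_RATE_MULTIPLIER);
	printf("min_link_rate code = %u\n", min_link_rate);
	return 0;
}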
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index bdf5d92f7053..f1245a024a88 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -143,6 +143,18 @@ void mdss_dp_aux_reset(struct dss_io_data *ctrl_io)
writel_relaxed(aux_ctrl, ctrl_io->base + DP_AUX_CTRL);
}
+/* reset DP controller */
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io)
+{
+ u32 sw_reset = readl_relaxed(ctrl_io->base + DP_SW_RESET);
+
+ sw_reset |= BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+ udelay(1000);
+ sw_reset &= ~BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+}
+
/* reset DP Mainlink */
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io)
{
@@ -284,13 +296,47 @@ void mdss_dp_sw_mvid_nvid(struct dss_io_data *ctrl_io)
writel_relaxed(0x3c, ctrl_io->base + DP_SOFTWARE_NVID);
}
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io)
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res)
{
- /* Current Tr unit configuration supports only 1080p */
+ u32 dp_tu = 0x0;
+ u32 valid_boundary = 0x0;
+ u32 valid_boundary2 = 0x0;
+ struct dp_vc_tu_mapping_table const *tu_entry = tu_table;
+
writel_relaxed(0x21, ctrl_io->base + DP_MISC1_MISC0);
- writel_relaxed(0x0f0016, ctrl_io->base + DP_VALID_BOUNDARY);
- writel_relaxed(0x1f, ctrl_io->base + DP_TU);
- writel_relaxed(0x0, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ for (; tu_entry != tu_table + ARRAY_SIZE(tu_table); ++tu_entry) {
+ if ((tu_entry->vic == res) &&
+ (tu_entry->lanes == ln_cnt) &&
+ (tu_entry->lrate == link_rate))
+ break;
+ }
+
+ if (tu_entry == tu_table + ARRAY_SIZE(tu_table)) {
+ pr_err("requested ln_cnt=%d, lrate=0x%x not supported\n",
+ ln_cnt, link_rate);
+ return;
+ }
+
+ dp_tu |= tu_entry->tu_size_minus1;
+ valid_boundary |= tu_entry->valid_boundary_link;
+ valid_boundary |= (tu_entry->delay_start_link << 16);
+
+ valid_boundary2 |= (tu_entry->valid_lower_boundary_link << 1);
+ valid_boundary2 |= (tu_entry->upper_boundary_count << 16);
+ valid_boundary2 |= (tu_entry->lower_boundary_count << 20);
+
+ if (tu_entry->boundary_moderation_en)
+ valid_boundary2 |= BIT(0);
+
+ writel_relaxed(valid_boundary, ctrl_io->base + DP_VALID_BOUNDARY);
+ writel_relaxed(dp_tu, ctrl_io->base + DP_TU);
+ writel_relaxed(valid_boundary2, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ pr_debug("valid_boundary=0x%x, valid_boundary2=0x%x\n",
+ valid_boundary, valid_boundary2);
+ pr_debug("dp_tu=0x%x\n", dp_tu);
}
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
@@ -441,6 +487,17 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
return config;
}
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt)
+{
+ u32 info = 0x0;
+
+ info |= (ln_cnt & 0x0F);
+ info |= ((orientation & 0x0F) << 4);
+ pr_debug("Shared Info = 0x%x\n", info);
+ writel_relaxed(info, phy_io->base + DP_PHY_SPARE0);
+}
+
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io, char link_rate)
{
u32 acr_ctrl = 0;
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index 5eb9d092476f..cf2286f9b58a 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -150,6 +150,8 @@
#define DP_PHY_AUX_INTERRUPT_MASK (0x00000044)
#define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000048)
+#define DP_PHY_SPARE0 0x00A8
+
#define QSERDES_TX0_OFFSET 0x0400
#define QSERDES_TX1_OFFSET 0x0800
@@ -200,17 +202,72 @@ struct edp_cmd {
char next; /* next command */
};
+struct dp_vc_tu_mapping_table {
+ u32 vic;
+ u8 lanes;
+ u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20) */
+ u8 bpp;
+ u8 valid_boundary_link;
+ u16 delay_start_link;
+ bool boundary_moderation_en;
+ u8 valid_lower_boundary_link;
+ u8 upper_boundary_count;
+ u8 lower_boundary_count;
+ u8 tu_size_minus1;
+};
+
+static const struct dp_vc_tu_mapping_table tu_table[] = {
+ {HDMI_VFRMT_640x480p60_4_3, 4, 06, 24,
+ 0x07, 0x0056, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 2, 06, 24,
+ 0x0e, 0x004f, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 1, 06, 24,
+ 0x15, 0x0039, false, 0x00, 0x00, 0x00, 0x2c},
+ {HDMI_VFRMT_720x480p60_4_3, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_720x480p60_16_9, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_1280x720p60_16_9, 4, 06, 24,
+ 0x0c, 0x0020, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 2, 06, 24,
+ 0x16, 0x0015, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 1, 10, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 4, 06, 24,
+ 0x16, 0x000f, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1920x1080p60_16_9, 2, 10, 24,
+ 0x21, 0x0011, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 1, 20, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p30_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p60_16_9, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p24_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p30_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p60_256_135, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_EVFRMT_4096x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+};
+
int dp_aux_read(void *ep, struct edp_cmd *cmds);
int dp_aux_write(void *ep, struct edp_cmd *cmd);
void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data);
u32 mdss_dp_get_ctrl_hw_version(struct dss_io_data *ctrl_io);
u32 mdss_dp_get_phy_hw_version(struct dss_io_data *phy_io);
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io);
void mdss_dp_aux_reset(struct dss_io_data *ctrl_io);
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io);
void mdss_dp_phy_reset(struct dss_io_data *ctrl_io);
void mdss_dp_switch_usb3_phy_to_dp_mode(struct dss_io_data *tcsr_reg_io);
void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert);
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io);
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res);
void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io);
void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable);
@@ -231,6 +288,8 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status);
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp);
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
struct lane_mapping l_map);
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt);
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io,
char link_rate);
void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io);
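
mdss_dp_setup_tr_unit() now keys its register programming off tu_table, matching on (vic, lane count, link-rate code) and bailing out with an error for combinations that are absent. A minimal caller sketch, assuming the 1080p60 entry above with 4 lanes at the 1.62 Gbps rate code (06) and an already-mapped register block:

/* sketch only: ctrl_io is assumed to be a mapped DP controller block */
static void dp_example_configure_tr_unit(struct dss_io_data *ctrl_io)
{
	u8 link_rate = 06;	/* 1.62 Gbps code, per the table comment */
	u8 lane_cnt = 4;
	u32 vic = HDMI_VFRMT_1920x1080p60_16_9;

	/* programs DP_TU and both valid-boundary registers from tu_table */
	mdss_dp_setup_tr_unit(ctrl_io, link_rate, lane_cnt, vic);
}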
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 8ffba091e2b2..01fc01425a3a 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -856,6 +856,48 @@ static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
return 0;
}
+static void mdss_dsi_parse_mdp_kickoff_threshold(struct device_node *np,
+ struct mdss_panel_info *pinfo)
+{
+ int len, rc;
+ const u32 *src;
+ u32 tmp;
+ u32 max_delay_us;
+
+ pinfo->mdp_koff_thshold = false;
+ src = of_get_property(np, "qcom,mdss-mdp-kickoff-threshold", &len);
+ if (!src || (len == 0))
+ return;
+
+ rc = of_property_read_u32(np, "qcom,mdss-mdp-kickoff-delay", &tmp);
+ if (!rc)
+ pinfo->mdp_koff_delay = tmp;
+ else
+ return;
+
+ if (pinfo->mipi.frame_rate == 0) {
+ pr_err("cannot enable guard window, unexpected panel fps\n");
+ return;
+ }
+
+ pinfo->mdp_koff_thshold_low = be32_to_cpu(src[0]);
+ pinfo->mdp_koff_thshold_high = be32_to_cpu(src[1]);
+ max_delay_us = 1000000 / pinfo->mipi.frame_rate;
+
+ /* enable the feature if threshold is valid */
+ if ((pinfo->mdp_koff_thshold_low < pinfo->mdp_koff_thshold_high) &&
+ ((pinfo->mdp_koff_delay > 0) &&
+ (pinfo->mdp_koff_delay < max_delay_us)))
+ pinfo->mdp_koff_thshold = true;
+
+ pr_debug("panel kickoff thshold:[%d, %d] delay:%d (max:%d) enable:%d\n",
+ pinfo->mdp_koff_thshold_low,
+ pinfo->mdp_koff_thshold_high,
+ pinfo->mdp_koff_delay,
+ max_delay_us,
+ pinfo->mdp_koff_thshold);
+}
+
static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
char *trigger_key)
{
@@ -2497,6 +2539,8 @@ static int mdss_panel_parse_dt(struct device_node *np,
rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
+ mdss_dsi_parse_mdp_kickoff_threshold(np, pinfo);
+
pinfo->mipi.lp11_init = of_property_read_bool(np,
"qcom,mdss-dsi-lp11-init");
rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
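
With the corrected range check, the guard window is enabled only when the scanline window is non-empty and the configured delay fits inside one frame time. A standalone sketch of that validation, using assumed values (a 60 fps panel and a 100 us delay):

#include <stdbool.h>
#include <stdio.h>

int main(void)
{
	unsigned int frame_rate = 60;		/* assumed panel fps */
	unsigned int thshold_low = 100, thshold_high = 1000;
	unsigned int koff_delay = 100;		/* microseconds, assumed */
	unsigned int max_delay_us = 1000000 / frame_rate;
	bool enable;

	enable = (thshold_low < thshold_high) &&
		 (koff_delay > 0) && (koff_delay < max_delay_us);
	printf("guard window enabled: %d (max delay %u us)\n",
	       enable, max_delay_us);
	return 0;
}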
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 9ed909e9a387..b3d929b15b44 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -560,7 +560,7 @@ int msm_hdmi_get_timing_info(
int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
struct hdmi_util_ds_data *ds_data, u32 mode)
{
- int ret;
+ int ret, i = 0;
if (!info)
return -EINVAL;
@@ -570,9 +570,23 @@ int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
ret = msm_hdmi_get_timing_info(info, mode);
- if (!ret && ds_data && ds_data->ds_registered && ds_data->ds_max_clk) {
- if (info->pixel_freq > ds_data->ds_max_clk)
- info->supported = false;
+ if (!ret && ds_data && ds_data->ds_registered) {
+ if (ds_data->ds_max_clk) {
+ if (info->pixel_freq > ds_data->ds_max_clk)
+ info->supported = false;
+ }
+
+ if (ds_data->modes_num) {
+ u32 *modes = ds_data->modes;
+
+ for (i = 0; i < ds_data->modes_num; i++) {
+ if (info->video_format == *modes++)
+ break;
+ }
+
+ if (i == ds_data->modes_num)
+ info->supported = false;
+ }
}
return ret;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
index e65cf915fe92..8a7e4d1ebafc 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -459,6 +459,8 @@ struct hdmi_tx_ddc_ctrl {
struct hdmi_util_ds_data {
bool ds_registered;
u32 ds_max_clk;
+ u32 modes_num;
+ u32 *modes;
};
static inline int hdmi_tx_get_v_total(const struct msm_hdmi_mode_timing_info *t)
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 81e3438befca..6845b386807b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1992,6 +1992,8 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
set_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map);
set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
mdata->enable_cdp = false; /* disable cdp */
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 0085163ada52..8ac63aaaefce 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -122,6 +122,11 @@
*/
#define MDSS_MDP_DS_OVERFETCH_SIZE 5
+#define QOS_LUT_NRT_READ 0x0
+#define QOS_LUT_CWB_READ 0xe4000000
+#define PANIC_LUT_NRT_READ 0x0
+#define ROBUST_LUT_NRT_READ 0xFFFF
+
/* hw cursor can only be setup in highest mixer stage */
#define HW_CURSOR_STAGE(mdata) \
(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
@@ -407,7 +412,7 @@ struct mdss_mdp_cwb {
struct list_head data_queue;
int valid;
u32 wb_idx;
- struct mdp_output_layer *layer;
+ struct mdp_output_layer layer;
void *priv_data;
struct msm_sync_pt_data cwb_sync_pt_data;
struct blocking_notifier_head notifier_head;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ebc7d2144eb9..eb1e0b5c47a6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -3424,6 +3424,7 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
mutex_lock(&cwb->queue_lock);
cwb_data = list_first_entry_or_null(&cwb->data_queue,
struct mdss_mdp_wb_data, next);
+ if (cwb_data)
+ __list_del_entry(&cwb_data->next);
mutex_unlock(&cwb->queue_lock);
if (cwb_data == NULL) {
pr_err("no output buffer for cwb\n");
@@ -3453,14 +3454,14 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode |= MDSS_MDP_CTL_OP_WFD_MODE;
/* Select CWB data point */
- data_point = (cwb->layer->flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
+ data_point = (cwb->layer.flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
writel_relaxed(data_point, mdata->mdp_base + mdata->ppb_ctl[2]);
if (sctl)
writel_relaxed(data_point + 1,
mdata->mdp_base + mdata->ppb_ctl[3]);
- /* Flush WB */
- ctl->flush_bits |= BIT(16);
+ /* Flush WB and CTL */
+ ctl->flush_bits |= BIT(16) | BIT(17);
opmode = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_TOP) | ctl->opmode;
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);
@@ -3469,6 +3470,10 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode;
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, opmode);
}
+
+ /* Increase commit count to signal CWB release fence */
+ atomic_inc(&cwb->cwb_sync_pt_data.commit_cnt);
+
goto cwb_setup_done;
cwb_setup_fail:
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 76fd2d12ac95..294e05c2fbb0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -541,6 +541,10 @@ enum mdss_mdp_writeback_index {
#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C12 0x06C
#define MDSS_MDP_REG_WB_OUT_SIZE 0x074
#define MDSS_MDP_REG_WB_ALPHA_X_VALUE 0x078
+#define MDSS_MDP_REG_WB_DANGER_LUT 0x084
+#define MDSS_MDP_REG_WB_SAFE_LUT 0x088
+#define MDSS_MDP_REG_WB_CREQ_LUT 0x08c
+#define MDSS_MDP_REG_WB_QOS_CTRL 0x090
#define MDSS_MDP_REG_WB_CSC_BASE 0x260
#define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
#define MDSS_MDP_REG_WB_CDP_CTRL 0x2B4
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 72d6175686b7..4eb121f01aca 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -73,6 +73,7 @@ struct mdss_mdp_cmd_ctx {
struct mutex clk_mtx;
spinlock_t clk_lock;
spinlock_t koff_lock;
+ spinlock_t ctlstart_lock;
struct work_struct gate_clk_work;
struct delayed_work delayed_off_clk_work;
struct work_struct pp_done_work;
@@ -144,15 +145,11 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
u32 init;
u32 height;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
-
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (!mixer) {
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
- if (!mixer) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (!mixer)
goto exit;
- }
}
init = mdss_mdp_pingpong_read(mixer->pingpong_base,
@@ -160,10 +157,8 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
height = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
- if (height < init) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (height < init)
goto exit;
- }
cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
@@ -173,13 +168,21 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
else
cnt -= init;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
-
pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
exit:
return cnt;
}
+static inline u32 mdss_mdp_cmd_line_count_wrapper(struct mdss_mdp_ctl *ctl)
+{
+ u32 ret;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ ret = mdss_mdp_cmd_line_count(ctl);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ return ret;
+}
+
static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -2677,12 +2680,42 @@ static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
return 0;
}
+static bool wait_for_read_ptr_if_late(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_ctl *sctl, struct mdss_panel_info *pinfo)
+{
+ u32 line_count;
+ u32 sline_count = 0;
+ bool ret = true;
+ u32 low_threshold = pinfo->mdp_koff_thshold_low;
+ u32 high_threshold = pinfo->mdp_koff_thshold_high;
+
+ /* read the line count */
+ line_count = mdss_mdp_cmd_line_count(ctl);
+ if (sctl)
+ sline_count = mdss_mdp_cmd_line_count(sctl);
+
+ /* if line count is between the range, return to trigger transfer */
+ if (((line_count > low_threshold) && (line_count < high_threshold)) &&
+ (!sctl || ((sline_count > low_threshold) &&
+ (sline_count < high_threshold))))
+ ret = false;
+
+ pr_debug("threshold:[%d, %d]\n", low_threshold, high_threshold);
+ pr_debug("line:%d sline:%d ret:%d\n", line_count, sline_count, ret);
+ MDSS_XLOG(line_count, sline_count, ret);
+
+ return ret;
+}
static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_cmd_ctx *ctx)
+ struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
bool is_pp_split = is_pingpong_split(ctl->mfd);
+ struct mdss_panel_info *pinfo = NULL;
+
+ if (ctl->panel_data)
+ pinfo = &ctl->panel_data->panel_info;
MDSS_XLOG(ctx->autorefresh_state);
@@ -2707,9 +2740,33 @@ static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
} else {
+
+ /*
+ * Some panels require that the MDP kickoff lands within a
+ * certain range of panel scanlines in order to trigger the
+ * transfer. If that is the case, make sure the panel
+ * scanline is within the limit before starting.
+ * Acquire a spinlock for this operation to raise the
+ * priority of this thread and keep the context, so that
+ * the time between the scanline check and the kickoff is
+ * as short as possible.
+ */
+ if (pinfo && pinfo->mdp_koff_thshold) {
+ spin_lock(&ctx->ctlstart_lock);
+ if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
+ spin_unlock(&ctx->ctlstart_lock);
+ usleep_range(pinfo->mdp_koff_delay,
+ pinfo->mdp_koff_delay + 10);
+ spin_lock(&ctx->ctlstart_lock);
+ }
+ }
+
/* SW Kickoff */
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
MDSS_XLOG(0x11, ctx->autorefresh_state);
+
+ if (pinfo && pinfo->mdp_koff_thshold)
+ spin_unlock(&ctx->ctlstart_lock);
}
}
@@ -2841,7 +2898,7 @@ static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
}
/* Kickoff */
- __mdss_mdp_kickoff(ctl, ctx);
+ __mdss_mdp_kickoff(ctl, sctl, ctx);
mdss_mdp_cmd_post_programming(ctl);
@@ -3267,6 +3324,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
init_completion(&ctx->autorefresh_done);
spin_lock_init(&ctx->clk_lock);
spin_lock_init(&ctx->koff_lock);
+ spin_lock_init(&ctx->ctlstart_lock);
mutex_init(&ctx->clk_mtx);
mutex_init(&ctx->mdp_rdptr_lock);
mutex_init(&ctx->mdp_wrptr_lock);
@@ -3557,7 +3615,7 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
- ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+ ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count_wrapper;
ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
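
The guard-window path in __mdss_mdp_kickoff() boils down to check-under-lock, back off unlocked, re-lock, then kick. A distilled sketch of that pattern using the names from the hunk above (simplified: the real code only takes the lock when mdp_koff_thshold is set):

/* sketch: trigger CTL_START only while the panel read pointer is safe */
static void kickoff_with_guard_window(struct mdss_mdp_ctl *ctl,
		struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx,
		struct mdss_panel_info *pinfo)
{
	spin_lock(&ctx->ctlstart_lock);
	if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
		/* too close to the read pointer: sleep outside the lock */
		spin_unlock(&ctx->ctlstart_lock);
		usleep_range(pinfo->mdp_koff_delay,
			     pinfo->mdp_koff_delay + 10);
		spin_lock(&ctx->ctlstart_lock);
	}
	mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
	spin_unlock(&ctx->ctlstart_lock);
}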
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 40b10e368309..e6e03e7d54b2 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -124,6 +124,30 @@ static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
writel_relaxed(val, ctx->base + reg);
}
+static void mdss_mdp_set_qos_wb(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_writeback_ctx *ctx)
+{
+ u32 wb_qos_setup = QOS_LUT_NRT_READ;
+ struct mdss_mdp_cwb *cwb = NULL;
+ struct mdss_overlay_private *mdp5_data;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+ if (!test_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map))
+ return;
+
+ mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+ cwb = &mdp5_data->cwb;
+
+ if (cwb->valid)
+ wb_qos_setup = QOS_LUT_CWB_READ;
+
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DANGER_LUT, PANIC_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_SAFE_LUT, ROBUST_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_CREQ_LUT, wb_qos_setup);
+}
+
static void mdss_mdp_set_ot_limit_wb(struct mdss_mdp_writeback_ctx *ctx,
int is_wfd)
{
@@ -447,7 +471,7 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
cwb = &mdp5_data->cwb;
ctx = (struct mdss_mdp_writeback_ctx *)cwb->priv_data;
- buffer = &cwb->layer->buffer;
+ buffer = &cwb->layer.buffer;
ctx->opmode = 0;
ctx->img_width = buffer->width;
@@ -495,6 +519,8 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
if (ctl->mdata->default_ot_wr_limit || ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, false);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
return ret;
}
@@ -897,6 +923,8 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, true);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
wb_args = (struct mdss_mdp_writeback_arg *) arg;
if (!wb_args)
return -ENOENT;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 91d4332700b6..0f0df2256f74 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -2285,12 +2285,12 @@ end:
return ret;
}
-int __is_cwb_requested(uint32_t output_layer_flags)
+int __is_cwb_requested(uint32_t commit_flags)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int req = 0;
- req = output_layer_flags & MDP_COMMIT_CWB_EN;
+ req = commit_flags & MDP_COMMIT_CWB_EN;
if (req && !test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map)) {
pr_err("CWB not supported");
return -ENODEV;
@@ -2330,7 +2330,7 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
return -EINVAL;
if (commit->output_layer) {
- ret = __is_cwb_requested(commit->output_layer->flags);
+ ret = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(ret)) {
return ret;
} else if (ret) {
@@ -2493,7 +2493,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
if (commit->output_layer) {
- rc = __is_cwb_requested(commit->output_layer->flags);
+ rc = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(rc)) {
return rc;
} else if (rc) {
@@ -2553,7 +2553,7 @@ int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd,
return rc;
}
- mdp5_data->cwb.layer = commit->output_layer;
+ mdp5_data->cwb.layer = *commit->output_layer;
mdp5_data->cwb.wb_idx = commit->output_layer->writeback_ndx;
mutex_lock(&mdp5_data->cwb.queue_lock);
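
Switching cwb.layer from a pointer into the commit structure to an embedded struct means the writeback path now owns its own copy, so nothing dangles once the caller's commit data goes away. A toy illustration of the difference, with hypothetical names:

struct out_layer { u32 flags; };

struct holder_by_ptr { struct out_layer *layer; };	/* can dangle */
struct holder_by_val { struct out_layer layer; };	/* owns a copy */

static void cwb_take_copy(struct holder_by_val *h,
			  const struct out_layer *src)
{
	h->layer = *src;	/* value copy outlives the caller's buffer */
}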
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index a633528b5373..463d26643dde 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -635,6 +635,10 @@ struct mdss_panel_info {
u32 saved_fporch;
/* current fps, once is programmed in hw */
int current_fps;
+ u32 mdp_koff_thshold_low;
+ u32 mdp_koff_thshold_high;
+ bool mdp_koff_thshold;
+ u32 mdp_koff_delay;
int panel_max_fps;
int panel_max_vtotal;
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index b5da4ad1a86b..eab7bcaaa156 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -573,7 +573,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
struct mdss_smmu_domain smmu_domain;
const struct of_device_id *match;
struct dss_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
const __be32 *address = NULL, *size = NULL;
@@ -667,13 +666,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- pr_err("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE ||
smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c
index e229f52057d4..4899231787f2 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/video/fbdev/msm/msm_ext_display.c
@@ -365,6 +365,7 @@ static int msm_ext_disp_hpd(struct platform_device *pdev,
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
@@ -463,6 +464,20 @@ end:
return ret;
}
+static void msm_ext_disp_teardown_done(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+ if (ret || !data) {
+ pr_err("invalid input");
+ return;
+ }
+
+ data->codec_ops.teardown_done(data->pdev);
+}
+
static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
{
int ret = 0;
@@ -545,6 +560,8 @@ static int msm_ext_disp_notify(struct platform_device *pdev,
msm_ext_disp_cable_status;
ext_disp->ops->get_intf_id =
msm_ext_disp_get_intf_id;
+ ext_disp->ops->teardown_done =
+ msm_ext_disp_teardown_done;
}
switch_set_state(&ext_disp->audio_sdev, (int)new_state);
@@ -614,6 +631,7 @@ static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b15e6edb8f2c..933f1866b811 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3602,6 +3602,7 @@ int ext4_can_truncate(struct inode *inode)
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
+#if 0
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
@@ -3725,6 +3726,12 @@ out_dio:
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
+#else
+ /*
+ * Disabled as per b/28760453
+ */
+ return -EOPNOTSUPP;
+#endif
}
int ext4_inode_attach_jinode(struct inode *inode)
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 1a96fdaa33d5..e133705d794a 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -26,6 +26,10 @@ SUBSYS(cpu)
SUBSYS(cpuacct)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_SCHEDTUNE)
+SUBSYS(schedtune)
+#endif
+
#if IS_ENABLED(CONFIG_BLK_CGROUP)
SUBSYS(io)
#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f4f5af978c7c..c34a68ce901a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -121,7 +121,6 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_PT_BASE_ADDR,
DOMAIN_ATTR_SECURE_VMID,
DOMAIN_ATTR_ATOMIC,
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 873a778d5370..59ba776b5f9b 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -108,6 +108,7 @@ struct msm_ext_disp_audio_codec_ops {
struct msm_ext_disp_audio_edid_blk *blk);
int (*cable_status)(struct platform_device *pdev, u32 vote);
int (*get_intf_id)(struct platform_device *pdev);
+ void (*teardown_done)(struct platform_device *pdev);
};
/*
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index c95a529b029b..fb2607dd365b 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -13,6 +13,14 @@
#define MSM_GSI_H
#include <linux/types.h>
+enum gsi_ver {
+ GSI_VER_ERR = 0,
+ GSI_VER_1_0 = 1,
+ GSI_VER_1_2 = 2,
+ GSI_VER_1_3 = 3,
+ GSI_VER_MAX,
+};
+
enum gsi_status {
GSI_STATUS_SUCCESS = 0,
GSI_STATUS_ERROR = 1,
@@ -65,6 +73,7 @@ enum gsi_intr_type {
/**
* gsi_per_props - Peripheral related properties
*
+ * @gsi: GSI core version
* @ee: EE where this driver and peripheral driver runs
* @intr: control interrupt type
* @intvec: write data for MSI write
@@ -87,6 +96,7 @@ enum gsi_intr_type {
*
*/
struct gsi_per_props {
+ enum gsi_ver ver;
unsigned int ee;
enum gsi_intr_type intr;
uint32_t intvec;
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index e14cab59e90a..97eefc665130 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -192,6 +192,64 @@ TRACE_EVENT(cluster_exit,
__entry->from_idle)
);
+TRACE_EVENT(cluster_pred_select,
+
+ TP_PROTO(const char *name, int index, u32 sleep_us,
+ u32 latency, int pred, u32 pred_us),
+
+ TP_ARGS(name, index, sleep_us, latency, pred, pred_us),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, index)
+ __field(u32, sleep_us)
+ __field(u32, latency)
+ __field(int, pred)
+ __field(u32, pred_us)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->index = index;
+ __entry->sleep_us = sleep_us;
+ __entry->latency = latency;
+ __entry->pred = pred;
+ __entry->pred_us = pred_us;
+ ),
+
+ TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u",
+ __entry->name, __entry->index, __entry->sleep_us,
+ __entry->latency, __entry->pred, __entry->pred_us)
+);
+
+TRACE_EVENT(cluster_pred_hist,
+
+ TP_PROTO(const char *name, int idx, u32 resi,
+ u32 sample, u32 tmr),
+
+ TP_ARGS(name, idx, resi, sample, tmr),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, idx)
+ __field(u32, resi)
+ __field(u32, sample)
+ __field(u32, tmr)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->idx = idx;
+ __entry->resi = resi;
+ __entry->sample = sample;
+ __entry->tmr = tmr;
+ ),
+
+ TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u",
+ __entry->name, __entry->idx, __entry->resi,
+ __entry->sample, __entry->tmr)
+);
+
TRACE_EVENT(pre_pc_cb,
TP_PROTO(int tzflag),
diff --git a/init/Kconfig b/init/Kconfig
index 311669332867..eb9e1a0aa688 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1002,6 +1002,23 @@ config CGROUP_CPUACCT
config PAGE_COUNTER
bool
+config CGROUP_SCHEDTUNE
+ bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
+ depends on SCHED_TUNE
+ help
+ This option provides the "schedtune" controller, which improves the
+ flexibility of the task boosting mechanism by adding support for
+ "per task" boost values.
+
+ This new controller:
+ 1. allows only a two-layer hierarchy, where the root defines the
+ system-wide boost value and each of its direct children defines a
+ different "class of tasks" to be boosted with a different value
+ 2. supports up to 16 different task classes, each of which can be
+ configured with a different boost value
+
+ Say N if unsure.
+
config MEMCG
bool "Memory Resource Controller for Control Groups"
select PAGE_COUNTER
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 4c44b1a4ad98..3c964d6d3856 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -1,7 +1,231 @@
+#include <linux/cgroup.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+
#include "sched.h"
unsigned int sysctl_sched_cfs_boost __read_mostly;
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+/*
+ * EAS scheduler tunables for task groups.
+ */
+
+/* SchedTune tunables for a group of tasks */
+struct schedtune {
+ /* SchedTune CGroup subsystem */
+ struct cgroup_subsys_state css;
+
+ /* Boost group allocated ID */
+ int idx;
+
+ /* Boost value for tasks on that SchedTune CGroup */
+ int boost;
+
+};
+
+static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
+{
+ return css ? container_of(css, struct schedtune, css) : NULL;
+}
+
+static inline struct schedtune *task_schedtune(struct task_struct *tsk)
+{
+ return css_st(task_css(tsk, schedtune_cgrp_id));
+}
+
+static inline struct schedtune *parent_st(struct schedtune *st)
+{
+ return css_st(st->css.parent);
+}
+
+/*
+ * SchedTune root control group
+ * The root control group is used to define a system-wide boost tuning,
+ * which is applied to all tasks in the system.
+ * Task-specific boost tuning can be specified by creating and
+ * configuring a child control group under the root one.
+ * By default, system-wide boosting is disabled, i.e. no boosting is applied
+ * to tasks which are not in a child control group.
+ */
+static struct schedtune
+root_schedtune = {
+ .boost = 0,
+};
+
+/*
+ * Maximum number of boost groups to support
+ * When per-task boosting is used we still allow only a limited number of
+ * boost groups, for two main reasons:
+ * 1. on a real system we usually have only a few classes of workloads that
+ * make sense to boost with different values (e.g. background vs foreground
+ * tasks, interactive vs low-priority tasks)
+ * 2. a limited number allows for a simpler and more memory/time-efficient
+ * implementation, especially for the computation of the per-CPU boost
+ * value
+ */
+#define BOOSTGROUPS_COUNT 5
+
+/* Array of configured boostgroups */
+static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
+ &root_schedtune,
+ NULL,
+};
+
+/* SchedTune boost groups
+ * Keep track of all the boost groups which impact a CPU, e.g. when a CPU
+ * has two RUNNABLE tasks belonging to two different boost groups, likely
+ * with two different boost values.
+ * Since on each system we expect only a limited number of boost groups, here
+ * we use a simple array to keep track of the metrics required to compute the
+ * maximum per-CPU boosting value.
+ */
+struct boost_groups {
+ /* Maximum boost value for all RUNNABLE tasks on a CPU */
+ unsigned boost_max;
+ struct {
+ /* The boost for tasks on that boost group */
+ unsigned boost;
+ /* Count of RUNNABLE tasks on that boost group */
+ unsigned tasks;
+ } group[BOOSTGROUPS_COUNT];
+};
+
+/* Boost groups affecting each CPU in the system */
+DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
+
+static u64
+boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct schedtune *st = css_st(css);
+
+ return st->boost;
+}
+
+static int
+boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 boost)
+{
+ struct schedtune *st = css_st(css);
+
+ if (boost < 0 || boost > 100)
+ return -EINVAL;
+
+ st->boost = boost;
+ if (css == &root_schedtune.css)
+ sysctl_sched_cfs_boost = boost;
+
+ return 0;
+}
+
+static struct cftype files[] = {
+ {
+ .name = "boost",
+ .read_u64 = boost_read,
+ .write_u64 = boost_write,
+ },
+ { } /* terminate */
+};
+
+static int
+schedtune_boostgroup_init(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = st;
+
+ return 0;
+}
+
+static int
+schedtune_init(void)
+{
+ struct boost_groups *bg;
+ int cpu;
+
+ /* Initialize the per CPU boost groups */
+ for_each_possible_cpu(cpu) {
+ bg = &per_cpu(cpu_boost_groups, cpu);
+ memset(bg, 0, sizeof(struct boost_groups));
+ }
+
+ pr_info(" schedtune configured to support %d boost groups\n",
+ BOOSTGROUPS_COUNT);
+ return 0;
+}
+
+static struct cgroup_subsys_state *
+schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct schedtune *st;
+ int idx;
+
+ if (!parent_css) {
+ schedtune_init();
+ return &root_schedtune.css;
+ }
+
+ /* Allow only single-level hierarchies */
+ if (parent_css != &root_schedtune.css) {
+ pr_err("Nested SchedTune boosting groups not allowed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Allow only a limited number of boosting groups */
+ for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
+ if (!allocated_group[idx])
+ break;
+ if (idx == BOOSTGROUPS_COUNT) {
+ pr_err("Trying to create more than %d SchedTune boosting groups\n",
+ BOOSTGROUPS_COUNT);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto out;
+
+ /* Initialize per CPUs boost group support */
+ st->idx = idx;
+ if (schedtune_boostgroup_init(st))
+ goto release;
+
+ return &st->css;
+
+release:
+ kfree(st);
+out:
+ return ERR_PTR(-ENOMEM);
+}
+
+static void
+schedtune_boostgroup_release(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = NULL;
+}
+
+static void
+schedtune_css_free(struct cgroup_subsys_state *css)
+{
+ struct schedtune *st = css_st(css);
+
+ schedtune_boostgroup_release(st);
+ kfree(st);
+}
+
+struct cgroup_subsys schedtune_cgrp_subsys = {
+ .css_alloc = schedtune_css_alloc,
+ .css_free = schedtune_css_free,
+ .legacy_cftypes = files,
+ .early_init = 1,
+ .allow_attach = subsys_cgroup_allow_attach,
+};
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
int
sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
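
struct boost_groups already carries a per-group boost value and RUNNABLE-task count, but this patch does not include the aggregation that refreshes boost_max. A sketch of how that step could look, under the assumption that only groups with RUNNABLE tasks contribute:

/* sketch, not part of this patch: recompute boost_max for one CPU */
static void schedtune_cpu_update(int cpu)
{
	struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
	unsigned boost_max = 0;
	int idx;

	for (idx = 0; idx < BOOSTGROUPS_COUNT; ++idx) {
		/* empty boost groups do not contribute */
		if (bg->group[idx].tasks == 0)
			continue;
		if (bg->group[idx].boost > boost_max)
			boost_max = bg->group[idx].boost;
	}
	bg->boost_max = boost_max;
}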
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8e2f4ab15498..587dbe09c47d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -636,7 +636,11 @@ static struct ctl_table kern_table[] = {
.procname = "sched_cfs_boost",
.data = &sysctl_sched_cfs_boost,
.maxlen = sizeof(sysctl_sched_cfs_boost),
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ .mode = 0444,
+#else
.mode = 0644,
+#endif
.proc_handler = &sysctl_sched_cfs_boost_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 2b3f46c049d4..554522934c44 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -74,7 +74,7 @@ next_tag:
/* Extract a tag from the data */
tag = data[dp++];
- if (tag == 0) {
+ if (tag == ASN1_EOC) {
/* It appears to be an EOC. */
if (data[dp++] != 0)
goto invalid_eoc;
@@ -96,10 +96,8 @@ next_tag:
/* Extract the length */
len = data[dp++];
- if (len <= 0x7f) {
- dp += len;
- goto next_tag;
- }
+ if (len <= 0x7f)
+ goto check_length;
if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
@@ -110,14 +108,18 @@ next_tag:
}
n = len - 0x80;
- if (unlikely(n > sizeof(size_t) - 1))
+ if (unlikely(n > sizeof(len) - 1))
goto length_too_long;
if (unlikely(n > datalen - dp))
goto data_overrun_error;
- for (len = 0; n > 0; n--) {
+ len = 0;
+ for (; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
+check_length:
+ if (len > datalen - dp)
+ goto data_overrun_error;
dp += len;
goto next_tag;
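
The decoder fix above does two things: it caps a multi-byte length at sizeof(len) - 1 octets, and the new check_length label rejects any definite length that would overrun the remaining buffer. A self-contained sketch of the same parse (definite-length forms only; the kernel code additionally handles indefinite lengths and EOC):

#include <stddef.h>
#include <stdint.h>

/* returns the content length, or (size_t)-1 on malformed/overlong input */
static size_t asn1_read_length(const uint8_t *data, size_t datalen,
			       size_t *dp)
{
	size_t len, n;

	if (*dp >= datalen)
		return (size_t)-1;
	len = data[(*dp)++];
	if (len > 0x7f) {
		n = len - 0x80;
		/* cap the octet count, as the kernel fix does */
		if (n > sizeof(len) - 1 || n > datalen - *dp)
			return (size_t)-1;
		for (len = 0; n > 0; n--) {
			len <<= 8;
			len |= data[(*dp)++];
		}
	}
	if (len > datalen - *dp)	/* the added overrun check */
		return (size_t)-1;
	return len;
}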
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b8f7e621e16e..32027efa5033 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
@@ -3428,7 +3428,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
- u32 now;
+ u32 count, now;
/* First check our per-socket dupack rate limit. */
if (tcp_oow_rate_limited(sock_net(sk), skb,
@@ -3436,13 +3436,18 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
&tp->last_oow_ack_time))
return;
- /* Then check the check host-wide RFC 5961 rate limit. */
+ /* Then check the host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ WRITE_ONCE(challenge_count, half +
+ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = READ_ONCE(challenge_count);
+ if (count > 0) {
+ WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
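
The rewritten limiter draws a per-second budget in [limit/2, limit/2 + limit) instead of counting up to a fixed limit, so an attacker can no longer infer the exact counter value. A userspace sketch of the budget refresh, with rand() standing in for the kernel's prandom_u32_max():

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

int main(void)
{
	unsigned int limit = 1000;
	unsigned int half = (limit + 1) >> 1;
	unsigned int budget;

	srand((unsigned int)time(NULL));
	/* refreshed once per second in the kernel code above */
	budget = half + (unsigned int)(rand() % limit);
	printf("challenge ACK budget this second: %u\n", budget);
	return 0;
}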
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 5c40c55a4a0c..547af163c5c0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -253,218 +253,218 @@ static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload,
#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
- { PRIMARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
- { PRIMARY_I2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
- { SLIMBUS_0_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
- { SLIMBUS_0_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
- { HDMI_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
- { INT_BT_SCO_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
- { INT_BT_SCO_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
- { INT_FM_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
- { INT_FM_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
- { RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
- { RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
- { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PRIMARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
+ { PRIMARY_I2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
+ { SLIMBUS_0_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
+ { SLIMBUS_0_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
+ { HDMI_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
+ { INT_BT_SCO_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
+ { INT_BT_SCO_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
+ { INT_FM_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
+ { INT_FM_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
+ { RT_PROXY_PORT_001_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
+ { RT_PROXY_PORT_001_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
+ { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_RX},
- { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_TX},
- { VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE_PLAYBACK_TX},
- { VOICE2_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE2_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE2_PLAYBACK_TX},
- { VOICE_RECORD_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
- { VOICE_RECORD_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
- { MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
- { MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
- { SECONDARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
- { SLIMBUS_1_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
- { SLIMBUS_1_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
- { SLIMBUS_2_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
- { SLIMBUS_4_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
- { SLIMBUS_4_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
- { SLIMBUS_3_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
- { SLIMBUS_3_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
- { SLIMBUS_5_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
- { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_RECORD_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
+ { VOICE_RECORD_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
+ { MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
+ { MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
+ { SECONDARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
+ { SLIMBUS_1_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
+ { SLIMBUS_1_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
+ { SLIMBUS_2_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
+ { SLIMBUS_4_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
+ { SLIMBUS_4_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
+ { SLIMBUS_3_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
+ { SLIMBUS_3_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
+ { SLIMBUS_5_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
+ { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_RX},
- { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_TX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_RX},
- { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_TX},
- { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_RX},
- { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_TX},
- { AUDIO_PORT_ID_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AUDIO_PORT_ID_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUDIO_I2S_RX},
- { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_RX},
- { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_TX},
- { SLIMBUS_6_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
- { SLIMBUS_6_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
- { AFE_PORT_ID_SPDIF_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_6_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
+ { SLIMBUS_6_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
+ { AFE_PORT_ID_SPDIF_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
+ { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX_SD1},
- { SLIMBUS_5_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
- { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_5_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
+ { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_RX},
- { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_TX},
- { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SENARY_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_0},
- { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_0},
- { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_1},
- { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_1},
- { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_2},
- { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_2},
- { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_3},
- { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_3},
- { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_4},
- { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_4},
- { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_5},
- { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_5},
- { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_6},
- { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_6},
- { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_7},
- { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_7},
- { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_0},
- { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_0},
- { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_1},
- { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_1},
- { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_2},
- { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_2},
- { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_3},
- { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_3},
- { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_4},
- { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_4},
- { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_5},
- { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_5},
- { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_6},
- { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_6},
- { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_7},
- { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_7},
- { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_0},
- { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_0},
- { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_1},
- { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_1},
- { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_2},
- { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_2},
- { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_3},
- { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_3},
- { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_4},
- { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_4},
- { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_5},
- { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_5},
- { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_6},
- { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_6},
- { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_7},
- { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_7},
- { INT_BT_A2DP_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
- { SLIMBUS_7_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
- { SLIMBUS_7_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
- { SLIMBUS_8_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
- { SLIMBUS_8_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
- { AFE_PORT_ID_USB_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
- { AFE_PORT_ID_USB_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
- { DISPLAY_PORT_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
- { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { INT_BT_A2DP_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
+ { SLIMBUS_7_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
+ { SLIMBUS_7_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
+ { SLIMBUS_8_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
+ { SLIMBUS_8_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
+ { AFE_PORT_ID_USB_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
+ { AFE_PORT_ID_USB_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
+ { DISPLAY_PORT_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
+ { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_RX},
- { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_TX},
- { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_RX},
- { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_TX},
};
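
Editor's note: the long table edit above is mechanical. Once port_sessions becomes an array, each positional initializer must supply {0} so that the whole array is zero-initialized. A minimal standalone sketch of that pattern, with hypothetical names rather than the driver's real struct:

#include <stdint.h>

#define SESSIONS_WORDS 4                 /* mirrors BE_DAI_PORT_SESSIONS_IDX_MAX */

struct be_dai {                          /* hypothetical stand-in for the bdai data */
	uint16_t port_id;
	uint64_t port_sessions[SESSIONS_WORDS];
};

/* {0} zeroes all four 64-bit words, matching the old single-u64 "0" initializer */
static struct be_dai dais[] = {
	{ 0x1000, {0} },
	{ 0x1001, {0} },
};
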
@@ -2006,11 +2006,20 @@ static int msm_routing_slim_0_rx_aanc_mux_put(struct snd_kcontrol *kcontrol,
static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- if (test_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions))
+ idx = mc->shift/(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+ shift = mc->shift%(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ if (test_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]))
ucontrol->value.integer.value[0] = 1;
else
ucontrol->value.integer.value[0] = 0;
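
For reference, a minimal userspace sketch of the index/shift split performed above, assuming 64-bit words as with the kernel's u64 (names here are illustrative, not the driver's):

#include <stdint.h>
#include <stdio.h>

#define SESSIONS_WORDS 4                      /* mirrors BE_DAI_PORT_SESSIONS_IDX_MAX */

/* Split a flat backend index into (word index, bit within word). */
static int split_bit(unsigned int be, int *idx, int *shift)
{
	*idx = be / (sizeof(uint64_t) * 8);   /* which 64-bit word */
	*shift = be % (sizeof(uint64_t) * 8); /* bit inside that word */
	return (*idx < SESSIONS_WORDS) ? 0 : -1;
}

int main(void)
{
	int idx, shift;

	if (!split_bit(70, &idx, &shift))     /* BE 70 -> word 1, bit 6 */
		printf("port_sessions[%d], bit %d\n", idx, shift);
	return 0;
}
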
@@ -2024,22 +2033,32 @@ static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- pr_debug("%s: reg 0x%x shift 0x%x val %ld\n", __func__, mc->reg,
- mc->shift, ucontrol->value.integer.value[0]);
+ idx = mc->shift/(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+ shift = mc->shift%(sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: reg 0x%x shift 0x%x val %ld idx %d reminder shift %d\n",
+ __func__, mc->reg, mc->shift,
+ ucontrol->value.integer.value[0], idx, shift);
if (ucontrol->value.integer.value[0]) {
afe_loopback(1, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- set_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ set_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
} else {
afe_loopback(0, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- clear_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ clear_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
}
return 1;
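
And a hedged sketch of the set/clear/test behaviour on the widened bitmap, using plain C bit operations as stand-ins for the kernel's set_bit/clear_bit/test_bit (all names below are illustrative):

#include <stdbool.h>
#include <stdint.h>

static uint64_t port_sessions[4];          /* mirrors the new array field */

static void session_set(unsigned int be, bool on)
{
	int idx = be / 64, shift = be % 64;

	if (idx >= 4)
		return;                    /* same bounds check as the patch */
	if (on)
		port_sessions[idx] |= (uint64_t)1 << shift;
	else
		port_sessions[idx] &= ~((uint64_t)1 << shift);
}

static bool session_test(unsigned int be)
{
	int idx = be / 64, shift = be % 64;

	return idx < 4 && ((port_sessions[idx] >> shift) & 1);
}
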
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 6b7f2113e0f6..8e3086849d92 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -355,6 +355,7 @@ enum {
#define ADM_PP_PARAM_MUTE_BIT 1
#define ADM_PP_PARAM_LATENCY_ID 1
#define ADM_PP_PARAM_LATENCY_BIT 2
+#define BE_DAI_PORT_SESSIONS_IDX_MAX 4
struct msm_pcm_routing_evt {
void (*event_func)(enum msm_pcm_routing_event, void *);
@@ -365,10 +366,15 @@ struct msm_pcm_routing_bdai_data {
u16 port_id; /* AFE port ID */
u8 active; /* track if this backend is enabled */
unsigned long fe_sessions; /* Front-end sessions */
- u64 port_sessions; /* track Tx BE ports -> Rx BE
- * number of BE should not exceed
- * the size of this field
- */
+ /*
+ * Track Tx BE ports -> Rx BE ports.
+ * port_sessions[0] tracks BE 0 to BE 63.
+ * port_sessions[1] tracks BE 64 to BE 127.
+ * port_sessions[2] tracks BE 128 to BE 191.
+ * port_sessions[3] tracks BE 192 to BE 255.
+ */
+ u64 port_sessions[BE_DAI_PORT_SESSIONS_IDX_MAX];
+
unsigned int sample_rate;
unsigned int channel;
unsigned int format;
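
As a worked example of the layout in the comment above (a sanity check, not driver code): backend index 130 lands in port_sessions[130 / 64] == port_sessions[2], at bit 130 % 64 == 2.

#include <assert.h>

int main(void)
{
	/* BE 130 lives in the third word, bit 2 */
	assert(130 / 64 == 2 && 130 % 64 == 2);
	return 0;
}
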