-rw-r--r--  Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt | 31
-rw-r--r--  Documentation/devicetree/bindings/gpu/adreno-iommu.txt | 2
-rw-r--r--  Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt | 4
-rw-r--r--  Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt | 21
-rw-r--r--  Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt | 18
-rw-r--r--  Documentation/devicetree/bindings/platform/msm/ipa.txt | 1
-rw-r--r--  Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt | 112
-rw-r--r--  Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt | 6
-rw-r--r--  Documentation/devicetree/bindings/pwm/pwm-qpnp.txt | 1
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi | 54
-rw-r--r--  arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi | 50
-rw-r--r--  arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi | 1
-rw-r--r--  arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi | 159
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi | 7
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi | 2
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi | 50
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi | 42
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi | 47
-rw-r--r--  arch/arm/boot/dts/qcom/msmcobalt.dtsi | 13
-rw-r--r--  arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi | 23
-rw-r--r--  arch/arm/boot/dts/qcom/msmfalcon.dtsi | 43
-rw-r--r--  arch/arm/boot/dts/qcom/msmtriton.dtsi | 76
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig | 3
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig | 3
-rw-r--r--  arch/arm64/configs/msmfalcon_defconfig | 1
-rw-r--r--  drivers/char/adsprpc.c | 26
-rw-r--r--  drivers/clk/msm/clock-gcc-cobalt.c | 2
-rw-r--r--  drivers/clk/msm/clock-osm.c | 33
-rw-r--r--  drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c | 83
-rw-r--r--  drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h | 1
-rw-r--r--  drivers/clk/qcom/gcc-msmfalcon.c | 30
-rw-r--r--  drivers/cpuidle/lpm-levels.c | 321
-rw-r--r--  drivers/cpuidle/lpm-levels.h | 17
-rw-r--r--  drivers/gpu/msm/Makefile | 2
-rw-r--r--  drivers/gpu/msm/adreno.c | 156
-rw-r--r--  drivers/gpu/msm/adreno.h | 67
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c | 254
-rw-r--r--  drivers/gpu/msm/adreno_a5xx_preempt.c | 2
-rw-r--r--  drivers/gpu/msm/adreno_debugfs.c | 76
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.c | 1244
-rw-r--r--  drivers/gpu/msm/adreno_dispatch.h | 38
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c | 54
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.h | 27
-rw-r--r--  drivers/gpu/msm/adreno_perfcounter.c | 27
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c | 178
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.h | 6
-rw-r--r--  drivers/gpu/msm/adreno_trace.h | 64
-rw-r--r--  drivers/gpu/msm/kgsl.c | 268
-rw-r--r--  drivers/gpu/msm/kgsl.h | 19
-rw-r--r--  drivers/gpu/msm/kgsl_cffdump.c | 4
-rw-r--r--  drivers/gpu/msm/kgsl_cffdump.h | 6
-rw-r--r--  drivers/gpu/msm/kgsl_cmdbatch.h | 168
-rw-r--r--  drivers/gpu/msm/kgsl_compat.h | 8
-rw-r--r--  drivers/gpu/msm/kgsl_device.h | 14
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.c (renamed from drivers/gpu/msm/kgsl_cmdbatch.c) | 642
-rw-r--r--  drivers/gpu/msm/kgsl_drawobj.h | 198
-rw-r--r--  drivers/gpu/msm/kgsl_iommu.c | 13
-rw-r--r--  drivers/gpu/msm/kgsl_mmu.h | 2
-rw-r--r--  drivers/gpu/msm/kgsl_trace.h | 44
-rw-r--r--  drivers/iio/adc/qcom-rradc.c | 100
-rw-r--r--  drivers/input/misc/Kconfig | 2
-rw-r--r--  drivers/input/misc/Makefile | 1
-rw-r--r--  drivers/input/misc/ots_pat9125/Kconfig | 14
-rw-r--r--  drivers/input/misc/ots_pat9125/Makefile | 7
-rw-r--r--  drivers/input/misc/ots_pat9125/pat9125_linux_driver.c | 546
-rw-r--r--  drivers/input/misc/ots_pat9125/pixart_ots.c | 72
-rw-r--r--  drivers/input/misc/ots_pat9125/pixart_ots.h | 44
-rw-r--r--  drivers/input/misc/ots_pat9125/pixart_platform.h | 18
-rw-r--r--  drivers/iommu/arm-smmu.c | 39
-rw-r--r--  drivers/iommu/dma-mapping-fast.c | 6
-rw-r--r--  drivers/iommu/io-pgtable-arm.c | 4
-rw-r--r--  drivers/iommu/iommu-debug.c | 22
-rw-r--r--  drivers/iommu/msm_dma_iommu_mapping.c | 12
-rw-r--r--  drivers/leds/leds-qpnp-flash-v2.c | 107
-rw-r--r--  drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c | 15
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c | 131
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c | 8
-rw-r--r--  drivers/media/platform/msm/vidc/msm_venc.c | 105
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_dcvs.c | 8
-rw-r--r--  drivers/media/platform/msm/vidc/msm_vidc_res_parse.c | 9
-rw-r--r--  drivers/media/platform/msm/vidc/venus_boot.c | 13
-rw-r--r--  drivers/media/platform/msm/vidc/venus_hfi.c | 18
-rw-r--r--  drivers/mfd/wcd934x-regmap.c | 43
-rw-r--r--  drivers/mfd/wcd9xxx-regmap.h | 7
-rw-r--r--  drivers/misc/qcom/qdsp6v2/audio_utils.c | 1
-rw-r--r--  drivers/misc/qcom/qdsp6v2/audio_utils_aio.c | 20
-rw-r--r--  drivers/net/wireless/cnss/cnss_pci.c | 10
-rw-r--r--  drivers/platform/msm/gsi/gsi.c | 215
-rw-r--r--  drivers/platform/msm/gsi/gsi.h | 10
-rw-r--r--  drivers/platform/msm/gsi/gsi_dbg.c | 53
-rw-r--r--  drivers/platform/msm/gsi/gsi_reg.h | 217
-rw-r--r--  drivers/platform/msm/ipa/ipa_api.c | 12
-rw-r--r--  drivers/platform/msm/ipa/ipa_clients/ipa_usb.c | 16
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa.c | 42
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_dp.c | 26
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c | 26
-rw-r--r--  drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c | 12
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c | 68
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c | 24
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_i.h | 2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c | 21
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c | 18
-rw-r--r--  drivers/platform/msm/msm_11ad/msm_11ad.c | 12
-rw-r--r--  drivers/power/power_supply_sysfs.c | 1
-rw-r--r--  drivers/power/qcom-charger/fg-core.h | 63
-rw-r--r--  drivers/power/qcom-charger/fg-reg.h | 1
-rw-r--r--  drivers/power/qcom-charger/fg-util.c | 44
-rw-r--r--  drivers/power/qcom-charger/qpnp-fg-gen3.c | 1297
-rw-r--r--  drivers/power/qcom-charger/qpnp-smb2.c | 30
-rw-r--r--  drivers/power/qcom-charger/smb-lib.c | 109
-rw-r--r--  drivers/power/qcom-charger/smb-lib.h | 9
-rw-r--r--  drivers/power/qcom-charger/smb-reg.h | 10
-rw-r--r--  drivers/power/qcom-charger/smb138x-charger.c | 17
-rw-r--r--  drivers/pwm/pwm-qpnp.c | 11
-rw-r--r--  drivers/scsi/ufs/ufs-qcom.c | 19
-rw-r--r--  drivers/scsi/ufs/ufshcd.c | 78
-rw-r--r--  drivers/scsi/ufs/ufshcd.h | 9
-rw-r--r--  drivers/soc/qcom/glink.c | 4
-rw-r--r--  drivers/soc/qcom/icnss.c | 79
-rw-r--r--  drivers/soc/qcom/msm_smem.c | 6
-rw-r--r--  drivers/soc/qcom/qdsp6v2/msm_audio_ion.c | 5
-rw-r--r--  drivers/soc/qcom/qsee_ipc_irq_bridge.c | 12
-rw-r--r--  drivers/soc/qcom/rpm-smd.c | 8
-rw-r--r--  drivers/usb/dwc3/gadget.c | 22
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 2
-rw-r--r--  drivers/usb/gadget/function/f_ncm.c | 82
-rw-r--r--  drivers/usb/pd/policy_engine.c | 174
-rw-r--r--  drivers/video/fbdev/core/fbcmap.c | 8
-rw-r--r--  drivers/video/fbdev/msm/mdss.h | 1
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.c | 158
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp.h | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_aux.c | 172
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_util.c | 106
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_util.h | 61
-rw-r--r--  drivers/video/fbdev/msm/mdss_dsi_panel.c | 44
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_edid.c | 170
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.c | 39
-rw-r--r--  drivers/video/fbdev/msm/mdss_hdmi_util.h | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c | 2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h | 7
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c | 11
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_hwio.h | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c | 84
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c | 30
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_layer.c | 10
-rw-r--r--  drivers/video/fbdev/msm/mdss_panel.h | 4
-rw-r--r--  drivers/video/fbdev/msm/mdss_smmu.c | 8
-rw-r--r--  drivers/video/fbdev/msm/msm_ext_display.c | 18
-rw-r--r--  fs/ext4/inode.c | 7
-rw-r--r--  include/dt-bindings/clock/qcom,gcc-msmfalcon.h | 2
-rw-r--r--  include/linux/cgroup_subsys.h | 4
-rw-r--r--  include/linux/diagchar.h | 18
-rw-r--r--  include/linux/iommu.h | 1
-rw-r--r--  include/linux/leds-qpnp-flash.h | 3
-rw-r--r--  include/linux/mfd/wcd934x/registers.h | 2
-rw-r--r--  include/linux/msm_ext_display.h | 1
-rw-r--r--  include/linux/msm_gsi.h | 10
-rw-r--r--  include/linux/power_supply.h | 1
-rw-r--r--  include/soc/qcom/smem.h | 2
-rw-r--r--  include/sound/wcd-dsp-mgr.h | 18
-rw-r--r--  include/trace/events/trace_msm_low_power.h | 58
-rw-r--r--  init/Kconfig | 17
-rw-r--r--  kernel/sched/core.c | 39
-rw-r--r--  kernel/sched/fair.c | 65
-rw-r--r--  kernel/sched/hmp.c | 7
-rw-r--r--  kernel/sched/sched.h | 2
-rw-r--r--  kernel/sched/tune.c | 224
-rw-r--r--  kernel/sysctl.c | 4
-rw-r--r--  lib/asn1_decoder.c | 16
-rw-r--r--  net/core/dev.c | 3
-rw-r--r--  net/ipv4/tcp_input.c | 15
-rw-r--r--  net/wireless/db.txt | 2
-rw-r--r--  sound/soc/codecs/msm_hdmi_codec_rx.c | 7
-rw-r--r--  sound/soc/codecs/wcd-dsp-mgr.c | 62
-rw-r--r--  sound/soc/codecs/wcd-spi.c | 87
-rw-r--r--  sound/soc/codecs/wcd9335.c | 4
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x-dsd.c | 33
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x-dsd.h | 1
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c | 120
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h | 7
-rw-r--r--  sound/soc/codecs/wcd934x/wcd934x.c | 108
-rw-r--r--  sound/soc/codecs/wcd9xxx-common-v2.c | 5
-rw-r--r--  sound/soc/codecs/wcd9xxx-resmgr-v2.c | 2
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c | 287
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h | 14
192 files changed, 7629 insertions, 3697 deletions
diff --git a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
index 90abf0305319..68b8f09238e0 100644
--- a/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
+++ b/Documentation/devicetree/bindings/fb/mdss-dsi-panel.txt
@@ -249,6 +249,35 @@ Optional properties:
60 = 60 frames per second (default)
- qcom,mdss-dsi-panel-clockrate: A 64 bit value specifies the panel clock speed in Hz.
0 = default value.
+- qcom,mdss-mdp-kickoff-threshold: This property can be used to define a region
+        (in terms of scanlines) where the hardware is allowed to
+        trigger a data transfer from MDP to DSI.
+        If this property is used, the region must be defined by
+        setting two values, the low and the high thresholds:
+        <low_threshold high_threshold>
+        where the following condition must be met:
+        low_threshold < high_threshold
+        These values will be used by the driver in such a way that,
+        if the driver receives a request to kick off a transfer
+        (MDP to DSI), the transfer will be triggered only if the
+        following condition is satisfied:
+        low_threshold < scanline < high_threshold
+        If the condition is not met, then the driver will delay the
+        transfer by the time defined in the following property:
+        "qcom,mdss-mdp-kickoff-delay".
+        So in order to use this property, the delay property must
+        be defined as well and be greater than 0.
+- qcom,mdss-mdp-kickoff-delay: This property defines the delay in microseconds
+        that the driver will wait before triggering an MDP transfer
+        if the thresholds defined by the following property are not
+        met: "qcom,mdss-mdp-kickoff-threshold".
+        So in order to use this property, the threshold property
+        must be defined as well. Note that this delay cannot be zero
+        and should not be greater than one frame period, i.e. for
+        60 fps the value should not exceed 16666 us.
- qcom,mdss-mdp-transfer-time-us: Specifies the dsi transfer time for command mode
panels in microseconds. Driver uses this number to adjust
the clock rate according to the expected transfer time.
@@ -568,6 +597,8 @@ Example:
qcom,mdss-dsi-dma-trigger = <0>;
qcom,mdss-dsi-panel-framerate = <60>;
qcom,mdss-dsi-panel-clockrate = <424000000>;
+ qcom,mdss-mdp-kickoff-threshold = <11 2430>;
+ qcom,mdss-mdp-kickoff-delay = <1000>;
qcom,mdss-mdp-transfer-time-us = <12500>;
qcom,mdss-dsi-panel-timings = [7d 25 1d 00 37 33
22 27 1e 03 04 00];
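The kickoff gating described in the new mdss-dsi-panel.txt text boils down to a scanline-window check around the transfer trigger. A minimal C sketch of that logic, assuming hypothetical helpers mdss_read_scanline() and mdss_trigger_kickoff(), with the two properties already parsed into low, high and delay_us:

#include <linux/delay.h>
#include <linux/types.h>

/* Sketch only: gate an MDP->DSI kickoff on the scanline window from
 * qcom,mdss-mdp-kickoff-threshold, falling back to the delay from
 * qcom,mdss-mdp-kickoff-delay. Both helpers are hypothetical
 * stand-ins for the real driver plumbing.
 */
static void mdss_kickoff_in_window(u32 low, u32 high, u32 delay_us)
{
	u32 scanline = mdss_read_scanline();	/* hypothetical */

	/* Trigger only inside the allowed region... */
	if (scanline > low && scanline < high) {
		mdss_trigger_kickoff();		/* hypothetical */
		return;
	}

	/* ...otherwise back off by qcom,mdss-mdp-kickoff-delay. */
	usleep_range(delay_us, delay_us + 100);
	mdss_trigger_kickoff();
}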
diff --git a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
index de88a6eba7a5..b399145ea8a2 100644
--- a/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
+++ b/Documentation/devicetree/bindings/gpu/adreno-iommu.txt
@@ -36,8 +36,6 @@ Optional properties:
for secure buffer allocation
- qcom,secure_align_mask: A mask for determining how secure buffers need to
be aligned
-- qcom,coherent-htw: A boolean specifying if coherent hardware table walks should
- be enabled.
- List of sub nodes, one for each of the translation context banks supported.
The driver uses the names of these nodes to determine how they are used,
diff --git a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
index 721a4f72563e..1ab49edfe30c 100644
--- a/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
+++ b/Documentation/devicetree/bindings/iio/adc/qcom-rradc.txt
@@ -41,6 +41,10 @@ The channel list supported by the RRADC driver is available in the enum rradc_ch
located at drivers/iio/adc/qcom-rradc.c. Clients can use this index from the enum
as the channel number while requesting ADC reads.
+Optional property:
+- qcom,pmic-revid : Phandle pointing to the revision peripheral node. Use it to query the
+ PMIC fabrication ID for applying the appropriate temperature
+ compensation parameters.
Example:
/* RRADC node */
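The new qcom,pmic-revid property is a plain phandle reference; a minimal sketch of resolving it in a probe path follows. of_parse_phandle() and of_node_put() are the real DT APIs, while the fabrication-ID query itself is left as a stub since its API is not shown in this diff:

#include <linux/errno.h>
#include <linux/of.h>

/* Sketch: resolve the optional qcom,pmic-revid phandle so the driver
 * can pick fab-specific temperature compensation parameters.
 */
static int rradc_get_revid_node(struct device_node *np)
{
	struct device_node *revid;

	revid = of_parse_phandle(np, "qcom,pmic-revid", 0);
	if (!revid)
		return -ENODEV;	/* property is optional; caller may ignore */

	/* ...read the PMIC fabrication ID via the revid node here... */
	of_node_put(revid);
	return 0;
}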
diff --git a/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt
index 02f21835f870..1b68c695d2c5 100644
--- a/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt
+++ b/Documentation/devicetree/bindings/input/pixart-pat9125-switch.txt
@@ -6,5 +6,24 @@ to the Host processor. The host processor reads the direction and number of
steps over I2C and passes the data to the rest of the system.
Required properties:
+ - compatible : should be "pixart,pat9125".
+ - reg : i2c slave address of the device.
+ - interrupt-parent : parent of interrupt.
+ - interrupts : interrupt to indicate motion of the rotating switch.
- - compatible : should be "pixart,pat9125".
+Optional properties:
+ - pixart,inverse-x : boolean, use this to invert the x data before sending it to the input framework.
+ - pixart,inverse-y : boolean, use this to invert the y data before sending it to the input framework.
+ - pixart,press-enabled : boolean, use this to enable detection of button presses.
+
+Required properties if 'pixart,press-enabled' DT property is defined:
+ - pixart,press-keycode : keycode to be sent when press is detected by the driver.
+
+Example:
+ pixart_pat9125@75 {
+ compatible = "pixart,pat9125";
+ reg = <0x75>;
+ interrupt-parent = <&msm_gpio>;
+ interrupts = <98 0x2008>;
+ pixart,irq-gpio = <&msm_gpio 98 0x2008>;
+ };
diff --git a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
index bd05c8bebfc8..8365762e520f 100644
--- a/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
+++ b/Documentation/devicetree/bindings/leds/leds-qpnp-flash-v2.txt
@@ -38,18 +38,28 @@ Optional properties:
Default value is 4500000 uA.
- qcom,rparasitic-uohm : Integer property for flash current predictive mitigation indicating
parasitic component of battery resistance. Default value is 0 uOhm.
-- qcom,lmh-ocv-threshold-uv : Required property for flash current preemptive mitigation.
+- qcom,lmh-ocv-threshold-uv : Required property for flash current preemptive LMH mitigation.
Default value is 3700000 uV.
-- qcom,lmh-rbatt-threshold-uohm : Required property for flash current preemptive mitigation.
+- qcom,lmh-rbatt-threshold-uohm : Required property for flash current preemptive LMH mitigation.
Default value is 400000 uOhm.
-- qcom,lmh-mitigation-sel : Optional property to configure flash current preemptive mitigation.
+- qcom,lmh-mitigation-sel : Optional property to configure flash current preemptive LMH mitigation.
Accepted values are:
0: MITIGATION_DISABLED
1: MITIGATION_BY_ILED_THRESHOLD
2: MITIGATION_BY_SW
Default value is 2.
-- qcom,lmh-level : Optional property to configure flash current preemptive mitigation.
+- qcom,chgr-mitigation-sel : Optional property to configure flash current preemptive charger mitigation.
+ Accepted values are:
+ 0: MITIGATION_DISABLED
+ 1: MITIGATION_BY_ILED_THRESHOLD
+ 2: MITIGATION_BY_SW
+ Default value is 2.
+- qcom,lmh-level : Optional property to configure flash current preemptive LMH mitigation.
Accepted values are 0, 1, and 3. Default value is 0.
+- qcom,iled-thrsh-ma : Optional property to configure the LED current threshold at which HW
+ preemptive mitigation is triggered. Unit is mA. Default value is 1000.
+ Accepted values are in the range 0 - 3100, in steps of 100.
+ 0 disables autonomous HW mitigation.
- qcom,thermal-derate-en : Boolean property to enable flash current thermal mitigation.
- qcom,thermal-derate-current : Array of current limits for thermal mitigation. Required if
qcom,thermal-derate-en is specified. Unit is mA. Format is
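Given the documented 0 - 3100 mA range in 100 mA steps, the qcom,iled-thrsh-ma value presumably maps to its register encoding by dividing by the step size; a sketch under that assumption (the destination register is hypothetical and not named here):

#include <linux/errno.h>
#include <linux/types.h>

/* Sketch: encode qcom,iled-thrsh-ma (0..3100 mA, 100 mA steps) as a
 * register step count; 0 disables autonomous HW mitigation.
 */
static int iled_thrsh_ma_to_steps(unsigned int thrsh_ma, u8 *steps)
{
	if (thrsh_ma > 3100 || (thrsh_ma % 100))
		return -EINVAL;	/* outside the documented range/step */

	*steps = thrsh_ma / 100;	/* 100 mA per step, 0 = disabled */
	return 0;
}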
diff --git a/Documentation/devicetree/bindings/platform/msm/ipa.txt b/Documentation/devicetree/bindings/platform/msm/ipa.txt
index a8db893f6709..80f2d8f43e35 100644
--- a/Documentation/devicetree/bindings/platform/msm/ipa.txt
+++ b/Documentation/devicetree/bindings/platform/msm/ipa.txt
@@ -36,7 +36,6 @@ Optional:
compatible "qcom,ipa-smmu-wlan-cb"
- ipa_smmu_uc: uc SMMU device
compatible "qcom,ipa-smmu-uc-cb"
-- qcom,smmu-disable-htw: boolean value to turn off SMMU page table caching
- qcom,use-a2-service: determine if A2 service will be used
- qcom,use-ipa-tethering-bridge: determine if tethering bridge will be used
- qcom,use-ipa-bamdma-a2-bridge: determine if a2/ipa hw bridge will be used
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
index bd358593fcb3..caabcd347a72 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-fg-gen3.txt
@@ -87,7 +87,8 @@ First Level Node - FG Gen3 device
Value type: <u32>
Definition: Percentage of monotonic SOC increase upon which the delta
SOC interrupt will be triggered. If this property is not
- specified, then the default value will be 1.
+ specified, then the default value will be 1. Possible
+ values are in the range of 0 to 12.
- qcom,fg-recharge-soc-thr
Usage: optional
@@ -145,6 +146,112 @@ First Level Node - FG Gen3 device
Value type: <bool>
Definition: Enables the cycle counter feature.
+- qcom,fg-force-load-profile
+ Usage: optional
+ Value type: <bool>
+ Definition: If set, the battery profile will be force-loaded if the profile
+ loaded earlier by the bootloader doesn't match the profile
+ available in the device tree.
+
+- qcom,cl-start-capacity
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery SOC threshold to start the capacity learning.
+ If this is not specified, then the default value used
+ will be 15.
+
+- qcom,cl-min-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Lower limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 150. Unit is in decidegC.
+
+- qcom,cl-max-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Upper limit of battery temperature to start the capacity
+ learning. If this is not specified, then the default value
+ used will be 450 (45C). Unit is in decidegC.
+
+- qcom,cl-max-increment
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity increment allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 5 (0.5%). Unit is in decipercentage.
+
+- qcom,cl-max-decrement
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum capacity decrement allowed per capacity learning
+ cycle. If this is not specified, then the default value
+ used will be 100 (10%). Unit is in decipercentage.
+
+- qcom,cl-min-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: Minimum limit that the capacity cannot go below in a
+ capacity learning cycle. If this is not specified, then
+ the default value is 0. Unit is in decipercentage.
+
+- qcom,cl-max-limit
+ Usage: optional
+ Value type: <u32>
+ Definition: Maximum limit that the capacity cannot go above in a
+ capacity learning cycle. If this is not specified, then
+ the default value is 0. Unit is in decipercentage.
+
+- qcom,fg-jeita-hyst-temp
+ Usage: optional
+ Value type: <u32>
+ Definition: Hysteresis applied to Jeita temperature comparison.
+ Possible values are:
+ 0 - No hysteresis
+ 1,2,3 - Value in Celsius.
+
+- qcom,fg-batt-temp-delta
+ Usage: optional
+ Value type: <u32>
+ Definition: Battery temperature delta interrupt threshold. Possible
+ values are: 2, 4, 6 and 10. Unit is in Kelvin.
+
+- qcom,hold-soc-while-full:
+ Usage: optional
+ Value type: <bool>
+ Definition: A boolean property that when defined holds SOC at 100% when
+ the battery is full.
+
+- qcom,ki-coeff-soc-dischg:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Array of monotonic SOC threshold values to change the ki
+ coefficient for medium discharge current during discharge.
+ This should be defined in ascending order and in the
+ range of 0-100. Array limit is set to 3.
+
+- qcom,ki-coeff-med-dischg:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Array of ki coefficient values for medium discharge current
+ during discharge. These values will be applied when the
+ monotonic SOC goes below the SOC threshold specified under
+ qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+ property should be specified if qcom,ki-coeff-soc-dischg
+ is specified to make it fully functional. Value has no
+ unit. Allowed range is 0 to 62200 in micro units.
+
+- qcom,ki-coeff-hi-dischg:
+ Usage: optional
+ Value type: <prop-encoded-array>
+ Definition: Array of ki coefficient values for high discharge current
+ during discharge. These values will be applied when the
+ monotonic SOC goes below the SOC threshold specified under
+ qcom,ki-coeff-soc-dischg. Array limit is set to 3. This
+ property should be specified if qcom,ki-coeff-soc-dischg
+ is specified to make it fully functional. Value has no
+ unit. Allowed range is 0 to 62200 in micro units.
+
==========================================================
Second Level Nodes - Peripherals managed by FG Gen3 driver
==========================================================
@@ -175,6 +282,9 @@ pmicobalt_fg: qpnp,fg {
qcom,pmic-revid = <&pmicobalt_revid>;
io-channels = <&pmicobalt_rradc 3>;
io-channel-names = "rradc_batt_id";
+ qcom,ki-coeff-soc-dischg = <30 60 90>;
+ qcom,ki-coeff-med-dischg = <800 1000 1400>;
+ qcom,ki-coeff-hi-dischg = <1200 1500 2100>;
status = "okay";
qcom,fg-batt-soc@4000 {
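The ki-coeff arrays above pair ascending SOC thresholds with per-band coefficients, and a coefficient takes effect once the monotonic SOC drops below its threshold. A minimal sketch of that selection, assuming the arrays were already parsed (e.g. via of_property_read_u32_array()) into three-element buffers; names are illustrative only:

#include <linux/types.h>

/* Sketch: pick the ki coefficient for the current monotonic SOC.
 * soc_thr[] is qcom,ki-coeff-soc-dischg in ascending order (e.g.
 * <30 60 90>) and coeff[] is the matching -med- or -hi- entry.
 */
static u32 ki_coeff_for_soc(const u32 soc_thr[3], const u32 coeff[3], u32 msoc)
{
	u32 val = 0;	/* 0: no band matched, keep the default */
	int i;

	for (i = 2; i >= 0; i--)
		if (msoc < soc_thr[i])
			val = coeff[i];	/* lowest matching band wins */

	return val;
}

With the example values above (<30 60 90> and <800 1000 1400>), a monotonic SOC of 50 falls below both 90 and 60, so the 60-band coefficient (1000) is the one applied.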
diff --git a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
index 21404dfc4b7b..12ac75a8608c 100644
--- a/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
+++ b/Documentation/devicetree/bindings/power/qcom-charger/qpnp-smb2.txt
@@ -53,6 +53,12 @@ Charger specific properties:
Definition: Specifies the USB input current limit in micro-amps.
If the value is not present, 1.5Amps is used as default.
+- qcom,usb-ocl-ua
+ Usage: optional
+ Value type: <u32>
+ Definition: Specifies the OTG output current limit in micro-amps.
+ If the value is not present, 1.5Amps is used as default.
+
- qcom,dc-icl-ua
Usage: optional
Value type: <u32>
diff --git a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
index c784a01d6411..8cb513b5605f 100644
--- a/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
+++ b/Documentation/devicetree/bindings/pwm/pwm-qpnp.txt
@@ -15,6 +15,7 @@ Required device bindings:
- reg-names: Name for the above register.
"qpnp-lpg-channel-base" = physical base address of the
controller's LPG channel register.
+- qcom,lpg-lut-size: LPG LUT size.
- qcom,channel-id: channel Id for the PWM.
- qcom,supported-sizes: Supported PWM sizes.
Following three pwm sizes lists are supported by PWM/LPG controllers.
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
index 95a8e80ccdbd..9ad9e4adce00 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-cmd.dtsi
@@ -43,7 +43,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-te-pin-select = <1>;
qcom,mdss-dsi-wr-mem-start = <0x2c>;
qcom,mdss-dsi-wr-mem-continue = <0x3c>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
index fd11be721dbb..6b549a4af6eb 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dsc-wqxga-video.dtsi
@@ -76,7 +76,7 @@
qcom,mdss-dsi-t-clk-pre = <0x24>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,compression-mode = "dsc";
qcom,config-select = <&dsi_nt35597_dsc_video_config0>;
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
index b6f19b78ea70..1e42d0846acf 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -59,33 +59,33 @@
qcom,mdss-dsi-te-check-enable;
qcom,mdss-dsi-te-using-te-pin;
qcom,ulps-enabled;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 10
- 15 01 00 00 10 00 02 b0 03
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 24
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 c6 06
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 10
+ 15 01 00 00 00 00 02 b0 03
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 24
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 c6 06
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
index 367384a8c3e5..82413bfbca89 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35597-dualmipi-wqxga-video.dtsi
@@ -29,30 +29,30 @@
qcom,mdss-dsi-bpp = <24>;
qcom,mdss-dsi-underflow-color = <0xff>;
qcom,mdss-dsi-border-color = <0>;
- qcom,mdss-dsi-on-command = [15 01 00 00 10 00 02 ff 10
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 ba 03
- 15 01 00 00 10 00 02 e5 01
- 15 01 00 00 10 00 02 35 00
- 15 01 00 00 10 00 02 bb 03
- 15 01 00 00 10 00 02 b0 03
- 39 01 00 00 10 00 06 3b 03 08 08 64 9a
- 15 01 00 00 10 00 02 ff e0
- 15 01 00 00 10 00 02 fb 01
- 15 01 00 00 10 00 02 6b 3d
- 15 01 00 00 10 00 02 6c 3d
- 15 01 00 00 10 00 02 6d 3d
- 15 01 00 00 10 00 02 6e 3d
- 15 01 00 00 10 00 02 6f 3d
- 15 01 00 00 10 00 02 35 02
- 15 01 00 00 10 00 02 36 72
- 15 01 00 00 10 00 02 37 10
- 15 01 00 00 10 00 02 08 c0
- 15 01 00 00 10 00 02 ff 10
- 05 01 00 00 a0 00 02 11 00
- 05 01 00 00 a0 00 02 29 00];
- qcom,mdss-dsi-off-command = [05 01 00 00 78 00 02 28 00
- 05 01 00 00 78 00 02 10 00];
+ qcom,mdss-dsi-on-command = [15 01 00 00 00 00 02 ff 10
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 ba 03
+ 15 01 00 00 00 00 02 e5 01
+ 15 01 00 00 00 00 02 35 00
+ 15 01 00 00 00 00 02 bb 03
+ 15 01 00 00 00 00 02 b0 03
+ 39 01 00 00 00 00 06 3b 03 08 08 64 9a
+ 15 01 00 00 00 00 02 ff e0
+ 15 01 00 00 00 00 02 fb 01
+ 15 01 00 00 00 00 02 6b 3d
+ 15 01 00 00 00 00 02 6c 3d
+ 15 01 00 00 00 00 02 6d 3d
+ 15 01 00 00 00 00 02 6e 3d
+ 15 01 00 00 00 00 02 6f 3d
+ 15 01 00 00 00 00 02 35 02
+ 15 01 00 00 00 00 02 36 72
+ 15 01 00 00 00 00 02 37 10
+ 15 01 00 00 00 00 02 08 c0
+ 15 01 00 00 00 00 02 ff 10
+ 05 01 00 00 78 00 02 11 00
+ 05 01 00 00 32 00 02 29 00];
+ qcom,mdss-dsi-off-command = [05 01 00 00 0a 00 02 28 00
+ 05 01 00 00 3c 00 02 10 00];
qcom,mdss-dsi-on-command-state = "dsi_lp_mode";
qcom,mdss-dsi-off-command-state = "dsi_hs_mode";
qcom,mdss-dsi-h-sync-pulse = <0>;
@@ -69,7 +69,7 @@
qcom,mdss-dsi-t-clk-pre = <0x2d>;
qcom,mdss-dsi-dma-trigger = "trigger_sw";
qcom,mdss-dsi-mdp-trigger = "none";
- qcom,mdss-dsi-reset-sequence = <1 20>, <0 20>, <1 50>;
+ qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-min-refresh-rate = <55>;
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
index 90df1d0c1ac0..69067f5f1cc7 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-ascent-3450mah.dtsi
@@ -13,6 +13,7 @@
qcom,ascent_3450mah {
/* #Ascent_860_82209_0000_3450mAh_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3450>;
qcom,batt-id-kohm = <60>;
qcom,battery-beta = <3435>;
diff --git a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
index 2c1edde56d6a..c3f23b75fa9c 100644
--- a/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
+++ b/arch/arm/boot/dts/qcom/fg-gen3-batterydata-itech-3000mah.dtsi
@@ -13,6 +13,7 @@
qcom,itech_3000mah {
/* #Itech_B00826LF_3000mAh_ver1660_averaged_MasterSlave_Jul20th2016*/
qcom,max-voltage-uv = <4350000>;
+ qcom,fg-cc-cv-threshold-mv = <4340>;
qcom,nom-batt-capacity-mah = <3000>;
qcom,batt-id-kohm = <100>;
qcom,battery-beta = <3450>;
diff --git a/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi b/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi
new file mode 100644
index 000000000000..6550ddcad86c
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm-gdsc-falcon.dtsi
@@ -0,0 +1,159 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ /* GCC GDSCs */
+ gdsc_usb30: qcom,gdsc@10f004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_usb30";
+ reg = <0x10f004 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_ufs: qcom,gdsc@175004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_ufs";
+ reg = <0x175004 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_hlos1_vote_lpass_adsp: qcom,gdsc@17d034 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos1_vote_lpass_adsp";
+ reg = <0x17d034 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_hlos1_vote_turing_adsp: qcom,gdsc@17d04c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos1_vote_turing_adsp";
+ reg = <0x17d04c 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_hlos2_vote_turing_adsp: qcom,gdsc@17e04c {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_hlos2_vote_turing_adsp";
+ reg = <0x17e04c 0x4>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ /* MMSS GDSCs */
+ bimc_smmu_hw_ctrl: syscon@c8ce024 {
+ compatible = "syscon";
+ reg = <0xc8ce024 0x4>;
+ };
+
+ gdsc_bimc_smmu: qcom,gdsc@c8ce020 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_bimc_smmu";
+ reg = <0xc8ce020 0x4>;
+ hw-ctrl-addr = <&bimc_smmu_hw_ctrl>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <500>;
+ status = "disabled";
+ };
+
+ gdsc_venus: qcom,gdsc@c8c1024 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus";
+ reg = <0xc8c1024 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_venus_core0: qcom,gdsc@c8c1040 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_venus_core0";
+ reg = <0xc8c1040 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_camss_top: qcom,gdsc@c8c34a0 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_camss_top";
+ reg = <0xc8c34a0 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe0: qcom,gdsc@c8c3664 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe0";
+ reg = <0xc8c3664 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_vfe1: qcom,gdsc@c8c3674 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_vfe1";
+ reg = <0xc8c3674 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_cpp: qcom,gdsc@c8c36d4 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_cpp";
+ reg = <0xc8c36d4 0x4>;
+ status = "disabled";
+ };
+
+ gdsc_mdss: qcom,gdsc@c8c2304 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_mdss";
+ reg = <0xc8c2304 0x4>;
+ status = "disabled";
+ };
+
+ /* GPU GDSCs */
+ gpu_cx_hw_ctrl: syscon@5066008 {
+ compatible = "syscon";
+ reg = <0x5066008 0x4>;
+ };
+
+ gdsc_gpu_cx: qcom,gdsc@5066004 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_gpu_cx";
+ reg = <0x5066004 0x4>;
+ hw-ctrl-addr = <&gpu_cx_hw_ctrl>;
+ qcom,no-status-check-on-disable;
+ qcom,gds-timeout = <2000>;
+ status = "disabled";
+ };
+
+ /* GPU GX GDSCs */
+ gpu_gx_domain_addr: syscon@5065130 {
+ compatible = "syscon";
+ reg = <0x5065130 0x4>;
+ };
+
+ gpu_gx_sw_reset: syscon@5066090 {
+ compatible = "syscon";
+ reg = <0x5066090 0x4>;
+ };
+
+ gdsc_gpu_gx: qcom,gdsc@5066094 {
+ compatible = "qcom,gdsc";
+ regulator-name = "gdsc_gpu_gx";
+ reg = <0x5066094 0x4>;
+ domain-addr = <&gpu_gx_domain_addr>;
+ sw-reset = <&gpu_gx_sw_reset>;
+ qcom,retain-periph;
+ qcom,reset-aon-logic;
+ status = "disabled";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
index 4f76276b2790..28d230dfb6bf 100644
--- a/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pmicobalt.dtsi
@@ -311,6 +311,7 @@
#address-cells = <1>;
#size-cells = <0>;
#io-channel-cells = <1>;
+ qcom,pmic-revid = <&pmicobalt_revid>;
};
pmicobalt_fg: qpnp,fg {
@@ -386,6 +387,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <1>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <0>;
@@ -399,6 +401,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <2>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <1>;
@@ -412,6 +415,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <3>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <2>;
@@ -424,6 +428,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <4>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <3>;
@@ -436,6 +441,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <5>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <4>;
@@ -448,6 +454,7 @@
<0xb042 0x7e>;
reg-names = "qpnp-lpg-channel-base",
"qpnp-lpg-lut-base";
+ qcom,lpg-lut-size = <0x7e>;
qcom,channel-id = <6>;
qcom,supported-sizes = <6>, <9>;
qcom,ramp-index = <5>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
index 085ca0187ee6..fcceac6e2469 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-cdp.dtsi
@@ -21,6 +21,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
index d973bc5ed84f..0278cbde90ce 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mdss-panels.dtsi
@@ -82,6 +82,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
@@ -90,6 +91,7 @@
qcom,mdss-dsi-panel-timings = [00 1c 08 07 23 22 07 07 05 03 04 00];
qcom,mdss-dsi-t-clk-post = <0x0d>;
qcom,mdss-dsi-t-clk-pre = <0x2d>;
+ qcom,cmd-sync-wait-broadcast;
};
&dsi_dual_nt35597_truly_video {
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
index 99402e3033ed..f9bb6e512d33 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-mtp.dtsi
@@ -22,6 +22,8 @@
qca,bt-vdd-pa-supply = <&pmcobalt_l17_pin_ctrl>;
qca,bt-vdd-ldo-supply = <&pmcobalt_l25_pin_ctrl>;
qca,bt-chip-pwd-supply = <&pmicobalt_bob_pin1>;
+ clocks = <&clock_gcc clk_rf_clk2>;
+ clock-names = "rf_clk2";
qca,bt-vdd-io-voltage-level = <1352000 1352000>;
qca,bt-vdd-xtal-voltage-level = <2040000 2040000>;
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
index 6018124caf68..c6d7defbf35c 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-pm.dtsi
@@ -24,7 +24,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -40,7 +40,7 @@
qcom,vctl-port = <0x0>;
qcom,phase-port = <0x1>;
qcom,saw2-avs-ctl = <0x1010031>;
- qcom,saw2-avs-limit = <0x4000208>;
+ qcom,saw2-avs-limit = <0x4580458>;
qcom,pfm-port = <0x2>;
};
@@ -279,6 +279,52 @@
qcom,sleep-stats-version = <2>;
};
+ qcom,rpm-rail-stats@200000 {
+ compatible = "qcom,rpm-rail-stats";
+ reg = <0x200000 0x100>,
+ <0x29000c 0x4>;
+ reg-names = "phys_addr_base",
+ "offset_addr";
+ };
+
+ qcom,rpm-log@200000 {
+ compatible = "qcom,rpm-log";
+ reg = <0x200000 0x4000>,
+ <0x290018 0x4>;
+ qcom,rpm-addr-phys = <0x200000>;
+ qcom,offset-version = <4>;
+ qcom,offset-page-buffer-addr = <36>;
+ qcom,offset-log-len = <40>;
+ qcom,offset-log-len-mask = <44>;
+ qcom,offset-page-indices = <56>;
+ };
+
+ qcom,rpm-master-stats@778150 {
+ compatible = "qcom,rpm-master-stats";
+ reg = <0x778150 0x5000>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "SLPI";
+ qcom,master-stats-version = <2>;
+ qcom,master-offset = <4096>;
+ };
+
+ rpm_msg_ram: memory@0x200000 {
+ compatible = "qcom,rpm-msg-ram";
+ reg = <0x200000 0x1000>,
+ <0x290000 0x1000>;
+ };
+
+ rpm_code_ram: rpm-memory@0x778000 {
+ compatible = "qcom,rpm-code-ram";
+ reg = <0x778000 0x5000>;
+ };
+
+ qcom,system-stats {
+ compatible = "qcom,system-stats";
+ qcom,rpm-msg-ram = <&rpm_msg_ram>;
+ qcom,rpm-code-ram = <&rpm_code_ram>;
+ qcom,masters = "APSS", "MPSS", "ADSP", "SLPI";
+ };
+
qcom,mpm@7781b8 {
compatible = "qcom,mpm-v2";
reg = <0x7781b8 0x1000>, /* MSM_RPM_MPM_BASE 4K */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
index c2d45ec3ef07..2a61cccad273 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-regulator.dtsi
@@ -537,6 +537,28 @@
qcom,enable-time = <500>;
};
};
+
+ qcom,pmcobalt@1 {
+ pmcobalt_s10: regulator@2f00 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x2f00 0x100>;
+ regulator-name = "pmcobalt_s10";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+
+ pmcobalt_s13: regulator@3800 {
+ compatible = "qcom,qpnp-regulator";
+ reg = <0x3800 0x100>;
+ regulator-name = "pmcobalt_s13";
+ regulator-min-microvolt = <572000>;
+ regulator-max-microvolt = <1112000>;
+ qcom,enable-time = <500>;
+ regulator-always-on;
+ };
+ };
};
/* Stub regulators */
@@ -590,6 +612,9 @@
qcom,cpr-panic-reg-name-list =
"PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s10>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -712,6 +737,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
@@ -752,6 +784,9 @@
qcom,cpr-panic-reg-name-list =
"PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
+ qcom,cpr-aging-ref-voltage = <1112000>;
+ vdd-supply = <&pmcobalt_s13>;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -894,6 +929,13 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-max-voltage-adjustment = <15000>;
+ qcom,cpr-aging-ref-corner = <25>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
+ qcom,allow-aging-open-loop-voltage-adjustment =
+ <1>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
index 74aae4051ad6..8016a3822a7f 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt-v2.dtsi
@@ -227,9 +227,20 @@
qcom,max-bandwidth-per-pipe-kbps = <4700000>;
};
+&pmcobalt_s10 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
+&pmcobalt_s13 {
+ regulator-min-microvolt = <568000>;
+ regulator-max-microvolt = <1056000>;
+};
+
&apc0_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc0_pwrcl_vreg {
@@ -371,11 +382,16 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <22 22>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
};
&apc1_cpr {
compatible = "qcom,cprh-msmcobalt-v2-kbss-regulator";
qcom,cpr-corner-switch-delay-time = <1042>;
+ qcom,cpr-aging-ref-voltage = <1056000>;
};
&apc1_perfcl_vreg {
@@ -527,6 +543,10 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-aging-ref-corner = <30 26>;
+ qcom,cpr-aging-ro-scaling-factor = <2950>;
+ qcom,allow-aging-voltage-adjustment = <0>;
};
&pm8005_s1 {
@@ -649,11 +669,10 @@
qcom,load-freq-tbl =
/* Encoders */
<1105920 533000000 0x55555555>, /* 4kx2304@30 */ /*TURBO*/
- < 979200 444000000 0x55555555>, /* 1080p@120,1440p@60,
+ <1036800 444000000 0x55555555>, /* 720p@240, 1080p@120,1440p@60,
* UHD@30 */ /*NOMINAL*/
- < 939700 355200000 0x55555555>, /* 4kx2304@24 */ /*SVSL1*/
- < 489600 269330000 0x55555555>, /* 1080p@60, 2560x1440@30 */
- /* SVS */
+ < 829440 355200000 0x55555555>, /* UHD/4096x2160@30 SVSL1 */
+ < 489600 269330000 0x55555555>, /* 1080p@60 SVS */
< 432000 200000000 0x55555555>, /* 720p@120, 1080p@30 */
/* SVS2 */
@@ -665,7 +684,7 @@
<1675472 355200000 0xffffffff>, /* 4kx2304@44 */ /*SVSL1*/
<1105920 269330000 0xffffffff>, /* UHD/4k2304@30, 1080p@120 */
/* SVS */
- < 864000 200000000 0xffffffff>; /* 720p@240, 1080p@60 */
+ < 829440 200000000 0xffffffff>; /* 720p@120, 1080p@60 */
/* SVS2 */
qcom,imem-ab-tbl =
@@ -681,11 +700,11 @@
<1728000 1728000 2211840 0x3f00000c>,
/* Encoder */
/* Load > Nominal, Nominal <-> Turbo Eg. 4kx2304@30 */
- <972000 972000 1105920 0x04000004>,
+ <1036800 1036800 1105920 0x04000004>,
/* Load > SVSL1, SVSL1<-> Nominal Eg. 3840x2160@30 */
- <939700 939700 972000 0x04000004>,
+ < 829440 829440 1036800 0x04000004>,
/* Load > SVS , SVS <-> SVSL1 Eg. 4kx2304@24 */
- <489600 489600 939700 0x04000004>;
+ < 489600 489600 829440 0x04000004>;
qcom,dcvs-limit = /* Min Frame size, Min MBs/sec */
<32400 30>, /* Encoder 3840x2160@30 */
@@ -693,6 +712,18 @@
};
+&soc {
+ /* Gold L2 SAW */
+ qcom,spm@178120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
+
+ /* Silver L2 SAW */
+ qcom,spm@179120000 {
+ qcom,saw2-avs-limit = <0x4200420>;
+ };
+};
+
/* GPU overrides */
&msm_gpu {
/* Updated chip ID */
diff --git a/arch/arm/boot/dts/qcom/msmcobalt.dtsi b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
index aaa0bec51a27..b21e2dcf8c1a 100644
--- a/arch/arm/boot/dts/qcom/msmcobalt.dtsi
+++ b/arch/arm/boot/dts/qcom/msmcobalt.dtsi
@@ -1200,41 +1200,49 @@
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 2>;
qcom,secure-context-bank;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb1 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 8>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb2 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 9>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb3 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 10>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb4 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 11>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb6 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 5>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb7 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 6>;
+ dma-coherent;
};
qcom,msm_fastrpc_compute_cb8 {
compatible = "qcom,msm-fastrpc-compute-cb";
label = "adsprpc-smd";
iommus = <&lpass_q6_smmu 7>;
+ dma-coherent;
};
};
@@ -2889,11 +2897,6 @@
vdd-3.3-ch0-supply = <&pmcobalt_l25_pin_ctrl>;
qcom,vdd-0.8-cx-mx-config = <800000 800000>;
qcom,vdd-3.3-ch0-config = <3104000 3312000>;
- qcom,msm-bus,name = "msm-icnss";
- qcom,msm-bus,num-cases = <2>;
- qcom,msm-bus,num-paths = <1>;
- qcom,msm-bus,vectors-KBps = <81 10065 0 0>,
- <81 10065 0 16000>;
qcom,icnss-vadc = <&pmcobalt_vadc>;
qcom,icnss-adc_tm = <&pmcobalt_adc_tm>;
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
index bdbcd9d7b6f9..e5db2766c553 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon-smp2p.dtsi
@@ -172,4 +172,27 @@
compatible = "qcom,smp2pgpio_test_smp2p_5_out";
gpios = <&smp2pgpio_smp2p_5_out 0 0>;
};
+
+ /* ssr - inbound entry from lpass */
+ smp2pgpio_ssr_smp2p_2_in: qcom,smp2pgpio-ssr-smp2p-2-in {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "slave-kernel";
+ qcom,remote-pid = <2>;
+ qcom,is-inbound;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
+
+ /* ssr - outbound entry to lpass */
+ smp2pgpio_ssr_smp2p_2_out: qcom,smp2pgpio-ssr-smp2p-2-out {
+ compatible = "qcom,smp2pgpio";
+ qcom,entry-name = "master-kernel";
+ qcom,remote-pid = <2>;
+ gpio-controller;
+ #gpio-cells = <2>;
+ interrupt-controller;
+ #interrupt-cells = <2>;
+ };
};
diff --git a/arch/arm/boot/dts/qcom/msmfalcon.dtsi b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
index df04d1a57683..2b2a201db8bc 100644
--- a/arch/arm/boot/dts/qcom/msmfalcon.dtsi
+++ b/arch/arm/boot/dts/qcom/msmfalcon.dtsi
@@ -670,12 +670,44 @@
<0 425 0>; /* CE11 */
qcom,wlan-msa-memory = <0x100000>;
};
+
+ qcom,lpass@15700000 {
+ compatible = "qcom,pil-tz-generic";
+ reg = <0x15700000 0x00100>;
+ interrupts = <0 162 1>;
+
+ vdd_cx-supply = <&pmfalcon_s3b_level>;
+ qcom,proxy-reg-names = "vdd_cx";
+ qcom,vdd_cx-uV-uA = <RPM_SMD_REGULATOR_LEVEL_TURBO 100000>;
+
+ clocks = <&clock_rpmcc CXO_PIL_LPASS_CLK>;
+ clock-names = "xo";
+ qcom,proxy-clock-names = "xo";
+
+ qcom,pas-id = <1>;
+ qcom,proxy-timeout-ms = <10000>;
+ qcom,smem-id = <423>;
+ qcom,sysmon-id = <1>;
+ qcom,ssctl-instance-id = <0x14>;
+ qcom,firmware-name = "adsp";
+ memory-region = <&adsp_fw_mem>;
+
+ /* GPIO inputs from lpass */
+ qcom,gpio-err-fatal = <&smp2pgpio_ssr_smp2p_2_in 0 0>;
+ qcom,gpio-proxy-unvote = <&smp2pgpio_ssr_smp2p_2_in 2 0>;
+ qcom,gpio-err-ready = <&smp2pgpio_ssr_smp2p_2_in 1 0>;
+ qcom,gpio-stop-ack = <&smp2pgpio_ssr_smp2p_2_in 3 0>;
+
+ /* GPIO output to lpass */
+ qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_2_out 0 0>;
+ status = "ok";
+ };
};
#include "msmfalcon-ion.dtsi"
#include "msmfalcon-bus.dtsi"
#include "msmfalcon-regulator.dtsi"
-#include "msm-gdsc-cobalt.dtsi"
+#include "msm-gdsc-falcon.dtsi"
&gdsc_usb30 {
clock-names = "core_clk";
@@ -699,6 +731,14 @@
status = "ok";
};
+&gdsc_hlos1_vote_turing_adsp {
+ status = "ok";
+};
+
+&gdsc_hlos2_vote_turing_adsp {
+ status = "ok";
+};
+
&gdsc_venus {
status = "ok";
};
@@ -749,5 +789,6 @@
&gdsc_gpu_cx {
status = "ok";
};
+
#include "msm-pmfalcon.dtsi"
#include "msm-pm2falcon.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msmtriton.dtsi b/arch/arm/boot/dts/qcom/msmtriton.dtsi
index 71b4da9bb2d0..09bb5f081602 100644
--- a/arch/arm/boot/dts/qcom/msmtriton.dtsi
+++ b/arch/arm/boot/dts/qcom/msmtriton.dtsi
@@ -16,6 +16,7 @@
#include <dt-bindings/clock/qcom,mmcc-msmfalcon.h>
#include <dt-bindings/clock/qcom,rpmcc.h>
#include <dt-bindings/interrupt-controller/arm-gic.h>
+#include <dt-bindings/regulator/qcom,rpm-smd-regulator.h>
/ {
model = "Qualcomm Technologies, Inc. MSMTRITON";
@@ -543,3 +544,78 @@
};
#include "msmtriton-ion.dtsi"
+#include "msmfalcon-regulator.dtsi"
+#include "msm-gdsc-falcon.dtsi"
+
+&gdsc_usb30 {
+ clock-names = "core_clk";
+ clocks = <&clock_gcc GCC_USB30_MASTER_CLK>;
+ status = "ok";
+};
+
+&gdsc_ufs {
+ status = "ok";
+};
+
+&gdsc_bimc_smmu {
+ clock-names = "bus_clk";
+ clocks = <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>;
+ proxy-supply = <&gdsc_bimc_smmu>;
+ qcom,proxy-consumer-enable;
+ status = "ok";
+};
+
+&gdsc_hlos1_vote_lpass_adsp {
+ status = "ok";
+};
+
+&gdsc_venus {
+ status = "ok";
+};
+
+&gdsc_venus_core0 {
+ qcom,support-hw-trigger;
+ status = "ok";
+};
+
+&gdsc_camss_top {
+ status = "ok";
+};
+
+&gdsc_vfe0 {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_vfe1 {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_cpp {
+ parent-supply = <&gdsc_camss_top>;
+ status = "ok";
+};
+
+&gdsc_mdss {
+ clock-names = "bus_clk", "rot_clk";
+ clocks = <&clock_mmss MMSS_MDSS_AXI_CLK>,
+ <&clock_mmss MMSS_MDSS_ROT_CLK>;
+ proxy-supply = <&gdsc_mdss>;
+ qcom,proxy-consumer-enable;
+ status = "ok";
+};
+
+&gdsc_gpu_gx {
+ clock-names = "bimc_core_clk", "core_clk", "core_root_clk";
+ clocks = <&clock_gcc GCC_GPU_BIMC_GFX_CLK>,
+ <&clock_gfx GPUCC_GFX3D_CLK>,
+ <&clock_gfx GFX3D_CLK_SRC>;
+ qcom,force-enable-root-clk;
+ parent-supply = <&gfx_vreg_corner>;
+ status = "ok";
+};
+
+&gdsc_gpu_cx {
+ status = "ok";
+};
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 036d6aa5c062..799e43f09a11 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -13,6 +13,7 @@ CONFIG_LOG_CPU_MAX_BUF_SHIFT=17
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
CONFIG_SCHED_HMP_CSTATE_AWARE=y
@@ -21,6 +22,7 @@ CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
CONFIG_SCHED_AUTOGROUP=y
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -435,6 +437,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index 77f0129776a3..dfd658d815fe 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -13,6 +13,7 @@ CONFIG_CGROUP_DEBUG=y
CONFIG_CGROUP_FREEZER=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
+CONFIG_CGROUP_SCHEDTUNE=y
CONFIG_CGROUP_SCHED=y
CONFIG_RT_GROUP_SCHED=y
CONFIG_SCHED_HMP=y
@@ -21,6 +22,7 @@ CONFIG_SCHED_CORE_CTL=y
CONFIG_NAMESPACES=y
# CONFIG_UTS_NS is not set
# CONFIG_PID_NS is not set
+CONFIG_SCHED_TUNE=y
CONFIG_BLK_DEV_INITRD=y
# CONFIG_RD_XZ is not set
# CONFIG_RD_LZO is not set
@@ -437,6 +439,7 @@ CONFIG_USB_CONFIGFS_F_MTP=y
CONFIG_USB_CONFIGFS_F_PTP=y
CONFIG_USB_CONFIGFS_F_ACC=y
CONFIG_USB_CONFIGFS_UEVENT=y
+CONFIG_USB_CONFIGFS_F_MIDI=y
CONFIG_USB_CONFIGFS_F_HID=y
CONFIG_USB_CONFIGFS_F_DIAG=y
CONFIG_USB_CONFIGFS_F_GSI=y
diff --git a/arch/arm64/configs/msmfalcon_defconfig b/arch/arm64/configs/msmfalcon_defconfig
index 34f0da3c37a4..348c34a94119 100644
--- a/arch/arm64/configs/msmfalcon_defconfig
+++ b/arch/arm64/configs/msmfalcon_defconfig
@@ -519,6 +519,7 @@ CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
+CONFIG_ICNSS_DEBUG=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
diff --git a/drivers/char/adsprpc.c b/drivers/char/adsprpc.c
index 13116f010e89..67c1207d35be 100644
--- a/drivers/char/adsprpc.c
+++ b/drivers/char/adsprpc.c
@@ -163,6 +163,7 @@ struct fastrpc_smmu {
int enabled;
int faults;
int secure;
+ int coherent;
};
struct fastrpc_session_ctx {
@@ -1129,6 +1130,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
for (oix = 0; oix < inbufs + outbufs; ++oix) {
int i = ctx->overps[oix]->raix;
struct fastrpc_mmap *map = ctx->maps[i];
+ if (ctx->fl->sctx->smmu.coherent)
+ continue;
if (map && map->uncached)
continue;
if (rpra[i].buf.len && ctx->overps[oix]->mstart)
@@ -1141,7 +1144,8 @@ static int get_args(uint32_t kernel, struct smq_invoke_ctx *ctx)
rpra[inh + i].buf.len = ctx->lpra[inh + i].buf.len;
rpra[inh + i].h = ctx->lpra[inh + i].h;
}
- dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
+ if (!ctx->fl->sctx->smmu.coherent)
+ dmac_flush_range((char *)rpra, (char *)rpra + ctx->used);
bail:
return err;
}
@@ -1372,13 +1376,15 @@ static int fastrpc_internal_invoke(struct fastrpc_file *fl, uint32_t mode,
goto bail;
}
- inv_args_pre(ctx);
- if (FASTRPC_MODE_SERIAL == mode)
- inv_args(ctx);
+ if (!fl->sctx->smmu.coherent) {
+ inv_args_pre(ctx);
+ if (mode == FASTRPC_MODE_SERIAL)
+ inv_args(ctx);
+ }
VERIFY(err, 0 == fastrpc_invoke_send(ctx, kernel, invoke->handle));
if (err)
goto bail;
- if (FASTRPC_MODE_PARALLEL == mode)
+ if (mode == FASTRPC_MODE_PARALLEL && !fl->sctx->smmu.coherent)
inv_args(ctx);
wait:
if (kernel)
@@ -2275,7 +2281,6 @@ static int fastrpc_cb_probe(struct device *dev)
const char *name;
unsigned int start = 0x80000000;
int err = 0, i;
- int disable_htw = 1;
int secure_vmid = VMID_CP_PIXEL;
VERIFY(err, 0 != (name = of_get_property(dev->of_node, "label", NULL)));
@@ -2302,6 +2307,8 @@ static int fastrpc_cb_probe(struct device *dev)
sess = &chan->session[chan->sesscount];
sess->smmu.cb = iommuspec.args[0];
sess->used = 0;
+ sess->smmu.coherent = of_property_read_bool(dev->of_node,
+ "dma-coherent");
sess->smmu.secure = of_property_read_bool(dev->of_node,
"qcom,secure-context-bank");
if (sess->smmu.secure)
@@ -2311,9 +2318,6 @@ static int fastrpc_cb_probe(struct device *dev)
start, 0x7fffffff)));
if (err)
goto bail;
- iommu_domain_set_attr(sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
iommu_set_fault_handler(sess->smmu.mapping->domain,
fastrpc_smmu_fault_handler, sess);
if (sess->smmu.secure)
@@ -2341,7 +2345,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
unsigned int *range = 0, range_size = 0;
unsigned int *sids = 0, sids_size = 0;
int err = 0, ret = 0, i;
- int disable_htw = 1;
VERIFY(err, 0 != (domains_child_node = of_get_child_by_name(
dev->of_node,
@@ -2395,9 +2398,6 @@ static int fastrpc_cb_legacy_probe(struct device *dev)
range[0], range[1])));
if (err)
goto bail;
- iommu_domain_set_attr(first_sess->smmu.mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
VERIFY(err, !arm_iommu_attach_device(first_sess->dev,
first_sess->smmu.mapping));
if (err)
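
The adsprpc change above gates every cache-maintenance call on a per-session coherent flag, set when the context bank's device-tree node carries the standard dma-coherent property: on an IO-coherent SMMU path the explicit flush/invalidate of the argument buffers is unnecessary work. A minimal sketch of the pattern, with hypothetical names (my_smmu_cfg, my_sync_for_device); only of_property_read_bool() and dmac_flush_range() are taken from the patch itself:

#include <linux/of.h>
#include <asm/cacheflush.h>

struct my_smmu_cfg {
        bool coherent;  /* true when the SMMU path is IO-coherent */
};

/* Probe-time: latch the standard "dma-coherent" DT property. */
static void my_smmu_cfg_init(struct device_node *np, struct my_smmu_cfg *cfg)
{
        cfg->coherent = of_property_read_bool(np, "dma-coherent");
}

/* Invoke-time: only flush CPU caches on the non-coherent path. */
static void my_sync_for_device(struct my_smmu_cfg *cfg, char *buf, size_t len)
{
        if (cfg->coherent)
                return; /* hardware snoops the CPU caches */
        dmac_flush_range(buf, buf + len);
}
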
diff --git a/drivers/clk/msm/clock-gcc-cobalt.c b/drivers/clk/msm/clock-gcc-cobalt.c
index 05272118af16..46e791b3cb99 100644
--- a/drivers/clk/msm/clock-gcc-cobalt.c
+++ b/drivers/clk/msm/clock-gcc-cobalt.c
@@ -2374,7 +2374,7 @@ static struct mux_clk gcc_debug_mux = {
{ &debug_cpu_clk.c, 0x00c0 },
{ &snoc_clk.c, 0x0000 },
{ &cnoc_clk.c, 0x000e },
- { &bimc_clk.c, 0x00a9 },
+ { &bimc_clk.c, 0x014e },
{ &gcc_mmss_sys_noc_axi_clk.c, 0x001f },
{ &gcc_mmss_noc_cfg_ahb_clk.c, 0x0020 },
{ &gcc_usb30_master_clk.c, 0x003e },
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index d6cdbbc78827..d29fd60719c9 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -202,6 +202,8 @@ enum clk_osm_trace_packet_id {
#define TRACE_CTRL_EN_MASK BIT(0)
#define TRACE_CTRL_ENABLE 1
#define TRACE_CTRL_DISABLE 0
+#define TRACE_CTRL_ENABLE_WDOG_STATUS BIT(30)
+#define TRACE_CTRL_ENABLE_WDOG_STATUS_MASK BIT(30)
#define TRACE_CTRL_PACKET_TYPE_MASK BVAL(2, 1, 3)
#define TRACE_CTRL_PACKET_TYPE_SHIFT 1
#define TRACE_CTRL_PERIODIC_TRACE_EN_MASK BIT(3)
@@ -221,6 +223,11 @@ enum clk_osm_trace_packet_id {
#define PERFCL_EFUSE_SHIFT 29
#define PERFCL_EFUSE_MASK 0x7
+#define MSMCOBALTV1_PWRCL_BOOT_RATE 1478400000
+#define MSMCOBALTV1_PERFCL_BOOT_RATE 1536000000
+#define MSMCOBALTV2_PWRCL_BOOT_RATE 1555200000
+#define MSMCOBALTV2_PERFCL_BOOT_RATE 1728000000
+
static void __iomem *virt_base;
static void __iomem *debug_base;
@@ -2687,6 +2694,18 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
return rc;
}
+ if (msmcobalt_v2) {
+ /* Enable OSM WDOG registers */
+ clk_osm_masked_write_reg(&pwrcl_clk,
+ TRACE_CTRL_ENABLE_WDOG_STATUS,
+ TRACE_CTRL,
+ TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+ clk_osm_masked_write_reg(&perfcl_clk,
+ TRACE_CTRL_ENABLE_WDOG_STATUS,
+ TRACE_CTRL,
+ TRACE_CTRL_ENABLE_WDOG_STATUS_MASK);
+ }
+
/*
* The hmss_gpll0 clock runs at 300 MHz. Ensure it is at the correct
* frequency before enabling OSM. LUT index 0 is always sourced from
@@ -2700,18 +2719,22 @@ static int cpu_clock_osm_driver_probe(struct platform_device *pdev)
}
clk_prepare_enable(&sys_apcsaux_clk_gcc.c);
- /* Set 300MHz index */
- rc = clk_set_rate(&pwrcl_clk.c, init_rate);
+ /* Set boot rate */
+ rc = clk_set_rate(&pwrcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PWRCL_BOOT_RATE :
+ MSMCOBALTV2_PWRCL_BOOT_RATE);
if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on pwr cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set boot rate on pwr cluster, rc=%d\n",
rc);
clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
return rc;
}
- rc = clk_set_rate(&perfcl_clk.c, init_rate);
+ rc = clk_set_rate(&perfcl_clk.c, msmcobalt_v1 ?
+ MSMCOBALTV1_PERFCL_BOOT_RATE :
+ MSMCOBALTV2_PERFCL_BOOT_RATE);
if (rc) {
- dev_err(&pdev->dev, "Unable to set init rate on perf cluster, rc=%d\n",
+ dev_err(&pdev->dev, "Unable to set boot rate on perf cluster, rc=%d\n",
rc);
clk_disable_unprepare(&sys_apcsaux_clk_gcc.c);
return rc;
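
Two things happen in the clock-osm.c hunks: the msmcobalt v2 path turns on the OSM watchdog status bits in TRACE_CTRL, and probe now programs a silicon-revision-specific boot frequency in place of the old fixed init_rate. The masked write used for TRACE_CTRL is the usual read-modify-write idiom; a standalone sketch (readl_relaxed/writel_relaxed are the stock MMIO accessors, base and offset are placeholders):

#include <linux/io.h>

/*
 * Sketch of a masked register update, the shape behind
 * clk_osm_masked_write_reg(): clear the field, then OR in the new
 * value, leaving every other bit of the register untouched.
 */
static void masked_write(void __iomem *base, u32 offset, u32 val, u32 mask)
{
        u32 reg = readl_relaxed(base + offset);

        reg &= ~mask;
        reg |= (val & mask);
        writel_relaxed(reg, base + offset);
}
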
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
index 9a080e4ee39b..a574a9cd2b5a 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt-util.c
@@ -18,6 +18,7 @@
#include <linux/iopoll.h>
#include <linux/delay.h>
#include <linux/clk/msm-clock-generic.h>
+#include <linux/usb/usbpd.h>
#include "mdss-pll.h"
#include "mdss-dp-pll.h"
@@ -172,9 +173,27 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
{
u32 res = 0;
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
+ pr_debug("%s: spare_value=0x%x, ln_cnt=0x%x, orientation=0x%x\n",
+ __func__, spare_value, ln_cnt, orientation);
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x2d);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x35);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_PD_CTL, 0x3d);
+ }
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_PD_CTL, 0x3d);
/* Make sure the PHY register writes are done */
wmb();
MDSS_PLL_REG_W(dp_res->pll_base,
@@ -314,8 +333,13 @@ int dp_config_vco_rate(struct dp_pll_vco_clk *vco, unsigned long rate)
/* Make sure the PLL register writes are done */
wmb();
- MDSS_PLL_REG_W(dp_res->phy_base,
- DP_PHY_MODE, 0x58);
+ if (orientation == ORIENTATION_CC2)
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x48);
+ else
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ DP_PHY_MODE, 0x58);
+
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_TX0_TX1_LANE_CTL, 0x05);
MDSS_PLL_REG_W(dp_res->phy_base,
@@ -427,6 +451,12 @@ static int dp_pll_enable(struct clk *c)
u32 status;
struct dp_pll_vco_clk *vco = mdss_dp_to_vco_clk(c);
struct mdss_pll_resources *dp_res = vco->priv;
+ u8 orientation, ln_cnt;
+ u32 spare_value, bias_en, drvr_en;
+
+ spare_value = MDSS_PLL_REG_R(dp_res->phy_base, DP_PHY_SPARE0);
+ ln_cnt = spare_value & 0x0F;
+ orientation = (spare_value & 0xF0) >> 4;
MDSS_PLL_REG_W(dp_res->phy_base,
DP_PHY_CFG, 0x01);
@@ -474,18 +504,45 @@ static int dp_pll_enable(struct clk *c)
pr_debug("%s: PLL is locked\n", __func__);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ if (ln_cnt == 1) {
+ bias_en = 0x3e;
+ drvr_en = 0x13;
+ } else {
+ bias_en = 0x3f;
+ drvr_en = 0x10;
+ }
+
+ if (ln_cnt != 4) {
+ if (orientation == ORIENTATION_CC1) {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ drvr_en);
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
- 0x3f);
- MDSS_PLL_REG_W(dp_res->phy_base,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+ } else {
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX0_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_HIGHZ_DRVR_EN,
- 0x10);
+ drvr_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_TRANSCEIVER_BIAS_EN,
+ bias_en);
+ MDSS_PLL_REG_W(dp_res->phy_base,
+ QSERDES_TX1_OFFSET + TXn_HIGHZ_DRVR_EN,
+ drvr_en);
+ }
+
MDSS_PLL_REG_W(dp_res->phy_base,
QSERDES_TX0_OFFSET + TXn_TX_POL_INV,
0x0a);
@@ -615,7 +672,7 @@ int dp_vco_prepare(struct clk *c)
rc = dp_pll_enable(c);
if (rc) {
mdss_pll_resource_enable(dp_pll_res, false);
- pr_err("ndx=%d failed to enable dsi pll\n",
+ pr_err("ndx=%d failed to enable dp pll\n",
dp_pll_res->index);
goto error;
}
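
The DP PLL now adapts its PHY programming to the Type-C plug: an earlier stage stashes the lane count in the low nibble of DP_PHY_SPARE0 and the CC orientation in the high nibble, and both dp_config_vco_rate() and dp_pll_enable() read them back. With fewer than four lanes only one TX pair is powered, selected by orientation; with all four lanes both pairs are driven. The decode itself reduces to (sketch):

/*
 * Sketch of the DP_PHY_SPARE0 packing used above: lane count in
 * bits [3:0], plug orientation in bits [7:4]. ORIENTATION_CC1/CC2
 * come from <linux/usb/usbpd.h>, per the new include in this patch.
 */
static void dp_decode_spare0(u32 spare_value, u8 *ln_cnt, u8 *orientation)
{
        *ln_cnt = spare_value & 0x0F;
        *orientation = (spare_value & 0xF0) >> 4;
}
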
diff --git a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
index d89545b38e64..28f21ed1fe0d 100644
--- a/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
+++ b/drivers/clk/msm/mdss/mdss-dp-pll-cobalt.h
@@ -41,6 +41,7 @@
#define DP_PHY_TX0_TX1_LANE_CTL 0x0068
#define DP_PHY_TX2_TX3_LANE_CTL 0x0084
+#define DP_PHY_SPARE0 0x00A8
#define DP_PHY_STATUS 0x00BC
/* Tx registers */
diff --git a/drivers/clk/qcom/gcc-msmfalcon.c b/drivers/clk/qcom/gcc-msmfalcon.c
index d353cc9ade73..78858a3c6f1c 100644
--- a/drivers/clk/qcom/gcc-msmfalcon.c
+++ b/drivers/clk/qcom/gcc-msmfalcon.c
@@ -2527,6 +2527,32 @@ static struct clk_branch hlos1_vote_lpass_adsp_smmu_clk = {
},
};
+static struct clk_branch hlos1_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7d048,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7d048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos1_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
+static struct clk_branch hlos2_vote_turing_adsp_smmu_clk = {
+ .halt_reg = 0x7e048,
+ .halt_check = BRANCH_HALT_NO_CHECK_ON_DISABLE,
+ .clkr = {
+ .enable_reg = 0x7e048,
+ .enable_mask = BIT(0),
+ .hw.init = &(struct clk_init_data){
+ .name = "hlos2_vote_turing_adsp_smmu_clk",
+ .ops = &clk_branch2_ops,
+ },
+ },
+};
+
static struct clk_fixed_factor gcc_ce1_ahb_m_clk = {
.hw.init = &(struct clk_init_data){
.name = "gcc_ce1_ahb_m_clk",
@@ -2683,6 +2709,10 @@ static struct clk_regmap *gcc_falcon_clocks[] = {
[GCC_UFS_ICE_CORE_HW_CTL_CLK] = &gcc_ufs_ice_core_hw_ctl_clk.clkr,
[GCC_UFS_PHY_AUX_HW_CTL_CLK] = &gcc_ufs_phy_aux_hw_ctl_clk.clkr,
[GCC_UFS_UNIPRO_CORE_HW_CTL_CLK] = &gcc_ufs_unipro_core_hw_ctl_clk.clkr,
+ [HLOS1_VOTE_TURING_ADSP_SMMU_CLK] =
+ &hlos1_vote_turing_adsp_smmu_clk.clkr,
+ [HLOS2_VOTE_TURING_ADSP_SMMU_CLK] =
+ &hlos2_vote_turing_adsp_smmu_clk.clkr,
};
static const struct qcom_reset_map gcc_falcon_resets[] = {
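
The two new Turing ADSP SMMU branches follow the standard vote-clock shape: enable_reg doubles as halt_reg, BIT(0) is the HLOS vote bit, and BRANCH_HALT_NO_CHECK_ON_DISABLE skips the halt-status check on disable, since the clock may legitimately stay running (or halted) once this master's vote is dropped. A consumer takes the vote through the ordinary clk API; a sketch, where the con-id "turing_adsp_smmu" is a hypothetical clock-names entry:

#include <linux/clk.h>
#include <linux/device.h>
#include <linux/err.h>

/* Sketch: a hypothetical consumer asserting the HLOS vote. */
static int vote_turing_smmu_clk(struct device *dev, struct clk **out)
{
        struct clk *clk = devm_clk_get(dev, "turing_adsp_smmu");

        if (IS_ERR(clk))
                return PTR_ERR(clk);

        *out = clk;
        return clk_prepare_enable(clk); /* sets BIT(0) in the vote reg */
}
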
diff --git a/drivers/cpuidle/lpm-levels.c b/drivers/cpuidle/lpm-levels.c
index ced95aa2b649..37e504381313 100644
--- a/drivers/cpuidle/lpm-levels.c
+++ b/drivers/cpuidle/lpm-levels.c
@@ -85,8 +85,6 @@ struct lpm_debug {
struct lpm_cluster *lpm_root_node;
-#define MAXSAMPLES 5
-
static bool lpm_prediction;
module_param_named(lpm_prediction,
lpm_prediction, bool, S_IRUGO | S_IWUSR | S_IWGRP);
@@ -108,6 +106,7 @@ struct lpm_history {
uint32_t hptr;
uint32_t hinvalid;
uint32_t htmr_wkup;
+ int64_t stime;
};
static DEFINE_PER_CPU(struct lpm_history, hist);
@@ -359,9 +358,6 @@ static enum hrtimer_restart lpm_hrtimer_cb(struct hrtimer *h)
static void histtimer_cancel(void)
{
- if (!lpm_prediction)
- return;
-
hrtimer_try_to_cancel(&histtimer);
}
@@ -383,6 +379,51 @@ static void histtimer_start(uint32_t time_us)
hrtimer_start(&histtimer, hist_ktime, HRTIMER_MODE_REL_PINNED);
}
+static void cluster_timer_init(struct lpm_cluster *cluster)
+{
+ struct list_head *list;
+
+ if (!cluster)
+ return;
+
+ hrtimer_init(&cluster->histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ cluster_timer_init(n);
+ }
+}
+
+static void clusttimer_cancel(void)
+{
+ int cpu = raw_smp_processor_id();
+ struct lpm_cluster *cluster = per_cpu(cpu_cluster, cpu);
+
+ hrtimer_try_to_cancel(&cluster->histtimer);
+ hrtimer_try_to_cancel(&cluster->parent->histtimer);
+}
+
+static enum hrtimer_restart clusttimer_fn(struct hrtimer *h)
+{
+ struct lpm_cluster *cluster = container_of(h,
+ struct lpm_cluster, histtimer);
+
+ cluster->history.hinvalid = 1;
+ return HRTIMER_NORESTART;
+}
+
+static void clusttimer_start(struct lpm_cluster *cluster, uint32_t time_us)
+{
+ uint64_t time_ns = time_us * NSEC_PER_USEC;
+ ktime_t clust_ktime = ns_to_ktime(time_ns);
+
+ cluster->histtimer.function = clusttimer_fn;
+ hrtimer_start(&cluster->histtimer, clust_ktime,
+ HRTIMER_MODE_REL_PINNED);
+}
+
static void msm_pm_set_timer(uint32_t modified_time_us)
{
u64 modified_time_ns = modified_time_us * NSEC_PER_USEC;
@@ -492,14 +533,17 @@ static uint64_t lpm_cpuidle_predict(struct cpuidle_device *dev,
if (history->hinvalid) {
history->hinvalid = 0;
history->htmr_wkup = 1;
+ history->stime = 0;
return 0;
}
/*
* Predict only when all the samples are collected.
*/
- if (history->nsamp < MAXSAMPLES)
+ if (history->nsamp < MAXSAMPLES) {
+ history->stime = 0;
return 0;
+ }
/*
* Check if the samples are not much deviated, if so use the
@@ -540,6 +584,7 @@ again:
*/
if (((avg > stddev * 6) && (divisor >= (MAXSAMPLES - 1)))
|| stddev <= ref_stddev) {
+ history->stime = ktime_to_us(ktime_get()) + avg;
return avg;
} else if (divisor > (MAXSAMPLES - 1)) {
thresh = max - 1;
@@ -567,6 +612,8 @@ again:
*idx_restrict = j;
do_div(total, failed);
*idx_restrict_time = total;
+ history->stime = ktime_to_us(ktime_get())
+ + *idx_restrict_time;
break;
}
}
@@ -584,6 +631,7 @@ static inline void invalidate_predict_history(struct cpuidle_device *dev)
if (history->hinvalid) {
history->hinvalid = 0;
history->htmr_wkup = 1;
+ history->stime = 0;
}
}
@@ -603,6 +651,7 @@ static void clear_predict_history(void)
history->mode[i] = -1;
history->hptr = 0;
history->nsamp = 0;
+ history->stime = 0;
}
}
}
@@ -724,12 +773,14 @@ static int cpu_power_select(struct cpuidle_device *dev,
}
static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
- struct cpumask *mask, bool from_idle)
+ struct cpumask *mask, bool from_idle, uint32_t *pred_time)
{
int cpu;
int next_cpu = raw_smp_processor_id();
ktime_t next_event;
struct cpumask online_cpus_in_cluster;
+ struct lpm_history *history;
+ int64_t prediction = LONG_MAX;
next_event.tv64 = KTIME_MAX;
if (!suspend_wake_time)
@@ -754,11 +805,21 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
next_event.tv64 = next_event_c->tv64;
next_cpu = cpu;
}
+
+ if (from_idle && lpm_prediction) {
+ history = &per_cpu(hist, cpu);
+ if (history->stime && (history->stime < prediction))
+ prediction = history->stime;
+ }
}
if (mask)
cpumask_copy(mask, cpumask_of(next_cpu));
+ if (from_idle && lpm_prediction) {
+ if (prediction > ktime_to_us(ktime_get()))
+ *pred_time = prediction - ktime_to_us(ktime_get());
+ }
if (ktime_to_us(next_event) > ktime_to_us(ktime_get()))
return ktime_to_us(ktime_sub(next_event, ktime_get()));
@@ -766,18 +827,193 @@ static uint64_t get_cluster_sleep_time(struct lpm_cluster *cluster,
return 0;
}
-static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
+static int cluster_predict(struct lpm_cluster *cluster,
+ uint32_t *pred_us)
+{
+ int i, j;
+ int ret = 0;
+ struct cluster_history *history = &cluster->history;
+ int64_t cur_time = ktime_to_us(ktime_get());
+
+ if (!lpm_prediction)
+ return 0;
+
+ if (history->hinvalid) {
+ history->hinvalid = 0;
+ history->htmr_wkup = 1;
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->nsamp == MAXSAMPLES) {
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((cur_time - history->stime[i])
+ > CLUST_SMPL_INVLD_TIME)
+ history->nsamp--;
+ }
+ }
+
+ if (history->nsamp < MAXSAMPLES) {
+ history->flag = 0;
+ return ret;
+ }
+
+ if (history->flag == 2)
+ history->flag = 0;
+
+ if (history->htmr_wkup != 1) {
+ uint64_t total = 0;
+
+ if (history->flag == 1) {
+ for (i = 0; i < MAXSAMPLES; i++)
+ total += history->resi[i];
+ do_div(total, MAXSAMPLES);
+ *pred_us = total;
+ return 2;
+ }
+
+ for (j = 1; j < cluster->nlevels; j++) {
+ uint32_t failed = 0;
+
+ total = 0;
+ for (i = 0; i < MAXSAMPLES; i++) {
+ if ((history->mode[i] == j) && (history->resi[i]
+ < cluster->levels[j].pwr.min_residency)) {
+ failed++;
+ total += history->resi[i];
+ }
+ }
+
+ if (failed > (MAXSAMPLES-2)) {
+ do_div(total, failed);
+ *pred_us = total;
+ history->flag = 1;
+ return 1;
+ }
+ }
+ }
+
+ return ret;
+}
+
+static void update_cluster_history_time(struct cluster_history *history,
+ int idx, uint64_t start)
+{
+ history->entry_idx = idx;
+ history->entry_time = start;
+}
+
+static void update_cluster_history(struct cluster_history *history, int idx)
+{
+ uint32_t tmr = 0;
+ uint32_t residency = 0;
+ struct lpm_cluster *cluster =
+ container_of(history, struct lpm_cluster, history);
+
+ if (!lpm_prediction)
+ return;
+
+ if ((history->entry_idx == -1) || (history->entry_idx == idx)) {
+ residency = ktime_to_us(ktime_get()) - history->entry_time;
+ history->stime[history->hptr] = history->entry_time;
+ } else
+ return;
+
+ if (history->htmr_wkup) {
+ if (!history->hptr)
+ history->hptr = MAXSAMPLES-1;
+ else
+ history->hptr--;
+
+ history->resi[history->hptr] += residency;
+
+ history->htmr_wkup = 0;
+ tmr = 1;
+ } else {
+ history->resi[history->hptr] = residency;
+ }
+
+ history->mode[history->hptr] = idx;
+
+ history->entry_idx = INT_MIN;
+ history->entry_time = 0;
+
+ if (history->nsamp < MAXSAMPLES)
+ history->nsamp++;
+
+ trace_cluster_pred_hist(cluster->cluster_name,
+ history->mode[history->hptr], history->resi[history->hptr],
+ history->hptr, tmr);
+
+ (history->hptr)++;
+
+ if (history->hptr >= MAXSAMPLES)
+ history->hptr = 0;
+}
+
+static void clear_cl_history_each(struct cluster_history *history)
+{
+ int i;
+
+ for (i = 0; i < MAXSAMPLES; i++) {
+ history->resi[i] = 0;
+ history->mode[i] = -1;
+ history->stime[i] = 0;
+ }
+ history->hptr = 0;
+ history->nsamp = 0;
+ history->flag = 0;
+ history->hinvalid = 0;
+ history->htmr_wkup = 0;
+}
+
+static void clear_cl_predict_history(void)
+{
+ struct lpm_cluster *cluster = lpm_root_node;
+ struct list_head *list;
+
+ if (!lpm_prediction)
+ return;
+
+ clear_cl_history_each(&cluster->history);
+
+ list_for_each(list, &cluster->child) {
+ struct lpm_cluster *n;
+
+ n = list_entry(list, typeof(*n), list);
+ clear_cl_history_each(&n->history);
+ }
+}
+
+static int cluster_select(struct lpm_cluster *cluster, bool from_idle,
+ int *ispred)
{
int best_level = -1;
int i;
struct cpumask mask;
uint32_t latency_us = ~0U;
uint32_t sleep_us;
+ uint32_t cpupred_us = 0, pred_us = 0;
+ int pred_mode = 0, predicted = 0;
if (!cluster)
return -EINVAL;
- sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL, from_idle);
+ sleep_us = (uint32_t)get_cluster_sleep_time(cluster, NULL,
+ from_idle, &cpupred_us);
+
+ if (from_idle) {
+ pred_mode = cluster_predict(cluster, &pred_us);
+
+ if (cpupred_us && pred_mode && (cpupred_us < pred_us))
+ pred_us = cpupred_us;
+
+ if (pred_us && pred_mode && (pred_us < sleep_us))
+ predicted = 1;
+
+ if (predicted && (pred_us == cpupred_us))
+ predicted = 2;
+ }
if (cpumask_and(&mask, cpu_online_mask, &cluster->child_cpus))
latency_us = pm_qos_request_for_cpumask(PM_QOS_CPU_DMA_LATENCY,
@@ -823,10 +1059,19 @@ static int cluster_select(struct lpm_cluster *cluster, bool from_idle)
best_level = i;
- if (sleep_us <= pwr_params->max_residency)
+ if (predicted ? (pred_us <= pwr_params->max_residency)
+ : (sleep_us <= pwr_params->max_residency))
break;
}
+ if ((best_level == (cluster->nlevels - 1)) && (pred_mode == 2))
+ cluster->history.flag = 2;
+
+ *ispred = predicted;
+
+ trace_cluster_pred_select(cluster->cluster_name, best_level, sleep_us,
+ latency_us, predicted, pred_us);
+
return best_level;
}
@@ -840,7 +1085,7 @@ static void cluster_notify(struct lpm_cluster *cluster,
}
static int cluster_configure(struct lpm_cluster *cluster, int idx,
- bool from_idle)
+ bool from_idle, int predicted)
{
struct lpm_cluster_level *level = &cluster->levels[idx];
int ret, i;
@@ -858,6 +1103,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
cluster->num_children_in_sync.bits[0],
cluster->child_cpus.bits[0], from_idle);
lpm_stats_cluster_enter(cluster->stats, idx);
+
+ if (from_idle && lpm_prediction)
+ update_cluster_history_time(&cluster->history, idx,
+ ktime_to_us(ktime_get()));
}
for (i = 0; i < cluster->ndevices; i++) {
@@ -869,8 +1118,10 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
if (level->notify_rpm) {
struct cpumask nextcpu, *cpumask;
uint64_t us;
+ uint32_t pred_us;
- us = get_cluster_sleep_time(cluster, &nextcpu, from_idle);
+ us = get_cluster_sleep_time(cluster, &nextcpu,
+ from_idle, &pred_us);
cpumask = level->disable_dynamic_routing ? NULL : &nextcpu;
ret = msm_rpm_enter_sleep(0, cpumask);
@@ -881,6 +1132,8 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
us = us + 1;
clear_predict_history();
+ clear_cl_predict_history();
+
do_div(us, USEC_PER_SEC/SCLK_HZ);
msm_mpm_enter_sleep(us, from_idle, cpumask);
}
@@ -891,6 +1144,15 @@ static int cluster_configure(struct lpm_cluster *cluster, int idx,
sched_set_cluster_dstate(&cluster->child_cpus, idx, 0, 0);
cluster->last_level = idx;
+
+ if (predicted && (idx < (cluster->nlevels - 1))) {
+ struct power_params *pwr_params = &cluster->levels[idx].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster, pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+
return 0;
failed_set_mode:
@@ -909,6 +1171,7 @@ static void cluster_prepare(struct lpm_cluster *cluster,
int64_t start_time)
{
int i;
+ int predicted = 0;
if (!cluster)
return;
@@ -939,12 +1202,28 @@ static void cluster_prepare(struct lpm_cluster *cluster,
&cluster->child_cpus))
goto failed;
- i = cluster_select(cluster, from_idle);
+ i = cluster_select(cluster, from_idle, &predicted);
+
+ if (((i < 0) || (i == cluster->default_level))
+ && predicted && from_idle) {
+ update_cluster_history_time(&cluster->history,
+ -1, ktime_to_us(ktime_get()));
+
+ if (i < 0) {
+ struct power_params *pwr_params =
+ &cluster->levels[0].pwr;
+
+ tick_broadcast_exit();
+ clusttimer_start(cluster,
+ pwr_params->max_residency + tmr_add);
+ tick_broadcast_enter();
+ }
+ }
if (i < 0)
goto failed;
- if (cluster_configure(cluster, i, from_idle))
+ if (cluster_configure(cluster, i, from_idle, predicted))
goto failed;
cluster->stats->sleep_time = start_time;
@@ -988,6 +1267,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
&lvl->num_cpu_votes, cpu);
}
+ if (from_idle && first_cpu &&
+ (cluster->last_level == cluster->default_level))
+ update_cluster_history(&cluster->history, cluster->last_level);
+
if (!first_cpu || cluster->last_level == cluster->default_level)
goto unlock_return;
@@ -1029,6 +1312,10 @@ static void cluster_unprepare(struct lpm_cluster *cluster,
sched_set_cluster_dstate(&cluster->child_cpus, 0, 0, 0);
cluster_notify(cluster, &cluster->levels[last_level], false);
+
+ if (from_idle)
+ update_cluster_history(&cluster->history, last_level);
+
cluster_unprepare(cluster->parent, &cluster->child_cpus,
last_level, from_idle, end_time);
unlock_return:
@@ -1288,7 +1575,10 @@ exit:
update_history(dev, idx);
trace_cpu_idle_exit(idx, success);
local_irq_enable();
- histtimer_cancel();
+ if (lpm_prediction) {
+ histtimer_cancel();
+ clusttimer_cancel();
+ }
return idx;
}
@@ -1561,6 +1851,7 @@ static int lpm_probe(struct platform_device *pdev)
suspend_set_ops(&lpm_suspend_ops);
hrtimer_init(&lpm_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
hrtimer_init(&histtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ cluster_timer_init(lpm_root_node);
ret = remote_spin_lock_init(&scm_handoff_lock, SCM_HANDOFF_LOCK_ID);
if (ret) {
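
The new prediction plumbing in lpm-levels.c hangs off per-cluster hrtimers: cluster_configure() arms the timer to the chosen level's max_residency plus a margin (tmr_add), and if it actually fires, clusttimer_fn() marks the history invalid so the next idle decision distrusts the stale samples; cluster_timer_init() walks the cluster tree recursively so every node gets a timer. The one-shot hrtimer idiom in isolation (sketch, names are placeholders):

#include <linux/hrtimer.h>
#include <linux/ktime.h>

static struct hrtimer demo_timer;

static enum hrtimer_restart demo_timer_fn(struct hrtimer *h)
{
        /* e.g. invalidate the prediction history here */
        return HRTIMER_NORESTART;       /* one-shot: do not re-arm */
}

static void demo_timer_start(uint32_t time_us)
{
        uint64_t time_ns = (uint64_t)time_us * NSEC_PER_USEC;

        hrtimer_init(&demo_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
        demo_timer.function = demo_timer_fn;
        hrtimer_start(&demo_timer, ns_to_ktime(time_ns),
                        HRTIMER_MODE_REL_PINNED);
}
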
diff --git a/drivers/cpuidle/lpm-levels.h b/drivers/cpuidle/lpm-levels.h
index 63fe0a0fbc08..3c9665ea8981 100644
--- a/drivers/cpuidle/lpm-levels.h
+++ b/drivers/cpuidle/lpm-levels.h
@@ -14,6 +14,8 @@
#include <soc/qcom/spm.h>
#define NR_LPM_LEVELS 8
+#define MAXSAMPLES 5
+#define CLUST_SMPL_INVLD_TIME 40000
extern bool use_psci;
@@ -85,6 +87,19 @@ struct low_power_ops {
enum msm_pm_l2_scm_flag tz_flag;
};
+struct cluster_history {
+ uint32_t resi[MAXSAMPLES];
+ int mode[MAXSAMPLES];
+ int64_t stime[MAXSAMPLES];
+ uint32_t hptr;
+ uint32_t hinvalid;
+ uint32_t htmr_wkup;
+ uint64_t entry_time;
+ int entry_idx;
+ int nsamp;
+ int flag;
+};
+
struct lpm_cluster {
struct list_head list;
struct list_head child;
@@ -109,6 +124,8 @@ struct lpm_cluster {
unsigned int psci_mode_shift;
unsigned int psci_mode_mask;
bool no_saw_devices;
+ struct cluster_history history;
+ struct hrtimer histtimer;
};
int set_l2_mode(struct low_power_ops *ops, int mode, bool notify_rpm);
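
struct cluster_history mirrors the per-CPU lpm_history: a MAXSAMPLES-deep ring of observed residencies (resi), the low-power level of each sample (mode) and its entry time (stime), plus the circular pointer (hptr), a saturating sample count (nsamp), and flags for timer wakeups and invalidation. cluster_predict() only trusts the ring once it is full and the samples are fresh; stripped of the kernel plumbing, the bookkeeping reduces to (standalone sketch):

/*
 * Sketch of the MAXSAMPLES ring-buffer bookkeeping behind
 * cluster_history: push a residency sample, then average the ring
 * once it is full. Illustration only; the driver also tracks the
 * sleep level and timestamp per slot.
 */
#define MAXSAMPLES 5

struct hist {
        unsigned int resi[MAXSAMPLES];
        unsigned int hptr;      /* next slot to write */
        unsigned int nsamp;     /* valid samples, saturates at MAXSAMPLES */
};

static void hist_push(struct hist *h, unsigned int residency)
{
        h->resi[h->hptr] = residency;
        h->hptr = (h->hptr + 1) % MAXSAMPLES;
        if (h->nsamp < MAXSAMPLES)
                h->nsamp++;
}

/* Returns 0 until the ring is full, then the mean residency. */
static unsigned int hist_avg(const struct hist *h)
{
        unsigned long long total = 0;
        int i;

        if (h->nsamp < MAXSAMPLES)
                return 0;
        for (i = 0; i < MAXSAMPLES; i++)
                total += h->resi[i];
        return (unsigned int)(total / MAXSAMPLES);
}
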
diff --git a/drivers/gpu/msm/Makefile b/drivers/gpu/msm/Makefile
index 90aee3cad5ad..625a2640b4c4 100644
--- a/drivers/gpu/msm/Makefile
+++ b/drivers/gpu/msm/Makefile
@@ -3,7 +3,7 @@ ccflags-y := -Idrivers/staging/android
msm_kgsl_core-y = \
kgsl.o \
kgsl_trace.o \
- kgsl_cmdbatch.o \
+ kgsl_drawobj.o \
kgsl_ioctl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c
index 1356835d0e93..94d828027f20 100644
--- a/drivers/gpu/msm/adreno.c
+++ b/drivers/gpu/msm/adreno.c
@@ -40,6 +40,7 @@
/* Include the master list of GPU cores that are supported */
#include "adreno-gpulist.h"
+#include "adreno_dispatch.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "adreno."
@@ -1015,8 +1016,8 @@ static void _adreno_free_memories(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv))
- kgsl_free_global(device, &adreno_dev->cmdbatch_profile_buffer);
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv))
+ kgsl_free_global(device, &adreno_dev->profile_buffer);
/* Free local copies of firmware and other command streams */
kfree(adreno_dev->pfp_fw);
@@ -1187,22 +1188,22 @@ static int adreno_init(struct kgsl_device *device)
}
/*
- * Allocate a small chunk of memory for precise cmdbatch profiling for
+ * Allocate a small chunk of memory for precise drawobj profiling for
* those targets that have the always on timer
*/
if (!adreno_is_a3xx(adreno_dev)) {
int r = kgsl_allocate_global(device,
- &adreno_dev->cmdbatch_profile_buffer, PAGE_SIZE,
+ &adreno_dev->profile_buffer, PAGE_SIZE,
0, 0, "alwayson");
- adreno_dev->cmdbatch_profile_index = 0;
+ adreno_dev->profile_index = 0;
if (r == 0) {
- set_bit(ADRENO_DEVICE_CMDBATCH_PROFILE,
+ set_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE,
&adreno_dev->priv);
kgsl_sharedmem_set(device,
- &adreno_dev->cmdbatch_profile_buffer, 0, 0,
+ &adreno_dev->profile_buffer, 0, 0,
PAGE_SIZE);
}
@@ -1242,86 +1243,6 @@ static bool regulators_left_on(struct kgsl_device *device)
return false;
}
-static void _setup_throttling_counters(struct adreno_device *adreno_dev)
-{
- int i, ret;
-
- if (!adreno_is_a540(adreno_dev))
- return;
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
- return;
-
- for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
- /* reset throttled cycles ivalue */
- adreno_dev->busy_data.throttle_cycles[i] = 0;
-
- if (adreno_dev->gpmu_throttle_counters[i] != 0)
- continue;
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
- ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
- &adreno_dev->gpmu_throttle_counters[i],
- NULL,
- PERFCOUNTER_FLAG_KERNEL);
- WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
- ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
- }
-}
-
-/* FW driven idle 10% throttle */
-#define IDLE_10PCT 0
-/* number of cycles when clock is throttled by 50% (CRC) */
-#define CRC_50PCT 1
-/* number of cycles when clock is throttled by more than 50% (CRC) */
-#define CRC_MORE50PCT 2
-/* number of cycles when clock is throttle by less than 50% (CRC) */
-#define CRC_LESS50PCT 3
-
-static uint64_t _read_throttling_counters(struct adreno_device *adreno_dev)
-{
- int i, adj;
- uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
- struct adreno_busy_data *busy = &adreno_dev->busy_data;
-
- if (!adreno_is_a540(adreno_dev))
- return 0;
-
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
- return 0;
-
- if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
- return 0;
-
- for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
- if (!adreno_dev->gpmu_throttle_counters[i])
- return 0;
-
- th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
- adreno_dev->gpmu_throttle_counters[i],
- &busy->throttle_cycles[i]);
- }
- adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
- adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
-
- trace_kgsl_clock_throttling(
- th[IDLE_10PCT], th[CRC_50PCT],
- th[CRC_MORE50PCT], th[CRC_LESS50PCT],
- adj);
- return adj;
-}
-
-static void _update_threshold_count(struct adreno_device *adreno_dev,
- uint64_t adj)
-{
- if (adreno_is_a530(adreno_dev))
- kgsl_regread(KGSL_DEVICE(adreno_dev),
- adreno_dev->lm_threshold_count,
- &adreno_dev->lm_threshold_cross);
- else if (adreno_is_a540(adreno_dev))
- adreno_dev->lm_threshold_cross = adj;
-}
-
static void _set_secvid(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -1418,8 +1339,8 @@ static int _adreno_start(struct adreno_device *adreno_dev)
}
}
- if (device->pwrctrl.bus_control) {
+ if (device->pwrctrl.bus_control) {
/* VBIF waiting for RAM */
if (adreno_dev->starved_ram_lo == 0) {
ret = adreno_perfcounter_get(adreno_dev,
@@ -1455,20 +1376,6 @@ static int _adreno_start(struct adreno_device *adreno_dev)
adreno_dev->busy_data.vbif_ram_cycles = 0;
adreno_dev->busy_data.vbif_starved_ram = 0;
- if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
- && adreno_dev->lm_threshold_count == 0) {
-
- ret = adreno_perfcounter_get(adreno_dev,
- KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
- &adreno_dev->lm_threshold_count, NULL,
- PERFCOUNTER_FLAG_KERNEL);
- /* Ignore noncritical ret - used for debugfs */
- if (ret)
- adreno_dev->lm_threshold_count = 0;
- }
-
- _setup_throttling_counters(adreno_dev);
-
/* Restore performance counter registers with saved values */
adreno_perfcounter_restore(adreno_dev);
@@ -1653,14 +1560,9 @@ static inline bool adreno_try_soft_reset(struct kgsl_device *device, int fault)
int adreno_reset(struct kgsl_device *device, int fault)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
int ret = -EINVAL;
int i = 0;
- /* broadcast to HW - reset is coming */
- if (gpudev->pre_reset)
- gpudev->pre_reset(adreno_dev);
-
/* Try soft reset first */
if (adreno_try_soft_reset(device, fault)) {
/* Make sure VBIF is cleared before resetting */
@@ -2340,12 +2242,12 @@ int adreno_idle(struct kgsl_device *device)
* adreno_drain() - Drain the dispatch queue
* @device: Pointer to the KGSL device structure for the GPU
*
- * Drain the dispatcher of existing command batches. This halts
+ * Drain the dispatcher of existing drawobjs. This halts
* additional commands from being issued until the gate is completed.
*/
static int adreno_drain(struct kgsl_device *device)
{
- reinit_completion(&device->cmdbatch_gate);
+ reinit_completion(&device->halt_gate);
return 0;
}
@@ -2580,27 +2482,6 @@ static inline s64 adreno_ticks_to_us(u32 ticks, u32 freq)
return ticks / freq;
}
-static unsigned int counter_delta(struct kgsl_device *device,
- unsigned int reg, unsigned int *counter)
-{
- unsigned int val;
- unsigned int ret = 0;
-
- /* Read the value */
- kgsl_regread(device, reg, &val);
-
- /* Return 0 for the first read */
- if (*counter != 0) {
- if (val < *counter)
- ret = (0xFFFFFFFF - *counter) + val;
- else
- ret = val - *counter;
- }
-
- *counter = val;
- return ret;
-}
-
/**
* adreno_power_stats() - Reads the counters needed for freq decisions
* @device: Pointer to device whose counters are read
@@ -2612,6 +2493,7 @@ static void adreno_power_stats(struct kgsl_device *device,
struct kgsl_power_stats *stats)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct adreno_busy_data *busy = &adreno_dev->busy_data;
uint64_t adj = 0;
@@ -2625,8 +2507,11 @@ static void adreno_power_stats(struct kgsl_device *device,
gpu_busy = counter_delta(device, adreno_dev->perfctr_pwr_lo,
&busy->gpu_busy);
- adj = _read_throttling_counters(adreno_dev);
- gpu_busy += adj;
+ if (gpudev->read_throttling_counters) {
+ adj = gpudev->read_throttling_counters(adreno_dev);
+ gpu_busy += adj;
+ }
+
stats->busy_time = adreno_ticks_to_us(gpu_busy,
kgsl_pwrctrl_active_freq(pwr));
}
@@ -2647,8 +2532,9 @@ static void adreno_power_stats(struct kgsl_device *device,
stats->ram_time = ram_cycles;
stats->ram_wait = starved_ram;
}
- if (adreno_dev->lm_threshold_count)
- _update_threshold_count(adreno_dev, adj);
+ if (adreno_dev->lm_threshold_count &&
+ gpudev->count_throttles)
+ gpudev->count_throttles(adreno_dev, adj);
}
static unsigned int adreno_gpuid(struct kgsl_device *device,
@@ -2825,7 +2711,7 @@ static const struct kgsl_functable adreno_functable = {
.getproperty_compat = adreno_getproperty_compat,
.waittimestamp = adreno_waittimestamp,
.readtimestamp = adreno_readtimestamp,
- .issueibcmds = adreno_ringbuffer_issueibcmds,
+ .queue_cmds = adreno_dispatcher_queue_cmds,
.ioctl = adreno_ioctl,
.compat_ioctl = adreno_compat_ioctl,
.power_stats = adreno_power_stats,
diff --git a/drivers/gpu/msm/adreno.h b/drivers/gpu/msm/adreno.h
index d4858f3f818e..0f3403cb0095 100644
--- a/drivers/gpu/msm/adreno.h
+++ b/drivers/gpu/msm/adreno.h
@@ -76,13 +76,13 @@
KGSL_CONTEXT_PREEMPT_STYLE_SHIFT)
/*
- * return the dispatcher cmdqueue in which the given cmdbatch should
+ * return the dispatcher drawqueue in which the given drawobj should
* be submitted
*/
-#define ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(c) \
+#define ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(c) \
(&((ADRENO_CONTEXT(c->context))->rb->dispatch_q))
-#define ADRENO_CMDBATCH_RB(c) \
+#define ADRENO_DRAWOBJ_RB(c) \
((ADRENO_CONTEXT(c->context))->rb)
/* Adreno core features */
@@ -346,8 +346,8 @@ struct adreno_gpu_core {
* @halt: Atomic variable to check whether the GPU is currently halted
* @ctx_d_debugfs: Context debugfs node
* @pwrctrl_flag: Flag to hold adreno specific power attributes
- * @cmdbatch_profile_buffer: Memdesc holding the cmdbatch profiling buffer
- * @cmdbatch_profile_index: Index to store the start/stop ticks in the profiling
+ * @profile_buffer: Memdesc holding the drawobj profiling buffer
+ * @profile_index: Index to store the start/stop ticks in the profiling
* buffer
* @sp_local_gpuaddr: Base GPU virtual address for SP local memory
* @sp_pvt_gpuaddr: Base GPU virtual address for SP private memory
@@ -404,8 +404,8 @@ struct adreno_device {
struct dentry *ctx_d_debugfs;
unsigned long pwrctrl_flag;
- struct kgsl_memdesc cmdbatch_profile_buffer;
- unsigned int cmdbatch_profile_index;
+ struct kgsl_memdesc profile_buffer;
+ unsigned int profile_index;
uint64_t sp_local_gpuaddr;
uint64_t sp_pvt_gpuaddr;
const struct firmware *lm_fw;
@@ -441,7 +441,7 @@ struct adreno_device {
* @ADRENO_DEVICE_STARTED - Set if the device start sequence is in progress
* @ADRENO_DEVICE_FAULT - Set if the device is currently in fault (and shouldn't
* send any more commands to the ringbuffer)
- * @ADRENO_DEVICE_CMDBATCH_PROFILE - Set if the device supports command batch
+ * @ADRENO_DEVICE_DRAWOBJ_PROFILE - Set if the device supports drawobj
* profiling via the ALWAYSON counter
* @ADRENO_DEVICE_PREEMPTION - Turn on/off preemption
* @ADRENO_DEVICE_SOFT_FAULT_DETECT - Set if soft fault detect is enabled
@@ -459,7 +459,7 @@ enum adreno_device_flags {
ADRENO_DEVICE_HANG_INTR = 4,
ADRENO_DEVICE_STARTED = 5,
ADRENO_DEVICE_FAULT = 6,
- ADRENO_DEVICE_CMDBATCH_PROFILE = 7,
+ ADRENO_DEVICE_DRAWOBJ_PROFILE = 7,
ADRENO_DEVICE_GPU_REGULATOR_ENABLED = 8,
ADRENO_DEVICE_PREEMPTION = 9,
ADRENO_DEVICE_SOFT_FAULT_DETECT = 10,
@@ -469,22 +469,22 @@ enum adreno_device_flags {
};
/**
- * struct adreno_cmdbatch_profile_entry - a single command batch entry in the
+ * struct adreno_drawobj_profile_entry - a single drawobj entry in the
* kernel profiling buffer
- * @started: Number of GPU ticks at start of the command batch
- * @retired: Number of GPU ticks at the end of the command batch
+ * @started: Number of GPU ticks at start of the drawobj
+ * @retired: Number of GPU ticks at the end of the drawobj
*/
-struct adreno_cmdbatch_profile_entry {
+struct adreno_drawobj_profile_entry {
uint64_t started;
uint64_t retired;
};
-#define ADRENO_CMDBATCH_PROFILE_COUNT \
- (PAGE_SIZE / sizeof(struct adreno_cmdbatch_profile_entry))
+#define ADRENO_DRAWOBJ_PROFILE_COUNT \
+ (PAGE_SIZE / sizeof(struct adreno_drawobj_profile_entry))
-#define ADRENO_CMDBATCH_PROFILE_OFFSET(_index, _member) \
- ((_index) * sizeof(struct adreno_cmdbatch_profile_entry) \
- + offsetof(struct adreno_cmdbatch_profile_entry, _member))
+#define ADRENO_DRAWOBJ_PROFILE_OFFSET(_index, _member) \
+ ((_index) * sizeof(struct adreno_drawobj_profile_entry) \
+ + offsetof(struct adreno_drawobj_profile_entry, _member))
/**
@@ -756,6 +756,10 @@ struct adreno_gpudev {
void (*pwrlevel_change_settings)(struct adreno_device *,
unsigned int prelevel, unsigned int postlevel,
bool post);
+ uint64_t (*read_throttling_counters)(struct adreno_device *);
+ void (*count_throttles)(struct adreno_device *, uint64_t adj);
+ int (*enable_pwr_counters)(struct adreno_device *,
+ unsigned int counter);
unsigned int (*preemption_pre_ibsubmit)(struct adreno_device *,
struct adreno_ringbuffer *rb,
unsigned int *, struct kgsl_context *);
@@ -765,7 +769,6 @@ struct adreno_gpudev {
int (*preemption_init)(struct adreno_device *);
void (*preemption_schedule)(struct adreno_device *);
void (*enable_64bit)(struct adreno_device *);
- void (*pre_reset)(struct adreno_device *);
void (*clk_set_options)(struct adreno_device *,
const char *, struct clk *);
};
@@ -776,7 +779,7 @@ struct adreno_gpudev {
* @KGSL_FT_REPLAY: Replay the faulting command
* @KGSL_FT_SKIPIB: Skip the faulting indirect buffer
* @KGSL_FT_SKIPFRAME: Skip the frame containing the faulting IB
- * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command batch
+ * @KGSL_FT_DISABLE: Tells the dispatcher to disable FT for the command obj
* @KGSL_FT_TEMP_DISABLE: Disables FT for all commands
* @KGSL_FT_THROTTLE: Disable the context if it faults too often
* @KGSL_FT_SKIPCMD: Skip the command containing the faulting IB
@@ -793,7 +796,7 @@ enum kgsl_ft_policy_bits {
/* KGSL_FT_MAX_BITS is used to calculate the mask */
KGSL_FT_MAX_BITS,
/* Internal bits - set during GFT */
- /* Skip the PM dump on replayed command batches */
+ /* Skip the PM dump on replayed command objs */
KGSL_FT_SKIP_PMDUMP = 31,
};
@@ -882,7 +885,7 @@ int adreno_reset(struct kgsl_device *device, int fault);
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj *drawobj);
int adreno_coresight_init(struct adreno_device *adreno_dev);
@@ -1467,4 +1470,24 @@ static inline void adreno_ringbuffer_set_pagetable(struct adreno_ringbuffer *rb,
spin_unlock_irqrestore(&rb->preempt_lock, flags);
}
+static inline unsigned int counter_delta(struct kgsl_device *device,
+ unsigned int reg, unsigned int *counter)
+{
+ unsigned int val;
+ unsigned int ret = 0;
+
+ /* Read the value */
+ kgsl_regread(device, reg, &val);
+
+ /* Return 0 for the first read */
+ if (*counter != 0) {
+ if (val < *counter)
+ ret = (0xFFFFFFFF - *counter) + val;
+ else
+ ret = val - *counter;
+ }
+
+ *counter = val;
+ return ret;
+}
#endif /*__ADRENO_H */
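
counter_delta() moves from adreno.c into the header as an inline because the a5xx throttling code (below) now needs it too. It turns a free-running 32-bit hardware counter into per-sample deltas: the first read only primes the shadow value and reports 0, and a reading lower than the shadow is treated as a single wrap. A usage sketch, with REG_BUSY_LO standing in for a real counter register:

/*
 * Sketch: accumulate busy time from a free-running counter using
 * counter_delta(). With shadow == 250 a subsequent raw reading of 10
 * yields (0xFFFFFFFF - 250) + 10, i.e. one 32-bit wrap is absorbed.
 */
static unsigned int busy_shadow;        /* last raw reading */
static uint64_t busy_total;             /* accumulated delta */

static void sample_busy(struct kgsl_device *device)
{
        busy_total += counter_delta(device, REG_BUSY_LO, &busy_shadow);
}
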
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index d52981d10ff5..2891940b8f5b 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -27,6 +27,7 @@
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl.h"
+#include "kgsl_trace.h"
#include "adreno_a5xx_packets.h"
static int zap_ucode_loaded;
@@ -1406,105 +1407,10 @@ static void a530_lm_enable(struct adreno_device *adreno_dev)
adreno_is_a530v2(adreno_dev) ? 0x00060011 : 0x00000011);
}
-static bool llm_is_enabled(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- return r & (GPMU_BCL_ENABLED | GPMU_LLM_ENABLED);
-}
-
-
-static void sleep_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL, &r);
-
- if ((r & STATE_OF_CHILD) == 0) {
- /* If both children are on, sleep CHILD_O1 first */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01 | IDLE_FULL_LM_SLEEP);
- /* Wait for IDLE_FULL_ACK before continuing */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device,
- A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if (r & IDLE_FULL_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to idle: 0x%X\n", r);
- }
-
- /* Now turn off both children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- 0, STATE_OF_CHILD | IDLE_FULL_LM_SLEEP);
-
- /* wait for WAKEUP_ACK to be zero */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & WAKEUP_ACK) == 0)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to sleep: 0x%X\n", r);
-}
-
-static void wake_llm(struct adreno_device *adreno_dev)
-{
- unsigned int r, retry;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- if (!llm_is_enabled(adreno_dev))
- return;
-
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD, STATE_OF_CHILD_01);
-
- if (((device->pwrctrl.num_pwrlevels - 2) -
- device->pwrctrl.active_pwrlevel) <= LM_DCVS_LIMIT)
- return;
-
- udelay(1);
-
- /* Turn on all children */
- kgsl_regrmw(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL,
- STATE_OF_CHILD | IDLE_FULL_LM_SLEEP, 0);
-
- /* Wait for IDLE_FULL_ACK to be zero and WAKEUP_ACK to be set */
- for (retry = 0; retry < 5; retry++) {
- udelay(1);
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- if ((r & (WAKEUP_ACK | IDLE_FULL_ACK)) == WAKEUP_ACK)
- break;
- }
-
- if (retry == 5)
- KGSL_CORE_ERR("GPMU: LLM failed to wake: 0x%X\n", r);
-}
-
-static bool llm_is_awake(struct adreno_device *adreno_dev)
-{
- unsigned int r;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- kgsl_regread(device, A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS, &r);
- return r & WAKEUP_ACK;
-}
-
static void a540_lm_init(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- uint32_t agc_lm_config =
+ uint32_t agc_lm_config = AGC_BCL_DISABLED |
((ADRENO_CHIPID_PATCH(adreno_dev->chipid) & 0x3)
<< AGC_GPU_VERSION_SHIFT);
unsigned int r;
@@ -1518,11 +1424,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
AGC_LM_CONFIG_ISENSE_ENABLE;
kgsl_regread(device, A5XX_GPMU_TEMP_SENSOR_CONFIG, &r);
- if (!(r & GPMU_BCL_ENABLED))
- agc_lm_config |= AGC_BCL_DISABLED;
-
- if (r & GPMU_LLM_ENABLED)
- agc_lm_config |= AGC_LLM_ENABLED;
if ((r & GPMU_ISENSE_STATUS) == GPMU_ISENSE_END_POINT_CAL_ERR) {
KGSL_CORE_ERR(
@@ -1551,9 +1452,6 @@ static void a540_lm_init(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK,
VOLTAGE_INTR_EN);
-
- if (lm_on(adreno_dev))
- wake_llm(adreno_dev);
}
@@ -1646,6 +1544,76 @@ static void a5xx_clk_set_options(struct adreno_device *adreno_dev,
}
}
+static void a5xx_count_throttles(struct adreno_device *adreno_dev,
+ uint64_t adj)
+{
+ if (adreno_is_a530(adreno_dev))
+ kgsl_regread(KGSL_DEVICE(adreno_dev),
+ adreno_dev->lm_threshold_count,
+ &adreno_dev->lm_threshold_cross);
+ else if (adreno_is_a540(adreno_dev))
+ adreno_dev->lm_threshold_cross = adj;
+}
+
+static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
+ unsigned int counter)
+{
+ /*
+ * On 5XX we have to emulate the PWR counters which are physically
+ * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
+ * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
+ * to take away too many of the generic RBBM counters.
+ */
+
+ if (counter == 0)
+ return -EINVAL;
+
+ kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
+
+ return 0;
+}
+
+/* FW driven idle 10% throttle */
+#define IDLE_10PCT 0
+/* number of cycles when clock is throttled by 50% (CRC) */
+#define CRC_50PCT 1
+/* number of cycles when clock is throttled by more than 50% (CRC) */
+#define CRC_MORE50PCT 2
+/* number of cycles when clock is throttled by less than 50% (CRC) */
+#define CRC_LESS50PCT 3
+
+static uint64_t a5xx_read_throttling_counters(struct adreno_device *adreno_dev)
+{
+ int i, adj;
+ uint32_t th[ADRENO_GPMU_THROTTLE_COUNTERS];
+ struct adreno_busy_data *busy = &adreno_dev->busy_data;
+
+ if (!adreno_is_a540(adreno_dev))
+ return 0;
+
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return 0;
+
+ if (!test_bit(ADRENO_THROTTLING_CTRL, &adreno_dev->pwrctrl_flag))
+ return 0;
+
+ for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+ if (!adreno_dev->gpmu_throttle_counters[i])
+ return 0;
+
+ th[i] = counter_delta(KGSL_DEVICE(adreno_dev),
+ adreno_dev->gpmu_throttle_counters[i],
+ &busy->throttle_cycles[i]);
+ }
+ adj = th[CRC_MORE50PCT] - th[IDLE_10PCT];
+ adj = th[CRC_50PCT] + th[CRC_LESS50PCT] / 3 + (adj < 0 ? 0 : adj) * 3;
+
+ trace_kgsl_clock_throttling(
+ th[IDLE_10PCT], th[CRC_50PCT],
+ th[CRC_MORE50PCT], th[CRC_LESS50PCT],
+ adj);
+ return adj;
+}
static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
{
@@ -1665,14 +1633,6 @@ static void a5xx_enable_64bit(struct adreno_device *adreno_dev)
kgsl_regwrite(device, A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
}
-static void a5xx_pre_reset(struct adreno_device *adreno_dev)
-{
- if (adreno_is_a540(adreno_dev) && lm_on(adreno_dev)) {
- if (llm_is_awake(adreno_dev))
- sleep_llm(adreno_dev);
- }
-}
-
/*
* a5xx_gpmu_reset() - Re-enable GPMU based power features and restart GPMU
* @work: Pointer to the work struct for gpmu reset
@@ -1707,17 +1667,47 @@ static void a5xx_gpmu_reset(struct work_struct *work)
if (a5xx_regulator_enable(adreno_dev))
goto out;
- a5xx_pre_reset(adreno_dev);
-
/* Soft reset of the GPMU block */
kgsl_regwrite(device, A5XX_RBBM_BLOCK_SW_RESET_CMD, BIT(16));
+ /* GPU comes up in secured mode, make it unsecured by default */
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
+ kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
a5xx_gpmu_init(adreno_dev);
out:
mutex_unlock(&device->mutex);
}
+static void _setup_throttling_counters(struct adreno_device *adreno_dev)
+{
+ int i, ret;
+
+ if (!adreno_is_a540(adreno_dev))
+ return;
+
+ if (!ADRENO_FEATURE(adreno_dev, ADRENO_GPMU))
+ return;
+
+ for (i = 0; i < ADRENO_GPMU_THROTTLE_COUNTERS; i++) {
+ /* reset throttled cycles value */
+ adreno_dev->busy_data.throttle_cycles[i] = 0;
+
+ if (adreno_dev->gpmu_throttle_counters[i] != 0)
+ continue;
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_GPMU_PWR,
+ ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i,
+ &adreno_dev->gpmu_throttle_counters[i],
+ NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+ WARN_ONCE(ret, "Unable to get clock throttling counter %x\n",
+ ADRENO_GPMU_THROTTLE_COUNTERS_BASE_REG + i);
+ }
+}
+
/*
* a5xx_start() - Device start
* @adreno_dev: Pointer to adreno device
@@ -1729,6 +1719,21 @@ static void a5xx_start(struct adreno_device *adreno_dev)
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
unsigned int bit;
+ int ret;
+
+ if (adreno_is_a530(adreno_dev) && ADRENO_FEATURE(adreno_dev, ADRENO_LM)
+ && adreno_dev->lm_threshold_count == 0) {
+
+ ret = adreno_perfcounter_get(adreno_dev,
+ KGSL_PERFCOUNTER_GROUP_GPMU_PWR, 27,
+ &adreno_dev->lm_threshold_count, NULL,
+ PERFCOUNTER_FLAG_KERNEL);
+ /* Ignore noncritical ret - used for debugfs */
+ if (ret)
+ adreno_dev->lm_threshold_count = 0;
+ }
+
+ _setup_throttling_counters(adreno_dev);
adreno_vbif_start(adreno_dev, a5xx_vbif_platforms,
ARRAY_SIZE(a5xx_vbif_platforms));
@@ -2034,11 +2039,6 @@ static int a5xx_post_start(struct adreno_device *adreno_dev)
static int a5xx_gpmu_init(struct adreno_device *adreno_dev)
{
int ret;
- struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
-
- /* GPU comes up in secured mode, make it unsecured by default */
- if (!ADRENO_FEATURE(adreno_dev, ADRENO_CONTENT_PROTECTION))
- kgsl_regwrite(device, A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
/* Set up LM before initializing the GPMU */
a5xx_lm_init(adreno_dev);
@@ -2359,20 +2359,10 @@ static int a5xx_rb_start(struct adreno_device *adreno_dev,
if (ret)
return ret;
- /* Set up LM before initializing the GPMU */
- a5xx_lm_init(adreno_dev);
-
- /* Enable SPTP based power collapse before enabling GPMU */
- a5xx_enable_pc(adreno_dev);
-
- /* Program the GPMU */
- ret = a5xx_gpmu_start(adreno_dev);
+ ret = a5xx_gpmu_init(adreno_dev);
if (ret)
return ret;
- /* Enable limits management */
- a5xx_lm_enable(adreno_dev);
-
a5xx_post_start(adreno_dev);
return 0;
@@ -3534,6 +3524,9 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.regulator_enable = a5xx_regulator_enable,
.regulator_disable = a5xx_regulator_disable,
.pwrlevel_change_settings = a5xx_pwrlevel_change_settings,
+ .read_throttling_counters = a5xx_read_throttling_counters,
+ .count_throttles = a5xx_count_throttles,
+ .enable_pwr_counters = a5xx_enable_pwr_counters,
.preemption_pre_ibsubmit = a5xx_preemption_pre_ibsubmit,
.preemption_yield_enable =
a5xx_preemption_yield_enable,
@@ -3542,6 +3535,5 @@ struct adreno_gpudev adreno_a5xx_gpudev = {
.preemption_init = a5xx_preemption_init,
.preemption_schedule = a5xx_preemption_schedule,
.enable_64bit = a5xx_enable_64bit,
- .pre_reset = a5xx_pre_reset,
.clk_set_options = a5xx_clk_set_options,
};
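
a5xx_read_throttling_counters(), relocated here from adreno.c, condenses four GPMU counters into one busy-cycle adjustment: cycles throttled at exactly 50% count at full weight, cycles throttled below 50% at one third, and cycles throttled above 50%, net of the firmware's own 10% idle throttle, at triple weight. The arithmetic on its own (sketch):

/*
 * Sketch of the throttle weighting in a5xx_read_throttling_counters():
 * adj = crc50 + under50/3 + 3 * max(0, over50 - idle10).
 * Inputs are the per-sample deltas of the four GPMU counters.
 */
static long throttle_adjust(long idle10, long crc50, long over50, long under50)
{
        long adj = over50 - idle10;

        if (adj < 0)
                adj = 0;
        return crc50 + under50 / 3 + adj * 3;
}
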
diff --git a/drivers/gpu/msm/adreno_a5xx_preempt.c b/drivers/gpu/msm/adreno_a5xx_preempt.c
index 4baee4a5c0b1..09c550c9f58c 100644
--- a/drivers/gpu/msm/adreno_a5xx_preempt.c
+++ b/drivers/gpu/msm/adreno_a5xx_preempt.c
@@ -37,7 +37,7 @@ static void _update_wptr(struct adreno_device *adreno_dev)
rb->wptr);
rb->dispatch_q.expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
}
spin_unlock_irqrestore(&rb->preempt_lock, flags);
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index 680827e5b848..fffe08038bcd 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -129,7 +129,7 @@ typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
static void sync_event_print(struct seq_file *s,
- struct kgsl_cmdbatch_sync_event *sync_event)
+ struct kgsl_drawobj_sync_event *sync_event)
{
switch (sync_event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP: {
@@ -153,12 +153,12 @@ struct flag_entry {
const char *str;
};
-static const struct flag_entry cmdbatch_flags[] = {KGSL_CMDBATCH_FLAGS};
+static const struct flag_entry drawobj_flags[] = {KGSL_DRAWOBJ_FLAGS};
-static const struct flag_entry cmdbatch_priv[] = {
- { CMDBATCH_FLAG_SKIP, "skip"},
- { CMDBATCH_FLAG_FORCE_PREAMBLE, "force_preamble"},
- { CMDBATCH_FLAG_WFI, "wait_for_idle" },
+static const struct flag_entry cmdobj_priv[] = {
+ { CMDOBJ_SKIP, "skip"},
+ { CMDOBJ_FORCE_PREAMBLE, "force_preamble"},
+ { CMDOBJ_WFI, "wait_for_idle" },
};
static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
@@ -199,42 +199,54 @@ static void print_flags(struct seq_file *s, const struct flag_entry *table,
seq_puts(s, "None");
}
-static void cmdbatch_print(struct seq_file *s, struct kgsl_cmdbatch *cmdbatch)
+static void syncobj_print(struct seq_file *s,
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- /* print fences first, since they block this cmdbatch */
+ seq_puts(s, " syncobj ");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
- /*
- * Timestamp is 0 for KGSL_CONTEXT_SYNC, but print it anyways
- * so that it is clear if the fence was a separate submit
- * or part of an IB submit.
- */
- seq_printf(s, "\t%d ", cmdbatch->timestamp);
sync_event_print(s, event);
seq_puts(s, "\n");
}
+}
- /* if this flag is set, there won't be an IB */
- if (cmdbatch->flags & KGSL_CONTEXT_SYNC)
- return;
+static void cmdobj_print(struct seq_file *s,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- seq_printf(s, "\t%d: ", cmdbatch->timestamp);
+ if (drawobj->type == CMDOBJ_TYPE)
+ seq_puts(s, " cmdobj ");
+ else
+ seq_puts(s, " markerobj ");
- seq_puts(s, " flags: ");
- print_flags(s, cmdbatch_flags, ARRAY_SIZE(cmdbatch_flags),
- cmdbatch->flags);
+ seq_printf(s, "\t %d ", drawobj->timestamp);
seq_puts(s, " priv: ");
- print_flags(s, cmdbatch_priv, ARRAY_SIZE(cmdbatch_priv),
- cmdbatch->priv);
+ print_flags(s, cmdobj_priv, ARRAY_SIZE(cmdobj_priv),
+ cmdobj->priv);
+}
+
+static void drawobj_print(struct seq_file *s,
+ struct kgsl_drawobj *drawobj)
+{
+ if (drawobj->type == SYNCOBJ_TYPE)
+ syncobj_print(s, SYNCOBJ(drawobj));
+ else if ((drawobj->type == CMDOBJ_TYPE) ||
+ (drawobj->type == MARKEROBJ_TYPE))
+ cmdobj_print(s, CMDOBJ(drawobj));
+
+ seq_puts(s, " flags: ");
+ print_flags(s, drawobj_flags, ARRAY_SIZE(drawobj_flags),
+ drawobj->flags);
seq_puts(s, "\n");
}
@@ -285,13 +297,13 @@ static int ctx_print(struct seq_file *s, void *unused)
queued, consumed, retired,
drawctxt->internal_timestamp);
- seq_puts(s, "cmdqueue:\n");
+ seq_puts(s, "drawqueue:\n");
spin_lock(&drawctxt->lock);
- for (i = drawctxt->cmdqueue_head;
- i != drawctxt->cmdqueue_tail;
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE))
- cmdbatch_print(s, drawctxt->cmdqueue[i]);
+ for (i = drawctxt->drawqueue_head;
+ i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE))
+ drawobj_print(s, drawctxt->drawqueue[i]);
spin_unlock(&drawctxt->lock);
seq_puts(s, "events:\n");
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 522c32743d3d..cb4108b4e1f9 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -25,7 +25,7 @@
#include "adreno_trace.h"
#include "kgsl_sharedmem.h"
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/* Time in ms after which the dispatcher tries to schedule an unscheduled RB */
unsigned int adreno_dispatch_starvation_time = 2000;
@@ -43,13 +43,13 @@ unsigned int adreno_dispatch_time_slice = 25;
unsigned int adreno_disp_preempt_fair_sched;
/* Number of commands that can be queued in a context before it sleeps */
-static unsigned int _context_cmdqueue_size = 50;
+static unsigned int _context_drawqueue_size = 50;
/* Number of milliseconds to wait for the context queue to clear */
static unsigned int _context_queue_wait = 10000;
-/* Number of command batches sent at a time from a single context */
-static unsigned int _context_cmdbatch_burst = 5;
+/* Number of drawobjs sent at a time from a single context */
+static unsigned int _context_drawobj_burst = 5;
/*
* GFT throttle parameters. If GFT recovered more than
@@ -73,24 +73,25 @@ static unsigned int _dispatcher_q_inflight_hi = 15;
static unsigned int _dispatcher_q_inflight_lo = 4;
/* Command batch timeout (in milliseconds) */
-unsigned int adreno_cmdbatch_timeout = 2000;
+unsigned int adreno_drawobj_timeout = 2000;
/* Interval for reading and comparing fault detection registers */
static unsigned int _fault_timer_interval = 200;
-#define CMDQUEUE_RB(_cmdqueue) \
+#define DRAWQUEUE_RB(_drawqueue) \
((struct adreno_ringbuffer *) \
- container_of((_cmdqueue), struct adreno_ringbuffer, dispatch_q))
+ container_of((_drawqueue),\
+ struct adreno_ringbuffer, dispatch_q))
-#define CMDQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
+#define DRAWQUEUE(_ringbuffer) (&(_ringbuffer)->dispatch_q)
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue);
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue);
-static inline bool cmdqueue_is_current(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool drawqueue_is_current(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- struct adreno_ringbuffer *rb = CMDQUEUE_RB(cmdqueue);
+ struct adreno_ringbuffer *rb = DRAWQUEUE_RB(drawqueue);
struct adreno_device *adreno_dev = ADRENO_RB_DEVICE(rb);
return (adreno_dev->cur_rb == rb);
@@ -114,7 +115,8 @@ static int __count_context(struct adreno_context *drawctxt, void *data)
return time_after(jiffies, expires) ? 0 : 1;
}
-static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
+static int __count_drawqueue_context(struct adreno_context *drawctxt,
+ void *data)
{
unsigned long expires = drawctxt->active_time + msecs_to_jiffies(100);
@@ -122,7 +124,7 @@ static int __count_cmdqueue_context(struct adreno_context *drawctxt, void *data)
return 0;
return (&drawctxt->rb->dispatch_q ==
- (struct adreno_dispatcher_cmdqueue *) data) ? 1 : 0;
+ (struct adreno_dispatcher_drawqueue *) data) ? 1 : 0;
}
static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
@@ -142,7 +144,7 @@ static int _adreno_count_active_contexts(struct adreno_device *adreno_dev,
}
static void _track_context(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue,
+ struct adreno_dispatcher_drawqueue *drawqueue,
struct adreno_context *drawctxt)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
@@ -154,9 +156,9 @@ static void _track_context(struct adreno_device *adreno_dev,
device->active_context_count =
_adreno_count_active_contexts(adreno_dev,
__count_context, NULL);
- cmdqueue->active_context_count =
+ drawqueue->active_context_count =
_adreno_count_active_contexts(adreno_dev,
- __count_cmdqueue_context, cmdqueue);
+ __count_drawqueue_context, drawqueue);
spin_unlock(&adreno_dev->active_list_lock);
}
@@ -169,9 +171,9 @@ static void _track_context(struct adreno_device *adreno_dev,
*/
static inline int
-_cmdqueue_inflight(struct adreno_dispatcher_cmdqueue *cmdqueue)
+_drawqueue_inflight(struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue->active_context_count > 1)
+ return (drawqueue->active_context_count > 1)
? _dispatcher_q_inflight_lo : _dispatcher_q_inflight_hi;
}
@@ -271,20 +273,20 @@ static void start_fault_timer(struct adreno_device *adreno_dev)
}
/**
- * _retire_marker() - Retire a marker command batch without sending it to the
- * hardware
- * @cmdbatch: Pointer to the cmdbatch to retire
+ * _retire_timestamp() - Retire a drawobj without sending it
+ * to the hardware
+ * @drawobj: Pointer to the drawobj to retire
*
- * In some cases marker commands can be retired by the software without going to
- * the GPU. In those cases, update the memstore from the CPU, kick off the
- * event engine to handle expired events and destroy the command batch.
+ * In some cases a drawobj can be retired by the software
+ * without going to the GPU. In those cases, update the
+ * memstore from the CPU, kick off the event engine to handle
+ * expired events and destroy the drawobj.
*/
-static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
+static void _retire_timestamp(struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_context *context = drawobj->context;
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
struct kgsl_device *device = context->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/*
* Write the start and end timestamp to the memstore to keep the
@@ -292,11 +294,11 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
*/
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
kgsl_sharedmem_writel(device, &device->memstore,
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp),
- cmdbatch->timestamp);
+ drawobj->timestamp);
/* Retire pending GPU events for the object */
@@ -307,13 +309,13 @@ static void _retire_marker(struct kgsl_cmdbatch *cmdbatch)
* rptr scratch out address. At this point GPU clocks turned off.
* So avoid reading GPU register directly for A3xx.
*/
- if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- 0);
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)))
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ 0, 0);
else
- trace_adreno_cmdbatch_retired(cmdbatch, -1, 0, 0, drawctxt->rb,
- adreno_get_rptr(drawctxt->rb));
- kgsl_cmdbatch_destroy(cmdbatch);
+ trace_adreno_cmdbatch_retired(drawobj, -1, 0, 0, drawctxt->rb,
+ adreno_get_rptr(drawctxt->rb), 0);
+ kgsl_drawobj_destroy(drawobj);
}
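_retire_timestamp() can fake a retire from the CPU because waiters only ever consult the memstore: writing the object's timestamp into both the start-of-pipe (soptimestamp) and end-of-pipe (eoptimestamp) slots makes it indistinguishable from a GPU retire. Condensed into a hypothetical helper:

    /* Mark timestamp 'ts' fully retired for context 'id' without the GPU */
    static void memstore_cpu_retire(struct kgsl_device *device,
            unsigned int id, unsigned int ts)
    {
        kgsl_sharedmem_writel(device, &device->memstore,
                KGSL_MEMSTORE_OFFSET(id, soptimestamp), ts);
        kgsl_sharedmem_writel(device, &device->memstore,
                KGSL_MEMSTORE_OFFSET(id, eoptimestamp), ts);
    }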
static int _check_context_queue(struct adreno_context *drawctxt)
@@ -330,7 +332,7 @@ static int _check_context_queue(struct adreno_context *drawctxt)
if (kgsl_context_invalid(&drawctxt->base))
ret = 1;
else
- ret = drawctxt->queued < _context_cmdqueue_size ? 1 : 0;
+ ret = drawctxt->queued < _context_drawqueue_size ? 1 : 0;
spin_unlock(&drawctxt->lock);
@@ -341,176 +343,151 @@ static int _check_context_queue(struct adreno_context *drawctxt)
* return true if this is a marker command and the dependent timestamp has
* retired
*/
-static bool _marker_expired(struct kgsl_cmdbatch *cmdbatch)
-{
- return (cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- kgsl_check_timestamp(cmdbatch->device, cmdbatch->context,
- cmdbatch->marker_timestamp);
-}
-
-static inline void _pop_cmdbatch(struct adreno_context *drawctxt)
+static bool _marker_expired(struct kgsl_drawobj_cmd *markerobj)
{
- drawctxt->cmdqueue_head = CMDQUEUE_NEXT(drawctxt->cmdqueue_head,
- ADRENO_CONTEXT_CMDQUEUE_SIZE);
- drawctxt->queued--;
-}
-/**
- * Removes all expired marker and sync cmdbatches from
- * the context queue when marker command and dependent
- * timestamp are retired. This function is recursive.
- * returns cmdbatch if context has command, NULL otherwise.
- */
-static struct kgsl_cmdbatch *_expire_markers(struct adreno_context *drawctxt)
-{
- struct kgsl_cmdbatch *cmdbatch;
-
- if (drawctxt->cmdqueue_head == drawctxt->cmdqueue_tail)
- return NULL;
-
- cmdbatch = drawctxt->cmdqueue[drawctxt->cmdqueue_head];
-
- if (cmdbatch == NULL)
- return NULL;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
- /* Check to see if this is a marker we can skip over */
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- _marker_expired(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- _retire_marker(cmdbatch);
- return _expire_markers(drawctxt);
- }
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- if (!kgsl_cmdbatch_events_pending(cmdbatch)) {
- _pop_cmdbatch(drawctxt);
- kgsl_cmdbatch_destroy(cmdbatch);
- return _expire_markers(drawctxt);
- }
- }
-
- return cmdbatch;
+ return (drawobj->flags & KGSL_DRAWOBJ_MARKER) &&
+ kgsl_check_timestamp(drawobj->device, drawobj->context,
+ markerobj->marker_timestamp);
}
-static void expire_markers(struct adreno_context *drawctxt)
+static inline void _pop_drawobj(struct adreno_context *drawctxt)
{
- spin_lock(&drawctxt->lock);
- _expire_markers(drawctxt);
- spin_unlock(&drawctxt->lock);
+ drawctxt->drawqueue_head = DRAWQUEUE_NEXT(drawctxt->drawqueue_head,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE);
+ drawctxt->queued--;
}
-static struct kgsl_cmdbatch *_get_cmdbatch(struct adreno_context *drawctxt)
+static int _retire_markerobj(struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
- bool pending = false;
-
- cmdbatch = _expire_markers(drawctxt);
-
- if (cmdbatch == NULL)
- return NULL;
+ if (_marker_expired(cmdobj)) {
+ _pop_drawobj(drawctxt);
+ _retire_timestamp(DRAWOBJ(cmdobj));
+ return 0;
+ }
/*
- * If the marker isn't expired but the SKIP bit is set
- * then there are real commands following this one in
- * the queue. This means that we need to dispatch the
- * command so that we can keep the timestamp accounting
- * correct. If skip isn't set then we block this queue
+ * If the marker isn't expired but the SKIP bit
+ * is set then there are real commands following
+ * this one in the queue. This means that we
+ * need to dispatch the command so that we can
+ * keep the timestamp accounting correct. If
+ * skip isn't set then we block this queue
* until the dependent timestamp expires
*/
- if ((cmdbatch->flags & KGSL_CMDBATCH_MARKER) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)))
- pending = true;
+ return test_bit(CMDOBJ_SKIP, &cmdobj->priv) ? 1 : -EAGAIN;
+}
- if (kgsl_cmdbatch_events_pending(cmdbatch))
- pending = true;
+static int _retire_syncobj(struct kgsl_drawobj_sync *syncobj,
+ struct adreno_context *drawctxt)
+{
+ if (!kgsl_drawobj_events_pending(syncobj)) {
+ _pop_drawobj(drawctxt);
+ kgsl_drawobj_destroy(DRAWOBJ(syncobj));
+ return 0;
+ }
/*
- * If changes are pending and the canary timer hasn't been
- * started yet, start it
+	 * If we got here, there are pending events for the sync object.
+	 * Start the canary timer if it hasn't been started already.
*/
- if (pending) {
- /*
- * If syncpoints are pending start the canary timer if
- * it hasn't already been started
- */
- if (!cmdbatch->timeout_jiffies) {
- cmdbatch->timeout_jiffies =
- jiffies + msecs_to_jiffies(5000);
- mod_timer(&cmdbatch->timer, cmdbatch->timeout_jiffies);
- }
-
- return ERR_PTR(-EAGAIN);
+ if (!syncobj->timeout_jiffies) {
+ syncobj->timeout_jiffies = jiffies + msecs_to_jiffies(5000);
+ mod_timer(&syncobj->timer, syncobj->timeout_jiffies);
}
- _pop_cmdbatch(drawctxt);
- return cmdbatch;
+ return -EAGAIN;
}
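The two _retire_*obj helpers share a small return protocol: 0 means the object was retired in place and the queue walk can continue, 1 (markers only) means the object must still be sent to the GPU, and -EAGAIN means the queue is blocked behind it. A sketch of how a caller reacts to each code (dispatch() is a placeholder here; the real consumer is the drain loop just below):

    switch (_retire_markerobj(cmdobj, drawctxt)) {
    case 0:
        break;            /* retired on the CPU, look at the next entry */
    case 1:
        dispatch(cmdobj); /* SKIP set: send it to keep timestamps linear */
        break;
    case -EAGAIN:
        return;           /* dependent timestamp unexpired, stop draining */
    }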
-/**
- * adreno_dispatcher_get_cmdbatch() - Get a new command from a context queue
- * @drawctxt: Pointer to the adreno draw context
- *
- * Dequeue a new command batch from the context list
+/*
+ * Retires all expired marker and sync objs from the context
+ * queue and returns one of the below:
+ * a) next drawobj that needs to be sent to the ringbuffer
+ * b) -EAGAIN for a syncobj with syncpoints pending
+ * c) -EAGAIN for a markerobj whose marker timestamp has not expired yet
+ * d) NULL when no commands remain in the drawqueue
*/
-static struct kgsl_cmdbatch *adreno_dispatcher_get_cmdbatch(
- struct adreno_context *drawctxt)
+static struct kgsl_drawobj *_process_drawqueue_get_next_drawobj(
+ struct adreno_context *drawctxt)
{
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ unsigned int i = drawctxt->drawqueue_head;
+ int ret = 0;
- spin_lock(&drawctxt->lock);
- cmdbatch = _get_cmdbatch(drawctxt);
- spin_unlock(&drawctxt->lock);
+ if (drawctxt->drawqueue_head == drawctxt->drawqueue_tail)
+ return NULL;
- /*
- * Delete the timer and wait for timer handler to finish executing
- * on another core before queueing the buffer. We must do this
- * without holding any spin lock that the timer handler might be using
- */
- if (!IS_ERR_OR_NULL(cmdbatch))
- del_timer_sync(&cmdbatch->timer);
+ for (i = drawctxt->drawqueue_head; i != drawctxt->drawqueue_tail;
+ i = DRAWQUEUE_NEXT(i, ADRENO_CONTEXT_DRAWQUEUE_SIZE)) {
+
+ drawobj = drawctxt->drawqueue[i];
+
+ if (drawobj == NULL)
+ return NULL;
+
+ if (drawobj->type == CMDOBJ_TYPE)
+ return drawobj;
+ else if (drawobj->type == MARKEROBJ_TYPE) {
+ ret = _retire_markerobj(CMDOBJ(drawobj), drawctxt);
+ /* Special case where marker needs to be sent to GPU */
+ if (ret == 1)
+ return drawobj;
+ } else if (drawobj->type == SYNCOBJ_TYPE)
+ ret = _retire_syncobj(SYNCOBJ(drawobj), drawctxt);
+
+ if (ret == -EAGAIN)
+ return ERR_PTR(-EAGAIN);
+
+ continue;
+ }
- return cmdbatch;
+ return NULL;
}
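Callers must separate three outcomes from _process_drawqueue_get_next_drawobj(): a usable drawobj, NULL (queue drained), and ERR_PTR(-EAGAIN) (queue blocked on a marker or sync point). A minimal sketch of the expected handling, mirroring dispatcher_context_sendcmds() further down; note the object is only popped after a successful fetch, under the same lock:

    struct kgsl_drawobj *drawobj;
    int ret;

    spin_lock(&drawctxt->lock);
    drawobj = _process_drawqueue_get_next_drawobj(drawctxt);

    if (IS_ERR_OR_NULL(drawobj)) {
        /* NULL: nothing left; ERR_PTR(-EAGAIN): blocked for now */
        ret = IS_ERR(drawobj) ? PTR_ERR(drawobj) : 0;
        spin_unlock(&drawctxt->lock);
        return ret;
    }

    _pop_drawobj(drawctxt);   /* claim the slot before dropping the lock */
    spin_unlock(&drawctxt->lock);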
/**
- * adreno_dispatcher_requeue_cmdbatch() - Put a command back on the context
+ * adreno_dispatcher_requeue_cmdobj() - Put a command back on the context
* queue
* @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the KGSL cmdbatch to requeue
+ * @cmdobj: Pointer to the KGSL command object to requeue
*
* Failure to submit a command to the ringbuffer isn't the fault of the command
 * being submitted so if a failure happens, push it back on the head of the
* context queue to be reconsidered again unless the context got detached.
*/
-static inline int adreno_dispatcher_requeue_cmdbatch(
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch)
+static inline int adreno_dispatcher_requeue_cmdobj(
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
unsigned int prev;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
spin_lock(&drawctxt->lock);
if (kgsl_context_detached(&drawctxt->base) ||
kgsl_context_invalid(&drawctxt->base)) {
spin_unlock(&drawctxt->lock);
- /* get rid of this cmdbatch since the context is bad */
- kgsl_cmdbatch_destroy(cmdbatch);
+ /* get rid of this drawobj since the context is bad */
+ kgsl_drawobj_destroy(drawobj);
return -ENOENT;
}
- prev = drawctxt->cmdqueue_head == 0 ?
- (ADRENO_CONTEXT_CMDQUEUE_SIZE - 1) :
- (drawctxt->cmdqueue_head - 1);
+ prev = drawctxt->drawqueue_head == 0 ?
+ (ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1) :
+ (drawctxt->drawqueue_head - 1);
/*
* The maximum queue size always needs to be one less then the size of
- * the ringbuffer queue so there is "room" to put the cmdbatch back in
+ * the ringbuffer queue so there is "room" to put the drawobj back in
*/
- BUG_ON(prev == drawctxt->cmdqueue_tail);
+ WARN_ON(prev == drawctxt->drawqueue_tail);
- drawctxt->cmdqueue[prev] = cmdbatch;
+ drawctxt->drawqueue[prev] = drawobj;
drawctxt->queued++;
/* Reset the command queue head to reflect the newly requeued change */
- drawctxt->cmdqueue_head = prev;
+ drawctxt->drawqueue_head = prev;
spin_unlock(&drawctxt->lock);
return 0;
}
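Requeueing places the object one slot before the current head, so the index math is a decrement with wraparound — the mirror image of DRAWQUEUE_NEXT. In isolation (size name shortened for the sketch):

    /* Step a ring index backwards by one, wrapping below zero */
    prev = (head == 0) ? (SIZE - 1) : (head - 1);

Because the context queue is capped one entry short of its size, prev can never collide with the tail; the WARN_ON above guards that invariant instead of the old BUG_ON.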
@@ -545,21 +522,22 @@ static void dispatcher_queue_context(struct adreno_device *adreno_dev,
}
/**
- * sendcmd() - Send a command batch to the GPU hardware
+ * sendcmd() - Send a drawobj to the GPU hardware
* @dispatcher: Pointer to the adreno dispatcher struct
- * @cmdbatch: Pointer to the KGSL cmdbatch being sent
+ * @drawobj: Pointer to the KGSL drawobj being sent
*
- * Send a KGSL command batch to the GPU hardware
+ * Send a KGSL drawobj to the GPU hardware
*/
static int sendcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
+ struct adreno_dispatcher_drawqueue *dispatch_q =
+ ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj);
struct adreno_submit_time time;
uint64_t secs = 0;
unsigned long nsecs = 0;
@@ -588,15 +566,15 @@ static int sendcmd(struct adreno_device *adreno_dev,
set_bit(ADRENO_DISPATCHER_POWER, &dispatcher->priv);
}
- if (test_bit(ADRENO_DEVICE_CMDBATCH_PROFILE, &adreno_dev->priv)) {
- set_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv);
- cmdbatch->profile_index = adreno_dev->cmdbatch_profile_index;
- adreno_dev->cmdbatch_profile_index =
- (adreno_dev->cmdbatch_profile_index + 1) %
- ADRENO_CMDBATCH_PROFILE_COUNT;
+ if (test_bit(ADRENO_DEVICE_DRAWOBJ_PROFILE, &adreno_dev->priv)) {
+ set_bit(CMDOBJ_PROFILE, &cmdobj->priv);
+ cmdobj->profile_index = adreno_dev->profile_index;
+ adreno_dev->profile_index =
+ (adreno_dev->profile_index + 1) %
+ ADRENO_DRAWOBJ_PROFILE_COUNT;
}
- ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdbatch, &time);
+ ret = adreno_ringbuffer_submitcmd(adreno_dev, cmdobj, &time);
/*
* On the first command, if the submission was successful, then read the
@@ -649,17 +627,17 @@ static int sendcmd(struct adreno_device *adreno_dev,
secs = time.ktime;
nsecs = do_div(secs, 1000000000);
- trace_adreno_cmdbatch_submitted(cmdbatch, (int) dispatcher->inflight,
+ trace_adreno_cmdbatch_submitted(drawobj, (int) dispatcher->inflight,
time.ticks, (unsigned long) secs, nsecs / 1000, drawctxt->rb,
adreno_get_rptr(drawctxt->rb));
mutex_unlock(&device->mutex);
- cmdbatch->submit_ticks = time.ticks;
+ cmdobj->submit_ticks = time.ticks;
- dispatch_q->cmd_q[dispatch_q->tail] = cmdbatch;
+ dispatch_q->cmd_q[dispatch_q->tail] = cmdobj;
dispatch_q->tail = (dispatch_q->tail + 1) %
- ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE;
/*
* For the first submission in any given command queue update the
@@ -670,7 +648,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
if (dispatch_q->inflight == 1)
dispatch_q->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ msecs_to_jiffies(adreno_drawobj_timeout);
/*
* If we believe ourselves to be current and preemption isn't a thing,
@@ -678,7 +656,7 @@ static int sendcmd(struct adreno_device *adreno_dev,
* thing and the timer will be set up in due time
*/
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- if (cmdqueue_is_current(dispatch_q))
+ if (drawqueue_is_current(dispatch_q))
mod_timer(&dispatcher->timer, dispatch_q->expires);
}
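On success sendcmd() appends the cmdobj to the ringbuffer's dispatch queue FIFO and arms the expiry timer only for the first in-flight entry, since later entries are necessarily younger. The insertion pattern, condensed from the hunk above:

    /* FIFO insert at the tail, then advance with wraparound */
    dispatch_q->cmd_q[dispatch_q->tail] = cmdobj;
    dispatch_q->tail = DRAWQUEUE_NEXT(dispatch_q->tail,
                    ADRENO_DISPATCH_DRAWQUEUE_SIZE);

    /* Only the oldest entry needs a watchdog */
    if (dispatch_q->inflight == 1)
        dispatch_q->expires = jiffies +
            msecs_to_jiffies(adreno_drawobj_timeout);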
@@ -704,75 +682,70 @@ static int sendcmd(struct adreno_device *adreno_dev,
static int dispatcher_context_sendcmds(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(drawctxt->rb->dispatch_q);
int count = 0;
int ret = 0;
- int inflight = _cmdqueue_inflight(dispatch_q);
+ int inflight = _drawqueue_inflight(dispatch_q);
unsigned int timestamp;
if (dispatch_q->inflight >= inflight) {
- expire_markers(drawctxt);
+ spin_lock(&drawctxt->lock);
+ _process_drawqueue_get_next_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
return -EBUSY;
}
/*
- * Each context can send a specific number of command batches per cycle
+ * Each context can send a specific number of drawobjs per cycle
*/
- while ((count < _context_cmdbatch_burst) &&
+ while ((count < _context_drawobj_burst) &&
(dispatch_q->inflight < inflight)) {
- struct kgsl_cmdbatch *cmdbatch;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
if (adreno_gpu_fault(adreno_dev) != 0)
break;
- cmdbatch = adreno_dispatcher_get_cmdbatch(drawctxt);
+ spin_lock(&drawctxt->lock);
+ drawobj = _process_drawqueue_get_next_drawobj(drawctxt);
/*
- * adreno_context_get_cmdbatch returns -EAGAIN if the current
- * cmdbatch has pending sync points so no more to do here.
+		 * _process_drawqueue_get_next_drawobj returns -EAGAIN if the
+		 * current drawobj has pending sync points so no more to do here.
* When the sync points are satisfied then the context will get
		 * requeued
*/
- if (IS_ERR_OR_NULL(cmdbatch)) {
- if (IS_ERR(cmdbatch))
- ret = PTR_ERR(cmdbatch);
+ if (IS_ERR_OR_NULL(drawobj)) {
+ if (IS_ERR(drawobj))
+ ret = PTR_ERR(drawobj);
+ spin_unlock(&drawctxt->lock);
break;
}
+ _pop_drawobj(drawctxt);
+ spin_unlock(&drawctxt->lock);
- /*
- * If this is a synchronization submission then there are no
- * commands to submit. Discard it and get the next item from
- * the queue. Decrement count so this packet doesn't count
- * against the burst for the context
- */
-
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- kgsl_cmdbatch_destroy(cmdbatch);
- continue;
- }
-
- timestamp = cmdbatch->timestamp;
-
- ret = sendcmd(adreno_dev, cmdbatch);
+ timestamp = drawobj->timestamp;
+ cmdobj = CMDOBJ(drawobj);
+ ret = sendcmd(adreno_dev, cmdobj);
/*
- * On error from sendcmd() try to requeue the command batch
+ * On error from sendcmd() try to requeue the cmdobj
* unless we got back -ENOENT which means that the context has
* been detached and there will be no more deliveries from here
*/
if (ret != 0) {
- /* Destroy the cmdbatch on -ENOENT */
+ /* Destroy the cmdobj on -ENOENT */
if (ret == -ENOENT)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
else {
/*
* If the requeue returns an error, return that
* instead of whatever sendcmd() sent us
*/
- int r = adreno_dispatcher_requeue_cmdbatch(
- drawctxt, cmdbatch);
+ int r = adreno_dispatcher_requeue_cmdobj(
+ drawctxt, cmdobj);
if (r)
ret = r;
}
@@ -934,99 +907,87 @@ static void adreno_dispatcher_issuecmds(struct adreno_device *adreno_dev)
/**
* get_timestamp() - Return the next timestamp for the context
* @drawctxt - Pointer to an adreno draw context struct
- * @cmdbatch - Pointer to a command batch
+ * @drawobj - Pointer to a drawobj
* @timestamp - Pointer to a timestamp value possibly passed from the user
+ * @user_ts - User-generated timestamp
*
* Assign a timestamp based on the settings of the draw context and the command
* batch.
*/
static int get_timestamp(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch, unsigned int *timestamp)
+ struct kgsl_drawobj *drawobj, unsigned int *timestamp,
+ unsigned int user_ts)
{
- /* Synchronization commands don't get a timestamp */
- if (cmdbatch->flags & KGSL_CMDBATCH_SYNC) {
- *timestamp = 0;
- return 0;
- }
if (drawctxt->base.flags & KGSL_CONTEXT_USER_GENERATED_TS) {
/*
* User specified timestamps need to be greater than the last
* issued timestamp in the context
*/
- if (timestamp_cmp(drawctxt->timestamp, *timestamp) >= 0)
+ if (timestamp_cmp(drawctxt->timestamp, user_ts) >= 0)
return -ERANGE;
- drawctxt->timestamp = *timestamp;
+ drawctxt->timestamp = user_ts;
} else
drawctxt->timestamp++;
*timestamp = drawctxt->timestamp;
+ drawobj->timestamp = *timestamp;
return 0;
}
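With KGSL_CONTEXT_USER_GENERATED_TS set, get_timestamp() rejects any user value that does not strictly exceed the last issued timestamp (timestamp_cmp() is the driver's wraparound-safe comparator); otherwise the driver auto-increments. A worked example, assuming the context's last issued timestamp is 100:

    unsigned int ts;

    get_timestamp(drawctxt, drawobj, &ts, 100);  /* -ERANGE: not greater  */
    get_timestamp(drawctxt, drawobj, &ts, 99);   /* -ERANGE: went backward */
    get_timestamp(drawctxt, drawobj, &ts, 101);  /* 0: ts == 101 accepted  */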
-/**
- * adreno_dispactcher_queue_cmd() - Queue a new command in the context
- * @adreno_dev: Pointer to the adreno device struct
- * @drawctxt: Pointer to the adreno draw context
- * @cmdbatch: Pointer to the command batch being submitted
- * @timestamp: Pointer to the requested timestamp
- *
- * Queue a command in the context - if there isn't any room in the queue, then
- * block until there is
- */
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
+static void _set_ft_policy(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
{
- struct adreno_dispatcher_cmdqueue *dispatch_q =
- ADRENO_CMDBATCH_DISPATCH_CMDQUEUE(cmdbatch);
- int ret;
-
- spin_lock(&drawctxt->lock);
-
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+ /*
+ * Set the fault tolerance policy for the command batch - assuming the
+ * context hasn't disabled FT use the current device policy
+ */
+ if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ set_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy);
+ else
+ cmdobj->fault_policy = adreno_dev->ft_policy;
+}
+static void _cmdobj_set_flags(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_cmd *cmdobj)
+{
/*
* Force the preamble for this submission only - this is usually
* requested by the dispatcher as part of fault recovery
*/
-
if (test_and_clear_bit(ADRENO_CONTEXT_FORCE_PREAMBLE,
&drawctxt->base.priv))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/*
- * Force the premable if set from userspace in the context or cmdbatch
- * flags
+	 * Force the preamble if set from userspace in the context or
+ * command obj flags
*/
-
if ((drawctxt->base.flags & KGSL_CONTEXT_CTX_SWITCH) ||
- (cmdbatch->flags & KGSL_CMDBATCH_CTX_SWITCH))
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ (cmdobj->base.flags & KGSL_DRAWOBJ_CTX_SWITCH))
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
- /* Skip this cmdbatch commands if IFH_NOP is enabled */
+ /* Skip this ib if IFH_NOP is enabled */
if (drawctxt->base.flags & KGSL_CONTEXT_IFH_NOP)
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
* If we are waiting for the end of frame and it hasn't appeared yet,
- * then mark the command batch as skipped. It will still progress
+ * then mark the command obj as skipped. It will still progress
* through the pipeline but it won't actually send any commands
*/
if (test_bit(ADRENO_CONTEXT_SKIP_EOF, &drawctxt->base.priv)) {
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
/*
- * If this command batch represents the EOF then clear the way
+ * If this command obj represents the EOF then clear the way
* for the dispatcher to continue submitting
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_END_OF_FRAME) {
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_END_OF_FRAME) {
clear_bit(ADRENO_CONTEXT_SKIP_EOF,
&drawctxt->base.priv);
@@ -1038,10 +999,84 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
&drawctxt->base.priv);
}
}
+}
- /* Wait for room in the context queue */
+static inline int _check_context_state(struct kgsl_context *context)
+{
+ if (kgsl_context_invalid(context))
+ return -EDEADLK;
+
+ if (kgsl_context_detached(context))
+ return -ENOENT;
+
+ return 0;
+}
+
+static inline bool _verify_ib(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_memobj_node *ib)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_process_private *private = dev_priv->process_priv;
+
+ /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
+ if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
+ pr_context(device, context, "ctxt %d invalid ib size %lld\n",
+ context->id, ib->size);
+ return false;
+ }
+
+ /* Make sure that the address is mapped */
+ if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
+ pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
+ context->id, ib->gpuaddr);
+ return false;
+ }
+
+ return true;
+}
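_verify_ib() rejects an IB that is empty or larger than 0xFFFFF dwords (the CP's hard limit, hence the byte size checked as size >> 2), and one whose gpuaddr is not mapped in the submitting process's pagetable. _verify_cmdobj() below applies it to every IB in a command object before anything is queued; sketched on its own:

    struct kgsl_memobj_node *ib;

    list_for_each_entry(ib, &cmdobj->cmdlist, node) {
        if (!_verify_ib(dev_priv, context, ib))
            return -EINVAL;   /* bad size or unmapped gpuaddr */
    }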
+
+static inline int _verify_cmdobj(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count)
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct kgsl_memobj_node *ib;
+ unsigned int i;
+
+ for (i = 0; i < count; i++) {
+ /* Verify the IBs before they get queued */
+ if (drawobj[i]->type == CMDOBJ_TYPE) {
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj[i]);
+
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
+ if (_verify_ib(dev_priv,
+ &ADRENO_CONTEXT(context)->base, ib)
+ == false)
+ return -EINVAL;
+ /*
+ * Clear the wake on touch bit to indicate an IB has
+ * been submitted since the last time we set it.
+ * But only clear it when we have rendering commands.
+ */
+ device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
+ }
+
+ /* A3XX does not have support for drawobj profiling */
+ if (adreno_is_a3xx(ADRENO_DEVICE(device)) &&
+ (drawobj[i]->flags & KGSL_DRAWOBJ_PROFILING))
+ return -EOPNOTSUPP;
+ }
- while (drawctxt->queued >= _context_cmdqueue_size) {
+ return 0;
+}
+
+static inline int _wait_for_room_in_context_queue(
+ struct adreno_context *drawctxt)
+{
+ int ret = 0;
+
+ /* Wait for room in the context queue */
+ while (drawctxt->queued >= _context_drawqueue_size) {
trace_adreno_drawctxt_sleep(drawctxt);
spin_unlock(&drawctxt->lock);
@@ -1052,98 +1087,210 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
spin_lock(&drawctxt->lock);
trace_adreno_drawctxt_wake(drawctxt);
- if (ret <= 0) {
- spin_unlock(&drawctxt->lock);
+ if (ret <= 0)
return (ret == 0) ? -ETIMEDOUT : (int) ret;
- }
}
+
+ return 0;
+}
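_wait_for_room_in_context_queue() runs with drawctxt->lock held and must drop it around the sleep, which is why the caller has to re-validate the context afterwards (the job of _check_context_state_to_queue_cmds() below). The shape of the wait, sketched with the waitqueue field the elided hunk presumably uses:

    spin_unlock(&drawctxt->lock);
    ret = wait_event_interruptible_timeout(drawctxt->wq,
            _check_context_queue(drawctxt),
            msecs_to_jiffies(_context_queue_wait));
    spin_lock(&drawctxt->lock);
    /* the context may have been invalidated while we slept */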
+
+static int _check_context_state_to_queue_cmds(
+ struct adreno_context *drawctxt)
+{
+ int ret = _check_context_state(&drawctxt->base);
+
+ if (ret)
+ return ret;
+
+ ret = _wait_for_room_in_context_queue(drawctxt);
+ if (ret)
+ return ret;
+
/*
 * Account for the possibility that the context got invalidated
* while we were sleeping
*/
+ return _check_context_state(&drawctxt->base);
+}
- if (kgsl_context_invalid(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -EDEADLK;
- }
- if (kgsl_context_detached(&drawctxt->base)) {
- spin_unlock(&drawctxt->lock);
- return -ENOENT;
- }
+static void _queue_drawobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj *drawobj)
+{
+ /* Put the command into the queue */
+ drawctxt->drawqueue[drawctxt->drawqueue_tail] = drawobj;
+ drawctxt->drawqueue_tail = (drawctxt->drawqueue_tail + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
+ drawctxt->queued++;
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+}
- ret = get_timestamp(drawctxt, cmdbatch, timestamp);
- if (ret) {
- spin_unlock(&drawctxt->lock);
+static int _queue_markerobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *markerobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(markerobj);
+ int ret;
+
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
return ret;
+
+ /*
+ * See if we can fastpath this thing - if nothing is queued
+ * and nothing is inflight retire without bothering the GPU
+ */
+ if (!drawctxt->queued && kgsl_check_timestamp(drawobj->device,
+ drawobj->context, drawctxt->queued_timestamp)) {
+ trace_adreno_cmdbatch_queued(drawobj, drawctxt->queued);
+ _retire_timestamp(drawobj);
+ return 1;
}
- cmdbatch->timestamp = *timestamp;
+ /*
+ * Remember the last queued timestamp - the marker will block
+ * until that timestamp is expired (unless another command
+ * comes along and forces the marker to execute)
+ */
- if (cmdbatch->flags & KGSL_CMDBATCH_MARKER) {
+ markerobj->marker_timestamp = drawctxt->queued_timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, markerobj);
+ _cmdobj_set_flags(drawctxt, markerobj);
- /*
- * See if we can fastpath this thing - if nothing is queued
- * and nothing is inflight retire without bothering the GPU
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (!drawctxt->queued && kgsl_check_timestamp(cmdbatch->device,
- cmdbatch->context, drawctxt->queued_timestamp)) {
- trace_adreno_cmdbatch_queued(cmdbatch,
- drawctxt->queued);
+ return 0;
+}
- _retire_marker(cmdbatch);
- spin_unlock(&drawctxt->lock);
- return 0;
- }
+static int _queue_cmdobj(struct adreno_device *adreno_dev,
+ struct adreno_context *drawctxt, struct kgsl_drawobj_cmd *cmdobj,
+ uint32_t *timestamp, unsigned int user_ts)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ unsigned int j;
+ int ret;
- /*
- * Remember the last queued timestamp - the marker will block
- * until that timestamp is expired (unless another command
- * comes along and forces the marker to execute)
- */
+ ret = get_timestamp(drawctxt, drawobj, timestamp, user_ts);
+ if (ret)
+ return ret;
+
+ /*
+ * If this is a real command then we need to force any markers
+ * queued before it to dispatch to keep time linear - set the
+ * skip bit so the commands get NOPed.
+ */
+ j = drawctxt->drawqueue_head;
+
+ while (j != drawctxt->drawqueue_tail) {
+ if (drawctxt->drawqueue[j]->type == MARKEROBJ_TYPE) {
+ struct kgsl_drawobj_cmd *markerobj =
+ CMDOBJ(drawctxt->drawqueue[j]);
+ set_bit(CMDOBJ_SKIP, &markerobj->priv);
+ }
- cmdbatch->marker_timestamp = drawctxt->queued_timestamp;
+ j = DRAWQUEUE_NEXT(j, ADRENO_CONTEXT_DRAWQUEUE_SIZE);
}
- /* SYNC commands have timestamp 0 and will get optimized out anyway */
- if (!(cmdbatch->flags & KGSL_CONTEXT_SYNC))
- drawctxt->queued_timestamp = *timestamp;
+ drawctxt->queued_timestamp = *timestamp;
+ _set_ft_policy(adreno_dev, drawctxt, cmdobj);
+ _cmdobj_set_flags(drawctxt, cmdobj);
- /*
- * Set the fault tolerance policy for the command batch - assuming the
- * context hasn't disabled FT use the current device policy
- */
+ _queue_drawobj(drawctxt, drawobj);
- if (drawctxt->base.flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
- set_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy);
- else
- cmdbatch->fault_policy = adreno_dev->ft_policy;
+ return 0;
+}
- /* Put the command into the queue */
- drawctxt->cmdqueue[drawctxt->cmdqueue_tail] = cmdbatch;
- drawctxt->cmdqueue_tail = (drawctxt->cmdqueue_tail + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+static void _queue_syncobj(struct adreno_context *drawctxt,
+ struct kgsl_drawobj_sync *syncobj, uint32_t *timestamp)
+{
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
- /*
- * If this is a real command then we need to force any markers queued
- * before it to dispatch to keep time linear - set the skip bit so
- * the commands get NOPed.
- */
+ *timestamp = 0;
+ drawobj->timestamp = 0;
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)) {
- unsigned int i = drawctxt->cmdqueue_head;
+ _queue_drawobj(drawctxt, drawobj);
+}
- while (i != drawctxt->cmdqueue_tail) {
- if (drawctxt->cmdqueue[i]->flags & KGSL_CMDBATCH_MARKER)
- set_bit(CMDBATCH_FLAG_SKIP,
- &drawctxt->cmdqueue[i]->priv);
+/**
+ * adreno_dispatcher_queue_cmds() - Queue new draw objects in the context
+ * @dev_priv: Pointer to the device private struct
+ * @context: Pointer to the kgsl draw context
+ * @drawobj: Pointer to the array of drawobjs being submitted
+ * @count: Number of drawobjs being submitted
+ * @timestamp: Pointer to the requested timestamp
+ *
+ * Queue the draw objects in the context - if there isn't any room in the
+ * queue, then block until there is
+ */
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp)
- i = CMDQUEUE_NEXT(i, ADRENO_CONTEXT_CMDQUEUE_SIZE);
+{
+ struct kgsl_device *device = dev_priv->device;
+ struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
+ struct adreno_dispatcher_drawqueue *dispatch_q;
+ int ret;
+ unsigned int i, user_ts;
+
+ ret = _check_context_state(&drawctxt->base);
+ if (ret)
+ return ret;
+
+ ret = _verify_cmdobj(dev_priv, context, drawobj, count);
+ if (ret)
+ return ret;
+
+ /* wait for the suspend gate */
+ wait_for_completion(&device->halt_gate);
+
+ spin_lock(&drawctxt->lock);
+
+ ret = _check_context_state_to_queue_cmds(drawctxt);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+
+ user_ts = *timestamp;
+
+ for (i = 0; i < count; i++) {
+
+ switch (drawobj[i]->type) {
+ case MARKEROBJ_TYPE:
+ ret = _queue_markerobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret == 1) {
+ spin_unlock(&drawctxt->lock);
+ goto done;
+ } else if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case CMDOBJ_TYPE:
+ ret = _queue_cmdobj(adreno_dev, drawctxt,
+ CMDOBJ(drawobj[i]),
+ timestamp, user_ts);
+ if (ret) {
+ spin_unlock(&drawctxt->lock);
+ return ret;
+ }
+ break;
+ case SYNCOBJ_TYPE:
+ _queue_syncobj(drawctxt, SYNCOBJ(drawobj[i]),
+ timestamp);
+ break;
+ default:
+ spin_unlock(&drawctxt->lock);
+ return -EINVAL;
}
+
}
- drawctxt->queued++;
- trace_adreno_cmdbatch_queued(cmdbatch, drawctxt->queued);
+ dispatch_q = ADRENO_DRAWOBJ_DISPATCH_DRAWQUEUE(drawobj[0]);
_track_context(adreno_dev, dispatch_q, drawctxt);
@@ -1163,8 +1310,11 @@ int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
* queue will try to schedule new commands anyway.
*/
- if (dispatch_q->inflight < _context_cmdbatch_burst)
+ if (dispatch_q->inflight < _context_drawobj_burst)
adreno_dispatcher_issuecmds(adreno_dev);
+done:
+ if (test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
+ return -EPROTO;
return 0;
}
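adreno_dispatcher_queue_cmds() takes a mixed array of draw objects and routes each one by type: markers may fastpath out entirely (the ret == 1 case), commands are queued, sync objects are queued with timestamp 0, and every object shares the single user timestamp passed in. Only the first object's dispatch queue is used for the burst check at the end. A hypothetical call from the submission ioctl path:

    struct kgsl_drawobj *objs[2] = { DRAWOBJ(syncobj), DRAWOBJ(cmdobj) };
    uint32_t ts = 0;

    /* waits on the sync points, then runs the command behind them */
    ret = adreno_dispatcher_queue_cmds(dev_priv, context, objs, 2, &ts);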
@@ -1208,15 +1358,15 @@ static void mark_guilty_context(struct kgsl_device *device, unsigned int id)
}
/*
- * If an IB inside of the command batch has a gpuaddr that matches the base
+ * If an IB inside of the drawobj has a gpuaddr that matches the base
* passed in then zero the size which effectively skips it when it is submitted
* in the ringbuffer.
*/
-static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
+static void _skip_ib(struct kgsl_drawobj_cmd *cmdobj, uint64_t base)
{
struct kgsl_memobj_node *ib;
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
if (ib->gpuaddr == base) {
ib->priv |= MEMOBJ_SKIP;
if (base)
@@ -1225,10 +1375,11 @@ static void cmdbatch_skip_ib(struct kgsl_cmdbatch *cmdbatch, uint64_t base)
}
}
-static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_cmd(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int i;
/*
@@ -1243,9 +1394,9 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
* b) force preamble for next commandbatch
*/
for (i = 1; i < count; i++) {
- if (replay[i]->context->id == cmdbatch->context->id) {
+ if (DRAWOBJ(replay[i])->context->id == drawobj->context->id) {
replay[i]->fault_policy = replay[0]->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
set_bit(KGSL_FT_SKIPCMD, &replay[i]->fault_recovery);
break;
}
@@ -1262,41 +1413,44 @@ static void cmdbatch_skip_cmd(struct kgsl_cmdbatch *cmdbatch,
drawctxt->fault_policy = replay[0]->fault_policy;
}
- /* set the flags to skip this cmdbatch */
- set_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv);
- cmdbatch->fault_recovery = 0;
+ /* set the flags to skip this cmdobj */
+ set_bit(CMDOBJ_SKIP, &cmdobj->priv);
+ cmdobj->fault_recovery = 0;
}
-static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmdbatch **replay, int count)
+static void _skip_frame(struct kgsl_drawobj_cmd *cmdobj,
+ struct kgsl_drawobj_cmd **replay, int count)
{
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
int skip = 1;
int i;
for (i = 0; i < count; i++) {
+ struct kgsl_drawobj *replay_obj = DRAWOBJ(replay[i]);
+
/*
- * Only operate on command batches that belong to the
+		 * Only operate on drawobjs that belong to the
* faulting context
*/
- if (replay[i]->context->id != cmdbatch->context->id)
+ if (replay_obj->context->id != drawobj->context->id)
continue;
/*
- * Skip all the command batches in this context until
+ * Skip all the drawobjs in this context until
* the EOF flag is seen. If the EOF flag is seen then
* force the preamble for the next command.
*/
if (skip) {
- set_bit(CMDBATCH_FLAG_SKIP, &replay[i]->priv);
+ set_bit(CMDOBJ_SKIP, &replay[i]->priv);
- if (replay[i]->flags & KGSL_CMDBATCH_END_OF_FRAME)
+ if (replay_obj->flags & KGSL_DRAWOBJ_END_OF_FRAME)
skip = 0;
} else {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
return;
}
}
@@ -1318,26 +1472,28 @@ static void cmdbatch_skip_frame(struct kgsl_cmdbatch *cmdbatch,
set_bit(ADRENO_CONTEXT_FORCE_PREAMBLE, &drawctxt->base.priv);
}
-static void remove_invalidated_cmdbatches(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count)
+static void remove_invalidated_cmdobjs(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count)
{
int i;
for (i = 0; i < count; i++) {
- struct kgsl_cmdbatch *cmd = replay[i];
- if (cmd == NULL)
+ struct kgsl_drawobj_cmd *cmdobj = replay[i];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ if (cmdobj == NULL)
continue;
- if (kgsl_context_detached(cmd->context) ||
- kgsl_context_invalid(cmd->context)) {
+ if (kgsl_context_detached(drawobj->context) ||
+ kgsl_context_invalid(drawobj->context)) {
replay[i] = NULL;
mutex_lock(&device->mutex);
kgsl_cancel_events_timestamp(device,
- &cmd->context->events, cmd->timestamp);
+ &drawobj->context->events, drawobj->timestamp);
mutex_unlock(&device->mutex);
- kgsl_cmdbatch_destroy(cmd);
+ kgsl_drawobj_destroy(drawobj);
}
}
}
@@ -1361,9 +1517,10 @@ static inline const char *_kgsl_context_comm(struct kgsl_context *context)
static void adreno_fault_header(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
unsigned int status, rptr, wptr, ib1sz, ib2sz;
uint64_t ib1base, ib2base;
@@ -1377,22 +1534,22 @@ static void adreno_fault_header(struct kgsl_device *device,
ADRENO_REG_CP_IB2_BASE_HI, &ib2base);
adreno_readreg(adreno_dev, ADRENO_REG_CP_IB2_BUFSZ, &ib2sz);
- if (cmdbatch != NULL) {
+ if (drawobj != NULL) {
struct adreno_context *drawctxt =
- ADRENO_CONTEXT(cmdbatch->context);
+ ADRENO_CONTEXT(drawobj->context);
- trace_adreno_gpu_fault(cmdbatch->context->id,
- cmdbatch->timestamp,
+ trace_adreno_gpu_fault(drawobj->context->id,
+ drawobj->timestamp,
status, rptr, wptr, ib1base, ib1sz,
ib2base, ib2sz, drawctxt->rb->id);
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault ctx %d ts %d status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
- cmdbatch->context->id, cmdbatch->timestamp, status,
+ drawobj->context->id, drawobj->timestamp, status,
rptr, wptr, ib1base, ib1sz, ib2base, ib2sz);
if (rb != NULL)
- pr_fault(device, cmdbatch,
+ pr_fault(device, drawobj,
"gpu fault rb %d rb sw r/w %4.4x/%4.4x\n",
rb->id, rptr, rb->wptr);
} else {
@@ -1411,33 +1568,34 @@ static void adreno_fault_header(struct kgsl_device *device,
void adreno_fault_skipcmd_detached(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
kgsl_context_detached(&drawctxt->base)) {
- pr_context(KGSL_DEVICE(adreno_dev), cmdbatch->context,
- "gpu detached context %d\n", cmdbatch->context->id);
+ pr_context(KGSL_DEVICE(adreno_dev), drawobj->context,
+ "gpu detached context %d\n", drawobj->context->id);
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
}
}
/**
- * process_cmdbatch_fault() - Process a cmdbatch for fault policies
- * @device: Device on which the cmdbatch caused a fault
- * @replay: List of cmdbatches that are to be replayed on the device. The
- * faulting cmdbatch is the first command in the replay list and the remaining
- * cmdbatches in the list are commands that were submitted to the same queue
+ * process_cmdobj_fault() - Process a cmdobj for fault policies
+ * @device: Device on which the cmdobj caused a fault
+ * @replay: List of cmdobjs that are to be replayed on the device. The
+ * first command in the replay list is the faulting command and the remaining
+ * cmdobjs in the list are commands that were submitted to the same queue
* as the faulting one.
- * @count: Number of cmdbatches in replay
+ * @count: Number of cmdobjs in replay
* @base: The IB1 base at the time of fault
* @fault: The fault type
*/
-static void process_cmdbatch_fault(struct kgsl_device *device,
- struct kgsl_cmdbatch **replay, int count,
+static void process_cmdobj_fault(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd **replay, int count,
unsigned int base,
int fault)
{
- struct kgsl_cmdbatch *cmdbatch = replay[0];
+ struct kgsl_drawobj_cmd *cmdobj = replay[0];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
int i;
char *state = "failed";
@@ -1451,18 +1609,18 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* where 1st and 4th gpu hang are more than 3 seconds apart we
* won't disable GFT and invalidate the context.
*/
- if (test_bit(KGSL_FT_THROTTLE, &cmdbatch->fault_policy)) {
- if (time_after(jiffies, (cmdbatch->context->fault_time
+ if (test_bit(KGSL_FT_THROTTLE, &cmdobj->fault_policy)) {
+ if (time_after(jiffies, (drawobj->context->fault_time
+ msecs_to_jiffies(_fault_throttle_time)))) {
- cmdbatch->context->fault_time = jiffies;
- cmdbatch->context->fault_count = 1;
+ drawobj->context->fault_time = jiffies;
+ drawobj->context->fault_count = 1;
} else {
- cmdbatch->context->fault_count++;
- if (cmdbatch->context->fault_count >
+ drawobj->context->fault_count++;
+ if (drawobj->context->fault_count >
_fault_throttle_burst) {
set_bit(KGSL_FT_DISABLE,
- &cmdbatch->fault_policy);
- pr_context(device, cmdbatch->context,
+ &cmdobj->fault_policy);
+ pr_context(device, drawobj->context,
"gpu fault threshold exceeded %d faults in %d msecs\n",
_fault_throttle_burst,
_fault_throttle_time);
@@ -1471,45 +1629,45 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
}
/*
- * If FT is disabled for this cmdbatch invalidate immediately
+ * If FT is disabled for this cmdobj invalidate immediately
*/
- if (test_bit(KGSL_FT_DISABLE, &cmdbatch->fault_policy) ||
- test_bit(KGSL_FT_TEMP_DISABLE, &cmdbatch->fault_policy)) {
+ if (test_bit(KGSL_FT_DISABLE, &cmdobj->fault_policy) ||
+ test_bit(KGSL_FT_TEMP_DISABLE, &cmdobj->fault_policy)) {
state = "skipped";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/* If the context is detached do not run FT on context */
- if (kgsl_context_detached(cmdbatch->context)) {
+ if (kgsl_context_detached(drawobj->context)) {
state = "detached";
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
}
/*
- * Set a flag so we don't print another PM dump if the cmdbatch fails
+ * Set a flag so we don't print another PM dump if the cmdobj fails
* again on replay
*/
- set_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy);
+ set_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy);
/*
* A hardware fault generally means something was deterministically
- * wrong with the command batch - no point in trying to replay it
+ * wrong with the cmdobj - no point in trying to replay it
* Clear the replay bit and move on to the next policy level
*/
if (fault & ADRENO_HARD_FAULT)
- clear_bit(KGSL_FT_REPLAY, &(cmdbatch->fault_policy));
+ clear_bit(KGSL_FT_REPLAY, &(cmdobj->fault_policy));
/*
* A timeout fault means the IB timed out - clear the policy and
* invalidate - this will clear the FT_SKIP_PMDUMP bit but that is okay
- * because we won't see this cmdbatch again
+ * because we won't see this cmdobj again
*/
if (fault & ADRENO_TIMEOUT_FAULT)
- bitmap_zero(&cmdbatch->fault_policy, BITS_PER_LONG);
+ bitmap_zero(&cmdobj->fault_policy, BITS_PER_LONG);
/*
* If the context had a GPU page fault then it is likely it would fault
@@ -1517,83 +1675,84 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
*/
if (test_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv)) {
+ &drawobj->context->priv)) {
/* we'll need to resume the mmu later... */
- clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy);
+ clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy);
clear_bit(KGSL_CONTEXT_PRIV_PAGEFAULT,
- &cmdbatch->context->priv);
+ &drawobj->context->priv);
}
/*
- * Execute the fault tolerance policy. Each command batch stores the
+ * Execute the fault tolerance policy. Each cmdobj stores the
* current fault policy that was set when it was queued.
* As the options are tried in descending priority
* (REPLAY -> SKIPIBS -> SKIPFRAME -> NOTHING) the bits are cleared
- * from the cmdbatch policy so the next thing can be tried if the
+ * from the cmdobj policy so the next thing can be tried if the
* change comes around again
*/
- /* Replay the hanging command batch again */
- if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_REPLAY));
- set_bit(KGSL_FT_REPLAY, &cmdbatch->fault_recovery);
+ /* Replay the hanging cmdobj again */
+ if (test_and_clear_bit(KGSL_FT_REPLAY, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_REPLAY));
+ set_bit(KGSL_FT_REPLAY, &cmdobj->fault_recovery);
return;
}
/*
* Skip the last IB1 that was played but replay everything else.
- * Note that the last IB1 might not be in the "hung" command batch
+ * Note that the last IB1 might not be in the "hung" cmdobj
* because the CP may have caused a page-fault while it was prefetching
* the next IB1/IB2. walk all outstanding commands and zap the
* supposedly bad IB1 where ever it lurks.
*/
- if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPIB));
- set_bit(KGSL_FT_SKIPIB, &cmdbatch->fault_recovery);
+ if (test_and_clear_bit(KGSL_FT_SKIPIB, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPIB));
+ set_bit(KGSL_FT_SKIPIB, &cmdobj->fault_recovery);
for (i = 0; i < count; i++) {
if (replay[i] != NULL &&
- replay[i]->context->id == cmdbatch->context->id)
- cmdbatch_skip_ib(replay[i], base);
+ DRAWOBJ(replay[i])->context->id ==
+ drawobj->context->id)
+ _skip_ib(replay[i], base);
}
return;
}
- /* Skip the faulted command batch submission */
- if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch, BIT(KGSL_FT_SKIPCMD));
+ /* Skip the faulted cmdobj submission */
+ if (test_and_clear_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj, BIT(KGSL_FT_SKIPCMD));
- /* Skip faulting command batch */
- cmdbatch_skip_cmd(cmdbatch, replay, count);
+ /* Skip faulting cmdobj */
+ _skip_cmd(cmdobj, replay, count);
return;
}
- if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_policy)) {
- trace_adreno_cmdbatch_recovery(cmdbatch,
+ if (test_and_clear_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_policy)) {
+ trace_adreno_cmdbatch_recovery(cmdobj,
BIT(KGSL_FT_SKIPFRAME));
- set_bit(KGSL_FT_SKIPFRAME, &cmdbatch->fault_recovery);
+ set_bit(KGSL_FT_SKIPFRAME, &cmdobj->fault_recovery);
/*
- * Skip all the pending command batches for this context until
+		 * Skip all the pending cmdobjs for this context until
* the EOF frame is seen
*/
- cmdbatch_skip_frame(cmdbatch, replay, count);
+ _skip_frame(cmdobj, replay, count);
return;
}
/* If we get here then all the policies failed */
- pr_context(device, cmdbatch->context, "gpu %s ctx %d ts %d\n",
- state, cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu %s ctx %d ts %d\n",
+ state, drawobj->context->id, drawobj->timestamp);
/* Mark the context as failed */
- mark_guilty_context(device, cmdbatch->context->id);
+ mark_guilty_context(device, drawobj->context->id);
/* Invalidate the context */
- adreno_drawctxt_invalidate(device, cmdbatch->context);
+ adreno_drawctxt_invalidate(device, drawobj->context);
}
/**
@@ -1605,12 +1764,12 @@ static void process_cmdbatch_fault(struct kgsl_device *device,
* @base: The IB1 base during the fault
*/
static void recover_dispatch_q(struct kgsl_device *device,
- struct adreno_dispatcher_cmdqueue *dispatch_q,
+ struct adreno_dispatcher_drawqueue *dispatch_q,
int fault,
unsigned int base)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct kgsl_cmdbatch **replay = NULL;
+ struct kgsl_drawobj_cmd **replay;
unsigned int ptr;
int first = 0;
int count = 0;
@@ -1624,14 +1783,16 @@ static void recover_dispatch_q(struct kgsl_device *device,
/* Recovery failed - mark everybody on this q guilty */
while (ptr != dispatch_q->tail) {
- struct kgsl_context *context =
- dispatch_q->cmd_q[ptr]->context;
+ struct kgsl_drawobj_cmd *cmdobj =
+ dispatch_q->cmd_q[ptr];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- mark_guilty_context(device, context->id);
- adreno_drawctxt_invalidate(device, context);
- kgsl_cmdbatch_destroy(dispatch_q->cmd_q[ptr]);
+ mark_guilty_context(device, drawobj->context->id);
+ adreno_drawctxt_invalidate(device, drawobj->context);
+ kgsl_drawobj_destroy(drawobj);
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
/*
@@ -1643,22 +1804,22 @@ static void recover_dispatch_q(struct kgsl_device *device,
goto replay;
}
- /* Copy the inflight command batches into the temporary storage */
+	/* Copy the inflight cmdobjs into the temporary storage */
ptr = dispatch_q->head;
while (ptr != dispatch_q->tail) {
replay[count++] = dispatch_q->cmd_q[ptr];
- ptr = CMDQUEUE_NEXT(ptr, ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ ptr = DRAWQUEUE_NEXT(ptr, ADRENO_DISPATCH_DRAWQUEUE_SIZE);
}
if (fault && count)
- process_cmdbatch_fault(device, replay,
+ process_cmdobj_fault(device, replay,
count, base, fault);
replay:
dispatch_q->inflight = 0;
dispatch_q->head = dispatch_q->tail = 0;
- /* Remove any pending command batches that have been invalidated */
- remove_invalidated_cmdbatches(device, replay, count);
+	/* Remove any pending cmdobjs that have been invalidated */
+ remove_invalidated_cmdobjs(device, replay, count);
/* Replay the pending command buffers */
for (i = 0; i < count; i++) {
@@ -1674,16 +1835,16 @@ replay:
*/
if (first == 0) {
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &replay[i]->priv);
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &replay[i]->priv);
first = 1;
}
/*
- * Force each command batch to wait for idle - this avoids weird
+ * Force each cmdobj to wait for idle - this avoids weird
* CP parse issues
*/
- set_bit(CMDBATCH_FLAG_WFI, &replay[i]->priv);
+ set_bit(CMDOBJ_WFI, &replay[i]->priv);
ret = sendcmd(adreno_dev, replay[i]);
@@ -1693,15 +1854,18 @@ replay:
*/
if (ret) {
- pr_context(device, replay[i]->context,
+ pr_context(device, replay[i]->base.context,
"gpu reset failed ctx %d ts %d\n",
- replay[i]->context->id, replay[i]->timestamp);
+ replay[i]->base.context->id,
+ replay[i]->base.timestamp);
/* Mark this context as guilty (failed recovery) */
- mark_guilty_context(device, replay[i]->context->id);
+ mark_guilty_context(device,
+ replay[i]->base.context->id);
- adreno_drawctxt_invalidate(device, replay[i]->context);
- remove_invalidated_cmdbatches(device, &replay[i],
+ adreno_drawctxt_invalidate(device,
+ replay[i]->base.context);
+ remove_invalidated_cmdobjs(device, &replay[i],
count - i);
}
}
@@ -1713,36 +1877,38 @@ replay:
}
static void do_header_and_snapshot(struct kgsl_device *device,
- struct adreno_ringbuffer *rb, struct kgsl_cmdbatch *cmdbatch)
+ struct adreno_ringbuffer *rb, struct kgsl_drawobj_cmd *cmdobj)
{
- /* Always dump the snapshot on a non-cmdbatch failure */
- if (cmdbatch == NULL) {
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+
+ /* Always dump the snapshot on a non-drawobj failure */
+ if (cmdobj == NULL) {
adreno_fault_header(device, rb, NULL);
kgsl_device_snapshot(device, NULL);
return;
}
/* Skip everything if the PMDUMP flag is set */
- if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdbatch->fault_policy))
+ if (test_bit(KGSL_FT_SKIP_PMDUMP, &cmdobj->fault_policy))
return;
/* Print the fault header */
- adreno_fault_header(device, rb, cmdbatch);
+ adreno_fault_header(device, rb, cmdobj);
- if (!(cmdbatch->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
- kgsl_device_snapshot(device, cmdbatch->context);
+ if (!(drawobj->context->flags & KGSL_CONTEXT_NO_SNAPSHOT))
+ kgsl_device_snapshot(device, drawobj->context);
}
static int dispatcher_do_fault(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_dispatcher_cmdqueue *dispatch_q = NULL, *dispatch_q_temp;
+ struct adreno_dispatcher_drawqueue *dispatch_q = NULL, *dispatch_q_temp;
struct adreno_ringbuffer *rb;
struct adreno_ringbuffer *hung_rb = NULL;
unsigned int reg;
uint64_t base;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj_cmd *cmdobj = NULL;
int ret, i;
int fault;
int halt;
@@ -1792,10 +1958,10 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
adreno_writereg(adreno_dev, ADRENO_REG_CP_ME_CNTL, reg);
}
/*
- * retire cmdbatches from all the dispatch_q's before starting recovery
+	 * retire cmdobjs from all the dispatch queues before starting recovery
*/
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- adreno_dispatch_retire_cmdqueue(adreno_dev,
+ adreno_dispatch_retire_drawqueue(adreno_dev,
&(rb->dispatch_q));
/* Select the active dispatch_q */
if (base == rb->buffer_desc.gpuaddr) {
@@ -1814,15 +1980,15 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
}
}
- if (dispatch_q && !adreno_cmdqueue_is_empty(dispatch_q)) {
- cmdbatch = dispatch_q->cmd_q[dispatch_q->head];
- trace_adreno_cmdbatch_fault(cmdbatch, fault);
+ if (dispatch_q && !adreno_drawqueue_is_empty(dispatch_q)) {
+ cmdobj = dispatch_q->cmd_q[dispatch_q->head];
+ trace_adreno_cmdbatch_fault(cmdobj, fault);
}
adreno_readreg64(adreno_dev, ADRENO_REG_CP_IB1_BASE,
ADRENO_REG_CP_IB1_BASE_HI, &base);
- do_header_and_snapshot(device, hung_rb, cmdbatch);
+ do_header_and_snapshot(device, hung_rb, cmdobj);
/* Terminate the stalled transaction and resume the IOMMU */
if (fault & ADRENO_IOMMU_PAGE_FAULT)
@@ -1876,23 +2042,24 @@ static int dispatcher_do_fault(struct adreno_device *adreno_dev)
return 1;
}
-static inline int cmdbatch_consumed(struct kgsl_cmdbatch *cmdbatch,
+static inline int drawobj_consumed(struct kgsl_drawobj *drawobj,
unsigned int consumed, unsigned int retired)
{
- return ((timestamp_cmp(cmdbatch->timestamp, consumed) >= 0) &&
- (timestamp_cmp(retired, cmdbatch->timestamp) < 0));
+ return ((timestamp_cmp(drawobj->timestamp, consumed) >= 0) &&
+ (timestamp_cmp(retired, drawobj->timestamp) < 0));
}
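drawobj_consumed() reports whether an object's timestamp sits at or past the consumed marker while still unretired, with timestamp_cmp() handling counter wraparound. Evaluated against the formula for consumed == 10 and retired == 7 (hypothetical values for illustration):

    /* consumed == 10, retired == 7 */
    /* ts ==  7  ->  0   (already retired)             */
    /* ts ==  9  ->  0   (behind the consumed marker)  */
    /* ts == 10  ->  1   (consumed but not retired)    */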
static void _print_recovery(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
static struct {
unsigned int mask;
const char *str;
} flags[] = { ADRENO_FT_TYPES };
- int i, nr = find_first_bit(&cmdbatch->fault_recovery, BITS_PER_LONG);
+ int i, nr = find_first_bit(&cmdobj->fault_recovery, BITS_PER_LONG);
char *result = "unknown";
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
for (i = 0; i < ARRAY_SIZE(flags); i++) {
if (flags[i].mask == BIT(nr)) {
@@ -1901,40 +2068,41 @@ static void _print_recovery(struct kgsl_device *device,
}
}
- pr_context(device, cmdbatch->context,
+ pr_context(device, drawobj->context,
"gpu %s ctx %d ts %d policy %lX\n",
- result, cmdbatch->context->id, cmdbatch->timestamp,
- cmdbatch->fault_recovery);
+ result, drawobj->context->id, drawobj->timestamp,
+ cmdobj->fault_recovery);
}
-static void cmdbatch_profile_ticks(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, uint64_t *start, uint64_t *retire)
+static void cmdobj_profile_ticks(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj, uint64_t *start, uint64_t *retire)
{
- void *ptr = adreno_dev->cmdbatch_profile_buffer.hostptr;
- struct adreno_cmdbatch_profile_entry *entry;
+ void *ptr = adreno_dev->profile_buffer.hostptr;
+ struct adreno_drawobj_profile_entry *entry;
- entry = (struct adreno_cmdbatch_profile_entry *)
- (ptr + (cmdbatch->profile_index * sizeof(*entry)));
+ entry = (struct adreno_drawobj_profile_entry *)
+ (ptr + (cmdobj->profile_index * sizeof(*entry)));
rmb();
*start = entry->started;
*retire = entry->retired;
}
-static void retire_cmdbatch(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch)
+static void retire_cmdobj(struct adreno_device *adreno_dev,
+ struct kgsl_drawobj_cmd *cmdobj)
{
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
- struct adreno_context *drawctxt = ADRENO_CONTEXT(cmdbatch->context);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
+ struct adreno_context *drawctxt = ADRENO_CONTEXT(drawobj->context);
uint64_t start = 0, end = 0;
- if (cmdbatch->fault_recovery != 0) {
- set_bit(ADRENO_CONTEXT_FAULT, &cmdbatch->context->priv);
- _print_recovery(KGSL_DEVICE(adreno_dev), cmdbatch);
+ if (cmdobj->fault_recovery != 0) {
+ set_bit(ADRENO_CONTEXT_FAULT, &drawobj->context->priv);
+ _print_recovery(KGSL_DEVICE(adreno_dev), cmdobj);
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv))
- cmdbatch_profile_ticks(adreno_dev, cmdbatch, &start, &end);
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv))
+ cmdobj_profile_ticks(adreno_dev, cmdobj, &start, &end);
/*
* For A3xx we still get the rptr from the CP_RB_RPTR instead of
@@ -1942,48 +2110,49 @@ static void retire_cmdbatch(struct adreno_device *adreno_dev,
* So avoid reading GPU register directly for A3xx.
*/
if (adreno_is_a3xx(adreno_dev))
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch), 0);
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj), 0, cmdobj->fault_recovery);
else
- trace_adreno_cmdbatch_retired(cmdbatch,
- (int) dispatcher->inflight, start, end,
- ADRENO_CMDBATCH_RB(cmdbatch),
- adreno_get_rptr(drawctxt->rb));
+ trace_adreno_cmdbatch_retired(drawobj,
+ (int) dispatcher->inflight, start, end,
+ ADRENO_DRAWOBJ_RB(drawobj),
+ adreno_get_rptr(drawctxt->rb), cmdobj->fault_recovery);
drawctxt->submit_retire_ticks[drawctxt->ticks_index] =
- end - cmdbatch->submit_ticks;
+ end - cmdobj->submit_ticks;
drawctxt->ticks_index = (drawctxt->ticks_index + 1) %
SUBMIT_RETIRE_TICKS_SIZE;
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
}
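For context: submit_retire_ticks[], written above, is a small circular window (SUBMIT_RETIRE_TICKS_SIZE entries) of submit-to-retire latencies per context. A hypothetical consumer, not part of this patch, could average the window like this:

	static uint64_t drawctxt_avg_submit_retire(struct adreno_context *drawctxt)
	{
		uint64_t sum = 0;
		int i;

		for (i = 0; i < SUBMIT_RETIRE_TICKS_SIZE; i++)
			sum += drawctxt->submit_retire_ticks[i];

		/* the window starts zero-filled, so early averages read low */
		return div64_u64(sum, SUBMIT_RETIRE_TICKS_SIZE);
	}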
-static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_retire_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_dispatcher *dispatcher = &adreno_dev->dispatcher;
int count = 0;
- while (!adreno_cmdqueue_is_empty(cmdqueue)) {
- struct kgsl_cmdbatch *cmdbatch =
- cmdqueue->cmd_q[cmdqueue->head];
+ while (!adreno_drawqueue_is_empty(drawqueue)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ drawqueue->cmd_q[drawqueue->head];
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!kgsl_check_timestamp(device, cmdbatch->context,
- cmdbatch->timestamp))
+ if (!kgsl_check_timestamp(device, drawobj->context,
+ drawobj->timestamp))
break;
- retire_cmdbatch(adreno_dev, cmdbatch);
+ retire_cmdobj(adreno_dev, cmdobj);
dispatcher->inflight--;
- cmdqueue->inflight--;
+ drawqueue->inflight--;
- cmdqueue->cmd_q[cmdqueue->head] = NULL;
+ drawqueue->cmd_q[drawqueue->head] = NULL;
- cmdqueue->head = CMDQUEUE_NEXT(cmdqueue->head,
- ADRENO_DISPATCH_CMDQUEUE_SIZE);
+ drawqueue->head = DRAWQUEUE_NEXT(drawqueue->head,
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE);
count++;
}
@@ -1992,13 +2161,14 @@ static int adreno_dispatch_retire_cmdqueue(struct adreno_device *adreno_dev,
}
static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
- struct kgsl_cmdbatch *cmdbatch = cmdqueue->cmd_q[cmdqueue->head];
+ struct kgsl_drawobj *drawobj =
+ DRAWOBJ(drawqueue->cmd_q[drawqueue->head]);
/* Don't timeout if the timer hasn't expired yet (duh) */
- if (time_is_after_jiffies(cmdqueue->expires))
+ if (time_is_after_jiffies(drawqueue->expires))
return;
/* Don't timeout if the IB timeout is disabled globally */
@@ -2006,30 +2176,30 @@ static void _adreno_dispatch_check_timeout(struct adreno_device *adreno_dev,
return;
/* Don't time out if the context has disabled it */
- if (cmdbatch->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
+ if (drawobj->context->flags & KGSL_CONTEXT_NO_FAULT_TOLERANCE)
return;
- pr_context(device, cmdbatch->context, "gpu timeout ctx %d ts %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ pr_context(device, drawobj->context, "gpu timeout ctx %d ts %d\n",
+ drawobj->context->id, drawobj->timestamp);
adreno_set_gpu_fault(adreno_dev, ADRENO_TIMEOUT_FAULT);
}
-static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- int count = adreno_dispatch_retire_cmdqueue(adreno_dev, cmdqueue);
+ int count = adreno_dispatch_retire_drawqueue(adreno_dev, drawqueue);
/* Nothing to do if there are no pending commands */
- if (adreno_cmdqueue_is_empty(cmdqueue))
+ if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the cmdqueue timeout if we are about to preempt out */
+ /* Don't update the drawqueue timeout if we are about to preempt out */
if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
return count;
- /* Don't update the cmdqueue timeout if it isn't active */
- if (!cmdqueue_is_current(cmdqueue))
+ /* Don't update the drawqueue timeout if it isn't active */
+ if (!drawqueue_is_current(drawqueue))
return count;
/*
@@ -2038,17 +2208,17 @@ static int adreno_dispatch_process_cmdqueue(struct adreno_device *adreno_dev,
*/
if (count) {
- cmdqueue->expires = jiffies +
- msecs_to_jiffies(adreno_cmdbatch_timeout);
+ drawqueue->expires = jiffies +
+ msecs_to_jiffies(adreno_drawobj_timeout);
return count;
}
/*
* If we get here then 1) the ringbuffer is current and 2) we haven't
* retired anything. Check to see if the timeout is valid for the
- * current cmdbatch and fault if it has expired
+ * current drawobj and fault if it has expired
*/
- _adreno_dispatch_check_timeout(adreno_dev, cmdqueue);
+ _adreno_dispatch_check_timeout(adreno_dev, drawqueue);
return 0;
}
@@ -2067,11 +2237,11 @@ static void _dispatcher_update_timers(struct adreno_device *adreno_dev)
/* Check to see if we need to update the command timer */
if (adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE)) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(adreno_dev->cur_rb);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(adreno_dev->cur_rb);
- if (!adreno_cmdqueue_is_empty(cmdqueue))
- mod_timer(&dispatcher->timer, cmdqueue->expires);
+ if (!adreno_drawqueue_is_empty(drawqueue))
+ mod_timer(&dispatcher->timer, drawqueue->expires);
}
}
@@ -2111,14 +2281,14 @@ static void adreno_dispatcher_work(struct work_struct *work)
/*
* As long as there are inflight commands, process retired commands from
- * all cmdqueues
+ * all drawqueues
*/
for (i = 0; i < adreno_dev->num_ringbuffers; i++) {
- struct adreno_dispatcher_cmdqueue *cmdqueue =
- CMDQUEUE(&adreno_dev->ringbuffers[i]);
+ struct adreno_dispatcher_drawqueue *drawqueue =
+ DRAWQUEUE(&adreno_dev->ringbuffers[i]);
- count += adreno_dispatch_process_cmdqueue(adreno_dev,
- cmdqueue);
+ count += adreno_dispatch_process_drawqueue(adreno_dev,
+ drawqueue);
if (dispatcher->inflight == 0)
break;
}
@@ -2178,7 +2348,7 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
}
/*
- * This is called on a regular basis while command batches are inflight. Fault
+ * This is called on a regular basis while cmdobjs are inflight. Fault
* detection registers are read and compared to the existing values - if they
* changed then the GPU is still running. If they are the same between
* subsequent calls then the GPU may have faulted
@@ -2230,7 +2400,7 @@ static void adreno_dispatcher_timer(unsigned long data)
*/
void adreno_dispatcher_start(struct kgsl_device *device)
{
- complete_all(&device->cmdbatch_gate);
+ complete_all(&device->halt_gate);
/* Schedule the work loop to get things going */
adreno_dispatcher_schedule(device);
@@ -2267,13 +2437,13 @@ void adreno_dispatcher_close(struct adreno_device *adreno_dev)
del_timer_sync(&dispatcher->fault_timer);
FOR_EACH_RINGBUFFER(adreno_dev, rb, i) {
- struct adreno_dispatcher_cmdqueue *dispatch_q =
+ struct adreno_dispatcher_drawqueue *dispatch_q =
&(rb->dispatch_q);
- while (!adreno_cmdqueue_is_empty(dispatch_q)) {
- kgsl_cmdbatch_destroy(
- dispatch_q->cmd_q[dispatch_q->head]);
+ while (!adreno_drawqueue_is_empty(dispatch_q)) {
+ kgsl_drawobj_destroy(
+ DRAWOBJ(dispatch_q->cmd_q[dispatch_q->head]));
dispatch_q->head = (dispatch_q->head + 1)
- % ADRENO_DISPATCH_CMDQUEUE_SIZE;
+ % ADRENO_DISPATCH_DRAWQUEUE_SIZE;
}
}
@@ -2332,23 +2502,23 @@ static ssize_t _show_uint(struct adreno_dispatcher *dispatcher,
*((unsigned int *) attr->value));
}
-static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_CMDQUEUE_SIZE,
+static DISPATCHER_UINT_ATTR(inflight, 0644, ADRENO_DISPATCH_DRAWQUEUE_SIZE,
_dispatcher_q_inflight_hi);
static DISPATCHER_UINT_ATTR(inflight_low_latency, 0644,
- ADRENO_DISPATCH_CMDQUEUE_SIZE, _dispatcher_q_inflight_lo);
+ ADRENO_DISPATCH_DRAWQUEUE_SIZE, _dispatcher_q_inflight_lo);
/*
* Our code that "puts back" a command from the context is much cleaner
* if we are sure that there will always be enough room in the
* ringbuffer, so restrict the maximum size of the context queue to
- * ADRENO_CONTEXT_CMDQUEUE_SIZE - 1
+ * ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1
*/
-static DISPATCHER_UINT_ATTR(context_cmdqueue_size, 0644,
- ADRENO_CONTEXT_CMDQUEUE_SIZE - 1, _context_cmdqueue_size);
+static DISPATCHER_UINT_ATTR(context_drawqueue_size, 0644,
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE - 1, _context_drawqueue_size);
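Why the SIZE - 1 cap: with head/tail indices where head == tail means empty, a ring of N slots can only represent N - 1 occupied entries, and holding the sysfs limit one below the array size guarantees a free slot whenever a command is put back. A toy occupancy helper, hypothetical and only for illustration:

	static inline unsigned int drawqueue_used(unsigned int head,
			unsigned int tail, unsigned int size)
	{
		/* 0 when empty (head == tail), at most size - 1 when full */
		return (tail + size - head) % size;
	}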
static DISPATCHER_UINT_ATTR(context_burst_count, 0644, 0,
- _context_cmdbatch_burst);
-static DISPATCHER_UINT_ATTR(cmdbatch_timeout, 0644, 0,
- adreno_cmdbatch_timeout);
+ _context_drawobj_burst);
+static DISPATCHER_UINT_ATTR(drawobj_timeout, 0644, 0,
+ adreno_drawobj_timeout);
static DISPATCHER_UINT_ATTR(context_queue_wait, 0644, 0, _context_queue_wait);
static DISPATCHER_UINT_ATTR(fault_detect_interval, 0644, 0,
_fault_timer_interval);
@@ -2366,9 +2536,9 @@ static DISPATCHER_UINT_ATTR(dispatch_starvation_time, 0644, 0,
static struct attribute *dispatcher_attrs[] = {
&dispatcher_attr_inflight.attr,
&dispatcher_attr_inflight_low_latency.attr,
- &dispatcher_attr_context_cmdqueue_size.attr,
+ &dispatcher_attr_context_drawqueue_size.attr,
&dispatcher_attr_context_burst_count.attr,
- &dispatcher_attr_cmdbatch_timeout.attr,
+ &dispatcher_attr_drawobj_timeout.attr,
&dispatcher_attr_context_queue_wait.attr,
&dispatcher_attr_fault_detect_interval.attr,
&dispatcher_attr_fault_throttle_time.attr,
diff --git a/drivers/gpu/msm/adreno_dispatch.h b/drivers/gpu/msm/adreno_dispatch.h
index 699c3e4adb27..cb9106fedc82 100644
--- a/drivers/gpu/msm/adreno_dispatch.h
+++ b/drivers/gpu/msm/adreno_dispatch.h
@@ -15,7 +15,7 @@
#define ____ADRENO_DISPATCHER_H
extern unsigned int adreno_disp_preempt_fair_sched;
-extern unsigned int adreno_cmdbatch_timeout;
+extern unsigned int adreno_drawobj_timeout;
extern unsigned int adreno_dispatch_starvation_time;
extern unsigned int adreno_dispatch_time_slice;
@@ -44,21 +44,21 @@ enum adreno_dispatcher_starve_timer_states {
* sizes that can be chosen at runtime
*/
-#define ADRENO_DISPATCH_CMDQUEUE_SIZE 128
+#define ADRENO_DISPATCH_DRAWQUEUE_SIZE 128
-#define CMDQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
+#define DRAWQUEUE_NEXT(_i, _s) (((_i) + 1) % (_s))
/**
- * struct adreno_dispatcher_cmdqueue - List of commands for a RB level
- * @cmd_q: List of command batches submitted to dispatcher
+ * struct adreno_dispatcher_drawqueue - List of commands for a RB level
+ * @cmd_q: List of command objs submitted to the dispatcher
* @inflight: Number of commands inflight in this q
* @head: Head pointer to the q
* @tail: Queue's tail pointer
- * @active_context_count: Number of active contexts seen in this rb cmdqueue
- * @expires: The jiffies value at which this cmdqueue has run too long
+ * @active_context_count: Number of active contexts seen in this rb drawqueue
+ * @expires: The jiffies value at which this drawqueue has run too long
*/
-struct adreno_dispatcher_cmdqueue {
- struct kgsl_cmdbatch *cmd_q[ADRENO_DISPATCH_CMDQUEUE_SIZE];
+struct adreno_dispatcher_drawqueue {
+ struct kgsl_drawobj_cmd *cmd_q[ADRENO_DISPATCH_DRAWQUEUE_SIZE];
unsigned int inflight;
unsigned int head;
unsigned int tail;
@@ -70,10 +70,10 @@ struct adreno_dispatcher_cmdqueue {
* struct adreno_dispatcher - container for the adreno GPU dispatcher
* @mutex: Mutex to protect the structure
* @state: Current state of the dispatcher (active or paused)
- * @timer: Timer to monitor the progress of the command batches
- * @inflight: Number of command batch operations pending in the ringbuffer
+ * @timer: Timer to monitor the progress of the drawobjs
+ * @inflight: Number of drawobj operations pending in the ringbuffer
* @fault: Non-zero if a fault was detected.
- * @pending: Priority list of contexts waiting to submit command batches
+ * @pending: Priority list of contexts waiting to submit drawobjs
* @plist_lock: Spin lock to protect the pending queue
* @work: work_struct to put the dispatcher in a work queue
* @kobj: kobject for the dispatcher directory in the device sysfs node
@@ -109,9 +109,9 @@ int adreno_dispatcher_idle(struct adreno_device *adreno_dev);
void adreno_dispatcher_irq_fault(struct adreno_device *adreno_dev);
void adreno_dispatcher_stop(struct adreno_device *adreno_dev);
-int adreno_dispatcher_queue_cmd(struct adreno_device *adreno_dev,
- struct adreno_context *drawctxt, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp);
+int adreno_dispatcher_queue_cmds(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void adreno_dispatcher_schedule(struct kgsl_device *device);
void adreno_dispatcher_pause(struct adreno_device *adreno_dev);
@@ -120,11 +120,11 @@ void adreno_dispatcher_queue_context(struct kgsl_device *device,
void adreno_dispatcher_preempt_callback(struct adreno_device *adreno_dev,
int bit);
void adreno_preempt_process_dispatch_queue(struct adreno_device *adreno_dev,
- struct adreno_dispatcher_cmdqueue *dispatch_q);
+ struct adreno_dispatcher_drawqueue *dispatch_q);
-static inline bool adreno_cmdqueue_is_empty(
- struct adreno_dispatcher_cmdqueue *cmdqueue)
+static inline bool adreno_drawqueue_is_empty(
+ struct adreno_dispatcher_drawqueue *drawqueue)
{
- return (cmdqueue != NULL && cmdqueue->head == cmdqueue->tail);
+ return (drawqueue != NULL && drawqueue->head == drawqueue->tail);
}
#endif /* __ADRENO_DISPATCHER_H */
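Taken together, DRAWQUEUE_NEXT() and adreno_drawqueue_is_empty() implement the usual ring-index discipline. An illustrative walk-through, assuming only the definitions above:

	static void drawqueue_index_demo(void)
	{
		unsigned int head = 0, tail = 0;

		/* submit: producer advances tail; head != tail, not empty */
		tail = DRAWQUEUE_NEXT(tail, ADRENO_DISPATCH_DRAWQUEUE_SIZE);

		/* retire: consumer advances head; head == tail, empty again */
		head = DRAWQUEUE_NEXT(head, ADRENO_DISPATCH_DRAWQUEUE_SIZE);
	}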
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index d9ebe37d0cf0..3a110ed221a8 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -59,14 +59,14 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED, &retire);
/*
- * We may have cmdbatch timer running, which also uses same
+ * We may have a kgsl sync obj timer running, which also uses the same
* lock, take a lock with software interrupt disabled (bh)
* to avoid spin lock recursion.
*
* Use Spin trylock because dispatcher can acquire drawctxt->lock
* if context is pending and the fence it is waiting on just got
* signalled. Dispatcher acquires drawctxt->lock and tries to
- * delete the cmdbatch timer using del_timer_sync().
+ * delete the sync obj timer using del_timer_sync().
* del_timer_sync() waits till timer and its pending handlers
* are deleted. But if the timer expires at the same time,
* timer handler could be waiting on drawctxt->lock leading to a
@@ -83,23 +83,27 @@ void adreno_drawctxt_dump(struct kgsl_device *device,
context->id, queue, drawctxt->submitted_timestamp,
start, retire);
- if (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ if (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- if (test_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv)) {
+ if (test_bit(ADRENO_CONTEXT_FENCE_LOG, &context->priv)) {
dev_err(device->dev,
" possible deadlock. Context %d might be blocked for itself\n",
context->id);
goto stats;
}
- if (kgsl_cmdbatch_events_pending(cmdbatch)) {
- dev_err(device->dev,
- " context[%d] (ts=%d) Active sync points:\n",
- context->id, cmdbatch->timestamp);
+ if (drawobj->type == SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
+
+ if (kgsl_drawobj_events_pending(syncobj)) {
+ dev_err(device->dev,
+ " context[%d] (ts=%d) Active sync points:\n",
+ context->id, drawobj->timestamp);
- kgsl_dump_syncpoints(device, cmdbatch);
+ kgsl_dump_syncpoints(device, syncobj);
+ }
}
}
@@ -229,19 +233,19 @@ done:
return ret;
}
-static int drawctxt_detach_cmdbatches(struct adreno_context *drawctxt,
- struct kgsl_cmdbatch **list)
+static int drawctxt_detach_drawobjs(struct adreno_context *drawctxt,
+ struct kgsl_drawobj **list)
{
int count = 0;
- while (drawctxt->cmdqueue_head != drawctxt->cmdqueue_tail) {
- struct kgsl_cmdbatch *cmdbatch =
- drawctxt->cmdqueue[drawctxt->cmdqueue_head];
+ while (drawctxt->drawqueue_head != drawctxt->drawqueue_tail) {
+ struct kgsl_drawobj *drawobj =
+ drawctxt->drawqueue[drawctxt->drawqueue_head];
- drawctxt->cmdqueue_head = (drawctxt->cmdqueue_head + 1) %
- ADRENO_CONTEXT_CMDQUEUE_SIZE;
+ drawctxt->drawqueue_head = (drawctxt->drawqueue_head + 1) %
+ ADRENO_CONTEXT_DRAWQUEUE_SIZE;
- list[count++] = cmdbatch;
+ list[count++] = drawobj;
}
return count;
@@ -259,7 +263,7 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
int i, count;
trace_adreno_drawctxt_invalidate(drawctxt);
@@ -280,13 +284,13 @@ void adreno_drawctxt_invalidate(struct kgsl_device *device,
drawctxt->timestamp);
/* Get rid of commands still waiting in the queue */
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
kgsl_cancel_events_timestamp(device, &context->events,
list[i]->timestamp);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/* Make sure all pending events are processed or cancelled */
@@ -453,7 +457,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
struct adreno_context *drawctxt;
struct adreno_ringbuffer *rb;
int ret, count, i;
- struct kgsl_cmdbatch *list[ADRENO_CONTEXT_CMDQUEUE_SIZE];
+ struct kgsl_drawobj *list[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
if (context == NULL)
return;
@@ -468,7 +472,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
spin_unlock(&adreno_dev->active_list_lock);
spin_lock(&drawctxt->lock);
- count = drawctxt_detach_cmdbatches(drawctxt, list);
+ count = drawctxt_detach_drawobjs(drawctxt, list);
spin_unlock(&drawctxt->lock);
for (i = 0; i < count; i++) {
@@ -478,7 +482,7 @@ void adreno_drawctxt_detach(struct kgsl_context *context)
* detached status here.
*/
adreno_fault_skipcmd_detached(adreno_dev, drawctxt, list[i]);
- kgsl_cmdbatch_destroy(list[i]);
+ kgsl_drawobj_destroy(list[i]);
}
/*
diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h
index 5ea911954991..0578f16ae9e1 100644
--- a/drivers/gpu/msm/adreno_drawctxt.h
+++ b/drivers/gpu/msm/adreno_drawctxt.h
@@ -18,7 +18,7 @@ struct adreno_context_type {
const char *str;
};
-#define ADRENO_CONTEXT_CMDQUEUE_SIZE 128
+#define ADRENO_CONTEXT_DRAWQUEUE_SIZE 128
#define SUBMIT_RETIRE_TICKS_SIZE 7
struct kgsl_device;
@@ -32,20 +32,21 @@ struct kgsl_context;
* @internal_timestamp: Global timestamp of the last issued command
* NOTE: guarded by device->mutex, not drawctxt->mutex!
* @type: Context type (GL, CL, RS)
- * @mutex: Mutex to protect the cmdqueue
- * @cmdqueue: Queue of command batches waiting to be dispatched for this context
- * @cmdqueue_head: Head of the cmdqueue queue
- * @cmdqueue_tail: Tail of the cmdqueue queue
+ * @mutex: Mutex to protect the drawqueue
+ * @drawqueue: Queue of drawobjs waiting to be dispatched for this
+ * context
+ * @drawqueue_head: Head of the drawqueue queue
+ * @drawqueue_tail: Tail of the drawqueue queue
* @pending: Priority list node for the dispatcher list of pending contexts
* @wq: Workqueue structure for contexts to sleep pending room in the queue
* @waiting: Workqueue structure for contexts waiting for a timestamp or event
- * @queued: Number of commands queued in the cmdqueue
- * @fault_policy: GFT fault policy set in cmdbatch_skip_cmd();
+ * @queued: Number of commands queued in the drawqueue
+ * @fault_policy: GFT fault policy set in _skip_cmd();
* @debug_root: debugfs entry for this context.
* @queued_timestamp: The last timestamp that was queued on this context
* @rb: The ringbuffer in which this context submits commands.
* @submitted_timestamp: The last timestamp that was submitted for this context
- * @submit_retire_ticks: Array to hold cmdbatch execution times from submit
+ * @submit_retire_ticks: Array to hold command obj execution times from submit
* to retire
* @ticks_index: The index into submit_retire_ticks[] where the new delta will
* be written.
@@ -60,9 +61,9 @@ struct adreno_context {
spinlock_t lock;
/* Dispatcher */
- struct kgsl_cmdbatch *cmdqueue[ADRENO_CONTEXT_CMDQUEUE_SIZE];
- unsigned int cmdqueue_head;
- unsigned int cmdqueue_tail;
+ struct kgsl_drawobj *drawqueue[ADRENO_CONTEXT_DRAWQUEUE_SIZE];
+ unsigned int drawqueue_head;
+ unsigned int drawqueue_tail;
struct plist_node pending;
wait_queue_head_t wq;
@@ -92,8 +93,9 @@ struct adreno_context {
* @ADRENO_CONTEXT_SKIP_EOF - Context skip IBs until the next end of frame
* marker.
* @ADRENO_CONTEXT_FORCE_PREAMBLE - Force the preamble for the next submission.
- * @ADRENO_CONTEXT_SKIP_CMD - Context's command batch is skipped during
+ * @ADRENO_CONTEXT_SKIP_CMD - Context's drawobjs are skipped during
fault tolerance.
+ * @ADRENO_CONTEXT_FENCE_LOG - Dump fences on this context.
*/
enum adreno_context_priv {
ADRENO_CONTEXT_FAULT = KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC,
@@ -102,6 +104,7 @@ enum adreno_context_priv {
ADRENO_CONTEXT_SKIP_EOF,
ADRENO_CONTEXT_FORCE_PREAMBLE,
ADRENO_CONTEXT_SKIP_CMD,
+ ADRENO_CONTEXT_FENCE_LOG,
};
/* Flags for adreno_drawctxt_switch() */
diff --git a/drivers/gpu/msm/adreno_perfcounter.c b/drivers/gpu/msm/adreno_perfcounter.c
index 8e354d71a291..42f8119ad8b4 100644
--- a/drivers/gpu/msm/adreno_perfcounter.c
+++ b/drivers/gpu/msm/adreno_perfcounter.c
@@ -598,28 +598,6 @@ int adreno_perfcounter_put(struct adreno_device *adreno_dev,
return -EINVAL;
}
-static int _perfcounter_enable_pwr(struct adreno_device *adreno_dev,
- unsigned int counter)
-{
- /* PWR counters enabled by default on A3XX/A4XX so nothing to do */
- if (adreno_is_a3xx(adreno_dev) || adreno_is_a4xx(adreno_dev))
- return 0;
-
- /*
- * On 5XX we have to emulate the PWR counters which are physically
- * missing. Program countable 6 on RBBM_PERFCTR_RBBM_0 as a substitute
- * for PWR:1. Don't emulate PWR:0 as nobody uses it and we don't want
- * to take away too many of the generic RBBM counters.
- */
-
- if (counter == 0)
- return -EINVAL;
-
- kgsl_regwrite(KGSL_DEVICE(adreno_dev), A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
-
- return 0;
-}
-
static void _perfcounter_enable_vbif(struct adreno_device *adreno_dev,
struct adreno_perfcounters *counters, unsigned int counter,
unsigned int countable)
@@ -771,6 +749,7 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
unsigned int group, unsigned int counter, unsigned int countable)
{
struct adreno_perfcounters *counters = ADRENO_PERFCOUNTERS(adreno_dev);
+ struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
if (counters == NULL)
return -EINVAL;
@@ -786,7 +765,9 @@ static int adreno_perfcounter_enable(struct adreno_device *adreno_dev,
/* alwayson counter is global, so init value is 0 */
break;
case KGSL_PERFCOUNTER_GROUP_PWR:
- return _perfcounter_enable_pwr(adreno_dev, counter);
+ if (gpudev->enable_pwr_counters)
+ return gpudev->enable_pwr_counters(adreno_dev, counter);
+ return 0;
case KGSL_PERFCOUNTER_GROUP_VBIF:
if (countable > VBIF2_PERF_CNT_SEL_MASK)
return -EINVAL;
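The PWR emulation deleted above does not go away; it moves behind the new gpudev->enable_pwr_counters hook so only the targets that need it carry the code. A sketch of what the A5xx implementation would look like, reusing the removed logic (the function name and placement are assumptions; the real callback lives in the per-GPU file):

	static int a5xx_enable_pwr_counters(struct adreno_device *adreno_dev,
			unsigned int counter)
	{
		/* PWR:0 is not emulated; PWR:1 maps onto RBBM countable 6 */
		if (counter == 0)
			return -EINVAL;

		kgsl_regwrite(KGSL_DEVICE(adreno_dev),
				A5XX_RBBM_PERFCTR_RBBM_SEL_0, 6);
		return 0;
	}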
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index 07ef09034d7c..fc0602a60ac1 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -671,96 +671,17 @@ adreno_ringbuffer_issuecmds(struct adreno_ringbuffer *rb,
sizedwords, 0, NULL);
}
-/**
- * _ringbuffer_verify_ib() - Check if an IB's size is within a permitted limit
- * @device: The kgsl device pointer
- * @ibdesc: Pointer to the IB descriptor
- */
-static inline bool _ringbuffer_verify_ib(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_memobj_node *ib)
-{
- struct kgsl_device *device = dev_priv->device;
- struct kgsl_process_private *private = dev_priv->process_priv;
-
- /* The maximum allowable size for an IB in the CP is 0xFFFFF dwords */
- if (ib->size == 0 || ((ib->size >> 2) > 0xFFFFF)) {
- pr_context(device, context, "ctxt %d invalid ib size %lld\n",
- context->id, ib->size);
- return false;
- }
-
- /* Make sure that the address is mapped */
- if (!kgsl_mmu_gpuaddr_in_range(private->pagetable, ib->gpuaddr)) {
- pr_context(device, context, "ctxt %d invalid ib gpuaddr %llX\n",
- context->id, ib->gpuaddr);
- return false;
- }
-
- return true;
-}
-
-int
-adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
- struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamp)
-{
- struct kgsl_device *device = dev_priv->device;
- struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
- struct adreno_context *drawctxt = ADRENO_CONTEXT(context);
- struct kgsl_memobj_node *ib;
- int ret;
-
- if (kgsl_context_invalid(context))
- return -EDEADLK;
-
- /* Verify the IBs before they get queued */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
- if (_ringbuffer_verify_ib(dev_priv, context, ib) == false)
- return -EINVAL;
-
- /* wait for the suspend gate */
- wait_for_completion(&device->cmdbatch_gate);
-
- /*
- * Clear the wake on touch bit to indicate an IB has been
- * submitted since the last time we set it. But only clear
- * it when we have rendering commands.
- */
- if (!(cmdbatch->flags & KGSL_CMDBATCH_MARKER)
- && !(cmdbatch->flags & KGSL_CMDBATCH_SYNC))
- device->flags &= ~KGSL_FLAG_WAKE_ON_TOUCH;
-
- /* A3XX does not have support for command batch profiling */
- if (adreno_is_a3xx(adreno_dev) &&
- (cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
- return -EOPNOTSUPP;
-
- /* Queue the command in the ringbuffer */
- ret = adreno_dispatcher_queue_cmd(adreno_dev, drawctxt, cmdbatch,
- timestamp);
-
- /*
- * Return -EPROTO if the device has faulted since the last time we
- * checked - userspace uses this to perform post-fault activities
- */
- if (!ret && test_and_clear_bit(ADRENO_CONTEXT_FAULT, &context->priv))
- ret = -EPROTO;
-
- return ret;
-}
-
static void adreno_ringbuffer_set_constraint(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj *drawobj)
{
- struct kgsl_context *context = cmdbatch->context;
+ struct kgsl_context *context = drawobj->context;
/*
* Check if the context has a constraint and constraint flags are
* set.
*/
if (context->pwr_constraint.type &&
((context->flags & KGSL_CONTEXT_PWR_CONSTRAINT) ||
- (cmdbatch->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
+ (drawobj->flags & KGSL_CONTEXT_PWR_CONSTRAINT)))
kgsl_pwrctrl_set_constraint(device, &context->pwr_constraint,
context->id);
}
@@ -792,10 +713,12 @@ static inline int _get_alwayson_counter(struct adreno_device *adreno_dev,
/* adreno_ringbuffer_submitcmd - submit userspace IBs to the GPU */
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch, struct adreno_submit_time *time)
+ struct kgsl_drawobj_cmd *cmdobj,
+ struct adreno_submit_time *time)
{
struct kgsl_device *device = KGSL_DEVICE(adreno_dev);
struct adreno_gpudev *gpudev = ADRENO_GPU_DEVICE(adreno_dev);
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
struct kgsl_memobj_node *ib;
unsigned int numibs = 0;
unsigned int *link;
@@ -803,25 +726,25 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
struct kgsl_context *context;
struct adreno_context *drawctxt;
bool use_preamble = true;
- bool cmdbatch_user_profiling = false;
- bool cmdbatch_kernel_profiling = false;
+ bool user_profiling = false;
+ bool kernel_profiling = false;
int flags = KGSL_CMD_FLAGS_NONE;
int ret;
struct adreno_ringbuffer *rb;
- struct kgsl_cmdbatch_profiling_buffer *profile_buffer = NULL;
+ struct kgsl_drawobj_profiling_buffer *profile_buffer = NULL;
unsigned int dwords = 0;
struct adreno_submit_time local;
- struct kgsl_mem_entry *entry = cmdbatch->profiling_buf_entry;
+ struct kgsl_mem_entry *entry = cmdobj->profiling_buf_entry;
if (entry)
profile_buffer = kgsl_gpuaddr_to_vaddr(&entry->memdesc,
- cmdbatch->profiling_buffer_gpuaddr);
+ cmdobj->profiling_buffer_gpuaddr);
- context = cmdbatch->context;
+ context = drawobj->context;
drawctxt = ADRENO_CONTEXT(context);
/* Get the total IBs in the list */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node)
+ list_for_each_entry(ib, &cmdobj->cmdlist, node)
numibs++;
rb = drawctxt->rb;
@@ -838,14 +761,14 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* c) force preamble for commandbatch
*/
if (test_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv) &&
- (!test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv))) {
+ (!test_bit(CMDOBJ_SKIP, &cmdobj->priv))) {
- set_bit(KGSL_FT_SKIPCMD, &cmdbatch->fault_recovery);
- cmdbatch->fault_policy = drawctxt->fault_policy;
- set_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv);
+ set_bit(KGSL_FT_SKIPCMD, &cmdobj->fault_recovery);
+ cmdobj->fault_policy = drawctxt->fault_policy;
+ set_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv);
/* if context is detached print fault recovery */
- adreno_fault_skipcmd_detached(adreno_dev, drawctxt, cmdbatch);
+ adreno_fault_skipcmd_detached(adreno_dev, drawctxt, drawobj);
/* clear the drawctxt flags */
clear_bit(ADRENO_CONTEXT_SKIP_CMD, &drawctxt->base.priv);
@@ -857,7 +780,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
if a context switch hasn't occurred */
if ((drawctxt->base.flags & KGSL_CONTEXT_PREAMBLE) &&
- !test_bit(CMDBATCH_FLAG_FORCE_PREAMBLE, &cmdbatch->priv) &&
+ !test_bit(CMDOBJ_FORCE_PREAMBLE, &cmdobj->priv) &&
(rb->drawctxt_active == drawctxt))
use_preamble = false;
@@ -867,7 +790,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
* the accounting sane. Set start_index and numibs to 0 to just
* generate the start and end markers and skip everything else
*/
- if (test_bit(CMDBATCH_FLAG_SKIP, &cmdbatch->priv)) {
+ if (test_bit(CMDOBJ_SKIP, &cmdobj->priv)) {
use_preamble = false;
numibs = 0;
}
@@ -884,9 +807,9 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
/* Each IB takes up 30 dwords in worst case */
dwords += (numibs * 30);
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING &&
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING &&
!adreno_is_a3xx(adreno_dev) && profile_buffer) {
- cmdbatch_user_profiling = true;
+ user_profiling = true;
dwords += 6;
/*
@@ -907,8 +830,8 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
time = &local;
}
- if (test_bit(CMDBATCH_FLAG_PROFILE, &cmdbatch->priv)) {
- cmdbatch_kernel_profiling = true;
+ if (test_bit(CMDOBJ_PROFILE, &cmdobj->priv)) {
+ kernel_profiling = true;
dwords += 6;
if (adreno_is_a5xx(adreno_dev))
dwords += 2;
@@ -929,26 +852,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
*cmds++ = cp_packet(adreno_dev, CP_NOP, 1);
*cmds++ = KGSL_START_OF_IB_IDENTIFIER;
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
started));
}
/*
- * Add cmds to read the GPU ticks at the start of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the start of the command obj and
+ * write it into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_submitted));
}
if (numibs) {
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
/*
* Skip 0 sized IBs - these are presumed to have been
* removed from consideration by the FT policy
@@ -972,21 +895,21 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
adreno_is_preemption_enabled(adreno_dev))
cmds += gpudev->preemption_yield_enable(cmds);
- if (cmdbatch_kernel_profiling) {
+ if (kernel_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- adreno_dev->cmdbatch_profile_buffer.gpuaddr +
- ADRENO_CMDBATCH_PROFILE_OFFSET(cmdbatch->profile_index,
+ adreno_dev->profile_buffer.gpuaddr +
+ ADRENO_DRAWOBJ_PROFILE_OFFSET(cmdobj->profile_index,
retired));
}
/*
- * Add cmds to read the GPU ticks at the end of the cmdbatch and
- * write it into the appropriate cmdbatch profiling buffer offset
+ * Add cmds to read the GPU ticks at the end of the command obj and
+ * write it into the appropriate command obj profiling buffer offset
*/
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
cmds += _get_alwayson_counter(adreno_dev, cmds,
- cmdbatch->profiling_buffer_gpuaddr +
- offsetof(struct kgsl_cmdbatch_profiling_buffer,
+ cmdobj->profiling_buffer_gpuaddr +
+ offsetof(struct kgsl_drawobj_profiling_buffer,
gpu_ticks_retired));
}
@@ -1012,7 +935,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
goto done;
}
- if (test_bit(CMDBATCH_FLAG_WFI, &cmdbatch->priv))
+ if (test_bit(CMDOBJ_WFI, &cmdobj->priv))
flags = KGSL_CMD_FLAGS_WFI;
/*
@@ -1025,26 +948,26 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
flags |= KGSL_CMD_FLAGS_PWRON_FIXUP;
/* Set the constraints before adding to ringbuffer */
- adreno_ringbuffer_set_constraint(device, cmdbatch);
+ adreno_ringbuffer_set_constraint(device, drawobj);
/* CFF stuff executed only if CFF is enabled */
- kgsl_cffdump_capture_ib_desc(device, context, cmdbatch);
+ kgsl_cffdump_capture_ib_desc(device, context, cmdobj);
ret = adreno_ringbuffer_addcmds(rb, flags,
&link[0], (cmds - link),
- cmdbatch->timestamp, time);
+ drawobj->timestamp, time);
if (!ret) {
- cmdbatch->global_ts = drawctxt->internal_timestamp;
+ cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
- if (cmdbatch_user_profiling) {
+ if (user_profiling) {
/*
* Return kernel clock time to the client
* if requested
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING_KTIME) {
+ if (drawobj->flags & KGSL_DRAWOBJ_PROFILING_KTIME) {
uint64_t secs = time->ktime;
profile_buffer->wall_clock_ns =
@@ -1069,9 +992,8 @@ done:
kgsl_memdesc_unmap(&entry->memdesc);
- trace_kgsl_issueibcmds(device, context->id, cmdbatch,
- numibs, cmdbatch->timestamp,
- cmdbatch->flags, ret, drawctxt->type);
+ trace_kgsl_issueibcmds(device, context->id, numibs, drawobj->timestamp,
+ drawobj->flags, ret, drawctxt->type);
kfree(link);
return ret;
diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h
index b126f710b5e6..63374af1e3f7 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.h
+++ b/drivers/gpu/msm/adreno_ringbuffer.h
@@ -119,7 +119,7 @@ struct adreno_ringbuffer {
struct adreno_context *drawctxt_active;
struct kgsl_memdesc preemption_desc;
struct kgsl_memdesc pagetable_desc;
- struct adreno_dispatcher_cmdqueue dispatch_q;
+ struct adreno_dispatcher_drawqueue dispatch_q;
wait_queue_head_t ts_expire_waitq;
unsigned int wptr_preempt_end;
unsigned int gpr11;
@@ -136,11 +136,11 @@ int cp_secure_mode(struct adreno_device *adreno_dev, uint *cmds, int set);
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj,
uint32_t *timestamp);
int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
- struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj_cmd *cmdobj,
struct adreno_submit_time *time);
int adreno_ringbuffer_probe(struct adreno_device *adreno_dev, bool nopreempt);
diff --git a/drivers/gpu/msm/adreno_trace.h b/drivers/gpu/msm/adreno_trace.h
index f52ddfa894d5..16ca0980cfbe 100644
--- a/drivers/gpu/msm/adreno_trace.h
+++ b/drivers/gpu/msm/adreno_trace.h
@@ -27,8 +27,8 @@
#include "adreno_a5xx.h"
TRACE_EVENT(adreno_cmdbatch_queued,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int queued),
- TP_ARGS(cmdbatch, queued),
+ TP_PROTO(struct kgsl_drawobj *drawobj, unsigned int queued),
+ TP_ARGS(drawobj, queued),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -37,26 +37,26 @@ TRACE_EVENT(adreno_cmdbatch_queued,
__field(unsigned int, prio)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->queued = queued;
- __entry->flags = cmdbatch->flags;
- __entry->prio = cmdbatch->context->priority;
+ __entry->flags = drawobj->flags;
+ __entry->prio = drawobj->context->priority;
),
TP_printk(
"ctx=%u ctx_prio=%u ts=%u queued=%u flags=%s",
__entry->id, __entry->prio,
__entry->timestamp, __entry->queued,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none"
+ KGSL_DRAWOBJ_FLAGS) : "none"
)
);
TRACE_EVENT(adreno_cmdbatch_submitted,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight, uint64_t ticks,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight, uint64_t ticks,
unsigned long secs, unsigned long usecs,
struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, ticks, secs, usecs, rb, rptr),
+ TP_ARGS(drawobj, inflight, ticks, secs, usecs, rb, rptr),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -72,14 +72,14 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__field(int, q_inflight)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->flags = cmdbatch->flags;
+ __entry->flags = drawobj->flags;
__entry->ticks = ticks;
__entry->secs = secs;
__entry->usecs = usecs;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -90,7 +90,7 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
__entry->id, __entry->prio, __entry->timestamp,
__entry->inflight,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->ticks, __entry->secs, __entry->usecs,
__entry->rb_id, __entry->rptr, __entry->wptr,
__entry->q_inflight
@@ -98,10 +98,11 @@ TRACE_EVENT(adreno_cmdbatch_submitted,
);
TRACE_EVENT(adreno_cmdbatch_retired,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, int inflight,
+ TP_PROTO(struct kgsl_drawobj *drawobj, int inflight,
uint64_t start, uint64_t retire,
- struct adreno_ringbuffer *rb, unsigned int rptr),
- TP_ARGS(cmdbatch, inflight, start, retire, rb, rptr),
+ struct adreno_ringbuffer *rb, unsigned int rptr,
+ unsigned long fault_recovery),
+ TP_ARGS(drawobj, inflight, start, retire, rb, rptr, fault_recovery),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
@@ -115,16 +116,17 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__field(unsigned int, rptr)
__field(unsigned int, wptr)
__field(int, q_inflight)
+ __field(unsigned long, fault_recovery)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = drawobj->context->id;
+ __entry->timestamp = drawobj->timestamp;
__entry->inflight = inflight;
- __entry->recovery = cmdbatch->fault_recovery;
- __entry->flags = cmdbatch->flags;
+ __entry->recovery = fault_recovery;
+ __entry->flags = drawobj->flags;
__entry->start = start;
__entry->retire = retire;
- __entry->prio = cmdbatch->context->priority;
+ __entry->prio = drawobj->context->priority;
__entry->rb_id = rb->id;
__entry->rptr = rptr;
__entry->wptr = rb->wptr;
@@ -138,7 +140,7 @@ TRACE_EVENT(adreno_cmdbatch_retired,
__print_flags(__entry->recovery, "|",
ADRENO_FT_TYPES) : "none",
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "none",
+ KGSL_DRAWOBJ_FLAGS) : "none",
__entry->start,
__entry->retire,
__entry->rb_id, __entry->rptr, __entry->wptr,
@@ -147,16 +149,16 @@ TRACE_EVENT(adreno_cmdbatch_retired,
);
TRACE_EVENT(adreno_cmdbatch_fault,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int fault),
- TP_ARGS(cmdbatch, fault),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int fault),
+ TP_ARGS(cmdobj, fault),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, fault)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->fault = fault;
),
TP_printk(
@@ -171,16 +173,16 @@ TRACE_EVENT(adreno_cmdbatch_fault,
);
TRACE_EVENT(adreno_cmdbatch_recovery,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, unsigned int action),
- TP_ARGS(cmdbatch, action),
+ TP_PROTO(struct kgsl_drawobj_cmd *cmdobj, unsigned int action),
+ TP_ARGS(cmdobj, action),
TP_STRUCT__entry(
__field(unsigned int, id)
__field(unsigned int, timestamp)
__field(unsigned int, action)
),
TP_fast_assign(
- __entry->id = cmdbatch->context->id;
- __entry->timestamp = cmdbatch->timestamp;
+ __entry->id = cmdobj->base.context->id;
+ __entry->timestamp = cmdobj->base.timestamp;
__entry->action = action;
),
TP_printk(
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 88581b079246..add4590bbb90 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -36,7 +36,7 @@
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_device.h"
#include "kgsl_trace.h"
#include "kgsl_sync.h"
@@ -1497,11 +1497,17 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_ringbuffer_issueibcmds *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
+ struct kgsl_drawobj *drawobj;
+ struct kgsl_drawobj_cmd *cmdobj;
long result = -EINVAL;
/* The legacy functions don't support synchronization commands */
- if ((param->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER)))
+ if ((param->flags & (KGSL_DRAWOBJ_SYNC | KGSL_DRAWOBJ_MARKER)))
+ return -EINVAL;
+
+ /* Sanity check the number of IBs */
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST &&
+ (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS))
return -EINVAL;
/* Get the context */
@@ -1509,23 +1515,20 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ cmdobj = kgsl_drawobj_cmd_create(device, context, param->flags,
+ CMDOBJ_TYPE);
+ if (IS_ERR(cmdobj)) {
+ kgsl_context_put(context);
+ return PTR_ERR(cmdobj);
}
- if (param->flags & KGSL_CMDBATCH_SUBMIT_IB_LIST) {
- /* Sanity check the number of IBs */
- if (param->numibs == 0 || param->numibs > KGSL_MAX_NUMIBS) {
- result = -EINVAL;
- goto done;
- }
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (param->flags & KGSL_DRAWOBJ_SUBMIT_IB_LIST)
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
(void __user *) param->ibdesc_addr,
param->numibs);
- } else {
+ else {
struct kgsl_ibdesc ibdesc;
/* Ultra legacy path */
@@ -1533,83 +1536,119 @@ long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
ibdesc.sizedwords = param->numibs;
ibdesc.ctrl = 0;
- result = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ result = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
}
- if (result)
- goto done;
-
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ if (result == 0)
+ result = dev_priv->device->ftbl->queue_cmds(dev_priv, context,
+ &drawobj, 1, &param->timestamp);
-done:
/*
* -EPROTO is a "success" error - it just tells the user that the
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ kgsl_drawobj_destroy(drawobj);
kgsl_context_put(context);
return result;
}
+/* Returns 0 on failure. Returns command type(s) on success */
+static unsigned int _process_command_input(struct kgsl_device *device,
+ unsigned int flags, unsigned int numcmds,
+ unsigned int numobjs, unsigned int numsyncs)
+{
+ if (numcmds > KGSL_MAX_NUMIBS ||
+ numobjs > KGSL_MAX_NUMIBS ||
+ numsyncs > KGSL_MAX_SYNCPOINTS)
+ return 0;
+
+ /*
+ * The SYNC bit is supposed to identify a dummy sync object
+ * so warn the user if they specified any IBs with it.
+ * A MARKER command can either have IBs or not but if the
+ * command has 0 IBs it is automatically assumed to be a marker.
+ */
+
+ /* If they specify the flag, go with what they say */
+ if (flags & KGSL_DRAWOBJ_MARKER)
+ return MARKEROBJ_TYPE;
+ else if (flags & KGSL_DRAWOBJ_SYNC)
+ return SYNCOBJ_TYPE;
+
+ /* If not, deduce what they meant */
+ if (numsyncs && numcmds)
+ return SYNCOBJ_TYPE | CMDOBJ_TYPE;
+ else if (numsyncs)
+ return SYNCOBJ_TYPE;
+ else if (numcmds)
+ return CMDOBJ_TYPE;
+ else if (numcmds == 0)
+ return MARKEROBJ_TYPE;
+
+ return 0;
+}
+
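The deduction order above matters: an explicit MARKER or SYNC flag wins outright, otherwise the counts decide, and zero IBs degrade to a marker. Spelled out as calls (a sketch assuming the TYPE constants are distinct bits):

	unsigned int type;

	type = _process_command_input(device, KGSL_DRAWOBJ_SYNC, 4, 0, 1);
					/* SYNCOBJ_TYPE: the flag wins */
	type = _process_command_input(device, 0, 4, 0, 1);
					/* SYNCOBJ_TYPE | CMDOBJ_TYPE */
	type = _process_command_input(device, 0, 4, 0, 0);
					/* CMDOBJ_TYPE */
	type = _process_command_input(device, 0, 0, 0, 0);
					/* MARKEROBJ_TYPE: 0 IBs */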
long kgsl_ioctl_submit_commands(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_submit_commands *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
- long result = -EINVAL;
-
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
-
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (param->numcmds > KGSL_MAX_NUMIBS)
- return -EINVAL;
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /* Make sure that we don't have too many syncpoints */
- if (param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds, 0,
+ param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- /* Create a command batch */
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_syncpoints(device, syncobj,
+ param->synclist, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_ibdesc_list(device, cmdbatch,
- param->cmdlist, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_syncpoints(device, cmdbatch,
- param->synclist, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_ibdesc_list(device, cmdobj,
+ param->cmdlist, param->numcmds);
+ if (result)
+ goto done;
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
+
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1617,7 +1656,9 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
+
kgsl_context_put(context);
return result;
@@ -1629,63 +1670,69 @@ long kgsl_ioctl_gpu_command(struct kgsl_device_private *dev_priv,
struct kgsl_gpu_command *param = data;
struct kgsl_device *device = dev_priv->device;
struct kgsl_context *context;
- struct kgsl_cmdbatch *cmdbatch = NULL;
-
- long result = -EINVAL;
+ struct kgsl_drawobj *drawobj[2];
+ unsigned int type;
+ long result;
+ unsigned int i = 0;
- /*
- * The SYNC bit is supposed to identify a dummy sync object so warn the
- * user if they specified any IBs with it. A MARKER command can either
- * have IBs or not but if the command has 0 IBs it is automatically
- * assumed to be a marker. If none of the above make sure that the user
- * specified a sane number of IBs
- */
- if ((param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds)
- KGSL_DEV_ERR_ONCE(device,
- "Commands specified with the SYNC flag. They will be ignored\n");
- else if (!(param->flags & KGSL_CMDBATCH_SYNC) && param->numcmds == 0)
- param->flags |= KGSL_CMDBATCH_MARKER;
-
- /* Make sure that the memobj and syncpoint count isn't too big */
- if (param->numcmds > KGSL_MAX_NUMIBS ||
- param->numobjs > KGSL_MAX_NUMIBS ||
- param->numsyncs > KGSL_MAX_SYNCPOINTS)
+ type = _process_command_input(device, param->flags, param->numcmds,
+ param->numobjs, param->numsyncs);
+ if (!type)
return -EINVAL;
context = kgsl_context_get_owner(dev_priv, param->context_id);
if (context == NULL)
return -EINVAL;
- cmdbatch = kgsl_cmdbatch_create(device, context, param->flags);
- if (IS_ERR(cmdbatch)) {
- result = PTR_ERR(cmdbatch);
- goto done;
+ if (type & SYNCOBJ_TYPE) {
+ struct kgsl_drawobj_sync *syncobj =
+ kgsl_drawobj_sync_create(device, context);
+
+ if (IS_ERR(syncobj)) {
+ result = PTR_ERR(syncobj);
+ goto done;
+ }
+
+ drawobj[i++] = DRAWOBJ(syncobj);
+
+ result = kgsl_drawobj_sync_add_synclist(device, syncobj,
+ to_user_ptr(param->synclist),
+ param->syncsize, param->numsyncs);
+ if (result)
+ goto done;
}
- result = kgsl_cmdbatch_add_cmdlist(device, cmdbatch,
- to_user_ptr(param->cmdlist),
- param->cmdsize, param->numcmds);
- if (result)
- goto done;
+ if (type & (CMDOBJ_TYPE | MARKEROBJ_TYPE)) {
+ struct kgsl_drawobj_cmd *cmdobj =
+ kgsl_drawobj_cmd_create(device,
+ context, param->flags, type);
- result = kgsl_cmdbatch_add_memlist(device, cmdbatch,
- to_user_ptr(param->objlist),
- param->objsize, param->numobjs);
- if (result)
- goto done;
+ if (IS_ERR(cmdobj)) {
+ result = PTR_ERR(cmdobj);
+ goto done;
+ }
- result = kgsl_cmdbatch_add_synclist(device, cmdbatch,
- to_user_ptr(param->synclist),
- param->syncsize, param->numsyncs);
- if (result)
- goto done;
+ drawobj[i++] = DRAWOBJ(cmdobj);
+
+ result = kgsl_drawobj_cmd_add_cmdlist(device, cmdobj,
+ to_user_ptr(param->cmdlist),
+ param->cmdsize, param->numcmds);
+ if (result)
+ goto done;
- /* If no profiling buffer was specified, clear the flag */
- if (cmdbatch->profiling_buf_entry == NULL)
- cmdbatch->flags &= ~KGSL_CMDBATCH_PROFILING;
+ result = kgsl_drawobj_cmd_add_memlist(device, cmdobj,
+ to_user_ptr(param->objlist),
+ param->objsize, param->numobjs);
+ if (result)
+ goto done;
+
+ /* If no profiling buffer was specified, clear the flag */
+ if (cmdobj->profiling_buf_entry == NULL)
+ DRAWOBJ(cmdobj)->flags &= ~KGSL_DRAWOBJ_PROFILING;
+ }
- result = dev_priv->device->ftbl->issueibcmds(dev_priv, context,
- cmdbatch, &param->timestamp);
+ result = device->ftbl->queue_cmds(dev_priv, context, drawobj,
+ i, &param->timestamp);
done:
/*
@@ -1693,7 +1740,8 @@ done:
* context had previously faulted
*/
if (result && result != -EPROTO)
- kgsl_cmdbatch_destroy(cmdbatch);
+ while (i--)
+ kgsl_drawobj_destroy(drawobj[i]);
kgsl_context_put(context);
return result;
@@ -4600,7 +4648,7 @@ static void kgsl_core_exit(void)
kgsl_driver.class = NULL;
}
- kgsl_cmdbatch_exit();
+ kgsl_drawobj_exit();
kgsl_memfree_exit();
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
@@ -4676,7 +4724,7 @@ static int __init kgsl_core_init(void)
kgsl_events_init();
- result = kgsl_cmdbatch_init();
+ result = kgsl_drawobj_init();
if (result)
goto err;
diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h
index 7ac84b777051..826c4edb3582 100644
--- a/drivers/gpu/msm/kgsl.h
+++ b/drivers/gpu/msm/kgsl.h
@@ -28,6 +28,25 @@
#include <linux/uaccess.h>
#include <asm/cacheflush.h>
+/*
+ * --- kgsl drawobj flags ---
+ * These flags are the same as the KGSL_CMDBATCH_* flags,
+ * renamed to reflect that cmdbatch has been renamed to drawobj.
+ */
+#define KGSL_DRAWOBJ_MEMLIST KGSL_CMDBATCH_MEMLIST
+#define KGSL_DRAWOBJ_MARKER KGSL_CMDBATCH_MARKER
+#define KGSL_DRAWOBJ_SUBMIT_IB_LIST KGSL_CMDBATCH_SUBMIT_IB_LIST
+#define KGSL_DRAWOBJ_CTX_SWITCH KGSL_CMDBATCH_CTX_SWITCH
+#define KGSL_DRAWOBJ_PROFILING KGSL_CMDBATCH_PROFILING
+#define KGSL_DRAWOBJ_PROFILING_KTIME KGSL_CMDBATCH_PROFILING_KTIME
+#define KGSL_DRAWOBJ_END_OF_FRAME KGSL_CMDBATCH_END_OF_FRAME
+#define KGSL_DRAWOBJ_SYNC KGSL_CMDBATCH_SYNC
+#define KGSL_DRAWOBJ_PWR_CONSTRAINT KGSL_CMDBATCH_PWR_CONSTRAINT
+#define KGSL_DRAWOBJ_SPARSE KGSL_CMDBATCH_SPARSE
+
+#define kgsl_drawobj_profiling_buffer kgsl_cmdbatch_profiling_buffer
+
+
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, update multiple for MEMSTORE_SIZE
*/
diff --git a/drivers/gpu/msm/kgsl_cffdump.c b/drivers/gpu/msm/kgsl_cffdump.c
index 8e783f8ce017..3337570477f9 100644
--- a/drivers/gpu/msm/kgsl_cffdump.c
+++ b/drivers/gpu/msm/kgsl_cffdump.c
@@ -705,7 +705,7 @@ static int kgsl_cffdump_capture_adreno_ib_cff(struct kgsl_device *device,
*/
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
int ret = 0;
struct kgsl_memobj_node *ib;
@@ -713,7 +713,7 @@ int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
if (!device->cff_dump_enable)
return 0;
/* Dump CFF for IB and all objects in it */
- list_for_each_entry(ib, &cmdbatch->cmdlist, node) {
+ list_for_each_entry(ib, &cmdobj->cmdlist, node) {
ret = kgsl_cffdump_capture_adreno_ib_cff(
device, context->proc_priv, ib->gpuaddr,
ib->size >> 2);
diff --git a/drivers/gpu/msm/kgsl_cffdump.h b/drivers/gpu/msm/kgsl_cffdump.h
index 315a097ba817..14bc397cb570 100644
--- a/drivers/gpu/msm/kgsl_cffdump.h
+++ b/drivers/gpu/msm/kgsl_cffdump.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2011,2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2011,2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -58,7 +58,7 @@ int kgsl_cff_dump_enable_set(void *data, u64 val);
int kgsl_cff_dump_enable_get(void *data, u64 *val);
int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch);
+ struct kgsl_drawobj_cmd *cmdobj);
void kgsl_cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5);
@@ -164,7 +164,7 @@ static inline void kgsl_cffdump_user_event(struct kgsl_device *device,
static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
struct kgsl_context *context,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_cmd *cmdobj)
{
return 0;
}
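
The real declaration and the inline stub above keep identical signatures, so callers compile unchanged whether or not CFF dump support is built in. The pattern in miniature (the config symbol below is illustrative, not quoted from this tree):

    #ifdef CONFIG_MSM_KGSL_CFF_DUMP		/* illustrative symbol */
    int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
    		struct kgsl_context *context,
    		struct kgsl_drawobj_cmd *cmdobj);
    #else
    static inline int kgsl_cffdump_capture_ib_desc(struct kgsl_device *device,
    		struct kgsl_context *context,
    		struct kgsl_drawobj_cmd *cmdobj)
    {
    	return 0;	/* feature compiled out: report success, do nothing */
    }
    #endif
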
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.h b/drivers/gpu/msm/kgsl_cmdbatch.h
deleted file mode 100644
index d5cbf375b5d3..000000000000
--- a/drivers/gpu/msm/kgsl_cmdbatch.h
+++ /dev/null
@@ -1,168 +0,0 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 and
- * only version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#ifndef __KGSL_CMDBATCH_H
-#define __KGSL_CMDBATCH_H
-
-#define KGSL_CMDBATCH_FLAGS \
- { KGSL_CMDBATCH_MARKER, "MARKER" }, \
- { KGSL_CMDBATCH_CTX_SWITCH, "CTX_SWITCH" }, \
- { KGSL_CMDBATCH_SYNC, "SYNC" }, \
- { KGSL_CMDBATCH_END_OF_FRAME, "EOF" }, \
- { KGSL_CMDBATCH_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
- { KGSL_CMDBATCH_SUBMIT_IB_LIST, "IB_LIST" }
-
-/**
- * struct kgsl_cmdbatch - KGSl command descriptor
- * @device: KGSL GPU device that the command was created for
- * @context: KGSL context that created the command
- * @timestamp: Timestamp assigned to the command
- * @flags: flags
- * @priv: Internal flags
- * @fault_policy: Internal policy describing how to handle this command in case
- * of a fault
- * @fault_recovery: recovery actions actually tried for this batch
- * @refcount: kref structure to maintain the reference count
- * @cmdlist: List of IBs to issue
- * @memlist: List of all memory used in this command batch
- * @synclist: Array of context/timestamp tuples to wait for before issuing
- * @numsyncs: Number of sync entries in the array
- * @pending: Bitmask of sync events that are active
- * @timer: a timer used to track possible sync timeouts for this cmdbatch
- * @marker_timestamp: For markers, the timestamp of the last "real" command that
- * was queued
- * @profiling_buf_entry: Mem entry containing the profiling buffer
- * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
- * for easy access
- * @profile_index: Index to store the start/stop ticks in the kernel profiling
- * buffer
- * @submit_ticks: Variable to hold ticks at the time of cmdbatch submit.
- * @global_ts: The ringbuffer timestamp corresponding to this cmdbatch
- * @timeout_jiffies: For a syncpoint cmdbatch the jiffies at which the
- * timer will expire
- * This structure defines an atomic batch of command buffers issued from
- * userspace.
- */
-struct kgsl_cmdbatch {
- struct kgsl_device *device;
- struct kgsl_context *context;
- uint32_t timestamp;
- uint32_t flags;
- unsigned long priv;
- unsigned long fault_policy;
- unsigned long fault_recovery;
- struct kref refcount;
- struct list_head cmdlist;
- struct list_head memlist;
- struct kgsl_cmdbatch_sync_event *synclist;
- unsigned int numsyncs;
- unsigned long pending;
- struct timer_list timer;
- unsigned int marker_timestamp;
- struct kgsl_mem_entry *profiling_buf_entry;
- uint64_t profiling_buffer_gpuaddr;
- unsigned int profile_index;
- uint64_t submit_ticks;
- unsigned int global_ts;
- unsigned long timeout_jiffies;
-};
-
-/**
- * struct kgsl_cmdbatch_sync_event
- * @id: identifer (positiion within the pending bitmap)
- * @type: Syncpoint type
- * @cmdbatch: Pointer to the cmdbatch that owns the sync event
- * @context: Pointer to the KGSL context that owns the cmdbatch
- * @timestamp: Pending timestamp for the event
- * @handle: Pointer to a sync fence handle
- * @device: Pointer to the KGSL device
- */
-struct kgsl_cmdbatch_sync_event {
- unsigned int id;
- int type;
- struct kgsl_cmdbatch *cmdbatch;
- struct kgsl_context *context;
- unsigned int timestamp;
- struct kgsl_sync_fence_waiter *handle;
- struct kgsl_device *device;
-};
-
-/**
- * enum kgsl_cmdbatch_priv - Internal cmdbatch flags
- * @CMDBATCH_FLAG_SKIP - skip the entire command batch
- * @CMDBATCH_FLAG_FORCE_PREAMBLE - Force the preamble on for the cmdbatch
- * @CMDBATCH_FLAG_WFI - Force wait-for-idle for the submission
- * @CMDBATCH_FLAG_PROFILE - store the start / retire ticks for the command batch
- * in the profiling buffer
- * @CMDBATCH_FLAG_FENCE_LOG - Set if the cmdbatch is dumping fence logs via the
- * cmdbatch timer - this is used to avoid recursion
- */
-
-enum kgsl_cmdbatch_priv {
- CMDBATCH_FLAG_SKIP = 0,
- CMDBATCH_FLAG_FORCE_PREAMBLE,
- CMDBATCH_FLAG_WFI,
- CMDBATCH_FLAG_PROFILE,
- CMDBATCH_FLAG_FENCE_LOG,
-};
-
-
-int kgsl_cmdbatch_add_memobj(struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_ibdesc *ibdesc);
-
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
- struct kgsl_cmd_syncpoint *sync);
-
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags);
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc);
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count);
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
- unsigned int size, unsigned int count);
-
-int kgsl_cmdbatch_init(void);
-void kgsl_cmdbatch_exit(void);
-
-void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch);
-
-void kgsl_cmdbatch_destroy_object(struct kref *kref);
-
-static inline bool kgsl_cmdbatch_events_pending(struct kgsl_cmdbatch *cmdbatch)
-{
- return !bitmap_empty(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
-}
-
-static inline bool kgsl_cmdbatch_event_pending(struct kgsl_cmdbatch *cmdbatch,
- unsigned int bit)
-{
- if (bit >= KGSL_MAX_SYNCPOINTS)
- return false;
-
- return test_bit(bit, &cmdbatch->pending);
-}
-
-#endif /* __KGSL_CMDBATCH_H */
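
The inline helpers deleted with this header survive, renamed, in the new kgsl_drawobj.h below; they implement the pending-bitmap scheme described in kgsl_drawobj.c: every syncpoint owns one bit, duplicate expirations are absorbed by test_and_clear_bit(), and the object becomes dispatchable once the bitmap empties. A standalone sketch of that lifecycle, assuming KGSL_MAX_SYNCPOINTS fits in a single unsigned long (which the one-word bitmap in the struct already implies):

    #include <linux/bitmap.h>

    #define MAX_POINTS 32	/* stand-in for KGSL_MAX_SYNCPOINTS */

    static unsigned long pending;	/* one bit per syncpoint */

    static void syncpoint_armed(unsigned int id)
    {
    	set_bit(id, &pending);		/* event is now outstanding */
    }

    /* Returns true when the last outstanding syncpoint just expired. */
    static bool syncpoint_expired(unsigned int id)
    {
    	/* an already-clear bit means a duplicate callback: ignore it */
    	if (!test_and_clear_bit(id, &pending))
    		return false;

    	return bitmap_empty(&pending, MAX_POINTS);
    }
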
diff --git a/drivers/gpu/msm/kgsl_compat.h b/drivers/gpu/msm/kgsl_compat.h
index ca1685e5fcf5..7681d74fb108 100644
--- a/drivers/gpu/msm/kgsl_compat.h
+++ b/drivers/gpu/msm/kgsl_compat.h
@@ -236,8 +236,8 @@ static inline compat_size_t sizet_to_compat(size_t size)
return (compat_size_t)size;
}
-int kgsl_cmdbatch_create_compat(struct kgsl_device *device, unsigned int flags,
- struct kgsl_cmdbatch *cmdbatch, void __user *cmdlist,
+int kgsl_drawobj_create_compat(struct kgsl_device *device, unsigned int flags,
+ struct kgsl_drawobj *drawobj, void __user *cmdlist,
unsigned int numcmds, void __user *synclist,
unsigned int numsyncs);
@@ -245,8 +245,8 @@ long kgsl_compat_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg);
#else
-static inline int kgsl_cmdbatch_create_compat(struct kgsl_device *device,
- unsigned int flags, struct kgsl_cmdbatch *cmdbatch,
+static inline int kgsl_drawobj_create_compat(struct kgsl_device *device,
+ unsigned int flags, struct kgsl_drawobj *drawobj,
void __user *cmdlist, unsigned int numcmds,
void __user *synclist, unsigned int numsyncs)
{
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index 24511a4de6f1..04935e8d0019 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -25,7 +25,7 @@
#include "kgsl_pwrscale.h"
#include "kgsl_snapshot.h"
#include "kgsl_sharedmem.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#define KGSL_IOCTL_FUNC(_cmd, _func) \
[_IOC_NR((_cmd))] = \
@@ -127,9 +127,9 @@ struct kgsl_functable {
unsigned int msecs);
int (*readtimestamp) (struct kgsl_device *device, void *priv,
enum kgsl_timestamp_type type, unsigned int *timestamp);
- int (*issueibcmds) (struct kgsl_device_private *dev_priv,
- struct kgsl_context *context, struct kgsl_cmdbatch *cmdbatch,
- uint32_t *timestamps);
+ int (*queue_cmds)(struct kgsl_device_private *dev_priv,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj[],
+ uint32_t count, uint32_t *timestamp);
void (*power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
unsigned int (*gpuid)(struct kgsl_device *device, unsigned int *chipid);
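
The signature change in this hunk is the heart of the rework: issueibcmds() took exactly one cmdbatch, while queue_cmds() takes an array of drawobj pointers plus a count, so a single ioctl can hand the dispatcher several objects and receive one timestamp back. A hedged sketch of a caller, mirroring the kgsl.c path earlier in this commit:

    /* Sketch: drive the new hook with however many objects were built. */
    static long submit_drawobjs(struct kgsl_device_private *dev_priv,
    		struct kgsl_context *context,
    		struct kgsl_drawobj *drawobj[], uint32_t count,
    		uint32_t *timestamp)
    {
    	struct kgsl_device *device = dev_priv->device;

    	if (device->ftbl->queue_cmds == NULL)
    		return -ENODEV;

    	/* one call queues the whole array, one timestamp comes back */
    	return device->ftbl->queue_cmds(dev_priv, context, drawobj,
    			count, timestamp);
    }
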
@@ -186,7 +186,7 @@ long kgsl_ioctl_helper(struct file *filep, unsigned int cmd, unsigned long arg,
/**
* struct kgsl_memobj_node - Memory object descriptor
- * @node: Local list node for the cmdbatch
+ * @node: Local list node for the object
* @id: GPU memory ID for the object
* offset: Offset within the object
* @gpuaddr: GPU address for the object
@@ -235,7 +235,7 @@ struct kgsl_device {
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
- struct completion cmdbatch_gate;
+ struct completion halt_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
@@ -292,7 +292,7 @@ struct kgsl_device {
#define KGSL_DEVICE_COMMON_INIT(_dev) \
.hwaccess_gate = COMPLETION_INITIALIZER((_dev).hwaccess_gate),\
- .cmdbatch_gate = COMPLETION_INITIALIZER((_dev).cmdbatch_gate),\
+ .halt_gate = COMPLETION_INITIALIZER((_dev).halt_gate),\
.idle_check_ws = __WORK_INITIALIZER((_dev).idle_check_ws,\
kgsl_idle_check),\
.context_idr = IDR_INIT((_dev).context_idr),\
diff --git a/drivers/gpu/msm/kgsl_cmdbatch.c b/drivers/gpu/msm/kgsl_drawobj.c
index 6272410ce544..7840daa6a3e2 100644
--- a/drivers/gpu/msm/kgsl_cmdbatch.c
+++ b/drivers/gpu/msm/kgsl_drawobj.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -11,17 +11,17 @@
*/
/*
- * KGSL command batch management
- * A command batch is a single submission from userland. The cmdbatch
+ * KGSL drawobj management
+ * A drawobj is a single submission from userland. The drawobj
* encapsulates everything about the submission : command buffers, flags and
* sync points.
*
* Sync points are events that need to expire before the
- * cmdbatch can be queued to the hardware. All synpoints are contained in an
- * array of kgsl_cmdbatch_sync_event structs in the command batch. There can be
+ * drawobj can be queued to the hardware. All syncpoints are contained in an
+ * array of kgsl_drawobj_sync_event structs in the drawobj. There can be
* multiple types of events both internal ones (GPU events) and external
* triggers. As the events expire bits are cleared in a pending bitmap stored
- * in the command batch. The GPU will submit the command as soon as the bitmap
+ * in the drawobj. The GPU will submit the command as soon as the bitmap
* goes to zero indicating no more pending events.
*/
@@ -31,7 +31,7 @@
#include "kgsl.h"
#include "kgsl_device.h"
-#include "kgsl_cmdbatch.h"
+#include "kgsl_drawobj.h"
#include "kgsl_sync.h"
#include "kgsl_trace.h"
#include "kgsl_compat.h"
@@ -42,26 +42,43 @@
*/
static struct kmem_cache *memobjs_cache;
-/**
- * kgsl_cmdbatch_put() - Decrement the refcount for a command batch object
- * @cmdbatch: Pointer to the command batch object
- */
-static inline void kgsl_cmdbatch_put(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_object(struct kref *kref)
{
- if (cmdbatch)
- kref_put(&cmdbatch->refcount, kgsl_cmdbatch_destroy_object);
+ struct kgsl_drawobj *drawobj = container_of(kref,
+ struct kgsl_drawobj, refcount);
+ struct kgsl_drawobj_sync *syncobj;
+
+ kgsl_context_put(drawobj->context);
+
+ switch (drawobj->type) {
+ case SYNCOBJ_TYPE:
+ syncobj = SYNCOBJ(drawobj);
+ kfree(syncobj->synclist);
+ kfree(syncobj);
+ break;
+ case CMDOBJ_TYPE:
+ case MARKEROBJ_TYPE:
+ kfree(CMDOBJ(drawobj));
+ break;
+ }
+}
+
+static inline void drawobj_put(struct kgsl_drawobj *drawobj)
+{
+ if (drawobj)
+ kref_put(&drawobj->refcount, drawobj_destroy_object);
}
void kgsl_dump_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch)
+ struct kgsl_drawobj_sync *syncobj)
{
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -90,32 +107,33 @@ void kgsl_dump_syncpoints(struct kgsl_device *device,
}
}
-static void _kgsl_cmdbatch_timer(unsigned long data)
+static void syncobj_timer(unsigned long data)
{
struct kgsl_device *device;
- struct kgsl_cmdbatch *cmdbatch = (struct kgsl_cmdbatch *) data;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync *syncobj = (struct kgsl_drawobj_sync *) data;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int i;
- if (cmdbatch == NULL || cmdbatch->context == NULL)
+ if (syncobj == NULL || drawobj->context == NULL)
return;
- device = cmdbatch->context->device;
+ device = drawobj->context->device;
dev_err(device->dev,
"kgsl: possible gpu syncpoint deadlock for context %d timestamp %d\n",
- cmdbatch->context->id, cmdbatch->timestamp);
+ drawobj->context->id, drawobj->timestamp);
- set_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
- kgsl_context_dump(cmdbatch->context);
- clear_bit(CMDBATCH_FLAG_FENCE_LOG, &cmdbatch->priv);
+ set_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
+ kgsl_context_dump(drawobj->context);
+ clear_bit(ADRENO_CONTEXT_FENCE_LOG, &drawobj->context->priv);
dev_err(device->dev, " pending events:\n");
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ event = &syncobj->synclist[i];
- if (!kgsl_cmdbatch_event_pending(cmdbatch, i))
+ if (!kgsl_drawobj_event_pending(syncobj, i))
continue;
switch (event->type) {
@@ -137,48 +155,31 @@ static void _kgsl_cmdbatch_timer(unsigned long data)
dev_err(device->dev, "--gpu syncpoint deadlock print end--\n");
}
-/**
- * kgsl_cmdbatch_destroy_object() - Destroy a cmdbatch object
- * @kref: Pointer to the kref structure for this object
- *
- * Actually destroy a command batch object. Called from kgsl_cmdbatch_put
- */
-void kgsl_cmdbatch_destroy_object(struct kref *kref)
-{
- struct kgsl_cmdbatch *cmdbatch = container_of(kref,
- struct kgsl_cmdbatch, refcount);
-
- kgsl_context_put(cmdbatch->context);
-
- kfree(cmdbatch->synclist);
- kfree(cmdbatch);
-}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy_object);
-
/*
* a generic function to retire a pending sync event and (possibly)
* kick the dispatcher
*/
-static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
- struct kgsl_cmdbatch_sync_event *event)
+static void drawobj_sync_expire(struct kgsl_device *device,
+ struct kgsl_drawobj_sync_event *event)
{
+ struct kgsl_drawobj_sync *syncobj = event->syncobj;
/*
* Clear the event from the pending mask - if it is already clear, then
* leave without doing anything useful
*/
- if (!test_and_clear_bit(event->id, &event->cmdbatch->pending))
+ if (!test_and_clear_bit(event->id, &syncobj->pending))
return;
/*
* If no more pending events, delete the timer and schedule the command
* for dispatch
*/
- if (!kgsl_cmdbatch_events_pending(event->cmdbatch)) {
- del_timer_sync(&event->cmdbatch->timer);
+ if (!kgsl_drawobj_events_pending(event->syncobj)) {
+ del_timer_sync(&syncobj->timer);
if (device->ftbl->drawctxt_sched)
device->ftbl->drawctxt_sched(device,
- event->cmdbatch->context);
+ event->syncobj->base.context);
}
}
@@ -186,20 +187,20 @@ static void kgsl_cmdbatch_sync_expire(struct kgsl_device *device,
* This function is called by the GPU event when the sync event timestamp
* expires
*/
-static void kgsl_cmdbatch_sync_func(struct kgsl_device *device,
+static void drawobj_sync_func(struct kgsl_device *device,
struct kgsl_event_group *group, void *priv, int result)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_timestamp_expire(event->cmdbatch,
+ trace_syncpoint_timestamp_expire(event->syncobj,
event->context, event->timestamp);
- kgsl_cmdbatch_sync_expire(device, event);
+ drawobj_sync_expire(device, event);
kgsl_context_put(event->context);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-static inline void _free_memobj_list(struct list_head *list)
+static inline void memobj_list_free(struct list_head *list)
{
struct kgsl_memobj_node *mem, *tmpmem;
@@ -210,39 +211,28 @@ static inline void _free_memobj_list(struct list_head *list)
}
}
-/**
- * kgsl_cmdbatch_destroy() - Destroy a cmdbatch structure
- * @cmdbatch: Pointer to the command batch object to destroy
- *
- * Start the process of destroying a command batch. Cancel any pending events
- * and decrement the refcount. Asynchronous events can still signal after
- * kgsl_cmdbatch_destroy has returned.
- */
-void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
+static void drawobj_destroy_sync(struct kgsl_drawobj *drawobj)
{
- unsigned int i;
+ struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);
unsigned long pending;
-
- if (IS_ERR_OR_NULL(cmdbatch))
- return;
+ unsigned int i;
/* Zap the canary timer */
- del_timer_sync(&cmdbatch->timer);
+ del_timer_sync(&syncobj->timer);
/*
* Copy off the pending list and clear all pending events - this will
* render any subsequent asynchronous callback harmless
*/
- bitmap_copy(&pending, &cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
- bitmap_zero(&cmdbatch->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_copy(&pending, &syncobj->pending, KGSL_MAX_SYNCPOINTS);
+ bitmap_zero(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
/*
* Clear all pending events - this will render any subsequent async
* callbacks harmless
*/
-
- for (i = 0; i < cmdbatch->numsyncs; i++) {
- struct kgsl_cmdbatch_sync_event *event = &cmdbatch->synclist[i];
+ for (i = 0; i < syncobj->numsyncs; i++) {
+ struct kgsl_drawobj_sync_event *event = &syncobj->synclist[i];
/* Don't do anything if the event has already expired */
if (!test_bit(i, &pending))
@@ -250,127 +240,152 @@ void kgsl_cmdbatch_destroy(struct kgsl_cmdbatch *cmdbatch)
switch (event->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
- kgsl_cancel_event(cmdbatch->device,
+ kgsl_cancel_event(drawobj->device,
&event->context->events, event->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
if (kgsl_sync_fence_async_cancel(event->handle))
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
break;
}
}
/*
- * Release the the refcount on the mem entry associated with the
- * cmdbatch profiling buffer
+ * If we cancelled an event, there's a good chance that the context is
+ * on a dispatcher queue, so schedule to get it removed.
+ */
+ if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
+ drawobj->device->ftbl->drawctxt_sched)
+ drawobj->device->ftbl->drawctxt_sched(drawobj->device,
+ drawobj->context);
+}
+
+static void drawobj_destroy_cmd(struct kgsl_drawobj *drawobj)
+{
+ struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);
+
+ /*
+ * Release the refcount on the mem entry associated with the
+ * ib profiling buffer
*/
- if (cmdbatch->flags & KGSL_CMDBATCH_PROFILING)
- kgsl_mem_entry_put(cmdbatch->profiling_buf_entry);
+ if (cmdobj->base.flags & KGSL_DRAWOBJ_PROFILING)
+ kgsl_mem_entry_put(cmdobj->profiling_buf_entry);
/* Destroy the cmdlist we created */
- _free_memobj_list(&cmdbatch->cmdlist);
+ memobj_list_free(&cmdobj->cmdlist);
/* Destroy the memlist we created */
- _free_memobj_list(&cmdbatch->memlist);
+ memobj_list_free(&cmdobj->memlist);
+}
- /*
- * If we cancelled an event, there's a good chance that the context is
- * on a dispatcher queue, so schedule to get it removed.
+/**
+ * kgsl_drawobj_destroy() - Destroy a kgsl object structure
+ * @obj: Pointer to the kgsl object to destroy
+ *
+ * Start the process of destroying a command batch. Cancel any pending events
+ * and decrement the refcount. Asynchronous events can still signal after
+ * kgsl_drawobj_destroy has returned.
*/
- if (!bitmap_empty(&pending, KGSL_MAX_SYNCPOINTS) &&
- cmdbatch->device->ftbl->drawctxt_sched)
- cmdbatch->device->ftbl->drawctxt_sched(cmdbatch->device,
- cmdbatch->context);
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj)
+{
+ if (!drawobj)
+ return;
+
+ if (drawobj->type & SYNCOBJ_TYPE)
+ drawobj_destroy_sync(drawobj);
+ else if (drawobj->type & (CMDOBJ_TYPE | MARKEROBJ_TYPE))
+ drawobj_destroy_cmd(drawobj);
+ else
+ return;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
}
-EXPORT_SYMBOL(kgsl_cmdbatch_destroy);
+EXPORT_SYMBOL(kgsl_drawobj_destroy);
-/*
- * A callback that gets registered with kgsl_sync_fence_async_wait and is fired
- * when a fence is expired
- */
-static void kgsl_cmdbatch_sync_fence_func(void *priv)
+static void drawobj_sync_fence_func(void *priv)
{
- struct kgsl_cmdbatch_sync_event *event = priv;
+ struct kgsl_drawobj_sync_event *event = priv;
- trace_syncpoint_fence_expire(event->cmdbatch,
+ trace_syncpoint_fence_expire(event->syncobj,
event->handle ? event->handle->name : "unknown");
- kgsl_cmdbatch_sync_expire(event->device, event);
+ drawobj_sync_expire(event->device, event);
- kgsl_cmdbatch_put(event->cmdbatch);
+ drawobj_put(&event->syncobj->base);
}
-/* kgsl_cmdbatch_add_sync_fence() - Add a new sync fence syncpoint
+/* drawobj_add_sync_fence() - Add a new sync fence syncpoint
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new fence sync syncpoint to the cmdbatch.
+ * Add a new fence sync syncpoint to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_fence(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_fence(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_fence *sync = priv;
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_drawobj_sync_event *event;
unsigned int id;
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_FENCE;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->device = device;
event->context = NULL;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
event->handle = kgsl_sync_fence_async_wait(sync->fd,
- kgsl_cmdbatch_sync_fence_func, event);
+ drawobj_sync_fence_func, event);
if (IS_ERR_OR_NULL(event->handle)) {
int ret = PTR_ERR(event->handle);
- clear_bit(event->id, &cmdbatch->pending);
+ clear_bit(event->id, &syncobj->pending);
event->handle = NULL;
- kgsl_cmdbatch_put(cmdbatch);
+ drawobj_put(drawobj);
/*
* If ret == 0 the fence was already signaled - print a trace
* message so we can track that
*/
if (ret == 0)
- trace_syncpoint_fence_expire(cmdbatch, "signaled");
+ trace_syncpoint_fence_expire(syncobj, "signaled");
return ret;
}
- trace_syncpoint_fence(cmdbatch, event->handle->name);
+ trace_syncpoint_fence(syncobj, event->handle->name);
return 0;
}
-/* kgsl_cmdbatch_add_sync_timestamp() - Add a new sync point for a cmdbatch
+/* drawobj_add_sync_timestamp() - Add a new sync point for a sync obj
* @device: KGSL device
- * @cmdbatch: KGSL cmdbatch to add the sync point to
- * @priv: Private sructure passed by the user
+ * @syncobj: KGSL sync obj to add the sync point to
+ * @priv: Private structure passed by the user
*
- * Add a new sync point timestamp event to the cmdbatch.
+ * Add a new sync point timestamp event to the sync obj.
*/
-static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void *priv)
+static int drawobj_add_sync_timestamp(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void *priv)
{
struct kgsl_cmd_syncpoint_timestamp *sync = priv;
- struct kgsl_context *context = kgsl_context_get(cmdbatch->device,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ struct kgsl_context *context = kgsl_context_get(device,
sync->context_id);
- struct kgsl_cmdbatch_sync_event *event;
+ struct kgsl_drawobj_sync_event *event;
int ret = -EINVAL;
unsigned int id;
@@ -384,8 +399,9 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
* create a sync point on a future timestamp.
*/
- if (context == cmdbatch->context) {
+ if (context == drawobj->context) {
unsigned int queued;
+
kgsl_readtimestamp(device, context, KGSL_TIMESTAMP_QUEUED,
&queued);
@@ -397,29 +413,29 @@ static int kgsl_cmdbatch_add_sync_timestamp(struct kgsl_device *device,
}
}
- kref_get(&cmdbatch->refcount);
+ kref_get(&drawobj->refcount);
- id = cmdbatch->numsyncs++;
+ id = syncobj->numsyncs++;
- event = &cmdbatch->synclist[id];
+ event = &syncobj->synclist[id];
event->id = id;
event->type = KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP;
- event->cmdbatch = cmdbatch;
+ event->syncobj = syncobj;
event->context = context;
event->timestamp = sync->timestamp;
event->device = device;
- set_bit(event->id, &cmdbatch->pending);
+ set_bit(event->id, &syncobj->pending);
ret = kgsl_add_event(device, &context->events, sync->timestamp,
- kgsl_cmdbatch_sync_func, event);
+ drawobj_sync_func, event);
if (ret) {
- clear_bit(event->id, &cmdbatch->pending);
- kgsl_cmdbatch_put(cmdbatch);
+ clear_bit(event->id, &syncobj->pending);
+ drawobj_put(drawobj);
} else {
- trace_syncpoint_timestamp(cmdbatch, context, sync->timestamp);
+ trace_syncpoint_timestamp(syncobj, context, sync->timestamp);
}
done:
@@ -430,43 +446,46 @@ done:
}
/**
- * kgsl_cmdbatch_add_sync() - Add a sync point to a command batch
+ * kgsl_drawobj_sync_add_sync() - Add a sync point to a command
+ * batch
* @device: Pointer to the KGSL device struct for the GPU
- * @cmdbatch: Pointer to the cmdbatch
+ * @syncobj: Pointer to the sync obj
* @sync: Pointer to the user-specified struct defining the syncpoint
*
- * Create a new sync point in the cmdbatch based on the user specified
- * parameters
+ * Create a new sync point in the sync obj based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch,
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
struct kgsl_cmd_syncpoint *sync)
{
void *priv;
int ret, psize;
- int (*func)(struct kgsl_device *device, struct kgsl_cmdbatch *cmdbatch,
+ struct kgsl_drawobj *drawobj = DRAWOBJ(syncobj);
+ int (*func)(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
void *priv);
switch (sync->type) {
case KGSL_CMD_SYNCPOINT_TYPE_TIMESTAMP:
psize = sizeof(struct kgsl_cmd_syncpoint_timestamp);
- func = kgsl_cmdbatch_add_sync_timestamp;
+ func = drawobj_add_sync_timestamp;
break;
case KGSL_CMD_SYNCPOINT_TYPE_FENCE:
psize = sizeof(struct kgsl_cmd_syncpoint_fence);
- func = kgsl_cmdbatch_add_sync_fence;
+ func = drawobj_add_sync_fence;
break;
default:
KGSL_DRV_ERR(device,
"bad syncpoint type ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
if (sync->size != psize) {
KGSL_DRV_ERR(device,
"bad syncpoint size ctxt %d type 0x%x size %zu\n",
- cmdbatch->context->id, sync->type, sync->size);
+ drawobj->context->id, sync->type, sync->size);
return -EINVAL;
}
@@ -479,30 +498,32 @@ int kgsl_cmdbatch_add_sync(struct kgsl_device *device,
return -EFAULT;
}
- ret = func(device, cmdbatch, priv);
+ ret = func(device, syncobj, priv);
kfree(priv);
return ret;
}
static void add_profiling_buffer(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, uint64_t gpuaddr, uint64_t size,
+ struct kgsl_drawobj_cmd *cmdobj,
+ uint64_t gpuaddr, uint64_t size,
unsigned int id, uint64_t offset)
{
struct kgsl_mem_entry *entry;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
- if (!(cmdbatch->flags & KGSL_CMDBATCH_PROFILING))
+ if (!(drawobj->flags & KGSL_DRAWOBJ_PROFILING))
return;
/* Only the first buffer entry counts - ignore the rest */
- if (cmdbatch->profiling_buf_entry != NULL)
+ if (cmdobj->profiling_buf_entry != NULL)
return;
if (id != 0)
- entry = kgsl_sharedmem_find_id(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find_id(drawobj->context->proc_priv,
id);
else
- entry = kgsl_sharedmem_find(cmdbatch->context->proc_priv,
+ entry = kgsl_sharedmem_find(drawobj->context->proc_priv,
gpuaddr);
if (entry != NULL) {
@@ -515,47 +536,50 @@ static void add_profiling_buffer(struct kgsl_device *device,
if (entry == NULL) {
KGSL_DRV_ERR(device,
"ignore bad profile buffer ctxt %d id %d offset %lld gpuaddr %llx size %lld\n",
- cmdbatch->context->id, id, offset, gpuaddr, size);
+ drawobj->context->id, id, offset, gpuaddr, size);
return;
}
- cmdbatch->profiling_buf_entry = entry;
+ cmdobj->profiling_buf_entry = entry;
if (id != 0)
- cmdbatch->profiling_buffer_gpuaddr =
+ cmdobj->profiling_buffer_gpuaddr =
entry->memdesc.gpuaddr + offset;
else
- cmdbatch->profiling_buffer_gpuaddr = gpuaddr;
+ cmdobj->profiling_buffer_gpuaddr = gpuaddr;
}
/**
- * kgsl_cmdbatch_add_ibdesc() - Add a legacy ibdesc to a command batch
- * @cmdbatch: Pointer to the cmdbatch
+ * kgsl_drawobj_cmd_add_ibdesc() - Add a legacy ibdesc to a command
+ * batch
+ * @cmdobj: Pointer to the ib
* @ibdesc: Pointer to the user-specified struct defining the memory or IB
*
- * Create a new memory entry in the cmdbatch based on the user specified
- * parameters
+ * Create a new memory entry in the ib based on the
+ * user specified parameters
*/
-int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, struct kgsl_ibdesc *ibdesc)
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc)
{
uint64_t gpuaddr = (uint64_t) ibdesc->gpuaddr;
uint64_t size = (uint64_t) ibdesc->sizedwords << 2;
struct kgsl_memobj_node *mem;
+ struct kgsl_drawobj *drawobj = DRAWOBJ(cmdobj);
/* sanitize the ibdesc ctrl flags */
ibdesc->ctrl &= KGSL_IBDESC_MEMLIST | KGSL_IBDESC_PROFILING_BUFFER;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
if (ibdesc->ctrl & KGSL_IBDESC_PROFILING_BUFFER) {
- add_profiling_buffer(device, cmdbatch,
+ add_profiling_buffer(device, cmdobj,
gpuaddr, size, 0, 0);
return 0;
}
}
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
+ /* Ignore if SYNC or MARKER is specified */
+ if (drawobj->type & (SYNCOBJ_TYPE | MARKEROBJ_TYPE))
return 0;
mem = kmem_cache_alloc(memobjs_cache, GFP_KERNEL);
@@ -569,74 +593,121 @@ int kgsl_cmdbatch_add_ibdesc(struct kgsl_device *device,
mem->offset = 0;
mem->flags = 0;
- if (cmdbatch->flags & KGSL_CMDBATCH_MEMLIST &&
- ibdesc->ctrl & KGSL_IBDESC_MEMLIST) {
+ if (drawobj->flags & KGSL_DRAWOBJ_MEMLIST &&
+ ibdesc->ctrl & KGSL_IBDESC_MEMLIST)
/* add to the memlist */
- list_add_tail(&mem->node, &cmdbatch->memlist);
- } else {
+ list_add_tail(&mem->node, &cmdobj->memlist);
+ else {
/* set the preamble flag if directed to */
- if (cmdbatch->context->flags & KGSL_CONTEXT_PREAMBLE &&
- list_empty(&cmdbatch->cmdlist))
+ if (drawobj->context->flags & KGSL_CONTEXT_PREAMBLE &&
+ list_empty(&cmdobj->cmdlist))
mem->flags = KGSL_CMDLIST_CTXTSWITCH_PREAMBLE;
/* add to the cmd list */
- list_add_tail(&mem->node, &cmdbatch->cmdlist);
+ list_add_tail(&mem->node, &cmdobj->cmdlist);
}
return 0;
}
+static inline int drawobj_init(struct kgsl_device *device,
+ struct kgsl_context *context, struct kgsl_drawobj *drawobj,
+ unsigned int type)
+{
+ /*
+ * Increase the reference count on the context so it doesn't disappear
+ * during the lifetime of this object
+ */
+ if (!_kgsl_context_get(context))
+ return -ENOENT;
+
+ kref_init(&drawobj->refcount);
+
+ drawobj->device = device;
+ drawobj->context = context;
+ drawobj->type = type;
+
+ return 0;
+}
+
/**
- * kgsl_cmdbatch_create() - Create a new cmdbatch structure
+ * kgsl_drawobj_sync_create() - Create a new sync obj
+ * structure
* @device: Pointer to a KGSL device struct
* @context: Pointer to a KGSL context struct
- * @flags: Flags for the cmdbatch
*
- * Allocate an new cmdbatch structure
+ * Allocate a new kgsl_drawobj_sync structure
*/
-struct kgsl_cmdbatch *kgsl_cmdbatch_create(struct kgsl_device *device,
- struct kgsl_context *context, unsigned int flags)
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context)
{
- struct kgsl_cmdbatch *cmdbatch = kzalloc(sizeof(*cmdbatch), GFP_KERNEL);
- if (cmdbatch == NULL)
+ struct kgsl_drawobj_sync *syncobj = kzalloc(sizeof(*syncobj),
+ GFP_KERNEL);
+ if (syncobj == NULL)
return ERR_PTR(-ENOMEM);
- /*
- * Increase the reference count on the context so it doesn't disappear
- * during the lifetime of this command batch
- */
+ if (drawobj_init(device, context, DRAWOBJ(syncobj), SYNCOBJ_TYPE)) {
+ kfree(syncobj);
+ return ERR_PTR(-ENOENT);
+ }
+
+ /* Add a timer to help debug sync deadlocks */
+ setup_timer(&syncobj->timer, syncobj_timer, (unsigned long) syncobj);
+
+ return syncobj;
+}
+
+/**
+ * kgsl_drawobj_cmd_create() - Create a new command obj
+ * structure
+ * @device: Pointer to a KGSL device struct
+ * @context: Pointer to a KGSL context struct
+ * @flags: Flags for the command obj
+ * @type: type of cmdobj MARKER/CMD
+ *
+ * Allocate a new kgsl_drawobj_cmd structure
+ */
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type)
+{
+ struct kgsl_drawobj_cmd *cmdobj = kzalloc(sizeof(*cmdobj), GFP_KERNEL);
+ struct kgsl_drawobj *drawobj;
+
+ if (cmdobj == NULL)
+ return ERR_PTR(-ENOMEM);
- if (!_kgsl_context_get(context)) {
- kfree(cmdbatch);
+ type &= CMDOBJ_TYPE | MARKEROBJ_TYPE;
+ if (type == 0) {
+ kfree(cmdobj);
+ return ERR_PTR(-EINVAL);
+ }
+
+ drawobj = DRAWOBJ(cmdobj);
+
+ if (drawobj_init(device, context, drawobj, type)) {
+ kfree(cmdobj);
return ERR_PTR(-ENOENT);
}
- kref_init(&cmdbatch->refcount);
- INIT_LIST_HEAD(&cmdbatch->cmdlist);
- INIT_LIST_HEAD(&cmdbatch->memlist);
-
- cmdbatch->device = device;
- cmdbatch->context = context;
- /* sanitize our flags for cmdbatches */
- cmdbatch->flags = flags & (KGSL_CMDBATCH_CTX_SWITCH
- | KGSL_CMDBATCH_MARKER
- | KGSL_CMDBATCH_END_OF_FRAME
- | KGSL_CMDBATCH_SYNC
- | KGSL_CMDBATCH_PWR_CONSTRAINT
- | KGSL_CMDBATCH_MEMLIST
- | KGSL_CMDBATCH_PROFILING
- | KGSL_CMDBATCH_PROFILING_KTIME);
+	/* sanitize our flags for drawobjs */
+ drawobj->flags = flags & (KGSL_DRAWOBJ_CTX_SWITCH
+ | KGSL_DRAWOBJ_MARKER
+ | KGSL_DRAWOBJ_END_OF_FRAME
+ | KGSL_DRAWOBJ_PWR_CONSTRAINT
+ | KGSL_DRAWOBJ_MEMLIST
+ | KGSL_DRAWOBJ_PROFILING
+ | KGSL_DRAWOBJ_PROFILING_KTIME);
- /* Add a timer to help debug sync deadlocks */
- setup_timer(&cmdbatch->timer, _kgsl_cmdbatch_timer,
- (unsigned long) cmdbatch);
+ INIT_LIST_HEAD(&cmdobj->cmdlist);
+ INIT_LIST_HEAD(&cmdobj->memlist);
- return cmdbatch;
+ return cmdobj;
}
#ifdef CONFIG_COMPAT
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
int i, ret = 0;
struct kgsl_ibdesc_compat ibdesc32;
@@ -654,7 +725,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
ibdesc.sizedwords = (size_t) ibdesc32.sizedwords;
ibdesc.ctrl = (unsigned int) ibdesc32.ctrl;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
break;
@@ -665,7 +736,7 @@ static int add_ibdesc_list_compat(struct kgsl_device *device,
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint_compat sync32;
struct kgsl_cmd_syncpoint sync;
@@ -683,7 +754,7 @@ static int add_syncpoints_compat(struct kgsl_device *device,
sync.priv = compat_ptr(sync32.priv);
sync.size = (size_t) sync32.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
break;
@@ -694,26 +765,54 @@ static int add_syncpoints_compat(struct kgsl_device *device,
}
#else
static int add_ibdesc_list_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
return -EINVAL;
}
static int add_syncpoints_compat(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
return -EINVAL;
}
#endif
-int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+/* Returns:
+ * -EINVAL: Bad data
+ * 0: All data fields are empty (nothing to do)
+ * 1: All list information is valid
+ */
+static int _verify_input_list(unsigned int count, void __user *ptr,
+ unsigned int size)
+{
+ /* Return early if nothing going on */
+ if (count == 0 && ptr == NULL && size == 0)
+ return 0;
+
+ /* Sanity check inputs */
+ if (count == 0 || ptr == NULL || size == 0)
+ return -EINVAL;
+
+ return 1;
+}
+
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count)
{
struct kgsl_ibdesc ibdesc;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
int i, ret;
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
+ return 0;
+
+ ret = _verify_input_list(count, ptr, sizeof(ibdesc));
+ if (ret <= 0)
+ return -EINVAL;
+
if (is_compat_task())
- return add_ibdesc_list_compat(device, cmdbatch, ptr, count);
+ return add_ibdesc_list_compat(device, cmdobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&ibdesc, 0, sizeof(ibdesc));
@@ -721,7 +820,7 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
if (copy_from_user(&ibdesc, ptr, sizeof(ibdesc)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_ibdesc(device, cmdbatch, &ibdesc);
+ ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, &ibdesc);
if (ret)
return ret;
@@ -731,8 +830,8 @@ int kgsl_cmdbatch_add_ibdesc_list(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr, int count)
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr, int count)
{
struct kgsl_cmd_syncpoint sync;
int i, ret;
@@ -740,17 +839,14 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (count == 0)
return 0;
- if (count > KGSL_MAX_SYNCPOINTS)
- return -EINVAL;
-
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
if (is_compat_task())
- return add_syncpoints_compat(device, cmdbatch, ptr, count);
+ return add_syncpoints_compat(device, syncobj, ptr, count);
for (i = 0; i < count; i++) {
memset(&sync, 0, sizeof(sync));
@@ -758,7 +854,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
if (copy_from_user(&sync, ptr, sizeof(sync)))
return -EFAULT;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -768,7 +864,7 @@ int kgsl_cmdbatch_add_syncpoints(struct kgsl_device *device,
return 0;
}
-static int kgsl_cmdbatch_add_object(struct list_head *head,
+static int drawobj_add_object(struct list_head *head,
struct kgsl_command_object *obj)
{
struct kgsl_memobj_node *mem;
@@ -793,24 +889,22 @@ static int kgsl_cmdbatch_add_object(struct list_head *head,
KGSL_CMDLIST_CTXTSWITCH_PREAMBLE | \
KGSL_CMDLIST_IB_PREAMBLE)
-int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+/* This can only accept MARKEROBJ_TYPE and CMDOBJ_TYPE */
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
-
- /* Ignore all if SYNC or MARKER is specified */
- if (cmdbatch->flags & (KGSL_CMDBATCH_SYNC | KGSL_CMDBATCH_MARKER))
- return 0;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -823,12 +917,12 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
if (!(obj.flags & CMDLIST_FLAGS)) {
KGSL_DRV_ERR(device,
"invalid cmdobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
+ baseobj->context->id, obj.flags, obj.id,
obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
- ret = kgsl_cmdbatch_add_object(&cmdbatch->cmdlist, &obj);
+ ret = drawobj_add_object(&cmdobj->cmdlist, &obj);
if (ret)
return ret;
@@ -838,20 +932,21 @@ int kgsl_cmdbatch_add_cmdlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_object obj;
- int i, ret = 0;
+ struct kgsl_drawobj *baseobj = DRAWOBJ(cmdobj);
+ int i, ret;
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
+ /* Ignore everything if this is a MARKER */
+ if (baseobj->type & MARKEROBJ_TYPE)
return 0;
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
+ return ret;
for (i = 0; i < count; i++) {
memset(&obj, 0, sizeof(obj));
@@ -863,17 +958,16 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
if (!(obj.flags & KGSL_OBJLIST_MEMOBJ)) {
KGSL_DRV_ERR(device,
"invalid memobj ctxt %d flags %d id %d offset %lld addr %lld size %lld\n",
- cmdbatch->context->id, obj.flags, obj.id,
- obj.offset, obj.gpuaddr, obj.size);
+ DRAWOBJ(cmdobj)->context->id, obj.flags,
+ obj.id, obj.offset, obj.gpuaddr, obj.size);
return -EINVAL;
}
if (obj.flags & KGSL_OBJLIST_PROFILE)
- add_profiling_buffer(device, cmdbatch, obj.gpuaddr,
+ add_profiling_buffer(device, cmdobj, obj.gpuaddr,
obj.size, obj.id, obj.offset);
else {
- ret = kgsl_cmdbatch_add_object(&cmdbatch->memlist,
- &obj);
+ ret = drawobj_add_object(&cmdobj->memlist, &obj);
if (ret)
return ret;
}
@@ -884,29 +978,23 @@ int kgsl_cmdbatch_add_memlist(struct kgsl_device *device,
return 0;
}
-int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
- struct kgsl_cmdbatch *cmdbatch, void __user *ptr,
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
unsigned int size, unsigned int count)
{
struct kgsl_command_syncpoint syncpoint;
struct kgsl_cmd_syncpoint sync;
- int i, ret = 0;
-
- /* Return early if nothing going on */
- if (count == 0 && ptr == NULL && size == 0)
- return 0;
-
- /* Sanity check inputs */
- if (count == 0 || ptr == NULL || size == 0)
- return -EINVAL;
+ int i, ret;
- if (count > KGSL_MAX_SYNCPOINTS)
+	/* When creating a sync obj, missing or invalid data is an error */
+ ret = _verify_input_list(count, ptr, size);
+ if (ret <= 0)
return -EINVAL;
- cmdbatch->synclist = kcalloc(count,
- sizeof(struct kgsl_cmdbatch_sync_event), GFP_KERNEL);
+ syncobj->synclist = kcalloc(count,
+ sizeof(struct kgsl_drawobj_sync_event), GFP_KERNEL);
- if (cmdbatch->synclist == NULL)
+ if (syncobj->synclist == NULL)
return -ENOMEM;
for (i = 0; i < count; i++) {
@@ -920,7 +1008,7 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
sync.priv = to_user_ptr(syncpoint.priv);
sync.size = syncpoint.size;
- ret = kgsl_cmdbatch_add_sync(device, cmdbatch, &sync);
+ ret = kgsl_drawobj_sync_add_sync(device, syncobj, &sync);
if (ret)
return ret;
@@ -930,13 +1018,13 @@ int kgsl_cmdbatch_add_synclist(struct kgsl_device *device,
return 0;
}
-void kgsl_cmdbatch_exit(void)
+void kgsl_drawobj_exit(void)
{
if (memobjs_cache != NULL)
kmem_cache_destroy(memobjs_cache);
}
-int kgsl_cmdbatch_init(void)
+int kgsl_drawobj_init(void)
{
memobjs_cache = KMEM_CACHE(kgsl_memobj_node, 0);
if (memobjs_cache == NULL) {
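
Creation is now split by type while destruction stays unified behind kgsl_drawobj_destroy(), which dispatches on drawobj->type. A hedged sketch of the command-obj lifecycle as the rewritten ioctl path uses it (error handling trimmed, flags zeroed for brevity):

    /* Sketch: create a CMD drawobj, attach one IB, then tear it down. */
    static int example_submit_one(struct kgsl_device *device,
    		struct kgsl_context *context, struct kgsl_ibdesc *ibdesc)
    {
    	struct kgsl_drawobj_cmd *cmdobj;
    	int ret;

    	cmdobj = kgsl_drawobj_cmd_create(device, context, 0, CMDOBJ_TYPE);
    	if (IS_ERR(cmdobj))
    		return PTR_ERR(cmdobj);

    	ret = kgsl_drawobj_cmd_add_ibdesc(device, cmdobj, ibdesc);
    	if (ret) {
    		/* destroy takes the base object regardless of type */
    		kgsl_drawobj_destroy(DRAWOBJ(cmdobj));
    		return ret;
    	}

    	/* ...queue_cmds() would take ownership from here... */
    	return 0;
    }
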
diff --git a/drivers/gpu/msm/kgsl_drawobj.h b/drivers/gpu/msm/kgsl_drawobj.h
new file mode 100644
index 000000000000..89ed944c539a
--- /dev/null
+++ b/drivers/gpu/msm/kgsl_drawobj.h
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __KGSL_DRAWOBJ_H
+#define __KGSL_DRAWOBJ_H
+
+#define DRAWOBJ(obj) (&obj->base)
+#define SYNCOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_sync, base)
+#define CMDOBJ(obj) \
+ container_of(obj, struct kgsl_drawobj_cmd, base)
+
+#define CMDOBJ_TYPE BIT(0)
+#define MARKEROBJ_TYPE BIT(1)
+#define SYNCOBJ_TYPE BIT(2)
+
+/**
+ * struct kgsl_drawobj - KGSL drawobj descriptor
+ * @device: KGSL GPU device that the command was created for
+ * @context: KGSL context that created the command
+ * @type: Object type
+ * @timestamp: Timestamp assigned to the command
+ * @flags: flags
+ * @refcount: kref structure to maintain the reference count
+ */
+struct kgsl_drawobj {
+ struct kgsl_device *device;
+ struct kgsl_context *context;
+ uint32_t type;
+ uint32_t timestamp;
+ unsigned long flags;
+ struct kref refcount;
+};
+
+/**
+ * struct kgsl_drawobj_cmd - KGSL command obj. This also covers marker
+ * cmds, since markers are a special form of cmd that does not need
+ * its commands executed.
+ * @base: Base kgsl_drawobj
+ * @priv: Internal flags
+ * @global_ts: The ringbuffer timestamp corresponding to this
+ * command obj
+ * @fault_policy: Internal policy describing how to handle this command in case
+ * of a fault
+ * @fault_recovery: recovery actions actually tried for this batch
+ * @cmdlist: List of IBs to issue
+ * @memlist: List of all memory used in this command batch
+ * @marker_timestamp: For markers, the timestamp of the last "real" command that
+ * was queued
+ * @profiling_buf_entry: Mem entry containing the profiling buffer
+ * @profiling_buffer_gpuaddr: GPU virt address of the profile buffer added here
+ * for easy access
+ * @profile_index: Index to store the start/stop ticks in the kernel profiling
+ * buffer
+ * @submit_ticks: Variable to hold ticks at the time of
+ * command obj submit.
+ */
+struct kgsl_drawobj_cmd {
+ struct kgsl_drawobj base;
+ unsigned long priv;
+ unsigned int global_ts;
+ unsigned long fault_policy;
+ unsigned long fault_recovery;
+ struct list_head cmdlist;
+ struct list_head memlist;
+ unsigned int marker_timestamp;
+ struct kgsl_mem_entry *profiling_buf_entry;
+ uint64_t profiling_buffer_gpuaddr;
+ unsigned int profile_index;
+ uint64_t submit_ticks;
+};
+
+/**
+ * struct kgsl_drawobj_sync - KGSL sync object
+ * @base: Base kgsl_drawobj, this needs to be the first entry
+ * @synclist: Array of context/timestamp tuples to wait for before issuing
+ * @numsyncs: Number of sync entries in the array
+ * @pending: Bitmask of sync events that are active
+ * @timer: a timer used to track possible sync timeouts for this
+ * sync obj
+ * @timeout_jiffies: For a sync obj, the jiffies at which the timer
+ * will expire
+ */
+struct kgsl_drawobj_sync {
+ struct kgsl_drawobj base;
+ struct kgsl_drawobj_sync_event *synclist;
+ unsigned int numsyncs;
+ unsigned long pending;
+ struct timer_list timer;
+ unsigned long timeout_jiffies;
+};
+
+/**
+ * struct kgsl_drawobj_sync_event
+ * @id: identifier (position within the pending bitmap)
+ * @type: Syncpoint type
+ * @syncobj: Pointer to the syncobj that owns the sync event
+ * @context: KGSL context whose timestamp this event waits on
+ * @timestamp: Pending timestamp for the event
+ * @handle: Pointer to a sync fence handle
+ * @device: Pointer to the KGSL device
+ */
+struct kgsl_drawobj_sync_event {
+ unsigned int id;
+ int type;
+ struct kgsl_drawobj_sync *syncobj;
+ struct kgsl_context *context;
+ unsigned int timestamp;
+ struct kgsl_sync_fence_waiter *handle;
+ struct kgsl_device *device;
+};
+
+#define KGSL_DRAWOBJ_FLAGS \
+ { KGSL_DRAWOBJ_MARKER, "MARKER" }, \
+ { KGSL_DRAWOBJ_CTX_SWITCH, "CTX_SWITCH" }, \
+ { KGSL_DRAWOBJ_SYNC, "SYNC" }, \
+ { KGSL_DRAWOBJ_END_OF_FRAME, "EOF" }, \
+ { KGSL_DRAWOBJ_PWR_CONSTRAINT, "PWR_CONSTRAINT" }, \
+ { KGSL_DRAWOBJ_SUBMIT_IB_LIST, "IB_LIST" }
+
+/**
+ * enum kgsl_drawobj_cmd_priv - Internal command obj flags
+ * @CMDOBJ_SKIP - skip the entire command obj
+ * @CMDOBJ_FORCE_PREAMBLE - Force the preamble on for
+ * command obj
+ * @CMDOBJ_WFI - Force wait-for-idle for the submission
+ * @CMDOBJ_PROFILE - store the start / retire ticks for
+ * the command obj in the profiling buffer
+ */
+enum kgsl_drawobj_cmd_priv {
+ CMDOBJ_SKIP = 0,
+ CMDOBJ_FORCE_PREAMBLE,
+ CMDOBJ_WFI,
+ CMDOBJ_PROFILE,
+};
+
+struct kgsl_drawobj_cmd *kgsl_drawobj_cmd_create(struct kgsl_device *device,
+ struct kgsl_context *context, unsigned int flags,
+ unsigned int type);
+int kgsl_drawobj_cmd_add_ibdesc(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, struct kgsl_ibdesc *ibdesc);
+int kgsl_drawobj_cmd_add_ibdesc_list(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr, int count);
+int kgsl_drawobj_cmd_add_cmdlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_cmd_add_memlist(struct kgsl_device *device,
+ struct kgsl_drawobj_cmd *cmdobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+
+struct kgsl_drawobj_sync *kgsl_drawobj_sync_create(struct kgsl_device *device,
+ struct kgsl_context *context);
+int kgsl_drawobj_sync_add_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ int count);
+int kgsl_drawobj_sync_add_synclist(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj, void __user *ptr,
+ unsigned int size, unsigned int count);
+int kgsl_drawobj_sync_add_sync(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_cmd_syncpoint *sync);
+
+int kgsl_drawobj_init(void);
+void kgsl_drawobj_exit(void);
+
+void kgsl_dump_syncpoints(struct kgsl_device *device,
+ struct kgsl_drawobj_sync *syncobj);
+
+void kgsl_drawobj_destroy(struct kgsl_drawobj *drawobj);
+
+static inline bool kgsl_drawobj_events_pending(
+ struct kgsl_drawobj_sync *syncobj)
+{
+ return !bitmap_empty(&syncobj->pending, KGSL_MAX_SYNCPOINTS);
+}
+
+static inline bool kgsl_drawobj_event_pending(
+ struct kgsl_drawobj_sync *syncobj, unsigned int bit)
+{
+ if (bit >= KGSL_MAX_SYNCPOINTS)
+ return false;
+
+ return test_bit(bit, &syncobj->pending);
+}
+#endif /* __KGSL_DRAWOBJ_H */
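
The three accessor macros at the top of this header encode the design: the base struct is embedded as the first member of each specialized object, so container_of() can recover the outer type from a base pointer. A minimal sketch of type dispatch, mirroring drawobj_destroy_object() in kgsl_drawobj.c:

    /* Sketch: recover the specialized object from its embedded base. */
    static void describe(struct kgsl_drawobj *drawobj)
    {
    	switch (drawobj->type) {
    	case SYNCOBJ_TYPE: {
    		struct kgsl_drawobj_sync *syncobj = SYNCOBJ(drawobj);

    		pr_info("sync obj with %u syncpoints\n", syncobj->numsyncs);
    		break;
    	}
    	case CMDOBJ_TYPE:
    	case MARKEROBJ_TYPE: {
    		struct kgsl_drawobj_cmd *cmdobj = CMDOBJ(drawobj);

    		pr_info("cmd/marker obj, global_ts %u\n", cmdobj->global_ts);
    		break;
    	}
    	}
    }
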
diff --git a/drivers/gpu/msm/kgsl_iommu.c b/drivers/gpu/msm/kgsl_iommu.c
index 71b6086423d6..9f35a3197a4c 100644
--- a/drivers/gpu/msm/kgsl_iommu.c
+++ b/drivers/gpu/msm/kgsl_iommu.c
@@ -1118,7 +1118,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
{
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
unsigned int cb_num;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
@@ -1128,9 +1127,6 @@ static int _init_global_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
if (kgsl_mmu_is_perprocess(mmu)) {
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_PROCID, &pt->name);
@@ -1189,7 +1185,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
int ret = 0;
struct kgsl_iommu_pt *iommu_pt = NULL;
struct kgsl_iommu *iommu = _IOMMU_PRIV(mmu);
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_SECURE];
int secure_vmid = VMID_CP_PIXEL;
unsigned int cb_num;
@@ -1207,9 +1202,6 @@ static int _init_secure_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
if (IS_ERR(iommu_pt))
return PTR_ERR(iommu_pt);
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = iommu_domain_set_attr(iommu_pt->domain,
DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
if (ret) {
@@ -1251,7 +1243,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
struct kgsl_iommu_context *ctx = &iommu->ctx[KGSL_IOMMU_CONTEXT_USER];
int dynamic = 1;
unsigned int cb_num = ctx->cb_num;
- int disable_htw = !MMU_FEATURE(mmu, KGSL_MMU_COHERENT_HTW);
iommu_pt = _alloc_pt(ctx->dev, mmu, pt);
@@ -1278,9 +1269,6 @@ static int _init_per_process_pt(struct kgsl_mmu *mmu, struct kgsl_pagetable *pt)
goto done;
}
- iommu_domain_set_attr(iommu_pt->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
-
ret = _attach_pt(iommu_pt, ctx);
if (ret)
goto done;
@@ -2492,7 +2480,6 @@ static const struct {
{ "qcom,global_pt", KGSL_MMU_GLOBAL_PAGETABLE },
{ "qcom,hyp_secure_alloc", KGSL_MMU_HYP_SECURE_ALLOC },
{ "qcom,force-32bit", KGSL_MMU_FORCE_32BIT },
- { "qcom,coherent-htw", KGSL_MMU_COHERENT_HTW },
};
static int _kgsl_iommu_probe(struct kgsl_device *device,
diff --git a/drivers/gpu/msm/kgsl_mmu.h b/drivers/gpu/msm/kgsl_mmu.h
index acbc0e784cf2..3e32c25b3dbe 100644
--- a/drivers/gpu/msm/kgsl_mmu.h
+++ b/drivers/gpu/msm/kgsl_mmu.h
@@ -130,8 +130,6 @@ struct kgsl_mmu_pt_ops {
#define KGSL_MMU_FORCE_32BIT BIT(5)
/* 64 bit address is live */
#define KGSL_MMU_64BIT BIT(6)
-/* MMU can do coherent hardware table walks */
-#define KGSL_MMU_COHERENT_HTW BIT(7)
 /* The MMU supports non-contiguous pages */
#define KGSL_MMU_PAGED BIT(8)
/* The device requires a guard page */
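
With the coherent-HTW bit retired, the remaining KGSL_MMU_* capabilities are still tested through MMU_FEATURE(), as the deleted kgsl_iommu.c call sites show. A small sketch of the test pattern (the macro body shown in the comment is an assumption based on those call sites, not quoted from this tree):

    /* assumed shape: MMU_FEATURE(_mmu, _bit) -> ((_mmu)->features & (_bit)) */
    static bool mmu_supports_paging(struct kgsl_mmu *mmu)
    {
    	return MMU_FEATURE(mmu, KGSL_MMU_PAGED) != 0;
    }
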
diff --git a/drivers/gpu/msm/kgsl_trace.h b/drivers/gpu/msm/kgsl_trace.h
index 4ef9f80177d6..6438c6e65b97 100644
--- a/drivers/gpu/msm/kgsl_trace.h
+++ b/drivers/gpu/msm/kgsl_trace.h
@@ -36,14 +36,13 @@ TRACE_EVENT(kgsl_issueibcmds,
TP_PROTO(struct kgsl_device *device,
int drawctxt_id,
- struct kgsl_cmdbatch *cmdbatch,
unsigned int numibs,
int timestamp,
int flags,
int result,
unsigned int type),
- TP_ARGS(device, drawctxt_id, cmdbatch, numibs, timestamp,
+ TP_ARGS(device, drawctxt_id, numibs, timestamp,
flags, result, type),
TP_STRUCT__entry(
@@ -74,7 +73,7 @@ TRACE_EVENT(kgsl_issueibcmds,
__entry->numibs,
__entry->timestamp,
__entry->flags ? __print_flags(__entry->flags, "|",
- KGSL_CMDBATCH_FLAGS) : "None",
+ KGSL_DRAWOBJ_FLAGS) : "None",
__entry->result,
__print_symbolic(__entry->drawctxt_type, KGSL_CONTEXT_TYPES)
)
@@ -1028,59 +1027,62 @@ TRACE_EVENT(kgsl_pagetable_destroy,
);
DECLARE_EVENT_CLASS(syncpoint_timestamp_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp),
+ TP_ARGS(syncobj, context, timestamp),
TP_STRUCT__entry(
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
__field(unsigned int, context_id)
__field(unsigned int, timestamp)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__entry->context_id = context->id;
__entry->timestamp = timestamp;
),
TP_printk("ctx=%d sync ctx=%d ts=%d",
- __entry->cmdbatch_context_id, __entry->context_id,
+ __entry->syncobj_context_id, __entry->context_id,
__entry->timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DEFINE_EVENT(syncpoint_timestamp_template, syncpoint_timestamp_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, struct kgsl_context *context,
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj,
+ struct kgsl_context *context,
unsigned int timestamp),
- TP_ARGS(cmdbatch, context, timestamp)
+ TP_ARGS(syncobj, context, timestamp)
);
DECLARE_EVENT_CLASS(syncpoint_fence_template,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name),
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name),
TP_STRUCT__entry(
__string(fence_name, name)
- __field(unsigned int, cmdbatch_context_id)
+ __field(unsigned int, syncobj_context_id)
),
TP_fast_assign(
- __entry->cmdbatch_context_id = cmdbatch->context->id;
+ __entry->syncobj_context_id = syncobj->base.context->id;
__assign_str(fence_name, name);
),
TP_printk("ctx=%d fence=%s",
- __entry->cmdbatch_context_id, __get_str(fence_name))
+ __entry->syncobj_context_id, __get_str(fence_name))
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
DEFINE_EVENT(syncpoint_fence_template, syncpoint_fence_expire,
- TP_PROTO(struct kgsl_cmdbatch *cmdbatch, char *name),
- TP_ARGS(cmdbatch, name)
+ TP_PROTO(struct kgsl_drawobj_sync *syncobj, char *name),
+ TP_ARGS(syncobj, name)
);
TRACE_EVENT(kgsl_msg,
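With the cmdbatch-to-drawobj rename, the syncpoint trace classes now take a struct kgsl_drawobj_sync and reach the owning context through syncobj->base.context. A hedged sketch of a post-rename call site; the surrounding function is hypothetical, only the tracepoint signature comes from the hunk above:

/* Hypothetical caller; only trace_syncpoint_fence() is from the patch. */
static void example_on_fence_armed(struct kgsl_drawobj_sync *syncobj,
		char *fence_name)
{
	/* pre-patch equivalent: trace_syncpoint_fence(cmdbatch, name) */
	trace_syncpoint_fence(syncobj, fence_name);
}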
diff --git a/drivers/iio/adc/qcom-rradc.c b/drivers/iio/adc/qcom-rradc.c
index bb44b6d82ccd..ebb49230d4d7 100644
--- a/drivers/iio/adc/qcom-rradc.c
+++ b/drivers/iio/adc/qcom-rradc.c
@@ -20,6 +20,7 @@
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
+#include <linux/qpnp/qpnp-revid.h>
#define FG_ADC_RR_EN_CTL 0x46
#define FG_ADC_RR_SKIN_TEMP_LSB 0x50
@@ -150,13 +151,18 @@
#define FG_ADC_RR_TEMP_FS_VOLTAGE_NUM 5000000
#define FG_ADC_RR_TEMP_FS_VOLTAGE_DEN 3
-#define FG_ADC_RR_DIE_TEMP_OFFSET 600000
+#define FG_ADC_RR_DIE_TEMP_OFFSET 601400
#define FG_ADC_RR_DIE_TEMP_SLOPE 2
#define FG_ADC_RR_DIE_TEMP_OFFSET_MILLI_DEGC 25000
-#define FG_ADC_RR_CHG_TEMP_OFFSET 1288000
-#define FG_ADC_RR_CHG_TEMP_SLOPE 4
-#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 27000
+#define FAB_ID_GF 0x30
+#define FAB_ID_SMIC 0x11
+#define FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV 1296794
+#define FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C 3858
+#define FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV 1339518
+#define FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C 3598
+#define FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC 25000
+#define FG_ADC_RR_CHG_THRESHOLD_SCALE 4
#define FG_ADC_RR_VOLT_INPUT_FACTOR 8
#define FG_ADC_RR_CURR_INPUT_FACTOR 2
@@ -201,6 +207,8 @@ struct rradc_chip {
struct iio_chan_spec *iio_chans;
unsigned int nchannels;
struct rradc_chan_prop *chan_props;
+ struct device_node *revid_dev_node;
+ struct pmic_revid_data *pmic_fab_id;
};
struct rradc_channels {
@@ -347,16 +355,34 @@ static int rradc_post_process_chg_temp_hot(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = (int64_t) adc_code * 4;
- temp = temp * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = (int64_t) adc_code * FG_ADC_RR_CHG_THRESHOLD_SCALE;
+ uv = uv * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv = uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
@@ -380,15 +406,33 @@ static int rradc_post_process_chg_temp(struct rradc_chip *chip,
struct rradc_chan_prop *prop, u16 adc_code,
int *result_millidegc)
{
- int64_t temp = 0;
+ int64_t uv = 0, offset = 0, slope = 0;
- temp = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
- temp = div64_s64(temp, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
+ if (chip->revid_dev_node) {
+ switch (chip->pmic_fab_id->fab_id) {
+ case FAB_ID_GF:
+ offset = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C;
+ break;
+ case FAB_ID_SMIC:
+ offset = FG_ADC_RR_CHG_TEMP_SMIC_OFFSET_UV;
+ slope = FG_ADC_RR_CHG_TEMP_SMIC_SLOPE_UV_PER_C;
+ break;
+ default:
+ return -EINVAL;
+ }
+ } else {
+ pr_err("No temperature scaling coefficients\n");
+ return -EINVAL;
+ }
+
+ uv = ((int64_t) adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM);
+ uv = div64_s64(uv, (FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
FG_MAX_ADC_READINGS));
- temp = FG_ADC_RR_CHG_TEMP_OFFSET - temp;
- temp = div64_s64(temp, FG_ADC_RR_CHG_TEMP_SLOPE);
- temp = temp + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
- *result_millidegc = temp;
+ uv = offset - uv;
+ uv = div64_s64((uv * FG_ADC_SCALE_MILLI_FACTOR), slope);
+ uv += FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC;
+ *result_millidegc = uv;
return 0;
}
@@ -516,7 +560,7 @@ static int rradc_do_conversion(struct rradc_chip *chip,
buf[0] &= FG_RR_ADC_STS_CHANNEL_READING_MASK;
if (buf[0] != FG_RR_ADC_STS_CHANNEL_READING_MASK) {
- pr_warn("%s is not ready; nothing to read\n",
+ pr_debug("%s is not ready; nothing to read\n",
rradc_chans[prop->channel].datasheet_name);
rc = -ENODATA;
goto fail;
@@ -653,6 +697,22 @@ static int rradc_get_dt_data(struct rradc_chip *chip, struct device_node *node)
}
chip->base = base;
+ chip->revid_dev_node = of_parse_phandle(node, "qcom,pmic-revid", 0);
+ if (chip->revid_dev_node) {
+ chip->pmic_fab_id = get_revid_data(chip->revid_dev_node);
+ if (IS_ERR(chip->pmic_fab_id)) {
+ rc = PTR_ERR(chip->pmic_fab_id);
+ if (rc != -EPROBE_DEFER)
+ pr_err("Unable to get pmic_revid rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->pmic_fab_id->fab_id == -EINVAL) {
+ rc = chip->pmic_fab_id->fab_id;
+ pr_debug("Unable to read fabid rc=%d\n", rc);
+ }
+ }
+
iio_chan = chip->iio_chans;
for (i = 0; i < RR_ADC_MAX; i++) {
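The charger-temperature conversion now selects per-fab offset/slope pairs and works in microvolts. A standalone sketch of the arithmetic for the GF fab, assuming FG_ADC_SCALE_MILLI_FACTOR (used but not defined in these hunks) equals 1000 and that FG_MAX_ADC_READINGS is defined elsewhere in the file:

/* Sketch of the GF-fab conversion: ADC code -> uV -> milli-degC.
 * FG_ADC_SCALE_MILLI_FACTOR is assumed to be 1000 (uV over uV/degC
 * yields milli-degC); FG_MAX_ADC_READINGS is assumed defined elsewhere.
 */
static int chg_temp_mdegc_gf(u16 adc_code)
{
	int64_t uv = (int64_t)adc_code * FG_ADC_RR_TEMP_FS_VOLTAGE_NUM;

	uv = div64_s64(uv, FG_ADC_RR_TEMP_FS_VOLTAGE_DEN *
			FG_MAX_ADC_READINGS);
	uv = FG_ADC_RR_CHG_TEMP_GF_OFFSET_UV - uv;
	uv = div64_s64(uv * 1000, FG_ADC_RR_CHG_TEMP_GF_SLOPE_UV_PER_C);
	return (int)(uv + FG_ADC_RR_CHG_TEMP_OFFSET_MILLI_DEGC);
}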
diff --git a/drivers/input/misc/Kconfig b/drivers/input/misc/Kconfig
index 31369d8c0ef3..9c1380b65b77 100644
--- a/drivers/input/misc/Kconfig
+++ b/drivers/input/misc/Kconfig
@@ -823,4 +823,6 @@ config INPUT_DRV2667_HAPTICS
To compile this driver as a module, choose M here: the
module will be called drv2667-haptics.
+source "drivers/input/misc/ots_pat9125/Kconfig"
+
endif
diff --git a/drivers/input/misc/Makefile b/drivers/input/misc/Makefile
index 4019f19dd848..4e806ac056ce 100644
--- a/drivers/input/misc/Makefile
+++ b/drivers/input/misc/Makefile
@@ -78,3 +78,4 @@ obj-$(CONFIG_INPUT_WM831X_ON) += wm831x-on.o
obj-$(CONFIG_INPUT_XEN_KBDDEV_FRONTEND) += xen-kbdfront.o
obj-$(CONFIG_INPUT_YEALINK) += yealink.o
obj-$(CONFIG_INPUT_IDEAPAD_SLIDEBAR) += ideapad_slidebar.o
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH) += ots_pat9125/
diff --git a/drivers/input/misc/ots_pat9125/Kconfig b/drivers/input/misc/ots_pat9125/Kconfig
new file mode 100644
index 000000000000..af82edd0faae
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/Kconfig
@@ -0,0 +1,14 @@
+#
+# PixArt OTS switch driver configuration
+#
+
+config INPUT_PIXART_OTS_PAT9125_SWITCH
+ tristate "PixArt PAT9125 Rotating Switch driver"
+ depends on INPUT && I2C && GPIOLIB
+ help
+ Say Y to enable support for the PixArt OTS pat9125
+ rotating switch driver.
+ If unsure, say N.
+
+ To compile this driver as a module, choose M here: the
+ module will be called ots_pat9125.
diff --git a/drivers/input/misc/ots_pat9125/Makefile b/drivers/input/misc/ots_pat9125/Makefile
new file mode 100644
index 000000000000..a697caf69644
--- /dev/null
+++ b/drivers/input/misc/ots_pat9125/Makefile
@@ -0,0 +1,7 @@
+#
+# Makefile for the PixArt OTS switch driver.
+#
+
+# Each configuration option enables a list of files.
+
+obj-$(CONFIG_INPUT_PIXART_OTS_PAT9125_SWITCH) += pat9125_linux_driver.o pixart_ots.o
diff --git a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
index 0a93f11e2b7e..fa44b7d866f8 100644
--- a/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
+++ b/drivers/input/misc/ots_pat9125/pat9125_linux_driver.c
@@ -4,378 +4,352 @@
*
*/
-#include <linux/kernel.h>
#include <linux/input.h>
#include <linux/pm.h>
#include <linux/i2c.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
-#include <linux/gpio.h>
#include <linux/of_gpio.h>
#include <linux/delay.h>
-#include <linux/dma-mapping.h>
-#include <linux/miscdevice.h>
-
#include "pixart_ots.h"
-#include "pixart_platform.h"
-
-static int pat9125_init_input_data(void);
-
-#define pat9125_name "pixart_pat9125"
-
-#define pat9125_DEV_NAME pat9125_name
-
-static struct pat9125_linux_data_t pat9125data;
-static int pat9125_i2c_write(u8 reg, u8 *data, int len);
-static int pat9125_i2c_read(u8 reg, u8 *data);
-
-extern unsigned char ReadData(unsigned char addr)
-{
- u8 data = 0xff;
+struct pixart_pat9125_data {
+ struct i2c_client *client;
+ struct input_dev *input;
+ int irq_gpio;
+ u32 irq_flags;
+ u32 press_keycode;
+ bool press_en;
+ bool inverse_x;
+ bool inverse_y;
+};
- pat9125_i2c_read(addr, &data);
- return data;
-}
-extern void WriteData(unsigned char addr, unsigned char data)
+static int pat9125_i2c_write(struct i2c_client *client, u8 reg, u8 *data,
+ int len)
{
- pat9125_i2c_write(addr, &data, 1);
-}
-extern void delay_ms(int ms)
-{
- msleep(ms);
-}
-static int pat9125_i2c_write(u8 reg, u8 *data, int len)
-{
- u8 buf[20];
- int rc;
- int ret = 0;
- int i;
+ u8 buf[MAX_BUF_SIZE];
+ int ret = 0, i;
+ struct device *dev = &client->dev;
buf[0] = reg;
- if (len >= 20) {
- pr_debug(
- "%s (%d) : FAILED: buffer size is limitted(20) %d\n",
- __func__, __LINE__, len);
- dev_err(&pat9125data.client->dev, "pat9125_i2c_write FAILED: buffer size is limitted(20)\n");
+ if (len >= MAX_BUF_SIZE) {
+ dev_err(dev, "%s Failed: buffer size is %d [Max Limit is %d]\n",
+ __func__, len, MAX_BUF_SIZE);
return -ENODEV;
}
-
for (i = 0 ; i < len; i++)
buf[i+1] = data[i];
-
/* Returns negative errno, or else the number of bytes written. */
- rc = i2c_master_send(pat9125data.client, buf, len+1);
-
- if (rc != len+1) {
- pr_debug(
- "%s (%d) : FAILED: writing to reg 0x%x\n",
- __func__, __LINE__, reg);
-
- ret = -ENODEV;
- }
+ ret = i2c_master_send(client, buf, len+1);
+ if (ret != len+1)
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
return ret;
}
-static int pat9125_i2c_read(u8 reg, u8 *data)
+static int pat9125_i2c_read(struct i2c_client *client, u8 reg, u8 *data)
{
-
- u8 buf[20];
- int rc;
+ u8 buf[MAX_BUF_SIZE];
+ int ret;
+ struct device *dev = &client->dev;
buf[0] = reg;
-
/*
- * If everything went ok (i.e. 1 msg transmitted),
- *return #bytes transmitted, else error code.
- * thus if transmit is ok return value 1
+ * If everything went ok (1 msg transmitted), return #bytes transmitted,
+ * else an error code; thus a successful transmit returns 1.
*/
- rc = i2c_master_send(pat9125data.client, buf, 1);
- if (rc != 1) {
- pr_debug(
- "%s (%d) : FAILED: writing to address 0x%x\n",
- __func__, __LINE__, reg);
- return -ENODEV;
+ ret = i2c_master_send(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: writing to reg 0x%x\n", __func__, reg);
+ return ret;
}
-
/* returns negative errno, or else the number of bytes read */
- rc = i2c_master_recv(pat9125data.client, buf, 1);
- if (rc != 1) {
- pr_debug(
- "%s (%d) : FAILED: reading data\n",
- __func__, __LINE__);
- return -ENODEV;
+ ret = i2c_master_recv(client, buf, 1);
+ if (ret != 1) {
+ dev_err(dev, "%s Failed: reading reg 0x%x\n", __func__, reg);
+ return ret;
}
-
*data = buf[0];
- return 0;
-}
-
-void pixart_pat9125_ist(void)
-{
-
-}
-static irqreturn_t pixart_pat9125_irq(int irq, void *handle)
-{
- pixart_pat9125_ist();
- return IRQ_HANDLED;
+ return ret;
}
-static int pat9125_start(void)
+u8 read_data(struct i2c_client *client, u8 addr)
{
- int err = (-1);
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
-
- err = request_threaded_irq(pat9125data.irq, NULL, pixart_pat9125_irq,
- pat9125data.irq_flags,
- "pixart_pat9125_irq",
- &pat9125data);
- if (err)
- pr_debug("irq %d busy?\n", pat9125data.irq);
-
- pat9125data.last_jiffies = jiffies_64;
+ u8 data = 0xff;
- return err;
+ pat9125_i2c_read(client, addr, &data);
+ return data;
}
-static void pat9125_stop(void)
+void write_data(struct i2c_client *client, u8 addr, u8 data)
{
- free_irq(pat9125data.irq, &pat9125data);
+ pat9125_i2c_write(client, addr, &data, 1);
}
-static ssize_t pat9125_fops_read(struct file *filp,
- char *buf, size_t count, loff_t *l)
+static irqreturn_t pat9125_irq(int irq, void *dev_data)
{
- return 0;
-}
+ u8 delta_x = 0, delta_y = 0, motion;
+ struct pixart_pat9125_data *data = dev_data;
+ struct input_dev *ipdev = data->input;
+ struct device *dev = &data->client->dev;
+
+ motion = read_data(data->client, PIXART_PAT9125_MOTION_STATUS_REG);
+ do {
+ /* check if MOTION bit is set or not */
+ if (motion & PIXART_PAT9125_VALID_MOTION_DATA) {
+ delta_x = read_data(data->client,
+ PIXART_PAT9125_DELTA_X_LO_REG);
+ delta_y = read_data(data->client,
+ PIXART_PAT9125_DELTA_Y_LO_REG);
+
+ /* Inverse x depending upon the device orientation */
+ delta_x = (data->inverse_x) ? -delta_x : delta_x;
+ /* Inverse y depending upon the device orientation */
+ delta_y = (data->inverse_y) ? -delta_y : delta_y;
+ }
-static ssize_t pat9125_fops_write(struct file *filp,
- const char *buf, size_t count, loff_t *f_ops)
-{
- return 0;
-}
+ dev_dbg(dev, "motion = %x, delta_x = %x, delta_y = %x\n",
+ motion, delta_x, delta_y);
-static long pat9125_fops_ioctl(struct file *file,
- unsigned int cmd, unsigned long arg)
-{
- return 0;
-}
+ if (delta_x != 0) {
+ /* Send delta_x as REL_WHEEL for rotation */
+ input_report_rel(ipdev, REL_WHEEL, (s8) delta_x);
+ input_sync(ipdev);
+ }
-static int pat9125_fops_open(struct inode *inode, struct file *filp)
-{
- return 0;
-}
+ if (data->press_en && delta_y != 0) {
+ if ((s8) delta_y > 0) {
+ /* Send DOWN event for press keycode */
+ input_report_key(ipdev, data->press_keycode, 1);
+ input_sync(ipdev);
+ } else {
+ /* Send UP event for press keycode */
+ input_report_key(ipdev, data->press_keycode, 0);
+ input_sync(ipdev);
+ }
+ }
+ usleep_range(PIXART_SAMPLING_PERIOD_US_MIN,
+ PIXART_SAMPLING_PERIOD_US_MAX);
-static int pat9125_fops_release(struct inode *inode, struct file *filp)
-{
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
- return 0;
+ motion = read_data(data->client,
+ PIXART_PAT9125_MOTION_STATUS_REG);
+ } while (motion & PIXART_PAT9125_VALID_MOTION_DATA);
+
+ return IRQ_HANDLED;
}
-static const struct file_operations pat9125_fops = {
-owner: THIS_MODULE,
- read : pat9125_fops_read,
- write : pat9125_fops_write,
- /* ioctl : pat9125_fops_ioctl, */
- unlocked_ioctl : pat9125_fops_ioctl,
- open : pat9125_fops_open,
- release : pat9125_fops_release,
-};
-/*----------------------------------------------------------------------------*/
-struct miscdevice pat9125_device = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = pat9125_name,
- .fops = &pat9125_fops,
-};
static ssize_t pat9125_test_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
- char s[256];
- char *p = s;
-
- pr_debug("%s (%d) : write_reg_store\n", __func__, __LINE__);
-
- memcpy(s, buf, sizeof(s));
-
+ char s[256], *p = s;
+ int reg_data = 0, i;
+ long rd_addr, wr_addr, wr_data;
+ struct pixart_pat9125_data *data =
+ (struct pixart_pat9125_data *)dev->driver_data;
+ struct i2c_client *client = data->client;
+
+ for (i = 0; i < sizeof(s); i++)
+ s[i] = buf[i];
*(s+1) = '\0';
*(s+4) = '\0';
*(s+7) = '\0';
/* example(in console): echo w 12 34 > rw_reg */
if (*p == 'w') {
- long write_addr, write_data;
-
p += 2;
- if (!kstrtol(p, 16, &write_addr)) {
+ if (!kstrtol(p, 16, &wr_addr)) {
p += 3;
- if (!kstrtol(p, 16, &write_data)) {
- pr_debug(
- "w 0x%x 0x%x\n",
- (u8)write_addr, (u8)write_data);
- WriteData((u8)write_addr, (u8)write_data);
+ if (!kstrtol(p, 16, &wr_data)) {
+ dev_dbg(dev, "w 0x%x 0x%x\n",
+ (u8)wr_addr, (u8)wr_data);
+ write_data(client, (u8)wr_addr, (u8)wr_data);
}
}
- /* example(in console): echo r 12 > rw_reg */
- } else if (*p == 'r') {
- long read_addr;
-
+ }
+ /* example(in console): echo r 12 > rw_reg */
+ else if (*p == 'r') {
p += 2;
- if (!kstrtol(p, 16, &read_addr)) {
- int data = 0;
-
- data = ReadData((u8)read_addr);
- pr_debug(
- "r 0x%x 0x%x\n",
- (unsigned int)read_addr, data);
+ if (!kstrtol(p, 16, &rd_addr)) {
+ reg_data = read_data(client, (u8)rd_addr);
+ dev_dbg(dev, "r 0x%x 0x%x\n",
+ (unsigned int)rd_addr, reg_data);
}
}
return count;
}
-static ssize_t pat9125_test_show(
- struct device *dev, struct device_attribute *attr, char *buf)
+static ssize_t pat9125_test_show(struct device *dev,
+ struct device_attribute *attr, char *buf)
{
-
- /* cat */
- pr_debug("%s (%d) :\n", __func__, __LINE__);
-
return 0;
}
-static DEVICE_ATTR(
- test,
- S_IRUGO | S_IWUSR | S_IWGRP, pat9125_test_show, pat9125_test_store);
-static struct device_attribute *pat9125_attr_list[] = {
- &dev_attr_test,
+static DEVICE_ATTR(test, S_IRUGO | S_IWUSR | S_IWGRP,
+ pat9125_test_show, pat9125_test_store);
+
+static struct attribute *pat9125_attr_list[] = {
+ &dev_attr_test.attr,
+ NULL,
};
-static int pat9125_create_attr(struct device *dev)
-{
- int idx, err = 0;
- int num = ARRAY_SIZE(pat9125_attr_list);
+static struct attribute_group pat9125_attr_grp = {
+ .attrs = pat9125_attr_list,
+};
- if (!dev)
- return -EINVAL;
- for (idx = 0; idx < num; idx++) {
- err = device_create_file(dev, pat9125_attr_list[idx]);
- if (err) {
- pr_debug(
- "device_create_file (%s) = %d\n",
- pat9125_attr_list[idx]->attr.name, err);
- break;
+static int pat9125_parse_dt(struct device *dev,
+ struct pixart_pat9125_data *data)
+{
+ struct device_node *np = dev->of_node;
+ u32 temp_val;
+ int ret;
+
+ data->inverse_x = of_property_read_bool(np, "pixart,inverse-x");
+ data->inverse_y = of_property_read_bool(np, "pixart,inverse-y");
+ data->press_en = of_property_read_bool(np, "pixart,press-enabled");
+ if (data->press_en) {
+ ret = of_property_read_u32(np, "pixart,press-keycode",
+ &temp_val);
+ if (!ret) {
+ data->press_keycode = temp_val;
+ } else {
+ dev_err(dev, "Unable to parse press-keycode\n");
+ return ret;
}
}
- return err;
+
+ return 0;
}
-static int pat9125_i2c_probe(
- struct i2c_client *client,
- const struct i2c_device_id *id)
+static int pat9125_i2c_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
{
int err = 0;
- struct device_node *np;
-
- struct i2c_adapter *adapter = to_i2c_adapter(client->dev.parent);
-
- pr_debug("%s (%d) : probe module....\n", __func__, __LINE__);
-
- memset(&pat9125data, 0, sizeof(pat9125data));
- err = i2c_check_functionality(adapter, I2C_FUNC_SMBUS_BYTE);
- if (err < 0)
- goto error_return;
-
- pat9125data.client = client;
- err = misc_register(&pat9125_device);
- if (err) {
- pr_debug("pat9125_device register failed\n");
- goto error_return;
+ struct pixart_pat9125_data *data;
+ struct input_dev *input;
+ struct device *dev = &client->dev;
+
+ err = i2c_check_functionality(client->adapter, I2C_FUNC_SMBUS_BYTE);
+ if (err < 0) {
+ dev_err(dev, "I2C not supported\n");
+ return -ENXIO;
}
- pat9125data.pat9125_device = pat9125_device.this_device;
- err = pat9125_create_attr(pat9125data.pat9125_device);
- if (err) {
- pr_debug("create attribute err = %d\n", err);
- goto error_return;
+ if (client->dev.of_node) {
+ data = devm_kzalloc(dev, sizeof(struct pixart_pat9125_data),
+ GFP_KERNEL);
+ if (!data)
+ return -ENOMEM;
+ err = pat9125_parse_dt(dev, data);
+ if (err) {
+ dev_err(dev, "DT parsing failed, errno:%d\n", err);
+ return err;
+ }
+ } else {
+ data = client->dev.platform_data;
+ if (!data) {
+ dev_err(dev, "Invalid pat9125 data\n");
+ return -EINVAL;
+ }
}
+ data->client = client;
- if (pat9125_init_input_data() < 0)
- goto error_return;
+ input = devm_input_allocate_device(dev);
+ if (!input) {
+ dev_err(dev, "Failed to alloc input device\n");
+ return -ENOMEM;
+ }
- /* interrupt initialization */
- pat9125data.i2c_dev = &client->dev;
+ input_set_capability(input, EV_REL, REL_WHEEL);
+ if (data->press_en)
+ input_set_capability(input, EV_KEY, data->press_keycode);
- np = pat9125data.i2c_dev->of_node;
- pat9125data.irq_gpio = of_get_named_gpio_flags(np,
- "pixart_pat9125,irq-gpio", 0, &pat9125data.irq_flags);
+ i2c_set_clientdata(client, data);
+ input_set_drvdata(input, data);
+ input->name = PAT9125_DEV_NAME;
- pr_debug(
- "irq_gpio: %d, irq_flags: 0x%x\n",
- pat9125data.irq_gpio, pat9125data.irq_flags);
+ data->input = input;
+ err = input_register_device(data->input);
+ if (err < 0) {
+ dev_err(dev, "Failed to register input device\n");
+ goto err_register_input_device;
+ }
- if (!gpio_is_valid(pat9125data.irq_gpio)) {
- err = (-1);
- pr_debug(
- "invalid irq_gpio: %d\n",
- pat9125data.irq_gpio);
- goto error_return;
+ if (!gpio_is_valid(data->irq_gpio)) {
+ dev_err(dev, "invalid irq_gpio: %d\n", data->irq_gpio);
+ return -EINVAL;
}
- err = gpio_request(pat9125data.irq_gpio, "pixart_pat9125_irq_gpio");
+ err = gpio_request(data->irq_gpio, "pixart_pat9125_irq_gpio");
if (err) {
- pr_debug(
- "unable to request gpio [%d], [%d]\n",
- pat9125data.irq_gpio, err);
- goto error_return;
+ dev_err(dev, "unable to request gpio %d\n", data->irq_gpio);
+ return err;
}
- err = gpio_direction_input(pat9125data.irq_gpio);
- if (err) {
- pr_debug("unable to set dir for gpio[%d], [%d]\n",
- pat9125data.irq_gpio, err);
- goto error_return;
+ err = gpio_direction_input(data->irq_gpio);
+ if (err) {
+ dev_err(dev, "unable to set dir for gpio %d\n", data->irq_gpio);
+ goto free_gpio;
}
- pat9125data.irq = gpio_to_irq(pat9125data.irq_gpio);
+ if (!ots_sensor_init(client)) {
+ err = -ENODEV;
+ goto err_sensor_init;
+ }
- if (!OTS_Sensor_Init())
- goto error_return;
+ err = devm_request_threaded_irq(dev, client->irq, NULL, pat9125_irq,
+ IRQF_ONESHOT | IRQF_TRIGGER_FALLING | IRQF_TRIGGER_LOW,
+ "pixart_pat9125_irq", data);
+ if (err) {
+ dev_err(dev, "Req irq %d failed, errno:%d\n", client->irq, err);
+ goto err_request_threaded_irq;
+ }
- if (!pat9125_start())
- goto error_return;
+ err = sysfs_create_group(&(input->dev.kobj), &pat9125_attr_grp);
+ if (err) {
+ dev_err(dev, "Failed to create sysfs group, errno:%d\n", err);
+ goto err_sysfs_create;
+ }
return 0;
-error_return:
-
+err_sysfs_create:
+err_request_threaded_irq:
+err_sensor_init:
+free_gpio:
+ gpio_free(data->irq_gpio);
+err_register_input_device:
+ input_free_device(data->input);
return err;
-
}
-
static int pat9125_i2c_remove(struct i2c_client *client)
{
-
+ struct pixart_pat9125_data *data = i2c_get_clientdata(client);
+
+ devm_free_irq(&client->dev, client->irq, data);
+ if (gpio_is_valid(data->irq_gpio))
+ gpio_free(data->irq_gpio);
+ input_unregister_device(data->input);
+ devm_kfree(&client->dev, data);
+ data = NULL;
return 0;
}
static int pat9125_suspend(struct device *dev)
-{ pr_debug("%s (%d) : pat9125 suspend\n", __func__, __LINE__);
+{
return 0;
}
static int pat9125_resume(struct device *dev)
{
- pr_debug("%s (%d) : pat9125 resume\n", __func__, __LINE__);
return 0;
}
static const struct i2c_device_id pat9125_device_id[] = {
- {pat9125_DEV_NAME, 0},
+ {PAT9125_DEV_NAME, 0},
{}
};
-
MODULE_DEVICE_TABLE(i2c, pat9125_device_id);
static const struct dev_pm_ops pat9125_pm_ops = {
@@ -390,7 +364,7 @@ static const struct of_device_id pixart_pat9125_match_table[] = {
static struct i2c_driver pat9125_i2c_driver = {
.driver = {
- .name = pat9125_DEV_NAME,
+ .name = PAT9125_DEV_NAME,
.owner = THIS_MODULE,
.pm = &pat9125_pm_ops,
.of_match_table = pixart_pat9125_match_table,
@@ -399,72 +373,8 @@ static struct i2c_driver pat9125_i2c_driver = {
.remove = pat9125_i2c_remove,
.id_table = pat9125_device_id,
};
-static int pat9125_open(struct input_dev *dev)
-{
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
- return 0;
-}
-
-static void pat9125_close(struct input_dev *dev)
-{
- pr_debug(">>> %s (%d)\n", __func__, __LINE__);
-}
-
-static int pat9125_init_input_data(void)
-{
- int ret = 0;
-
- pr_debug("%s (%d) : initialize data\n", __func__, __LINE__);
-
- pat9125data.pat9125_input_dev = input_allocate_device();
-
- if (!pat9125data.pat9125_input_dev) {
- pr_debug(
- "%s (%d) : could not allocate mouse input device\n",
- __func__, __LINE__);
- return -ENOMEM;
- }
-
- input_set_drvdata(pat9125data.pat9125_input_dev, &pat9125data);
- pat9125data.pat9125_input_dev->name = "Pixart pat9125";
-
- pat9125data.pat9125_input_dev->open = pat9125_open;
- pat9125data.pat9125_input_dev->close = pat9125_close;
-
- ret = input_register_device(pat9125data.pat9125_input_dev);
- if (ret < 0) {
- input_free_device(pat9125data.pat9125_input_dev);
- pr_debug(
- "%s (%d) : could not register input device\n",
- __func__, __LINE__);
- return ret;
- }
-
- return 0;
-}
-
-static int __init pat9125_linux_init(void)
-{
- return i2c_add_driver(&pat9125_i2c_driver);
-}
-
-
+module_i2c_driver(pat9125_i2c_driver);
-
-static void __exit pat9125_linux_exit(void)
-{
- pr_debug("%s (%d) : exit module\n", __func__, __LINE__);
- pat9125_stop();
- misc_register(&pat9125_device);
- i2c_del_driver(&pat9125_i2c_driver);
-}
-
-
-module_init(pat9125_linux_init);
-module_exit(pat9125_linux_exit);
MODULE_AUTHOR("pixart");
MODULE_DESCRIPTION("pixart pat9125 driver");
MODULE_LICENSE("GPL");
-
-
-
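The rewrite replaces the misc device and the hand-rolled module_init()/module_exit() pair with module_i2c_driver(); notably, the removed pat9125_linux_exit() called misc_register() where misc_deregister() was evidently intended, a bug the refactor removes wholesale. For reference, module_i2c_driver(pat9125_i2c_driver) expands to roughly:

/* Approximate expansion of module_i2c_driver(pat9125_i2c_driver): */
static int __init pat9125_i2c_driver_init(void)
{
	return i2c_add_driver(&pat9125_i2c_driver);
}
module_init(pat9125_i2c_driver_init);

static void __exit pat9125_i2c_driver_exit(void)
{
	i2c_del_driver(&pat9125_i2c_driver);
}
module_exit(pat9125_i2c_driver_exit);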
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.c b/drivers/input/misc/ots_pat9125/pixart_ots.c
index 70736197de3c..fa73ffe40985 100644
--- a/drivers/input/misc/ots_pat9125/pixart_ots.c
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.c
@@ -4,68 +4,74 @@
*
*/
+#include "pixart_platform.h"
#include "pixart_ots.h"
-static void OTS_WriteRead(uint8_t address, uint8_t wdata);
+static void ots_write_read(struct i2c_client *client, u8 address, u8 wdata)
+{
+ u8 read_value;
+
+ do {
+ write_data(client, address, wdata);
+ read_value = read_data(client, address);
+ } while (read_value != wdata);
+}
-bool OTS_Sensor_Init(void)
+bool ots_sensor_init(struct i2c_client *client)
{
unsigned char sensor_pid = 0, read_id_ok = 0;
/*
* Read sensor_pid in address 0x00 to check if the
- * serial link is valid, read value should be 0x31.
+ * serial link is valid, read value should be 0x31.
*/
- sensor_pid = ReadData(0x00);
+ sensor_pid = read_data(client, PIXART_PAT9125_PRODUCT_ID1_REG);
- if (sensor_pid == 0x31) {
+ if (sensor_pid == PIXART_PAT9125_SENSOR_ID) {
read_id_ok = 1;
/*
- *PAT9125 sensor recommended settings:
- * switch to bank0, not allowed to perform OTS_RegWriteRead
+ * PAT9125 sensor recommended settings:
+ * switch to bank0, not allowed to perform ots_write_read
*/
- WriteData(0x7F, 0x00);
+ write_data(client, PIXART_PAT9125_SELECT_BANK_REG,
+ PIXART_PAT9125_BANK0);
/*
* software reset (i.e. set bit7 to 1).
* It will reset to 0 automatically
* so perform OTS_RegWriteRead is not allowed.
*/
- WriteData(0x06, 0x97);
+ write_data(client, PIXART_PAT9125_CONFIG_REG,
+ PIXART_PAT9125_RESET);
/* delay 1ms */
- delay_ms(1);
+ usleep_range(RESET_DELAY_US, RESET_DELAY_US + 1);
/* disable write protect */
- OTS_WriteRead(0x09, 0x5A);
+ ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+ PIXART_PAT9125_DISABLE_WRITE_PROTECT);
/* set X-axis resolution (depends on application) */
- OTS_WriteRead(0x0D, 0x65);
+ ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_X_REG,
+ PIXART_PAT9125_CPI_RESOLUTION_X);
/* set Y-axis resolution (depends on application) */
- OTS_WriteRead(0x0E, 0xFF);
+ ots_write_read(client, PIXART_PAT9125_SET_CPI_RES_Y_REG,
+ PIXART_PAT9125_CPI_RESOLUTION_Y);
/* set 12-bit X/Y data format (depends on application) */
- OTS_WriteRead(0x19, 0x04);
+ ots_write_read(client, PIXART_PAT9125_ORIENTATION_REG,
+ PIXART_PAT9125_MOTION_DATA_LENGTH);
/* ONLY for VDD=VDDA=1.7~1.9V: for power saving */
- OTS_WriteRead(0x4B, 0x04);
+ ots_write_read(client, PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG,
+ PIXART_PAT9125_LOW_VOLTAGE_SEGMENT);
- if (ReadData(0x5E) == 0x04) {
- OTS_WriteRead(0x5E, 0x08);
- if (ReadData(0x5D) == 0x10)
- OTS_WriteRead(0x5D, 0x19);
+ if (read_data(client, PIXART_PAT9125_MISC2_REG) == 0x04) {
+ ots_write_read(client, PIXART_PAT9125_MISC2_REG, 0x08);
+ if (read_data(client, PIXART_PAT9125_MISC1_REG) == 0x10)
+ ots_write_read(client, PIXART_PAT9125_MISC1_REG,
+ 0x19);
}
- OTS_WriteRead(0x09, 0x00);/* enable write protect */
+ /* enable write protect */
+ ots_write_read(client, PIXART_PAT9125_WRITE_PROTECT_REG,
+ PIXART_PAT9125_ENABLE_WRITE_PROTECT);
}
return read_id_ok;
}
-
-static void OTS_WriteRead(uint8_t address, uint8_t wdata)
-{
- uint8_t read_value;
-
- do {
- /* Write data to specified address */
- WriteData(address, wdata);
- /* Read back previous written data */
- read_value = ReadData(address);
- /* Check if the data is correctly written */
- } while (read_value != wdata);
-}
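ots_write_read() verifies each write by reading it back, but the loop is unbounded: a persistently failing bus or a stuck register would spin forever in the probe path. A bounded variant is one defensive alternative; this is a sketch only, not part of the patch, and the retry count is arbitrary:

/* Hypothetical bounded variant of ots_write_read(); not in the patch. */
static int ots_write_read_bounded(struct i2c_client *client, u8 addr,
		u8 wdata)
{
	int retries = 10;	/* arbitrary bound, for illustration */

	do {
		write_data(client, addr, wdata);
		if (read_data(client, addr) == wdata)
			return 0;
	} while (--retries);
	return -EIO;
}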
diff --git a/drivers/input/misc/ots_pat9125/pixart_ots.h b/drivers/input/misc/ots_pat9125/pixart_ots.h
index ba1da1396ad1..91813929d811 100644
--- a/drivers/input/misc/ots_pat9125/pixart_ots.h
+++ b/drivers/input/misc/ots_pat9125/pixart_ots.h
@@ -4,13 +4,45 @@
*
*/
-#ifndef _PIXART_OTS_H_
-#define _PIXART_OTS_H_
+#ifndef __PIXART_OTS_H_
+#define __PIXART_OTS_H_
-#include "pixart_platform.h"
+#define PAT9125_DEV_NAME "pixart_pat9125"
+#define MAX_BUF_SIZE 20
+#define RESET_DELAY_US 1000
-/* export funtions */
-bool OTS_Sensor_Init(void);
-void OTS_Sensor_ReadMotion(int16_t *dx, int16_t *dy);
+/* Register addresses */
+#define PIXART_PAT9125_PRODUCT_ID1_REG 0x00
+#define PIXART_PAT9125_PRODUCT_ID2_REG 0x01
+#define PIXART_PAT9125_MOTION_STATUS_REG 0x02
+#define PIXART_PAT9125_DELTA_X_LO_REG 0x03
+#define PIXART_PAT9125_DELTA_Y_LO_REG 0x04
+#define PIXART_PAT9125_CONFIG_REG 0x06
+#define PIXART_PAT9125_WRITE_PROTECT_REG 0x09
+#define PIXART_PAT9125_SET_CPI_RES_X_REG 0x0D
+#define PIXART_PAT9125_SET_CPI_RES_Y_REG 0x0E
+#define PIXART_PAT9125_DELTA_XY_HI_REG 0x12
+#define PIXART_PAT9125_ORIENTATION_REG 0x19
+#define PIXART_PAT9125_VOLTAGE_SEGMENT_SEL_REG 0x4B
+#define PIXART_PAT9125_SELECT_BANK_REG 0x7F
+#define PIXART_PAT9125_MISC1_REG 0x5D
+#define PIXART_PAT9125_MISC2_REG 0x5E
+/* Register configuration data */
+#define PIXART_PAT9125_SENSOR_ID 0x31
+#define PIXART_PAT9125_RESET 0x97
+#define PIXART_PAT9125_MOTION_DATA_LENGTH 0x04
+#define PIXART_PAT9125_BANK0 0x00
+#define PIXART_PAT9125_DISABLE_WRITE_PROTECT 0x5A
+#define PIXART_PAT9125_ENABLE_WRITE_PROTECT 0x00
+#define PIXART_PAT9125_CPI_RESOLUTION_X 0x65
+#define PIXART_PAT9125_CPI_RESOLUTION_Y 0xFF
+#define PIXART_PAT9125_LOW_VOLTAGE_SEGMENT 0x04
+#define PIXART_PAT9125_VALID_MOTION_DATA 0x80
+
+#define PIXART_SAMPLING_PERIOD_US_MIN 4000
+#define PIXART_SAMPLING_PERIOD_US_MAX 8000
+
+/* Export functions */
+bool ots_sensor_init(struct i2c_client *);
#endif
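The header defines PIXART_PAT9125_DELTA_XY_HI_REG (0x12) even though the IRQ handler reads only the low bytes; with the 12-bit data format selected during init, the high nibbles of both deltas conventionally share that register. A sketch of assembling a sign-extended 12-bit X delta, assuming the common PAT9125 layout (bits 7:4 = delta_x[11:8], bits 3:0 = delta_y[11:8]); the layout is an assumption, not something this patch confirms:

/* Assumes DELTA_XY_HI layout: bits 7:4 = delta_x[11:8],
 * bits 3:0 = delta_y[11:8]. This layout is an assumption here.
 */
static s16 pat9125_delta_x_12bit(u8 lo, u8 hi)
{
	u16 raw = ((hi & 0xF0) << 4) | lo;	/* 12-bit raw value */

	return (s16)(raw << 4) >> 4;		/* sign-extend bit 11 */
}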
diff --git a/drivers/input/misc/ots_pat9125/pixart_platform.h b/drivers/input/misc/ots_pat9125/pixart_platform.h
index a025fd06343e..1fe448fdc2cb 100644
--- a/drivers/input/misc/ots_pat9125/pixart_platform.h
+++ b/drivers/input/misc/ots_pat9125/pixart_platform.h
@@ -4,22 +4,14 @@
*
*/
-#ifndef _PIXART_PLATFORM_
-#define _PIXART_PLATFORM_
+#ifndef __PIXART_PLATFORM_H_
+#define __PIXART_PLATFORM_H_
-#include <linux/input.h>
-#include <linux/pm.h>
-#include <linux/spi/spi.h>
-#include <linux/module.h>
-#include <linux/interrupt.h>
-#include <linux/irq.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
+#include <linux/i2c.h>
#include <linux/delay.h>
-#include <linux/types.h>
/* extern functions */
-extern unsigned char ReadData(unsigned char addr);
-extern void WriteData(unsigned char addr, unsigned char data);
+extern unsigned char read_data(struct i2c_client *, u8 addr);
+extern void write_data(struct i2c_client *, u8 addr, u8 data);
#endif
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index c69927bd4ff2..afa519aa8203 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -1888,8 +1888,6 @@ static struct iommu_domain *arm_smmu_domain_alloc(unsigned type)
return NULL;
smmu_domain->secure_vmid = VMID_INVAL;
- /* disable coherent htw by default */
- smmu_domain->attributes = (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
INIT_LIST_HEAD(&smmu_domain->pte_info_list);
INIT_LIST_HEAD(&smmu_domain->unassign_list);
INIT_LIST_HEAD(&smmu_domain->secure_pool_list);
@@ -2263,15 +2261,6 @@ static int arm_smmu_attach_dev(struct iommu_domain *domain, struct device *dev)
goto err_destroy_domain_context;
}
- if (!(smmu_domain->attributes & (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE))
- && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)) {
- dev_err(dev,
- "Can't attach: this domain wants coherent htw but %s doesn't support it\n",
- dev_name(smmu_domain->smmu->dev));
- ret = -EINVAL;
- goto err_destroy_domain_context;
- }
-
/* Looks ok, so add the device to the domain */
ret = arm_smmu_domain_add_master(smmu_domain, cfg);
if (ret)
@@ -2977,11 +2966,6 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
*(int *)data = (smmu_domain->stage == ARM_SMMU_DOMAIN_NESTED);
ret = 0;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- *((int *)data) = !!(smmu_domain->attributes &
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE));
- ret = 0;
- break;
case DOMAIN_ATTR_SECURE_VMID:
*((int *)data) = smmu_domain->secure_vmid;
ret = 0;
@@ -3083,29 +3067,6 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
else
smmu_domain->stage = ARM_SMMU_DOMAIN_S1;
break;
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- {
- struct arm_smmu_device *smmu;
- int htw_disable = *((int *)data);
-
- smmu = smmu_domain->smmu;
-
- if (smmu && !(smmu->features & ARM_SMMU_FEAT_COHERENT_WALK)
- && !htw_disable) {
- dev_err(smmu->dev,
- "Can't enable coherent htw on this domain: this SMMU doesn't support it\n");
- ret = -EINVAL;
- goto out_unlock;
- }
-
- if (htw_disable)
- smmu_domain->attributes |=
- (1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- else
- smmu_domain->attributes &=
- ~(1 << DOMAIN_ATTR_COHERENT_HTW_DISABLE);
- break;
- }
case DOMAIN_ATTR_SECURE_VMID:
BUG_ON(smmu_domain->secure_vmid != VMID_INVAL);
smmu_domain->secure_vmid = *((int *)data);
diff --git a/drivers/iommu/dma-mapping-fast.c b/drivers/iommu/dma-mapping-fast.c
index ea8db1a431d0..266f7065fca4 100644
--- a/drivers/iommu/dma-mapping-fast.c
+++ b/drivers/iommu/dma-mapping-fast.c
@@ -649,7 +649,7 @@ err:
int fast_smmu_attach_device(struct device *dev,
struct dma_iommu_mapping *mapping)
{
- int htw_disable = 1, atomic_domain = 1;
+ int atomic_domain = 1;
struct iommu_domain *domain = mapping->domain;
struct iommu_pgtbl_info info;
size_t size = mapping->bits << PAGE_SHIFT;
@@ -657,10 +657,6 @@ int fast_smmu_attach_device(struct device *dev,
if (mapping->base + size > (SZ_1G * 4ULL))
return -EINVAL;
- if (iommu_domain_set_attr(domain, DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable))
- return -EINVAL;
-
if (iommu_domain_set_attr(domain, DOMAIN_ATTR_ATOMIC,
&atomic_domain))
return -EINVAL;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 4036997f49c7..3333f15f7f16 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -363,7 +363,7 @@ static int arm_lpae_init_pte(struct arm_lpae_io_pgtable *data,
else
pte |= ARM_LPAE_PTE_TYPE_BLOCK;
- pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_IS;
+ pte |= ARM_LPAE_PTE_AF | ARM_LPAE_PTE_SH_OS;
pte |= pfn_to_iopte(paddr >> data->pg_shift, data);
*ptep = pte;
@@ -940,7 +940,7 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN0_SHIFT);
else
- reg = (ARM_LPAE_TCR_SH_IS << ARM_LPAE_TCR_SH0_SHIFT) |
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH0_SHIFT) |
(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN0_SHIFT) |
(ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN0_SHIFT);
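With coherent hardware table walks gone, both the PTE and TCR shareability move from Inner to Outer Shareable. For orientation, the LPAE shareability field occupies bits 9:8 of the descriptor; the constants below match upstream io-pgtable-arm.c but are quoted from memory, so treat them as reference rather than part of this patch:

/* LPAE descriptor shareability encodings (bits 9:8): */
#define ARM_LPAE_PTE_SH_NS	(((arm_lpae_iopte)0) << 8)	/* Non-shareable   */
#define ARM_LPAE_PTE_SH_OS	(((arm_lpae_iopte)2) << 8)	/* Outer Shareable */
#define ARM_LPAE_PTE_SH_IS	(((arm_lpae_iopte)3) << 8)	/* Inner Shareable */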
diff --git a/drivers/iommu/iommu-debug.c b/drivers/iommu/iommu-debug.c
index a0227fd05939..3b54fd4a77e6 100644
--- a/drivers/iommu/iommu-debug.c
+++ b/drivers/iommu/iommu-debug.c
@@ -48,8 +48,6 @@ static const char *iommu_debug_attr_to_string(enum iommu_attr attr)
return "DOMAIN_ATTR_FSL_PAMUV1";
case DOMAIN_ATTR_NESTING:
return "DOMAIN_ATTR_NESTING";
- case DOMAIN_ATTR_COHERENT_HTW_DISABLE:
- return "DOMAIN_ATTR_COHERENT_HTW_DISABLE";
case DOMAIN_ATTR_PT_BASE_ADDR:
return "DOMAIN_ATTR_PT_BASE_ADDR";
case DOMAIN_ATTR_SECURE_VMID:
@@ -96,7 +94,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
{
struct iommu_debug_attachment *attach = s->private;
phys_addr_t pt_phys;
- int coherent_htw_disable;
int secure_vmid;
seq_printf(s, "Domain: 0x%p\n", attach->domain);
@@ -110,14 +107,6 @@ static int iommu_debug_attachment_info_show(struct seq_file *s, void *ignored)
pt_virt, &pt_phys);
}
- seq_puts(s, "COHERENT_HTW_DISABLE: ");
- if (iommu_domain_get_attr(attach->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &coherent_htw_disable))
- seq_puts(s, "(Unknown)\n");
- else
- seq_printf(s, "%d\n", coherent_htw_disable);
-
seq_puts(s, "SECURE_VMID: ");
if (iommu_domain_get_attr(attach->domain,
DOMAIN_ATTR_SECURE_VMID,
@@ -733,7 +722,6 @@ static int iommu_debug_profiling_show(struct seq_file *s, void *ignored)
const size_t sizes[] = { SZ_4K, SZ_64K, SZ_2M, SZ_1M * 12,
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int htw_disable = 1, atomic = 1;
@@ -764,7 +752,6 @@ static int iommu_debug_secure_profiling_show(struct seq_file *s, void *ignored)
SZ_1M * 20, 0 };
enum iommu_attr attrs[] = {
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
DOMAIN_ATTR_SECURE_VMID,
};
@@ -797,7 +784,6 @@ static int iommu_debug_profiling_fast_show(struct seq_file *s, void *ignored)
size_t sizes[] = {SZ_4K, SZ_8K, SZ_16K, SZ_64K, 0};
enum iommu_attr attrs[] = {
DOMAIN_ATTR_FAST,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_ATOMIC,
};
int one = 1;
@@ -1507,7 +1493,6 @@ static const struct file_operations iommu_debug_functional_arm_dma_api_fops = {
static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
int val, bool is_secure)
{
- int htw_disable = 1;
struct bus_type *bus;
bus = msm_iommu_get_bus(ddev->dev);
@@ -1520,13 +1505,6 @@ static int iommu_debug_attach_do_attach(struct iommu_debug_device *ddev,
return -ENOMEM;
}
- if (iommu_domain_set_attr(ddev->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &htw_disable)) {
- pr_err("Couldn't disable coherent htw\n");
- goto out_domain_free;
- }
-
if (is_secure && iommu_domain_set_attr(ddev->domain,
DOMAIN_ATTR_SECURE_VMID,
&val)) {
diff --git a/drivers/iommu/msm_dma_iommu_mapping.c b/drivers/iommu/msm_dma_iommu_mapping.c
index 0a8728ce36dc..25fe36ab6339 100644
--- a/drivers/iommu/msm_dma_iommu_mapping.c
+++ b/drivers/iommu/msm_dma_iommu_mapping.c
@@ -17,6 +17,7 @@
#include <linux/rbtree.h>
#include <linux/mutex.h>
#include <linux/err.h>
+#include <asm/barrier.h>
#include <linux/msm_dma_iommu_mapping.h>
@@ -216,10 +217,13 @@ static inline int __msm_dma_map_sg(struct device *dev, struct scatterlist *sg,
sg->dma_length = iommu_map->sgl.dma_length;
kref_get(&iommu_map->ref);
- /*
- * Need to do cache operations here based on "dir" in the
- * future if we go with coherent mappings.
- */
+ if (is_device_dma_coherent(dev))
+ /*
+ * Ensure all outstanding changes for coherent
+ * buffers are applied to the cache before any
+ * DMA occurs.
+ */
+ dmb(ish);
ret = nents;
}
mutex_unlock(&iommu_meta->lock);
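The old placeholder comment becomes a real barrier: for devices marked DMA-coherent, dmb(ish) orders the CPU's prior stores within the inner-shareable domain before the device can observe the mapped buffer. The pattern in isolation (a sketch; the real call sits inside __msm_dma_map_sg() as shown above):

/* Isolated sketch of the publish-before-DMA pattern added above. */
static void publish_before_dma(struct device *dev)
{
	if (is_device_dma_coherent(dev))
		dmb(ish);	/* prior writes visible before DMA starts */
}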
diff --git a/drivers/leds/leds-qpnp-flash-v2.c b/drivers/leds/leds-qpnp-flash-v2.c
index 0cae5d2e5263..bc94dff08d21 100644
--- a/drivers/leds/leds-qpnp-flash-v2.c
+++ b/drivers/leds/leds-qpnp-flash-v2.c
@@ -48,6 +48,7 @@
#define FLASH_LED_REG_THERMAL_THRSH3(base) (base + 0x58)
#define FLASH_LED_REG_VPH_DROOP_THRESHOLD(base) (base + 0x61)
#define FLASH_LED_REG_VPH_DROOP_DEBOUNCE(base) (base + 0x62)
+#define FLASH_LED_REG_ILED_GRT_THRSH(base) (base + 0x67)
#define FLASH_LED_REG_MITIGATION_SEL(base) (base + 0x6E)
#define FLASH_LED_REG_MITIGATION_SW(base) (base + 0x6F)
#define FLASH_LED_REG_LMH_LEVEL(base) (base + 0x70)
@@ -62,22 +63,25 @@
#define FLASH_LED_ISC_WARMUP_DELAY_MASK GENMASK(1, 0)
#define FLASH_LED_CURRENT_DERATE_EN_MASK GENMASK(2, 0)
#define FLASH_LED_VPH_DROOP_DEBOUNCE_MASK GENMASK(1, 0)
+#define FLASH_LED_CHGR_MITIGATION_SEL_MASK GENMASK(5, 4)
#define FLASH_LED_LMH_MITIGATION_SEL_MASK GENMASK(1, 0)
+#define FLASH_LED_ILED_GRT_THRSH_MASK GENMASK(5, 0)
#define FLASH_LED_LMH_LEVEL_MASK GENMASK(1, 0)
#define FLASH_LED_VPH_DROOP_HYSTERESIS_MASK GENMASK(5, 4)
#define FLASH_LED_VPH_DROOP_THRESHOLD_MASK GENMASK(2, 0)
#define FLASH_LED_THERMAL_THRSH_MASK GENMASK(2, 0)
#define FLASH_LED_THERMAL_OTST_MASK GENMASK(2, 0)
-#define FLASH_LED_PREPARE_OPTIONS_MASK GENMASK(2, 0)
#define FLASH_LED_MOD_CTRL_MASK BIT(7)
#define FLASH_LED_HW_SW_STROBE_SEL_MASK BIT(2)
#define FLASH_LED_VPH_DROOP_FAULT_MASK BIT(4)
#define FLASH_LED_LMH_MITIGATION_EN_MASK BIT(0)
+#define FLASH_LED_CHGR_MITIGATION_EN_MASK BIT(4)
#define VPH_DROOP_DEBOUNCE_US_TO_VAL(val_us) (val_us / 8)
#define VPH_DROOP_HYST_MV_TO_VAL(val_mv) (val_mv / 25)
#define VPH_DROOP_THRESH_MV_TO_VAL(val_mv) ((val_mv / 100) - 25)
#define VPH_DROOP_THRESH_VAL_TO_UV(val) ((val + 25) * 100000)
+#define MITIGATION_THRSH_MA_TO_VAL(val_ma) (val_ma / 100)
#define FLASH_LED_ISC_WARMUP_DELAY_SHIFT 6
#define FLASH_LED_WARMUP_DELAY_DEFAULT 2
@@ -99,8 +103,13 @@
#define FLASH_LED_LMH_LEVEL_DEFAULT 0
#define FLASH_LED_LMH_MITIGATION_ENABLE 1
#define FLASH_LED_LMH_MITIGATION_DISABLE 0
-#define FLASH_LED_LMH_MITIGATION_SEL_DEFAULT 2
-#define FLASH_LED_LMH_MITIGATION_SEL_MAX 2
+#define FLASH_LED_CHGR_MITIGATION_ENABLE BIT(4)
+#define FLASH_LED_CHGR_MITIGATION_DISABLE 0
+#define FLASH_LED_MITIGATION_SEL_DEFAULT 2
+#define FLASH_LED_MITIGATION_SEL_MAX 2
+#define FLASH_LED_CHGR_MITIGATION_SEL_SHIFT 4
+#define FLASH_LED_MITIGATION_THRSH_DEFAULT 0xA
+#define FLASH_LED_MITIGATION_THRSH_MAX 0x1F
#define FLASH_LED_LMH_OCV_THRESH_DEFAULT_UV 3700000
#define FLASH_LED_LMH_RBATT_THRESH_DEFAULT_UOHM 400000
#define FLASH_LED_IRES_BASE 3
@@ -199,7 +208,9 @@ struct flash_led_platform_data {
u8 vph_droop_hysteresis;
u8 vph_droop_debounce;
u8 lmh_mitigation_sel;
+ u8 chgr_mitigation_sel;
u8 lmh_level;
+ u8 iled_thrsh_val;
u8 hw_strobe_option;
bool hdrm_auto_mode_en;
bool thermal_derate_en;
@@ -222,6 +233,7 @@ struct qpnp_flash_led {
int enable;
u16 base;
bool trigger_lmh;
+ bool trigger_chgr;
};
static int
@@ -352,12 +364,26 @@ static int qpnp_flash_led_init_settings(struct qpnp_flash_led *led)
return rc;
rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MITIGATION_SEL(led->base),
+ FLASH_LED_CHGR_MITIGATION_SEL_MASK,
+ led->pdata->chgr_mitigation_sel);
+ if (rc < 0)
+ return rc;
+
+ rc = qpnp_flash_led_masked_write(led,
FLASH_LED_REG_LMH_LEVEL(led->base),
FLASH_LED_LMH_LEVEL_MASK,
led->pdata->lmh_level);
if (rc < 0)
return rc;
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_ILED_GRT_THRSH(led->base),
+ FLASH_LED_ILED_GRT_THRSH_MASK,
+ led->pdata->iled_thrsh_val);
+ if (rc < 0)
+ return rc;
+
return 0;
}
@@ -739,6 +765,18 @@ static int qpnp_flash_led_switch_disable(struct flash_switch_data *snode)
}
}
+ if (!led->trigger_chgr) {
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MITIGATION_SW(led->base),
+ FLASH_LED_CHGR_MITIGATION_EN_MASK,
+ FLASH_LED_CHGR_MITIGATION_DISABLE);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "disable chgr mitigation failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
led->enable--;
if (led->enable == 0) {
rc = qpnp_flash_led_masked_write(led,
@@ -892,6 +930,18 @@ static int qpnp_flash_led_switch_set(struct flash_switch_data *snode, bool on)
}
}
+ if (led->trigger_chgr) {
+ rc = qpnp_flash_led_masked_write(led,
+ FLASH_LED_REG_MITIGATION_SW(led->base),
+ FLASH_LED_CHGR_MITIGATION_EN_MASK,
+ FLASH_LED_CHGR_MITIGATION_ENABLE);
+ if (rc < 0) {
+ dev_err(&led->pdev->dev, "trigger chgr mitigation failed, rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
rc = qpnp_flash_led_masked_write(led,
FLASH_LED_EN_LED_CTRL(led->base),
snode->led_mask, val);
@@ -951,6 +1001,10 @@ int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
*max_current = rc;
}
+ led->trigger_chgr = false;
+ if (options & PRE_FLASH)
+ led->trigger_chgr = true;
+
return 0;
}
@@ -959,17 +1013,24 @@ static void qpnp_flash_led_brightness_set(struct led_classdev *led_cdev,
{
struct flash_node_data *fnode = NULL;
struct flash_switch_data *snode = NULL;
- struct qpnp_flash_led *led = dev_get_drvdata(&fnode->pdev->dev);
+ struct qpnp_flash_led *led = NULL;
int rc;
if (!strncmp(led_cdev->name, "led:switch", strlen("led:switch"))) {
snode = container_of(led_cdev, struct flash_switch_data, cdev);
led = dev_get_drvdata(&snode->pdev->dev);
- } else {
+ } else if (!strncmp(led_cdev->name, "led:flash", strlen("led:flash")) ||
+ !strncmp(led_cdev->name, "led:torch",
+ strlen("led:torch"))) {
fnode = container_of(led_cdev, struct flash_node_data, cdev);
led = dev_get_drvdata(&fnode->pdev->dev);
}
+ if (!led) {
+ pr_err("Failed to get flash driver data\n");
+ return;
+ }
+
spin_lock(&led->lock);
if (snode) {
rc = qpnp_flash_led_switch_set(snode, value > 0);
@@ -1649,7 +1710,7 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
return rc;
}
- led->pdata->lmh_mitigation_sel = FLASH_LED_LMH_MITIGATION_SEL_DEFAULT;
+ led->pdata->lmh_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
rc = of_property_read_u32(node, "qcom,lmh-mitigation-sel", &val);
if (!rc) {
led->pdata->lmh_mitigation_sel = val;
@@ -1659,11 +1720,43 @@ static int qpnp_flash_led_parse_common_dt(struct qpnp_flash_led *led,
return rc;
}
- if (led->pdata->lmh_mitigation_sel > FLASH_LED_LMH_MITIGATION_SEL_MAX) {
+ if (led->pdata->lmh_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
dev_err(&led->pdev->dev, "Invalid lmh_mitigation_sel specified\n");
return -EINVAL;
}
+ led->pdata->chgr_mitigation_sel = FLASH_LED_MITIGATION_SEL_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,chgr-mitigation-sel", &val);
+ if (!rc) {
+ led->pdata->chgr_mitigation_sel = val;
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to parse chgr_mitigation_sel, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (led->pdata->chgr_mitigation_sel > FLASH_LED_MITIGATION_SEL_MAX) {
+ dev_err(&led->pdev->dev, "Invalid chgr_mitigation_sel specified\n");
+ return -EINVAL;
+ }
+
+ led->pdata->chgr_mitigation_sel <<= FLASH_LED_CHGR_MITIGATION_SEL_SHIFT;
+
+ led->pdata->iled_thrsh_val = FLASH_LED_MITIGATION_THRSH_DEFAULT;
+ rc = of_property_read_u32(node, "qcom,iled-thrsh-ma", &val);
+ if (!rc) {
+ led->pdata->iled_thrsh_val = MITIGATION_THRSH_MA_TO_VAL(val);
+ } else if (rc != -EINVAL) {
+ dev_err(&led->pdev->dev, "Unable to parse iled_thrsh_val, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ if (led->pdata->iled_thrsh_val > FLASH_LED_MITIGATION_THRSH_MAX) {
+ dev_err(&led->pdev->dev, "Invalid iled_thrsh_val specified\n");
+ return -EINVAL;
+ }
+
led->pdata->all_ramp_up_done_irq =
of_irq_get_byname(node, "all-ramp-up-done-irq");
if (led->pdata->all_ramp_up_done_irq < 0)
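The new qcom,iled-thrsh-ma property is scaled by MITIGATION_THRSH_MA_TO_VAL(), i.e. 100 mA per LSB, so the 0xA default corresponds to 1000 mA and the 0x1F ceiling to 3100 mA. A trivial check of that mapping:

/* 100 mA/LSB: 1000 mA -> 0xA (default), 3100 mA -> 0x1F (max). */
static inline u8 iled_thrsh_from_ma(u32 val_ma)
{
	return MITIGATION_THRSH_MA_TO_VAL(val_ma);	/* val_ma / 100 */
}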
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 03a61407aef8..feede3a14e07 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -1427,7 +1427,6 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
if (!cb || !dev) {
pr_err("Error: invalid input params\n");
@@ -1465,21 +1464,7 @@ static int cam_smmu_setup_cb(struct cam_context_bank_info *cb,
goto end;
}
- /*
- * Set the domain attributes
- * disable L2 redirect since it decreases
- * performance
- */
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- pr_err("Error: couldn't disable coherent HTW\n");
- rc = -ENODEV;
- goto err_set_attr;
- }
return 0;
-err_set_attr:
- arm_iommu_release_mapping(cb->mapping);
end:
return rc;
}
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 3ac4c3af3208..258e08c1b34f 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -114,6 +114,13 @@ static int msm_cpp_update_gdscr_status(struct cpp_device *cpp_dev,
bool status);
static int msm_cpp_buffer_private_ops(struct cpp_device *cpp_dev,
uint32_t buff_mgr_ops, uint32_t id, void *arg);
+static void msm_cpp_set_micro_irq_mask(struct cpp_device *cpp_dev,
+ uint8_t enable, uint32_t irq_mask);
+static void msm_cpp_flush_queue_and_release_buffer(struct cpp_device *cpp_dev,
+ int queue_len);
+static int msm_cpp_dump_frame_cmd(struct msm_cpp_frame_info_t *frame_info);
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info);
#if CONFIG_MSM_CPP_DBG
#define CPP_DBG(fmt, args...) pr_err(fmt, ##args)
@@ -636,6 +643,127 @@ static int32_t msm_cpp_poll_rx_empty(void __iomem *cpp_base)
return rc;
}
+static int msm_cpp_dump_addr(struct cpp_device *cpp_dev,
+ struct msm_cpp_frame_info_t *frame_info)
+{
+ int32_t s_base, p_base;
+ uint32_t rd_off, wr0_off, wr1_off, wr2_off, wr3_off;
+ uint32_t wr0_mdata_off, wr1_mdata_off, wr2_mdata_off, wr3_mdata_off;
+ uint32_t rd_ref_off, wr_ref_off;
+ uint32_t s_size, p_size;
+ uint8_t tnr_enabled, ubwc_enabled, cds_en;
+ int32_t i = 0;
+ uint32_t *cpp_frame_msg;
+
+ cpp_frame_msg = frame_info->cpp_cmd_msg;
+
+ /* Update stripe/plane size and base offsets */
+ s_base = cpp_dev->payload_params.stripe_base;
+ s_size = cpp_dev->payload_params.stripe_size;
+ p_base = cpp_dev->payload_params.plane_base;
+ p_size = cpp_dev->payload_params.plane_size;
+
+ /* Fetch engine Offset */
+ rd_off = cpp_dev->payload_params.rd_pntr_off;
+ /* Write engine offsets */
+ wr0_off = cpp_dev->payload_params.wr_0_pntr_off;
+ wr1_off = wr0_off + 1;
+ wr2_off = wr1_off + 1;
+ wr3_off = wr2_off + 1;
+ /* Reference engine offsets */
+ rd_ref_off = cpp_dev->payload_params.rd_ref_pntr_off;
+ wr_ref_off = cpp_dev->payload_params.wr_ref_pntr_off;
+ /* Meta data offsets */
+ wr0_mdata_off =
+ cpp_dev->payload_params.wr_0_meta_data_wr_pntr_off;
+ wr1_mdata_off = (wr0_mdata_off + 1);
+ wr2_mdata_off = (wr1_mdata_off + 1);
+ wr3_mdata_off = (wr2_mdata_off + 1);
+
+ tnr_enabled = ((frame_info->feature_mask & TNR_MASK) >> 2);
+ ubwc_enabled = ((frame_info->feature_mask & UBWC_MASK) >> 5);
+ cds_en = ((frame_info->feature_mask & CDS_MASK) >> 6);
+
+ for (i = 0; i < frame_info->num_strips; i++) {
+ pr_err("stripe %d: in %x, out1 %x out2 %x, out3 %x, out4 %x\n",
+ i, cpp_frame_msg[s_base + rd_off + i * s_size],
+ cpp_frame_msg[s_base + wr0_off + i * s_size],
+ cpp_frame_msg[s_base + wr1_off + i * s_size],
+ cpp_frame_msg[s_base + wr2_off + i * s_size],
+ cpp_frame_msg[s_base + wr3_off + i * s_size]);
+
+ if (tnr_enabled) {
+ pr_err("stripe %d: read_ref %x, write_ref %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size],
+ cpp_frame_msg[s_base + wr_ref_off + i * s_size]
+ );
+ }
+
+ if (cds_en) {
+ pr_err("stripe %d:, dsdn_off %x\n", i,
+ cpp_frame_msg[s_base + rd_ref_off + i * s_size]
+ );
+ }
+
+ if (ubwc_enabled) {
+ pr_err("stripe %d: metadata %x, %x, %x, %x\n", i,
+ cpp_frame_msg[s_base + wr0_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr1_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr2_mdata_off +
+ i * s_size],
+ cpp_frame_msg[s_base + wr3_mdata_off +
+ i * s_size]
+ );
+ }
+
+ }
+ return 0;
+}
+
+static void msm_cpp_iommu_fault_handler(struct iommu_domain *domain,
+ struct device *dev, unsigned long iova, int flags, void *token)
+{
+ struct cpp_device *cpp_dev = NULL;
+ struct msm_cpp_frame_info_t *processed_frame[MAX_CPP_PROCESSING_FRAME];
+ int32_t i = 0, queue_len = 0;
+ struct msm_device_queue *queue = NULL;
+
+ if (token) {
+ cpp_dev = token;
+ disable_irq(cpp_dev->irq->start);
+ if (atomic_read(&cpp_timer.used)) {
+ atomic_set(&cpp_timer.used, 0);
+ del_timer_sync(&cpp_timer.cpp_timer);
+ }
+ mutex_lock(&cpp_dev->mutex);
+ tasklet_kill(&cpp_dev->cpp_tasklet);
+ cpp_load_fw(cpp_dev, cpp_dev->fw_name_bin);
+ queue = &cpp_timer.data.cpp_dev->processing_q;
+ queue_len = queue->len;
+ if (!queue_len) {
+ pr_err("%s:%d: Invalid queuelen\n", __func__, __LINE__);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ return;
+ }
+ for (i = 0; i < queue_len; i++) {
+ if (cpp_timer.data.processed_frame[i]) {
+ processed_frame[i] =
+ cpp_timer.data.processed_frame[i];
+ pr_err("Fault on identity=0x%x, frame_id=%03d\n",
+ processed_frame[i]->identity,
+ processed_frame[i]->frame_id);
+ msm_cpp_dump_addr(cpp_dev, processed_frame[i]);
+ msm_cpp_dump_frame_cmd(processed_frame[i]);
+ }
+ }
+ msm_cpp_flush_queue_and_release_buffer(cpp_dev, queue_len);
+ msm_cpp_set_micro_irq_mask(cpp_dev, 1, 0x8);
+ mutex_unlock(&cpp_dev->mutex);
+ }
+}
static int cpp_init_mem(struct cpp_device *cpp_dev)
{
@@ -652,6 +780,9 @@ static int cpp_init_mem(struct cpp_device *cpp_dev)
return -ENODEV;
cpp_dev->iommu_hdl = iommu_hdl;
+ cam_smmu_reg_client_page_fault_handler(
+ cpp_dev->iommu_hdl,
+ msm_cpp_iommu_fault_handler, cpp_dev);
return 0;
}
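cpp_init_mem() now registers msm_cpp_iommu_fault_handler() through the cam_smmu layer; on a fault the handler quiesces the CPP, dumps every in-flight frame's stripe addresses and command message, then flushes the queue. A minimal handler skeleton matching the signature used above; the body here is illustrative only:

/* Skeleton with the same signature as msm_cpp_iommu_fault_handler(). */
static void example_fault_handler(struct iommu_domain *domain,
		struct device *dev, unsigned long iova, int flags,
		void *token)
{
	struct cpp_device *cpp_dev = token;	/* set at registration time */

	dev_err(dev, "iommu fault at iova 0x%lx, flags 0x%x\n", iova, flags);
	(void)cpp_dev;	/* the real handler dumps and flushes the queue */
}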
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 7bbd8aa53342..c11c4b61d832 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -448,7 +448,6 @@ int sde_smmu_probe(struct platform_device *pdev)
struct sde_smmu_domain smmu_domain;
const struct of_device_id *match;
struct sde_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
if (!mdata) {
@@ -535,13 +534,6 @@ int sde_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(sde_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- SDEROT_ERR("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == SDE_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/media/platform/msm/vidc/msm_venc.c b/drivers/media/platform/msm/vidc/msm_venc.c
index 9c855e89e3ff..f071aae3ccab 100644
--- a/drivers/media/platform/msm/vidc/msm_venc.c
+++ b/drivers/media/platform/msm/vidc/msm_venc.c
@@ -1400,6 +1400,49 @@ static struct msm_vidc_format venc_formats[] = {
},
};
+static void msm_venc_update_plane_count(struct msm_vidc_inst *inst, int type)
+{
+ struct v4l2_ctrl *ctrl = NULL;
+ u32 extradata = 0;
+
+ if (!inst)
+ return;
+
+ inst->fmts[type].num_planes = 1;
+
+ ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
+ V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
+
+ if (ctrl)
+ extradata = v4l2_ctrl_g_ctrl(ctrl);
+
+ if (type == CAPTURE_PORT) {
+ switch (extradata) {
+ case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
+ case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
+ case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
+ case V4L2_MPEG_VIDC_EXTRADATA_LTR:
+ case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
+ inst->fmts[CAPTURE_PORT].num_planes = 2;
+ default:
+ break;
+ }
+ } else if (type == OUTPUT_PORT) {
+ switch (extradata) {
+ case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
+ case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
+ case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
+ case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
+ case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
+ case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
+ inst->fmts[OUTPUT_PORT].num_planes = 2;
+ break;
+ default:
+ break;
+ }
+ }
+}
+
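+/*
+ * Example: if V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA selects
+ * V4L2_MPEG_VIDC_EXTRADATA_ROI_QP, then on OUTPUT_PORT plane 0 stays
+ * the YUV payload and plane 1 carries the extradata, so num_planes
+ * becomes 2; any extradata outside the lists above leaves it at 1.
+ */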
static int msm_venc_set_csc(struct msm_vidc_inst *inst);
static int msm_venc_queue_setup(struct vb2_queue *q,
@@ -1414,8 +1457,7 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
enum hal_property property_id;
struct hfi_device *hdev;
struct hal_buffer_requirements *buff_req;
- struct v4l2_ctrl *ctrl = NULL;
- u32 extradata = 0, extra_idx = 0;
+ u32 extra_idx = 0;
struct hal_buffer_requirements *buff_req_buffer = NULL;
if (!q || !q->drv_priv) {
@@ -1471,21 +1513,8 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
temp, *num_buffers);
}
- ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
- V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
- if (ctrl)
- extradata = v4l2_ctrl_g_ctrl(ctrl);
- switch (extradata) {
- case V4L2_MPEG_VIDC_EXTRADATA_MULTISLICE_INFO:
- case V4L2_MPEG_VIDC_EXTRADATA_NUM_CONCEALED_MB:
- case V4L2_MPEG_VIDC_EXTRADATA_METADATA_FILLER:
- case V4L2_MPEG_VIDC_EXTRADATA_LTR:
- case V4L2_MPEG_VIDC_EXTRADATA_METADATA_MBI:
- *num_planes = *num_planes + 1;
- default:
- break;
- }
- inst->fmts[CAPTURE_PORT].num_planes = *num_planes;
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ *num_planes = inst->fmts[CAPTURE_PORT].num_planes;
for (i = 0; i < *num_planes; i++) {
int extra_idx = EXTRADATA_IDX(*num_planes);
@@ -1543,24 +1572,9 @@ static int msm_venc_queue_setup(struct vb2_queue *q,
dprintk(VIDC_DBG, "actual input buffer count set to fw = %d\n",
*num_buffers);
- ctrl = v4l2_ctrl_find(&inst->ctrl_handler,
- V4L2_CID_MPEG_VIDC_VIDEO_EXTRADATA);
- if (ctrl)
- extradata = v4l2_ctrl_g_ctrl(ctrl);
- switch (extradata) {
- case V4L2_MPEG_VIDC_EXTRADATA_INPUT_CROP:
- case V4L2_MPEG_VIDC_EXTRADATA_DIGITAL_ZOOM:
- case V4L2_MPEG_VIDC_EXTRADATA_ASPECT_RATIO:
- case V4L2_MPEG_VIDC_EXTRADATA_YUV_STATS:
- case V4L2_MPEG_VIDC_EXTRADATA_ROI_QP:
- case V4L2_MPEG_VIDC_EXTRADATA_PQ_INFO:
- *num_planes = *num_planes + 1;
- break;
- default:
- break;
- }
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ *num_planes = inst->fmts[OUTPUT_PORT].num_planes;
- inst->fmts[OUTPUT_PORT].num_planes = *num_planes;
rc = call_hfi_op(hdev, session_set_property, inst->session,
property_id, &new_buf_count);
if (rc)
@@ -3629,6 +3643,9 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ fmt->num_planes = inst->fmts[CAPTURE_PORT].num_planes;
+
rc = msm_comm_try_state(inst, MSM_VIDC_OPEN_DONE);
if (rc) {
dprintk(VIDC_ERR, "Failed to open instance\n");
@@ -3682,6 +3699,9 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
memcpy(&inst->fmts[fmt->type], fmt,
sizeof(struct msm_vidc_format));
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ fmt->num_planes = inst->fmts[OUTPUT_PORT].num_planes;
+
msm_comm_set_color_format(inst, HAL_BUFFER_INPUT, fmt->fourcc);
} else {
dprintk(VIDC_ERR, "%s - Unsupported buf type: %d\n",
@@ -3690,7 +3710,7 @@ int msm_venc_s_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
goto exit;
}
- f->fmt.pix_mp.num_planes = inst->fmts[fmt->type].num_planes;
+ f->fmt.pix_mp.num_planes = fmt->num_planes;
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE) {
struct hal_frame_size frame_sz = {0};
@@ -3743,7 +3763,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
const struct msm_vidc_format *fmt = NULL;
int rc = 0;
int i;
- u32 height, width;
+ u32 height, width, num_planes;
unsigned int extra_idx = 0;
struct hal_buffer_requirements *bufreq = NULL;
@@ -3764,10 +3784,14 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
fmt = &inst->fmts[CAPTURE_PORT];
height = inst->prop.height[CAPTURE_PORT];
width = inst->prop.width[CAPTURE_PORT];
+ msm_venc_update_plane_count(inst, CAPTURE_PORT);
+ num_planes = inst->fmts[CAPTURE_PORT].num_planes;
} else if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
fmt = &inst->fmts[OUTPUT_PORT];
height = inst->prop.height[OUTPUT_PORT];
width = inst->prop.width[OUTPUT_PORT];
+ msm_venc_update_plane_count(inst, OUTPUT_PORT);
+ num_planes = inst->fmts[OUTPUT_PORT].num_planes;
} else {
dprintk(VIDC_ERR, "Invalid type: %x\n", f->type);
return -ENOTSUPP;
@@ -3776,10 +3800,10 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
f->fmt.pix_mp.pixelformat = fmt->fourcc;
f->fmt.pix_mp.height = height;
f->fmt.pix_mp.width = width;
- f->fmt.pix_mp.num_planes = fmt->num_planes;
+ f->fmt.pix_mp.num_planes = num_planes;
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
f->fmt.pix_mp.plane_fmt[i].sizeimage =
fmt->get_frame_size(i, height, width);
}
@@ -3790,7 +3814,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
f->fmt.pix_mp.plane_fmt[0].sizeimage =
bufreq ? bufreq->buffer_size : 0;
}
- extra_idx = EXTRADATA_IDX(fmt->num_planes);
+ extra_idx = EXTRADATA_IDX(num_planes);
if (extra_idx && (extra_idx < VIDEO_MAX_PLANES)) {
if (f->type == V4L2_BUF_TYPE_VIDEO_CAPTURE_MPLANE)
bufreq = get_buff_req_buffer(inst,
@@ -3803,7 +3827,7 @@ int msm_venc_g_fmt(struct msm_vidc_inst *inst, struct v4l2_format *f)
bufreq ? bufreq->buffer_size : 0;
}
- for (i = 0; i < fmt->num_planes; ++i) {
+ for (i = 0; i < num_planes; ++i) {
if (f->type == V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE) {
inst->bufq[OUTPUT_PORT].vb2_bufq.plane_sizes[i] =
f->fmt.pix_mp.plane_fmt[i].sizeimage;
@@ -4057,4 +4081,3 @@ int msm_venc_ctrl_init(struct msm_vidc_inst *inst)
return msm_comm_ctrl_init(inst, msm_venc_ctrls,
ARRAY_SIZE(msm_venc_ctrls), &msm_venc_ctrl_ops);
}
-
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
index 0e62811bf41b..3cd1c38f8f37 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_dcvs.c
@@ -599,8 +599,8 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
goto dcvs_decision_done;
}
if (msm_comm_turbo_session(inst) ||
- !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
- instance_count > 1))
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
is_dcvs_supported = false;
}
if (inst->session_type == MSM_VIDC_ENCODER) {
@@ -617,8 +617,8 @@ static bool msm_dcvs_check_supported(struct msm_vidc_inst *inst)
goto dcvs_decision_done;
}
if (msm_comm_turbo_session(inst) ||
- !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit ||
- instance_count > 1))
+ !IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) ||
+ instance_count > 1)
is_dcvs_supported = false;
}
dcvs_decision_done:
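The two hunks above fix a misplaced parenthesis: `dcvs_limit || instance_count > 1` was evaluated first and passed to the macro as its limit argument. Assuming the macro simply compares the load against the limit (definition below is a sketch, for illustration only), the old form compared the load against 0 or 1 instead of against dcvs_limit, and the instance-count check never ran on its own:

/* hypothetical shape of the macro */
#define IS_VALID_DCVS_SESSION(load, limit)	((load) > (limit))

/* before: the limit argument collapses to a boolean (0 or 1) */
!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit || instance_count > 1)

/* after: the load/limit test and the instance-count test are independent */
!IS_VALID_DCVS_SESSION(instance_load, dcvs_limit) || instance_count > 1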
diff --git a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
index 25fccab99fb3..a3080be8cd7a 100644
--- a/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
+++ b/drivers/media/platform/msm/vidc/msm_vidc_res_parse.c
@@ -1166,7 +1166,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
struct device *dev)
{
int rc = 0;
- int disable_htw = 1;
int secure_vmid = VMID_INVAL;
struct bus_type *bus;
@@ -1192,14 +1191,6 @@ static int msm_vidc_setup_context_bank(struct context_bank_info *cb,
goto remove_cb;
}
- rc = iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR, "%s - disable coherent HTW failed: %s %d\n",
- __func__, dev_name(dev), rc);
- goto release_mapping;
- }
-
if (cb->is_secure) {
secure_vmid = get_secure_vmid(cb);
rc = iommu_domain_set_attr(cb->mapping->domain,
diff --git a/drivers/media/platform/msm/vidc/venus_boot.c b/drivers/media/platform/msm/vidc/venus_boot.c
index 925c97a5b6e8..85c3e15edded 100644
--- a/drivers/media/platform/msm/vidc/venus_boot.c
+++ b/drivers/media/platform/msm/vidc/venus_boot.c
@@ -190,8 +190,6 @@ static int pil_venus_auth_and_reset(void)
{
int rc;
- /* Need to enable this for new SMMU to set the device attribute */
- bool disable_htw = true;
phys_addr_t fw_bias = venus_data->resources->firmware_base;
void __iomem *reg_base = venus_data->reg_base;
u32 ver;
@@ -278,17 +276,6 @@ static int pil_venus_auth_and_reset(void)
if (iommu_present) {
phys_addr_t pa = fw_bias;
- /* Enable this for new SMMU to set the device attribute */
- rc = iommu_domain_set_attr(venus_data->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- dprintk(VIDC_ERR,
- "%s: Failed to disable COHERENT_HTW: %s\n",
- __func__, dev_name(dev));
- goto release_mapping;
- }
-
rc = arm_iommu_attach_device(dev, venus_data->mapping);
if (rc) {
dprintk(VIDC_ERR,
diff --git a/drivers/media/platform/msm/vidc/venus_hfi.c b/drivers/media/platform/msm/vidc/venus_hfi.c
index e0fb31de38ff..8332c7f4db43 100644
--- a/drivers/media/platform/msm/vidc/venus_hfi.c
+++ b/drivers/media/platform/msm/vidc/venus_hfi.c
@@ -3336,7 +3336,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
{
bool local_packet = false;
enum vidc_msg_prio log_level = VIDC_FW;
- unsigned int pending_packet_count = 0;
if (!device) {
dprintk(VIDC_ERR, "%s: Invalid params\n", __func__);
@@ -3361,23 +3360,6 @@ static void __flush_debug_queue(struct venus_hfi_device *device, u8 *packet)
log_level = VIDC_ERR;
}
- /*
- * In FATAL situation, print all the pending messages in msg
- * queue. This is useful for debugging. At this time, message
- * queues may be corrupted. Hence don't trust them and just print
- * first max_packets packets.
- */
-
- if (local_packet) {
- dprintk(VIDC_ERR,
- "Printing all pending messages in message Queue\n");
- while (!__iface_msgq_read(device, packet) &&
- pending_packet_count < max_packets) {
- __dump_packet(packet, log_level);
- pending_packet_count++;
- }
- }
-
while (!__iface_dbgq_read(device, packet)) {
struct hfi_msg_sys_coverage_packet *pkt =
(struct hfi_msg_sys_coverage_packet *) packet;
diff --git a/drivers/mfd/wcd934x-regmap.c b/drivers/mfd/wcd934x-regmap.c
index 02ddf3225af8..e07350a1e2ce 100644
--- a/drivers/mfd/wcd934x-regmap.c
+++ b/drivers/mfd/wcd934x-regmap.c
@@ -17,6 +17,18 @@
#include <linux/device.h>
#include "wcd9xxx-regmap.h"
+
+static const struct reg_sequence wcd934x_1_1_defaults[] = {
+ { WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0, 0x01 },
+ { WCD934X_BIAS_VBG_FINE_ADJ, 0x75 },
+ { WCD934X_HPH_REFBUFF_LP_CTL, 0x0E },
+ { WCD934X_EAR_DAC_CTL_ATEST, 0x08 },
+ { WCD934X_SIDO_NEW_VOUT_A_STARTUP, 0x17 },
+ { WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL, 0x40 },
+ { WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0x81 },
+ { WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0x81 },
+};
+
static const struct reg_default wcd934x_defaults[] = {
{ WCD934X_PAGE0_PAGE_REGISTER, 0x00 },
{ WCD934X_CODEC_RPM_CLK_BYPASS, 0x00 },
@@ -1803,6 +1815,37 @@ static const struct reg_default wcd934x_defaults[] = {
{ WCD934X_TEST_DEBUG_CODEC_DIAGS, 0x00 },
};
+/*
+ * wcd934x_regmap_register_patch: Update register defaults based on version
+ * @regmap: handle to wcd9xxx regmap
+ * @revision: wcd934x chip revision
+ *
+ * Returns error code in case of failure or 0 for success
+ */
+int wcd934x_regmap_register_patch(struct regmap *regmap, int revision)
+{
+ int rc = 0;
+
+ if (!regmap) {
+ pr_err("%s: regmap struct is NULL\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (revision) {
+ case TAVIL_VERSION_1_1:
+ case TAVIL_VERSION_WCD9340_1_1:
+ case TAVIL_VERSION_WCD9341_1_1:
+ regcache_cache_only(regmap, true);
+ rc = regmap_multi_reg_write(regmap, wcd934x_1_1_defaults,
+ ARRAY_SIZE(wcd934x_1_1_defaults));
+ regcache_cache_only(regmap, false);
+ break;
+ }
+
+ return rc;
+}
+EXPORT_SYMBOL(wcd934x_regmap_register_patch);
+
static bool wcd934x_is_readable_register(struct device *dev, unsigned int reg)
{
u8 pg_num, reg_offset;
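wcd934x_regmap_register_patch() rewrites the cached register defaults without waking the codec: regcache_cache_only() makes regmap_multi_reg_write() land in the regcache rather than on the bus. A hedged usage sketch of how a caller would apply the patch through the dispatch helper in wcd9xxx-regmap.h (the wcd9xxx->regmap member name is an assumption):

	struct regmap *regmap = wcd9xxx->regmap;
	regmap_patch_fptr apply_patch;
	int ret;

	apply_patch = wcd9xxx_get_regmap_reg_patch(WCD934X);
	if (apply_patch) {
		ret = apply_patch(regmap, TAVIL_VERSION_1_1);
		if (ret)
			dev_err(dev, "regmap patch failed: %d\n", ret);
	}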
diff --git a/drivers/mfd/wcd9xxx-regmap.h b/drivers/mfd/wcd9xxx-regmap.h
index 62e4a620c71c..6db8fc55acae 100644
--- a/drivers/mfd/wcd9xxx-regmap.h
+++ b/drivers/mfd/wcd9xxx-regmap.h
@@ -21,6 +21,8 @@ typedef int (*regmap_patch_fptr)(struct regmap *, int);
#ifdef CONFIG_WCD934X_CODEC
extern struct regmap_config wcd934x_regmap_config;
+extern int wcd934x_regmap_register_patch(struct regmap *regmap,
+ int version);
#endif
#ifdef CONFIG_WCD9335_CODEC
@@ -71,6 +73,11 @@ static inline regmap_patch_fptr wcd9xxx_get_regmap_reg_patch(int type)
apply_patch = wcd9335_regmap_register_patch;
break;
#endif
+#ifdef CONFIG_WCD934X_CODEC
+ case WCD934X:
+ apply_patch = wcd934x_regmap_register_patch;
+ break;
+#endif
default:
apply_patch = NULL;
break;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils.c b/drivers/misc/qcom/qdsp6v2/audio_utils.c
index 065b426ca6d0..840597314a5f 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils.c
@@ -601,6 +601,7 @@ long audio_in_compat_ioctl(struct file *file,
}
case AUDIO_GET_CONFIG_32: {
struct msm_audio_config32 cfg_32;
+ memset(&cfg_32, 0, sizeof(cfg_32));
cfg_32.buffer_size = audio->pcm_cfg.buffer_size;
cfg_32.buffer_count = audio->pcm_cfg.buffer_count;
cfg_32.channel_count = audio->pcm_cfg.channel_count;
diff --git a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
index 0c44f79549d4..567c948b0efe 100644
--- a/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
+++ b/drivers/misc/qcom/qdsp6v2/audio_utils_aio.c
@@ -570,6 +570,8 @@ int audio_aio_release(struct inode *inode, struct file *file)
struct q6audio_aio *audio = file->private_data;
pr_debug("%s[%p]\n", __func__, audio);
mutex_lock(&audio->lock);
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
audio->wflush = 1;
if (audio->wakelock_voted &&
(audio->audio_ws_mgr != NULL) &&
@@ -595,6 +597,8 @@ int audio_aio_release(struct inode *inode, struct file *file)
wake_up(&audio->event_wait);
audio_aio_reset_event_queue(audio);
q6asm_audio_client_free(audio->ac);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
mutex_unlock(&audio->lock);
mutex_destroy(&audio->lock);
mutex_destroy(&audio->read_lock);
@@ -1745,7 +1749,11 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd,
__func__);
rc = -EFAULT;
} else {
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_add(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
@@ -1760,7 +1768,11 @@ static long audio_aio_ioctl(struct file *file, unsigned int cmd,
__func__);
rc = -EFAULT;
} else {
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_remove(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
@@ -2064,7 +2076,11 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
} else {
info.fd = info_32.fd;
info.vaddr = compat_ptr(info_32.vaddr);
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_add(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
@@ -2081,7 +2097,11 @@ static long audio_aio_compat_ioctl(struct file *file, unsigned int cmd,
} else {
info.fd = info_32.fd;
info.vaddr = compat_ptr(info_32.vaddr);
+ mutex_lock(&audio->read_lock);
+ mutex_lock(&audio->write_lock);
rc = audio_aio_ion_remove(audio, &info);
+ mutex_unlock(&audio->write_lock);
+ mutex_unlock(&audio->read_lock);
}
mutex_unlock(&audio->lock);
break;
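The aio hunks above take audio->read_lock and audio->write_lock, in that fixed order and nested under audio->lock, around both release and the ION add/remove ioctls, so buffer teardown cannot race an in-flight read or write. Keeping one global acquisition order is what makes the nesting deadlock-free; condensed, every path follows:

	mutex_lock(&audio->lock);
	mutex_lock(&audio->read_lock);
	mutex_lock(&audio->write_lock);

	/* ... mutate the shared ION buffer list ... */

	mutex_unlock(&audio->write_lock);
	mutex_unlock(&audio->read_lock);
	mutex_unlock(&audio->lock);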
diff --git a/drivers/net/wireless/cnss/cnss_pci.c b/drivers/net/wireless/cnss/cnss_pci.c
index ec6955452391..1e56d445c6e1 100644
--- a/drivers/net/wireless/cnss/cnss_pci.c
+++ b/drivers/net/wireless/cnss/cnss_pci.c
@@ -1404,7 +1404,6 @@ static int cnss_wlan_is_codeswap_supported(u16 revision)
static int cnss_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
@@ -1418,15 +1417,6 @@ static int cnss_smmu_init(struct device *dev)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- pr_err("%s: set disable_htw attribute failed, err = %d\n",
- __func__, ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret) {
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index af1e5a70d585..352defe6204b 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -170,7 +170,12 @@ static void gsi_handle_glob_err(uint32_t err)
gsi_ctx->per.notify_cb(&per_notify);
break;
case GSI_ERR_TYPE_CHAN:
- BUG_ON(log->virt_idx >= GSI_MAX_CHAN);
+ if (log->virt_idx >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ch = &gsi_ctx->chan[log->virt_idx];
chan_notify.chan_user_data = ch->props.chan_user_data;
chan_notify.err_desc = err & 0xFFFF;
@@ -213,7 +218,12 @@ static void gsi_handle_glob_err(uint32_t err)
WARN_ON(1);
break;
case GSI_ERR_TYPE_EVT:
- BUG_ON(log->virt_idx >= GSI_MAX_EVT_RING);
+ if (log->virt_idx >= gsi_ctx->max_ev) {
+ GSIERR("Unexpected ev %d\n", log->virt_idx);
+ WARN_ON(1);
+ return;
+ }
+
ev = &gsi_ctx->evtr[log->virt_idx];
evt_notify.user_data = ev->props.user_data;
evt_notify.err_desc = err & 0xFFFF;
@@ -257,6 +267,9 @@ static void gsi_handle_glob_ee(int ee)
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_STTS_ERROR_INT_BMSK) {
err = gsi_readl(gsi_ctx->base +
GSI_EE_n_ERROR_LOG_OFFS(ee));
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(ee));
gsi_writel(clr, gsi_ctx->base +
GSI_EE_n_ERROR_LOG_CLR_OFFS(ee));
gsi_handle_glob_err(err);
@@ -311,7 +324,12 @@ static void gsi_process_chan(struct gsi_xfer_compl_evt *evt,
uint64_t rp;
ch_id = evt->chid;
- BUG_ON(ch_id >= GSI_MAX_CHAN);
+ if (ch_id >= gsi_ctx->max_ch) {
+ GSIERR("Unexpected ch %d\n", ch_id);
+ WARN_ON(1);
+ return;
+ }
+
ch_ctx = &gsi_ctx->chan[ch_id];
BUG_ON(ch_ctx->props.prot != GSI_CHAN_PROT_GPI);
rp = evt->xfer_ptr;
@@ -567,6 +585,75 @@ static irqreturn_t gsi_isr(int irq, void *ctxt)
return IRQ_HANDLED;
}
+static uint32_t gsi_get_max_channels(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max channels %d\n", reg);
+
+ return reg;
+}
+
+static uint32_t gsi_get_max_event_rings(enum gsi_ver ver)
+{
+ uint32_t reg;
+
+ switch (ver) {
+ case GSI_VER_1_0:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_2:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ reg = (reg & GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK) >>
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT;
+ break;
+ case GSI_VER_1_3:
+ reg = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ reg = (reg &
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK) >>
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT;
+ break;
+ default:
+ GSIERR("bad gsi version %d\n", ver);
+ WARN_ON(1);
+ reg = 0;
+ }
+
+ GSIDBG("max event rings %d\n", reg);
+
+ return reg;
+}
int gsi_complete_clk_grant(unsigned long dev_hdl)
{
unsigned long flags;
@@ -611,6 +698,11 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
return -GSI_STATUS_INVALID_PARAMS;
}
+ if (props->ver <= GSI_VER_ERR || props->ver >= GSI_VER_MAX) {
+ GSIERR("bad params gsi_ver=%d\n", props->ver);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
if (!props->notify_cb) {
GSIERR("notify callback must be provided\n");
return -GSI_STATUS_INVALID_PARAMS;
@@ -668,8 +760,25 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
mutex_init(&gsi_ctx->mlock);
atomic_set(&gsi_ctx->num_chan, 0);
atomic_set(&gsi_ctx->num_evt_ring, 0);
- /* only support 16 un-reserved + 7 reserved event virtual IDs */
- gsi_ctx->evt_bmap = ~0x7E03FF;
+ gsi_ctx->max_ch = gsi_get_max_channels(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ch == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max channels\n");
+ return -GSI_STATUS_ERROR;
+ }
+ gsi_ctx->max_ev = gsi_get_max_event_rings(gsi_ctx->per.ver);
+ if (gsi_ctx->max_ev == 0) {
+ devm_iounmap(gsi_ctx->dev, gsi_ctx->base);
+ devm_free_irq(gsi_ctx->dev, props->irq, gsi_ctx);
+ GSIERR("failed to get max event rings\n");
+ return -GSI_STATUS_ERROR;
+ }
+
+ /* mark event ring IDs at or above max_ev as used, then reserve the MHI rings */
+ gsi_ctx->evt_bmap = ~((1 << gsi_ctx->max_ev) - 1);
+ gsi_ctx->evt_bmap |= ((1 << (GSI_MHI_ER_END + 1)) - 1) ^
+ ((1 << GSI_MHI_ER_START) - 1);
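+ /*
+ * Worked example with hypothetical values: if max_ev = 20,
+ * GSI_MHI_ER_START = 10 and GSI_MHI_ER_END = 16, the first
+ * statement marks rings 20..63 as used (not implemented by the
+ * HW) and the second marks rings 10..16 as used (reserved for
+ * MHI), leaving rings 0..9 and 17..19 allocatable.
+ */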
/*
* enable all interrupts but GSI_BREAK_POINT.
@@ -693,6 +802,10 @@ int gsi_register_device(struct gsi_per_props *props, unsigned long *dev_hdl)
else
GSIERR("Manager EE has not enabled GSI, GSI un-usable\n");
+ if (gsi_ctx->per.ver >= GSI_VER_1_2)
+ gsi_writel(0, gsi_ctx->base +
+ GSI_EE_n_ERROR_LOG_OFFS(gsi_ctx->per.ee));
+
*dev_hdl = (uintptr_t)gsi_ctx;
return GSI_STATUS_SUCCESS;
@@ -1059,7 +1172,7 @@ int gsi_write_evt_ring_scratch(unsigned long evt_ring_hdl,
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1093,7 +1206,7 @@ int gsi_dealloc_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1160,7 +1273,7 @@ int gsi_query_evt_ring_db_addr(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1194,7 +1307,7 @@ int gsi_reset_evt_ring(unsigned long evt_ring_hdl)
return -GSI_STATUS_NODEV;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1255,7 +1368,7 @@ int gsi_get_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1291,7 +1404,7 @@ int gsi_set_evt_ring_cfg(unsigned long evt_ring_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (evt_ring_hdl >= GSI_MAX_EVT_RING) {
+ if (evt_ring_hdl >= gsi_ctx->max_ev) {
GSIERR("bad params evt_ring_hdl=%lu\n", evt_ring_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1382,7 +1495,7 @@ static int gsi_validate_channel_props(struct gsi_chan_props *props)
{
uint64_t ra;
- if (props->ch_id >= GSI_MAX_CHAN) {
+ if (props->ch_id >= gsi_ctx->max_ch) {
GSIERR("ch_id %u invalid\n", props->ch_id);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1573,7 +1686,7 @@ int gsi_write_channel_scratch(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1610,7 +1723,7 @@ int gsi_query_channel_db_addr(unsigned long chan_hdl,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1642,7 +1755,7 @@ int gsi_start_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1694,7 +1807,7 @@ int gsi_stop_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1763,7 +1876,7 @@ int gsi_stop_db_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1832,7 +1945,7 @@ int gsi_reset_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -1898,7 +2011,7 @@ int gsi_dealloc_channel(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2021,7 +2134,7 @@ int gsi_query_channel_info(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !info) {
+ if (chan_hdl >= gsi_ctx->max_ch || !info) {
GSIERR("bad params chan_hdl=%lu info=%p\n", chan_hdl, info);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2091,7 +2204,7 @@ int gsi_is_channel_empty(unsigned long chan_hdl, bool *is_empty)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !is_empty) {
+ if (chan_hdl >= gsi_ctx->max_ch || !is_empty) {
GSIERR("bad params chan_hdl=%lu is_empty=%p\n",
chan_hdl, is_empty);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2155,7 +2268,7 @@ int gsi_queue_xfer(unsigned long chan_hdl, uint16_t num_xfers,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !num_xfers || !xfer) {
+ if (chan_hdl >= gsi_ctx->max_ch || !num_xfers || !xfer) {
GSIERR("bad params chan_hdl=%lu num_xfers=%u xfer=%p\n",
chan_hdl, num_xfers, xfer);
return -GSI_STATUS_INVALID_PARAMS;
@@ -2242,7 +2355,7 @@ int gsi_start_xfer(unsigned long chan_hdl)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2278,7 +2391,7 @@ int gsi_poll_channel(unsigned long chan_hdl,
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN || !notify) {
+ if (chan_hdl >= gsi_ctx->max_ch || !notify) {
GSIERR("bad params chan_hdl=%lu notify=%p\n", chan_hdl, notify);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2327,7 +2440,7 @@ int gsi_config_channel_mode(unsigned long chan_hdl, enum gsi_chan_mode mode)
return -GSI_STATUS_NODEV;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu mode=%u\n", chan_hdl, mode);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2390,7 +2503,7 @@ int gsi_get_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2426,7 +2539,7 @@ int gsi_set_channel_cfg(unsigned long chan_hdl, struct gsi_chan_props *props,
return -GSI_STATUS_INVALID_PARAMS;
}
- if (chan_hdl >= GSI_MAX_CHAN) {
+ if (chan_hdl >= gsi_ctx->max_ch) {
GSIERR("bad params chan_hdl=%lu\n", chan_hdl);
return -GSI_STATUS_INVALID_PARAMS;
}
@@ -2471,9 +2584,9 @@ static void gsi_configure_ieps(void *base)
gsi_writel(5, gsi_base + GSI_GSI_IRAM_PTR_EE_GENERIC_CMD_OFFS);
gsi_writel(6, gsi_base + GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_OFFS);
gsi_writel(7, gsi_base + GSI_GSI_IRAM_PTR_INT_MOD_STOPED_OFFS);
- gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS);
- gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS);
- gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS);
+ gsi_writel(8, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS);
+ gsi_writel(9, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS);
+ gsi_writel(10, gsi_base + GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS);
gsi_writel(11, gsi_base + GSI_GSI_IRAM_PTR_NEW_RE_OFFS);
gsi_writel(12, gsi_base + GSI_GSI_IRAM_PTR_READ_ENG_COMP_OFFS);
gsi_writel(13, gsi_base + GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS);
@@ -2502,9 +2615,9 @@ static void gsi_configure_bck_prs_matrix(void *base)
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_LSB_OFFS);
gsi_writel(0x00000000,
gsi_base + GSI_IC_PROCESS_DESC_BCK_PRS_MSB_OFFS);
- gsi_writel(0x00ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_STOP_BCK_PRS_MSB_OFFS);
- gsi_writel(0xfdffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
+ gsi_writel(0xf9ffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_LSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_TLV_RESET_BCK_PRS_MSB_OFFS);
gsi_writel(0xffffffff, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_LSB_OFFS);
gsi_writel(0xfffffffe, gsi_base + GSI_IC_RGSTR_TIMER_BCK_PRS_MSB_OFFS);
@@ -2551,15 +2664,35 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size)
}
/* Enable the MCS and set to x2 clocks */
- value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
- GSI_GSI_CFG_GSI_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
- GSI_GSI_CFG_MCS_ENABLE_BMSK) |
- ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
- GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
- ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
- GSI_GSI_CFG_UC_IS_MCS_BMSK));
- gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ if (gsi_ctx->per.ver >= GSI_VER_1_2) {
+ value = ((1 << GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK);
+ gsi_writel(value, gsi_base + GSI_GSI_MCS_CFG_OFFS);
+
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((0 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK) |
+ ((0 << GSI_GSI_CFG_GSI_PWR_CLPS_SHFT) &
+ GSI_GSI_CFG_GSI_PWR_CLPS_BMSK) |
+ ((0 << GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT) &
+ GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ } else {
+ value = (((1 << GSI_GSI_CFG_GSI_ENABLE_SHFT) &
+ GSI_GSI_CFG_GSI_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_MCS_ENABLE_SHFT) &
+ GSI_GSI_CFG_MCS_ENABLE_BMSK) |
+ ((1 << GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_SHFT) &
+ GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK) |
+ ((0 << GSI_GSI_CFG_UC_IS_MCS_SHFT) &
+ GSI_GSI_CFG_UC_IS_MCS_BMSK));
+ gsi_writel(value, gsi_base + GSI_GSI_CFG_OFFS);
+ }
iounmap(gsi_base);
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 1d438ffb8b76..0b94ed2d3a92 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -19,8 +19,8 @@
#include <linux/spinlock.h>
#include <linux/msm_gsi.h>
-#define GSI_MAX_CHAN 31
-#define GSI_MAX_EVT_RING 23
+#define GSI_CHAN_MAX 31
+#define GSI_EVT_RING_MAX 23
#define GSI_NO_EVT_ERINDEX 31
#define gsi_readl(c) ({ u32 __v = readl_relaxed(c); __iormb(); __v; })
@@ -130,8 +130,8 @@ struct gsi_ctx {
struct device *dev;
struct gsi_per_props per;
bool per_registered;
- struct gsi_chan_ctx chan[GSI_MAX_CHAN];
- struct gsi_evt_ctx evtr[GSI_MAX_EVT_RING];
+ struct gsi_chan_ctx chan[GSI_CHAN_MAX];
+ struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
@@ -141,6 +141,8 @@ struct gsi_ctx {
struct gsi_ee_scratch scratch;
int num_ch_dp_stats;
struct workqueue_struct *dp_stat_wq;
+ u32 max_ch;
+ u32 max_ev;
};
enum gsi_re_type {
diff --git a/drivers/platform/msm/gsi/gsi_dbg.c b/drivers/platform/msm/gsi/gsi_dbg.c
index 2ab8b79acc6d..5eb9084292a4 100644
--- a/drivers/platform/msm/gsi/gsi_dbg.c
+++ b/drivers/platform/msm/gsi/gsi_dbg.c
@@ -71,7 +71,7 @@ static ssize_t gsi_dump_evt(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_EVT_RING) {
+ if (arg1 >= gsi_ctx->max_ev) {
TERR("invalid evt ring id %u\n", arg1);
return -EFAULT;
}
@@ -184,7 +184,7 @@ static ssize_t gsi_dump_ch(struct file *file,
TDBG("arg1=%u arg2=%u\n", arg1, arg2);
- if (arg1 >= GSI_MAX_CHAN) {
+ if (arg1 >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", arg1);
return -EFAULT;
}
@@ -271,9 +271,30 @@ static ssize_t gsi_dump_ee(struct file *file,
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_STATUS_OFFS(gsi_ctx->per.ee));
TERR("EE%2d STATUS 0x%x\n", gsi_ctx->per.ee, val);
- val = gsi_readl(gsi_ctx->base +
- GSI_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
- TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ if (gsi_ctx->per.ver == GSI_VER_1_0) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_2) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ } else if (gsi_ctx->per.ver == GSI_VER_1_3) {
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_0 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_1 0x%x\n", gsi_ctx->per.ee, val);
+ val = gsi_readl(gsi_ctx->base +
+ GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
+ TERR("EE%2d HW_PARAM_2 0x%x\n", gsi_ctx->per.ee, val);
+ } else {
+ WARN_ON(1);
+ }
val = gsi_readl(gsi_ctx->base +
GSI_EE_n_GSI_SW_VERSION_OFFS(gsi_ctx->per.ee));
TERR("EE%2d SW_VERSION 0x%x\n", gsi_ctx->per.ee, val);
@@ -329,7 +350,7 @@ static ssize_t gsi_dump_map(struct file *file,
int i;
TERR("EVT bitmap 0x%lx\n", gsi_ctx->evt_bmap);
- for (i = 0; i < GSI_MAX_CHAN; i++) {
+ for (i = 0; i < gsi_ctx->max_ch; i++) {
ctx = &gsi_ctx->chan[i];
if (ctx->allocated) {
@@ -402,8 +423,8 @@ static ssize_t gsi_dump_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -464,7 +485,7 @@ static ssize_t gsi_enable_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
@@ -540,7 +561,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
/* get */
if (kstrtou32(dbg_buff, 0, &ch_id))
goto error;
- if (ch_id >= GSI_MAX_CHAN)
+ if (ch_id >= gsi_ctx->max_ch)
goto error;
PRT_STAT("ch %d: max_re_expected=%d\n", ch_id,
gsi_ctx->chan[ch_id].props.max_re_expected);
@@ -553,7 +574,7 @@ static ssize_t gsi_set_max_elem_dp_stats(struct file *file,
TDBG("ch_id=%u max_elem=%u\n", ch_id, max_elem);
- if (ch_id >= GSI_MAX_CHAN) {
+ if (ch_id >= gsi_ctx->max_ch) {
TERR("invalid chan id %u\n", ch_id);
goto error;
}
@@ -572,7 +593,7 @@ static void gsi_wq_print_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].print_dp_stats)
gsi_dump_ch_stats(&gsi_ctx->chan[ch_id]);
}
@@ -618,7 +639,7 @@ static void gsi_wq_update_dp_stats(struct work_struct *work)
{
int ch_id;
- for (ch_id = 0; ch_id < GSI_MAX_CHAN; ch_id++) {
+ for (ch_id = 0; ch_id < gsi_ctx->max_ch; ch_id++) {
if (gsi_ctx->chan[ch_id].allocated &&
gsi_ctx->chan[ch_id].props.prot != GSI_CHAN_PROT_GPI &&
gsi_ctx->chan[ch_id].enable_dp_stats)
@@ -649,8 +670,8 @@ static ssize_t gsi_rst_stats(struct file *file,
if (ch_id == -1) {
min = 0;
- max = GSI_MAX_CHAN;
- } else if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ max = gsi_ctx->max_ch;
+ } else if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
} else {
@@ -691,7 +712,7 @@ static ssize_t gsi_print_dp_stats(struct file *file,
if (kstrtos32(dbg_buff + 1, 0, &ch_id))
goto error;
- if (ch_id < 0 || ch_id >= GSI_MAX_CHAN ||
+ if (ch_id < 0 || ch_id >= gsi_ctx->max_ch ||
!gsi_ctx->chan[ch_id].allocated) {
goto error;
}
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 36a74105b490..fa1e84896f73 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -17,6 +17,10 @@
#define GSI_GSI_CFG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000000)
#define GSI_GSI_CFG_RMSK 0xf
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_BMSK 0x20
+#define GSI_GSI_CFG_BP_MTRIX_DISABLE_SHFT 0x5
+#define GSI_GSI_CFG_GSI_PWR_CLPS_BMSK 0x10
+#define GSI_GSI_CFG_GSI_PWR_CLPS_SHFT 0x4
#define GSI_GSI_CFG_UC_IS_MCS_BMSK 0x8
#define GSI_GSI_CFG_UC_IS_MCS_SHFT 0x3
#define GSI_GSI_CFG_DOUBLE_MCS_CLK_FREQ_BMSK 0x4
@@ -26,6 +30,11 @@
#define GSI_GSI_CFG_GSI_ENABLE_BMSK 0x1
#define GSI_GSI_CFG_GSI_ENABLE_SHFT 0x0
+#define GSI_GSI_MCS_CFG_OFFS \
+ (GSI_GSI_REG_BASE_OFFS + 0x0000B000)
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_BMSK 0x1
+#define GSI_GSI_MCS_CFG_MCS_ENABLE_SHFT 0x0
+
#define GSI_GSI_MANAGER_MCS_CODE_VER_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000008)
#define GSI_GSI_MANAGER_MCS_CODE_VER_RMSK 0xffffffff
@@ -99,8 +108,20 @@
#define GSI_GSI_CGC_CTRL_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000060)
#define GSI_GSI_CGC_CTRL_RMSK 0x3f
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_BMSK 0x20
-#define GSI_GSI_CGC_CTRL_REGION_6_DEBUG_CNTRS_EN_SHFT 0x5
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_BMSK 0x800
+#define GSI_GSI_CGC_CTRL_REGION_12_HW_CGC_EN_SHFT 0xb
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_BMSK 0x400
+#define GSI_GSI_CGC_CTRL_REGION_11_HW_CGC_EN_SHFT 0xa
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_BMSK 0x200
+#define GSI_GSI_CGC_CTRL_REGION_10_HW_CGC_EN_SHFT 0x9
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_BMSK 0x100
+#define GSI_GSI_CGC_CTRL_REGION_9_HW_CGC_EN_SHFT 0x8
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_BMSK 0x80
+#define GSI_GSI_CGC_CTRL_REGION_8_HW_CGC_EN_SHFT 0x7
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_BMSK 0x40
+#define GSI_GSI_CGC_CTRL_REGION_7_HW_CGC_EN_SHFT 0x6
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_BMSK 0x20
+#define GSI_GSI_CGC_CTRL_REGION_6_HW_CGC_EN_SHFT 0x5
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_BMSK 0x10
#define GSI_GSI_CGC_CTRL_REGION_5_HW_CGC_EN_SHFT 0x4
#define GSI_GSI_CGC_CTRL_REGION_4_HW_CGC_EN_BMSK 0x8
@@ -619,23 +640,23 @@
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_BMSK 0xfff
#define GSI_GSI_IRAM_PTR_EVENT_GEN_COMP_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000430)
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_DESC_PROC_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_0_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000434)
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_RESET_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_2_IRAM_PTR_SHFT 0x0
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_OFFS \
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00000438)
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_RMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_BMSK 0xfff
-#define GSI_GSI_IRAM_PTR_IPA_IF_STOP_COMP_IRAM_PTR_SHFT 0x0
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_RMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_BMSK 0xfff
+#define GSI_GSI_IRAM_PTR_PERIPH_IF_TLV_IN_1_IRAM_PTR_SHFT 0x0
#define GSI_GSI_IRAM_PTR_TIMER_EXPIRED_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x0000043c)
@@ -701,7 +722,9 @@
#define GSI_GSI_DEBUG_BUSY_REG_OFFS \
(GSI_GSI_REG_BASE_OFFS + 0x00001010)
-#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0x7f
+#define GSI_GSI_DEBUG_BUSY_REG_RMSK 0xff
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_BMSK 0x80
+#define GSI_GSI_DEBUG_BUSY_REG_REE_PWR_CLPS_BUSY_SHFT 0x7
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_BMSK 0x40
#define GSI_GSI_DEBUG_BUSY_REG_INT_ENG_BUSY_SHFT 0x6
#define GSI_GSI_DEBUG_BUSY_REG_EV_ENG_BUSY_BMSK 0x20
@@ -1345,22 +1368,150 @@
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
-#define GSI_EE_n_GSI_HW_PARAM_OFFS(n) \
+/* v1.0 */
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_MAXn 3
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_0_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+
+/* v1.2 */
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_2_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+/* v1.3 */
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f038 + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_BMSK 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_USE_AXI_M_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_BMSK 0x7c000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_SEC_GRP_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_BMSK 0x3e00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_PERIPH_CONF_ADDR_BUS_W_SHFT 0x15
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_BMSK 0x1f0000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_NUM_EES_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_BMSK 0xff00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_CH_NUM_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_0_GSI_EV_CH_NUM_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_OFFS(n) \
+ (GSI_GSI_REG_BASE_OFFS + 0x0001f03c + 0x4000 * (n))
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_RMSK 0xffffffff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_BMSK \
+ 0x80000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_2_EN_SHFT 0x1f
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_BMSK \
+ 0x40000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_BLK_INT_ACCESS_REGION_1_EN_SHFT 0x1e
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_BMSK 0x20000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SIMPLE_RD_WR_SHFT 0x1d
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_BMSK 0x10000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_ESCAPE_BUF_ONLY_SHFT 0x1c
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_BMSK 0x8000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_UC_IF_SHFT 0x1b
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_BMSK 0x4000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_DB_ENG_SHFT 0x1a
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_BMSK 0x2000000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_BP_MTRIX_SHFT 0x19
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_BMSK 0x1f00000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_TIMERS_SHFT 0x14
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_BMSK 0x80000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_USE_XPU_SHFT 0x13
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_BMSK 0x40000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_QRIB_EN_SHFT 0x12
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_BMSK 0x20000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_VMIDACR_EN_SHFT 0x11
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_BMSK 0x10000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_SEC_EN_SHFT 0x10
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_BMSK 0xf000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NONSEC_EN_SHFT 0xc
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_BMSK 0xf00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_NUM_QAD_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_BMSK 0xff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_1_GSI_M_DATA_BUS_W_SHFT 0x0
+
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f040 + 0x4000 * (n))
-#define GSI_EE_n_GSI_HW_PARAM_RMSK 0x7fffffff
-#define GSI_EE_n_GSI_HW_PARAM_MAXn 3
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_BMSK 0x7c000000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_SEC_GRP_SHFT 0x1a
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_BMSK 0x2000000
-#define GSI_EE_n_GSI_HW_PARAM_USE_AXI_M_SHFT 0x19
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_BMSK 0x1f00000
-#define GSI_EE_n_GSI_HW_PARAM_PERIPH_CONF_ADDR_BUS_W_SHFT 0x14
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_BMSK 0xf0000
-#define GSI_EE_n_GSI_HW_PARAM_NUM_EES_SHFT 0x10
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_BMSK 0xff00
-#define GSI_EE_n_GSI_HW_PARAM_GSI_CH_NUM_SHFT 0x8
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_BMSK 0xff
-#define GSI_EE_n_GSI_HW_PARAM_GSI_EV_CH_NUM_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_RMSK 0x7fff
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_MAXn 2
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_BMSK 0x4000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_FULL_LOGIC_SHFT 0xe
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_BMSK 0x2000
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_CH_PEND_TRANSLATE_SHFT 0xd
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_BMSK 0x1f00
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_EV_PER_EE_SHFT 0x8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK 0xf8
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT 0x3
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_BMSK 0x7
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_SHFT 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_ONE_KB_FVAL 0x0
+#define GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_IRAM_SIZE_TWO_KB_FVAL 0x1
#define GSI_EE_n_GSI_SW_VERSION_OFFS(n) \
(GSI_GSI_REG_BASE_OFFS + 0x0001f044 + 0x4000 * (n))
@@ -1662,7 +1813,7 @@
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x00003fff
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_MSK_GSI_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_OFFS(n) \
@@ -1670,7 +1821,7 @@
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_RMSK 0xffffffff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_MAXn 3
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_BMSK \
- 0xffffffff
+ 0x000003ff
#define GSI_INTER_EE_n_SRC_EV_CH_IRQ_MSK_EV_CH_BIT_MAP_MSK_SHFT 0x0
#define GSI_INTER_EE_n_SRC_GSI_CH_IRQ_CLR_OFFS(n) \
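These *_BMSK/*_SHFT pairs follow the usual mask-then-shift convention, and gsi_get_max_channels()/gsi_get_max_event_rings() above are their first consumers. Reading any field reduces to the same two lines, e.g. the per-EE channel count on v1.3:

	u32 reg, num_ch;

	reg = gsi_readl(gsi_ctx->base +
		GSI_V1_3_EE_n_GSI_HW_PARAM_2_OFFS(gsi_ctx->per.ee));
	num_ch = (reg & GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_BMSK) >>
		GSI_V1_3_EE_n_GSI_HW_PARAM_2_GSI_NUM_CH_PER_EE_SHFT;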
diff --git a/drivers/platform/msm/ipa/ipa_api.c b/drivers/platform/msm/ipa/ipa_api.c
index 05ce3969a5c7..75b193def36e 100644
--- a/drivers/platform/msm/ipa/ipa_api.c
+++ b/drivers/platform/msm/ipa/ipa_api.c
@@ -26,7 +26,8 @@
#define IPA_API_DISPATCH_RETURN(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = -EPERM; \
} \
else { \
@@ -44,7 +45,8 @@
#define IPA_API_DISPATCH(api, p...) \
do { \
if (!ipa_api_ctrl) \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
else { \
if (ipa_api_ctrl->api) { \
ipa_api_ctrl->api(p); \
@@ -59,7 +61,8 @@
#define IPA_API_DISPATCH_RETURN_PTR(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = NULL; \
} \
else { \
@@ -77,7 +80,8 @@
#define IPA_API_DISPATCH_RETURN_BOOL(api, p...) \
do { \
if (!ipa_api_ctrl) { \
- pr_err("IPA HW is not supported on this target\n"); \
+ pr_err("%s:%d IPA HW is not supported\n", \
+ __func__, __LINE__); \
ret = false; \
} \
else { \
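IPA_API_DISPATCH_RETURN and friends expand around a function-pointer table (ipa_api_ctrl), so each exported ipa_* wrapper stays a few lines and fails cleanly when no HW backend registered. A representative wrapper, its shape inferred from the macro above (the function name is hypothetical; a real entry must exist in the ipa_api_ctrl table):

int ipa_example_op(u32 param)
{
	int ret;

	/* sets ret = -EPERM if no backend, else calls the table entry */
	IPA_API_DISPATCH_RETURN(ipa_example_op, param);

	return ret;
}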
diff --git a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
index 838b78c1934d..d18308344431 100644
--- a/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
+++ b/drivers/platform/msm/ipa/ipa_clients/ipa_usb.c
@@ -2034,7 +2034,7 @@ static void ipa_usb_debugfs_init(void)
ipa3_usb_ctx->dent = debugfs_create_dir("ipa_usb", 0);
if (IS_ERR(ipa3_usb_ctx->dent)) {
- IPA_USB_ERR("fail to create folder in debug_fs.\n");
+ pr_err("fail to create folder in debug_fs.\n");
return;
}
@@ -2043,7 +2043,7 @@ static void ipa_usb_debugfs_init(void)
&ipa3_ipa_usb_ops);
if (!ipa3_usb_ctx->dfile_state_info ||
IS_ERR(ipa3_usb_ctx->dfile_state_info)) {
- IPA_USB_ERR("failed to create file for state_info\n");
+ pr_err("failed to create file for state_info\n");
goto fail;
}
@@ -2644,11 +2644,11 @@ static int __init ipa3_usb_init(void)
unsigned long flags;
int res;
- IPA_USB_DBG("entry\n");
+ pr_debug("entry\n");
ipa3_usb_ctx = kzalloc(sizeof(struct ipa3_usb_context), GFP_KERNEL);
if (ipa3_usb_ctx == NULL) {
- IPA_USB_ERR("failed to allocate memory\n");
- IPA_USB_ERR(":ipa_usb init failed\n");
+ pr_err("failed to allocate memory\n");
+ pr_err(":ipa_usb init failed\n");
return -EFAULT;
}
memset(ipa3_usb_ctx, 0, sizeof(struct ipa3_usb_context));
@@ -2680,19 +2680,19 @@ static int __init ipa3_usb_init(void)
ipa3_usb_ctx->wq = create_singlethread_workqueue("ipa_usb_wq");
if (!ipa3_usb_ctx->wq) {
- IPA_USB_ERR("failed to create workqueue\n");
+ pr_err("failed to create workqueue\n");
res = -EFAULT;
goto ipa_usb_workqueue_fail;
}
ipa_usb_debugfs_init();
- IPA_USB_INFO("exit: IPA_USB init success!\n");
+ pr_info("exit: IPA_USB init success!\n");
return 0;
ipa_usb_workqueue_fail:
- IPA_USB_ERR(":init failed (%d)\n", -res);
+ pr_err(":init failed (%d)\n", -res);
kfree(ipa3_usb_ctx);
return res;
}
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa.c b/drivers/platform/msm/ipa/ipa_v2/ipa.c
index 9cb0b1f3c379..804c89dc9533 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa.c
@@ -207,7 +207,6 @@ struct platform_device *ipa_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
u32 ipa_base;
@@ -4313,9 +4312,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->wan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
ipa_drv_res->lan_rx_ring_size = IPA_GENERIC_RX_POOL_SZ;
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
-
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
&ipa_drv_res->ipa_hw_type);
@@ -4502,7 +4498,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4519,17 +4514,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -4589,7 +4573,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int ret;
int fast = 1;
@@ -4628,18 +4611,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -4694,7 +4665,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa2_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4731,18 +4701,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
index 0eab77d27760..50c387ec785d 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_debugfs.c
@@ -1420,6 +1420,7 @@ static ssize_t ipa_read_nat4(struct file *file,
u16 enable, tbl_entry, flag;
u32 no_entrys = 0;
+ mutex_lock(&ipa_ctx->nat_mem.lock);
value = ipa_ctx->nat_mem.public_ip_addr;
pr_err(
"Table IP Address:%d.%d.%d.%d\n",
@@ -1573,6 +1574,7 @@ static ssize_t ipa_read_nat4(struct file *file,
}
}
pr_err("Current No. Nat Entries: %d\n", no_entrys);
+ mutex_unlock(&ipa_ctx->nat_mem.lock);
return 0;
}
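
The hunk above wraps the whole NAT-table dump in ipa_ctx->nat_mem.lock (the ipa_v3 copy further below gets the same fix), so a concurrent NAT alloc/del ioctl cannot free or resize the table while debugfs is walking it. The shape of the fix, condensed (the entry loop and print helper are illustrative):

    mutex_lock(&ipa_ctx->nat_mem.lock);
    for (i = 0; i < tbl_entries; i++)
    	print_nat_entry(&tbl[i]);	/* hypothetical helper */
    mutex_unlock(&ipa_ctx->nat_mem.lock);
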
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
index 16f50030b960..3c2a6d4620ba 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_dp.c
@@ -3152,23 +3152,23 @@ static int ipa_assign_policy_v2(struct ipa_sys_connect_params *in,
} else if (in->client ==
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa_wan_rx_pyld_hdlr;
- if (in->recycle_enabled) {
+ sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
+ if (nr_cpu_ids > 1) {
sys->repl_hdlr =
- ipa_replenish_rx_cache_recycle;
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ ipa_fast_replenish_rx_cache;
+ sys->repl_trig_thresh =
+ sys->rx_pool_sz / 8;
} else {
- if (nr_cpu_ids > 1) {
- sys->repl_hdlr =
- ipa_fast_replenish_rx_cache;
- sys->repl_trig_thresh =
- sys->rx_pool_sz / 8;
- } else {
+ sys->repl_hdlr =
+ ipa_replenish_rx_cache;
+ }
+ if (in->napi_enabled) {
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ if (in->recycle_enabled) {
sys->repl_hdlr =
- ipa_replenish_rx_cache;
+ ipa_replenish_rx_cache_recycle;
}
- sys->rx_pool_sz =
- ipa_ctx->wan_rx_ring_size;
}
sys->ep->wakelock_client =
IPA_WAKELOCK_REF_CLIENT_WAN_RX;
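
The restructured WAN_CONS branch decouples the three knobs that were previously entangled: the pool defaults to wan_rx_ring_size with the fast replenisher on SMP systems, and only NAPI mode enlarges the pool and, when recycling is also requested, switches to the recycle handler. Condensed restatement of the new ladder (a sketch; the driver inlines this, and the ipa_v3 copy below is restructured the same way):

    sys->rx_pool_sz = ipa_ctx->wan_rx_ring_size;
    if (nr_cpu_ids > 1) {
    	sys->repl_hdlr = ipa_fast_replenish_rx_cache;
    	sys->repl_trig_thresh = sys->rx_pool_sz / 8;
    } else {
    	sys->repl_hdlr = ipa_replenish_rx_cache;
    }
    if (in->napi_enabled) {
    	sys->rx_pool_sz = IPA_WAN_NAPI_CONS_RX_POOL_SZ;
    	if (in->recycle_enabled)	/* recycling only applies with NAPI */
    		sys->repl_hdlr = ipa_replenish_rx_cache_recycle;
    }
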
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index 137a43a1217b..3f20941155a5 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -493,6 +493,8 @@ static int qmi_init_modem_send_sync_msg(void)
resp_desc.ei_array = ipa_init_modem_driver_resp_msg_data_v01_ei;
pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_REQ_TIMEOUT_MS);
@@ -538,7 +540,8 @@ int qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
QMI_IPA_INSTALL_FILTER_RULE_RESP_MAX_MSG_LEN_V01;
resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
resp_desc.ei_array = ipa_install_fltr_rule_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
req,
sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -574,7 +577,8 @@ int qmi_enable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_ENABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa_enable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -618,7 +622,8 @@ int qmi_disable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -688,7 +693,8 @@ int qmi_filter_notify_send(struct ipa_fltr_installed_notif_req_msg_v01 *req)
QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_MAX_MSG_LEN_V01;
resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
resp_desc.ei_array = ipa_fltr_installed_notif_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -1089,7 +1095,8 @@ int ipa_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
resp_desc.ei_array = ipa_get_data_stats_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1118,7 +1125,8 @@ int ipa_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
resp_desc.ei_array = ipa_get_apn_data_stats_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1150,7 +1158,8 @@ int ipa_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
resp_desc.ei_array = ipa_set_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
&resp_desc, &resp, sizeof(resp),
@@ -1184,7 +1193,8 @@ int ipa_qmi_stop_data_qouta(void)
resp_desc.ei_array = ipa_stop_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_STATS_REQ_TIMEOUT_MS);
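
Every QMI sender above now refuses to touch a NULL ipa_q6_clnt handle (e.g. after subsystem restart, before the Q6 service reconnects) and reports -ETIMEDOUT, the same code a send timeout would produce. A hypothetical helper that would consolidate the repeated guard (signature mirrors qmi_send_req_wait; not part of this patch):

    static int ipa_qmi_send_guarded(struct msg_desc *req_desc, void *req,
    				unsigned int req_len,
    				struct msg_desc *resp_desc, void *resp,
    				unsigned int resp_len, int timeout_ms)
    {
    	if (unlikely(!ipa_q6_clnt))	/* service not up yet */
    		return -ETIMEDOUT;
    	return qmi_send_req_wait(ipa_q6_clnt, req_desc, req, req_len,
    				 resp_desc, resp, resp_len, timeout_ms);
    }
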
diff --git a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
index ebb93e246048..96003d7a16a0 100644
--- a/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v2/rmnet_ipa.c
@@ -1259,11 +1259,13 @@ static int handle_ingress_format(struct net_device *dev,
ipa_to_apps_ep_cfg.ipa_ep_cfg.aggr.aggr_pkt_limit =
in->u.ingress_format.agg_count;
- ipa_to_apps_ep_cfg.recycle_enabled = true;
- ep_cfg = (struct rmnet_phys_ep_conf_s *)
- rcu_dereference(dev->rx_handler_data);
- ep_cfg->recycle = ipa_recycle_wan_skb;
- pr_info("Wan Recycle Enabled\n");
+ if (ipa_rmnet_res.ipa_napi_enable) {
+ ipa_to_apps_ep_cfg.recycle_enabled = true;
+ ep_cfg = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ ep_cfg->recycle = ipa_recycle_wan_skb;
+ pr_info("Wan Recycle Enabled\n");
+ }
}
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 24fbc5c738d8..ab62dbcddd22 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -251,7 +251,6 @@ struct platform_device *ipa3_pdev;
static struct {
bool present;
bool arm_smmu;
- bool disable_htw;
bool fast_map;
bool s1_bypass;
bool use_64_bit_dma_mask;
@@ -3791,6 +3790,32 @@ static int ipa3_gsi_pre_fw_load_init(void)
return 0;
}
+static enum gsi_ver ipa3_get_gsi_ver(enum ipa_hw_type ipa_hw_type)
+{
+ enum gsi_ver gsi_ver;
+
+ switch (ipa_hw_type) {
+ case IPA_HW_v3_0:
+ case IPA_HW_v3_1:
+ gsi_ver = GSI_VER_1_0;
+ break;
+ case IPA_HW_v3_5:
+ gsi_ver = GSI_VER_1_2;
+ break;
+ case IPA_HW_v3_5_1:
+ gsi_ver = GSI_VER_1_3;
+ break;
+ default:
+ IPAERR("No GSI version for ipa type %d\n", ipa_hw_type);
+ WARN_ON(1);
+ gsi_ver = GSI_VER_ERR;
+ }
+
+ IPADBG("GSI version %d\n", gsi_ver);
+
+ return gsi_ver;
+}
+
/**
* ipa3_post_init() - Initialize the IPA Driver (Part II).
* This part contains all initialization which requires interaction with
@@ -3820,6 +3845,7 @@ static int ipa3_post_init(const struct ipa3_plat_drv_res *resource_p,
if (ipa3_ctx->transport_prototype == IPA_TRANSPORT_TYPE_GSI) {
memset(&gsi_props, 0, sizeof(gsi_props));
+ gsi_props.ver = ipa3_get_gsi_ver(resource_p->ipa_hw_type);
gsi_props.ee = resource_p->ee;
gsi_props.intr = GSI_INTR_IRQ;
gsi_props.irq = resource_p->transport_irq;
@@ -4695,9 +4721,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
ipa_drv_res->ipa_tz_unlock_reg_num = 0;
ipa_drv_res->ipa_tz_unlock_reg = NULL;
- smmu_info.disable_htw = of_property_read_bool(pdev->dev.of_node,
- "qcom,smmu-disable-htw");
-
/* Get IPA HW Version */
result = of_property_read_u32(pdev->dev.of_node, "qcom,ipa-hw-ver",
&ipa_drv_res->ipa_hw_type);
@@ -4953,7 +4976,6 @@ static int get_ipa_dts_configuration(struct platform_device *pdev,
static int ipa_smmu_wlan_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_wlan_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -4973,17 +4995,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
}
cb->valid = true;
- if (smmu_info.disable_htw) {
- ret = iommu_domain_set_attr(cb->iommu,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret) {
- IPAERR("couldn't disable coherent HTW\n");
- cb->valid = false;
- return -EIO;
- }
- }
-
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->iommu,
DOMAIN_ATTR_S1_BYPASS,
@@ -5056,7 +5067,6 @@ static int ipa_smmu_wlan_cb_probe(struct device *dev)
static int ipa_smmu_uc_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_uc_smmu_ctx();
- int disable_htw = 1;
int atomic_ctx = 1;
int bypass = 1;
int fast = 1;
@@ -5102,18 +5112,6 @@ static int ipa_smmu_uc_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- IPADBG("UC CB PROBE sub pdev=%p disable htw\n", dev);
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- }
-
IPADBG("UC CB PROBE sub pdev=%p set attribute\n", dev);
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
@@ -5168,7 +5166,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
{
struct ipa_smmu_cb_ctx *cb = ipa3_get_smmu_ctx();
int result;
- int disable_htw = 1;
int atomic_ctx = 1;
int fast = 1;
int bypass = 1;
@@ -5216,17 +5213,6 @@ static int ipa_smmu_ap_cb_probe(struct device *dev)
IPADBG("SMMU mapping created\n");
cb->valid = true;
- if (smmu_info.disable_htw) {
- if (iommu_domain_set_attr(cb->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw)) {
- IPAERR("couldn't disable coherent HTW\n");
- arm_iommu_release_mapping(cb->mapping);
- cb->valid = false;
- return -EIO;
- }
- IPADBG("SMMU disable HTW\n");
- }
if (smmu_info.s1_bypass) {
if (iommu_domain_set_attr(cb->mapping->domain,
DOMAIN_ATTR_S1_BYPASS,
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
index 3915f652d87b..25e5e3b74f26 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_debugfs.c
@@ -1478,6 +1478,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
u16 enable, tbl_entry, flag;
u32 no_entrys = 0;
+ mutex_lock(&ipa3_ctx->nat_mem.lock);
value = ipa3_ctx->nat_mem.public_ip_addr;
pr_err(
"Table IP Address:%d.%d.%d.%d\n",
@@ -1631,6 +1632,7 @@ static ssize_t ipa3_read_nat4(struct file *file,
}
}
pr_err("Current No. Nat Entries: %d\n", no_entrys);
+ mutex_unlock(&ipa3_ctx->nat_mem.lock);
return 0;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 643e40402499..94e8bba1fe01 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -3180,22 +3180,20 @@ static int ipa3_assign_policy(struct ipa_sys_connect_params *in,
IPA_CLIENT_APPS_WAN_CONS) {
sys->pyld_hdlr = ipa3_wan_rx_pyld_hdlr;
sys->free_rx_wrapper = ipa3_free_rx_wrapper;
- if (in->recycle_enabled) {
+ sys->rx_pool_sz = ipa3_ctx->wan_rx_ring_size;
+ if (nr_cpu_ids > 1) {
sys->repl_hdlr =
- ipa3_replenish_rx_cache_recycle;
- sys->rx_pool_sz =
- IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ ipa3_fast_replenish_rx_cache;
} else {
- if (nr_cpu_ids > 1) {
- sys->repl_hdlr =
- ipa3_fast_replenish_rx_cache;
- } else {
- sys->repl_hdlr =
- ipa3_replenish_rx_cache;
- }
- sys->rx_pool_sz =
- ipa3_ctx->wan_rx_ring_size;
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache;
}
+ if (in->napi_enabled)
+ sys->rx_pool_sz =
+ IPA_WAN_NAPI_CONS_RX_POOL_SZ;
+ if (in->napi_enabled && in->recycle_enabled)
+ sys->repl_hdlr =
+ ipa3_replenish_rx_cache_recycle;
in->ipa_ep_cfg.aggr.aggr_sw_eof_active
= true;
if (ipa3_ctx->
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
index 6f86448319db..8e85822d9719 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_i.h
@@ -481,7 +481,7 @@ struct ipa_gsi_ep_mem_info {
struct ipa3_status_stats {
struct ipahal_pkt_status status[IPA_MAX_STATUS_STAT_NUM];
- int curr;
+ unsigned int curr;
};
/**
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
index bf8a5ade04bd..a6b075583162 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_qmi_service.c
@@ -582,6 +582,8 @@ static int ipa3_qmi_init_modem_send_sync_msg(void)
resp_desc.ei_array = ipa3_init_modem_driver_resp_msg_data_v01_ei;
pr_info("Sending QMI_IPA_INIT_MODEM_DRIVER_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_REQ_TIMEOUT_MS);
@@ -623,6 +625,8 @@ int ipa3_qmi_filter_request_send(struct ipa_install_fltr_rule_req_msg_v01 *req)
resp_desc.msg_id = QMI_IPA_INSTALL_FILTER_RULE_RESP_V01;
resp_desc.ei_array = ipa3_install_fltr_rule_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc,
req,
sizeof(struct ipa_install_fltr_rule_req_msg_v01),
@@ -703,6 +707,8 @@ int ipa3_qmi_enable_force_clear_datapath_send(
resp_desc.ei_array =
ipa3_enable_force_clear_datapath_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -746,7 +752,8 @@ int ipa3_qmi_disable_force_clear_datapath_send(
resp_desc.msg_id = QMI_IPA_DISABLE_FORCE_CLEAR_DATAPATH_RESP_V01;
resp_desc.ei_array =
ipa3_disable_force_clear_datapath_resp_msg_data_v01_ei;
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -803,6 +810,8 @@ int ipa3_qmi_filter_notify_send(
resp_desc.msg_id = QMI_IPA_FILTER_INSTALLED_NOTIF_RESP_V01;
resp_desc.ei_array = ipa3_fltr_installed_notif_resp_msg_data_v01_ei;
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt,
&req_desc,
req,
@@ -1213,6 +1222,8 @@ int ipa3_qmi_get_data_stats(struct ipa_get_data_stats_req_msg_v01 *req,
IPAWANDBG_LOW("Sending QMI_IPA_GET_DATA_STATS_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1242,6 +1253,8 @@ int ipa3_qmi_get_network_stats(struct ipa_get_apn_data_stats_req_msg_v01 *req,
IPAWANDBG_LOW("Sending QMI_IPA_GET_APN_DATA_STATS_REQ_V01\n");
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_get_apn_data_stats_req_msg_v01),
&resp_desc, resp,
@@ -1273,7 +1286,8 @@ int ipa3_qmi_set_data_quota(struct ipa_set_data_usage_quota_req_msg_v01 *req)
resp_desc.ei_array = ipa3_set_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG_LOW("Sending QMI_IPA_SET_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, req,
sizeof(struct ipa_set_data_usage_quota_req_msg_v01),
&resp_desc, &resp, sizeof(resp),
@@ -1307,7 +1321,8 @@ int ipa3_qmi_stop_data_qouta(void)
resp_desc.ei_array = ipa3_stop_data_usage_quota_resp_msg_data_v01_ei;
IPAWANDBG_LOW("Sending QMI_IPA_STOP_DATA_USAGE_QUOTA_REQ_V01\n");
-
+ if (unlikely(!ipa_q6_clnt))
+ return -ETIMEDOUT;
rc = qmi_send_req_wait(ipa_q6_clnt, &req_desc, &req, sizeof(req),
&resp_desc, &resp, sizeof(resp),
QMI_SEND_STATS_REQ_TIMEOUT_MS);
diff --git a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
index a2fef45cc55f..f134852e046e 100644
--- a/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/rmnet_ipa.c
@@ -1271,11 +1271,13 @@ static int handle3_ingress_format(struct net_device *dev,
ipa_wan_ep_cfg->ipa_ep_cfg.aggr.aggr_pkt_limit =
in->u.ingress_format.agg_count;
- ipa_wan_ep_cfg->recycle_enabled = true;
- ep_cfg = (struct rmnet_phys_ep_conf_s *)
- rcu_dereference(dev->rx_handler_data);
- ep_cfg->recycle = ipa_recycle_wan_skb;
- pr_info("Wan Recycle Enabled\n");
+ if (ipa_wan_ep_cfg->napi_enabled) {
+ ipa_wan_ep_cfg->recycle_enabled = true;
+ ep_cfg = (struct rmnet_phys_ep_conf_s *)
+ rcu_dereference(dev->rx_handler_data);
+ ep_cfg->recycle = ipa_recycle_wan_skb;
+ pr_info("Wan Recycle Enabled\n");
+ }
}
}
@@ -1969,9 +1971,9 @@ static int get_ipa_rmnet_dts_configuration(struct platform_device *pdev,
ipa_rmnet_drv_res->ipa_advertise_sg_support ? "True" : "False");
ipa_rmnet_drv_res->ipa_napi_enable =
- of_property_read_bool(pdev->dev.of_node,
- "qcom,napi");
- pr_info("IPA napi = %s\n",
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,ipa-napi-enable");
+ pr_info("IPA Napi Enable = %s\n",
ipa_rmnet_drv_res->ipa_napi_enable ? "True" : "False");
return 0;
}
diff --git a/drivers/platform/msm/msm_11ad/msm_11ad.c b/drivers/platform/msm/msm_11ad/msm_11ad.c
index 6d826590cabc..45fedfa72bda 100644
--- a/drivers/platform/msm/msm_11ad/msm_11ad.c
+++ b/drivers/platform/msm/msm_11ad/msm_11ad.c
@@ -569,7 +569,6 @@ err_disable_vregs:
static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
{
- int disable_htw = 1;
int atomic_ctx = 1;
int rc;
int bypass_enable = 1;
@@ -587,17 +586,6 @@ static int msm_11ad_smmu_init(struct msm11ad_ctx *ctx)
dev_info(ctx->dev, "IOMMU mapping created: %p\n", ctx->mapping);
rc = iommu_domain_set_attr(ctx->mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (rc) {
- /* This error can be ignored and not considered fatal,
- * but let the users know this happened
- */
- dev_err(ctx->dev, "Warning: disable coherent HTW failed (%d)\n",
- rc);
- }
-
- rc = iommu_domain_set_attr(ctx->mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (rc) {
diff --git a/drivers/power/power_supply_sysfs.c b/drivers/power/power_supply_sysfs.c
index 545a1e684b25..8af1eb66c699 100644
--- a/drivers/power/power_supply_sysfs.c
+++ b/drivers/power/power_supply_sysfs.c
@@ -269,6 +269,7 @@ static struct device_attribute power_supply_attrs[] = {
POWER_SUPPLY_ATTR(pd_active),
POWER_SUPPLY_ATTR(charger_temp),
POWER_SUPPLY_ATTR(charger_temp_max),
+ POWER_SUPPLY_ATTR(parallel_disable),
/* Local extensions of type int64_t */
POWER_SUPPLY_ATTR(charge_counter_ext),
/* Properties of type `const char *' */
diff --git a/drivers/power/qcom-charger/fg-core.h b/drivers/power/qcom-charger/fg-core.h
index 515f31a44ce7..adc640c7afe1 100644
--- a/drivers/power/qcom-charger/fg-core.h
+++ b/drivers/power/qcom-charger/fg-core.h
@@ -23,6 +23,7 @@
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/mutex.h>
+#include <linux/power_supply.h>
#include <linux/regmap.h>
#include <linux/slab.h>
#include <linux/string_helpers.h>
@@ -38,6 +39,7 @@
pr_debug(fmt, ##__VA_ARGS__); \
} while (0)
+/* Awake votable reasons */
#define SRAM_READ "fg_sram_read"
#define SRAM_WRITE "fg_sram_write"
#define PROFILE_LOAD "fg_profile_load"
@@ -54,9 +56,14 @@
CHARS_PER_ITEM) + 1) \
#define FG_SRAM_ADDRESS_MAX 255
+#define PROFILE_LEN 224
+#define PROFILE_COMP_LEN 148
#define BUCKET_COUNT 8
#define BUCKET_SOC_PCT (256 / BUCKET_COUNT)
+#define KI_COEFF_MAX 62200
+#define KI_COEFF_SOC_LEVELS 3
+
/* Debug flag definitions */
enum fg_debug_flag {
FG_IRQ = BIT(0), /* Show interrupts */
@@ -66,6 +73,7 @@ enum fg_debug_flag {
FG_SRAM_READ = BIT(4), /* Show SRAM reads */
FG_BUS_WRITE = BIT(5), /* Show REGMAP writes */
FG_BUS_READ = BIT(6), /* Show REGMAP reads */
+ FG_CAP_LEARN = BIT(7), /* Show capacity learning */
};
/* SRAM access */
@@ -117,10 +125,15 @@ enum fg_sram_param_id {
FG_SRAM_OCV,
FG_SRAM_RSLOW,
FG_SRAM_ALG_FLAGS,
+ FG_SRAM_CC_SOC,
+ FG_SRAM_CC_SOC_SW,
+ FG_SRAM_ACT_BATT_CAP,
/* Entries below here are configurable during initialization */
FG_SRAM_CUTOFF_VOLT,
FG_SRAM_EMPTY_VOLT,
FG_SRAM_VBATT_LOW,
+ FG_SRAM_FLOAT_VOLT,
+ FG_SRAM_VBATT_FULL,
FG_SRAM_ESR_TIMER_DISCHG_MAX,
FG_SRAM_ESR_TIMER_DISCHG_INIT,
FG_SRAM_ESR_TIMER_CHG_MAX,
@@ -129,6 +142,8 @@ enum fg_sram_param_id {
FG_SRAM_CHG_TERM_CURR,
FG_SRAM_DELTA_SOC_THR,
FG_SRAM_RECHARGE_SOC_THR,
+ FG_SRAM_KI_COEFF_MED_DISCHG,
+ FG_SRAM_KI_COEFF_HI_DISCHG,
FG_SRAM_MAX,
};
@@ -165,6 +180,8 @@ struct fg_alg_flag {
/* DT parameters for FG device */
struct fg_dt_props {
+ bool force_load_profile;
+ bool hold_soc_while_full;
int cutoff_volt_mv;
int empty_volt_mv;
int vbatt_low_thr_mv;
@@ -177,6 +194,18 @@ struct fg_dt_props {
int esr_timer_charging;
int esr_timer_awake;
int esr_timer_asleep;
+ int cl_start_soc;
+ int cl_max_temp;
+ int cl_min_temp;
+ int cl_max_cap_inc;
+ int cl_max_cap_dec;
+ int cl_max_cap_limit;
+ int cl_min_cap_limit;
+ int jeita_hyst_temp;
+ int batt_temp_delta;
+ int ki_coeff_soc[KI_COEFF_SOC_LEVELS];
+ int ki_coeff_med_dischg[KI_COEFF_SOC_LEVELS];
+ int ki_coeff_hi_dischg[KI_COEFF_SOC_LEVELS];
};
/* parameters from battery profile */
@@ -184,6 +213,7 @@ struct fg_batt_props {
const char *batt_type_str;
char *batt_profile;
int float_volt_uv;
+ int vbatt_full_mv;
int fastchg_curr_ma;
int batt_id_kohm;
};
@@ -197,11 +227,21 @@ struct fg_cyc_ctr_data {
struct mutex lock;
};
+struct fg_cap_learning {
+ bool active;
+ int init_cc_soc_sw;
+ int64_t nom_cap_uah;
+ int64_t init_cc_uah;
+ int64_t final_cc_uah;
+ int64_t learned_cc_uah;
+ struct mutex lock;
+};
+
struct fg_irq_info {
const char *name;
const irq_handler_t handler;
- int irq;
bool wakeable;
+ int irq;
};
struct fg_chip {
@@ -211,34 +251,44 @@ struct fg_chip {
struct dentry *dfs_root;
struct power_supply *fg_psy;
struct power_supply *batt_psy;
+ struct power_supply *usb_psy;
+ struct power_supply *dc_psy;
struct iio_channel *batt_id_chan;
struct fg_memif *sram;
struct fg_irq_info *irqs;
struct votable *awake_votable;
struct fg_sram_param *sp;
+ struct fg_alg_flag *alg_flags;
int *debug_mask;
- char *batt_profile;
+ char batt_profile[PROFILE_LEN];
struct fg_dt_props dt;
struct fg_batt_props bp;
struct fg_cyc_ctr_data cyc_ctr;
struct notifier_block nb;
+ struct fg_cap_learning cl;
struct mutex bus_lock;
struct mutex sram_rw_lock;
u32 batt_soc_base;
u32 batt_info_base;
u32 mem_if_base;
- int nom_cap_uah;
+ int batt_id;
int status;
- int prev_status;
- bool batt_id_avail;
+ int charge_done;
+ int last_soc;
+ int last_batt_temp;
+ int health;
+ bool profile_available;
bool profile_loaded;
bool battery_missing;
+ bool fg_restarting;
+ bool charge_full;
+ bool recharge_soc_adjusted;
+ bool ki_coeff_dischg_en;
struct completion soc_update;
struct completion soc_ready;
struct delayed_work profile_load_work;
struct work_struct status_change_work;
struct work_struct cycle_count_work;
- struct fg_alg_flag *alg_flags;
};
/* Debugfs data structures are below */
@@ -287,4 +337,5 @@ extern int fg_debugfs_create(struct fg_chip *chip);
extern void fill_string(char *str, size_t str_len, u8 *buf, int buf_len);
extern int64_t twos_compliment_extend(int64_t val, int s_bit_pos);
extern s64 fg_float_decode(u16 val);
+extern bool is_input_present(struct fg_chip *chip);
#endif
diff --git a/drivers/power/qcom-charger/fg-reg.h b/drivers/power/qcom-charger/fg-reg.h
index 9d5874340a8e..431e28a7eb1f 100644
--- a/drivers/power/qcom-charger/fg-reg.h
+++ b/drivers/power/qcom-charger/fg-reg.h
@@ -126,6 +126,7 @@
/* BATT_INFO_BATT_TEMP_CFG */
#define JEITA_TEMP_HYST_MASK GENMASK(5, 4)
+#define JEITA_TEMP_HYST_SHIFT 4
#define JEITA_TEMP_NO_HYST 0x0
#define JEITA_TEMP_HYST_1C 0x1
#define JEITA_TEMP_HYST_2C 0x2
diff --git a/drivers/power/qcom-charger/fg-util.c b/drivers/power/qcom-charger/fg-util.c
index bf5a446452a4..bbdbe48896d7 100644
--- a/drivers/power/qcom-charger/fg-util.c
+++ b/drivers/power/qcom-charger/fg-util.c
@@ -29,6 +29,43 @@ static struct fg_dbgfs dbgfs_data = {
},
};
+static bool is_usb_present(struct fg_chip *chip)
+{
+ union power_supply_propval pval = {0, };
+
+ if (!chip->usb_psy)
+ chip->usb_psy = power_supply_get_by_name("usb");
+
+ if (chip->usb_psy)
+ power_supply_get_property(chip->usb_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ else
+ return false;
+
+ return pval.intval != 0;
+}
+
+static bool is_dc_present(struct fg_chip *chip)
+{
+ union power_supply_propval pval = {0, };
+
+ if (!chip->dc_psy)
+ chip->dc_psy = power_supply_get_by_name("dc");
+
+ if (chip->dc_psy)
+ power_supply_get_property(chip->dc_psy,
+ POWER_SUPPLY_PROP_PRESENT, &pval);
+ else
+ return false;
+
+ return pval.intval != 0;
+}
+
+bool is_input_present(struct fg_chip *chip)
+{
+ return is_usb_present(chip) || is_dc_present(chip);
+}
+
#define EXPONENT_SHIFT 11
#define EXPONENT_OFFSET -9
#define MANTISSA_SIGN_BIT 10
@@ -83,6 +120,9 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
@@ -95,6 +135,7 @@ int fg_sram_write(struct fg_chip *chip, u16 address, u8 offset,
* This interrupt needs to be enabled only when it is
* required. It will be kept disabled at other times.
*/
+ reinit_completion(&chip->soc_update);
enable_irq(chip->irqs[SOC_UPDATE_IRQ].irq);
atomic_access = true;
} else {
@@ -147,6 +188,9 @@ int fg_sram_read(struct fg_chip *chip, u16 address, u8 offset,
if (!chip)
return -ENXIO;
+ if (chip->battery_missing)
+ return -ENODATA;
+
if (!fg_sram_address_valid(address, len))
return -EFAULT;
diff --git a/drivers/power/qcom-charger/qpnp-fg-gen3.c b/drivers/power/qcom-charger/qpnp-fg-gen3.c
index 7739952f3254..30408218b7e7 100644
--- a/drivers/power/qcom-charger/qpnp-fg-gen3.c
+++ b/drivers/power/qcom-charger/qpnp-fg-gen3.c
@@ -17,7 +17,6 @@
#include <linux/of_platform.h>
#include <linux/of_batterydata.h>
#include <linux/platform_device.h>
-#include <linux/power_supply.h>
#include <linux/iio/consumer.h>
#include <linux/qpnp/qpnp-revid.h>
#include "fg-core.h"
@@ -35,6 +34,14 @@
#define CUTOFF_VOLT_OFFSET 0
#define SYS_TERM_CURR_WORD 6
#define SYS_TERM_CURR_OFFSET 0
+#define VBATT_FULL_WORD 7
+#define VBATT_FULL_OFFSET 0
+#define KI_COEFF_MED_DISCHG_WORD 9
+#define KI_COEFF_MED_DISCHG_OFFSET 3
+#define KI_COEFF_HI_DISCHG_WORD 10
+#define KI_COEFF_HI_DISCHG_OFFSET 0
+#define KI_COEFF_LOW_DISCHG_WORD 10
+#define KI_COEFF_LOW_DISCHG_OFFSET 2
#define DELTA_SOC_THR_WORD 12
#define DELTA_SOC_THR_OFFSET 3
#define RECHARGE_SOC_THR_WORD 14
@@ -63,14 +70,22 @@
#define PROFILE_INTEGRITY_OFFSET 3
#define BATT_SOC_WORD 91
#define BATT_SOC_OFFSET 0
+#define FULL_SOC_WORD 93
+#define FULL_SOC_OFFSET 2
#define MONOTONIC_SOC_WORD 94
#define MONOTONIC_SOC_OFFSET 2
+#define CC_SOC_WORD 95
+#define CC_SOC_OFFSET 0
+#define CC_SOC_SW_WORD 96
+#define CC_SOC_SW_OFFSET 0
#define VOLTAGE_PRED_WORD 97
#define VOLTAGE_PRED_OFFSET 0
#define OCV_WORD 97
#define OCV_OFFSET 2
#define RSLOW_WORD 101
#define RSLOW_OFFSET 0
+#define ACT_BATT_CAP_WORD 117
+#define ACT_BATT_CAP_OFFSET 0
#define LAST_BATT_SOC_WORD 119
#define LAST_BATT_SOC_OFFSET 0
#define LAST_MONOTONIC_SOC_WORD 119
@@ -79,6 +94,12 @@
#define ALG_FLAGS_OFFSET 1
/* v2 SRAM address and offset in ascending order */
+#define KI_COEFF_LOW_DISCHG_v2_WORD 9
+#define KI_COEFF_LOW_DISCHG_v2_OFFSET 3
+#define KI_COEFF_MED_DISCHG_v2_WORD 10
+#define KI_COEFF_MED_DISCHG_v2_OFFSET 0
+#define KI_COEFF_HI_DISCHG_v2_WORD 10
+#define KI_COEFF_HI_DISCHG_v2_OFFSET 1
#define DELTA_SOC_THR_v2_WORD 13
#define DELTA_SOC_THR_v2_OFFSET 0
#define RECHARGE_SOC_THR_v2_WORD 14
@@ -89,17 +110,21 @@
#define EMPTY_VOLT_v2_OFFSET 3
#define VBATT_LOW_v2_WORD 16
#define VBATT_LOW_v2_OFFSET 0
+#define FLOAT_VOLT_v2_WORD 16
+#define FLOAT_VOLT_v2_OFFSET 2
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int val);
static int fg_decode_value_16b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
static int fg_decode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val);
-static int fg_decode_batt_soc(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val);
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value);
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_mv, u8 *buf);
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf);
+ enum fg_sram_param_id id, int val_ma, u8 *buf);
static void fg_encode_default(struct fg_sram_param *sp,
enum fg_sram_param_id id, int val, u8 *buf);
@@ -118,15 +143,21 @@ static void fg_encode_default(struct fg_sram_param *sp,
static struct fg_sram_param pmicobalt_v1_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
- fg_decode_batt_soc),
+ fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
+ NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -134,11 +165,13 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
-2500, fg_encode_voltage, NULL),
PARAM(VBATT_LOW, VBATT_LOW_WORD, VBATT_LOW_OFFSET, 1, 100000, 390625,
-2500, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_WORD, CHG_TERM_CURR_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
- PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 256,
+ PARAM(DELTA_SOC_THR, DELTA_SOC_THR_WORD, DELTA_SOC_THR_OFFSET, 1, 2048,
100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_WORD, RECHARGE_SOC_THR_OFFSET,
1, 256, 100, 0, fg_encode_default, NULL),
@@ -152,19 +185,31 @@ static struct fg_sram_param pmicobalt_v1_sram_params[] = {
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_WORD,
+ KI_COEFF_MED_DISCHG_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
+ PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_WORD,
+ KI_COEFF_HI_DISCHG_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
};
static struct fg_sram_param pmicobalt_v2_sram_params[] = {
PARAM(BATT_SOC, BATT_SOC_WORD, BATT_SOC_OFFSET, 4, 1, 1, 0, NULL,
- fg_decode_batt_soc),
+ fg_decode_default),
PARAM(VOLTAGE_PRED, VOLTAGE_PRED_WORD, VOLTAGE_PRED_OFFSET, 2, 244141,
- 1000, 0, NULL, fg_decode_value_16b),
+ 1000, 0, NULL, fg_decode_voltage_15b),
PARAM(OCV, OCV_WORD, OCV_OFFSET, 2, 244141, 1000, 0, NULL,
- fg_decode_value_16b),
+ fg_decode_voltage_15b),
PARAM(RSLOW, RSLOW_WORD, RSLOW_OFFSET, 2, 244141, 1000, 0, NULL,
fg_decode_value_16b),
PARAM(ALG_FLAGS, ALG_FLAGS_WORD, ALG_FLAGS_OFFSET, 1, 1, 1, 0, NULL,
fg_decode_default),
+ PARAM(CC_SOC, CC_SOC_WORD, CC_SOC_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(CC_SOC_SW, CC_SOC_SW_WORD, CC_SOC_SW_OFFSET, 4, 1, 1, 0, NULL,
+ fg_decode_cc_soc),
+ PARAM(ACT_BATT_CAP, ACT_BATT_CAP_WORD, ACT_BATT_CAP_OFFSET, 2, 1, 1, 0,
+ NULL, fg_decode_default),
/* Entries below here are configurable during initialization */
PARAM(CUTOFF_VOLT, CUTOFF_VOLT_WORD, CUTOFF_VOLT_OFFSET, 2, 1000000,
244141, 0, fg_encode_voltage, NULL),
@@ -172,12 +217,16 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
15625, -2000, fg_encode_voltage, NULL),
PARAM(VBATT_LOW, VBATT_LOW_v2_WORD, VBATT_LOW_v2_OFFSET, 1, 1000,
15625, -2000, fg_encode_voltage, NULL),
+ PARAM(FLOAT_VOLT, FLOAT_VOLT_v2_WORD, FLOAT_VOLT_v2_OFFSET, 1, 1000,
+ 15625, -2000, fg_encode_voltage, NULL),
+ PARAM(VBATT_FULL, VBATT_FULL_WORD, VBATT_FULL_OFFSET, 2, 1000000,
+ 244141, 0, fg_encode_voltage, NULL),
PARAM(SYS_TERM_CURR, SYS_TERM_CURR_WORD, SYS_TERM_CURR_OFFSET, 3,
1000000, 122070, 0, fg_encode_current, NULL),
PARAM(CHG_TERM_CURR, CHG_TERM_CURR_v2_WORD, CHG_TERM_CURR_v2_OFFSET, 1,
100000, 390625, 0, fg_encode_current, NULL),
PARAM(DELTA_SOC_THR, DELTA_SOC_THR_v2_WORD, DELTA_SOC_THR_v2_OFFSET, 1,
- 256, 100, 0, fg_encode_default, NULL),
+ 2048, 100, 0, fg_encode_default, NULL),
PARAM(RECHARGE_SOC_THR, RECHARGE_SOC_THR_v2_WORD,
RECHARGE_SOC_THR_v2_OFFSET, 1, 256, 100, 0, fg_encode_default,
NULL),
@@ -191,6 +240,12 @@ static struct fg_sram_param pmicobalt_v2_sram_params[] = {
ESR_TIMER_CHG_MAX_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
PARAM(ESR_TIMER_CHG_INIT, ESR_TIMER_CHG_INIT_WORD,
ESR_TIMER_CHG_INIT_OFFSET, 2, 1, 1, 0, fg_encode_default, NULL),
+ PARAM(KI_COEFF_MED_DISCHG, KI_COEFF_MED_DISCHG_v2_WORD,
+ KI_COEFF_MED_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
+ PARAM(KI_COEFF_HI_DISCHG, KI_COEFF_HI_DISCHG_v2_WORD,
+ KI_COEFF_HI_DISCHG_v2_OFFSET, 1, 1000, 244141, 0,
+ fg_encode_default, NULL),
};
static struct fg_alg_flag pmicobalt_v1_alg_flags[] = {
@@ -264,21 +319,40 @@ module_param_named(
sram_update_period_ms, fg_sram_update_period_ms, int, S_IRUSR | S_IWUSR
);
+static bool fg_sram_dump;
+module_param_named(
+ sram_dump, fg_sram_dump, bool, S_IRUSR | S_IWUSR
+);
+
+static int fg_restart;
+
/* All getters HERE */
-static int fg_decode_value_16b(struct fg_sram_param *sp,
+#define VOLTAGE_15BIT_MASK GENMASK(14, 0)
+static int fg_decode_voltage_15b(struct fg_sram_param *sp,
enum fg_sram_param_id id, int value)
{
- sp[id].value = div_u64((u64)(u16)value * sp[id].numrtr, sp[id].denmtr);
+ value &= VOLTAGE_15BIT_MASK;
+ sp[id].value = div_u64((u64)value * sp[id].numrtr, sp[id].denmtr);
pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
sp[id].value);
return sp[id].value;
}
-static int fg_decode_batt_soc(struct fg_sram_param *sp,
+static int fg_decode_cc_soc(struct fg_sram_param *sp,
enum fg_sram_param_id id, int value)
{
- sp[id].value = (u32)value >> 24;
+ sp[id].value = div_s64((s64)value * sp[id].numrtr, sp[id].denmtr);
+ sp[id].value = sign_extend32(sp[id].value, 31);
+ pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
+ sp[id].value);
+ return sp[id].value;
+}
+
+static int fg_decode_value_16b(struct fg_sram_param *sp,
+ enum fg_sram_param_id id, int value)
+{
+ sp[id].value = div_u64((u64)(u16)value * sp[id].numrtr, sp[id].denmtr);
pr_debug("id: %d raw value: %x decoded value: %x\n", id, value,
sp[id].value);
return sp[id].value;
@@ -287,7 +361,8 @@ static int fg_decode_batt_soc(struct fg_sram_param *sp,
static int fg_decode_default(struct fg_sram_param *sp, enum fg_sram_param_id id,
int value)
{
- return value;
+ sp[id].value = value;
+ return sp[id].value;
}
static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
@@ -302,14 +377,14 @@ static int fg_decode(struct fg_sram_param *sp, enum fg_sram_param_id id,
}
static void fg_encode_voltage(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_mv, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
- val += sp[id].offset;
- temp = (int64_t)div_u64((u64)val * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ val_mv += sp[id].offset;
+ temp = (int64_t)div_u64((u64)val_mv * sp[id].numrtr, sp[id].denmtr);
+ pr_debug("temp: %llx id: %d, val_mv: %d, buf: [ ", temp, id, val_mv);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
@@ -319,15 +394,15 @@ static void fg_encode_voltage(struct fg_sram_param *sp,
}
static void fg_encode_current(struct fg_sram_param *sp,
- enum fg_sram_param_id id, int val, u8 *buf)
+ enum fg_sram_param_id id, int val_ma, u8 *buf)
{
int i, mask = 0xff;
int64_t temp;
s64 current_ma;
- current_ma = val;
+ current_ma = val_ma;
temp = (int64_t)div_s64(current_ma * sp[id].numrtr, sp[id].denmtr);
- pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val);
+ pr_debug("temp: %llx id: %d, val: %d, buf: [ ", temp, id, val_ma);
for (i = 0; i < sp[id].len; i++) {
buf[i] = temp & mask;
temp >>= 8;
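
The encode helpers implement plain fixed-point scaling: raw = (value + offset) * numrtr / denmtr, emitted LSB first across len bytes. Worked example using the v1 CUTOFF_VOLT row (numrtr 1000000, denmtr 244141, offset 0, len 2, i.e. ~244.141 uV per LSB); the 3.4 V input is illustrative, not from the patch:

    int val_mv = 3400;				/* 3.4 V cutoff */
    s64 raw = div_u64((u64)val_mv * 1000000, 244141);
    /* raw == 13926 == 0x3666, stored as buf[] = { 0x66, 0x36 } */
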
@@ -378,6 +453,9 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
if (id < 0 || id > FG_SRAM_MAX || chip->sp[id].len > sizeof(buf))
return -EINVAL;
+ if (chip->battery_missing)
+ return -ENODATA;
+
rc = fg_sram_read(chip, chip->sp[id].addr_word, chip->sp[id].addr_byte,
buf, chip->sp[id].len, FG_IMA_DEFAULT);
if (rc < 0) {
@@ -393,6 +471,35 @@ static int fg_get_sram_prop(struct fg_chip *chip, enum fg_sram_param_id id,
return 0;
}
+#define CC_SOC_30BIT GENMASK(29, 0)
+static int fg_get_cc_soc(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.nom_cap_uah, CC_SOC_30BIT);
+ return 0;
+}
+
+static int fg_get_cc_soc_sw(struct fg_chip *chip, int *val)
+{
+ int rc, cc_soc;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ *val = div_s64(cc_soc * chip->cl.learned_cc_uah, CC_SOC_30BIT);
+ return 0;
+}
+
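
CC_SOC and CC_SOC_SW are 30-bit signed coulomb-counter readings expressed as a fraction of full battery charge, which is why both getters scale a capacity by the raw value over CC_SOC_30BIT (GENMASK(29, 0)). Illustrative numbers:

    int cc_soc = 0x1fffffff;		/* about half of CC_SOC_30BIT */
    s64 nom_cap_uah = 3000000;		/* 3000 mAh nominal pack */
    s64 uah = div_s64((s64)cc_soc * nom_cap_uah, CC_SOC_30BIT);
    /* uah ~= 1500000: roughly half the pack has been counted */
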
#define BATT_TEMP_NUMR 1
#define BATT_TEMP_DENR 1
static int fg_get_battery_temp(struct fg_chip *chip, int *val)
@@ -543,7 +650,6 @@ static int fg_get_msoc_raw(struct fg_chip *chip, int *val)
}
fg_dbg(chip, FG_POWER_SUPPLY, "raw: 0x%02x\n", cap[0]);
-
*val = cap[0];
return 0;
}
@@ -554,6 +660,11 @@ static int fg_get_prop_capacity(struct fg_chip *chip, int *val)
{
int rc, msoc;
+ if (chip->charge_full) {
+ *val = FULL_CAPACITY;
+ return 0;
+ }
+
rc = fg_get_msoc_raw(chip, &msoc);
if (rc < 0)
return rc;
@@ -593,14 +704,12 @@ static int fg_get_batt_id(struct fg_chip *chip, int *val)
return rc;
}
- chip->batt_id_avail = true;
fg_dbg(chip, FG_STATUS, "batt_id: %d\n", batt_id);
*val = batt_id;
return 0;
}
-#define PROFILE_LEN 224
static int fg_get_batt_profile(struct fg_chip *chip)
{
struct device_node *node = chip->dev->of_node;
@@ -614,13 +723,14 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return rc;
}
+ batt_id /= 1000;
+ chip->batt_id = batt_id;
batt_node = of_find_node_by_name(node, "qcom,battery-data");
if (!batt_node) {
pr_err("Batterydata not available\n");
return -ENXIO;
}
- batt_id /= 1000;
profile_node = of_batterydata_get_best_profile(batt_node, batt_id,
NULL);
if (IS_ERR(profile_node))
@@ -652,6 +762,13 @@ static int fg_get_batt_profile(struct fg_chip *chip)
chip->bp.fastchg_curr_ma = -EINVAL;
}
+ rc = of_property_read_u32(profile_node, "qcom,fg-cc-cv-threshold-mv",
+ &chip->bp.vbatt_full_mv);
+ if (rc < 0) {
+ pr_err("battery cc_cv threshold unavailable, rc:%d\n", rc);
+ chip->bp.vbatt_full_mv = -EINVAL;
+ }
+
data = of_get_property(profile_node, "qcom,fg-profile-data", &len);
if (!data) {
pr_err("No profile data available\n");
@@ -663,6 +780,7 @@ static int fg_get_batt_profile(struct fg_chip *chip)
return -EINVAL;
}
+ chip->profile_available = true;
memcpy(chip->batt_profile, data, len);
return 0;
}
@@ -673,6 +791,27 @@ static inline void get_temp_setpoint(int threshold, u8 *val)
*val = DIV_ROUND_CLOSEST((threshold + 30) * 10, 5);
}
+static inline void get_batt_temp_delta(int delta, u8 *val)
+{
+ switch (delta) {
+ case 2:
+ *val = BTEMP_DELTA_2K;
+ break;
+ case 4:
+ *val = BTEMP_DELTA_4K;
+ break;
+ case 6:
+ *val = BTEMP_DELTA_6K;
+ break;
+ case 10:
+ *val = BTEMP_DELTA_10K;
+ break;
+ default:
+ *val = BTEMP_DELTA_2K;
+ break;
+	}
+}
+
static int fg_set_esr_timer(struct fg_chip *chip, int cycles, bool charging,
int flags)
{
@@ -739,38 +878,518 @@ static bool is_charger_available(struct fg_chip *chip)
return true;
}
+static int fg_save_learned_cap_to_sram(struct fg_chip *chip)
+{
+ int16_t cc_mah;
+ int rc;
+
+ if (chip->battery_missing || !chip->cl.learned_cc_uah)
+ return -EPERM;
+
+ cc_mah = div64_s64(chip->cl.learned_cc_uah, 1000);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_ACT_BATT_CAP].addr_word,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].addr_byte, (u8 *)&cc_mah,
+ chip->sp[FG_SRAM_ACT_BATT_CAP].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing act_batt_cap, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned capacity %llduah/%dmah stored\n",
+ chip->cl.learned_cc_uah, cc_mah);
+ return 0;
+}
+
+#define CAPACITY_DELTA_DECIPCT 500
+static int fg_load_learned_cap_from_sram(struct fg_chip *chip)
+{
+ int rc, act_cap_mah;
+ int64_t delta_cc_uah, pct_nom_cap_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_ACT_BATT_CAP, &act_cap_mah);
+ if (rc < 0) {
+ pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.learned_cc_uah = act_cap_mah * 1000;
+ if (chip->cl.learned_cc_uah == 0)
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+
+ if (chip->cl.learned_cc_uah != chip->cl.nom_cap_uah) {
+ delta_cc_uah = abs(chip->cl.learned_cc_uah -
+ chip->cl.nom_cap_uah);
+ pct_nom_cap_uah = div64_s64((int64_t)chip->cl.nom_cap_uah *
+ CAPACITY_DELTA_DECIPCT, 1000);
+ /*
+		 * If the learned capacity deviates from the nominal
+		 * capacity by more than 50%, overwrite the learned
+		 * capacity with the nominal capacity.
+ */
+ if (chip->cl.nom_cap_uah && delta_cc_uah > pct_nom_cap_uah) {
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah: %lld is higher than expected\n",
+ chip->cl.learned_cc_uah);
+ fg_dbg(chip, FG_CAP_LEARN, "Capping it to nominal:%lld\n",
+ chip->cl.nom_cap_uah);
+ chip->cl.learned_cc_uah = chip->cl.nom_cap_uah;
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n",
+ rc);
+ }
+ }
+
+ fg_dbg(chip, FG_CAP_LEARN, "learned_cc_uah:%lld nom_cap_uah: %lld\n",
+ chip->cl.learned_cc_uah, chip->cl.nom_cap_uah);
+ return 0;
+}
+
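
CAPACITY_DELTA_DECIPCT is in deci-percent, so 500 means a +/-50% acceptance window around the nominal capacity when restoring a learned value from SRAM. Worked example with illustrative numbers:

    s64 nom = 3000000, learned = 1400000;	/* uAh */
    s64 delta = abs(learned - nom);		/* 1600000 uAh */
    s64 window = div64_s64(nom * 500, 1000);	/* 1500000 uAh */
    /* delta > window, so the restored value is discarded and
     * learned_cc_uah is reset to the nominal capacity */
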
+static bool is_temp_valid_cap_learning(struct fg_chip *chip)
+{
+ int rc, batt_temp;
+
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return false;
+ }
+
+ if (batt_temp > chip->dt.cl_max_temp ||
+ batt_temp < chip->dt.cl_min_temp) {
+ fg_dbg(chip, FG_CAP_LEARN, "batt temp %d out of range [%d %d]\n",
+ batt_temp, chip->dt.cl_min_temp, chip->dt.cl_max_temp);
+ return false;
+ }
+
+ return true;
+}
+
+static void fg_cap_learning_post_process(struct fg_chip *chip)
+{
+ int64_t max_inc_val, min_dec_val, old_cap;
+ int rc;
+
+ max_inc_val = chip->cl.learned_cc_uah
+ * (1000 + chip->dt.cl_max_cap_inc);
+ do_div(max_inc_val, 1000);
+
+ min_dec_val = chip->cl.learned_cc_uah
+ * (1000 - chip->dt.cl_max_cap_dec);
+ do_div(min_dec_val, 1000);
+
+ old_cap = chip->cl.learned_cc_uah;
+ if (chip->cl.final_cc_uah > max_inc_val)
+ chip->cl.learned_cc_uah = max_inc_val;
+ else if (chip->cl.final_cc_uah < min_dec_val)
+ chip->cl.learned_cc_uah = min_dec_val;
+ else
+ chip->cl.learned_cc_uah =
+ chip->cl.final_cc_uah;
+
+ if (chip->dt.cl_max_cap_limit) {
+ max_inc_val = (int64_t)chip->cl.nom_cap_uah * (1000 +
+ chip->dt.cl_max_cap_limit);
+ do_div(max_inc_val, 1000);
+ if (chip->cl.final_cc_uah > max_inc_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes above max limit %lld\n",
+ chip->cl.final_cc_uah, max_inc_val);
+ chip->cl.learned_cc_uah = max_inc_val;
+ }
+ }
+
+ if (chip->dt.cl_min_cap_limit) {
+ min_dec_val = (int64_t)chip->cl.nom_cap_uah * (1000 -
+ chip->dt.cl_min_cap_limit);
+ do_div(min_dec_val, 1000);
+ if (chip->cl.final_cc_uah < min_dec_val) {
+ fg_dbg(chip, FG_CAP_LEARN, "learning capacity %lld goes below min limit %lld\n",
+ chip->cl.final_cc_uah, min_dec_val);
+ chip->cl.learned_cc_uah = min_dec_val;
+ }
+ }
+
+ rc = fg_save_learned_cap_to_sram(chip);
+ if (rc < 0)
+ pr_err("Error in saving learned_cc_uah, rc=%d\n", rc);
+
+ fg_dbg(chip, FG_CAP_LEARN, "final cc_uah = %lld, learned capacity %lld -> %lld uah\n",
+ chip->cl.final_cc_uah, old_cap, chip->cl.learned_cc_uah);
+}
+
+static int fg_cap_learning_process_full_data(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw, cc_soc_delta_pct;
+ int64_t delta_cc_uah;
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ cc_soc_delta_pct = DIV_ROUND_CLOSEST(
+ abs(cc_soc_sw - chip->cl.init_cc_soc_sw) * 100,
+ CC_SOC_30BIT);
+ delta_cc_uah = div64_s64(chip->cl.learned_cc_uah * cc_soc_delta_pct,
+ 100);
+ chip->cl.final_cc_uah = chip->cl.init_cc_uah + delta_cc_uah;
+ fg_dbg(chip, FG_CAP_LEARN, "Current cc_soc=%d cc_soc_delta_pct=%d total_cc_uah=%lld\n",
+ cc_soc_sw, cc_soc_delta_pct, chip->cl.final_cc_uah);
+ return 0;
+}
+
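
fg_cap_learning_process_full_data() computes the new estimate as the charge present when learning began plus the charge integrated while charging, with the integrated part read back from CC_SOC_SW as a percentage of its 30-bit full scale. Illustrative walk-through:

    s64 learned = 3000000;		/* uAh, current estimate */
    int batt_soc_msb = 64;		/* ~25% of FULL_SOC_RAW (255) */
    s64 init_cc = div64_s64(learned * batt_soc_msb, 255);	/* ~752941 */
    int delta_pct = 75;			/* cc_soc_sw advanced ~75% */
    s64 final_cc = init_cc + div64_s64(learned * delta_pct, 100);
    /* final_cc ~= 3002941 uAh, then clamped by the cl_max_* and
     * cl_min_* limits in fg_cap_learning_post_process() */
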
+static int fg_cap_learning_begin(struct fg_chip *chip, int batt_soc)
+{
+ int rc, cc_soc_sw;
+
+ if (DIV_ROUND_CLOSEST(batt_soc * 100, FULL_SOC_RAW) >
+ chip->dt.cl_start_soc) {
+ fg_dbg(chip, FG_CAP_LEARN, "Battery SOC %d is high!, not starting\n",
+ batt_soc);
+ return -EINVAL;
+ }
+
+ chip->cl.init_cc_uah = div64_s64(chip->cl.learned_cc_uah * batt_soc,
+ FULL_SOC_RAW);
+ rc = fg_get_sram_prop(chip, FG_SRAM_CC_SOC_SW, &cc_soc_sw);
+ if (rc < 0) {
+ pr_err("Error in getting CC_SOC_SW, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->cl.init_cc_soc_sw = cc_soc_sw;
+ chip->cl.active = true;
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning started @ battery SOC %d init_cc_soc_sw:%d\n",
+ batt_soc, chip->cl.init_cc_soc_sw);
+ return 0;
+}
+
+static int fg_cap_learning_done(struct fg_chip *chip)
+{
+ int rc, cc_soc_sw;
+
+ rc = fg_cap_learning_process_full_data(chip);
+ if (rc < 0) {
+ pr_err("Error in processing cap learning full data, rc=%d\n",
+ rc);
+ goto out;
+ }
+
+ /* Write a FULL value to cc_soc_sw */
+ cc_soc_sw = CC_SOC_30BIT;
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_CC_SOC_SW].addr_word,
+ chip->sp[FG_SRAM_CC_SOC_SW].addr_byte, (u8 *)&cc_soc_sw,
+ chip->sp[FG_SRAM_CC_SOC_SW].len, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("Error in writing cc_soc_sw, rc=%d\n", rc);
+ goto out;
+ }
+
+ fg_cap_learning_post_process(chip);
+out:
+ return rc;
+}
+
+#define FULL_SOC_RAW 255
+static void fg_cap_learning_update(struct fg_chip *chip)
+{
+ int rc, batt_soc;
+
+ mutex_lock(&chip->cl.lock);
+
+ if (!is_temp_valid_cap_learning(chip) || !chip->cl.learned_cc_uah ||
+ chip->battery_missing) {
+ fg_dbg(chip, FG_CAP_LEARN, "Aborting cap_learning %lld\n",
+ chip->cl.learned_cc_uah);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ goto out;
+ }
+
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &batt_soc);
+ if (rc < 0) {
+ pr_err("Error in getting ACT_BATT_CAP, rc=%d\n", rc);
+ goto out;
+ }
+
+ /* We need only the most significant byte here */
+ batt_soc = (u32)batt_soc >> 24;
+
+ fg_dbg(chip, FG_CAP_LEARN, "Chg_status: %d cl_active: %d batt_soc: %d\n",
+ chip->status, chip->cl.active, batt_soc);
+
+ /* Initialize the starting point of learning capacity */
+ if (!chip->cl.active) {
+ if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
+ rc = fg_cap_learning_begin(chip, batt_soc);
+ chip->cl.active = (rc == 0);
+ }
+
+ } else {
+ if (chip->charge_done) {
+ rc = fg_cap_learning_done(chip);
+ if (rc < 0)
+ pr_err("Error in completing capacity learning, rc=%d\n",
+ rc);
+
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_NOT_CHARGING) {
+ fg_dbg(chip, FG_CAP_LEARN, "Capacity learning aborted @ battery SOC %d\n",
+ batt_soc);
+ chip->cl.active = false;
+ chip->cl.init_cc_uah = 0;
+ }
+ }
+
+out:
+ mutex_unlock(&chip->cl.lock);
+}
+
+#define KI_COEFF_MED_DISCHG_DEFAULT 1500
+#define KI_COEFF_HI_DISCHG_DEFAULT 2200
+static int fg_adjust_ki_coeff_dischg(struct fg_chip *chip)
+{
+ int rc, i, msoc;
+ int ki_coeff_med = KI_COEFF_MED_DISCHG_DEFAULT;
+ int ki_coeff_hi = KI_COEFF_HI_DISCHG_DEFAULT;
+ u8 val;
+
+ if (!chip->ki_coeff_dischg_en)
+ return 0;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ if (chip->status == POWER_SUPPLY_STATUS_DISCHARGING) {
+ for (i = KI_COEFF_SOC_LEVELS - 1; i >= 0; i--) {
+ if (msoc < chip->dt.ki_coeff_soc[i]) {
+ ki_coeff_med = chip->dt.ki_coeff_med_dischg[i];
+ ki_coeff_hi = chip->dt.ki_coeff_hi_dischg[i];
+ }
+ }
+ }
+
+ fg_encode(chip->sp, FG_SRAM_KI_COEFF_MED_DISCHG, ki_coeff_med, &val);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_word,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].addr_byte, &val,
+ chip->sp[FG_SRAM_KI_COEFF_MED_DISCHG].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ki_coeff_med, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_encode(chip->sp, FG_SRAM_KI_COEFF_HI_DISCHG, ki_coeff_hi, &val);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_word,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].addr_byte, &val,
+ chip->sp[FG_SRAM_KI_COEFF_HI_DISCHG].len,
+ FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing ki_coeff_hi, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Wrote ki_coeff_med %d ki_coeff_hi %d\n",
+ ki_coeff_med, ki_coeff_hi);
+ return 0;
+}
+
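
The selection loop scans the ascending dt.ki_coeff_soc[] thresholds from the top down, overwriting the pick at every level the current SOC falls below, so it ends with the coefficients of the lowest matching bucket; while charging, or above all thresholds, the defaults stand. Example thresholds (not from the patch):

    int thr[KI_COEFF_SOC_LEVELS] = { 15, 30, 45 };	/* msoc = 20, discharging */
    /* i=2: 20 < 45 -> level 2;  i=1: 20 < 30 -> level 1;
     * i=0: 20 < 15 false -> level 1's ki coefficients are written */
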
+static int fg_charge_full_update(struct fg_chip *chip)
+{
+ union power_supply_propval prop = {0, };
+ int rc, msoc, bsoc, recharge_soc;
+ u8 full_soc[2] = {0xFF, 0xFF};
+
+ if (!chip->dt.hold_soc_while_full)
+ return 0;
+
+ if (!is_charger_available(chip))
+ return 0;
+
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ if (rc < 0) {
+ pr_err("Error in getting battery health, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->health = prop.intval;
+ recharge_soc = chip->dt.recharge_soc_thr;
+ recharge_soc = DIV_ROUND_CLOSEST(recharge_soc * FULL_SOC_RAW,
+ FULL_CAPACITY);
+ rc = fg_get_sram_prop(chip, FG_SRAM_BATT_SOC, &bsoc);
+ if (rc < 0) {
+ pr_err("Error in getting BATT_SOC, rc=%d\n", rc);
+ return rc;
+ }
+
+ /* We need 2 most significant bytes here */
+ bsoc = (u32)bsoc >> 16;
+ rc = fg_get_prop_capacity(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "msoc: %d health: %d status: %d\n", msoc,
+ chip->health, chip->status);
+ if (chip->charge_done) {
+ if (msoc >= 99 && chip->health == POWER_SUPPLY_HEALTH_GOOD)
+ chip->charge_full = true;
+ else
+ fg_dbg(chip, FG_STATUS, "Terminated charging @ SOC%d\n",
+ msoc);
+ } else if ((bsoc >> 8) <= recharge_soc) {
+ fg_dbg(chip, FG_STATUS, "bsoc: %d recharge_soc: %d\n",
+ bsoc >> 8, recharge_soc);
+ chip->charge_full = false;
+ }
+
+ if (!chip->charge_full)
+ return 0;
+
+ /*
+	 * Under JEITA conditions, charge_full can happen early. FULL_SOC
+	 * and MONOTONIC_SOC need to be updated to reflect this: write the
+	 * battery SOC to FULL_SOC and a full value to MONOTONIC_SOC.
+ */
+ rc = fg_sram_write(chip, FULL_SOC_WORD, FULL_SOC_OFFSET, (u8 *)&bsoc, 2,
+ FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write full_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ rc = fg_sram_write(chip, MONOTONIC_SOC_WORD, MONOTONIC_SOC_OFFSET,
+ full_soc, 2, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("failed to write monotonic_soc rc=%d\n", rc);
+ return rc;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Set charge_full to true @ soc %d\n", msoc);
+ return 0;
+}
+
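
FULL_SOC_RAW maps percent SOC onto the 8-bit raw domain used by the hardware, so the recharge threshold is rescaled before it is compared with the top byte of BATT_SOC. With an illustrative dt.recharge_soc_thr of 95:

    int raw_thr = DIV_ROUND_CLOSEST(95 * FULL_SOC_RAW, FULL_CAPACITY);
    /* raw_thr == 242; charge_full is cleared once (bsoc >> 8) <= 242,
     * i.e. when the battery drops to roughly 94.9% */
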
+static int fg_set_recharge_soc(struct fg_chip *chip, int recharge_soc)
+{
+ u8 buf[4];
+ int rc;
+
+ fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR, recharge_soc, buf);
+ rc = fg_sram_write(chip,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte, buf,
+ chip->sp[FG_SRAM_RECHARGE_SOC_THR].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing recharge_soc_thr, rc=%d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static int fg_adjust_recharge_soc(struct fg_chip *chip)
+{
+ int rc, msoc, recharge_soc, new_recharge_soc = 0;
+
+ recharge_soc = chip->dt.recharge_soc_thr;
+ /*
+	 * If the input is present and charging has terminated, adjust
+	 * the recharge SOC threshold based on the monotonic SOC at which
+	 * charge termination happened.
+ */
+ if (is_input_present(chip) && !chip->recharge_soc_adjusted
+ && chip->charge_done) {
+ /* Get raw monotonic SOC for calculation */
+ rc = fg_get_msoc_raw(chip, &msoc);
+ if (rc < 0) {
+ pr_err("Error in getting msoc, rc=%d\n", rc);
+ return rc;
+ }
+
+ msoc = DIV_ROUND_CLOSEST(msoc * FULL_CAPACITY, FULL_SOC_RAW);
+ /* Adjust the recharge_soc threshold */
+ new_recharge_soc = msoc - (FULL_CAPACITY - recharge_soc);
+ } else if (chip->recharge_soc_adjusted && (!is_input_present(chip)
+ || chip->health == POWER_SUPPLY_HEALTH_GOOD)) {
+ /* Restore the default value */
+ new_recharge_soc = recharge_soc;
+ }
+
+ if (new_recharge_soc > 0 && new_recharge_soc < FULL_CAPACITY) {
+ rc = fg_set_recharge_soc(chip, new_recharge_soc);
+ if (rc) {
+ pr_err("Couldn't set resume SOC for FG, rc=%d\n", rc);
+ return rc;
+ }
+
+ chip->recharge_soc_adjusted = (new_recharge_soc !=
+ recharge_soc);
+ fg_dbg(chip, FG_STATUS, "resume soc set to %d\n",
+ new_recharge_soc);
+ }
+
+ return 0;
+}
+
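
fg_adjust_recharge_soc() keeps the recharge gap constant when charging terminates early (e.g. under JEITA): the resume threshold slides down to sit the same distance below the actual termination SOC, and the default is restored once input is removed or health returns to good. Illustrative arithmetic:

    int thr = 95, msoc = 97;		/* early termination at 97% */
    int new_thr = msoc - (100 - thr);	/* 92: same 5% gap preserved */
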
static void status_change_work(struct work_struct *work)
{
struct fg_chip *chip = container_of(work,
struct fg_chip, status_change_work);
union power_supply_propval prop = {0, };
+ int rc;
if (!is_charger_available(chip)) {
fg_dbg(chip, FG_STATUS, "Charger not available?!\n");
- return;
+ goto out;
}
- power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
+ rc = power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_STATUS,
&prop);
- chip->prev_status = chip->status;
+ if (rc < 0) {
+ pr_err("Error in getting charging status, rc=%d\n", rc);
+ goto out;
+ }
+
chip->status = prop.intval;
+ rc = power_supply_get_property(chip->batt_psy,
+ POWER_SUPPLY_PROP_CHARGE_DONE, &prop);
+ if (rc < 0) {
+ pr_err("Error in getting charge_done, rc=%d\n", rc);
+ goto out;
+ }
- if (chip->cyc_ctr.en && chip->prev_status != chip->status)
+ chip->charge_done = prop.intval;
+ fg_dbg(chip, FG_POWER_SUPPLY, "curr_status:%d charge_done: %d\n",
+ chip->status, chip->charge_done);
+
+ if (chip->cyc_ctr.en)
schedule_work(&chip->cycle_count_work);
- switch (prop.intval) {
- case POWER_SUPPLY_STATUS_CHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Charging\n");
- break;
- case POWER_SUPPLY_STATUS_DISCHARGING:
- fg_dbg(chip, FG_POWER_SUPPLY, "Discharging\n");
- break;
- case POWER_SUPPLY_STATUS_FULL:
- fg_dbg(chip, FG_POWER_SUPPLY, "Full\n");
- break;
- default:
- break;
- }
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_recharge_soc(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting recharge_soc, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+out:
+ pm_relax(chip->dev);
}
static void restore_cycle_counter(struct fg_chip *chip)
@@ -853,6 +1472,9 @@ static void cycle_count_work(struct work_struct *work)
goto out;
}
+ /* We need only the most significant byte here */
+ batt_soc = (u32)batt_soc >> 24;
+
if (chip->status == POWER_SUPPLY_STATUS_CHARGING) {
/* Find out which bucket the SOC falls in */
bucket = batt_soc / BUCKET_SOC_PCT;
@@ -912,64 +1534,84 @@ static int fg_get_cycle_count(struct fg_chip *chip)
return count;
}
-#define PROFILE_COMP_LEN 32
-#define SOC_READY_WAIT_MS 2000
-static void profile_load_work(struct work_struct *work)
+static void dump_sram(u8 *buf, int len)
{
- struct fg_chip *chip = container_of(work,
- struct fg_chip,
- profile_load_work.work);
- int rc;
- u8 buf[PROFILE_COMP_LEN], val;
- bool tried_again = false, profiles_same = false;
+ int i;
+ char str[16];
- if (!chip->batt_id_avail) {
- pr_err("batt_id not available\n");
- return;
+ for (i = 0; i < len; i += 4) {
+ str[0] = '\0';
+ fill_string(str, sizeof(str), buf + i, 4);
+ pr_info("%03d %s\n", PROFILE_LOAD_WORD + (i / 4), str);
}
+}
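+
+/*
+ * dump_sram() is gated by the fg_sram_dump module parameter and prints
+ * one 4-byte SRAM word per line, addressed relative to PROFILE_LOAD_WORD.
+ */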
+
+static bool is_profile_load_required(struct fg_chip *chip)
+{
+ u8 buf[PROFILE_COMP_LEN], val;
+ bool profiles_same = false;
+ int rc;
rc = fg_sram_read(chip, PROFILE_INTEGRITY_WORD,
PROFILE_INTEGRITY_OFFSET, &val, 1, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("failed to read profile integrity rc=%d\n", rc);
- return;
+ return false;
}
- vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+ /* Check if integrity bit is set */
if (val == 0x01) {
fg_dbg(chip, FG_STATUS, "Battery profile integrity bit is set\n");
rc = fg_sram_read(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
buf, PROFILE_COMP_LEN, FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in reading battery profile, rc:%d\n", rc);
- goto out;
+ return false;
}
profiles_same = memcmp(chip->batt_profile, buf,
PROFILE_COMP_LEN) == 0;
if (profiles_same) {
- fg_dbg(chip, FG_STATUS, "Battery profile is same\n");
- goto done;
+ fg_dbg(chip, FG_STATUS, "Battery profile is same, not loading it\n");
+ return false;
}
- fg_dbg(chip, FG_STATUS, "profiles are different?\n");
- }
- clear_cycle_counter(chip);
- fg_dbg(chip, FG_STATUS, "profile loading started\n");
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0) {
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
- goto out;
+ if (!chip->dt.force_load_profile) {
+			pr_warn("Profiles don't match, skipping loading it since force_load_profile is disabled\n");
+ if (fg_sram_dump) {
+ pr_info("FG: loaded profile:\n");
+ dump_sram(buf, PROFILE_COMP_LEN);
+ pr_info("FG: available profile:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
+ return false;
+ }
+
+ fg_dbg(chip, FG_STATUS, "Profiles are different, loading the correct one\n");
+ } else {
+ fg_dbg(chip, FG_STATUS, "Profile integrity bit is not set\n");
+ if (fg_sram_dump) {
+ pr_info("FG: profile to be loaded:\n");
+ dump_sram(chip->batt_profile, PROFILE_LEN);
+ }
}
+ return true;
+}
- /* load battery profile */
- rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
- chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+#define SOC_READY_WAIT_MS 2000
+static int __fg_restart(struct fg_chip *chip)
+{
+ int rc, msoc;
+ bool tried_again = false;
+
+ rc = fg_get_prop_capacity(chip, &msoc);
if (rc < 0) {
- pr_err("Error in writing battery profile, rc:%d\n", rc);
- goto out;
+ pr_err("Error in getting capacity, rc=%d\n", rc);
+ return rc;
}
+ chip->last_soc = msoc;
+ chip->fg_restarting = true;
+ reinit_completion(&chip->soc_ready);
rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT,
RESTART_GO_BIT);
if (rc < 0) {
@@ -991,6 +1633,57 @@ wait:
goto out;
}
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+out:
+ chip->fg_restarting = false;
+ return rc;
+}
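+
+/*
+ * While fg_restarting is set, CAPACITY reads are served from the cached
+ * last_soc (see fg_psy_get_property()) so userspace does not observe the
+ * SOC glitching while the first estimate is redone.
+ */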
+
+static void profile_load_work(struct work_struct *work)
+{
+ struct fg_chip *chip = container_of(work,
+ struct fg_chip,
+ profile_load_work.work);
+ u8 buf[2], val;
+ int rc;
+
+ vote(chip->awake_votable, PROFILE_LOAD, true, 0);
+ if (!is_profile_load_required(chip))
+ goto done;
+
+ clear_cycle_counter(chip);
+ mutex_lock(&chip->cl.lock);
+ chip->cl.learned_cc_uah = 0;
+ chip->cl.active = false;
+ mutex_unlock(&chip->cl.lock);
+
+ fg_dbg(chip, FG_STATUS, "profile loading started\n");
+ rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
+ if (rc < 0) {
+ pr_err("Error in writing to %04x, rc=%d\n",
+ BATT_SOC_RESTART(chip), rc);
+ goto out;
+ }
+
+ /* load battery profile */
+ rc = fg_sram_write(chip, PROFILE_LOAD_WORD, PROFILE_LOAD_OFFSET,
+ chip->batt_profile, PROFILE_LEN, FG_IMA_ATOMIC);
+ if (rc < 0) {
+ pr_err("Error in writing battery profile, rc:%d\n", rc);
+ goto out;
+ }
+
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ goto out;
+ }
+
fg_dbg(chip, FG_STATUS, "SOC is ready\n");
/* Set the profile integrity bit */
@@ -1002,26 +1695,67 @@ wait:
goto out;
}
- fg_dbg(chip, FG_STATUS, "profile loaded successfully");
done:
rc = fg_sram_read(chip, NOM_CAP_WORD, NOM_CAP_OFFSET, buf, 2,
FG_IMA_DEFAULT);
if (rc < 0) {
pr_err("Error in reading %04x[%d] rc=%d\n", NOM_CAP_WORD,
NOM_CAP_OFFSET, rc);
- goto out;
+ } else {
+ chip->cl.nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
+ rc = fg_load_learned_cap_from_sram(chip);
+ if (rc < 0)
+ pr_err("Error in loading capacity learning data, rc:%d\n",
+ rc);
}
- chip->nom_cap_uah = (int)(buf[0] | buf[1] << 8) * 1000;
chip->profile_loaded = true;
+	fg_dbg(chip, FG_STATUS, "profile loaded successfully\n");
out:
vote(chip->awake_votable, PROFILE_LOAD, false, 0);
- rc = fg_masked_write(chip, BATT_SOC_RESTART(chip), RESTART_GO_BIT, 0);
- if (rc < 0)
- pr_err("Error in writing to %04x, rc=%d\n",
- BATT_SOC_RESTART(chip), rc);
}
+static int fg_restart_sysfs(const char *val, const struct kernel_param *kp)
+{
+ int rc;
+ struct power_supply *bms_psy;
+ struct fg_chip *chip;
+
+ rc = param_set_int(val, kp);
+ if (rc) {
+ pr_err("Unable to set fg_restart: %d\n", rc);
+ return rc;
+ }
+
+ if (fg_restart != 1) {
+ pr_err("Bad value %d\n", fg_restart);
+ return -EINVAL;
+ }
+
+ bms_psy = power_supply_get_by_name("bms");
+ if (!bms_psy) {
+ pr_err("bms psy not found\n");
+ return 0;
+ }
+
+ chip = power_supply_get_drvdata(bms_psy);
+ rc = __fg_restart(chip);
+ if (rc < 0) {
+ pr_err("Error in restarting FG, rc=%d\n", rc);
+ return rc;
+ }
+
+ pr_info("FG restart done\n");
+ return rc;
+}
+
+static struct kernel_param_ops fg_restart_ops = {
+ .set = fg_restart_sysfs,
+ .get = param_get_int,
+};
+
+module_param_cb(restart, &fg_restart_ops, &fg_restart, 0644);
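+
+/*
+ * Usage sketch (module name assumed here): writing 1 restarts the fuel
+ * gauge through the "bms" power supply, e.g.
+ *
+ *	echo 1 > /sys/module/qpnp_fg_gen3/parameters/restart
+ */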
+
/* PSY CALLBACKS STAY HERE */
static int fg_psy_get_property(struct power_supply *psy,
@@ -1033,7 +1767,10 @@ static int fg_psy_get_property(struct power_supply *psy,
switch (psp) {
case POWER_SUPPLY_PROP_CAPACITY:
- rc = fg_get_prop_capacity(chip, &pval->intval);
+ if (chip->fg_restarting)
+ pval->intval = chip->last_soc;
+ else
+ rc = fg_get_prop_capacity(chip, &pval->intval);
break;
case POWER_SUPPLY_PROP_VOLTAGE_NOW:
rc = fg_get_battery_voltage(chip, &pval->intval);
@@ -1051,7 +1788,7 @@ static int fg_psy_get_property(struct power_supply *psy,
rc = fg_get_sram_prop(chip, FG_SRAM_OCV, &pval->intval);
break;
case POWER_SUPPLY_PROP_CHARGE_FULL_DESIGN:
- pval->intval = chip->nom_cap_uah;
+ pval->intval = chip->cl.nom_cap_uah;
break;
case POWER_SUPPLY_PROP_RESISTANCE_ID:
rc = fg_get_batt_id(chip, &pval->intval);
@@ -1067,6 +1804,18 @@ static int fg_psy_get_property(struct power_supply *psy,
case POWER_SUPPLY_PROP_CYCLE_COUNT_ID:
pval->intval = chip->cyc_ctr.id;
break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW_RAW:
+ rc = fg_get_cc_soc(chip, &pval->intval);
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_NOW:
+ pval->intval = chip->cl.init_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_FULL:
+ pval->intval = chip->cl.learned_cc_uah;
+ break;
+ case POWER_SUPPLY_PROP_CHARGE_COUNTER:
+ rc = fg_get_cc_soc_sw(chip, &pval->intval);
+ break;
default:
break;
}
@@ -1125,8 +1874,14 @@ static int fg_notifier_cb(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "battery") == 0)
- || (strcmp(psy->desc->name, "usb") == 0))
+ || (strcmp(psy->desc->name, "usb") == 0)) {
+ /*
+ * We cannot vote for awake votable here as that takes
+ * a mutex lock and this is executed in an atomic context.
+ */
+ pm_stay_awake(chip->dev);
schedule_work(&chip->status_change_work);
+ }
return NOTIFY_OK;
}
@@ -1144,6 +1899,10 @@ static enum power_supply_property fg_psy_props[] = {
POWER_SUPPLY_PROP_VOLTAGE_MAX_DESIGN,
POWER_SUPPLY_PROP_CYCLE_COUNT,
POWER_SUPPLY_PROP_CYCLE_COUNT_ID,
+ POWER_SUPPLY_PROP_CHARGE_NOW_RAW,
+ POWER_SUPPLY_PROP_CHARGE_NOW,
+ POWER_SUPPLY_PROP_CHARGE_FULL,
+ POWER_SUPPLY_PROP_CHARGE_COUNTER,
};
static const struct power_supply_desc fg_psy_desc = {
@@ -1182,6 +1941,32 @@ static int fg_hw_init(struct fg_chip *chip)
return rc;
}
+ /* This SRAM register is only present in v2.0 */
+ if (chip->pmic_rev_id->rev4 == PMICOBALT_V2P0_REV4 &&
+ chip->bp.float_volt_uv > 0) {
+ fg_encode(chip->sp, FG_SRAM_FLOAT_VOLT,
+ chip->bp.float_volt_uv / 1000, buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_FLOAT_VOLT].addr_word,
+ chip->sp[FG_SRAM_FLOAT_VOLT].addr_byte, buf,
+ chip->sp[FG_SRAM_FLOAT_VOLT].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing float_volt, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
+ if (chip->bp.vbatt_full_mv > 0) {
+ fg_encode(chip->sp, FG_SRAM_VBATT_FULL, chip->bp.vbatt_full_mv,
+ buf);
+ rc = fg_sram_write(chip, chip->sp[FG_SRAM_VBATT_FULL].addr_word,
+ chip->sp[FG_SRAM_VBATT_FULL].addr_byte, buf,
+ chip->sp[FG_SRAM_VBATT_FULL].len, FG_IMA_DEFAULT);
+ if (rc < 0) {
+ pr_err("Error in writing vbatt_full, rc=%d\n", rc);
+ return rc;
+ }
+ }
+
fg_encode(chip->sp, FG_SRAM_CHG_TERM_CURR, chip->dt.chg_term_curr_ma,
buf);
rc = fg_sram_write(chip, chip->sp[FG_SRAM_CHG_TERM_CURR].addr_word,
@@ -1230,16 +2015,9 @@ static int fg_hw_init(struct fg_chip *chip)
}
if (chip->dt.recharge_soc_thr > 0 && chip->dt.recharge_soc_thr < 100) {
- fg_encode(chip->sp, FG_SRAM_RECHARGE_SOC_THR,
- chip->dt.recharge_soc_thr, buf);
- rc = fg_sram_write(chip,
- chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_word,
- chip->sp[FG_SRAM_RECHARGE_SOC_THR].addr_byte,
- buf, chip->sp[FG_SRAM_RECHARGE_SOC_THR].len,
- FG_IMA_DEFAULT);
+ rc = fg_set_recharge_soc(chip, chip->dt.recharge_soc_thr);
if (rc < 0) {
- pr_err("Error in writing recharge_soc_thr, rc=%d\n",
- rc);
+ pr_err("Error in setting recharge_soc, rc=%d\n", rc);
return rc;
}
}
@@ -1303,36 +2081,32 @@ static int fg_hw_init(struct fg_chip *chip)
if (chip->cyc_ctr.en)
restore_cycle_counter(chip);
- return 0;
-}
-
-static int fg_memif_init(struct fg_chip *chip)
-{
- return fg_ima_init(chip);
-}
-
-static int fg_batt_profile_init(struct fg_chip *chip)
-{
- int rc;
-
- if (!chip->batt_profile) {
- chip->batt_profile = devm_kcalloc(chip->dev, PROFILE_LEN,
- sizeof(*chip->batt_profile),
- GFP_KERNEL);
- if (!chip->batt_profile)
- return -ENOMEM;
+ if (chip->dt.jeita_hyst_temp >= 0) {
+ val = chip->dt.jeita_hyst_temp << JEITA_TEMP_HYST_SHIFT;
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TEMP_CFG(chip),
+ JEITA_TEMP_HYST_MASK, val);
+ if (rc < 0) {
+ pr_err("Error in writing batt_temp_cfg, rc=%d\n", rc);
+ return rc;
+ }
}
- rc = fg_get_batt_profile(chip);
+ get_batt_temp_delta(chip->dt.batt_temp_delta, &val);
+ rc = fg_masked_write(chip, BATT_INFO_BATT_TMPR_INTR(chip),
+ CHANGE_THOLD_MASK, val);
if (rc < 0) {
- pr_err("Error in getting battery profile, rc:%d\n", rc);
+ pr_err("Error in writing batt_temp_delta, rc=%d\n", rc);
return rc;
}
- schedule_delayed_work(&chip->profile_load_work, msecs_to_jiffies(0));
return 0;
}
+static int fg_memif_init(struct fg_chip *chip)
+{
+ return fg_ima_init(chip);
+}
+
/* INTERRUPT HANDLERS STAY HERE */
static irqreturn_t fg_vbatt_low_irq_handler(int irq, void *data)
@@ -1360,16 +2134,16 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
chip->battery_missing = (status & BT_MISS_BIT);
if (chip->battery_missing) {
- chip->batt_id_avail = false;
+ chip->profile_available = false;
chip->profile_loaded = false;
clear_cycle_counter(chip);
} else {
- rc = fg_batt_profile_init(chip);
+ rc = fg_get_batt_profile(chip);
if (rc < 0) {
- pr_err("Error in initializing battery profile, rc=%d\n",
- rc);
+ pr_err("Error in getting battery profile, rc:%d\n", rc);
return IRQ_HANDLED;
}
+ schedule_delayed_work(&chip->profile_load_work, 0);
}
return IRQ_HANDLED;
@@ -1378,8 +2152,33 @@ static irqreturn_t fg_batt_missing_irq_handler(int irq, void *data)
static irqreturn_t fg_delta_batt_temp_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ union power_supply_propval prop = {0, };
+ int rc, batt_temp;
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+ rc = fg_get_battery_temp(chip, &batt_temp);
+ if (rc < 0) {
+ pr_err("Error in getting batt_temp\n");
+ return IRQ_HANDLED;
+ }
+
+ if (!is_charger_available(chip)) {
+ chip->last_batt_temp = batt_temp;
+ return IRQ_HANDLED;
+ }
+
+ power_supply_get_property(chip->batt_psy, POWER_SUPPLY_PROP_HEALTH,
+ &prop);
+ chip->health = prop.intval;
+
+	if (abs(chip->last_batt_temp - batt_temp) > 30)
+		pr_warn("Battery temperature last: %d, current: %d\n",
+			chip->last_batt_temp, batt_temp);
+
+	if (chip->last_batt_temp != batt_temp) {
+		chip->last_batt_temp = batt_temp;
+		power_supply_changed(chip->batt_psy);
+	}
return IRQ_HANDLED;
}
@@ -1404,6 +2203,7 @@ static irqreturn_t fg_soc_update_irq_handler(int irq, void *data)
static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
{
struct fg_chip *chip = data;
+ int rc;
if (chip->cyc_ctr.en)
schedule_work(&chip->cycle_count_work);
@@ -1412,6 +2212,18 @@ static irqreturn_t fg_delta_soc_irq_handler(int irq, void *data)
power_supply_changed(chip->batt_psy);
fg_dbg(chip, FG_IRQ, "irq %d triggered\n", irq);
+
+ if (chip->cl.active)
+ fg_cap_learning_update(chip);
+
+ rc = fg_charge_full_update(chip);
+ if (rc < 0)
+ pr_err("Error in charge_full_update, rc=%d\n", rc);
+
+ rc = fg_adjust_ki_coeff_dischg(chip);
+ if (rc < 0)
+ pr_err("Error in adjusting ki_coeff_dischg, rc=%d\n", rc);
+
return IRQ_HANDLED;
}
@@ -1445,39 +2257,79 @@ static irqreturn_t fg_dummy_irq_handler(int irq, void *data)
static struct fg_irq_info fg_irqs[FG_IRQ_MAX] = {
/* BATT_SOC irqs */
[MSOC_FULL_IRQ] = {
- "msoc-full", fg_soc_irq_handler, true },
+ .name = "msoc-full",
+ .handler = fg_soc_irq_handler,
+ },
[MSOC_HIGH_IRQ] = {
- "msoc-high", fg_soc_irq_handler, true },
+ .name = "msoc-high",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_EMPTY_IRQ] = {
- "msoc-empty", fg_empty_soc_irq_handler, true },
+ .name = "msoc-empty",
+ .handler = fg_empty_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_LOW_IRQ] = {
- "msoc-low", fg_soc_irq_handler },
+ .name = "msoc-low",
+ .handler = fg_soc_irq_handler,
+ .wakeable = true,
+ },
[MSOC_DELTA_IRQ] = {
- "msoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "msoc-delta",
+ .handler = fg_delta_soc_irq_handler,
+ .wakeable = true,
+ },
[BSOC_DELTA_IRQ] = {
- "bsoc-delta", fg_delta_soc_irq_handler, true },
+ .name = "bsoc-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[SOC_READY_IRQ] = {
- "soc-ready", fg_first_est_irq_handler, true },
+ .name = "soc-ready",
+ .handler = fg_first_est_irq_handler,
+ .wakeable = true,
+ },
[SOC_UPDATE_IRQ] = {
- "soc-update", fg_soc_update_irq_handler },
+ .name = "soc-update",
+ .handler = fg_soc_update_irq_handler,
+ },
/* BATT_INFO irqs */
[BATT_TEMP_DELTA_IRQ] = {
- "batt-temp-delta", fg_delta_batt_temp_irq_handler },
+ .name = "batt-temp-delta",
+ .handler = fg_delta_batt_temp_irq_handler,
+ .wakeable = true,
+ },
[BATT_MISSING_IRQ] = {
- "batt-missing", fg_batt_missing_irq_handler, true },
+ .name = "batt-missing",
+ .handler = fg_batt_missing_irq_handler,
+ .wakeable = true,
+ },
[ESR_DELTA_IRQ] = {
- "esr-delta", fg_dummy_irq_handler },
+ .name = "esr-delta",
+ .handler = fg_dummy_irq_handler,
+ },
[VBATT_LOW_IRQ] = {
- "vbatt-low", fg_vbatt_low_irq_handler, true },
+ .name = "vbatt-low",
+ .handler = fg_vbatt_low_irq_handler,
+ .wakeable = true,
+ },
[VBATT_PRED_DELTA_IRQ] = {
- "vbatt-pred-delta", fg_dummy_irq_handler },
+ .name = "vbatt-pred-delta",
+ .handler = fg_dummy_irq_handler,
+ },
/* MEM_IF irqs */
[DMA_GRANT_IRQ] = {
- "dma-grant", fg_dummy_irq_handler },
+ .name = "dma-grant",
+ .handler = fg_dummy_irq_handler,
+ },
[MEM_XCP_IRQ] = {
- "mem-xcp", fg_dummy_irq_handler },
+ .name = "mem-xcp",
+ .handler = fg_dummy_irq_handler,
+ },
[IMA_RDY_IRQ] = {
- "ima-rdy", fg_dummy_irq_handler },
+ .name = "ima-rdy",
+ .handler = fg_dummy_irq_handler,
+ },
};
static int fg_get_irq_index_byname(const char *name)
@@ -1532,6 +2384,73 @@ static int fg_register_interrupts(struct fg_chip *chip)
return 0;
}
+static int fg_parse_ki_coefficients(struct fg_chip *chip)
+{
+ struct device_node *node = chip->dev->of_node;
+ int rc, i;
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-soc-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-soc-dischg",
+ chip->dt.ki_coeff_soc, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-soc-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-med-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-med-dischg",
+ chip->dt.ki_coeff_med_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-med-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = of_property_count_elems_of_size(node, "qcom,ki-coeff-hi-dischg",
+ sizeof(u32));
+ if (rc != KI_COEFF_SOC_LEVELS)
+ return 0;
+
+ rc = of_property_read_u32_array(node, "qcom,ki-coeff-hi-dischg",
+ chip->dt.ki_coeff_hi_dischg, KI_COEFF_SOC_LEVELS);
+ if (rc < 0) {
+ pr_err("Error in reading ki-coeff-hi-dischg, rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ for (i = 0; i < KI_COEFF_SOC_LEVELS; i++) {
+ if (chip->dt.ki_coeff_soc[i] < 0 ||
+ chip->dt.ki_coeff_soc[i] > FULL_CAPACITY) {
+ pr_err("Error in ki_coeff_soc_dischg values\n");
+ return -EINVAL;
+ }
+
+ if (chip->dt.ki_coeff_med_dischg[i] < 0 ||
+ chip->dt.ki_coeff_med_dischg[i] > KI_COEFF_MAX) {
+ pr_err("Error in ki_coeff_med_dischg values\n");
+ return -EINVAL;
+ }
+
+		if (chip->dt.ki_coeff_hi_dischg[i] < 0 ||
+			chip->dt.ki_coeff_hi_dischg[i] > KI_COEFF_MAX) {
+			pr_err("Error in ki_coeff_hi_dischg values\n");
+			return -EINVAL;
+		}
+ }
+ chip->ki_coeff_dischg_en = true;
+ return 0;
+}
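+
+/*
+ * Illustrative DT usage (hypothetical values); each of the three arrays
+ * must carry exactly KI_COEFF_SOC_LEVELS entries, otherwise the feature
+ * is left disabled:
+ *
+ *	qcom,ki-coeff-soc-dischg = <30 60 90>;
+ *	qcom,ki-coeff-med-dischg = <800 1000 1400>;
+ *	qcom,ki-coeff-hi-dischg = <1200 1500 2100>;
+ */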
+
#define DEFAULT_CUTOFF_VOLT_MV 3200
#define DEFAULT_EMPTY_VOLT_MV 3100
#define DEFAULT_CHG_TERM_CURR_MA 100
@@ -1542,12 +2461,21 @@ static int fg_register_interrupts(struct fg_chip *chip)
#define DEFAULT_BATT_TEMP_COOL 5
#define DEFAULT_BATT_TEMP_WARM 45
#define DEFAULT_BATT_TEMP_HOT 50
+#define DEFAULT_CL_START_SOC 15
+#define DEFAULT_CL_MIN_TEMP_DECIDEGC 150
+#define DEFAULT_CL_MAX_TEMP_DECIDEGC 450
+#define DEFAULT_CL_MAX_INC_DECIPERC 5
+#define DEFAULT_CL_MAX_DEC_DECIPERC 100
+#define DEFAULT_CL_MIN_LIM_DECIPERC 0
+#define DEFAULT_CL_MAX_LIM_DECIPERC 0
+#define BTEMP_DELTA_LOW 2
+#define BTEMP_DELTA_HIGH 10
static int fg_parse_dt(struct fg_chip *chip)
{
struct device_node *child, *revid_node, *node = chip->dev->of_node;
u32 base, temp;
u8 subtype;
- int rc, len;
+ int rc;
if (!node) {
dev_err(chip->dev, "device tree node missing\n");
@@ -1638,6 +2566,11 @@ static int fg_parse_dt(struct fg_chip *chip)
}
}
+ rc = fg_get_batt_profile(chip);
+ if (rc < 0)
+		pr_warn("profile for batt_id=%dKOhms not found...using OTP, rc:%d\n",
+ chip->batt_id, rc);
+
/* Read all the optional properties below */
rc = of_property_read_u32(node, "qcom,fg-cutoff-voltage", &temp);
if (rc < 0)
@@ -1691,15 +2624,14 @@ static int fg_parse_dt(struct fg_chip *chip)
chip->dt.jeita_thresholds[JEITA_COOL] = DEFAULT_BATT_TEMP_COOL;
chip->dt.jeita_thresholds[JEITA_WARM] = DEFAULT_BATT_TEMP_WARM;
chip->dt.jeita_thresholds[JEITA_HOT] = DEFAULT_BATT_TEMP_HOT;
- if (of_find_property(node, "qcom,fg-jeita-thresholds", &len)) {
- if (len == NUM_JEITA_LEVELS) {
- rc = of_property_read_u32_array(node,
- "qcom,fg-jeita-thresholds",
- chip->dt.jeita_thresholds, len);
- if (rc < 0)
- pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
- rc);
- }
+ if (of_property_count_elems_of_size(node, "qcom,fg-jeita-thresholds",
+ sizeof(u32)) == NUM_JEITA_LEVELS) {
+ rc = of_property_read_u32_array(node,
+ "qcom,fg-jeita-thresholds",
+ chip->dt.jeita_thresholds, NUM_JEITA_LEVELS);
+ if (rc < 0)
+ pr_warn("Error reading Jeita thresholds, default values will be used rc:%d\n",
+ rc);
}
rc = of_property_read_u32(node, "qcom,fg-esr-timer-charging", &temp);
@@ -1724,6 +2656,70 @@ static int fg_parse_dt(struct fg_chip *chip)
if (chip->cyc_ctr.en)
chip->cyc_ctr.id = 1;
+ chip->dt.force_load_profile = of_property_read_bool(node,
+ "qcom,fg-force-load-profile");
+
+ rc = of_property_read_u32(node, "qcom,cl-start-capacity", &temp);
+ if (rc < 0)
+ chip->dt.cl_start_soc = DEFAULT_CL_START_SOC;
+ else
+ chip->dt.cl_start_soc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_temp = DEFAULT_CL_MIN_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_min_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-temp", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_temp = DEFAULT_CL_MAX_TEMP_DECIDEGC;
+ else
+ chip->dt.cl_max_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-increment", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_inc = DEFAULT_CL_MAX_INC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_inc = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-decrement", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_dec = DEFAULT_CL_MAX_DEC_DECIPERC;
+ else
+ chip->dt.cl_max_cap_dec = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-min-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_min_cap_limit = DEFAULT_CL_MIN_LIM_DECIPERC;
+ else
+ chip->dt.cl_min_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,cl-max-limit", &temp);
+ if (rc < 0)
+ chip->dt.cl_max_cap_limit = DEFAULT_CL_MAX_LIM_DECIPERC;
+ else
+ chip->dt.cl_max_cap_limit = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-jeita-hyst-temp", &temp);
+ if (rc < 0)
+ chip->dt.jeita_hyst_temp = -EINVAL;
+ else
+ chip->dt.jeita_hyst_temp = temp;
+
+ rc = of_property_read_u32(node, "qcom,fg-batt-temp-delta", &temp);
+ if (rc < 0)
+ chip->dt.batt_temp_delta = -EINVAL;
+ else if (temp > BTEMP_DELTA_LOW && temp <= BTEMP_DELTA_HIGH)
+ chip->dt.batt_temp_delta = temp;
+
+ chip->dt.hold_soc_while_full = of_property_read_bool(node,
+ "qcom,hold-soc-while-full");
+
+ rc = fg_parse_ki_coefficients(chip);
+ if (rc < 0)
+ pr_err("Error in parsing Ki coefficients, rc=%d\n", rc);
+
return 0;
}
@@ -1776,6 +2772,7 @@ static int fg_gen3_probe(struct platform_device *pdev)
mutex_init(&chip->bus_lock);
mutex_init(&chip->sram_rw_lock);
mutex_init(&chip->cyc_ctr.lock);
+ mutex_init(&chip->cl.lock);
init_completion(&chip->soc_update);
init_completion(&chip->soc_ready);
INIT_DELAYED_WORK(&chip->profile_load_work, profile_load_work);
@@ -1836,10 +2833,8 @@ static int fg_gen3_probe(struct platform_device *pdev)
goto exit;
}
- rc = fg_batt_profile_init(chip);
- if (rc < 0)
- dev_warn(chip->dev, "Error in initializing battery profile, rc:%d\n",
- rc);
+ if (chip->profile_available)
+ schedule_delayed_work(&chip->profile_load_work, 0);
device_init_wakeup(chip->dev, true);
pr_debug("FG GEN3 driver successfully probed\n");
diff --git a/drivers/power/qcom-charger/qpnp-smb2.c b/drivers/power/qcom-charger/qpnp-smb2.c
index 1b63f51088ee..8aaeb095db3c 100644
--- a/drivers/power/qcom-charger/qpnp-smb2.c
+++ b/drivers/power/qcom-charger/qpnp-smb2.c
@@ -58,6 +58,13 @@ static struct smb_params v1_params = {
.max_u = 4800000,
.step_u = 25000,
},
+ .otg_cl = {
+ .name = "usb otg current limit",
+ .reg = OTG_CURRENT_LIMIT_CFG_REG,
+ .min_u = 250000,
+ .max_u = 2000000,
+ .step_u = 250000,
+ },
.dc_icl = {
.name = "dc input current limit",
.reg = DCIN_CURRENT_LIMIT_CFG_REG,
@@ -202,6 +209,7 @@ struct smb_dt_props {
bool no_battery;
int fcc_ua;
int usb_icl_ua;
+ int otg_cl_ua;
int dc_icl_ua;
int fv_uv;
int wipower_max_uw;
@@ -226,6 +234,7 @@ module_param_named(
pl_master_percent, __pl_master_percent, int, S_IRUSR | S_IWUSR
);
+#define MICRO_1P5A 1500000
static int smb2_parse_dt(struct smb2 *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -278,6 +287,11 @@ static int smb2_parse_dt(struct smb2 *chip)
chip->dt.usb_icl_ua = -EINVAL;
rc = of_property_read_u32(node,
+ "qcom,otg-cl-ua", &chip->dt.otg_cl_ua);
+ if (rc < 0)
+ chip->dt.otg_cl_ua = MICRO_1P5A;
+
+ rc = of_property_read_u32(node,
"qcom,dc-icl-ua", &chip->dt.dc_icl_ua);
if (rc < 0)
chip->dt.dc_icl_ua = -EINVAL;
@@ -328,6 +342,7 @@ static enum power_supply_property smb2_usb_props[] = {
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED,
POWER_SUPPLY_PROP_INPUT_CURRENT_NOW,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
};
static int smb2_usb_get_prop(struct power_supply *psy,
@@ -390,6 +405,10 @@ static int smb2_usb_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_INPUT_CURRENT_NOW:
rc = smblib_get_prop_usb_current_now(chg, val);
break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ val->intval = get_client_vote(chg->pl_disable_votable,
+ USER_VOTER);
+ break;
default:
pr_err("get prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -434,6 +453,9 @@ static int smb2_usb_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_PD_ACTIVE:
rc = smblib_set_prop_pd_active(chg, val);
break;
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ vote(chg->pl_disable_votable, USER_VOTER, (bool)val->intval, 0);
+ break;
default:
pr_err("set prop %d is not supported\n", psp);
rc = -EINVAL;
@@ -448,6 +470,7 @@ static int smb2_usb_prop_is_writeable(struct power_supply *psy,
{
switch (psp) {
case POWER_SUPPLY_PROP_TYPEC_POWER_ROLE:
+ case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
return 1;
default:
break;
@@ -664,7 +687,7 @@ static int smb2_batt_get_prop(struct power_supply *psy,
val->intval = POWER_SUPPLY_TECHNOLOGY_LION;
break;
case POWER_SUPPLY_PROP_CHARGE_DONE:
- val->intval = chg->chg_done;
+ rc = smblib_get_prop_batt_charge_done(chg, val);
break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
@@ -696,9 +719,6 @@ static int smb2_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
rc = smblib_set_prop_batt_capacity(chg, val);
break;
- case POWER_SUPPLY_PROP_CHARGE_DONE:
- chg->chg_done = val->intval;
- break;
default:
rc = -EINVAL;
}
@@ -981,6 +1001,8 @@ static int smb2_init_hw(struct smb2 *chip)
smblib_get_charge_param(chg, &chg->param.dc_icl,
&chip->dt.dc_icl_ua);
+ chg->otg_cl_ua = chip->dt.otg_cl_ua;
+
/* votes must be cast before configuring software control */
vote(chg->pl_disable_votable,
PL_INDIRECT_VOTER, true, 0);
diff --git a/drivers/power/qcom-charger/smb-lib.c b/drivers/power/qcom-charger/smb-lib.c
index e9c189ae17e7..ce76260be6f6 100644
--- a/drivers/power/qcom-charger/smb-lib.c
+++ b/drivers/power/qcom-charger/smb-lib.c
@@ -12,6 +12,7 @@
#include <linux/device.h>
#include <linux/regmap.h>
+#include <linux/delay.h>
#include <linux/iio/consumer.h>
#include <linux/power_supply.h>
#include <linux/regulator/driver.h>
@@ -642,6 +643,31 @@ suspend:
return rc;
}
+#define MICRO_250MA 250000
+static int smblib_otg_cl_config(struct smb_charger *chg, int otg_cl_ua)
+{
+ int rc = 0;
+
+ rc = smblib_set_charge_param(chg, &chg->param.otg_cl, otg_cl_ua);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set otg current limit rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* configure PFM/PWM mode for OTG regulator */
+ rc = smblib_masked_write(chg, DC_ENG_SSUPPLY_CFG3_REG,
+ ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT,
+			otg_cl_ua > MICRO_250MA ?
+				ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT : 0);
+ if (rc < 0) {
+ dev_err(chg->dev,
+ "Couldn't write DC_ENG_SSUPPLY_CFG3_REG rc=%d\n", rc);
+ return rc;
+ }
+
+ return rc;
+}
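+
+/*
+ * otg_cl follows the usual linear smb_chg_param encoding (roughly
+ * raw = (ua - min_u) / step_u), so with min_u = step_u = 250000 the
+ * default 1.5 A limit encodes to a raw value of 5.
+ */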
+
static int smblib_dc_icl_vote_callback(struct votable *votable, void *data,
int icl_ua, const char *client)
{
@@ -746,14 +772,36 @@ static int smblib_pl_enable_indirect_vote_callback(struct votable *votable,
* OTG REGULATOR *
*****************/
+#define OTG_SOFT_START_DELAY_MS 20
int smblib_vbus_regulator_enable(struct regulator_dev *rdev)
{
struct smb_charger *chg = rdev_get_drvdata(rdev);
+ u8 stat;
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, OTG_EN_BIT);
- if (rc < 0)
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ rc = smblib_write(chg, CMD_OTG_REG, OTG_EN_BIT);
+ if (rc < 0) {
dev_err(chg->dev, "Couldn't enable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ msleep(OTG_SOFT_START_DELAY_MS);
+ rc = smblib_read(chg, OTG_STATUS_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read OTG_STATUS_REG rc=%d\n", rc);
+ return rc;
+ }
+ if (stat & BOOST_SOFTSTART_DONE_BIT)
+ smblib_otg_cl_config(chg, chg->otg_cl_ua);
return rc;
}
@@ -763,9 +811,22 @@ int smblib_vbus_regulator_disable(struct regulator_dev *rdev)
struct smb_charger *chg = rdev_get_drvdata(rdev);
int rc = 0;
- rc = regmap_write(chg->regmap, CMD_OTG_REG, 0);
- if (rc < 0)
+ rc = smblib_write(chg, CMD_OTG_REG, 0);
+ if (rc < 0) {
dev_err(chg->dev, "Couldn't disable OTG regulator rc=%d\n", rc);
+ return rc;
+ }
+
+ smblib_otg_cl_config(chg, MICRO_250MA);
+
+ rc = smblib_masked_write(chg, OTG_ENG_OTG_CFG_REG,
+ ENG_BUCKBOOST_HALT1_8_MODE_BIT, 0);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't set OTG_ENG_OTG_CFG_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+
return rc;
}
@@ -1104,6 +1165,24 @@ int smblib_get_prop_step_chg_step(struct smb_charger *chg,
return rc;
}
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val)
+{
+ int rc;
+ u8 stat;
+
+ rc = smblib_read(chg, BATTERY_CHARGER_STATUS_1_REG, &stat);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't read BATTERY_CHARGER_STATUS_1 rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ stat = stat & BATTERY_CHARGER_STATUS_MASK;
+ val->intval = (stat == TERMINATE_CHARGE);
+ return 0;
+}
+
/***********************
* BATTERY PSY SETTERS *
***********************/
@@ -1688,6 +1767,22 @@ int smblib_set_prop_pd_active(struct smb_charger *chg,
return rc;
}
+/************************
+ * PARALLEL PSY GETTERS *
+ ************************/
+
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+ union power_supply_propval *pval)
+{
+ if (IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+ chg->iio.batt_i_chan = iio_channel_get(chg->dev, "batt_i");
+
+ if (IS_ERR(chg->iio.batt_i_chan))
+ return PTR_ERR(chg->iio.batt_i_chan);
+
+ return iio_read_channel_processed(chg->iio.batt_i_chan, &pval->intval);
+}
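+
+/*
+ * The batt_i IIO channel is resolved lazily on first read and cached in
+ * chg->iio; smblib_iio_deinit() releases it along with the other channels.
+ */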
+
/**********************
* INTERRUPT HANDLERS *
**********************/
@@ -1732,7 +1827,6 @@ static void smblib_pl_handle_chg_state_change(struct smb_charger *chg, u8 stat)
irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
{
- union power_supply_propval pval = {0, };
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
u8 stat;
@@ -1749,9 +1843,6 @@ irqreturn_t smblib_handle_chg_state_change(int irq, void *data)
stat = stat & BATTERY_CHARGER_STATUS_MASK;
smblib_pl_handle_chg_state_change(chg, stat);
- pval.intval = (stat == TERMINATE_CHARGE);
- power_supply_set_property(chg->batt_psy, POWER_SUPPLY_PROP_CHARGE_DONE,
- &pval);
power_supply_changed(chg->batt_psy);
return IRQ_HANDLED;
}
@@ -2379,6 +2470,8 @@ static void smblib_iio_deinit(struct smb_charger *chg)
iio_channel_release(chg->iio.usbin_i_chan);
if (!IS_ERR_OR_NULL(chg->iio.usbin_v_chan))
iio_channel_release(chg->iio.usbin_v_chan);
+ if (!IS_ERR_OR_NULL(chg->iio.batt_i_chan))
+ iio_channel_release(chg->iio.batt_i_chan);
}
int smblib_init(struct smb_charger *chg)
diff --git a/drivers/power/qcom-charger/smb-lib.h b/drivers/power/qcom-charger/smb-lib.h
index c9732c25dfcd..00975e6c1285 100644
--- a/drivers/power/qcom-charger/smb-lib.h
+++ b/drivers/power/qcom-charger/smb-lib.h
@@ -77,6 +77,7 @@ struct smb_params {
struct smb_chg_param fv;
struct smb_chg_param usb_icl;
struct smb_chg_param icl_stat;
+ struct smb_chg_param otg_cl;
struct smb_chg_param dc_icl;
struct smb_chg_param dc_icl_pt_lv;
struct smb_chg_param dc_icl_pt_hv;
@@ -102,6 +103,7 @@ struct smb_iio {
struct iio_channel *temp_max_chan;
struct iio_channel *usbin_i_chan;
struct iio_channel *usbin_v_chan;
+ struct iio_channel *batt_i_chan;
};
struct smb_charger {
@@ -167,6 +169,8 @@ struct smb_charger {
int thermal_levels;
int *thermal_mitigation;
+ int otg_cl_ua;
+
int fake_capacity;
bool step_chg_enabled;
@@ -230,6 +234,8 @@ int smblib_get_prop_batt_status(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_batt_charge_type(struct smb_charger *chg,
union power_supply_propval *val);
+int smblib_get_prop_batt_charge_done(struct smb_charger *chg,
+ union power_supply_propval *val);
int smblib_get_prop_batt_health(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_get_prop_system_temp_level(struct smb_charger *chg,
@@ -298,6 +304,9 @@ int smblib_set_prop_typec_power_role(struct smb_charger *chg,
int smblib_set_prop_pd_active(struct smb_charger *chg,
const union power_supply_propval *val);
+int smblib_get_prop_slave_current_now(struct smb_charger *chg,
+ union power_supply_propval *val);
+
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
#endif /* __SMB2_CHARGER_H */
diff --git a/drivers/power/qcom-charger/smb-reg.h b/drivers/power/qcom-charger/smb-reg.h
index c4ad72e254f9..8a49a8fb38ba 100644
--- a/drivers/power/qcom-charger/smb-reg.h
+++ b/drivers/power/qcom-charger/smb-reg.h
@@ -366,6 +366,9 @@ enum {
#define OTG_EN_SRC_CFG_BIT BIT(1)
#define CONCURRENT_MODE_CFG_BIT BIT(0)
+#define OTG_ENG_OTG_CFG_REG (OTG_BASE + 0xC0)
+#define ENG_BUCKBOOST_HALT1_8_MODE_BIT BIT(0)
+
/* BATIF Peripheral Registers */
/* BATIF Interrupt Bits */
#define BAT_7_RT_STS_BIT BIT(7)
@@ -766,6 +769,13 @@ enum {
ZIN_ICL_HV_MAX_MV = 11000,
};
+#define DC_ENG_SSUPPLY_CFG3_REG (DCIN_BASE + 0xC2)
+#define ENG_SSUPPLY_HI_CAP_BIT BIT(6)
+#define ENG_SSUPPLY_HI_RES_BIT BIT(5)
+#define ENG_SSUPPLY_CFG_SKIP_TH_V0P2_BIT BIT(3)
+#define ENG_SSUPPLY_CFG_SYSOV_TH_4P8_BIT BIT(2)
+#define ENG_SSUPPLY_5V_OV_OPT_BIT BIT(0)
+
/* MISC Peripheral Registers */
#define REVISION1_REG (MISC_BASE + 0x00)
#define DIG_MINOR_MASK GENMASK(7, 0)
diff --git a/drivers/power/qcom-charger/smb138x-charger.c b/drivers/power/qcom-charger/smb138x-charger.c
index 33d759be9aeb..e8ec2f49f7eb 100644
--- a/drivers/power/qcom-charger/smb138x-charger.c
+++ b/drivers/power/qcom-charger/smb138x-charger.c
@@ -48,8 +48,8 @@ static struct smb_params v1_params = {
.name = "fast charge current",
.reg = FAST_CHARGE_CURRENT_CFG_REG,
.min_u = 0,
- .max_u = 5000000,
- .step_u = 50000,
+ .max_u = 4500000,
+ .step_u = 25000,
},
.fv = {
.name = "float voltage",
@@ -395,6 +395,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_INPUT_SUSPEND,
POWER_SUPPLY_PROP_VOLTAGE_MAX,
POWER_SUPPLY_PROP_CURRENT_MAX,
+ POWER_SUPPLY_PROP_CURRENT_NOW,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
};
@@ -431,6 +432,9 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_charge_param(chg, &chg->param.fcc,
&val->intval);
break;
+ case POWER_SUPPLY_PROP_CURRENT_NOW:
+ rc = smblib_get_prop_slave_current_now(chg, val);
+ break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
rc = smblib_get_prop_charger_temp(chg, val);
break;
@@ -1125,6 +1129,15 @@ static int smb138x_slave_probe(struct smb138x *chip)
return rc;
}
+ /* enable parallel current sensing */
+ rc = smblib_masked_write(chg, CFG_REG,
+ VCHG_EN_CFG_BIT, VCHG_EN_CFG_BIT);
+ if (rc < 0) {
+ dev_err(chg->dev, "Couldn't enable parallel current sensing rc=%d\n",
+ rc);
+ return rc;
+ }
+
/* keep at the end of probe, ready to serve before notifying others */
rc = smb138x_init_parallel_psy(chip);
if (rc < 0) {
diff --git a/drivers/pwm/pwm-qpnp.c b/drivers/pwm/pwm-qpnp.c
index ac71f2c75472..6d0c1fbe566b 100644
--- a/drivers/pwm/pwm-qpnp.c
+++ b/drivers/pwm/pwm-qpnp.c
@@ -1879,7 +1879,7 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
int rc, enable, lut_entry_size, list_size, i;
const char *lable;
const __be32 *prop;
- u64 size;
+ u32 size;
struct device_node *node;
int found_pwm_subnode = 0;
int found_lpg_subnode = 0;
@@ -1968,11 +1968,18 @@ static int qpnp_parse_dt_config(struct platform_device *pdev,
return rc;
prop = of_get_address_by_name(pdev->dev.of_node, QPNP_LPG_LUT_BASE,
- &size, 0);
+ 0, 0);
if (!prop) {
chip->flags |= QPNP_PWM_LUT_NOT_SUPPORTED;
} else {
lpg_config->lut_base_addr = be32_to_cpu(*prop);
+ rc = of_property_read_u32(of_node, "qcom,lpg-lut-size", &size);
+ if (rc < 0) {
+ dev_err(&pdev->dev, "Error reading qcom,lpg-lut-size, rc=%d\n",
+ rc);
+ return rc;
+ }
+
/*
* Each entry of LUT is of 2 bytes for generic LUT and of 1 byte
* for KPDBL/GLED LUT.
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index ad4b6ffef36e..1a1dd804ffb3 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -1498,10 +1498,14 @@ static int ufs_qcom_setup_clocks(struct ufs_hba *hba, bool on,
/* M-PHY RMMI interface clocks can be turned off */
ufs_qcom_phy_disable_iface_clk(host->generic_phy);
- if (!ufs_qcom_is_link_active(hba)) {
- if (!is_gating_context)
- /* turn off UFS local PHY ref_clk */
- ufs_qcom_phy_disable_ref_clk(host->generic_phy);
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if (ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufs_qcom_is_link_active(hba)) {
+ /* turn off UFS local PHY ref_clk */
+ ufs_qcom_phy_disable_ref_clk(host->generic_phy);
/* disable device ref_clk */
ufs_qcom_dev_ref_clk_ctrl(host, false);
}
@@ -1956,13 +1960,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
host->hba = hba;
ufshcd_set_variant(hba, host);
- /*
- * voting/devoting device ref_clk source is time consuming hence
- * skip devoting it during aggressive clock gating. This clock
- * will still be gated off during runtime suspend.
- */
- hba->no_ref_clk_gating = true;
-
err = ufs_qcom_ice_get_dev(host);
if (err == -EPROBE_DEFER) {
/*
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index d478767ad3dd..53fed74eaefc 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -1176,6 +1176,12 @@ static int ufshcd_scale_clks(struct ufs_hba *hba, bool scale_up)
return ret;
}
+static inline void ufshcd_cancel_gate_work(struct ufs_hba *hba)
+{
+ hrtimer_cancel(&hba->clk_gating.gate_hrtimer);
+ cancel_work_sync(&hba->clk_gating.gate_work);
+}
+
static void ufshcd_ungate_work(struct work_struct *work)
{
int ret;
@@ -1183,7 +1189,7 @@ static void ufshcd_ungate_work(struct work_struct *work)
struct ufs_hba *hba = container_of(work, struct ufs_hba,
clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
+ ufshcd_cancel_gate_work(hba);
spin_lock_irqsave(hba->host->host_lock, flags);
if (hba->clk_gating.state == CLKS_ON) {
@@ -1254,14 +1260,18 @@ start:
}
break;
case REQ_CLKS_OFF:
- if (cancel_delayed_work(&hba->clk_gating.gate_work)) {
+ /*
+ * If the timer was active but the callback was not running
+ * we have nothing to do, just change state and return.
+ */
+ if (hrtimer_try_to_cancel(&hba->clk_gating.gate_hrtimer) == 1) {
hba->clk_gating.state = CLKS_ON;
trace_ufshcd_clk_gating(dev_name(hba->dev),
hba->clk_gating.state);
break;
}
/*
- * If we here, it means gating work is either done or
+ * If we are here, it means gating work is either done or
* currently running. Hence, fall through to cancel gating
* work and to enable clocks.
*/
@@ -1301,7 +1311,7 @@ EXPORT_SYMBOL_GPL(ufshcd_hold);
static void ufshcd_gate_work(struct work_struct *work)
{
struct ufs_hba *hba = container_of(work, struct ufs_hba,
- clk_gating.gate_work.work);
+ clk_gating.gate_work);
unsigned long flags;
spin_lock_irqsave(hba->host->host_lock, flags);
@@ -1346,7 +1356,12 @@ static void ufshcd_gate_work(struct work_struct *work)
ufshcd_set_link_hibern8(hba);
}
- if (!ufshcd_is_link_active(hba) && !hba->no_ref_clk_gating)
+ /*
+ * If auto hibern8 is supported then the link will already
+ * be in hibern8 state and the ref clock can be gated.
+ */
+ if ((ufshcd_is_auto_hibern8_supported(hba) ||
+ !ufshcd_is_link_active(hba)) && !hba->no_ref_clk_gating)
ufshcd_disable_clocks(hba, true);
else
/* If link is active, device ref_clk can't be switched off */
@@ -1394,8 +1409,9 @@ static void __ufshcd_release(struct ufs_hba *hba, bool no_sched)
hba->clk_gating.state = REQ_CLKS_OFF;
trace_ufshcd_clk_gating(dev_name(hba->dev), hba->clk_gating.state);
- schedule_delayed_work(&hba->clk_gating.gate_work,
- msecs_to_jiffies(hba->clk_gating.delay_ms));
+ hrtimer_start(&hba->clk_gating.gate_hrtimer,
+ ms_to_ktime(hba->clk_gating.delay_ms),
+ HRTIMER_MODE_REL);
}
void ufshcd_release(struct ufs_hba *hba, bool no_sched)
@@ -1523,6 +1539,17 @@ out:
return count;
}
+static enum hrtimer_restart ufshcd_clkgate_hrtimer_handler(
+ struct hrtimer *timer)
+{
+ struct ufs_hba *hba = container_of(timer, struct ufs_hba,
+ clk_gating.gate_hrtimer);
+
+ schedule_work(&hba->clk_gating.gate_work);
+
+ return HRTIMER_NORESTART;
+}
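+
+/*
+ * Note: the hrtimer callback fires in hard interrupt context, so it
+ * cannot gate clocks directly; it only kicks gate_work, which performs
+ * the actual hibern8 entry and clock gating in process context.
+ */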
+
static void ufshcd_init_clk_gating(struct ufs_hba *hba)
{
struct ufs_clk_gating *gating = &hba->clk_gating;
@@ -1539,27 +1566,25 @@ static void ufshcd_init_clk_gating(struct ufs_hba *hba)
if (ufshcd_is_auto_hibern8_supported(hba))
hba->caps &= ~UFSHCD_CAP_HIBERN8_WITH_CLK_GATING;
- INIT_DELAYED_WORK(&gating->gate_work, ufshcd_gate_work);
+ INIT_WORK(&gating->gate_work, ufshcd_gate_work);
INIT_WORK(&gating->ungate_work, ufshcd_ungate_work);
+ /*
+ * Clock gating work must be executed only after auto hibern8
+ * timeout has expired in the hardware or after aggressive
+ * hibern8 on idle software timeout. Using jiffy based low
+ * resolution delayed work is not reliable to guarantee this,
+ * hence use a high resolution timer to make sure we schedule
+ * the gate work precisely more than hibern8 timeout.
+ *
+ * Always make sure gating->delay_ms > hibern8_on_idle->delay_ms
+ */
+ hrtimer_init(&gating->gate_hrtimer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
+ gating->gate_hrtimer.function = ufshcd_clkgate_hrtimer_handler;
gating->is_enabled = true;
- /*
- * Scheduling the delayed work after 1 jiffies will make the work to
- * get schedule any time from 0ms to 1000/HZ ms which is not desirable
- * for hibern8 enter work as it may impact the performance if it gets
- * scheduled almost immediately. Hence make sure that hibern8 enter
- * work gets scheduled atleast after 2 jiffies (any time between
- * 1000/HZ ms to 2000/HZ ms).
- */
- gating->delay_ms_pwr_save = jiffies_to_msecs(
- max_t(unsigned long,
- msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE),
- 2));
- gating->delay_ms_perf = jiffies_to_msecs(
- max_t(unsigned long,
- msecs_to_jiffies(UFSHCD_CLK_GATING_DELAY_MS_PERF),
- 2));
+ gating->delay_ms_pwr_save = UFSHCD_CLK_GATING_DELAY_MS_PWR_SAVE;
+ gating->delay_ms_perf = UFSHCD_CLK_GATING_DELAY_MS_PERF;
/* start with performance mode */
gating->delay_ms = gating->delay_ms_perf;
@@ -1616,8 +1641,8 @@ static void ufshcd_exit_clk_gating(struct ufs_hba *hba)
device_remove_file(hba->dev, &hba->clk_gating.delay_attr);
}
device_remove_file(hba->dev, &hba->clk_gating.enable_attr);
+ ufshcd_cancel_gate_work(hba);
cancel_work_sync(&hba->clk_gating.ungate_work);
- cancel_delayed_work_sync(&hba->clk_gating.gate_work);
}
static void ufshcd_set_auto_hibern8_timer(struct ufs_hba *hba, u32 delay)
@@ -1928,6 +1953,7 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
return;
if (ufshcd_is_auto_hibern8_supported(hba)) {
+ hba->hibern8_on_idle.delay_ms = 1;
hba->hibern8_on_idle.state = AUTO_HIBERN8;
/*
* Disable SW hibern8 enter on idle in case
@@ -1935,13 +1961,13 @@ static void ufshcd_init_hibern8_on_idle(struct ufs_hba *hba)
*/
hba->caps &= ~UFSHCD_CAP_HIBERN8_ENTER_ON_IDLE;
} else {
+ hba->hibern8_on_idle.delay_ms = 10;
INIT_DELAYED_WORK(&hba->hibern8_on_idle.enter_work,
ufshcd_hibern8_enter_work);
INIT_WORK(&hba->hibern8_on_idle.exit_work,
ufshcd_hibern8_exit_work);
}
- hba->hibern8_on_idle.delay_ms = 10;
hba->hibern8_on_idle.is_enabled = true;
hba->hibern8_on_idle.delay_attr.show =
diff --git a/drivers/scsi/ufs/ufshcd.h b/drivers/scsi/ufs/ufshcd.h
index a6298f614a0b..cd15d67cf8d5 100644
--- a/drivers/scsi/ufs/ufshcd.h
+++ b/drivers/scsi/ufs/ufshcd.h
@@ -39,6 +39,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
+#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -396,8 +397,9 @@ enum clk_gating_state {
/**
* struct ufs_clk_gating - UFS clock gating related info
- * @gate_work: worker to turn off clocks after some delay as specified in
- * delay_ms
+ * @gate_hrtimer: hrtimer to invoke @gate_work after some delay as
+ * specified in @delay_ms
+ * @gate_work: worker to turn off clocks
* @ungate_work: worker to turn on clocks that will be used in case of
* interrupt context
* @state: the current clocks state
@@ -415,7 +417,8 @@ enum clk_gating_state {
* completion before gating clocks.
*/
struct ufs_clk_gating {
- struct delayed_work gate_work;
+ struct hrtimer gate_hrtimer;
+ struct work_struct gate_work;
struct work_struct ungate_work;
enum clk_gating_state state;
unsigned long delay_ms;
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index b7fa71dd0695..9cfca014c8ad 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -2583,6 +2583,7 @@ void *glink_open(const struct glink_open_config *cfg)
ctx->notify_tx_abort = cfg->notify_tx_abort;
ctx->notify_rx_tracer_pkt = cfg->notify_rx_tracer_pkt;
ctx->notify_remote_rx_intent = cfg->notify_remote_rx_intent;
+ ctx->magic_number = GLINK_CTX_CANARY;
if (!ctx->notify_rx_intent_req)
ctx->notify_rx_intent_req = glink_dummy_notify_rx_intent_req;
@@ -2618,7 +2619,6 @@ void *glink_open(const struct glink_open_config *cfg)
GLINK_INFO_CH(ctx, "%s: Created channel, sent OPEN command. ctx %p\n",
__func__, ctx);
- ctx->magic_number = GLINK_CTX_CANARY;
return ctx;
}
EXPORT_SYMBOL(glink_open);
@@ -5380,7 +5380,7 @@ static int glink_scheduler_tx(struct channel_ctx *ctx,
size_t txd_len = 0;
size_t tx_len = 0;
uint32_t num_pkts = 0;
- int ret;
+ int ret = 0;
spin_lock_irqsave(&ctx->tx_lists_lock_lhc3, flags);
while (txd_len < xprt_ctx->mtu &&
diff --git a/drivers/soc/qcom/icnss.c b/drivers/soc/qcom/icnss.c
index 25b522806c3e..f47d4a51fccd 100644
--- a/drivers/soc/qcom/icnss.c
+++ b/drivers/soc/qcom/icnss.c
@@ -33,7 +33,6 @@
#include <linux/dma-mapping.h>
#include <linux/qmi_encdec.h>
#include <linux/ipc_logging.h>
-#include <linux/msm-bus.h>
#include <linux/thread_info.h>
#include <linux/uaccess.h>
#include <linux/qpnp/qpnp-adc.h>
@@ -411,8 +410,6 @@ static struct icnss_priv {
size_t smmu_iova_len;
dma_addr_t smmu_iova_ipa_start;
size_t smmu_iova_ipa_len;
- struct msm_bus_scale_pdata *bus_scale_table;
- uint32_t bus_client;
struct qmi_handle *wlfw_clnt;
struct list_head event_list;
spinlock_t event_lock;
@@ -3410,66 +3407,9 @@ unsigned int icnss_socinfo_get_serial_number(struct device *dev)
}
EXPORT_SYMBOL(icnss_socinfo_get_serial_number);
-static int icnss_bw_vote(struct icnss_priv *priv, int index)
-{
- int ret = 0;
-
- icnss_pr_dbg("Vote %d for msm_bus, state 0x%lx\n",
- index, priv->state);
- ret = msm_bus_scale_client_update_request(priv->bus_client, index);
- if (ret)
- icnss_pr_err("Fail to vote %d: ret %d, state 0x%lx\n",
- index, ret, priv->state);
-
- return ret;
-}
-
-static int icnss_bw_init(struct icnss_priv *priv)
-{
- int ret = 0;
-
- priv->bus_scale_table = msm_bus_cl_get_pdata(priv->pdev);
- if (!priv->bus_scale_table) {
- icnss_pr_err("Missing entry for msm_bus scale table\n");
- return -EINVAL;
- }
-
- priv->bus_client = msm_bus_scale_register_client(priv->bus_scale_table);
- if (!priv->bus_client) {
- icnss_pr_err("Fail to register with bus_scale client\n");
- ret = -EINVAL;
- goto out;
- }
-
- ret = icnss_bw_vote(priv, 1);
- if (ret)
- goto out;
-
- return 0;
-
-out:
- msm_bus_cl_clear_pdata(priv->bus_scale_table);
- return ret;
-}
-
-static void icnss_bw_deinit(struct icnss_priv *priv)
-{
- if (!priv)
- return;
-
- if (priv->bus_client) {
- icnss_bw_vote(priv, 0);
- msm_bus_scale_unregister_client(priv->bus_client);
- }
-
- if (priv->bus_scale_table)
- msm_bus_cl_clear_pdata(priv->bus_scale_table);
-}
-
static int icnss_smmu_init(struct icnss_priv *priv)
{
struct dma_iommu_mapping *mapping;
- int disable_htw = 1;
int atomic_ctx = 1;
int s1_bypass = 1;
int ret = 0;
@@ -3486,15 +3426,6 @@ static int icnss_smmu_init(struct icnss_priv *priv)
}
ret = iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
- if (ret < 0) {
- icnss_pr_err("Set disable_htw attribute failed, err = %d\n",
- ret);
- goto set_attr_fail;
- }
-
- ret = iommu_domain_set_attr(mapping->domain,
DOMAIN_ATTR_ATOMIC,
&atomic_ctx);
if (ret < 0) {
@@ -4429,10 +4360,6 @@ static int icnss_probe(struct platform_device *pdev)
priv->smmu_iova_len);
goto out;
}
-
- ret = icnss_bw_init(priv);
- if (ret)
- goto out_smmu_deinit;
}
spin_lock_init(&priv->event_lock);
@@ -4442,7 +4369,7 @@ static int icnss_probe(struct platform_device *pdev)
if (!priv->event_wq) {
icnss_pr_err("Workqueue creation failed\n");
ret = -EFAULT;
- goto out_bw_deinit;
+ goto out_smmu_deinit;
}
INIT_WORK(&priv->event_work, icnss_driver_event_work);
@@ -4470,8 +4397,6 @@ static int icnss_probe(struct platform_device *pdev)
out_destroy_wq:
destroy_workqueue(priv->event_wq);
-out_bw_deinit:
- icnss_bw_deinit(priv);
out_smmu_deinit:
icnss_smmu_deinit(priv);
out:
@@ -4497,8 +4422,6 @@ static int icnss_remove(struct platform_device *pdev)
if (penv->event_wq)
destroy_workqueue(penv->event_wq);
- icnss_bw_deinit(penv);
-
icnss_hw_power_off(penv);
dev_set_drvdata(&pdev->dev, NULL);
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index 881359d444fc..cd3d387645fd 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -79,6 +79,7 @@ static int spinlocks_initialized;
static void *smem_ramdump_dev;
static DEFINE_MUTEX(spinlock_init_lock);
static DEFINE_SPINLOCK(smem_init_check_lock);
+static struct device *smem_dev;
static int smem_module_inited;
static RAW_NOTIFIER_HEAD(smem_module_init_notifier_list);
static DEFINE_MUTEX(smem_module_init_notifier_lock);
@@ -1047,7 +1048,8 @@ static __init int modem_restart_late_init(void)
void *handle;
struct restart_notifier_block *nb;
- smem_ramdump_dev = create_ramdump_device("smem", NULL);
+ if (smem_dev)
+ smem_ramdump_dev = create_ramdump_device("smem", smem_dev);
if (IS_ERR_OR_NULL(smem_ramdump_dev)) {
LOG_ERR("%s: Unable to create smem ramdump device.\n",
__func__);
@@ -1444,7 +1446,7 @@ smem_targ_info_done:
SMEM_INFO("smem security enabled\n");
smem_init_security();
}
-
+ smem_dev = &pdev->dev;
probe_done = true;
ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
diff --git a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
index 9b44fb03cf94..83e3775ed533 100644
--- a/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
+++ b/drivers/soc/qcom/qdsp6v2/msm_audio_ion.c
@@ -741,7 +741,6 @@ static int msm_audio_smmu_init(struct device *dev)
{
struct dma_iommu_mapping *mapping;
int ret;
- int disable_htw = 1;
mapping = arm_iommu_create_mapping(
msm_iommu_get_bus(dev),
@@ -750,10 +749,6 @@ static int msm_audio_smmu_init(struct device *dev)
if (IS_ERR(mapping))
return PTR_ERR(mapping);
- iommu_domain_set_attr(mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
- &disable_htw);
-
ret = arm_iommu_attach_device(dev, mapping);
if (ret) {
dev_err(dev, "%s: Attach failed, err = %d\n",
diff --git a/drivers/soc/qcom/qsee_ipc_irq_bridge.c b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
index ab43bbb7e86a..eee42d7ba314 100644
--- a/drivers/soc/qcom/qsee_ipc_irq_bridge.c
+++ b/drivers/soc/qcom/qsee_ipc_irq_bridge.c
@@ -115,10 +115,8 @@ static struct qiib_driver_data *qiib_info;
static int qiib_driver_data_init(void)
{
qiib_info = kzalloc(sizeof(*qiib_info), GFP_KERNEL);
- if (!qiib_info) {
- QIIB_ERR("Unable to allocate info pointer\n");
+ if (!qiib_info)
return -ENOMEM;
- }
INIT_LIST_HEAD(&qiib_info->list);
mutex_init(&qiib_info->list_lock);
@@ -356,6 +354,7 @@ static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
const char *dev_name;
uint32_t irqtype;
uint32_t irq_clear[2];
+ struct irq_data *irqtype_data;
int ret = -ENODEV;
key = "qcom,dev-name";
@@ -374,7 +373,12 @@ static int qiib_parse_node(struct device_node *node, struct qiib_dev *devp)
}
QIIB_DBG("%s: %s = %d\n", __func__, key, devp->irq_line);
- irqtype = irqd_get_trigger_type(irq_get_irq_data(devp->irq_line));
+ irqtype_data = irq_get_irq_data(devp->irq_line);
+ if (!irqtype_data) {
+ QIIB_ERR("%s: get irqdata fail:%d\n", __func__, devp->irq_line);
+ goto missing_key;
+ }
+ irqtype = irqd_get_trigger_type(irqtype_data);
QIIB_DBG("%s: irqtype = %d\n", __func__, irqtype);
key = "label";
diff --git a/drivers/soc/qcom/rpm-smd.c b/drivers/soc/qcom/rpm-smd.c
index 03a1591e5b09..242071f52811 100644
--- a/drivers/soc/qcom/rpm-smd.c
+++ b/drivers/soc/qcom/rpm-smd.c
@@ -967,8 +967,10 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
cdata->client_buf = kzalloc(buf_size, GFP_FLAG(noirq));
- if (!cdata->client_buf)
- goto cdata_alloc_fail;
+ if (!cdata->client_buf) {
+ pr_warn("Cannot allocate memory for client_buf\n");
+ goto client_buf_alloc_fail;
+ }
set_set_type(cdata->client_buf, set);
set_rsc_type(cdata->client_buf, rsc_type);
@@ -997,6 +999,8 @@ static struct msm_rpm_request *msm_rpm_create_request_common(
buf_alloc_fail:
kfree(cdata->kvp);
kvp_alloc_fail:
+ kfree(cdata->client_buf);
+client_buf_alloc_fail:
kfree(cdata);
cdata_alloc_fail:
return NULL;
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 4ad994972b19..805c5e1931e1 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -421,7 +421,16 @@ static void dwc3_free_trb_pool(struct dwc3_ep *dep)
if (dep->endpoint.ep_type == EP_TYPE_GSI)
return;
- if (dep->trb_pool && dep->trb_pool_dma) {
+ /*
+ * Clean up ep ring to avoid getting xferInProgress due to stale trbs
+ * with HWO bit set from previous composition when update transfer cmd
+ * is issued.
+ */
+ if (dep->number > 1 && dep->trb_pool && dep->trb_pool_dma) {
+ memset(&dep->trb_pool[0], 0,
+ sizeof(struct dwc3_trb) * dep->num_trbs);
+ dbg_event(dep->number, "Clr_TRB", 0);
+
dma_free_coherent(dwc->dev,
sizeof(struct dwc3_trb) * DWC3_TRB_NUM, dep->trb_pool,
dep->trb_pool_dma);
@@ -723,17 +732,6 @@ static int __dwc3_gadget_ep_disable(struct dwc3_ep *dep)
(dep->number & 1) ? "in" : "out");
}
- /*
- * Clean up ep ring of non-control endpoint to avoid getting xferInProgress
- * due to stale trbs with HWO bit set from previous composition when update
- * transfer cmd is issued.
- */
- if (dep->number > 1 && dep->trb_pool) {
- memset(&dep->trb_pool[0], 0,
- sizeof(struct dwc3_trb) * dep->num_trbs);
- dbg_event(dep->number, "Clr_TRB", 0);
- }
-
return 0;
}
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 8a0e7f988d25..0121bf7ca4ac 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -1111,7 +1111,7 @@ static struct usb_function_instance *f_midi_alloc_inst(void)
opts->func_inst.free_func_inst = f_midi_free_inst;
opts->index = SNDRV_DEFAULT_IDX1;
opts->id = SNDRV_DEFAULT_STR1;
- opts->buflen = 256;
+ opts->buflen = 1024;
opts->qlen = 32;
opts->in_ports = 1;
opts->out_ports = 1;
diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c
index 7ad798ace1e5..4e35ed9654b7 100644
--- a/drivers/usb/gadget/function/f_ncm.c
+++ b/drivers/usb/gadget/function/f_ncm.c
@@ -333,6 +333,77 @@ static struct usb_descriptor_header *ncm_hs_function[] = {
NULL,
};
+/* Super Speed Support */
+static struct usb_endpoint_descriptor ncm_ss_notify_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_INT,
+ .wMaxPacketSize = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+ .bInterval = USB_MS_TO_HS_INTERVAL(NCM_STATUS_INTERVAL_MS),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_notify_comp_desc = {
+ .bLength = sizeof(ncm_ss_notify_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 3 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+ .wBytesPerInterval = cpu_to_le16(NCM_STATUS_BYTECOUNT),
+};
+
+static struct usb_endpoint_descriptor ncm_ss_in_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_IN,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_in_comp_desc = {
+ .bLength = sizeof(ncm_ss_in_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_endpoint_descriptor ncm_ss_out_desc = {
+ .bLength = USB_DT_ENDPOINT_SIZE,
+ .bDescriptorType = USB_DT_ENDPOINT,
+ .bEndpointAddress = USB_DIR_OUT,
+ .bmAttributes = USB_ENDPOINT_XFER_BULK,
+ .wMaxPacketSize = cpu_to_le16(1024),
+};
+
+static struct usb_ss_ep_comp_descriptor ncm_ss_out_comp_desc = {
+ .bLength = sizeof(ncm_ss_out_comp_desc),
+ .bDescriptorType = USB_DT_SS_ENDPOINT_COMP,
+ /* the following 2 values can be tweaked if necessary */
+ /* .bMaxBurst = 0, */
+ /* .bmAttributes = 0, */
+};
+
+static struct usb_descriptor_header *ncm_ss_function[] = {
+ (struct usb_descriptor_header *) &ncm_iad_desc,
+ /* CDC NCM control descriptors */
+ (struct usb_descriptor_header *) &ncm_control_intf,
+ (struct usb_descriptor_header *) &ncm_header_desc,
+ (struct usb_descriptor_header *) &ncm_union_desc,
+ (struct usb_descriptor_header *) &ecm_desc,
+ (struct usb_descriptor_header *) &ncm_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_desc,
+ (struct usb_descriptor_header *) &ncm_ss_notify_comp_desc,
+ /* data interface, altsettings 0 and 1 */
+ (struct usb_descriptor_header *) &ncm_data_nop_intf,
+ (struct usb_descriptor_header *) &ncm_data_intf,
+ (struct usb_descriptor_header *) &ncm_ss_in_desc,
+ (struct usb_descriptor_header *) &ncm_ss_in_comp_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_desc,
+ (struct usb_descriptor_header *) &ncm_ss_out_comp_desc,
+ NULL,
+};
+
/* string descriptors: */
#define STRING_CTRL_IDX 0
@@ -1431,8 +1502,17 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f)
hs_ncm_notify_desc.bEndpointAddress =
fs_ncm_notify_desc.bEndpointAddress;
+ if (gadget_is_superspeed(c->cdev->gadget)) {
+ ncm_ss_in_desc.bEndpointAddress =
+ fs_ncm_in_desc.bEndpointAddress;
+ ncm_ss_out_desc.bEndpointAddress =
+ fs_ncm_out_desc.bEndpointAddress;
+ ncm_ss_notify_desc.bEndpointAddress =
+ fs_ncm_notify_desc.bEndpointAddress;
+ }
+
status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function,
- NULL);
+ ncm_ss_function);
if (status)
goto fail;
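The SuperSpeed plumbing above follows the usual tri-speed pattern for gadget functions: the hs/ss endpoint descriptors reuse the address assigned to the full-speed descriptor, and all three descriptor tables are handed to the composite core in one call. A sketch of that shape, where every example_* symbol is an illustrative stand-in, not an f_ncm name:

static int example_bind(struct usb_configuration *c, struct usb_function *f)
{
	/* higher speeds reuse the address chosen for full speed */
	example_hs_in_desc.bEndpointAddress =
			example_fs_in_desc.bEndpointAddress;

	if (gadget_is_superspeed(c->cdev->gadget))
		example_ss_in_desc.bEndpointAddress =
				example_fs_in_desc.bEndpointAddress;

	/* a NULL ss table here would cap the function at high speed */
	return usb_assign_descriptors(f, example_fs_function,
				      example_hs_function,
				      example_ss_function);
}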
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 12b98017beb2..915080a5b817 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -168,8 +168,12 @@ static void *usbpd_ipc_log;
#define PS_HARD_RESET_TIME 25
#define PS_SOURCE_ON 400
#define PS_SOURCE_OFF 750
+#define SWAP_SOURCE_START_TIME 20
#define VDM_BUSY_TIME 50
+/* tPSHardReset + tSafe0V + tSrcRecover + tSrcTurnOn */
+#define SNK_HARD_RESET_RECOVER_TIME (35 + 650 + 1000 + 275)
+
#define PD_CAPS_COUNT 50
#define PD_MAX_MSG_ID 7
@@ -593,6 +597,11 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
/* Defer starting USB host mode until after PD */
}
+ /* Set CC back to DRP toggle for the next disconnect */
+ val.intval = POWER_SUPPLY_TYPEC_PR_DUAL;
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_POWER_ROLE, &val);
+
pd->rx_msg_len = 0;
pd->rx_msg_type = 0;
pd->rx_msgid = -1;
@@ -622,10 +631,15 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
- pd->in_pr_swap = false;
pd->current_state = PE_SRC_SEND_CAPABILITIES;
- usbpd_dbg(&pd->dev, "Enter %s\n",
- usbpd_state_strings[pd->current_state]);
+ if (pd->in_pr_swap) {
+ pd->in_pr_swap = false;
+ hrtimer_start(&pd->timer,
+ ms_to_ktime(SWAP_SOURCE_START_TIME),
+ HRTIMER_MODE_REL);
+ break;
+ }
+
/* fall-through */
case PE_SRC_SEND_CAPABILITIES:
@@ -800,18 +814,6 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
pd->in_pr_swap = false;
pd->current_voltage = 5000000;
- if (!pd->vbus_present) {
- /* can get here during a hard reset and we lost vbus */
- pd->current_state = PE_SNK_DISCOVERY;
- hrtimer_start(&pd->timer, ms_to_ktime(2000),
- HRTIMER_MODE_REL);
- break;
- }
-
- /*
- * If VBUS is already present go and skip ahead to
- * PE_SNK_WAIT_FOR_CAPABILITIES.
- */
pd->current_state = PE_SNK_WAIT_FOR_CAPABILITIES;
/* fall-through */
@@ -882,8 +884,28 @@ static void usbpd_set_state(struct usbpd *pd, enum usbpd_state next_state)
POWER_SUPPLY_PROP_VOLTAGE_MAX, &val);
pd->current_voltage = pd->requested_voltage;
- /* recursive call; go back to beginning state */
- usbpd_set_state(pd, PE_SNK_STARTUP);
+ /* max time for hard reset to toggle vbus off/on */
+ hrtimer_start(&pd->timer,
+ ms_to_ktime(SNK_HARD_RESET_RECOVER_TIME),
+ HRTIMER_MODE_REL);
+ break;
+
+ case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
+ val.intval = pd->requested_current = 0; /* suspend charging */
+ power_supply_set_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_CURRENT_MAX, &val);
+
+ pd->in_explicit_contract = false;
+
+ /*
+ * need to update PR bit in message header so that
+ * proper GoodCRC is sent when receiving next PS_RDY
+ */
+ pd_phy_update_roles(pd->current_dr, PR_SRC);
+
+ /* wait for PS_RDY */
+ hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
+ HRTIMER_MODE_REL);
break;
default:
@@ -1276,12 +1298,12 @@ static void usbpd_sm(struct work_struct *w)
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
- if (pd->current_pr == PR_SRC) {
+ if (pd->current_pr == PR_SRC)
regulator_disable(pd->vbus);
- if (pd->vconn_enabled) {
- regulator_disable(pd->vconn);
- pd->vconn_enabled = false;
- }
+
+ if (pd->vconn_enabled) {
+ regulator_disable(pd->vconn);
+ pd->vconn_enabled = false;
}
if (pd->current_dr == DR_UFP)
@@ -1388,7 +1410,7 @@ static void usbpd_sm(struct work_struct *w)
pd->hard_reset_count = 0;
pd->pd_connected = true; /* we know peer is PD capable */
- val.intval = POWER_SUPPLY_TYPE_USB_PD;
+ val.intval = pd->psy_type = POWER_SUPPLY_TYPE_USB_PD;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_TYPE, &val);
@@ -1436,6 +1458,9 @@ static void usbpd_sm(struct work_struct *w)
dr_swap(pd);
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
} else if (ctrl_recvd == MSG_PR_SWAP) {
+ /* lock in current mode */
+ set_power_role(pd, pd->current_pr);
+
/* we'll happily accept Src->Sink requests anytime */
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
@@ -1445,7 +1470,9 @@ static void usbpd_sm(struct work_struct *w)
}
pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
- queue_work(pd->wq, &pd->sm_work);
+ hrtimer_start(&pd->timer,
+ ms_to_ktime(SRC_TRANSITION_TIME),
+ HRTIMER_MODE_REL);
break;
} else {
if (data_recvd == MSG_VDM)
@@ -1465,28 +1492,13 @@ static void usbpd_sm(struct work_struct *w)
usbpd_set_state(pd, PE_SRC_TRANSITION_TO_DEFAULT);
break;
- case PE_SNK_DISCOVERY:
- if (!pd->vbus_present) {
- /* Hard reset and VBUS didn't come back? */
- power_supply_get_property(pd->usb_psy,
- POWER_SUPPLY_PROP_TYPE, &val);
- if (val.intval == POWER_SUPPLY_TYPEC_NONE) {
- pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
- queue_work(pd->wq, &pd->sm_work);
- }
- break;
- }
-
- usbpd_set_state(pd, PE_SNK_WAIT_FOR_CAPABILITIES);
- break;
-
case PE_SNK_WAIT_FOR_CAPABILITIES:
if (data_recvd == MSG_SOURCE_CAPABILITIES) {
val.intval = 1;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_PD_ACTIVE, &val);
- val.intval = POWER_SUPPLY_TYPE_USB_PD;
+ val.intval = pd->psy_type = POWER_SUPPLY_TYPE_USB_PD;
power_supply_set_property(pd->usb_psy,
POWER_SUPPLY_PROP_TYPE, &val);
@@ -1580,6 +1592,9 @@ static void usbpd_sm(struct work_struct *w)
dr_swap(pd);
kobject_uevent(&pd->dev.kobj, KOBJ_CHANGE);
} else if (ctrl_recvd == MSG_PR_SWAP) {
+ /* lock in current mode */
+ set_power_role(pd, pd->current_pr);
+
/* TODO: should we Reject in certain circumstances? */
ret = pd_send_msg(pd, MSG_ACCEPT, NULL, 0, SOP_MSG);
if (ret) {
@@ -1589,19 +1604,7 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
- pd->current_state = PE_PRS_SNK_SRC_TRANSITION_TO_OFF;
- /* turn off sink */
- pd->in_explicit_contract = false;
-
- /*
- * need to update PR bit in message header so that
- * proper GoodCRC is sent when receiving next PS_RDY
- */
- pd->current_pr = PR_SRC;
- pd_phy_update_roles(pd->current_dr, pd->current_pr);
-
- hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
- HRTIMER_MODE_REL);
+ usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
} else {
if (data_recvd == MSG_VDM)
@@ -1611,6 +1614,20 @@ static void usbpd_sm(struct work_struct *w)
}
break;
+ case PE_SNK_TRANSITION_TO_DEFAULT:
+ if (pd->vbus_present) {
+ usbpd_set_state(pd, PE_SNK_STARTUP);
+ } else {
+ /* Hard reset and VBUS didn't come back? */
+ power_supply_get_property(pd->usb_psy,
+ POWER_SUPPLY_PROP_TYPEC_MODE, &val);
+ if (val.intval == POWER_SUPPLY_TYPEC_NONE) {
+ pd->typec_mode = POWER_SUPPLY_TYPEC_NONE;
+ queue_work(pd->wq, &pd->sm_work);
+ }
+ }
+ break;
+
case PE_SRC_SOFT_RESET:
case PE_SNK_SOFT_RESET:
/* Reset protocol layer */
@@ -1684,16 +1701,24 @@ static void usbpd_sm(struct work_struct *w)
}
pd->current_state = PE_PRS_SRC_SNK_TRANSITION_TO_OFF;
- /* fall-through */
+ hrtimer_start(&pd->timer, ms_to_ktime(SRC_TRANSITION_TIME),
+ HRTIMER_MODE_REL);
+ break;
+
case PE_PRS_SRC_SNK_TRANSITION_TO_OFF:
pd->in_pr_swap = true;
pd->in_explicit_contract = false;
regulator_disable(pd->vbus);
- set_power_role(pd, PR_SINK); /* switch Rp->Rd */
+
+ /* PE_PRS_SRC_SNK_Assert_Rd */
pd->current_pr = PR_SINK;
+ set_power_role(pd, pd->current_pr);
pd_phy_update_roles(pd->current_dr, pd->current_pr);
+ /* allow time for Vbus discharge, must be < tSrcSwapStdby */
+ msleep(500);
+
ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
if (ret) {
usbpd_err(&pd->dev, "Error sending PS_RDY\n");
@@ -1720,19 +1745,7 @@ static void usbpd_sm(struct work_struct *w)
}
pd->in_pr_swap = true;
- pd->current_state = PE_PRS_SNK_SRC_TRANSITION_TO_OFF;
- /* turn off sink */
- pd->in_explicit_contract = false;
-
- /*
- * need to update PR bit in message header so that
- * proper GoodCRC is sent when receiving next PS_RDY
- */
- pd->current_pr = PR_SRC;
- pd_phy_update_roles(pd->current_dr, pd->current_pr);
-
- hrtimer_start(&pd->timer, ms_to_ktime(PS_SOURCE_OFF),
- HRTIMER_MODE_REL);
+ usbpd_set_state(pd, PE_PRS_SNK_SRC_TRANSITION_TO_OFF);
break;
case PE_PRS_SNK_SRC_TRANSITION_TO_OFF:
@@ -1741,14 +1754,20 @@ static void usbpd_sm(struct work_struct *w)
break;
}
+ /* PE_PRS_SNK_SRC_Assert_Rp */
+ pd->current_pr = PR_SRC;
+ set_power_role(pd, pd->current_pr);
pd->current_state = PE_PRS_SNK_SRC_SOURCE_ON;
+
/* fall-through */
+
case PE_PRS_SNK_SRC_SOURCE_ON:
- set_power_role(pd, PR_SRC);
ret = regulator_enable(pd->vbus);
if (ret)
usbpd_err(&pd->dev, "Unable to enable vbus\n");
+ msleep(200); /* allow time for VBUS ramp-up, must be < tNewSrc */
+
ret = pd_send_msg(pd, MSG_PS_RDY, NULL, 0, SOP_MSG);
if (ret) {
usbpd_err(&pd->dev, "Error sending PS_RDY\n");
@@ -1846,11 +1865,12 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
* until the HW bug is fixed: in which disconnection won't be reported
* on VBUS loss alone unless pullup is also removed from CC.
*/
- if ((pd->hard_reset || pd->in_pr_swap) &&
- typec_mode == POWER_SUPPLY_TYPEC_NONE &&
- pd->psy_type != POWER_SUPPLY_TYPE_USB) {
+ if (typec_mode == POWER_SUPPLY_TYPEC_NONE &&
+ (pd->in_pr_swap ||
+ (pd->psy_type != POWER_SUPPLY_TYPE_USB &&
+ pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT))) {
usbpd_dbg(&pd->dev, "Ignoring disconnect due to %s\n",
- pd->hard_reset ? "hard reset" : "PR swap");
+ pd->in_pr_swap ? "PR swap" : "hard reset");
return 0;
}
@@ -1863,8 +1883,9 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
psy_type = val.intval;
- usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d\n", typec_mode,
- pd->vbus_present, psy_type);
+ usbpd_dbg(&pd->dev, "typec mode:%d present:%d type:%d orientation:%d\n",
+ typec_mode, pd->vbus_present, psy_type,
+ usbpd_get_plug_orientation(pd));
/* any change? */
if (pd->typec_mode == typec_mode && pd->psy_type == psy_type)
@@ -1885,7 +1906,8 @@ static int psy_changed(struct notifier_block *nb, unsigned long evt, void *ptr)
case POWER_SUPPLY_TYPEC_SOURCE_HIGH:
usbpd_info(&pd->dev, "Type-C Source (%s) connected\n",
src_current(typec_mode));
- if (pd->current_pr != PR_SINK) {
+ if (pd->current_pr != PR_SINK ||
+ pd->current_state == PE_SNK_TRANSITION_TO_DEFAULT) {
pd->current_pr = PR_SINK;
queue_work(pd->wq, &pd->sm_work);
}
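Several of the hunks above replace an immediate re-queue of the state machine with a timer-armed wait: SWAP_SOURCE_START_TIME after a power-role swap, and a hard-reset recovery budget of 35 + 650 + 1000 + 275 = 1960 ms for the sink. The idiom in miniature, as a kernel-style sketch with example_pd standing in for the driver's context struct:

struct example_pd {
	struct hrtimer timer;	/* its handler re-queues the state machine */
};

static void example_enter_timed_wait(struct example_pd *pd, unsigned int ms)
{
	/* one-shot relative timer; the next transition runs from the callback */
	hrtimer_start(&pd->timer, ms_to_ktime(ms), HRTIMER_MODE_REL);
}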
diff --git a/drivers/video/fbdev/core/fbcmap.c b/drivers/video/fbdev/core/fbcmap.c
index 6bf80e43cac5..021755f7f32d 100644
--- a/drivers/video/fbdev/core/fbcmap.c
+++ b/drivers/video/fbdev/core/fbcmap.c
@@ -187,8 +187,8 @@ int fb_copy_cmap(const struct fb_cmap *from, struct fb_cmap *to)
int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
{
- int tooff = 0, fromoff = 0;
- int size;
+ u32 tooff = 0, fromoff = 0;
+ u32 size;
if (to->start > from->start)
fromoff = to->start - from->start;
@@ -198,10 +198,10 @@ int fb_cmap_to_user(const struct fb_cmap *from, struct fb_cmap_user *to)
return -EINVAL;
size = to->len - tooff;
- if (size > (int) (from->len - fromoff))
+ if (size > (from->len - fromoff))
size = from->len - fromoff;
size *= sizeof(u16);
- if (!size)
+ if (size == 0)
return -EINVAL;
if (copy_to_user(to->red+tooff, from->red+fromoff, size))
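The switch to u32 above closes a signed-arithmetic hole: with int, user-controlled lengths can produce a negative size that passes comparisons and is then implicitly converted into an enormous count for copy_to_user(). A self-contained user-space miniature of that conversion pitfall:

#include <stdio.h>

int main(void)
{
	int to_len = 1, tooff = 5;
	int ssize = to_len - tooff;		/* -4: survives signed '>' checks */
	unsigned int usize = (unsigned int)ssize;

	printf("signed=%d unsigned=%u\n", ssize, usize);	/* -4 vs 4294967292 */
	return 0;
}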
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index a5368cdf2254..55918d47a21a 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -193,6 +193,7 @@ enum mdss_qos_settings {
MDSS_QOS_REMAPPER,
MDSS_QOS_IB_NOCR,
MDSS_QOS_WB2_WRITE_GATHER_EN,
+ MDSS_QOS_WB_QOS,
MDSS_QOS_MAX,
};
diff --git a/drivers/video/fbdev/msm/mdss_dp.c b/drivers/video/fbdev/msm/mdss_dp.c
index 57e18a7dc5e1..4e68952d33a9 100644
--- a/drivers/video/fbdev/msm/mdss_dp.c
+++ b/drivers/video/fbdev/msm/mdss_dp.c
@@ -45,6 +45,18 @@
#define VDDA_UA_ON_LOAD 100000 /* uA units */
#define VDDA_UA_OFF_LOAD 100 /* uA units */
+#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
+static u32 supported_modes[] = {
+ HDMI_VFRMT_640x480p60_4_3,
+ HDMI_VFRMT_720x480p60_4_3, HDMI_VFRMT_720x480p60_16_9,
+ HDMI_VFRMT_1280x720p60_16_9,
+ HDMI_VFRMT_1920x1080p60_16_9,
+ HDMI_VFRMT_3840x2160p24_16_9, HDMI_VFRMT_3840x2160p30_16_9,
+ HDMI_VFRMT_3840x2160p60_16_9,
+ HDMI_VFRMT_4096x2160p24_256_135, HDMI_VFRMT_4096x2160p30_256_135,
+ HDMI_VFRMT_4096x2160p60_256_135, HDMI_EVFRMT_4096x2160p24_16_9
+};
+
static void mdss_dp_put_dt_clk_data(struct device *dev,
struct dss_module_power *module_power)
{
@@ -789,17 +801,34 @@ void mdss_dp_config_ctrl(struct mdss_dp_drv_pdata *dp)
cap = &dp->dpcd;
- data = dp->lane_cnt - 1;
- data <<= 4;
+ data |= (2 << 13); /* Default-> LSCLK DIV: 1/4 LCLK */
+
+ /* Color Format */
+ switch (dp->panel_data.panel_info.out_format) {
+ case MDP_Y_CBCR_H2V2:
+ data |= (1 << 11); /* YUV420 */
+ break;
+ case MDP_Y_CBCR_H2V1:
+ data |= (2 << 11); /* YUV422 */
+ break;
+ default:
+ data |= (0 << 11); /* RGB */
+ break;
+ }
+
+ /* Scrambler reset enable */
+ if (cap->scrambler_reset)
+ data |= (1 << 10);
+
+ if (dp->edid.color_depth != 6)
+ data |= 0x100; /* Default: 8 bits */
+
+ /* Num of Lanes */
+ data |= ((dp->lane_cnt - 1) << 4);
if (cap->enhanced_frame)
data |= 0x40;
- if (dp->edid.color_depth == 8) {
- /* 0 == 6 bits, 1 == 8 bits */
- data |= 0x100; /* bit 8 */
- }
-
if (!timing->interlaced) /* progressive */
data |= 0x04;
@@ -863,6 +892,8 @@ static int dp_audio_info_setup(struct platform_device *pdev,
mdss_dp_set_safe_to_exit_level(&dp_ctrl->ctrl_io, dp_ctrl->lane_cnt);
mdss_dp_audio_enable(&dp_ctrl->ctrl_io, true);
+ dp_ctrl->wait_for_audio_comp = true;
+
return rc;
} /* dp_audio_info_setup */
@@ -885,6 +916,17 @@ static int dp_get_audio_edid_blk(struct platform_device *pdev,
return rc;
} /* dp_get_audio_edid_blk */
+
+static void dp_audio_codec_teardown_done(struct platform_device *pdev)
+{
+ struct mdss_dp_drv_pdata *dp = platform_get_drvdata(pdev);
+
+ if (!dp) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ pr_debug("audio codec teardown done\n");
+ complete_all(&dp->audio_comp);
+}
+
static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
@@ -906,6 +948,8 @@ static int mdss_dp_init_ext_disp(struct mdss_dp_drv_pdata *dp)
dp_get_audio_edid_blk;
dp->ext_audio_data.codec_ops.cable_status =
dp_get_cable_status;
+ dp->ext_audio_data.codec_ops.teardown_done =
+ dp_audio_codec_teardown_done;
if (!dp->pdev->dev.of_node) {
pr_err("%s cannot find dp dev.of_node\n", __func__);
@@ -936,8 +980,6 @@ end:
return ret;
}
-#define DEFAULT_VIDEO_RESOLUTION HDMI_VFRMT_640x480p60_4_3
-
static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
{
struct mdss_panel_info *pinfo;
@@ -949,7 +991,6 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
return -EINVAL;
}
- dp_drv->ds_data.ds_registered = false;
ret = hdmi_get_supported_mode(&timing, &dp_drv->ds_data, vic);
pinfo = &dp_drv->panel_data.panel_info;
@@ -981,12 +1022,21 @@ static int dp_init_panel_info(struct mdss_dp_drv_pdata *dp_drv, u32 vic)
pinfo->lcdc.hsync_skew = 0;
pinfo->is_pluggable = true;
+ dp_drv->bpp = pinfo->bpp;
+
pr_debug("update res. vic= %d, pclk_rate = %llu\n",
dp_drv->vic, pinfo->clk_rate);
return 0;
} /* dp_init_panel_info */
+
+static inline void mdss_dp_set_audio_switch_node(
+ struct mdss_dp_drv_pdata *dp, int val)
+{
+ if (dp && dp->ext_audio_data.intf_ops.notify)
+ dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
+ val);
+}
int mdss_dp_on(struct mdss_panel_data *pdata)
{
@@ -1054,6 +1104,9 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
goto exit;
}
+ mdss_dp_phy_share_lane_config(&dp_drv->phy_io,
+ orientation, dp_drv->dpcd.max_lane_count);
+
pr_debug("link_rate = 0x%x\n", dp_drv->link_rate);
dp_drv->power_data[DP_CTRL_PM].clk_config[0].rate =
@@ -1096,6 +1149,7 @@ int mdss_dp_on(struct mdss_panel_data *pdata)
pr_debug("mainlink ready\n");
dp_drv->power_on = true;
+ mdss_dp_set_audio_switch_node(dp_drv, true);
pr_debug("End-\n");
exit:
@@ -1119,14 +1173,15 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
mutex_lock(&dp_drv->train_mutex);
reinit_completion(&dp_drv->idle_comp);
-
- mdss_dp_state_ctrl(&dp_drv->ctrl_io, 0);
+ mdss_dp_state_ctrl(&dp_drv->ctrl_io, ST_PUSH_IDLE);
if (dp_drv->link_clks_on)
mdss_dp_mainlink_ctrl(&dp_drv->ctrl_io, false);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, false);
+ mdss_dp_audio_enable(&dp_drv->ctrl_io, false);
+
mdss_dp_irq_disable(dp_drv);
mdss_dp_config_gpios(dp_drv, false);
@@ -1147,14 +1202,6 @@ int mdss_dp_off(struct mdss_panel_data *pdata)
return 0;
}
-static inline void mdss_dp_set_audio_switch_node(
- struct mdss_dp_drv_pdata *dp, int val)
-{
- if (dp && dp->ext_audio_data.intf_ops.notify)
- dp->ext_audio_data.intf_ops.notify(dp->ext_pdev,
- val);
-}
-
static void mdss_dp_send_cable_notification(
struct mdss_dp_drv_pdata *dp, int val)
{
@@ -1169,6 +1216,38 @@ static void mdss_dp_send_cable_notification(
dp->ext_audio_data.type, val);
}
+
+static void mdss_dp_audio_codec_wait(struct mdss_dp_drv_pdata *dp)
+{
+ const unsigned long audio_completion_timeout = 3 * HZ; /* in jiffies */
+ unsigned long ret;
+
+ if (!dp->wait_for_audio_comp)
+ return;
+
+ reinit_completion(&dp->audio_comp);
+ ret = wait_for_completion_timeout(&dp->audio_comp,
+ audio_completion_timeout);
+ if (!ret)
+ pr_warn("audio codec teardown timed out\n");
+
+ dp->wait_for_audio_comp = false;
+}
+
+static void mdss_dp_notify_clients(struct mdss_dp_drv_pdata *dp, bool enable)
+{
+ if (enable) {
+ mdss_dp_send_cable_notification(dp, enable);
+ } else {
+ mdss_dp_set_audio_switch_node(dp, enable);
+ mdss_dp_audio_codec_wait(dp);
+ mdss_dp_send_cable_notification(dp, enable);
+ }
+
+ pr_debug("notify state %s done\n",
+ enable ? "ENABLE" : "DISABLE");
+}
+
static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
{
struct mdss_dp_drv_pdata *dp_drv = NULL;
@@ -1183,6 +1262,10 @@ static int mdss_dp_edid_init(struct mdss_panel_data *pdata)
dp_drv = container_of(pdata, struct mdss_dp_drv_pdata,
panel_data);
+ dp_drv->ds_data.ds_registered = true;
+ dp_drv->ds_data.modes_num = ARRAY_SIZE(supported_modes);
+ dp_drv->ds_data.modes = supported_modes;
+
dp_drv->max_pclk_khz = DP_MAX_PIXEL_CLK_KHZ;
edid_init_data.kobj = dp_drv->kobj;
edid_init_data.ds_data = dp_drv->ds_data;
@@ -1236,15 +1319,19 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
mdss_dp_aux_init(dp_drv);
+ mdss_dp_phy_initialize(dp_drv);
+ mdss_dp_ctrl_reset(&dp_drv->ctrl_io);
mdss_dp_phy_reset(&dp_drv->ctrl_io);
mdss_dp_aux_reset(&dp_drv->ctrl_io);
- mdss_dp_phy_initialize(dp_drv);
mdss_dp_aux_ctrl(&dp_drv->ctrl_io, true);
pr_debug("Ctrl_hw_rev =0x%x, phy hw_rev =0x%x\n",
mdss_dp_get_ctrl_hw_version(&dp_drv->ctrl_io),
mdss_dp_get_phy_hw_version(&dp_drv->phy_io));
+ pr_debug("plug Orientation = %d\n",
+ usbpd_get_plug_orientation(dp_drv->pd));
+
mdss_dp_phy_aux_setup(&dp_drv->phy_io);
mdss_dp_irq_enable(dp_drv);
@@ -1264,8 +1351,7 @@ static int mdss_dp_host_init(struct mdss_panel_data *pdata)
goto edid_error;
}
- mdss_dp_send_cable_notification(dp_drv, true);
- mdss_dp_set_audio_switch_node(dp_drv, true);
+ mdss_dp_notify_clients(dp_drv, true);
dp_drv->dp_initialized = true;
return ret;
@@ -1702,16 +1788,15 @@ static void mdss_dp_do_link_train(struct mdss_dp_drv_pdata *dp)
static void mdss_dp_event_work(struct work_struct *work)
{
struct mdss_dp_drv_pdata *dp = NULL;
- struct delayed_work *dw = to_delayed_work(work);
unsigned long flag;
- u32 todo = 0, dp_config_pkt[2];
+ u32 todo = 0, config;
- if (!dw) {
+ if (!work) {
pr_err("invalid work structure\n");
return;
}
- dp = container_of(dw, struct mdss_dp_drv_pdata, dwork);
+ dp = container_of(work, struct mdss_dp_drv_pdata, work);
spin_lock_irqsave(&dp->event_lock, flag);
todo = dp->current_event;
@@ -1756,11 +1841,9 @@ static void mdss_dp_event_work(struct work_struct *work)
SVDM_CMD_TYPE_INITIATOR, 0x1, 0x0, 0x0);
break;
case EV_USBPD_DP_CONFIGURE:
- dp_config_pkt[0] = SVDM_HDR(USB_C_DP_SID, VDM_VERSION, 0x1,
- SVDM_CMD_TYPE_INITIATOR, DP_VDM_CONFIGURE);
- dp_config_pkt[1] = mdss_dp_usbpd_gen_config_pkt(dp);
+ config = mdss_dp_usbpd_gen_config_pkt(dp);
usbpd_send_svdm(dp->pd, USB_C_DP_SID, DP_VDM_CONFIGURE,
- SVDM_CMD_TYPE_INITIATOR, 0x1, dp_config_pkt, 0x2);
+ SVDM_CMD_TYPE_INITIATOR, 0x1, &config, 0x1);
break;
default:
pr_err("Unknown event:%d\n", todo);
@@ -1771,8 +1854,7 @@ static void dp_send_events(struct mdss_dp_drv_pdata *dp, u32 events)
{
spin_lock(&dp->event_lock);
dp->current_event = events;
- queue_delayed_work(dp->workq,
- &dp->dwork, HZ);
+ queue_work(dp->workq, &dp->work);
spin_unlock(&dp->event_lock);
}
@@ -1848,7 +1930,7 @@ static int mdss_dp_event_setup(struct mdss_dp_drv_pdata *dp)
return -EPERM;
}
- INIT_DELAYED_WORK(&dp->dwork, mdss_dp_event_work);
+ INIT_WORK(&dp->work, mdss_dp_event_work);
return 0;
}
@@ -1883,8 +1965,7 @@ static void usbpd_disconnect_callback(struct usbpd_svid_handler *hdlr)
mutex_lock(&dp_drv->pd_msg_mutex);
dp_drv->cable_connected = false;
mutex_unlock(&dp_drv->pd_msg_mutex);
- mdss_dp_send_cable_notification(dp_drv, false);
- mdss_dp_set_audio_switch_node(dp_drv, false);
+ mdss_dp_notify_clients(dp_drv, false);
}
static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
@@ -1968,8 +2049,7 @@ static void usbpd_response_callback(struct usbpd_svid_handler *hdlr, u8 cmd,
}
break;
case DP_VDM_CONFIGURE:
- if ((dp_drv->cable_connected == true)
- || (cmd_type == SVDM_CMD_TYPE_RESP_ACK)) {
+ if (cmd_type == SVDM_CMD_TYPE_RESP_ACK) {
dp_drv->alt_mode.current_state = DP_CONFIGURE_DONE;
pr_debug("config USBPD to DP done\n");
mdss_dp_host_init(&dp_drv->panel_data);
@@ -2135,6 +2215,8 @@ static int mdss_dp_probe(struct platform_device *pdev)
mdss_dp_device_register(dp_drv);
dp_drv->inited = true;
+ dp_drv->wait_for_audio_comp = false;
+ init_completion(&dp_drv->audio_comp);
pr_debug("done\n");
diff --git a/drivers/video/fbdev/msm/mdss_dp.h b/drivers/video/fbdev/msm/mdss_dp.h
index 4710cf7a98e2..6c391f6f7de0 100644
--- a/drivers/video/fbdev/msm/mdss_dp.h
+++ b/drivers/video/fbdev/msm/mdss_dp.h
@@ -399,6 +399,7 @@ struct mdss_dp_drv_pdata {
struct completion train_comp;
struct completion idle_comp;
struct completion video_comp;
+ struct completion audio_comp;
struct mutex aux_mutex;
struct mutex train_mutex;
struct mutex pd_msg_mutex;
@@ -423,10 +424,11 @@ struct mdss_dp_drv_pdata {
char delay_start;
u32 bpp;
struct dp_statistic dp_stat;
+ bool wait_for_audio_comp;
/* event */
struct workqueue_struct *workq;
- struct delayed_work dwork;
+ struct work_struct work;
u32 current_event;
spinlock_t event_lock;
spinlock_t lock;
diff --git a/drivers/video/fbdev/msm/mdss_dp_aux.c b/drivers/video/fbdev/msm/mdss_dp_aux.c
index d9297a7af764..119e2a2b05cf 100644
--- a/drivers/video/fbdev/msm/mdss_dp_aux.c
+++ b/drivers/video/fbdev/msm/mdss_dp_aux.c
@@ -374,7 +374,19 @@ static int dp_aux_read_buf(struct mdss_dp_drv_pdata *ep, u32 addr,
/*
* edid standard header bytes
*/
-static char edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+static u8 edid_hdr[8] = {0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00};
+
+static bool dp_edid_is_valid_header(u8 *buf)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(edid_hdr); i++) {
+ if (buf[i] != edid_hdr[i])
+ return false;
+ }
+
+ return true;
+}
int dp_edid_buf_error(char *buf, int len)
{
@@ -396,11 +408,6 @@ int dp_edid_buf_error(char *buf, int len)
return -EINVAL;
}
- if (strncmp(buf, edid_hdr, strlen(edid_hdr))) {
- pr_err("Error: header\n");
- return -EINVAL;
- }
-
return 0;
}
@@ -510,11 +517,20 @@ char mdss_dp_gen_link_clk(struct mdss_panel_info *pinfo, char lane_cnt)
pr_debug("clk_rate=%llu, bpp= %d, lane_cnt=%d\n",
pinfo->clk_rate, pinfo->bpp, lane_cnt);
- min_link_rate = (u32)div_u64((pinfo->clk_rate * 10),
- (lane_cnt * encoding_factx10));
- min_link_rate = (min_link_rate * pinfo->bpp)
- / (DP_LINK_RATE_MULTIPLIER);
+
+ /*
+ * The max pixel clock supported is 675 MHz. The
+ * calculation below keeps min_link_rate within
+ * 32-bit limits; any change to this section of
+ * code must preserve that property.
+ */
+ min_link_rate = pinfo->clk_rate
+ / (lane_cnt * encoding_factx10);
min_link_rate /= ln_to_link_ratio;
+ min_link_rate = (min_link_rate * pinfo->bpp);
+ min_link_rate = (u32)div_u64(min_link_rate * 10,
+ DP_LINK_RATE_MULTIPLIER);
pr_debug("min_link_rate = %d\n", min_link_rate);
@@ -699,10 +715,11 @@ static int dp_aux_chan_ready(struct mdss_dp_drv_pdata *ep)
int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
{
- struct edp_buf *rp;
- int cnt, rlen;
- int ret = 0;
- int blk_num = 0;
+ struct edp_buf *rp = &dp->rxp;
+ int rlen, ret = 0;
+ int edid_blk = 0, blk_num = 0, retries = 10;
+ bool edid_parsing_done = false;
+ const u8 cea_tag = 0x02;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -710,70 +727,56 @@ int mdss_dp_edid_read(struct mdss_dp_drv_pdata *dp)
return ret;
}
- for (cnt = 5; cnt; cnt--) {
- rlen = dp_aux_read_buf
- (dp, EDID_START_ADDRESS, EDID_BLOCK_SIZE, 1);
- if (rlen > 0) {
- pr_debug("cnt=%d, block=%d, rlen=%d\n",
- cnt, blk_num, rlen);
-
- rp = &dp->rxp;
- if (!dp_edid_buf_error(rp->data, rp->len))
- break;
+ do {
+ rlen = dp_aux_read_buf(dp, EDID_START_ADDRESS +
+ (blk_num * EDID_BLOCK_SIZE),
+ EDID_BLOCK_SIZE, 1);
+ if (rlen != EDID_BLOCK_SIZE) {
+ pr_err("Read failed. rlen=%d\n", rlen);
+ continue;
}
- }
- if ((cnt <= 0) && (rlen != EDID_BLOCK_SIZE)) {
- pr_err("Read failed. rlen=%d\n", rlen);
- return -EINVAL;
- }
+ pr_debug("blk_num=%d, rlen=%d\n", blk_num, rlen);
- rp = &dp->rxp;
+ if (dp_edid_is_valid_header(rp->data)) {
+ if (dp_edid_buf_error(rp->data, rp->len))
+ continue;
- dp_extract_edid_manufacturer(&dp->edid, rp->data);
- dp_extract_edid_product(&dp->edid, rp->data);
- dp_extract_edid_version(&dp->edid, rp->data);
- dp_extract_edid_ext_block_cnt(&dp->edid, rp->data);
- dp_extract_edid_video_support(&dp->edid, rp->data);
- dp_extract_edid_feature(&dp->edid, rp->data);
- dp_extract_edid_detailed_timing_description(&dp->edid, rp->data);
- /* for the first block initialize the edid buffer size */
- dp->edid_buf_size = 0;
+ if (edid_parsing_done) {
+ blk_num++;
+ continue;
+ }
- pr_debug("edid extension = %d\n",
- dp->edid.ext_block_cnt);
+ dp_extract_edid_manufacturer(&dp->edid, rp->data);
+ dp_extract_edid_product(&dp->edid, rp->data);
+ dp_extract_edid_version(&dp->edid, rp->data);
+ dp_extract_edid_ext_block_cnt(&dp->edid, rp->data);
+ dp_extract_edid_video_support(&dp->edid, rp->data);
+ dp_extract_edid_feature(&dp->edid, rp->data);
+ dp_extract_edid_detailed_timing_description(&dp->edid,
+ rp->data);
- memcpy(dp->edid_buf, rp->data, EDID_BLOCK_SIZE);
- dp->edid_buf_size += EDID_BLOCK_SIZE;
+ edid_parsing_done = true;
+ } else {
+ edid_blk++;
+ blk_num++;
- if (!dp->edid.ext_block_cnt)
- return 0;
+ /* fix dongle byte shift issue */
+ if (edid_blk == 1 && rp->data[0] != cea_tag) {
+ u8 tmp[EDID_BLOCK_SIZE - 1];
- for (blk_num = 1; blk_num <= dp->edid.ext_block_cnt;
- blk_num++) {
- for (cnt = 5; cnt; cnt--) {
- rlen = dp_aux_read_buf
- (dp, EDID_START_ADDRESS +
- (blk_num * EDID_BLOCK_SIZE),
- EDID_BLOCK_SIZE, 1);
- if (rlen > 0) {
- pr_debug("cnt=%d, blk_num=%d, rlen=%d\n",
- cnt, blk_num, rlen);
- rp = &dp->rxp;
- if (!dp_edid_buf_error(rp->data, rp->len))
- break;
+ memcpy(tmp, rp->data, EDID_BLOCK_SIZE - 1);
+ rp->data[0] = cea_tag;
+ memcpy(rp->data + 1, tmp, EDID_BLOCK_SIZE - 1);
}
}
- if ((cnt <= 0) && (rlen != EDID_BLOCK_SIZE)) {
- pr_err("Read failed. rlen=%d\n", rlen);
- return -EINVAL;
- }
+ memcpy(dp->edid_buf + (edid_blk * EDID_BLOCK_SIZE),
+ rp->data, EDID_BLOCK_SIZE);
- memcpy(dp->edid_buf + (blk_num * EDID_BLOCK_SIZE),
- rp->data, EDID_BLOCK_SIZE);
- dp->edid_buf_size += EDID_BLOCK_SIZE;
- }
+ if (edid_blk == dp->edid.ext_block_cnt)
+ return 0;
+ } while (retries--);
return 0;
}
@@ -1113,17 +1116,17 @@ static void dp_host_train_set(struct mdss_dp_drv_pdata *ep, int train)
}
char vm_pre_emphasis[4][4] = {
- {0x00, 0x06, 0x09, 0x0C}, /* pe0, 0 db */
- {0x00, 0x06, 0x09, 0xFF}, /* pe1, 3.5 db */
- {0x03, 0x06, 0xFF, 0xFF}, /* pe2, 6.0 db */
- {0x03, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
+ {0x00, 0x09, 0x11, 0x0C}, /* pe0, 0 db */
+ {0x00, 0x0A, 0x10, 0xFF}, /* pe1, 3.5 db */
+ {0x00, 0x0C, 0xFF, 0xFF}, /* pe2, 6.0 db */
+ {0x00, 0xFF, 0xFF, 0xFF} /* pe3, 9.5 db */
};
/* voltage swing, 0.2v and 1.0v are not supported */
char vm_voltage_swing[4][4] = {
- {0x0a, 0x18, 0x1A, 0x1E}, /* sw0, 0.4v */
- {0x07, 0x1A, 0x1E, 0xFF}, /* sw1, 0.6 v */
- {0x1A, 0x1E, 0xFF, 0xFF}, /* sw1, 0.8 v */
+ {0x07, 0x0f, 0x12, 0x1E}, /* sw0, 0.4v */
+ {0x11, 0x1D, 0x1F, 0xFF}, /* sw1, 0.6 v */
+ {0x18, 0x1F, 0xFF, 0xFF}, /* sw2, 0.8 v */
{0x1E, 0xFF, 0xFF, 0xFF} /* sw3, 1.2 v, optional */
};
@@ -1211,7 +1214,7 @@ static int dp_start_link_train_1(struct mdss_dp_drv_pdata *ep)
static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
{
- int tries;
+ int tries = 0;
int ret = 0;
int usleep_time;
char pattern;
@@ -1223,12 +1226,12 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
else
pattern = 0x02;
- dp_host_train_set(ep, pattern); /* train_2 */
- dp_voltage_pre_emphasise_set(ep);
dp_train_pattern_set_write(ep, pattern | 0x20);/* train_2 */
- tries = 0;
- while (1) {
+ do {
+ dp_voltage_pre_emphasise_set(ep);
+ dp_host_train_set(ep, pattern);
+
usleep_time = ep->dpcd.training_read_interval;
usleep_range(usleep_time, usleep_time);
@@ -1240,14 +1243,13 @@ static int dp_start_link_train_2(struct mdss_dp_drv_pdata *ep)
}
tries++;
- if (tries > 5) {
+ if (tries > 4) {
ret = -1;
break;
}
dp_sink_train_set_adjust(ep);
- dp_voltage_pre_emphasise_set(ep);
- }
+ } while (1);
return ret;
}
@@ -1319,7 +1321,6 @@ static void dp_clear_training_pattern(struct mdss_dp_drv_pdata *ep)
int mdss_dp_link_train(struct mdss_dp_drv_pdata *dp)
{
int ret = 0;
- int usleep_time;
ret = dp_aux_chan_ready(dp);
if (ret) {
@@ -1340,8 +1341,6 @@ train_start:
mdss_dp_state_ctrl(&dp->ctrl_io, 0);
dp_clear_training_pattern(dp);
- usleep_time = dp->dpcd.training_read_interval;
- usleep_range(usleep_time, usleep_time);
ret = dp_start_link_train_1(dp);
if (ret < 0) {
@@ -1356,8 +1355,6 @@ train_start:
pr_debug("Training 1 completed successfully\n");
- mdss_dp_state_ctrl(&dp->ctrl_io, 0);
- dp_clear_training_pattern(dp);
ret = dp_start_link_train_2(dp);
if (ret < 0) {
if (dp_link_rate_down_shift(dp) == 0) {
@@ -1375,7 +1372,8 @@ train_start:
clear:
dp_clear_training_pattern(dp);
if (ret != -1) {
- mdss_dp_setup_tr_unit(&dp->ctrl_io);
+ mdss_dp_setup_tr_unit(&dp->ctrl_io, dp->link_rate,
+ dp->lane_cnt, dp->vic);
mdss_dp_state_ctrl(&dp->ctrl_io, ST_SEND_VIDEO);
pr_debug("State_ctrl set to SEND_VIDEO\n");
}
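A note on why dp_edid_is_valid_header() replaces the old strncmp() check: edid_hdr begins with 0x00, so strlen(edid_hdr) evaluated to 0 and strncmp(buf, edid_hdr, 0) compared nothing, returning 0 for every buffer. Any byte-wise comparison over the full 8 bytes fixes this; a self-contained equivalent using memcmp():

#include <stdbool.h>
#include <string.h>

static const unsigned char edid_header[8] =
	{ 0x00, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0x00 };

static bool edid_header_ok(const unsigned char *buf)
{
	/* compare all 8 bytes; the embedded 0x00 is data, not a terminator */
	return memcmp(buf, edid_header, sizeof(edid_header)) == 0;
}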
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.c b/drivers/video/fbdev/msm/mdss_dp_util.c
index bdf5d92f7053..92acb910e0c3 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.c
+++ b/drivers/video/fbdev/msm/mdss_dp_util.c
@@ -32,6 +32,29 @@
#define AUDIO_FREQ_48 48000
#define DP_AUDIO_FREQ_COUNT 3
+enum mdss_dp_pin_assignment {
+ PIN_ASSIGNMENT_A,
+ PIN_ASSIGNMENT_B,
+ PIN_ASSIGNMENT_C,
+ PIN_ASSIGNMENT_D,
+ PIN_ASSIGNMENT_E,
+ PIN_ASSIGNMENT_F,
+ PIN_ASSIGNMENT_MAX,
+};
+
+static const char *mdss_dp_pin_name(u8 pin)
+{
+ switch (pin) {
+ case PIN_ASSIGNMENT_A: return "PIN_ASSIGNMENT_A";
+ case PIN_ASSIGNMENT_B: return "PIN_ASSIGNMENT_B";
+ case PIN_ASSIGNMENT_C: return "PIN_ASSIGNMENT_C";
+ case PIN_ASSIGNMENT_D: return "PIN_ASSIGNMENT_D";
+ case PIN_ASSIGNMENT_E: return "PIN_ASSIGNMENT_E";
+ case PIN_ASSIGNMENT_F: return "PIN_ASSIGNMENT_F";
+ default: return "UNKNOWN";
+ }
+}
+
static const uint32_t naud_value[DP_AUDIO_FREQ_COUNT][DP_AUDIO_FREQ_COUNT] = {
{ 10125, 16875, 33750 },
{ 5625, 9375, 18750 },
@@ -143,6 +166,18 @@ void mdss_dp_aux_reset(struct dss_io_data *ctrl_io)
writel_relaxed(aux_ctrl, ctrl_io->base + DP_AUX_CTRL);
}
+/* reset DP controller */
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io)
+{
+ u32 sw_reset = readl_relaxed(ctrl_io->base + DP_SW_RESET);
+
+ sw_reset |= BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+ udelay(1000);
+ sw_reset &= ~BIT(0);
+ writel_relaxed(sw_reset, ctrl_io->base + DP_SW_RESET);
+}
+
/* reset DP Mainlink */
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io)
{
@@ -284,13 +319,47 @@ void mdss_dp_sw_mvid_nvid(struct dss_io_data *ctrl_io)
writel_relaxed(0x3c, ctrl_io->base + DP_SOFTWARE_NVID);
}
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io)
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res)
{
- /* Current Tr unit configuration supports only 1080p */
+ u32 dp_tu = 0x0;
+ u32 valid_boundary = 0x0;
+ u32 valid_boundary2 = 0x0;
+ struct dp_vc_tu_mapping_table const *tu_entry = tu_table;
+
writel_relaxed(0x21, ctrl_io->base + DP_MISC1_MISC0);
- writel_relaxed(0x0f0016, ctrl_io->base + DP_VALID_BOUNDARY);
- writel_relaxed(0x1f, ctrl_io->base + DP_TU);
- writel_relaxed(0x0, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ for (; tu_entry != tu_table + ARRAY_SIZE(tu_table); ++tu_entry) {
+ if ((tu_entry->vic == res) &&
+ (tu_entry->lanes == ln_cnt) &&
+ (tu_entry->lrate == link_rate))
+ break;
+ }
+
+ if (tu_entry == tu_table + ARRAY_SIZE(tu_table)) {
+ pr_err("requested ln_cnt=%d, lrate=0x%x not supported\n",
+ ln_cnt, link_rate);
+ return;
+ }
+
+ dp_tu |= tu_entry->tu_size_minus1;
+ valid_boundary |= tu_entry->valid_boundary_link;
+ valid_boundary |= (tu_entry->delay_start_link << 16);
+
+ valid_boundary2 |= (tu_entry->valid_lower_boundary_link << 1);
+ valid_boundary2 |= (tu_entry->upper_boundary_count << 16);
+ valid_boundary2 |= (tu_entry->lower_boundary_count << 20);
+
+ if (tu_entry->boundary_moderation_en)
+ valid_boundary2 |= BIT(0);
+
+ writel_relaxed(valid_boundary, ctrl_io->base + DP_VALID_BOUNDARY);
+ writel_relaxed(dp_tu, ctrl_io->base + DP_TU);
+ writel_relaxed(valid_boundary2, ctrl_io->base + DP_VALID_BOUNDARY_2);
+
+ pr_debug("valid_boundary=0x%x, valid_boundary2=0x%x\n",
+ valid_boundary, valid_boundary2);
+ pr_debug("dp_tu=0x%x\n", dp_tu);
}
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
@@ -431,9 +500,23 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status)
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
{
+ u8 pin_cfg, pin;
u32 config = 0;
- config |= (dp->alt_mode.dp_cap.dlink_pin_config << 8);
+ pin_cfg = dp->alt_mode.dp_cap.dlink_pin_config;
+
+ for (pin = PIN_ASSIGNMENT_A; pin < PIN_ASSIGNMENT_MAX; pin++) {
+ if (pin_cfg & BIT(pin))
+ break;
+ }
+
+ if (pin == PIN_ASSIGNMENT_MAX)
+ pin = PIN_ASSIGNMENT_C;
+
+ pr_debug("pin assignment: %s\n", mdss_dp_pin_name(pin));
+
+ config |= BIT(pin) << 8;
+
config |= (0x1 << 2); /* configure for DPv1.3 */
config |= 0x2; /* Configuring for UFP_D */
@@ -441,6 +524,17 @@ u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp)
return config;
}
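The pin-assignment loop above is a lowest-set-bit scan over the DFP's capability mask, falling back to pin assignment C when nothing is advertised. ffs() expresses the same selection in one call; a kernel-style sketch, assuming the PIN_ASSIGNMENT_* enum defined earlier in this file:

static unsigned int example_pick_pin(unsigned char pin_cfg)
{
	int bit = ffs(pin_cfg);	/* 1-based index of lowest set bit; 0 if none */

	return bit ? (unsigned int)(bit - 1) : PIN_ASSIGNMENT_C;
}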
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt)
+{
+ u32 info = 0x0;
+
+ info |= (ln_cnt & 0x0F);
+ info |= ((orientation & 0x0F) << 4);
+ pr_debug("Shared Info = 0x%x\n", info);
+ writel_relaxed(info, phy_io->base + DP_PHY_SPARE0);
+}
+
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io, char link_rate)
{
u32 acr_ctrl = 0;
diff --git a/drivers/video/fbdev/msm/mdss_dp_util.h b/drivers/video/fbdev/msm/mdss_dp_util.h
index 5eb9d092476f..cf2286f9b58a 100644
--- a/drivers/video/fbdev/msm/mdss_dp_util.h
+++ b/drivers/video/fbdev/msm/mdss_dp_util.h
@@ -150,6 +150,8 @@
#define DP_PHY_AUX_INTERRUPT_MASK (0x00000044)
#define DP_PHY_AUX_INTERRUPT_CLEAR (0x00000048)
+#define DP_PHY_SPARE0 0x00A8
+
#define QSERDES_TX0_OFFSET 0x0400
#define QSERDES_TX1_OFFSET 0x0800
@@ -200,17 +202,72 @@ struct edp_cmd {
char next; /* next command */
};
+struct dp_vc_tu_mapping_table {
+ u32 vic;
+ u8 lanes;
+ u8 lrate; /* DP_LINK_RATE -> 162(6), 270(10), 540(20) */
+ u8 bpp;
+ u8 valid_boundary_link;
+ u16 delay_start_link;
+ bool boundary_moderation_en;
+ u8 valid_lower_boundary_link;
+ u8 upper_boundary_count;
+ u8 lower_boundary_count;
+ u8 tu_size_minus1;
+};
+
+static const struct dp_vc_tu_mapping_table tu_table[] = {
+ {HDMI_VFRMT_640x480p60_4_3, 4, 06, 24,
+ 0x07, 0x0056, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 2, 06, 24,
+ 0x0e, 0x004f, false, 0x00, 0x00, 0x00, 0x3b},
+ {HDMI_VFRMT_640x480p60_4_3, 1, 06, 24,
+ 0x15, 0x0039, false, 0x00, 0x00, 0x00, 0x2c},
+ {HDMI_VFRMT_720x480p60_4_3, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_720x480p60_16_9, 1, 06, 24,
+ 0x13, 0x0038, true, 0x12, 0x0c, 0x0b, 0x24},
+ {HDMI_VFRMT_1280x720p60_16_9, 4, 06, 24,
+ 0x0c, 0x0020, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 2, 06, 24,
+ 0x16, 0x0015, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1280x720p60_16_9, 1, 10, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 4, 06, 24,
+ 0x16, 0x000f, false, 0x00, 0x00, 0x00, 0x1f},
+ {HDMI_VFRMT_1920x1080p60_16_9, 2, 10, 24,
+ 0x21, 0x0011, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_1920x1080p60_16_9, 1, 20, 24,
+ 0x21, 0x001a, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p30_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_3840x2160p60_16_9, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p24_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p30_256_135, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_VFRMT_4096x2160p60_256_135, 4, 20, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+ {HDMI_EVFRMT_4096x2160p24_16_9, 4, 10, 24,
+ 0x21, 0x000c, false, 0x00, 0x00, 0x00, 0x27},
+};
+
int dp_aux_read(void *ep, struct edp_cmd *cmds);
int dp_aux_write(void *ep, struct edp_cmd *cmd);
void mdss_dp_state_ctrl(struct dss_io_data *ctrl_io, u32 data);
u32 mdss_dp_get_ctrl_hw_version(struct dss_io_data *ctrl_io);
u32 mdss_dp_get_phy_hw_version(struct dss_io_data *phy_io);
+void mdss_dp_ctrl_reset(struct dss_io_data *ctrl_io);
void mdss_dp_aux_reset(struct dss_io_data *ctrl_io);
void mdss_dp_mainlink_reset(struct dss_io_data *ctrl_io);
void mdss_dp_phy_reset(struct dss_io_data *ctrl_io);
void mdss_dp_switch_usb3_phy_to_dp_mode(struct dss_io_data *tcsr_reg_io);
void mdss_dp_assert_phy_reset(struct dss_io_data *ctrl_io, bool assert);
-void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io);
+void mdss_dp_setup_tr_unit(struct dss_io_data *ctrl_io, u8 link_rate,
+ u8 ln_cnt, u32 res);
void mdss_dp_phy_aux_setup(struct dss_io_data *phy_io);
void mdss_dp_hpd_configure(struct dss_io_data *ctrl_io, bool enable);
void mdss_dp_aux_ctrl(struct dss_io_data *ctrl_io, bool enable);
@@ -231,6 +288,8 @@ void mdss_dp_usbpd_ext_dp_status(struct usbpd_dp_status *dp_status);
u32 mdss_dp_usbpd_gen_config_pkt(struct mdss_dp_drv_pdata *dp);
void mdss_dp_ctrl_lane_mapping(struct dss_io_data *ctrl_io,
struct lane_mapping l_map);
+void mdss_dp_phy_share_lane_config(struct dss_io_data *phy_io,
+ u8 orientation, u8 ln_cnt);
void mdss_dp_config_audio_acr_ctrl(struct dss_io_data *ctrl_io,
char link_rate);
void mdss_dp_audio_setup_sdps(struct dss_io_data *ctrl_io);
diff --git a/drivers/video/fbdev/msm/mdss_dsi_panel.c b/drivers/video/fbdev/msm/mdss_dsi_panel.c
index 8ffba091e2b2..01fc01425a3a 100644
--- a/drivers/video/fbdev/msm/mdss_dsi_panel.c
+++ b/drivers/video/fbdev/msm/mdss_dsi_panel.c
@@ -856,6 +856,48 @@ static int mdss_dsi_panel_low_power_config(struct mdss_panel_data *pdata,
return 0;
}
+
+static void mdss_dsi_parse_mdp_kickoff_threshold(struct device_node *np,
+ struct mdss_panel_info *pinfo)
+{
+ int len, rc;
+ const u32 *src;
+ u32 tmp;
+ u32 max_delay_us;
+
+ pinfo->mdp_koff_thshold = false;
+ src = of_get_property(np, "qcom,mdss-mdp-kickoff-threshold", &len);
+ if (!src || len < 2 * sizeof(u32))
+ return;
+
+ rc = of_property_read_u32(np, "qcom,mdss-mdp-kickoff-delay", &tmp);
+ if (!rc)
+ pinfo->mdp_koff_delay = tmp;
+ else
+ return;
+
+ if (pinfo->mipi.frame_rate == 0) {
+ pr_err("cannot enable guard window, unexpected panel fps\n");
+ return;
+ }
+
+ pinfo->mdp_koff_thshold_low = be32_to_cpu(src[0]);
+ pinfo->mdp_koff_thshold_high = be32_to_cpu(src[1]);
+ max_delay_us = 1000000 / pinfo->mipi.frame_rate;
+
+ /* enable the feature if threshold is valid */
+ if ((pinfo->mdp_koff_thshold_low < pinfo->mdp_koff_thshold_high) &&
+ ((pinfo->mdp_koff_delay > 0) &&
+ (pinfo->mdp_koff_delay < max_delay_us)))
+ pinfo->mdp_koff_thshold = true;
+
+ pr_debug("panel kickoff thshold:[%d, %d] delay:%d (max:%d) enable:%d\n",
+ pinfo->mdp_koff_thshold_low,
+ pinfo->mdp_koff_thshold_high,
+ pinfo->mdp_koff_delay,
+ max_delay_us,
+ pinfo->mdp_koff_thshold);
+}
+
static void mdss_dsi_parse_trigger(struct device_node *np, char *trigger,
char *trigger_key)
{
@@ -2497,6 +2539,8 @@ static int mdss_panel_parse_dt(struct device_node *np,
rc = of_property_read_u32(np, "qcom,mdss-mdp-transfer-time-us", &tmp);
pinfo->mdp_transfer_time_us = (!rc ? tmp : DEFAULT_MDP_TRANSFER_TIME);
+ mdss_dsi_parse_mdp_kickoff_threshold(np, pinfo);
+
pinfo->mipi.lp11_init = of_property_read_bool(np,
"qcom,mdss-dsi-lp11-init");
rc = of_property_read_u32(np, "qcom,mdss-dsi-init-delay-us", &tmp);
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_edid.c b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
index 4f1435d006b2..2047a047b537 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_edid.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_edid.c
@@ -698,7 +698,6 @@ static ssize_t hdmi_edid_sysfs_rda_3d_modes(struct device *dev,
}
}
- DEV_DBG("%s: '%s'\n", __func__, buf);
ret += scnprintf(buf + ret, PAGE_SIZE - ret, "\n");
return ret;
@@ -1567,7 +1566,9 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
frame_data = (active_h + blank_h) * (active_v + blank_v);
if (frame_data) {
- int refresh_rate_khz = (pixel_clk * khz_to_hz) / frame_data;
+ u64 refresh_rate = (u64)pixel_clk * khz_to_hz * khz_to_hz;
+
+ do_div(refresh_rate, frame_data);
timing.active_h = active_h;
timing.front_porch_h = front_porch_h;
@@ -1582,19 +1583,24 @@ static void hdmi_edid_detail_desc(struct hdmi_edid_ctrl *edid_ctrl,
(front_porch_v + pulse_width_v);
timing.active_low_v = active_low_v;
timing.pixel_freq = pixel_clk;
- timing.refresh_rate = refresh_rate_khz * khz_to_hz;
+ timing.refresh_rate = refresh_rate;
timing.interlaced = interlaced;
timing.supported = true;
timing.ar = aspect_ratio_4_3 ? HDMI_RES_AR_4_3 :
(aspect_ratio_5_4 ? HDMI_RES_AR_5_4 :
HDMI_RES_AR_16_9);
- DEV_DBG("%s: new res: %dx%d%s@%dHz\n", __func__,
+ DEV_DBG("%s: new res: %dx%d%s@%d.%d%d%dHz\n", __func__,
timing.active_h, timing.active_v,
interlaced ? "i" : "p",
- timing.refresh_rate / khz_to_hz);
-
- rc = hdmi_set_resv_timing_info(&timing);
+ timing.refresh_rate / khz_to_hz,
+ (timing.refresh_rate % khz_to_hz) / 100,
+ (timing.refresh_rate % 100) / 10,
+ timing.refresh_rate % 10);
+
+ rc = hdmi_get_video_id_code(&timing, NULL);
+ if (rc < 0)
+ rc = hdmi_set_resv_timing_info(&timing);
} else {
DEV_ERR("%s: Invalid frame data\n", __func__);
rc = -EINVAL;
@@ -1642,6 +1648,7 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
u32 supported = hdmi_edid_is_mode_supported(edid_ctrl, &timing);
struct hdmi_edid_sink_data *sink_data = &edid_ctrl->sink_data;
struct disp_mode_info *disp_mode_list = sink_data->disp_mode_list;
+ u32 i = 0;
if (video_format >= HDMI_VFRMT_MAX) {
DEV_ERR("%s: video format: %s is not supported\n", __func__,
@@ -1653,6 +1660,15 @@ static void hdmi_edid_add_sink_video_format(struct hdmi_edid_ctrl *edid_ctrl,
video_format, msm_hdmi_mode_2string(video_format),
supported ? "Supported" : "Not-Supported");
+ for (i = 0; i < sink_data->num_of_elements; i++) {
+ u32 vic = disp_mode_list[i].video_format;
+
+ if (vic == video_format) {
+ DEV_DBG("%s: vic %d already added\n", __func__, vic);
+ return;
+ }
+ }
+
if (!ret && supported) {
/* todo: MHL */
disp_mode_list[sink_data->num_of_elements].video_format =
@@ -1970,6 +1986,7 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
const u8 *svd = NULL;
u32 has60hz_mode = false;
u32 has50hz_mode = false;
+ u32 desc_offset = 0;
bool read_block0_res = false;
struct hdmi_edid_sink_data *sink_data = NULL;
@@ -2033,103 +2050,66 @@ static void hdmi_edid_get_display_mode(struct hdmi_edid_ctrl *edid_ctrl)
if (video_format == HDMI_VFRMT_640x480p60_4_3)
has480p = true;
}
- } else if (!num_of_cea_blocks || read_block0_res) {
- /* Detailed timing descriptors */
- u32 desc_offset = 0;
- /*
- * * Maximum 4 timing descriptor in block 0 - No CEA
- * extension in this case
- * * EDID_FIRST_TIMING_DESC[0x36] - 1st detailed timing
- * descriptor
- * * EDID_DETAIL_TIMING_DESC_BLCK_SZ[0x12] - Each detailed
- * timing descriptor has block size of 18
- */
- while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk0+0x36+desc_offset,
- &video_format);
-
- DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
-
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
-
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
-
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
- }
- } else if (1 == num_of_cea_blocks) {
- u32 desc_offset = 0;
-
- /*
- * Read from both block 0 and block 1
- * Read EDID block[0] as above
- */
- while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk0+0x36+desc_offset,
- &video_format);
+ }
- DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
+ i = 0;
+ /* Read DTD resolutions from block0 */
+ while (4 > i && 0 != edid_blk0[0x36+desc_offset]) {
+ hdmi_edid_detail_desc(edid_ctrl,
+ edid_blk0+0x36+desc_offset,
+ &video_format);
+
+ DEV_DBG("[%s:%d] Block-0 Adding vid fmt = [%s]\n",
+ __func__, __LINE__,
+ msm_hdmi_mode_2string(video_format));
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
+ /* Make a note of the preferred video format */
+ if (i == 0) {
+ sink_data->preferred_video_format =
+ video_format;
}
+ desc_offset += 0x12;
+ ++i;
+ }
- /*
- * * Parse block 1 - CEA extension byte offset of first
- * detailed timing generation - offset is relevant to
- * the offset of block 1
- * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
- * extension first timing desc - indicate the offset of
- * the first detailed timing descriptor
- * * EDID_BLOCK_SIZE = 0x80 Each page size in the EDID ROM
- */
- desc_offset = edid_blk1[0x02];
- while (0 != edid_blk1[desc_offset]) {
- hdmi_edid_detail_desc(edid_ctrl,
- edid_blk1+desc_offset,
- &video_format);
-
- DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
- __func__, __LINE__,
- msm_hdmi_mode_2string(video_format));
+ /*
+ * * Parse block 1 - CEA extension byte offset of first
+ * detailed timing generation - offset is relevant to
+ * the offset of block 1
+ * * EDID_CEA_EXTENSION_FIRST_DESC[0x82]: Offset to CEA
+ * extension first timing desc - indicate the offset of
+ * the first detailed timing descriptor
+ * * EDID_BLOCK_SIZE = 0x80 Each page size in the EDID ROM
+ */
+ desc_offset = edid_blk1[0x02];
+ i = 0;
+ while (edid_blk1[desc_offset]) {
+ hdmi_edid_detail_desc(edid_ctrl,
+ edid_blk1+desc_offset,
+ &video_format);
- hdmi_edid_add_sink_video_format(edid_ctrl,
- video_format);
- if (video_format == HDMI_VFRMT_640x480p60_4_3)
- has480p = true;
+ DEV_DBG("[%s:%d] Block-1 Adding vid fmt = [%s]\n",
+ __func__, __LINE__,
+ msm_hdmi_mode_2string(video_format));
- /* Make a note of the preferred video format */
- if (i == 0) {
- sink_data->preferred_video_format =
- video_format;
- }
- desc_offset += 0x12;
- ++i;
+ hdmi_edid_add_sink_video_format(edid_ctrl,
+ video_format);
+ if (video_format == HDMI_VFRMT_640x480p60_4_3)
+ has480p = true;
+
+ /* Make a note of the preferred video format */
+ if (i == 0) {
+ sink_data->preferred_video_format =
+ video_format;
}
+ desc_offset += 0x12;
+ ++i;
}
std_blk = 0;
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.c b/drivers/video/fbdev/msm/mdss_hdmi_util.c
index 9ed909e9a387..c9fc8ba8bfdb 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.c
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.c
@@ -560,7 +560,7 @@ int msm_hdmi_get_timing_info(
int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
struct hdmi_util_ds_data *ds_data, u32 mode)
{
- int ret;
+ int ret, i = 0;
if (!info)
return -EINVAL;
@@ -570,9 +570,23 @@ int hdmi_get_supported_mode(struct msm_hdmi_mode_timing_info *info,
ret = msm_hdmi_get_timing_info(info, mode);
- if (!ret && ds_data && ds_data->ds_registered && ds_data->ds_max_clk) {
- if (info->pixel_freq > ds_data->ds_max_clk)
- info->supported = false;
+ if (!ret && ds_data && ds_data->ds_registered) {
+ if (ds_data->ds_max_clk) {
+ if (info->pixel_freq > ds_data->ds_max_clk)
+ info->supported = false;
+ }
+
+ if (ds_data->modes_num) {
+ u32 *modes = ds_data->modes;
+
+ for (i = 0; i < ds_data->modes_num; i++) {
+ if (info->video_format == *modes++)
+ break;
+ }
+
+ if (i == ds_data->modes_num)
+ info->supported = false;
+ }
}
return ret;
@@ -625,7 +639,7 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
{
int i, vic = -1;
struct msm_hdmi_mode_timing_info supported_timing = {0};
- u32 ret;
+ u32 ret, pclk_delta, pclk, fps_delta, fps;
if (!timing_in) {
pr_err("invalid input\n");
@@ -633,9 +647,16 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
}
/* active_low_h, active_low_v and interlaced are not checked against */
- for (i = 0; i < HDMI_VFRMT_MAX; i++) {
+ for (i = 1; i < HDMI_VFRMT_MAX; i++) {
ret = hdmi_get_supported_mode(&supported_timing, ds_data, i);
+ pclk = supported_timing.pixel_freq;
+ fps = supported_timing.refresh_rate;
+
+ /* as per standard, 0.5% of deviation is allowed */
+ pclk_delta = (pclk / HDMI_KHZ_TO_HZ) * 5;
+ fps_delta = (fps / HDMI_KHZ_TO_HZ) * 5;
+
if (ret || !supported_timing.supported)
continue;
if (timing_in->active_h != supported_timing.active_h)
@@ -654,9 +675,11 @@ int hdmi_get_video_id_code(struct msm_hdmi_mode_timing_info *timing_in,
continue;
if (timing_in->back_porch_v != supported_timing.back_porch_v)
continue;
- if (timing_in->pixel_freq != supported_timing.pixel_freq)
+ if (timing_in->pixel_freq < (pclk - pclk_delta) ||
+ timing_in->pixel_freq > (pclk + pclk_delta))
continue;
- if (timing_in->refresh_rate != supported_timing.refresh_rate)
+ if (timing_in->refresh_rate < (fps - fps_delta) ||
+ timing_in->refresh_rate > (fps + fps_delta))
continue;
vic = (int)supported_timing.video_format;
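The 0.5% tolerance above comes out as (x / 1000) * 5, which relies on HDMI_KHZ_TO_HZ being 1000. The same window test written as a helper (a sketch, not driver code):

#include <stdbool.h>

static bool within_half_percent(unsigned int actual, unsigned int nominal)
{
	unsigned int delta = (nominal / 1000) * 5;	/* 0.5% of nominal */

	return actual >= nominal - delta && actual <= nominal + delta;
}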
diff --git a/drivers/video/fbdev/msm/mdss_hdmi_util.h b/drivers/video/fbdev/msm/mdss_hdmi_util.h
index e65cf915fe92..8a7e4d1ebafc 100644
--- a/drivers/video/fbdev/msm/mdss_hdmi_util.h
+++ b/drivers/video/fbdev/msm/mdss_hdmi_util.h
@@ -459,6 +459,8 @@ struct hdmi_tx_ddc_ctrl {
struct hdmi_util_ds_data {
bool ds_registered;
u32 ds_max_clk;
+ u32 modes_num;
+ u32 *modes;
};
static inline int hdmi_tx_get_v_total(const struct msm_hdmi_mode_timing_info *t)
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index 81e3438befca..6845b386807b 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1992,6 +1992,8 @@ static void mdss_mdp_hw_rev_caps_init(struct mdss_data_type *mdata)
set_bit(MDSS_QOS_PER_PIPE_IB, mdata->mdss_qos_map);
set_bit(MDSS_QOS_REMAPPER, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_TS_PREFILL, mdata->mdss_qos_map);
+ set_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map);
set_bit(MDSS_QOS_OVERHEAD_FACTOR, mdata->mdss_qos_map);
set_bit(MDSS_QOS_CDP, mdata->mdss_qos_map); /* cdp supported */
mdata->enable_cdp = false; /* disable cdp */
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 0085163ada52..8ac63aaaefce 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -122,6 +122,11 @@
*/
#define MDSS_MDP_DS_OVERFETCH_SIZE 5
+#define QOS_LUT_NRT_READ 0x0
+#define QOS_LUT_CWB_READ 0xe4000000
+#define PANIC_LUT_NRT_READ 0x0
+#define ROBUST_LUT_NRT_READ 0xFFFF
+
/* hw cursor can only be setup in highest mixer stage */
#define HW_CURSOR_STAGE(mdata) \
(((mdata)->max_target_zorder + MDSS_MDP_STAGE_0) - 1)
@@ -407,7 +412,7 @@ struct mdss_mdp_cwb {
struct list_head data_queue;
int valid;
u32 wb_idx;
- struct mdp_output_layer *layer;
+ struct mdp_output_layer layer;
void *priv_data;
struct msm_sync_pt_data cwb_sync_pt_data;
struct blocking_notifier_head notifier_head;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ebc7d2144eb9..eb1e0b5c47a6 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -3424,6 +3424,7 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
mutex_lock(&cwb->queue_lock);
cwb_data = list_first_entry_or_null(&cwb->data_queue,
struct mdss_mdp_wb_data, next);
+ if (cwb_data)
+ __list_del_entry(&cwb_data->next);
mutex_unlock(&cwb->queue_lock);
if (cwb_data == NULL) {
pr_err("no output buffer for cwb\n");
@@ -3453,14 +3454,14 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode |= MDSS_MDP_CTL_OP_WFD_MODE;
/* Select CWB data point */
- data_point = (cwb->layer->flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
+ data_point = (cwb->layer.flags & MDP_COMMIT_CWB_DSPP) ? 0x4 : 0;
writel_relaxed(data_point, mdata->mdp_base + mdata->ppb_ctl[2]);
if (sctl)
writel_relaxed(data_point + 1,
mdata->mdp_base + mdata->ppb_ctl[3]);
- /* Flush WB */
- ctl->flush_bits |= BIT(16);
+ /* Flush WB and CTL */
+ ctl->flush_bits |= BIT(16) | BIT(17);
opmode = mdss_mdp_ctl_read(ctl, MDSS_MDP_REG_CTL_TOP) | ctl->opmode;
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_TOP, opmode);
@@ -3469,6 +3470,10 @@ int mdss_mdp_cwb_setup(struct mdss_mdp_ctl *ctl)
sctl->opmode;
mdss_mdp_ctl_write(sctl, MDSS_MDP_REG_CTL_TOP, opmode);
}
+
+ /* Increase commit count to signal CWB release fence */
+ atomic_inc(&cwb->cwb_sync_pt_data.commit_cnt);
+
goto cwb_setup_done;
cwb_setup_fail:
diff --git a/drivers/video/fbdev/msm/mdss_mdp_hwio.h b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
index 76fd2d12ac95..294e05c2fbb0 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_hwio.h
+++ b/drivers/video/fbdev/msm/mdss_mdp_hwio.h
@@ -541,6 +541,10 @@ enum mdss_mdp_writeback_index {
#define MDSS_MDP_REG_WB_N16_INIT_PHASE_Y_C12 0x06C
#define MDSS_MDP_REG_WB_OUT_SIZE 0x074
#define MDSS_MDP_REG_WB_ALPHA_X_VALUE 0x078
+#define MDSS_MDP_REG_WB_DANGER_LUT 0x084
+#define MDSS_MDP_REG_WB_SAFE_LUT 0x088
+#define MDSS_MDP_REG_WB_CREQ_LUT 0x08c
+#define MDSS_MDP_REG_WB_QOS_CTRL 0x090
#define MDSS_MDP_REG_WB_CSC_BASE 0x260
#define MDSS_MDP_REG_WB_DST_ADDR_SW_STATUS 0x2B0
#define MDSS_MDP_REG_WB_CDP_CTRL 0x2B4
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index 72d6175686b7..4eb121f01aca 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -73,6 +73,7 @@ struct mdss_mdp_cmd_ctx {
struct mutex clk_mtx;
spinlock_t clk_lock;
spinlock_t koff_lock;
+ spinlock_t ctlstart_lock;
struct work_struct gate_clk_work;
struct delayed_work delayed_off_clk_work;
struct work_struct pp_done_work;
@@ -144,15 +145,11 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
u32 init;
u32 height;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
-
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_LEFT);
if (!mixer) {
mixer = mdss_mdp_mixer_get(ctl, MDSS_MDP_MIXER_MUX_RIGHT);
- if (!mixer) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (!mixer)
goto exit;
- }
}
init = mdss_mdp_pingpong_read(mixer->pingpong_base,
@@ -160,10 +157,8 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
height = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_SYNC_CONFIG_HEIGHT) & 0xffff;
- if (height < init) {
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ if (height < init)
goto exit;
- }
cnt = mdss_mdp_pingpong_read(mixer->pingpong_base,
MDSS_MDP_REG_PP_INT_COUNT_VAL) & 0xffff;
@@ -173,13 +168,21 @@ static inline u32 mdss_mdp_cmd_line_count(struct mdss_mdp_ctl *ctl)
else
cnt -= init;
- mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
-
pr_debug("cnt=%d init=%d height=%d\n", cnt, init, height);
exit:
return cnt;
}
+static inline u32 mdss_mdp_cmd_line_count_wrapper(struct mdss_mdp_ctl *ctl)
+{
+ u32 ret;
+
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_ON);
+ ret = mdss_mdp_cmd_line_count(ctl);
+ mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
+ return ret;
+}
+
static int mdss_mdp_tearcheck_enable(struct mdss_mdp_ctl *ctl, bool enable)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
@@ -2677,12 +2680,42 @@ static int mdss_mdp_disable_autorefresh(struct mdss_mdp_ctl *ctl,
return 0;
}
+static bool wait_for_read_ptr_if_late(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_ctl *sctl, struct mdss_panel_info *pinfo)
+{
+ u32 line_count;
+ u32 sline_count = 0;
+ bool ret = true;
+ u32 low_threshold = pinfo->mdp_koff_thshold_low;
+ u32 high_threshold = pinfo->mdp_koff_thshold_high;
+
+ /* read the line count */
+ line_count = mdss_mdp_cmd_line_count(ctl);
+ if (sctl)
+ sline_count = mdss_mdp_cmd_line_count(sctl);
+
+ /* if the line count is within the window, return to trigger the transfer */
+ if (((line_count > low_threshold) && (line_count < high_threshold)) &&
+ (!sctl || ((sline_count > low_threshold) &&
+ (sline_count < high_threshold))))
+ ret = false;
+
+ pr_debug("threshold:[%d, %d]\n", low_threshold, high_threshold);
+ pr_debug("line:%d sline:%d ret:%d\n", line_count, sline_count, ret);
+ MDSS_XLOG(line_count, sline_count, ret);
+
+ return ret;
+}
static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
- struct mdss_mdp_cmd_ctx *ctx)
+ struct mdss_mdp_ctl *sctl, struct mdss_mdp_cmd_ctx *ctx)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
bool is_pp_split = is_pingpong_split(ctl->mfd);
+ struct mdss_panel_info *pinfo = NULL;
+
+ if (ctl->panel_data)
+ pinfo = &ctl->panel_data->panel_info;
MDSS_XLOG(ctx->autorefresh_state);
@@ -2707,9 +2740,33 @@ static void __mdss_mdp_kickoff(struct mdss_mdp_ctl *ctl,
ctx->autorefresh_state = MDP_AUTOREFRESH_ON;
} else {
+
+ /*
+ * Some panels can require that mdp is within some range
+ * of the scanlines in order to trigger the tansfer.
+ * If that is the case, make sure the panel scanline
+ * is within the limit to start.
+ * Acquire an spinlock for this operation to raise the
+ * priority of this thread and make sure the context
+ * is maintained, so we can have the less time possible
+ * between the check of the scanline and the kickoff.
+ */
+ if (pinfo && pinfo->mdp_koff_thshold) {
+ spin_lock(&ctx->ctlstart_lock);
+ if (wait_for_read_ptr_if_late(ctl, sctl, pinfo)) {
+ spin_unlock(&ctx->ctlstart_lock);
+ usleep_range(pinfo->mdp_koff_delay,
+ pinfo->mdp_koff_delay + 10);
+ spin_lock(&ctx->ctlstart_lock);
+ }
+ }
+
/* SW Kickoff */
mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
MDSS_XLOG(0x11, ctx->autorefresh_state);
+
+ if (pinfo && pinfo->mdp_koff_thshold)
+ spin_unlock(&ctx->ctlstart_lock);
}
}
@@ -2841,7 +2898,7 @@ static int mdss_mdp_cmd_kickoff(struct mdss_mdp_ctl *ctl, void *arg)
}
/* Kickoff */
- __mdss_mdp_kickoff(ctl, ctx);
+ __mdss_mdp_kickoff(ctl, sctl, ctx);
mdss_mdp_cmd_post_programming(ctl);
@@ -3267,6 +3324,7 @@ static int mdss_mdp_cmd_ctx_setup(struct mdss_mdp_ctl *ctl,
init_completion(&ctx->autorefresh_done);
spin_lock_init(&ctx->clk_lock);
spin_lock_init(&ctx->koff_lock);
+ spin_lock_init(&ctx->ctlstart_lock);
mutex_init(&ctx->clk_mtx);
mutex_init(&ctx->mdp_rdptr_lock);
mutex_init(&ctx->mdp_wrptr_lock);
@@ -3557,7 +3615,7 @@ int mdss_mdp_cmd_start(struct mdss_mdp_ctl *ctl)
ctl->ops.wait_pingpong = mdss_mdp_cmd_wait4pingpong;
ctl->ops.add_vsync_handler = mdss_mdp_cmd_add_vsync_handler;
ctl->ops.remove_vsync_handler = mdss_mdp_cmd_remove_vsync_handler;
- ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count;
+ ctl->ops.read_line_cnt_fnc = mdss_mdp_cmd_line_count_wrapper;
ctl->ops.restore_fnc = mdss_mdp_cmd_restore;
ctl->ops.early_wake_up_fnc = mdss_mdp_cmd_early_wake_up;
ctl->ops.reconfigure = mdss_mdp_cmd_reconfigure;
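The kickoff gating added above is easy to misread, so a sketch may help: when the panel publishes a kickoff window, the driver samples the pingpong line counter and writes CTL_START only while the read pointer is between mdp_koff_thshold_low and mdp_koff_thshold_high; otherwise it drops the lock, backs off for mdp_koff_delay microseconds, and checks once more. A minimal sketch of the same idea, assuming the panel_info fields added in mdss_panel.h below; unlike the driver, which retries once under ctlstart_lock, this version simply loops:

	/*
	 * Sketch only, not the driver code: wait until the panel
	 * scanline is inside the safe kickoff window, then trigger
	 * the transfer. The real code holds ctx->ctlstart_lock
	 * around the check and the CTL_START write.
	 */
	static void kickoff_when_in_window(struct mdss_mdp_ctl *ctl,
					   struct mdss_panel_info *pinfo)
	{
		u32 line;

		for (;;) {
			line = mdss_mdp_cmd_line_count(ctl);
			if (line > pinfo->mdp_koff_thshold_low &&
			    line < pinfo->mdp_koff_thshold_high)
				break;
			usleep_range(pinfo->mdp_koff_delay,
				     pinfo->mdp_koff_delay + 10);
		}
		mdss_mdp_ctl_write(ctl, MDSS_MDP_REG_CTL_START, 1);
	}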
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
index 40b10e368309..e6e03e7d54b2 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_writeback.c
@@ -124,6 +124,30 @@ static inline void mdp_wb_write(struct mdss_mdp_writeback_ctx *ctx,
writel_relaxed(val, ctx->base + reg);
}
+static void mdss_mdp_set_qos_wb(struct mdss_mdp_ctl *ctl,
+ struct mdss_mdp_writeback_ctx *ctx)
+{
+ u32 wb_qos_setup = QOS_LUT_NRT_READ;
+ struct mdss_mdp_cwb *cwb = NULL;
+ struct mdss_overlay_private *mdp5_data;
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
+	if (!test_bit(MDSS_QOS_WB_QOS, mdata->mdss_qos_map))
+ return;
+
+ mdp5_data = mfd_to_mdp5_data(ctl->mfd);
+ cwb = &mdp5_data->cwb;
+
+ if (cwb->valid)
+ wb_qos_setup = QOS_LUT_CWB_READ;
+ else
+ wb_qos_setup = QOS_LUT_NRT_READ;
+
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_DANGER_LUT, PANIC_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_SAFE_LUT, ROBUST_LUT_NRT_READ);
+ mdp_wb_write(ctx, MDSS_MDP_REG_WB_CREQ_LUT, wb_qos_setup);
+}
+
static void mdss_mdp_set_ot_limit_wb(struct mdss_mdp_writeback_ctx *ctx,
int is_wfd)
{
@@ -447,7 +471,7 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
cwb = &mdp5_data->cwb;
ctx = (struct mdss_mdp_writeback_ctx *)cwb->priv_data;
- buffer = &cwb->layer->buffer;
+ buffer = &cwb->layer.buffer;
ctx->opmode = 0;
ctx->img_width = buffer->width;
@@ -495,6 +519,8 @@ int mdss_mdp_writeback_prepare_cwb(struct mdss_mdp_ctl *ctl,
if (ctl->mdata->default_ot_wr_limit || ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, false);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
return ret;
}
@@ -897,6 +923,8 @@ static int mdss_mdp_writeback_display(struct mdss_mdp_ctl *ctl, void *arg)
ctl->mdata->default_ot_rd_limit)
mdss_mdp_set_ot_limit_wb(ctx, true);
+ mdss_mdp_set_qos_wb(ctl, ctx);
+
wb_args = (struct mdss_mdp_writeback_arg *) arg;
if (!wb_args)
return -ENOENT;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_layer.c b/drivers/video/fbdev/msm/mdss_mdp_layer.c
index 91d4332700b6..0f0df2256f74 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_layer.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_layer.c
@@ -2285,12 +2285,12 @@ end:
return ret;
}
-int __is_cwb_requested(uint32_t output_layer_flags)
+int __is_cwb_requested(uint32_t commit_flags)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
int req = 0;
- req = output_layer_flags & MDP_COMMIT_CWB_EN;
+ req = commit_flags & MDP_COMMIT_CWB_EN;
if (req && !test_bit(MDSS_CAPS_CWB_SUPPORTED, mdata->mdss_caps_map)) {
pr_err("CWB not supported");
return -ENODEV;
@@ -2330,7 +2330,7 @@ int mdss_mdp_layer_pre_commit(struct msm_fb_data_type *mfd,
return -EINVAL;
if (commit->output_layer) {
- ret = __is_cwb_requested(commit->output_layer->flags);
+ ret = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(ret)) {
return ret;
} else if (ret) {
@@ -2493,7 +2493,7 @@ int mdss_mdp_layer_atomic_validate(struct msm_fb_data_type *mfd,
}
if (commit->output_layer) {
- rc = __is_cwb_requested(commit->output_layer->flags);
+ rc = __is_cwb_requested(commit->flags);
if (IS_ERR_VALUE(rc)) {
return rc;
} else if (rc) {
@@ -2553,7 +2553,7 @@ int mdss_mdp_layer_pre_commit_cwb(struct msm_fb_data_type *mfd,
return rc;
}
- mdp5_data->cwb.layer = commit->output_layer;
+ mdp5_data->cwb.layer = *commit->output_layer;
mdp5_data->cwb.wb_idx = commit->output_layer->writeback_ndx;
mutex_lock(&mdp5_data->cwb.queue_lock);
diff --git a/drivers/video/fbdev/msm/mdss_panel.h b/drivers/video/fbdev/msm/mdss_panel.h
index a633528b5373..463d26643dde 100644
--- a/drivers/video/fbdev/msm/mdss_panel.h
+++ b/drivers/video/fbdev/msm/mdss_panel.h
@@ -635,6 +635,10 @@ struct mdss_panel_info {
u32 saved_fporch;
/* current fps, once is programmed in hw */
int current_fps;
+ u32 mdp_koff_thshold_low;
+ u32 mdp_koff_thshold_high;
+ bool mdp_koff_thshold;
+ u32 mdp_koff_delay;
int panel_max_fps;
int panel_max_vtotal;
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index b5da4ad1a86b..eab7bcaaa156 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -573,7 +573,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
struct mdss_smmu_domain smmu_domain;
const struct of_device_id *match;
struct dss_module_power *mp;
- int disable_htw = 1;
char name[MAX_CLIENT_NAME_LEN];
const __be32 *address = NULL, *size = NULL;
@@ -667,13 +666,6 @@ int mdss_smmu_probe(struct platform_device *pdev)
goto disable_power;
}
- rc = iommu_domain_set_attr(mdss_smmu->mmu_mapping->domain,
- DOMAIN_ATTR_COHERENT_HTW_DISABLE, &disable_htw);
- if (rc) {
- pr_err("couldn't disable coherent HTW\n");
- goto release_mapping;
- }
-
if (smmu_domain.domain == MDSS_IOMMU_DOMAIN_SECURE ||
smmu_domain.domain == MDSS_IOMMU_DOMAIN_ROT_SECURE) {
int secure_vmid = VMID_CP_PIXEL;
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/video/fbdev/msm/msm_ext_display.c
index e229f52057d4..4899231787f2 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/video/fbdev/msm/msm_ext_display.c
@@ -365,6 +365,7 @@ static int msm_ext_disp_hpd(struct platform_device *pdev,
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
@@ -463,6 +464,20 @@ end:
return ret;
}
+static void msm_ext_disp_teardown_done(struct platform_device *pdev)
+{
+ int ret = 0;
+ struct msm_ext_disp_init_data *data = NULL;
+
+ ret = msm_ext_disp_get_intf_data_helper(pdev, &data);
+ if (ret || !data) {
+ pr_err("invalid input");
+ return;
+ }
+
+ data->codec_ops.teardown_done(data->pdev);
+}
+
static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
{
int ret = 0;
@@ -545,6 +560,8 @@ static int msm_ext_disp_notify(struct platform_device *pdev,
msm_ext_disp_cable_status;
ext_disp->ops->get_intf_id =
msm_ext_disp_get_intf_id;
+ ext_disp->ops->teardown_done =
+ msm_ext_disp_teardown_done;
}
switch_set_state(&ext_disp->audio_sdev, (int)new_state);
@@ -614,6 +631,7 @@ static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
ext_disp->ops->get_audio_edid_blk = NULL;
ext_disp->ops->cable_status = NULL;
ext_disp->ops->get_intf_id = NULL;
+ ext_disp->ops->teardown_done = NULL;
}
ext_disp->current_disp = EXT_DISPLAY_TYPE_MAX;
diff --git a/fs/ext4/inode.c b/fs/ext4/inode.c
index b15e6edb8f2c..933f1866b811 100644
--- a/fs/ext4/inode.c
+++ b/fs/ext4/inode.c
@@ -3602,6 +3602,7 @@ int ext4_can_truncate(struct inode *inode)
int ext4_punch_hole(struct inode *inode, loff_t offset, loff_t length)
{
+#if 0
struct super_block *sb = inode->i_sb;
ext4_lblk_t first_block, stop_block;
struct address_space *mapping = inode->i_mapping;
@@ -3725,6 +3726,12 @@ out_dio:
out_mutex:
mutex_unlock(&inode->i_mutex);
return ret;
+#else
+ /*
+ * Disabled as per b/28760453
+ */
+ return -EOPNOTSUPP;
+#endif
}
int ext4_inode_attach_jinode(struct inode *inode)
diff --git a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
index 609a20422ed1..aa76fbad5083 100644
--- a/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
+++ b/include/dt-bindings/clock/qcom,gcc-msmfalcon.h
@@ -195,6 +195,8 @@
#define GCC_UFS_ICE_CORE_HW_CTL_CLK 180
#define GCC_UFS_PHY_AUX_HW_CTL_CLK 181
#define GCC_UFS_UNIPRO_CORE_HW_CTL_CLK 182
+#define HLOS1_VOTE_TURING_ADSP_SMMU_CLK 183
+#define HLOS2_VOTE_TURING_ADSP_SMMU_CLK 184
/* Block resets */
#define GCC_QUSB2PHY_PRIM_BCR 0
diff --git a/include/linux/cgroup_subsys.h b/include/linux/cgroup_subsys.h
index 1a96fdaa33d5..e133705d794a 100644
--- a/include/linux/cgroup_subsys.h
+++ b/include/linux/cgroup_subsys.h
@@ -26,6 +26,10 @@ SUBSYS(cpu)
SUBSYS(cpuacct)
#endif
+#if IS_ENABLED(CONFIG_CGROUP_SCHEDTUNE)
+SUBSYS(schedtune)
+#endif
+
#if IS_ENABLED(CONFIG_BLK_CGROUP)
SUBSYS(io)
#endif
diff --git a/include/linux/diagchar.h b/include/linux/diagchar.h
index 768c44d9ea8b..0ae23ddbc528 100644
--- a/include/linux/diagchar.h
+++ b/include/linux/diagchar.h
@@ -144,10 +144,10 @@ the appropriate macros. */
/* This needs to be modified manually now, when we add
a new RANGE of SSIDs to the msg_mask_tbl */
#define MSG_MASK_TBL_CNT 25
-#define APPS_EVENT_LAST_ID 0x0B14
+#define APPS_EVENT_LAST_ID 0x0B2A
#define MSG_SSID_0 0
-#define MSG_SSID_0_LAST 118
+#define MSG_SSID_0_LAST 120
#define MSG_SSID_1 500
#define MSG_SSID_1_LAST 506
#define MSG_SSID_2 1000
@@ -163,7 +163,7 @@ the appropriate macros. */
#define MSG_SSID_7 4600
#define MSG_SSID_7_LAST 4615
#define MSG_SSID_8 5000
-#define MSG_SSID_8_LAST 5032
+#define MSG_SSID_8_LAST 5033
#define MSG_SSID_9 5500
#define MSG_SSID_9_LAST 5516
#define MSG_SSID_10 6000
@@ -193,7 +193,7 @@ the appropriate macros. */
#define MSG_SSID_22 10350
#define MSG_SSID_22_LAST 10377
#define MSG_SSID_23 10400
-#define MSG_SSID_23_LAST 10415
+#define MSG_SSID_23_LAST 10416
#define MSG_SSID_24 0xC000
#define MSG_SSID_24_LAST 0xC063
@@ -336,7 +336,9 @@ static const uint32_t msg_bld_masks_0[] = {
MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_HIGH
+ MSG_LVL_HIGH,
+ MSG_LVL_LOW,
+ MSG_LVL_LOW|MSG_LVL_MED|MSG_LVL_HIGH|MSG_LVL_ERROR|MSG_LVL_FATAL
};
static const uint32_t msg_bld_masks_1[] = {
@@ -535,7 +537,8 @@ static const uint32_t msg_bld_masks_8[] = {
MSG_LVL_MED,
MSG_LVL_MED,
MSG_LVL_MED,
- MSG_LVL_MED
+ MSG_LVL_MED,
+ MSG_LVL_HIGH
};
static const uint32_t msg_bld_masks_9[] = {
@@ -848,13 +851,14 @@ static const uint32_t msg_bld_masks_23[] = {
MSG_LVL_LOW,
MSG_LVL_LOW,
MSG_LVL_LOW,
+ MSG_LVL_LOW,
MSG_LVL_LOW
};
/* LOG CODES */
static const uint32_t log_code_last_tbl[] = {
0x0, /* EQUIP ID 0 */
- 0x1966, /* EQUIP ID 1 */
+ 0x1A02, /* EQUIP ID 1 */
0x0, /* EQUIP ID 2 */
0x0, /* EQUIP ID 3 */
0x4910, /* EQUIP ID 4 */
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index f4f5af978c7c..c34a68ce901a 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -121,7 +121,6 @@ enum iommu_attr {
DOMAIN_ATTR_FSL_PAMU_ENABLE,
DOMAIN_ATTR_FSL_PAMUV1,
DOMAIN_ATTR_NESTING, /* two stages of translation */
- DOMAIN_ATTR_COHERENT_HTW_DISABLE,
DOMAIN_ATTR_PT_BASE_ADDR,
DOMAIN_ATTR_SECURE_VMID,
DOMAIN_ATTR_ATOMIC,
diff --git a/include/linux/leds-qpnp-flash.h b/include/linux/leds-qpnp-flash.h
index 3df370a9e6d3..4b5a339970fa 100644
--- a/include/linux/leds-qpnp-flash.h
+++ b/include/linux/leds-qpnp-flash.h
@@ -18,6 +18,9 @@
#define ENABLE_REGULATOR BIT(0)
#define DISABLE_REGULATOR BIT(1)
#define QUERY_MAX_CURRENT BIT(2)
+#define PRE_FLASH BIT(3)
+
+#define FLASH_LED_PREPARE_OPTIONS_MASK GENMASK(3, 0)
int qpnp_flash_led_prepare(struct led_trigger *trig, int options,
int *max_current);
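With PRE_FLASH occupying bit 3, FLASH_LED_PREPARE_OPTIONS_MASK (GENMASK(3, 0)) now covers every defined option. A hedged sketch of how a prepare implementation might use the mask to reject unknown bits; the helper name is hypothetical, not part of this header:

	/* Hypothetical option validation in a prepare implementation. */
	static int validate_flash_options(int options)
	{
		/* any bit above PRE_FLASH is not a defined option */
		if (options & ~FLASH_LED_PREPARE_OPTIONS_MASK)
			return -EINVAL;
		return 0;
	}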
diff --git a/include/linux/mfd/wcd934x/registers.h b/include/linux/mfd/wcd934x/registers.h
index 085e16d66bc4..a824875096e4 100644
--- a/include/linux/mfd/wcd934x/registers.h
+++ b/include/linux/mfd/wcd934x/registers.h
@@ -800,9 +800,11 @@ enum {
#define WCD934X_VBADC_NEW_ADC_DOUTLSB 0x0731
#define WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL 0x0732
#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL 0x0733
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L 0x0733
#define WCD934X_HPH_NEW_INT_RDAC_VREF_CTL 0x0734
#define WCD934X_HPH_NEW_INT_RDAC_OVERRIDE_CTL 0x0735
#define WCD934X_HPH_NEW_INT_RDAC_MISC1 0x0736
+#define WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R 0x0736
#define WCD934X_HPH_NEW_INT_PA_MISC1 0x0737
#define WCD934X_HPH_NEW_INT_PA_MISC2 0x0738
#define WCD934X_HPH_NEW_INT_PA_RDAC_MISC 0x0739
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 873a778d5370..59ba776b5f9b 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -108,6 +108,7 @@ struct msm_ext_disp_audio_codec_ops {
struct msm_ext_disp_audio_edid_blk *blk);
int (*cable_status)(struct platform_device *pdev, u32 vote);
int (*get_intf_id)(struct platform_device *pdev);
+ void (*teardown_done)(struct platform_device *pdev);
};
/*
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index c95a529b029b..fb2607dd365b 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -13,6 +13,14 @@
#define MSM_GSI_H
#include <linux/types.h>
+enum gsi_ver {
+ GSI_VER_ERR = 0,
+ GSI_VER_1_0 = 1,
+ GSI_VER_1_2 = 2,
+ GSI_VER_1_3 = 3,
+ GSI_VER_MAX,
+};
+
enum gsi_status {
GSI_STATUS_SUCCESS = 0,
GSI_STATUS_ERROR = 1,
@@ -65,6 +73,7 @@ enum gsi_intr_type {
/**
* gsi_per_props - Peripheral related properties
*
+ * @gsi: GSI core version
* @ee: EE where this driver and peripheral driver runs
* @intr: control interrupt type
* @intvec: write data for MSI write
@@ -87,6 +96,7 @@ enum gsi_intr_type {
*
*/
struct gsi_per_props {
+ enum gsi_ver ver;
unsigned int ee;
enum gsi_intr_type intr;
uint32_t intvec;
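Peripheral drivers now declare which GSI core revision they target when registering. A minimal sketch of filling the new field; everything except .ver is a placeholder here:

	/* Sketch: advertise the GSI core version at registration. */
	struct gsi_per_props props;

	memset(&props, 0, sizeof(props));
	props.ver = GSI_VER_1_2;	/* must be a valid enum gsi_ver */
	props.ee = 0;			/* placeholder EE */
	/* ... remaining fields populated as before ... */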
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 03853d956b41..c477f60c3f01 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -218,6 +218,7 @@ enum power_supply_property {
POWER_SUPPLY_PROP_PD_ACTIVE,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_PARALLEL_DISABLE,
/* Local extensions of type int64_t */
POWER_SUPPLY_PROP_CHARGE_COUNTER_EXT,
/* Properties of type `const char *' */
diff --git a/include/soc/qcom/smem.h b/include/soc/qcom/smem.h
index b5425dd7eaea..4117b0d47b0d 100644
--- a/include/soc/qcom/smem.h
+++ b/include/soc/qcom/smem.h
@@ -22,11 +22,11 @@ enum {
SMEM_DSPS,
SMEM_WCNSS,
SMEM_MODEM_Q6_FW,
+ SMEM_CDSP = SMEM_MODEM_Q6_FW,
SMEM_RPM,
SMEM_TZ,
SMEM_SPSS,
SMEM_HYP,
- SMEM_CDSP,
NUM_SMEM_SUBSYSTEMS,
};
diff --git a/include/sound/wcd-dsp-mgr.h b/include/sound/wcd-dsp-mgr.h
index aa3b363e95e1..2beb9b38a46a 100644
--- a/include/sound/wcd-dsp-mgr.h
+++ b/include/sound/wcd-dsp-mgr.h
@@ -65,9 +65,14 @@ enum wdsp_event_type {
WDSP_EVENT_RESUME,
};
-enum wdsp_intr {
+enum wdsp_signal {
+ /* Hardware generated interrupts signalled to manager */
WDSP_IPC1_INTR,
WDSP_ERR_INTR,
+
+ /* Other signals */
+ WDSP_CDC_DOWN_SIGNAL,
+ WDSP_CDC_UP_SIGNAL,
};
/*
@@ -92,7 +97,7 @@ struct wdsp_img_section {
u8 *data;
};
-struct wdsp_err_intr_arg {
+struct wdsp_err_signal_arg {
bool mem_dumps_enabled;
u32 remote_start_addr;
size_t dump_size;
@@ -104,8 +109,9 @@ struct wdsp_err_intr_arg {
* their own ops to manager driver
* @get_dev_for_cmpnt: components can use this to get handle
* to struct device * of any other component
- * @intr_handler: callback to notify manager driver that interrupt
- * has occurred.
+ * @signal_handler: callback to notify the manager driver that a signal
+ * has occurred. Cannot be called from interrupt
+ * context as it may sleep.
* @vote_for_dsp: notifies manager that dsp should be booted up
* @suspend: notifies manager that one component wants to suspend.
* Manager will make sure to suspend all components in order
@@ -120,8 +126,8 @@ struct wdsp_mgr_ops {
struct wdsp_cmpnt_ops *ops);
struct device *(*get_dev_for_cmpnt)(struct device *wdsp_dev,
enum wdsp_cmpnt_type type);
- int (*intr_handler)(struct device *wdsp_dev,
- enum wdsp_intr intr, void *arg);
+ int (*signal_handler)(struct device *wdsp_dev,
+ enum wdsp_signal signal, void *arg);
int (*vote_for_dsp)(struct device *wdsp_dev, bool vote);
int (*suspend)(struct device *wdsp_dev);
int (*resume)(struct device *wdsp_dev);
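Because signal_handler also carries the new non-interrupt codec signals and may sleep, components can report codec power transitions from process context. A sketch, assuming the caller saved the manager device and ops when it registered:

	/*
	 * Sketch: report a codec restart to the manager from process
	 * context. wdsp_dev and m_ops are assumed to be the values
	 * obtained when this component registered with the manager.
	 */
	static int notify_codec_restart(struct device *wdsp_dev,
					struct wdsp_mgr_ops *m_ops)
	{
		int ret;

		ret = m_ops->signal_handler(wdsp_dev,
					    WDSP_CDC_DOWN_SIGNAL, NULL);
		if (ret)
			return ret;

		return m_ops->signal_handler(wdsp_dev,
					     WDSP_CDC_UP_SIGNAL, NULL);
	}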
diff --git a/include/trace/events/trace_msm_low_power.h b/include/trace/events/trace_msm_low_power.h
index e14cab59e90a..97eefc665130 100644
--- a/include/trace/events/trace_msm_low_power.h
+++ b/include/trace/events/trace_msm_low_power.h
@@ -192,6 +192,64 @@ TRACE_EVENT(cluster_exit,
__entry->from_idle)
);
+TRACE_EVENT(cluster_pred_select,
+
+ TP_PROTO(const char *name, int index, u32 sleep_us,
+ u32 latency, int pred, u32 pred_us),
+
+ TP_ARGS(name, index, sleep_us, latency, pred, pred_us),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, index)
+ __field(u32, sleep_us)
+ __field(u32, latency)
+ __field(int, pred)
+ __field(u32, pred_us)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->index = index;
+ __entry->sleep_us = sleep_us;
+ __entry->latency = latency;
+ __entry->pred = pred;
+ __entry->pred_us = pred_us;
+ ),
+
+ TP_printk("name:%s idx:%d sleep_time:%u latency:%u pred:%d pred_us:%u",
+ __entry->name, __entry->index, __entry->sleep_us,
+ __entry->latency, __entry->pred, __entry->pred_us)
+);
+
+TRACE_EVENT(cluster_pred_hist,
+
+ TP_PROTO(const char *name, int idx, u32 resi,
+ u32 sample, u32 tmr),
+
+ TP_ARGS(name, idx, resi, sample, tmr),
+
+ TP_STRUCT__entry(
+ __field(const char *, name)
+ __field(int, idx)
+ __field(u32, resi)
+ __field(u32, sample)
+ __field(u32, tmr)
+ ),
+
+ TP_fast_assign(
+ __entry->name = name;
+ __entry->idx = idx;
+ __entry->resi = resi;
+ __entry->sample = sample;
+ __entry->tmr = tmr;
+ ),
+
+ TP_printk("name:%s idx:%d resi:%u sample:%u tmr:%u",
+ __entry->name, __entry->idx, __entry->resi,
+ __entry->sample, __entry->tmr)
+);
+
TRACE_EVENT(pre_pc_cb,
TP_PROTO(int tzflag),
diff --git a/init/Kconfig b/init/Kconfig
index 311669332867..eb9e1a0aa688 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -1002,6 +1002,23 @@ config CGROUP_CPUACCT
config PAGE_COUNTER
bool
+config CGROUP_SCHEDTUNE
+ bool "CFS tasks boosting cgroup subsystem (EXPERIMENTAL)"
+ depends on SCHED_TUNE
+ help
+	  This option provides the "schedtune" controller which improves the
+	  flexibility of the task boosting mechanism by introducing support
+	  for per-task boost values.
+
+	  This new controller:
+	  1. allows only a two-layer hierarchy, where the root defines the
+	     system-wide boost value and each of its direct children defines
+	     a different "class of tasks" to be boosted with a different value
+	  2. supports up to 16 different task classes, each of which can be
+	     configured with a different boost value
+
+ Say N if unsure.
+
config MEMCG
bool "Memory Resource Controller for Control Groups"
select PAGE_COUNTER
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index ff7f6f35fc8f..024fb1007c78 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8239,7 +8239,7 @@ void set_curr_task(int cpu, struct task_struct *p)
/* task_group_lock serializes the addition/removal of task groups */
static DEFINE_SPINLOCK(task_group_lock);
-static void free_sched_group(struct task_group *tg)
+static void sched_free_group(struct task_group *tg)
{
free_fair_sched_group(tg);
free_rt_sched_group(tg);
@@ -8265,7 +8265,7 @@ struct task_group *sched_create_group(struct task_group *parent)
return tg;
err:
- free_sched_group(tg);
+ sched_free_group(tg);
return ERR_PTR(-ENOMEM);
}
@@ -8285,27 +8285,24 @@ void sched_online_group(struct task_group *tg, struct task_group *parent)
}
/* rcu callback to free various structures associated with a task group */
-static void free_sched_group_rcu(struct rcu_head *rhp)
+static void sched_free_group_rcu(struct rcu_head *rhp)
{
/* now it should be safe to free those cfs_rqs */
- free_sched_group(container_of(rhp, struct task_group, rcu));
+ sched_free_group(container_of(rhp, struct task_group, rcu));
}
-/* Destroy runqueue etc associated with a task group */
void sched_destroy_group(struct task_group *tg)
{
/* wait for possible concurrent references to cfs_rqs complete */
- call_rcu(&tg->rcu, free_sched_group_rcu);
+ call_rcu(&tg->rcu, sched_free_group_rcu);
}
void sched_offline_group(struct task_group *tg)
{
unsigned long flags;
- int i;
/* end participation in shares distribution */
- for_each_possible_cpu(i)
- unregister_fair_sched_group(tg, i);
+ unregister_fair_sched_group(tg);
spin_lock_irqsave(&task_group_lock, flags);
list_del_rcu(&tg->list);
@@ -8756,31 +8753,26 @@ cpu_cgroup_css_alloc(struct cgroup_subsys_state *parent_css)
if (IS_ERR(tg))
return ERR_PTR(-ENOMEM);
+ sched_online_group(tg, parent);
+
return &tg->css;
}
-static int cpu_cgroup_css_online(struct cgroup_subsys_state *css)
+static void cpu_cgroup_css_released(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- struct task_group *parent = css_tg(css->parent);
- if (parent)
- sched_online_group(tg, parent);
- return 0;
+ sched_offline_group(tg);
}
static void cpu_cgroup_css_free(struct cgroup_subsys_state *css)
{
struct task_group *tg = css_tg(css);
- sched_destroy_group(tg);
-}
-
-static void cpu_cgroup_css_offline(struct cgroup_subsys_state *css)
-{
- struct task_group *tg = css_tg(css);
-
- sched_offline_group(tg);
+ /*
+ * Relies on the RCU grace period between css_released() and this.
+ */
+ sched_free_group(tg);
}
static void cpu_cgroup_fork(struct task_struct *task, void *private)
@@ -9147,9 +9139,8 @@ static struct cftype cpu_files[] = {
struct cgroup_subsys cpu_cgrp_subsys = {
.css_alloc = cpu_cgroup_css_alloc,
+ .css_released = cpu_cgroup_css_released,
.css_free = cpu_cgroup_css_free,
- .css_online = cpu_cgroup_css_online,
- .css_offline = cpu_cgroup_css_offline,
.fork = cpu_cgroup_fork,
.can_attach = cpu_cgroup_can_attach,
.attach = cpu_cgroup_attach,
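The net effect of the cpu cgroup rework is a different lifecycle ordering, sketched below; the RCU grace period between css_released() and css_free() is provided by the cgroup core, which is why sched_free_group() no longer needs its own call_rcu() on this path:

	/*
	 * Lifecycle ordering relied on above (sketch, not code):
	 *
	 *   css_alloc()     -> sched_create_group() + sched_online_group()
	 *   ...
	 *   css_released()  -> sched_offline_group()
	 *   ... RCU grace period, guaranteed by the cgroup core ...
	 *   css_free()      -> sched_free_group()
	 */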
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index df23b0365527..6362b864e2b1 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -2619,7 +2619,7 @@ struct cluster_cpu_stats {
int best_idle_cpu, least_loaded_cpu;
int best_capacity_cpu, best_cpu, best_sibling_cpu;
int min_cost, best_sibling_cpu_cost;
- int best_cpu_cstate;
+ int best_cpu_wakeup_latency;
u64 min_load, best_load, best_sibling_cpu_load;
s64 highest_spare_capacity;
};
@@ -2827,19 +2827,19 @@ next_best_cluster(struct sched_cluster *cluster, struct cpu_select_env *env,
static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
struct cpu_select_env *env, int cpu_cost)
{
- int cpu_cstate;
+ int wakeup_latency;
int prev_cpu = env->prev_cpu;
- cpu_cstate = cpu_rq(cpu)->cstate;
+ wakeup_latency = cpu_rq(cpu)->wakeup_latency;
if (env->need_idle) {
stats->min_cost = cpu_cost;
if (idle_cpu(cpu)) {
- if (cpu_cstate < stats->best_cpu_cstate ||
- (cpu_cstate == stats->best_cpu_cstate &&
- cpu == prev_cpu)) {
+ if (wakeup_latency < stats->best_cpu_wakeup_latency ||
+ (wakeup_latency == stats->best_cpu_wakeup_latency &&
+ cpu == prev_cpu)) {
stats->best_idle_cpu = cpu;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
}
} else {
if (env->cpu_load < stats->min_load ||
@@ -2855,7 +2855,7 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
if (cpu_cost < stats->min_cost) {
stats->min_cost = cpu_cost;
- stats->best_cpu_cstate = cpu_cstate;
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CPU_COST;
@@ -2864,11 +2864,11 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
/* CPU cost is the same. Start breaking the tie by C-state */
- if (cpu_cstate > stats->best_cpu_cstate)
+ if (wakeup_latency > stats->best_cpu_wakeup_latency)
return;
- if (cpu_cstate < stats->best_cpu_cstate) {
- stats->best_cpu_cstate = cpu_cstate;
+ if (wakeup_latency < stats->best_cpu_wakeup_latency) {
+ stats->best_cpu_wakeup_latency = wakeup_latency;
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_COST_CSTATE_TIE_BREAKER;
@@ -2883,8 +2883,8 @@ static void __update_cluster_stats(int cpu, struct cluster_cpu_stats *stats,
}
if (stats->best_cpu != prev_cpu &&
- ((cpu_cstate == 0 && env->cpu_load < stats->best_load) ||
- (cpu_cstate > 0 && env->cpu_load > stats->best_load))) {
+ ((wakeup_latency == 0 && env->cpu_load < stats->best_load) ||
+ (wakeup_latency > 0 && env->cpu_load > stats->best_load))) {
stats->best_load = env->cpu_load;
stats->best_cpu = cpu;
env->sbc_best_flag = SBC_FLAG_CSTATE_LOAD;
@@ -2979,7 +2979,7 @@ static inline void init_cluster_cpu_stats(struct cluster_cpu_stats *stats)
stats->min_load = stats->best_sibling_cpu_load = ULLONG_MAX;
stats->highest_spare_capacity = 0;
stats->least_loaded_cpu = -1;
- stats->best_cpu_cstate = INT_MAX;
+ stats->best_cpu_wakeup_latency = INT_MAX;
/* No need to initialize stats->best_load */
}
@@ -9653,11 +9653,8 @@ void free_fair_sched_group(struct task_group *tg)
for_each_possible_cpu(i) {
if (tg->cfs_rq)
kfree(tg->cfs_rq[i]);
- if (tg->se) {
- if (tg->se[i])
- remove_entity_load_avg(tg->se[i]);
+ if (tg->se)
kfree(tg->se[i]);
- }
}
kfree(tg->cfs_rq);
@@ -9705,21 +9702,29 @@ err:
return 0;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu)
+void unregister_fair_sched_group(struct task_group *tg)
{
- struct rq *rq = cpu_rq(cpu);
unsigned long flags;
+ struct rq *rq;
+ int cpu;
- /*
- * Only empty task groups can be destroyed; so we can speculatively
- * check on_list without danger of it being re-added.
- */
- if (!tg->cfs_rq[cpu]->on_list)
- return;
+ for_each_possible_cpu(cpu) {
+ if (tg->se[cpu])
+ remove_entity_load_avg(tg->se[cpu]);
- raw_spin_lock_irqsave(&rq->lock, flags);
- list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
- raw_spin_unlock_irqrestore(&rq->lock, flags);
+ /*
+ * Only empty task groups can be destroyed; so we can speculatively
+ * check on_list without danger of it being re-added.
+ */
+ if (!tg->cfs_rq[cpu]->on_list)
+ continue;
+
+ rq = cpu_rq(cpu);
+
+ raw_spin_lock_irqsave(&rq->lock, flags);
+ list_del_leaf_cfs_rq(tg->cfs_rq[cpu]);
+ raw_spin_unlock_irqrestore(&rq->lock, flags);
+ }
}
void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
@@ -9801,7 +9806,7 @@ int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent)
return 1;
}
-void unregister_fair_sched_group(struct task_group *tg, int cpu) { }
+void unregister_fair_sched_group(struct task_group *tg) { }
#endif /* CONFIG_FAIR_GROUP_SCHED */
diff --git a/kernel/sched/hmp.c b/kernel/sched/hmp.c
index a0686ea29243..3d5de8ba70a2 100644
--- a/kernel/sched/hmp.c
+++ b/kernel/sched/hmp.c
@@ -24,6 +24,8 @@
#include <trace/events/sched.h>
+#define CSTATE_LATENCY_GRANULARITY_SHIFT (6)
+
const char *task_event_names[] = {"PUT_PREV_TASK", "PICK_NEXT_TASK",
"TASK_WAKE", "TASK_MIGRATE", "TASK_UPDATE",
"IRQ_UPDATE"};
@@ -99,7 +101,10 @@ sched_set_cpu_cstate(int cpu, int cstate, int wakeup_energy, int wakeup_latency)
rq->cstate = cstate; /* C1, C2 etc */
rq->wakeup_energy = wakeup_energy;
- rq->wakeup_latency = wakeup_latency;
+ /* disregard small latency delta (64 us). */
+ rq->wakeup_latency = ((wakeup_latency >>
+ CSTATE_LATENCY_GRANULARITY_SHIFT) <<
+ CSTATE_LATENCY_GRANULARITY_SHIFT);
}
/*
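The shift pair quantizes wakeup latencies to 64 us buckets ((x >> 6) << 6 truncates to a multiple of 64), so C-states whose exit latencies differ by less than the granularity compare as equal in the placement tie-breakers:

	/* worked examples of the 64 us quantization: */
	(100 >> 6) << 6 == 64	/* 100 us ->  64 us */
	(127 >> 6) << 6 == 64	/* 127 us ->  64 us */
	(128 >> 6) << 6 == 128	/* 128 us -> 128 us */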
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ada5e580e968..27b28369440d 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -313,7 +313,7 @@ extern int tg_nop(struct task_group *tg, void *data);
extern void free_fair_sched_group(struct task_group *tg);
extern int alloc_fair_sched_group(struct task_group *tg, struct task_group *parent);
-extern void unregister_fair_sched_group(struct task_group *tg, int cpu);
+extern void unregister_fair_sched_group(struct task_group *tg);
extern void init_tg_cfs_entry(struct task_group *tg, struct cfs_rq *cfs_rq,
struct sched_entity *se, int cpu,
struct sched_entity *parent);
diff --git a/kernel/sched/tune.c b/kernel/sched/tune.c
index 4c44b1a4ad98..4f8182302e5e 100644
--- a/kernel/sched/tune.c
+++ b/kernel/sched/tune.c
@@ -1,7 +1,231 @@
+#include <linux/cgroup.h>
+#include <linux/err.h>
+#include <linux/percpu.h>
+#include <linux/printk.h>
+#include <linux/slab.h>
+
#include "sched.h"
unsigned int sysctl_sched_cfs_boost __read_mostly;
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+
+/*
+ * EAS scheduler tunables for task groups.
+ */
+
+/* SchedTune tunables for a group of tasks */
+struct schedtune {
+ /* SchedTune CGroup subsystem */
+ struct cgroup_subsys_state css;
+
+ /* Boost group allocated ID */
+ int idx;
+
+ /* Boost value for tasks on that SchedTune CGroup */
+ int boost;
+
+};
+
+static inline struct schedtune *css_st(struct cgroup_subsys_state *css)
+{
+ return container_of(css, struct schedtune, css);
+}
+
+static inline struct schedtune *task_schedtune(struct task_struct *tsk)
+{
+ return css_st(task_css(tsk, schedtune_cgrp_id));
+}
+
+static inline struct schedtune *parent_st(struct schedtune *st)
+{
+ return css_st(st->css.parent);
+}
+
+/*
+ * SchedTune root control group
+ * The root control group is used to define a system-wide boost value,
+ * which is applied to all tasks in the system.
+ * Task specific boost tuning could be specified by creating and
+ * configuring a child control group under the root one.
+ * By default, system-wide boosting is disabled, i.e. no boosting is applied
+ * to tasks which are not in a child control group.
+ */
+static struct schedtune
+root_schedtune = {
+ .boost = 0,
+};
+
+/*
+ * Maximum number of boost groups to support
+ * When per-task boosting is used we still allow only a limited number of
+ * boost groups for two main reasons:
+ * 1. on a real system we usually have only a few classes of workloads that
+ *    make sense to boost with different values (e.g. background vs foreground
+ * tasks, interactive vs low-priority tasks)
+ * 2. a limited number allows for a simpler and more memory/time efficient
+ * implementation especially for the computation of the per-CPU boost
+ * value
+ */
+#define BOOSTGROUPS_COUNT 5
+
+/* Array of configured boostgroups */
+static struct schedtune *allocated_group[BOOSTGROUPS_COUNT] = {
+ &root_schedtune,
+ NULL,
+};
+
+/* SchedTune boost groups
+ * Keep track of all the boost groups which impact a CPU, for example when a
+ * CPU has two RUNNABLE tasks belonging to two different boost groups and thus
+ * likely with different boost values.
+ * Since on each system we expect only a limited number of boost groups, here
+ * we use a simple array to keep track of the metrics required to compute the
+ * maximum per-CPU boosting value.
+ */
+struct boost_groups {
+ /* Maximum boost value for all RUNNABLE tasks on a CPU */
+ unsigned boost_max;
+ struct {
+ /* The boost for tasks on that boost group */
+ unsigned boost;
+ /* Count of RUNNABLE tasks on that boost group */
+ unsigned tasks;
+ } group[BOOSTGROUPS_COUNT];
+};
+
+/* Boost groups affecting each CPU in the system */
+DEFINE_PER_CPU(struct boost_groups, cpu_boost_groups);
+
+static u64
+boost_read(struct cgroup_subsys_state *css, struct cftype *cft)
+{
+ struct schedtune *st = css_st(css);
+
+ return st->boost;
+}
+
+static int
+boost_write(struct cgroup_subsys_state *css, struct cftype *cft,
+ u64 boost)
+{
+ struct schedtune *st = css_st(css);
+
+ if (boost < 0 || boost > 100)
+ return -EINVAL;
+
+ st->boost = boost;
+ if (css == &root_schedtune.css)
+ sysctl_sched_cfs_boost = boost;
+
+ return 0;
+}
+
+static struct cftype files[] = {
+ {
+ .name = "boost",
+ .read_u64 = boost_read,
+ .write_u64 = boost_write,
+ },
+ { } /* terminate */
+};
+
+static int
+schedtune_boostgroup_init(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = st;
+
+ return 0;
+}
+
+static int
+schedtune_init(void)
+{
+ struct boost_groups *bg;
+ int cpu;
+
+ /* Initialize the per CPU boost groups */
+ for_each_possible_cpu(cpu) {
+ bg = &per_cpu(cpu_boost_groups, cpu);
+ memset(bg, 0, sizeof(struct boost_groups));
+ }
+
+ pr_info(" schedtune configured to support %d boost groups\n",
+ BOOSTGROUPS_COUNT);
+ return 0;
+}
+
+static struct cgroup_subsys_state *
+schedtune_css_alloc(struct cgroup_subsys_state *parent_css)
+{
+ struct schedtune *st;
+ int idx;
+
+ if (!parent_css) {
+ schedtune_init();
+ return &root_schedtune.css;
+ }
+
+ /* Allow only single-level hierarchies */
+ if (parent_css != &root_schedtune.css) {
+ pr_err("Nested SchedTune boosting groups not allowed\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ /* Allow only a limited number of boosting groups */
+ for (idx = 1; idx < BOOSTGROUPS_COUNT; ++idx)
+ if (!allocated_group[idx])
+ break;
+ if (idx == BOOSTGROUPS_COUNT) {
+ pr_err("Trying to create more than %d SchedTune boosting groups\n",
+ BOOSTGROUPS_COUNT);
+ return ERR_PTR(-ENOSPC);
+ }
+
+ st = kzalloc(sizeof(*st), GFP_KERNEL);
+ if (!st)
+ goto out;
+
+ /* Initialize per CPUs boost group support */
+ st->idx = idx;
+ if (schedtune_boostgroup_init(st))
+ goto release;
+
+ return &st->css;
+
+release:
+ kfree(st);
+out:
+ return ERR_PTR(-ENOMEM);
+}
+
+static void
+schedtune_boostgroup_release(struct schedtune *st)
+{
+ /* Keep track of allocated boost groups */
+ allocated_group[st->idx] = NULL;
+}
+
+static void
+schedtune_css_free(struct cgroup_subsys_state *css)
+{
+ struct schedtune *st = css_st(css);
+
+ schedtune_boostgroup_release(st);
+ kfree(st);
+}
+
+struct cgroup_subsys schedtune_cgrp_subsys = {
+ .css_alloc = schedtune_css_alloc,
+ .css_free = schedtune_css_free,
+ .legacy_cftypes = files,
+ .early_init = 1,
+ .allow_attach = subsys_cgroup_allow_attach,
+};
+
+#endif /* CONFIG_CGROUP_SCHEDTUNE */
+
int
sysctl_sched_cfs_boost_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
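The per-CPU boost_groups array introduced above records a boost value and a RUNNABLE-task count per group, but this patch stops short of the aggregation that consumes them. A hedged sketch of how boost_max would be recomputed when group occupancy changes; the function is hypothetical here and only illustrates the data structure:

	/*
	 * Hypothetical sketch, not part of this patch: take the
	 * maximum boost across boost groups that currently have
	 * RUNNABLE tasks on this CPU.
	 */
	static void schedtune_cpu_update(int cpu)
	{
		struct boost_groups *bg = &per_cpu(cpu_boost_groups, cpu);
		unsigned boost_max = 0;
		int idx;

		for (idx = 0; idx < BOOSTGROUPS_COUNT; ++idx) {
			if (!bg->group[idx].tasks)
				continue;
			if (bg->group[idx].boost > boost_max)
				boost_max = bg->group[idx].boost;
		}
		bg->boost_max = boost_max;
	}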
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 8e2f4ab15498..587dbe09c47d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -636,7 +636,11 @@ static struct ctl_table kern_table[] = {
.procname = "sched_cfs_boost",
.data = &sysctl_sched_cfs_boost,
.maxlen = sizeof(sysctl_sched_cfs_boost),
+#ifdef CONFIG_CGROUP_SCHEDTUNE
+ .mode = 0444,
+#else
.mode = 0644,
+#endif
.proc_handler = &sysctl_sched_cfs_boost_handler,
.extra1 = &zero,
.extra2 = &one_hundred,
diff --git a/lib/asn1_decoder.c b/lib/asn1_decoder.c
index 2b3f46c049d4..554522934c44 100644
--- a/lib/asn1_decoder.c
+++ b/lib/asn1_decoder.c
@@ -74,7 +74,7 @@ next_tag:
/* Extract a tag from the data */
tag = data[dp++];
- if (tag == 0) {
+ if (tag == ASN1_EOC) {
/* It appears to be an EOC. */
if (data[dp++] != 0)
goto invalid_eoc;
@@ -96,10 +96,8 @@ next_tag:
/* Extract the length */
len = data[dp++];
- if (len <= 0x7f) {
- dp += len;
- goto next_tag;
- }
+ if (len <= 0x7f)
+ goto check_length;
if (unlikely(len == ASN1_INDEFINITE_LENGTH)) {
/* Indefinite length */
@@ -110,14 +108,18 @@ next_tag:
}
n = len - 0x80;
- if (unlikely(n > sizeof(size_t) - 1))
+ if (unlikely(n > sizeof(len) - 1))
goto length_too_long;
if (unlikely(n > datalen - dp))
goto data_overrun_error;
- for (len = 0; n > 0; n--) {
+ len = 0;
+ for (; n > 0; n--) {
len <<= 8;
len |= data[dp++];
}
+check_length:
+ if (len > datalen - dp)
+ goto data_overrun_error;
dp += len;
goto next_tag;
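The new check_length path closes the hole where a definite length larger than the remaining input let dp run past datalen: both the short form and the reconstructed long form now funnel through the same bound before dp is advanced. A standalone sketch of that parsing, with the header-byte bounds checks and indefinite-length (0x80) handling omitted for brevity; this is an illustration, not the decoder's code:

	/* Sketch: DER definite-length parse with the overrun bound. */
	static int der_skip_element(const u8 *data, size_t datalen,
				    size_t *pdp)
	{
		size_t dp = *pdp, len, n;

		len = data[dp++];
		if (len > 0x7f) {		/* long form */
			n = len - 0x80;
			if (n > sizeof(len) - 1 || n > datalen - dp)
				return -EBADMSG;
			for (len = 0; n > 0; n--)
				len = (len << 8) | data[dp++];
		}
		if (len > datalen - dp)		/* the new bound */
			return -EBADMSG;
		*pdp = dp + len;		/* skip the element body */
		return 0;
	}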
diff --git a/net/core/dev.c b/net/core/dev.c
index a299c3956daa..a4c647893e52 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3544,9 +3544,6 @@ static int netif_rx_internal(struct sk_buff *skb)
trace_netif_rx(skb);
#ifdef CONFIG_RPS
- WARN_ONCE(skb_cloned(skb), "Cloned packet from dev %s\n",
- skb->dev->name);
-
if (static_key_false(&rps_needed)) {
struct rps_dev_flow voidflow, *rflow = &voidflow;
int cpu;
diff --git a/net/ipv4/tcp_input.c b/net/ipv4/tcp_input.c
index b8f7e621e16e..32027efa5033 100644
--- a/net/ipv4/tcp_input.c
+++ b/net/ipv4/tcp_input.c
@@ -89,7 +89,7 @@ int sysctl_tcp_adv_win_scale __read_mostly = 1;
EXPORT_SYMBOL(sysctl_tcp_adv_win_scale);
/* rfc5961 challenge ack rate limiting */
-int sysctl_tcp_challenge_ack_limit = 100;
+int sysctl_tcp_challenge_ack_limit = 1000;
int sysctl_tcp_stdurg __read_mostly;
int sysctl_tcp_rfc1337 __read_mostly;
@@ -3428,7 +3428,7 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
static u32 challenge_timestamp;
static unsigned int challenge_count;
struct tcp_sock *tp = tcp_sk(sk);
- u32 now;
+ u32 count, now;
/* First check our per-socket dupack rate limit. */
if (tcp_oow_rate_limited(sock_net(sk), skb,
@@ -3436,13 +3436,18 @@ static void tcp_send_challenge_ack(struct sock *sk, const struct sk_buff *skb)
&tp->last_oow_ack_time))
return;
- /* Then check the check host-wide RFC 5961 rate limit. */
+ /* Then check host-wide RFC 5961 rate limit. */
now = jiffies / HZ;
if (now != challenge_timestamp) {
+ u32 half = (sysctl_tcp_challenge_ack_limit + 1) >> 1;
+
challenge_timestamp = now;
- challenge_count = 0;
+ WRITE_ONCE(challenge_count, half +
+ prandom_u32_max(sysctl_tcp_challenge_ack_limit));
}
- if (++challenge_count <= sysctl_tcp_challenge_ack_limit) {
+ count = READ_ONCE(challenge_count);
+ if (count > 0) {
+ WRITE_ONCE(challenge_count, count - 1);
NET_INC_STATS_BH(sock_net(sk), LINUX_MIB_TCPCHALLENGEACK);
tcp_send_ack(sk);
}
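The rewrite replaces a predictable reset-to-zero counter with a randomized budget: at each one-second boundary the count is reseeded to limit/2 plus a uniform value in [0, limit), and each challenge ACK decrements it, so an off-path attacker can no longer infer the counter's state. The scheme in isolation (a sketch; the READ_ONCE/WRITE_ONCE annotations of the real code are omitted):

	/* Sketch of the randomized global challenge-ACK budget. */
	static bool challenge_ack_allowed(u32 *budget, u32 *stamp, u32 limit)
	{
		u32 now = jiffies / HZ;

		if (now != *stamp) {		/* new one-second window */
			*stamp = now;
			*budget = (limit + 1) / 2 + prandom_u32_max(limit);
		}
		if (*budget == 0)
			return false;		/* budget exhausted */
		(*budget)--;
		return true;
	}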
diff --git a/net/wireless/db.txt b/net/wireless/db.txt
index 3e47e0641780..23b7c76ff2d8 100644
--- a/net/wireless/db.txt
+++ b/net/wireless/db.txt
@@ -416,7 +416,7 @@ country EE: DFS-ETSI
(57240 - 65880 @ 2160), (40), NO-OUTDOOR
country EG: DFS-ETSI
- (2402 - 2482 @ 40), (20)
+ (2402 - 2482 @ 20), (20)
(5170 - 5250 @ 20), (23)
(5250 - 5330 @ 20), (23), DFS
diff --git a/sound/soc/codecs/msm_hdmi_codec_rx.c b/sound/soc/codecs/msm_hdmi_codec_rx.c
index dee66f231ceb..7d649ba2b505 100644
--- a/sound/soc/codecs/msm_hdmi_codec_rx.c
+++ b/sound/soc/codecs/msm_hdmi_codec_rx.c
@@ -318,8 +318,9 @@ static void msm_ext_disp_audio_codec_rx_dai_shutdown(
struct msm_ext_disp_audio_codec_rx_data *codec_data =
dev_get_drvdata(dai->codec->dev);
- if (!codec_data || !codec_data->ext_disp_ops.cable_status) {
- dev_err(dai->dev, "%s: codec data or cable_status is null\n",
+ if (!codec_data || !codec_data->ext_disp_ops.teardown_done ||
+ !codec_data->ext_disp_ops.cable_status) {
+ dev_err(dai->dev, "%s: codec data or teardown_done or cable_status is null\n",
__func__);
return;
}
@@ -332,6 +333,8 @@ static void msm_ext_disp_audio_codec_rx_dai_shutdown(
__func__);
}
+ codec_data->ext_disp_ops.teardown_done(
+ codec_data->ext_disp_core_pdev);
return;
}
diff --git a/sound/soc/codecs/wcd-dsp-mgr.c b/sound/soc/codecs/wcd-dsp-mgr.c
index ee8b27dbec64..d9d413f0a80a 100644
--- a/sound/soc/codecs/wcd-dsp-mgr.c
+++ b/sound/soc/codecs/wcd-dsp-mgr.c
@@ -136,7 +136,7 @@ struct wdsp_ramdump_data {
void *rd_v_addr;
/* Data provided through error interrupt */
- struct wdsp_err_intr_arg err_data;
+ struct wdsp_err_signal_arg err_data;
};
struct wdsp_mgr_priv {
@@ -608,7 +608,7 @@ static struct device *wdsp_get_dev_for_cmpnt(struct device *wdsp_dev,
static void wdsp_collect_ramdumps(struct wdsp_mgr_priv *wdsp)
{
struct wdsp_img_section img_section;
- struct wdsp_err_intr_arg *data = &wdsp->dump_data.err_data;
+ struct wdsp_err_signal_arg *data = &wdsp->dump_data.err_data;
struct ramdump_segment rd_seg;
int ret = 0;
@@ -684,17 +684,18 @@ static void wdsp_ssr_work_fn(struct work_struct *work)
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
- wdsp_collect_ramdumps(wdsp);
-
- /* In case of CDC_DOWN event, the DSP is already shutdown */
- if (wdsp->ssr_type != WDSP_SSR_TYPE_CDC_DOWN) {
+ /* Issue ramdumps and shutdown only if DSP is currently booted */
+ if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+ wdsp_collect_ramdumps(wdsp);
ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_CONTROL,
WDSP_EVENT_DO_SHUTDOWN, NULL);
if (IS_ERR_VALUE(ret))
WDSP_ERR(wdsp, "Failed WDSP shutdown, err = %d", ret);
+
+ wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN,
+ NULL);
+ WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED);
}
- wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_POST_SHUTDOWN, NULL);
- WDSP_CLEAR_STATUS(wdsp, WDSP_STATUS_BOOTED);
WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->ssr_mutex);
ret = wait_for_completion_timeout(&wdsp->ready_compl,
@@ -739,7 +740,7 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
enum wdsp_ssr_type ssr_type)
{
enum wdsp_ssr_type current_ssr_type;
- struct wdsp_err_intr_arg *err_data;
+ struct wdsp_err_signal_arg *err_data;
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->ssr_mutex);
@@ -750,7 +751,7 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
wdsp->ssr_type = ssr_type;
if (arg) {
- err_data = (struct wdsp_err_intr_arg *) arg;
+ err_data = (struct wdsp_err_signal_arg *) arg;
memcpy(&wdsp->dump_data.err_data, err_data,
sizeof(*err_data));
} else {
@@ -761,16 +762,29 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
switch (ssr_type) {
case WDSP_SSR_TYPE_WDSP_DOWN:
- case WDSP_SSR_TYPE_CDC_DOWN:
__wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_WDSP_READY);
- if (ssr_type == WDSP_SSR_TYPE_CDC_DOWN)
- __wdsp_clr_ready_locked(wdsp,
- WDSP_SSR_STATUS_CDC_READY);
wdsp_broadcast_event_downseq(wdsp, WDSP_EVENT_PRE_SHUTDOWN,
NULL);
schedule_work(&wdsp->ssr_work);
break;
+ case WDSP_SSR_TYPE_CDC_DOWN:
+ __wdsp_clr_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY);
+ /*
+ * If DSP is booted when CDC_DOWN is received, it needs
+ * to be shut down.
+ */
+ if (WDSP_STATUS_IS_SET(wdsp, WDSP_STATUS_BOOTED)) {
+ __wdsp_clr_ready_locked(wdsp,
+ WDSP_SSR_STATUS_WDSP_READY);
+ wdsp_broadcast_event_downseq(wdsp,
+ WDSP_EVENT_PRE_SHUTDOWN,
+ NULL);
+ }
+
+ schedule_work(&wdsp->ssr_work);
+ break;
+
case WDSP_SSR_TYPE_CDC_UP:
__wdsp_set_ready_locked(wdsp, WDSP_SSR_STATUS_CDC_READY, true);
break;
@@ -787,8 +801,8 @@ static int wdsp_ssr_handler(struct wdsp_mgr_priv *wdsp, void *arg,
return 0;
}
-static int wdsp_intr_handler(struct device *wdsp_dev,
- enum wdsp_intr intr, void *arg)
+static int wdsp_signal_handler(struct device *wdsp_dev,
+ enum wdsp_signal signal, void *arg)
{
struct wdsp_mgr_priv *wdsp;
int ret;
@@ -799,7 +813,9 @@ static int wdsp_intr_handler(struct device *wdsp_dev,
wdsp = dev_get_drvdata(wdsp_dev);
WDSP_MGR_MUTEX_LOCK(wdsp, wdsp->api_mutex);
- switch (intr) {
+ WDSP_DBG(wdsp, "Raised signal %d", signal);
+
+ switch (signal) {
case WDSP_IPC1_INTR:
ret = wdsp_unicast_event(wdsp, WDSP_CMPNT_IPC,
WDSP_EVENT_IPC1_INTR, NULL);
@@ -807,14 +823,20 @@ static int wdsp_intr_handler(struct device *wdsp_dev,
case WDSP_ERR_INTR:
ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_WDSP_DOWN);
break;
+ case WDSP_CDC_DOWN_SIGNAL:
+ ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_DOWN);
+ break;
+ case WDSP_CDC_UP_SIGNAL:
+ ret = wdsp_ssr_handler(wdsp, arg, WDSP_SSR_TYPE_CDC_UP);
+ break;
default:
ret = -EINVAL;
break;
}
if (IS_ERR_VALUE(ret))
- WDSP_ERR(wdsp, "handling intr %d failed with error %d",
- intr, ret);
+ WDSP_ERR(wdsp, "handling signal %d failed with error %d",
+ signal, ret);
WDSP_MGR_MUTEX_UNLOCK(wdsp, wdsp->api_mutex);
return ret;
@@ -870,7 +892,7 @@ static int wdsp_resume(struct device *wdsp_dev)
static struct wdsp_mgr_ops wdsp_ops = {
.register_cmpnt_ops = wdsp_register_cmpnt_ops,
.get_dev_for_cmpnt = wdsp_get_dev_for_cmpnt,
- .intr_handler = wdsp_intr_handler,
+ .signal_handler = wdsp_signal_handler,
.vote_for_dsp = wdsp_vote_for_dsp,
.suspend = wdsp_suspend,
.resume = wdsp_resume,
diff --git a/sound/soc/codecs/wcd-spi.c b/sound/soc/codecs/wcd-spi.c
index 60efcb174740..0a9c283c250c 100644
--- a/sound/soc/codecs/wcd-spi.c
+++ b/sound/soc/codecs/wcd-spi.c
@@ -319,7 +319,7 @@ static int wcd_spi_transfer_split(struct spi_device *spi,
u32 addr = data_msg->remote_addr;
u8 *data = data_msg->data;
int remain_size = data_msg->len;
- int to_xfer, loop_cnt, ret;
+ int to_xfer, loop_cnt, ret = 0;
/* Perform single writes until multi word alignment is met */
loop_cnt = 1;
@@ -631,6 +631,14 @@ static int wcd_spi_init(struct spi_device *spi)
if (IS_ERR_VALUE(ret))
goto err_wr_en;
+ /*
+ * In case spi_init is called after component deinit,
+ * it is possible the hardware register state was also reset.
+ * Sync the regcache here so the hardware state is updated
+ * to reflect the cache.
+ */
+ regcache_sync(wcd_spi->regmap);
+
regmap_write(wcd_spi->regmap, WCD_SPI_SLAVE_CONFIG,
0x0F3D0800);
@@ -837,7 +845,7 @@ static int wdsp_spi_event_handler(struct device *dev, void *priv_data,
void *data)
{
struct spi_device *spi = to_spi_device(dev);
- int ret;
+ int ret = 0;
dev_dbg(&spi->dev, "%s: event type %d\n",
__func__, event);
@@ -1093,46 +1101,12 @@ static struct regmap_config wcd_spi_regmap_cfg = {
static int wdsp_spi_init(struct device *dev, void *priv_data)
{
struct spi_device *spi = to_spi_device(dev);
- struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
int ret;
- wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
- wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
-
- wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
- &spi->dev, &wcd_spi_regmap_cfg);
- if (IS_ERR(wcd_spi->regmap)) {
- ret = PTR_ERR(wcd_spi->regmap);
- dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
- __func__, ret);
- goto err_regmap;
- }
-
- if (wcd_spi_debugfs_init(spi))
- dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
-
- spi_message_init(&wcd_spi->msg1);
- spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
-
- spi_message_init(&wcd_spi->msg2);
- spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
- spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
-
ret = wcd_spi_init(spi);
- if (IS_ERR_VALUE(ret)) {
+ if (IS_ERR_VALUE(ret))
dev_err(&spi->dev, "%s: Init failed, err = %d\n",
__func__, ret);
- goto err_init;
- }
-
- return 0;
-
-err_init:
- spi_transfer_del(&wcd_spi->xfer1);
- spi_transfer_del(&wcd_spi->xfer2[0]);
- spi_transfer_del(&wcd_spi->xfer2[1]);
-
-err_regmap:
return ret;
}
@@ -1141,9 +1115,11 @@ static int wdsp_spi_deinit(struct device *dev, void *priv_data)
struct spi_device *spi = to_spi_device(dev);
struct wcd_spi_priv *wcd_spi = spi_get_drvdata(spi);
- spi_transfer_del(&wcd_spi->xfer1);
- spi_transfer_del(&wcd_spi->xfer2[0]);
- spi_transfer_del(&wcd_spi->xfer2[1]);
+ /*
+ * Deinit means the hardware is reset. Mark the cache
+ * as dirty here, so init will sync the cache.
+ */
+ regcache_mark_dirty(wcd_spi->regmap);
return 0;
}
@@ -1170,9 +1146,34 @@ static int wcd_spi_component_bind(struct device *dev,
ret = wcd_spi->m_ops->register_cmpnt_ops(master, dev,
wcd_spi,
&wdsp_spi_ops);
- if (ret)
+ if (ret) {
dev_err(dev, "%s: register_cmpnt_ops failed, err = %d\n",
__func__, ret);
+ goto done;
+ }
+
+ wcd_spi->reg_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.reg_bits, 8);
+ wcd_spi->val_bytes = DIV_ROUND_UP(wcd_spi_regmap_cfg.val_bits, 8);
+
+ wcd_spi->regmap = devm_regmap_init(&spi->dev, &wcd_spi_regmap_bus,
+ &spi->dev, &wcd_spi_regmap_cfg);
+ if (IS_ERR(wcd_spi->regmap)) {
+ ret = PTR_ERR(wcd_spi->regmap);
+ dev_err(&spi->dev, "%s: Failed to allocate regmap, err = %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ if (wcd_spi_debugfs_init(spi))
+ dev_err(&spi->dev, "%s: Failed debugfs init\n", __func__);
+
+ spi_message_init(&wcd_spi->msg1);
+ spi_message_add_tail(&wcd_spi->xfer1, &wcd_spi->msg1);
+
+ spi_message_init(&wcd_spi->msg2);
+ spi_message_add_tail(&wcd_spi->xfer2[0], &wcd_spi->msg2);
+ spi_message_add_tail(&wcd_spi->xfer2[1], &wcd_spi->msg2);
+done:
return ret;
}
@@ -1185,6 +1186,10 @@ static void wcd_spi_component_unbind(struct device *dev,
wcd_spi->m_dev = NULL;
wcd_spi->m_ops = NULL;
+
+ spi_transfer_del(&wcd_spi->xfer1);
+ spi_transfer_del(&wcd_spi->xfer2[0]);
+ spi_transfer_del(&wcd_spi->xfer2[1]);
}
static const struct component_ops wcd_spi_component_ops = {
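The init/bind rework above follows the standard regmap cache pattern: regmap setup happens once at component bind, the cache is marked dirty whenever the hardware is reset at deinit, and the next init replays the cached registers. The pattern in isolation:

	/* at deinit, the hardware will be reset: */
	regcache_mark_dirty(wcd_spi->regmap);

	/* at the next init, replay cached values into the hardware: */
	regcache_sync(wcd_spi->regmap);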
diff --git a/sound/soc/codecs/wcd9335.c b/sound/soc/codecs/wcd9335.c
index 46b8e7f72eb8..9394ee52cad0 100644
--- a/sound/soc/codecs/wcd9335.c
+++ b/sound/soc/codecs/wcd9335.c
@@ -4596,9 +4596,11 @@ static int tasha_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
if (!ret) {
wcd_clsh_imped_config(codec, impedl, false);
set_bit(CLASSH_CONFIG, &tasha->status_mask);
- } else
+ } else {
dev_dbg(codec->dev, "%s: Failed to get mbhc impedance %d\n",
__func__, ret);
+ ret = 0;
+ }
break;
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.c b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
index 55072466af55..4e3e769585e6 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.c
@@ -28,6 +28,9 @@
#define DSD_VOLUME_STEP_DELAY_US ((1000 * DSD_VOLUME_UPDATE_DELAY_MS) / \
(2 * DSD_VOLUME_STEPS))
+#define TAVIL_VERSION_1_0 0
+#define TAVIL_VERSION_1_1 1
+
static const DECLARE_TLV_DB_MINMAX(tavil_dsd_db_scale, DSD_VOLUME_MIN_M110dB,
DSD_VOLUME_MAX_0dB);
@@ -369,6 +372,14 @@ static void tavil_dsd_data_pull(struct snd_soc_codec *codec, int dsd_num,
}
}
+static void tavil_dsd_update_volume(struct tavil_dsd_config *dsd_conf)
+{
+ snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_TOP_TOP_CFG0,
+ 0x01, 0x01);
+ snd_soc_update_bits(dsd_conf->codec, WCD934X_CDC_TOP_TOP_CFG0,
+ 0x01, 0x00);
+}
+
static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
struct snd_kcontrol *kcontrol, int event)
{
@@ -429,6 +440,8 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
/* Apply Gain */
snd_soc_write(codec, WCD934X_CDC_DSD0_CFG1,
dsd_conf->volume[DSD0]);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
} else if (w->shift == DSD1) {
snd_soc_update_bits(codec, WCD934X_CDC_DSD1_PATH_CTL,
@@ -440,6 +453,8 @@ static int tavil_enable_dsd(struct snd_soc_dapm_widget *w,
/* Apply Gain */
snd_soc_write(codec, WCD934X_CDC_DSD1_CFG1,
dsd_conf->volume[DSD1]);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
}
/* 10msec sleep required after DSD clock is set */
usleep_range(10000, 10100);
@@ -538,16 +553,23 @@ static int tavil_dsd_vol_put(struct snd_kcontrol *kcontrol,
snd_soc_write(codec,
WCD934X_CDC_DSD0_CFG1 + 16 * dsd_idx,
nv1);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
+
/* sleep required after each volume step */
usleep_range(DSD_VOLUME_STEP_DELAY_US,
(DSD_VOLUME_STEP_DELAY_US +
DSD_VOLUME_USLEEP_MARGIN_US));
}
- if (nv1 != nv[dsd_idx])
+ if (nv1 != nv[dsd_idx]) {
snd_soc_write(codec,
WCD934X_CDC_DSD0_CFG1 + 16 * dsd_idx,
nv[dsd_idx]);
+ if (dsd_conf->version == TAVIL_VERSION_1_1)
+ tavil_dsd_update_volume(dsd_conf);
+ }
+
dsd_conf->volume[dsd_idx] = nv[dsd_idx];
}
@@ -629,9 +651,14 @@ struct tavil_dsd_config *tavil_dsd_init(struct snd_soc_codec *codec)
dsd_conf->codec = codec;
+ /* Read version */
+ dsd_conf->version = snd_soc_read(codec,
+ WCD934X_CHIP_TIER_CTRL_CHIP_ID_BYTE0);
/* DSD registers init */
- snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x02, 0x00);
- snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x02, 0x00);
+ if (dsd_conf->version == TAVIL_VERSION_1_0) {
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x02, 0x00);
+ snd_soc_update_bits(codec, WCD934X_CDC_DSD1_CFG2, 0x02, 0x00);
+ }
/* DSD0: Mute EN */
snd_soc_update_bits(codec, WCD934X_CDC_DSD0_CFG2, 0x04, 0x04);
/* DSD1: Mute EN */
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsd.h b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
index c033795beb9b..21450c90a272 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsd.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsd.h
@@ -40,6 +40,7 @@ struct tavil_dsd_config {
u32 base_sample_rate[DSD_MAX];
int volume[DSD_MAX];
struct mutex vol_mutex;
+ int version;
};
#ifdef CONFIG_SND_SOC_WCD934X_DSD
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
index e649770297f1..8d2247176607 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.c
@@ -523,7 +523,9 @@ static int wcd_cntl_enable_memory(struct wcd_dsp_cntl *cntl)
ARRAY_SIZE(mem_enable_values),
mem_enable_values);
- snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0x05);
+ /* Make sure Deep sleep of memories is enabled for all banks */
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
done:
return ret;
}
@@ -533,6 +535,7 @@ static void wcd_cntl_disable_memory(struct wcd_dsp_cntl *cntl)
struct snd_soc_codec *codec = cntl->codec;
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_0, 0xFF);
+ snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_DEEPSLP_1, 0x0F);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_3, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_SYSMEM_SHUTDOWN_2, 0xFF);
snd_soc_write(codec, WCD934X_CPE_SS_PWR_CPE_DRAM1_SHUTDOWN, 0x07);
@@ -646,9 +649,9 @@ static irqreturn_t wcd_cntl_ipc_irq(int irq, void *data)
complete(&cntl->boot_complete);
if (cntl->m_dev && cntl->m_ops &&
- cntl->m_ops->intr_handler)
- ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_IPC1_INTR,
- NULL);
+ cntl->m_ops->signal_handler)
+ ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_IPC1_INTR,
+ NULL);
else
ret = -EINVAL;
@@ -663,7 +666,7 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data)
{
struct wcd_dsp_cntl *cntl = data;
struct snd_soc_codec *codec = cntl->codec;
- struct wdsp_err_intr_arg arg;
+ struct wdsp_err_signal_arg arg;
u16 status = 0;
u8 reg_val;
int ret = 0;
@@ -678,19 +681,19 @@ static irqreturn_t wcd_cntl_err_irq(int irq, void *data)
__func__, status);
if ((status & cntl->irqs.fatal_irqs) &&
- (cntl->m_dev && cntl->m_ops && cntl->m_ops->intr_handler)) {
+ (cntl->m_dev && cntl->m_ops && cntl->m_ops->signal_handler)) {
arg.mem_dumps_enabled = cntl->ramdump_enable;
arg.remote_start_addr = WCD_934X_RAMDUMP_START_ADDR;
arg.dump_size = WCD_934X_RAMDUMP_SIZE;
- ret = cntl->m_ops->intr_handler(cntl->m_dev, WDSP_ERR_INTR,
- &arg);
+ ret = cntl->m_ops->signal_handler(cntl->m_dev, WDSP_ERR_INTR,
+ &arg);
if (IS_ERR_VALUE(ret))
dev_err(cntl->codec->dev,
"%s: Failed to handle fatal irq 0x%x\n",
__func__, status & cntl->irqs.fatal_irqs);
wcd_cntl_change_online_state(cntl, 0);
} else {
- dev_err(cntl->codec->dev, "%s: Invalid intr_handler\n",
+ dev_err(cntl->codec->dev, "%s: Invalid signal_handler\n",
__func__);
}
@@ -833,8 +836,7 @@ static int wcd_control_init(struct device *dev, void *priv_data)
struct snd_soc_codec *codec = cntl->codec;
struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
- char wcd_cntl_dir_name[WCD_CNTL_DIR_NAME_LEN_MAX];
- int ret, ret1;
+ int ret;
bool err_irq_requested = false;
ret = wcd9xxx_request_irq(core_res,
@@ -876,25 +878,8 @@ static int wcd_control_init(struct device *dev, void *priv_data)
}
wcd_cntl_cpar_ctrl(cntl, true);
- snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
- "%s%d", "wdsp", cntl->dsp_instance);
- wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl);
- ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
- if (IS_ERR_VALUE(ret)) {
- dev_err(codec->dev,
- "%s: Failed to init sysfs %d\n",
- __func__, ret);
- goto err_sysfs_init;
- }
-
return 0;
-err_sysfs_init:
- wcd_cntl_cpar_ctrl(cntl, false);
- ret1 = wcd_cntl_clocks_disable(cntl);
- if (IS_ERR_VALUE(ret1))
- dev_err(codec->dev, "%s: Failed to disable clocks, err = %d\n",
- __func__, ret1);
err_clk_enable:
/* Mask all error interrupts */
snd_soc_write(codec, WCD934X_CPE_SS_SS_ERROR_INT_MASK_0A, 0xFF);
@@ -916,12 +901,6 @@ static int wcd_control_deinit(struct device *dev, void *priv_data)
struct wcd9xxx *wcd9xxx = dev_get_drvdata(codec->dev->parent);
struct wcd9xxx_core_resource *core_res = &wcd9xxx->core_res;
- /* Remove the sysfs entries */
- wcd_cntl_sysfs_remove(cntl);
-
- /* Remove the debugfs entries */
- wcd_cntl_debugfs_remove(cntl);
-
wcd_cntl_clocks_disable(cntl);
wcd_cntl_cpar_ctrl(cntl, false);
@@ -951,6 +930,7 @@ static int wcd_ctrl_component_bind(struct device *dev,
struct snd_card *card;
struct snd_info_entry *entry;
char proc_name[WCD_PROCFS_ENTRY_MAX_LEN];
+ char wcd_cntl_dir_name[WCD_CNTL_DIR_NAME_LEN_MAX];
int ret = 0;
if (!dev || !master || !data) {
@@ -982,6 +962,17 @@ static int wcd_ctrl_component_bind(struct device *dev,
goto done;
}
+ snprintf(wcd_cntl_dir_name, WCD_CNTL_DIR_NAME_LEN_MAX,
+ "%s%d", "wdsp", cntl->dsp_instance);
+ ret = wcd_cntl_sysfs_init(wcd_cntl_dir_name, cntl);
+ if (IS_ERR_VALUE(ret)) {
+ dev_err(dev, "%s: sysfs_init failed, err = %d\n",
+ __func__, ret);
+ goto done;
+ }
+
+ wcd_cntl_debugfs_init(wcd_cntl_dir_name, cntl);
+
codec = cntl->codec;
card = codec->component.card->snd_card;
snprintf(proc_name, WCD_PROCFS_ENTRY_MAX_LEN, "%s%d%s", "cpe",
@@ -1032,6 +1023,13 @@ static void wcd_ctrl_component_unbind(struct device *dev,
cntl->m_dev = NULL;
cntl->m_ops = NULL;
+
+ /* Remove the sysfs entries */
+ wcd_cntl_sysfs_remove(cntl);
+
+ /* Remove the debugfs entries */
+ wcd_cntl_debugfs_remove(cntl);
+
}
static const struct component_ops wcd_ctrl_component_ops = {
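The two hunks above move sysfs and debugfs creation out of wcd_control_init() and into the component bind path (with removal in unbind), so the entries share the lifetime of the wdsp master binding instead of the control init/deinit cycle. For reference, a bare sketch of that lifecycle using the generic kernel component API; example_bind()/example_unbind() are illustrative names:

    /* Sketch only: entries created in bind() are torn down in unbind(). */
    static int example_bind(struct device *dev, struct device *master,
                            void *data)
    {
        /* create sysfs/debugfs entries here */
        return 0;
    }

    static void example_unbind(struct device *dev, struct device *master,
                               void *data)
    {
        /* remove them here */
    }

    static const struct component_ops example_component_ops = {
        .bind = example_bind,
        .unbind = example_unbind,
    };
    /* registered once with component_add(dev, &example_component_ops) */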
@@ -1040,6 +1038,60 @@ static const struct component_ops wcd_ctrl_component_ops = {
};
/*
+ * wcd_dsp_ssr_event: handle the SSR event raised by the caller.
+ * @cntl: Handle to the wcd_dsp_cntl structure
+ * @event: The SSR event to be handled
+ *
+ * Notifies the manager driver about the SSR event.
+ * Returns 0 on success or a negative error code on failure.
+ */
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event)
+{
+ int ret = 0;
+
+ if (!cntl) {
+ pr_err("%s: Invalid handle to control\n", __func__);
+ return -EINVAL;
+ }
+
+ if (!cntl->m_dev || !cntl->m_ops || !cntl->m_ops->signal_handler) {
+ dev_err(cntl->codec->dev,
+ "%s: Invalid signal_handler callback\n", __func__);
+ return -EINVAL;
+ }
+
+ switch (event) {
+ case WCD_CDC_DOWN_EVENT:
+ ret = cntl->m_ops->signal_handler(cntl->m_dev,
+ WDSP_CDC_DOWN_SIGNAL,
+ NULL);
+ if (IS_ERR_VALUE(ret))
+ dev_err(cntl->codec->dev,
+ "%s: WDSP_CDC_DOWN_SIGNAL failed, err = %d\n",
+ __func__, ret);
+ wcd_cntl_change_online_state(cntl, 0);
+ break;
+ case WCD_CDC_UP_EVENT:
+ ret = cntl->m_ops->signal_handler(cntl->m_dev,
+ WDSP_CDC_UP_SIGNAL,
+ NULL);
+ if (IS_ERR_VALUE(ret))
+ dev_err(cntl->codec->dev,
+ "%s: WDSP_CDC_UP_SIGNAL failed, err = %d\n",
+ __func__, ret);
+ break;
+ default:
+ dev_err(cntl->codec->dev, "%s: Invalid event %d\n",
+ __func__, event);
+ ret = -EINVAL;
+ break;
+ }
+
+ return ret;
+}
+EXPORT_SYMBOL(wcd_dsp_ssr_event);
+
+/*
* wcd_dsp_cntl_init: Initialize the wcd-dsp control
* @codec: pointer to the codec handle
* @params: Parameters required to initialize wcd-dsp control
diff --git a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
index cd6697b3d641..83c59ed7b676 100644
--- a/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
+++ b/sound/soc/codecs/wcd934x/wcd934x-dsp-cntl.h
@@ -17,6 +17,11 @@
#include <sound/soc.h>
#include <sound/wcd-dsp-mgr.h>
+enum cdc_ssr_event {
+ WCD_CDC_DOWN_EVENT,
+ WCD_CDC_UP_EVENT,
+};
+
struct wcd_dsp_cdc_cb {
/* Callback to enable codec clock */
int (*cdc_clk_en)(struct snd_soc_codec *, bool);
@@ -106,5 +111,5 @@ void wcd_dsp_cntl_init(struct snd_soc_codec *codec,
struct wcd_dsp_params *params,
struct wcd_dsp_cntl **cntl);
void wcd_dsp_cntl_deinit(struct wcd_dsp_cntl **cntl);
-
+int wcd_dsp_ssr_event(struct wcd_dsp_cntl *cntl, enum cdc_ssr_event event);
#endif /* end __WCD_DSP_CONTROL_H__ */
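wcd_dsp_ssr_event() gives the codec driver a single entry point for propagating subsystem-restart state to the wdsp manager via the renamed signal_handler callback. A hedged usage sketch; handle_codec_restart() is an illustrative caller, and only wcd_dsp_ssr_event() and enum cdc_ssr_event come from the patch:

    /* Sketch only: forwarding an SSR notification to the wdsp manager. */
    static void handle_codec_restart(struct wcd_dsp_cntl *cntl, bool down)
    {
        int ret;

        ret = wcd_dsp_ssr_event(cntl, down ? WCD_CDC_DOWN_EVENT :
                                             WCD_CDC_UP_EVENT);
        if (ret < 0)
            pr_err("%s: wdsp ssr event failed, err = %d\n", __func__, ret);
    }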
diff --git a/sound/soc/codecs/wcd934x/wcd934x.c b/sound/soc/codecs/wcd934x/wcd934x.c
index 9e18c17d6f1c..c0996e24e3d6 100644
--- a/sound/soc/codecs/wcd934x/wcd934x.c
+++ b/sound/soc/codecs/wcd934x/wcd934x.c
@@ -1859,8 +1859,9 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, (0x03 << 1));
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, (0x03 << 1));
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01)) {
@@ -1922,8 +1923,9 @@ static int tavil_codec_enable_hphr_pa(struct snd_soc_dapm_widget *w,
blocking_notifier_call_chain(&tavil->mbhc->notifier,
WCD_EVENT_POST_HPHR_PA_OFF,
&tavil->mbhc->wcd_mbhc);
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, 0x0);
break;
};
@@ -1942,8 +1944,9 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
switch (event) {
case SND_SOC_DAPM_PRE_PMU:
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, (0x03 << 1));
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, (0x03 << 1));
set_bit(HPH_PA_DELAY, &tavil->status_mask);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01)) {
@@ -2004,8 +2007,9 @@ static int tavil_codec_enable_hphl_pa(struct snd_soc_dapm_widget *w,
blocking_notifier_call_chain(&tavil->mbhc->notifier,
WCD_EVENT_POST_HPHL_PA_OFF,
&tavil->mbhc->wcd_mbhc);
- snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
- 0x06, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec, WCD934X_HPH_REFBUFF_LP_CTL,
+ 0x06, 0x0);
break;
};
@@ -2130,9 +2134,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
0x02, 0x00);
/* Set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x40);
-
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x40);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD1_PATH_CTL) & 0x01))
hph_mode = CLS_H_HIFI;
@@ -2155,8 +2160,10 @@ static int tavil_codec_hphr_dac_event(struct snd_soc_dapm_widget *w,
WCD934X_SIDO_NEW_VOUT_D_FREQ2,
0x01, 0x0);
/* Re-set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x0);
break;
default:
break;
@@ -2199,8 +2206,10 @@ static int tavil_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_HPH_TIMER1,
0x02, 0x00);
/* Set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x40);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x40);
if (dsd_conf &&
(snd_soc_read(codec, WCD934X_CDC_DSD0_PATH_CTL) & 0x01))
hph_mode = CLS_H_HIFI;
@@ -2223,8 +2232,10 @@ static int tavil_codec_hphl_dac_event(struct snd_soc_dapm_widget *w,
WCD934X_SIDO_NEW_VOUT_D_FREQ2,
0x01, 0x0);
/* Re-set RDAC gain */
- snd_soc_update_bits(codec, WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
- 0xF0, 0x0);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx))
+ snd_soc_update_bits(codec,
+ WCD934X_HPH_NEW_INT_RDAC_GAIN_CTL,
+ 0xF0, 0x0);
break;
default:
break;
@@ -2820,11 +2831,15 @@ static void tavil_codec_hphdelay_lutbypass(struct snd_soc_codec *codec,
}
}
-static void tavil_codec_hd2_control(struct snd_soc_codec *codec,
+static void tavil_codec_hd2_control(struct tavil_priv *priv,
u16 interp_idx, int event)
{
u16 hd2_scale_reg;
u16 hd2_enable_reg = 0;
+ struct snd_soc_codec *codec = priv->codec;
+
+ if (TAVIL_IS_1_1(priv->wcd9xxx))
+ return;
switch (interp_idx) {
case INTERP_HPHL:
@@ -3002,7 +3017,7 @@ int tavil_codec_enable_interp_clk(struct snd_soc_codec *codec,
snd_soc_update_bits(codec, main_reg, 0x20, 0x20);
tavil_codec_idle_detect_control(codec, interp_idx,
event);
- tavil_codec_hd2_control(codec, interp_idx, event);
+ tavil_codec_hd2_control(tavil, interp_idx, event);
tavil_codec_hphdelay_lutbypass(codec, interp_idx,
event);
tavil_config_compander(codec, interp_idx, event);
@@ -3017,7 +3032,7 @@ int tavil_codec_enable_interp_clk(struct snd_soc_codec *codec,
tavil_config_compander(codec, interp_idx, event);
tavil_codec_hphdelay_lutbypass(codec, interp_idx,
event);
- tavil_codec_hd2_control(codec, interp_idx, event);
+ tavil_codec_hd2_control(tavil, interp_idx, event);
tavil_codec_idle_detect_control(codec, interp_idx,
event);
/* Clk Disable */
@@ -7837,7 +7852,11 @@ static const struct wcd_resmgr_cb tavil_resmgr_cb = {
.cdc_rco_ctrl = __tavil_codec_internal_rco_ctrl,
};
-static const struct tavil_reg_mask_val tavil_codec_mclk2_defaults[] = {
+static const struct tavil_reg_mask_val tavil_codec_mclk2_1_1_defaults[] = {
+ {WCD934X_CLK_SYS_MCLK2_PRG1, 0x60, 0x20},
+};
+
+static const struct tavil_reg_mask_val tavil_codec_mclk2_1_0_defaults[] = {
/*
* PLL Settings:
* Clock Root: MCLK2,
@@ -7896,6 +7915,13 @@ static const struct tavil_reg_mask_val tavil_codec_reg_defaults[] = {
{WCD934X_HPH_R_TEST, 0x01, 0x01},
};
+static const struct tavil_reg_mask_val tavil_codec_reg_init_1_1_val[] = {
+ {WCD934X_CDC_COMPANDER1_CTL7, 0x1E, 0x06},
+ {WCD934X_CDC_COMPANDER2_CTL7, 0x1E, 0x06},
+ {WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_L, 0xFF, 0x84},
+ {WCD934X_HPH_NEW_INT_RDAC_HD2_CTL_R, 0xFF, 0x84},
+};
+
static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
{WCD934X_CDC_CLSH_K2_MSB, 0x0F, 0x00},
{WCD934X_CDC_CLSH_K2_LSB, 0xFF, 0x60},
@@ -7922,8 +7948,9 @@ static const struct tavil_reg_mask_val tavil_codec_reg_init_common_val[] = {
{WCD934X_CPE_SS_SVA_CFG, 0x60, 0x00},
};
-static void tavil_codec_init_reg(struct snd_soc_codec *codec)
+static void tavil_codec_init_reg(struct tavil_priv *priv)
{
+ struct snd_soc_codec *codec = priv->codec;
u32 i;
for (i = 0; i < ARRAY_SIZE(tavil_codec_reg_init_common_val); i++)
@@ -7931,6 +7958,14 @@ static void tavil_codec_init_reg(struct snd_soc_codec *codec)
tavil_codec_reg_init_common_val[i].reg,
tavil_codec_reg_init_common_val[i].mask,
tavil_codec_reg_init_common_val[i].val);
+
+ if (TAVIL_IS_1_1(priv->wcd9xxx)) {
+ for (i = 0; i < ARRAY_SIZE(tavil_codec_reg_init_1_1_val); i++)
+ snd_soc_update_bits(codec,
+ tavil_codec_reg_init_1_1_val[i].reg,
+ tavil_codec_reg_init_1_1_val[i].mask,
+ tavil_codec_reg_init_1_1_val[i].val);
+ }
}
static void tavil_update_reg_defaults(struct tavil_priv *tavil)
@@ -8367,11 +8402,22 @@ static void tavil_mclk2_reg_defaults(struct tavil_priv *tavil)
int i;
struct snd_soc_codec *codec = tavil->codec;
- /* MCLK2 configuration */
- for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_defaults); i++)
- snd_soc_update_bits(codec, tavil_codec_mclk2_defaults[i].reg,
- tavil_codec_mclk2_defaults[i].mask,
- tavil_codec_mclk2_defaults[i].val);
+ if (TAVIL_IS_1_0(tavil->wcd9xxx)) {
+ /* MCLK2 configuration */
+ for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_1_0_defaults); i++)
+ snd_soc_update_bits(codec,
+ tavil_codec_mclk2_1_0_defaults[i].reg,
+ tavil_codec_mclk2_1_0_defaults[i].mask,
+ tavil_codec_mclk2_1_0_defaults[i].val);
+ }
+ if (TAVIL_IS_1_1(tavil->wcd9xxx)) {
+ /* MCLK2 configuration */
+ for (i = 0; i < ARRAY_SIZE(tavil_codec_mclk2_1_1_defaults); i++)
+ snd_soc_update_bits(codec,
+ tavil_codec_mclk2_1_1_defaults[i].reg,
+ tavil_codec_mclk2_1_1_defaults[i].mask,
+ tavil_codec_mclk2_1_1_defaults[i].val);
+ }
}
static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
@@ -8429,7 +8475,7 @@ static int tavil_soc_codec_probe(struct snd_soc_codec *codec)
for (i = 0; i < COMPANDER_MAX; i++)
tavil->comp_enabled[i] = 0;
- tavil_codec_init_reg(codec);
+ tavil_codec_init_reg(tavil);
tavil_enable_sido_buck(codec);
pdata = dev_get_platdata(codec->dev->parent);
@@ -8749,6 +8795,9 @@ static int tavil_swrm_clock(void *handle, bool enable)
if (enable) {
tavil->swr.clk_users++;
if (tavil->swr.clk_users == 1) {
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_TEST_DEBUG_NPL_DLY_TEST_1,
+ 0x10, 0x00);
__tavil_cdc_mclk_enable(tavil, true);
regmap_update_bits(tavil->wcd9xxx->regmap,
WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL,
@@ -8761,6 +8810,9 @@ static int tavil_swrm_clock(void *handle, bool enable)
WCD934X_CDC_CLK_RST_CTRL_SWR_CONTROL,
0x01, 0x00);
__tavil_cdc_mclk_enable(tavil, false);
+ regmap_update_bits(tavil->wcd9xxx->regmap,
+ WCD934X_TEST_DEBUG_NPL_DLY_TEST_1,
+ 0x10, 0x10);
}
}
dev_dbg(tavil->dev, "%s: swrm clock users %d\n",
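The wcd934x.c changes above follow one pattern: register workarounds are kept in tavil_reg_mask_val tables and applied only on the silicon revision that needs them (TAVIL_IS_1_0 vs TAVIL_IS_1_1). A condensed sketch of that pattern; apply_reg_table() is a hypothetical helper, while the driver open-codes the loop per table:

    /* Sketch only: the table-driven, revision-gated update used above. */
    static void apply_reg_table(struct snd_soc_codec *codec,
                                const struct tavil_reg_mask_val *tbl,
                                size_t num)
    {
        size_t i;

        for (i = 0; i < num; i++)
            snd_soc_update_bits(codec, tbl[i].reg, tbl[i].mask, tbl[i].val);
    }

    /*
     * e.g. in tavil_codec_init_reg():
     *     if (TAVIL_IS_1_1(priv->wcd9xxx))
     *         apply_reg_table(codec, tavil_codec_reg_init_1_1_val,
     *                         ARRAY_SIZE(tavil_codec_reg_init_1_1_val));
     */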
diff --git a/sound/soc/codecs/wcd9xxx-common-v2.c b/sound/soc/codecs/wcd9xxx-common-v2.c
index 63872bbf540c..47518ec92661 100644
--- a/sound/soc/codecs/wcd9xxx-common-v2.c
+++ b/sound/soc/codecs/wcd9xxx-common-v2.c
@@ -369,8 +369,9 @@ static inline void wcd_clsh_gm3_boost_disable(struct snd_soc_codec *codec,
if (mode == CLS_H_HIFI || mode == CLS_H_LOHIFI ||
mode == CLS_AB_HIFI || mode == CLS_AB) {
- snd_soc_update_bits(codec, WCD9XXX_HPH_CNP_WG_CTL,
- 0x80, 0x0); /* disable GM3 Boost */
+ if (TAVIL_IS_1_0(wcd9xxx))
+ snd_soc_update_bits(codec, WCD9XXX_HPH_CNP_WG_CTL,
+ 0x80, 0x0); /* disable GM3 Boost */
snd_soc_update_bits(codec, WCD9XXX_FLYBACK_VNEG_CTRL_4,
0xF0, 0x80);
} else {
diff --git a/sound/soc/codecs/wcd9xxx-resmgr-v2.c b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
index 29718a8d7c04..39ca965e791e 100644
--- a/sound/soc/codecs/wcd9xxx-resmgr-v2.c
+++ b/sound/soc/codecs/wcd9xxx-resmgr-v2.c
@@ -307,7 +307,7 @@ static int wcd_resmgr_disable_clk_mclk(struct wcd9xxx_resmgr_v2 *resmgr)
WCD9335_ANA_CLK_TOP,
0x04, 0x00);
wcd_resmgr_codec_reg_update_bits(resmgr,
- WCD934X_CLK_SYS_MCLK_PRG, 0x01, 0x0);
+ WCD934X_CLK_SYS_MCLK_PRG, 0x81, 0x00);
resmgr->clk_type = WCD_CLK_OFF;
}
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index 5c40c55a4a0c..547af163c5c0 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -253,218 +253,218 @@ static void msm_pcm_routng_cfg_matrix_map_pp(struct route_payload payload,
#define SLIMBUS_EXTPROC_RX AFE_PORT_INVALID
struct msm_pcm_routing_bdai_data msm_bedais[MSM_BACKEND_DAI_MAX] = {
- { PRIMARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
- { PRIMARY_I2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
- { SLIMBUS_0_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
- { SLIMBUS_0_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
- { HDMI_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
- { INT_BT_SCO_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
- { INT_BT_SCO_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
- { INT_FM_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
- { INT_FM_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
- { RT_PROXY_PORT_001_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
- { RT_PROXY_PORT_001_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
- { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { PRIMARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_RX},
+ { PRIMARY_I2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_PRI_I2S_TX},
+ { SLIMBUS_0_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_RX},
+ { SLIMBUS_0_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_0_TX},
+ { HDMI_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_HDMI},
+ { INT_BT_SCO_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_RX},
+ { INT_BT_SCO_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_SCO_TX},
+ { INT_FM_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_RX},
+ { INT_FM_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_FM_TX},
+ { RT_PROXY_PORT_001_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_RX},
+ { RT_PROXY_PORT_001_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_AFE_PCM_TX},
+ { AFE_PORT_ID_PRIMARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_RX},
- { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUXPCM_TX},
- { VOICE_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE_PLAYBACK_TX},
- { VOICE2_PLAYBACK_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE2_PLAYBACK_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_VOICE2_PLAYBACK_TX},
- { VOICE_RECORD_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
- { VOICE_RECORD_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
- { MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
- { MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
- { SECONDARY_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
- { SLIMBUS_1_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
- { SLIMBUS_1_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
- { SLIMBUS_2_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
- { SLIMBUS_4_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
- { SLIMBUS_4_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
- { SLIMBUS_3_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
- { SLIMBUS_3_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
- { SLIMBUS_5_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
- { SLIMBUS_EXTPROC_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
- { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { VOICE_RECORD_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_RX},
+ { VOICE_RECORD_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INCALL_RECORD_TX},
+ { MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_RX},
+ { MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_MI2S_TX},
+ { SECONDARY_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SEC_I2S_RX},
+ { SLIMBUS_1_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_RX},
+ { SLIMBUS_1_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_1_TX},
+ { SLIMBUS_2_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_2_RX},
+ { SLIMBUS_4_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_RX},
+ { SLIMBUS_4_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_4_TX},
+ { SLIMBUS_3_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_RX},
+ { SLIMBUS_3_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_3_TX},
+ { SLIMBUS_5_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_RX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_TX},
+ { SLIMBUS_EXTPROC_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_STUB_1_TX},
+ { AFE_PORT_ID_QUATERNARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_RX},
- { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_MI2S_TX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_RX},
- { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_MI2S_TX},
- { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_RX},
- { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_MI2S_TX},
- { AUDIO_PORT_ID_I2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AUDIO_PORT_ID_I2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_AUDIO_I2S_RX},
- { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_RX},
- { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_AUXPCM_TX},
- { SLIMBUS_6_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
- { SLIMBUS_6_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
- { AFE_PORT_ID_SPDIF_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
- { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_6_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_RX},
+ { SLIMBUS_6_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_6_TX},
+ { AFE_PORT_ID_SPDIF_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SPDIF_RX},
+ { AFE_PORT_ID_SECONDARY_MI2S_RX_SD1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_MI2S_RX_SD1},
- { SLIMBUS_5_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
- { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { SLIMBUS_5_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_5_RX},
+ { AFE_PORT_ID_QUINARY_MI2S_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_RX},
- { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUINARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUIN_MI2S_TX},
- { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SENARY_MI2S_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SENARY_MI2S_TX},
- { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_0},
- { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_0},
- { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_1},
- { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_1},
- { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_2},
- { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_2},
- { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_3},
- { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_3},
- { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_4},
- { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_4},
- { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_5},
- { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_5},
- { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_6},
- { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_6},
- { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_RX_7},
- { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_PRIMARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_PRI_TDM_TX_7},
- { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_0},
- { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_0},
- { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_1},
- { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_1},
- { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_2},
- { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_2},
- { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_3},
- { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_3},
- { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_4},
- { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_4},
- { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_5},
- { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_5},
- { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_6},
- { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_6},
- { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_RX_7},
- { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_SECONDARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_SEC_TDM_TX_7},
- { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_0},
- { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_0},
- { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_1},
- { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_1},
- { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_2},
- { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_2},
- { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_3},
- { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_3},
- { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_4},
- { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_4},
- { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_5},
- { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_5},
- { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_6},
- { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_6},
- { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_RX_7},
- { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_TDM_TX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_0},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_1, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_1},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_2, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_2},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_3, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_3},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_4, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_4},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_5, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_5},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_6, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_6},
- { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_RX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_RX_7},
- { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_TDM_TX_7, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_TDM_TX_7},
- { INT_BT_A2DP_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
- { SLIMBUS_7_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
- { SLIMBUS_7_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
- { SLIMBUS_8_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
- { SLIMBUS_8_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
- { AFE_PORT_ID_USB_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
- { AFE_PORT_ID_USB_TX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
- { DISPLAY_PORT_RX, 0, 0, 0, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
- { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { INT_BT_A2DP_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_INT_BT_A2DP_RX},
+ { SLIMBUS_7_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_RX},
+ { SLIMBUS_7_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_7_TX},
+ { SLIMBUS_8_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_RX},
+ { SLIMBUS_8_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_SLIMBUS_8_TX},
+ { AFE_PORT_ID_USB_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_RX},
+ { AFE_PORT_ID_USB_TX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_USB_AUDIO_TX},
+ { DISPLAY_PORT_RX, 0, 0, {0}, 0, 0, 0, 0, 0, LPASS_BE_DISPLAY_PORT},
+ { AFE_PORT_ID_TERTIARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_RX},
- { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_TERTIARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_TERT_AUXPCM_TX},
- { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_RX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_RX},
- { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, 0, 0, 0, 0, 0, 0,
+ { AFE_PORT_ID_QUATERNARY_PCM_TX, 0, 0, {0}, 0, 0, 0, 0, 0,
LPASS_BE_QUAT_AUXPCM_TX},
};
@@ -2006,11 +2006,20 @@ static int msm_routing_slim_0_rx_aanc_mux_put(struct snd_kcontrol *kcontrol,
static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- if (test_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions))
+ idx = mc->shift / (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+ shift = mc->shift % (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ if (test_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]))
ucontrol->value.integer.value[0] = 1;
else
ucontrol->value.integer.value[0] = 0;
@@ -2024,22 +2033,32 @@ static int msm_routing_get_port_mixer(struct snd_kcontrol *kcontrol,
static int msm_routing_put_port_mixer(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
+ int idx = 0, shift = 0;
struct soc_mixer_control *mc =
(struct soc_mixer_control *)kcontrol->private_value;
- pr_debug("%s: reg 0x%x shift 0x%x val %ld\n", __func__, mc->reg,
- mc->shift, ucontrol->value.integer.value[0]);
+ idx = mc->shift / (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+ shift = mc->shift % (sizeof(msm_bedais[mc->reg].port_sessions[0]) * 8);
+
+ if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX) {
+ pr_err("%s: Invalid idx = %d\n", __func__, idx);
+ return -EINVAL;
+ }
+
+ pr_debug("%s: reg 0x%x shift 0x%x val %ld idx %d remainder shift %d\n",
+ __func__, mc->reg, mc->shift,
+ ucontrol->value.integer.value[0], idx, shift);
if (ucontrol->value.integer.value[0]) {
afe_loopback(1, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- set_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ set_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
} else {
afe_loopback(0, msm_bedais[mc->reg].port_id,
msm_bedais[mc->shift].port_id);
- clear_bit(mc->shift,
- (unsigned long *)&msm_bedais[mc->reg].port_sessions);
+ clear_bit(shift,
+ (unsigned long *)&msm_bedais[mc->reg].port_sessions[idx]);
}
return 1;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
index 6b7f2113e0f6..8e3086849d92 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.h
@@ -355,6 +355,7 @@ enum {
#define ADM_PP_PARAM_MUTE_BIT 1
#define ADM_PP_PARAM_LATENCY_ID 1
#define ADM_PP_PARAM_LATENCY_BIT 2
+#define BE_DAI_PORT_SESSIONS_IDX_MAX 4
struct msm_pcm_routing_evt {
void (*event_func)(enum msm_pcm_routing_event, void *);
@@ -365,10 +366,15 @@ struct msm_pcm_routing_bdai_data {
u16 port_id; /* AFE port ID */
u8 active; /* track if this backend is enabled */
unsigned long fe_sessions; /* Front-end sessions */
- u64 port_sessions; /* track Tx BE ports -> Rx BE
- * number of BE should not exceed
- * the size of this field
- */
+ /*
+ * Track Tx BE ports -> Rx BE ports.
+ * port_sessions[0] used to track BE 0 to BE 63.
+ * port_sessions[1] used to track BE 64 to BE 127.
+ * port_sessions[2] used to track BE 128 to BE 191.
+ * port_sessions[3] used to track BE 192 to BE 255.
+ */
+ u64 port_sessions[BE_DAI_PORT_SESSIONS_IDX_MAX];
+
unsigned int sample_rate;
unsigned int channel;
unsigned int format;
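With port_sessions widened from a single u64 to a four-word array, every backend index is first split into a word index and a bit offset, exactly as in the get/put port-mixer handlers above. A standalone sketch of that decomposition (plain C outside the driver; port_session_set() is illustrative):

    /* Sketch only: (word, bit) decomposition for the widened bitmap. */
    #include <stdint.h>

    #define BE_DAI_PORT_SESSIONS_IDX_MAX 4
    #define BITS_PER_U64 (sizeof(uint64_t) * 8)

    static void port_session_set(uint64_t *sessions, unsigned int be)
    {
        unsigned int idx = be / BITS_PER_U64; /* which u64 word */
        unsigned int bit = be % BITS_PER_U64; /* bit within that word */

        if (idx >= BE_DAI_PORT_SESSIONS_IDX_MAX)
            return; /* backend index out of range */
        sessions[idx] |= (uint64_t)1 << bit;
    }

    /* BE 130 -> sessions[2], bit 2: port_sessions[2] tracks BE 128-191 */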