summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--Documentation/devicetree/bindings/clock/qcom,mmcc.txt1
-rw-r--r--Documentation/devicetree/bindings/display/msm/hdmi.txt2
-rw-r--r--Documentation/devicetree/bindings/drm/msm/hdmi-display.txt59
-rw-r--r--Documentation/devicetree/bindings/input/touchscreen/msg21xx-ts.txt71
-rw-r--r--Documentation/devicetree/bindings/media/video/laser-sensor.txt28
-rw-r--r--Documentation/devicetree/bindings/media/video/msm-cam.txt4
-rw-r--r--Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt12
-rw-r--r--Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt2
-rw-r--r--Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt8
-rw-r--r--Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt30
-rw-r--r--Documentation/devicetree/bindings/sound/wcd_codec.txt5
-rw-r--r--Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt3
-rw-r--r--Documentation/filesystems/Locking4
-rw-r--r--Documentation/filesystems/vfs.txt11
-rw-r--r--Documentation/vm/page_migration108
-rw-r--r--arch/arm/boot/dts/qcom/Makefile8
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts4
-rw-r--r--arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts5
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/msm-audio.dtsi52
-rw-r--r--arch/arm/boot/dts/qcom/msm-pm660l.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/msm-smb138x.dtsi28
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi22
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi21
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi46
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi7
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi26
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi23
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi41
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-sde.dtsi215
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts34
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-v2.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998-vidc.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/msm8998.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-camera-sensor-cdp.dtsi370
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi370
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-camera.dtsi865
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-cdp.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-gpu.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts26
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-rcm.dts26
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi2
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-mdss.dtsi17
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-mtp.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts26
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-rcm.dts26
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-qrd.dtsi61
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-regulator.dtsi34
-rw-r--r--arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts31
-rw-r--r--arch/arm/boot/dts/qcom/sdm630.dtsi78
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-audio.dtsi20
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-camera.dtsi5
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-cdp.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-common.dtsi1
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-gpu.dtsi4
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi14
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mdss.dtsi17
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-mtp.dtsi8
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi60
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-pm.dtsi130
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-qrd.dtsi23
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-regulator.dtsi60
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts31
-rw-r--r--arch/arm/boot/dts/qcom/sdm660-vidc.dtsi3
-rw-r--r--arch/arm/boot/dts/qcom/sdm660.dtsi31
-rw-r--r--arch/arm/configs/msmcortex_defconfig1
-rw-r--r--arch/arm/configs/sdm660-perf_defconfig5
-rw-r--r--arch/arm/configs/sdm660_defconfig3
-rw-r--r--arch/arm64/Kconfig.debug3
-rw-r--r--arch/arm64/configs/msmcortex-perf_defconfig3
-rw-r--r--arch/arm64/configs/msmcortex_defconfig5
-rw-r--r--arch/arm64/configs/msmcortex_mediabox_defconfig14
-rw-r--r--arch/arm64/configs/sdm660-perf_defconfig4
-rw-r--r--arch/arm64/configs/sdm660_defconfig4
-rw-r--r--arch/arm64/include/asm/pgtable.h29
-rw-r--r--arch/arm64/kernel/stacktrace.c4
-rw-r--r--certs/Makefile4
-rw-r--r--certs/verity.x509.pem24
-rw-r--r--drivers/base/dma-removed.c3
-rw-r--r--drivers/base/firmware_class.c3
-rw-r--r--drivers/base/regmap/Kconfig5
-rw-r--r--drivers/block/zram/zram_drv.c5
-rw-r--r--drivers/bluetooth/btfm_slim.c2
-rw-r--r--drivers/bluetooth/btfm_slim.h4
-rw-r--r--drivers/bluetooth/btfm_slim_codec.c60
-rw-r--r--drivers/bluetooth/btfm_slim_wcn3990.c4
-rw-r--r--drivers/char/diag/diag_dci.c53
-rw-r--r--drivers/char/diag/diagchar_core.c3
-rw-r--r--drivers/char/diag/diagfwd.c94
-rw-r--r--drivers/char/diag/diagfwd.h5
-rw-r--r--drivers/char/diag/diagfwd_glink.c44
-rw-r--r--drivers/char/diag/diagfwd_glink.h4
-rw-r--r--drivers/clk/msm/clock-gcc-8998.c49
-rw-r--r--drivers/clk/msm/clock-generic.c14
-rw-r--r--drivers/clk/msm/clock-mmss-8998.c16
-rw-r--r--drivers/clk/msm/clock-osm.c62
-rw-r--r--drivers/clk/qcom/clk-alpha-pll.c1
-rw-r--r--drivers/clk/qcom/clk-cpu-osm.c27
-rw-r--r--drivers/clk/qcom/clk-rcg2.c22
-rw-r--r--drivers/clk/qcom/clk-smd-rpm.c4
-rw-r--r--drivers/clk/qcom/gcc-sdm660.c61
-rw-r--r--drivers/clk/qcom/mmcc-sdm660.c18
-rw-r--r--drivers/crypto/msm/qcedev.c10
-rw-r--r--drivers/gpu/drm/msm/Kconfig8
-rw-r--r--drivers/gpu/drm/msm/Makefile21
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h22
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h523
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c115
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h1365
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c201
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h3497
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c1345
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h187
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c509
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c383
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_snapshot.c796
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h49
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c117
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c467
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h221
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h160
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c11
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c4
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h5
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h5
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c1352
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h417
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c393
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c417
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c227
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h300
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c38
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h6
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h10
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c19
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c49
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c2
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c7
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c43
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h5
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c203
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h103
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c26
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c7
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c199
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h37
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c38
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c256
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c340
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h122
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c245
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.h37
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h12
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c6
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c13
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h19
-rw-r--r--drivers/gpu/drm/msm/msm_smmu.c18
-rw-r--r--drivers/gpu/drm/msm/msm_snapshot.c105
-rw-r--r--drivers/gpu/drm/msm/msm_snapshot.h85
-rw-r--r--drivers/gpu/drm/msm/msm_snapshot_api.h121
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c38
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.h17
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_irq.c95
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_irq.h16
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c14
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys.h4
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c29
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.c99
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.h11
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog.c8
-rw-r--r--drivers/gpu/drm/msm/sde/sde_irq.c80
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c159
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.h7
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.c12
-rw-r--r--drivers/gpu/drm/msm/sde_io_util.c502
-rw-r--r--drivers/gpu/drm/msm/sde_power_handle.c2
-rw-r--r--drivers/gpu/msm/adreno_a5xx.c1
-rw-r--r--drivers/gpu/msm/adreno_debugfs.c3
-rw-r--r--drivers/gpu/msm/adreno_dispatch.c6
-rw-r--r--drivers/gpu/msm/adreno_drawctxt.c10
-rw-r--r--drivers/gpu/msm/adreno_ringbuffer.c3
-rw-r--r--drivers/gpu/msm/kgsl.c5
-rw-r--r--drivers/gpu/msm/kgsl_device.h6
-rw-r--r--drivers/gpu/msm/kgsl_snapshot.c8
-rw-r--r--drivers/hwtracing/coresight/Kconfig11
-rw-r--r--drivers/i2c/busses/i2c-msm-v2.c70
-rw-r--r--drivers/input/touchscreen/Kconfig11
-rw-r--r--drivers/input/touchscreen/Makefile1
-rw-r--r--drivers/input/touchscreen/msg21xx_ts.c2260
-rw-r--r--drivers/iommu/arm-smmu.c29
-rw-r--r--drivers/iommu/io-pgtable-arm.c158
-rw-r--r--drivers/iommu/io-pgtable.h1
-rw-r--r--drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c32
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c22
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.c8
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp.h3
-rw-r--r--drivers/media/platform/msm/camera_v2/isp/msm_isp47.c23
-rw-r--r--drivers/media/platform/msm/camera_v2/msm.c20
-rw-r--r--drivers/media/platform/msm/camera_v2/msm.h4
-rw-r--r--drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c11
-rw-r--r--drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c43
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h3
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h3
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c24
-rw-r--r--drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h3
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_base.h1
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c39
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h3
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c37
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h1
-rw-r--r--drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c34
-rw-r--r--drivers/misc/hdcp.c3
-rw-r--r--drivers/misc/qseecom.c4
-rw-r--r--drivers/mmc/card/block.c12
-rw-r--r--drivers/mmc/core/core.c12
-rw-r--r--drivers/mmc/host/cmdq_hci.c74
-rw-r--r--drivers/mmc/host/cmdq_hci.h8
-rw-r--r--drivers/mmc/host/sdhci-msm.c11
-rw-r--r--drivers/net/ethernet/msm/msm_rmnet_mhi.c4
-rw-r--r--drivers/net/ppp/ppp_generic.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.c109
-rw-r--r--drivers/net/wireless/ath/ath10k/ce.h61
-rw-r--r--drivers/net/wireless/ath/ath10k/core.c2
-rw-r--r--drivers/net/wireless/ath/ath10k/core.h2
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.c54
-rw-r--r--drivers/net/wireless/ath/ath10k/hw.h57
-rw-r--r--drivers/net/wireless/ath/ath10k/snoc.c24
-rw-r--r--drivers/net/wireless/ath/wil6210/Makefile1
-rw-r--r--drivers/net/wireless/ath/wil6210/cfg80211.c82
-rw-r--r--drivers/net/wireless/ath/wil6210/debugfs.c135
-rw-r--r--drivers/net/wireless/ath/wil6210/ethtool.c10
-rw-r--r--drivers/net/wireless/ath/wil6210/interrupt.c30
-rw-r--r--drivers/net/wireless/ath/wil6210/main.c66
-rw-r--r--drivers/net/wireless/ath/wil6210/netdev.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/p2p.c36
-rw-r--r--drivers/net/wireless/ath/wil6210/pcie_bus.c57
-rw-r--r--drivers/net/wireless/ath/wil6210/pm.c17
-rw-r--r--drivers/net/wireless/ath/wil6210/pmc.c79
-rw-r--r--drivers/net/wireless/ath/wil6210/rx_reorder.c8
-rw-r--r--drivers/net/wireless/ath/wil6210/sysfs.c126
-rw-r--r--drivers/net/wireless/ath/wil6210/txrx.c75
-rw-r--r--drivers/net/wireless/ath/wil6210/wil6210.h4
-rw-r--r--drivers/net/wireless/ath/wil6210/wil_crash_dump.c18
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.c62
-rw-r--r--drivers/net/wireless/ath/wil6210/wmi.h46
-rw-r--r--drivers/pci/host/pci-msm.c14
-rw-r--r--drivers/phy/phy-qcom-ufs.c8
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.c56
-rw-r--r--drivers/pinctrl/qcom/pinctrl-msm.h1
-rw-r--r--drivers/platform/msm/Kconfig7
-rw-r--r--drivers/platform/msm/Makefile1
-rw-r--r--drivers/platform/msm/gsi/gsi.c72
-rw-r--r--drivers/platform/msm/gsi/gsi.h27
-rw-r--r--drivers/platform/msm/gsi/gsi_reg.h6
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_client.c42
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_i.h3
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c8
-rw-r--r--drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c58
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa.c42
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_client.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dma.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_dp.c2
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_flt.c3
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c26
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_uc.c6
-rw-r--r--drivers/platform/msm/ipa/ipa_v3/ipa_utils.c7
-rw-r--r--drivers/platform/msm/mhi/mhi.h154
-rw-r--r--drivers/platform/msm/mhi/mhi_bhi.c27
-rw-r--r--drivers/platform/msm/mhi/mhi_event.c63
-rw-r--r--drivers/platform/msm/mhi/mhi_hwio.h18
-rw-r--r--drivers/platform/msm/mhi/mhi_iface.c128
-rw-r--r--drivers/platform/msm/mhi/mhi_init.c109
-rw-r--r--drivers/platform/msm/mhi/mhi_isr.c329
-rw-r--r--drivers/platform/msm/mhi/mhi_macros.h13
-rw-r--r--drivers/platform/msm/mhi/mhi_main.c932
-rw-r--r--drivers/platform/msm/mhi/mhi_mmio_ops.c62
-rw-r--r--drivers/platform/msm/mhi/mhi_pm.c306
-rw-r--r--drivers/platform/msm/mhi/mhi_ring_ops.c12
-rw-r--r--drivers/platform/msm/mhi/mhi_ssr.c74
-rw-r--r--drivers/platform/msm/mhi/mhi_states.c785
-rw-r--r--drivers/platform/msm/mhi/mhi_sys.c127
-rw-r--r--drivers/platform/msm/mhi/mhi_sys.h4
-rw-r--r--drivers/platform/msm/mhi_uci/mhi_uci.c2
-rw-r--r--drivers/platform/msm/msm_ext_display.c (renamed from drivers/video/fbdev/msm/msm_ext_display.c)246
-rw-r--r--drivers/power/supply/qcom/battery.c61
-rw-r--r--drivers/power/supply/qcom/qpnp-fg.c32
-rw-r--r--drivers/power/supply/qcom/qpnp-smb2.c21
-rw-r--r--drivers/power/supply/qcom/smb-lib.c333
-rw-r--r--drivers/power/supply/qcom/smb-lib.h10
-rw-r--r--drivers/power/supply/qcom/smb-reg.h3
-rw-r--r--drivers/power/supply/qcom/smb1351-charger.c9
-rw-r--r--drivers/power/supply/qcom/smb138x-charger.c115
-rw-r--r--drivers/regulator/cpr4-mmss-ldo-regulator.c44
-rw-r--r--drivers/regulator/cprh-kbss-regulator.c121
-rw-r--r--drivers/regulator/qpnp-labibb-regulator.c59
-rw-r--r--drivers/regulator/qpnp-lcdb-regulator.c23
-rw-r--r--drivers/scsi/ufs/ufs-qcom.c179
-rw-r--r--drivers/scsi/ufs/ufs-qcom.h1
-rw-r--r--drivers/scsi/ufs/ufshcd.c8
-rw-r--r--drivers/sensors/sensors_ssc.c17
-rw-r--r--drivers/soc/qcom/Kconfig9
-rw-r--r--drivers/soc/qcom/Makefile1
-rw-r--r--drivers/soc/qcom/glink.c4
-rw-r--r--drivers/soc/qcom/glink_spi_xprt.c4
-rw-r--r--drivers/soc/qcom/msm_smem.c2
-rw-r--r--drivers/soc/qcom/peripheral-loader.c27
-rw-r--r--drivers/soc/qcom/pil-msa.c46
-rw-r--r--drivers/soc/qcom/pil-msa.h3
-rw-r--r--drivers/soc/qcom/pil-q6v5-mss.c42
-rw-r--r--drivers/soc/qcom/qdsp6v2/adsp-loader.c16
-rw-r--r--drivers/soc/qcom/qdsp6v2/apr_tal_glink.c1
-rw-r--r--drivers/soc/qcom/qdsp6v2/audio_notifier.c11
-rw-r--r--drivers/soc/qcom/qdsp6v2/cdsp-loader.c22
-rw-r--r--drivers/soc/qcom/qpnp-pbs.c361
-rw-r--r--drivers/soc/qcom/ramdump.c10
-rw-r--r--drivers/soc/qcom/rpm_rail_stats.c4
-rw-r--r--drivers/soc/qcom/service-locator.c5
-rw-r--r--drivers/soc/qcom/smcinvoke.c7
-rw-r--r--drivers/soc/qcom/spcom.c91
-rw-r--r--drivers/soc/qcom/spss_utils.c22
-rw-r--r--drivers/spi/spi_qsd.c55
-rw-r--r--drivers/thermal/lmh_lite.c9
-rw-r--r--drivers/thermal/msm_thermal.c3
-rw-r--r--drivers/thermal/thermal_core.c13
-rw-r--r--drivers/uio/msm_sharedmem/sharedmem_qmi.c17
-rw-r--r--drivers/usb/gadget/function/f_gsi.c39
-rw-r--r--drivers/usb/gadget/function/f_mass_storage.c8
-rw-r--r--drivers/usb/gadget/function/f_qc_ecm.c2
-rw-r--r--drivers/usb/gadget/function/f_qc_rndis.c2
-rw-r--r--drivers/usb/gadget/function/u_ether.c12
-rw-r--r--drivers/usb/gadget/function/u_qc_ether.c4
-rw-r--r--drivers/usb/pd/policy_engine.c16
-rw-r--r--drivers/usb/phy/phy-msm-qusb.c44
-rw-r--r--drivers/video/fbdev/msm/Kconfig2
-rw-r--r--drivers/video/fbdev/msm/Makefile1
-rw-r--r--drivers/video/fbdev/msm/mdss.h3
-rw-r--r--drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c2
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.c34
-rw-r--r--drivers/video/fbdev/msm/mdss_fb.h1
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.c172
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp.h2
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_ctl.c15
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c23
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_intf_video.c16
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_overlay.c46
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pipe.c5
-rw-r--r--drivers/video/fbdev/msm/mdss_mdp_pp.c8
-rw-r--r--drivers/video/fbdev/msm/mdss_smmu.c14
-rw-r--r--drivers/virtio/virtio_balloon.c51
-rw-r--r--fs/aio.c1
-rw-r--r--fs/block_dev.c7
-rw-r--r--fs/proc/base.c13
-rw-r--r--include/dt-bindings/clock/msm-clocks-8998.h4
-rw-r--r--include/dt-bindings/clock/msm-clocks-hwio-8998.h4
-rw-r--r--include/dt-bindings/clock/qcom,gcc-sdm660.h2
-rw-r--r--include/linux/balloon_compaction.h51
-rw-r--r--include/linux/compaction.h17
-rw-r--r--include/linux/fs.h2
-rw-r--r--include/linux/iommu.h1
-rw-r--r--include/linux/ksm.h3
-rw-r--r--include/linux/migrate.h17
-rw-r--r--include/linux/mm.h1
-rw-r--r--include/linux/mmzone.h6
-rw-r--r--include/linux/msm_ext_display.h2
-rw-r--r--include/linux/msm_gsi.h18
-rw-r--r--include/linux/msm_kgsl.h21
-rw-r--r--include/linux/msm_mhi.h7
-rw-r--r--include/linux/page-flags.h32
-rw-r--r--include/linux/power_supply.h7
-rw-r--r--include/linux/qpnp/qpnp-pbs.h25
-rw-r--r--include/linux/regulator/qpnp-labibb-regulator.h23
-rw-r--r--include/linux/sde_io_util.h113
-rw-r--r--include/linux/thermal.h1
-rw-r--r--include/linux/vm_event_item.h1
-rw-r--r--include/linux/zsmalloc.h4
-rw-r--r--include/trace/events/compaction.h55
-rw-r--r--include/uapi/drm/msm_drm.h29
-rw-r--r--include/uapi/linux/magic.h2
-rw-r--r--include/uapi/sound/compress_offload.h6
-rw-r--r--kernel/sched/core_ctl.c24
-rw-r--r--kernel/trace/trace.c20
-rw-r--r--mm/balloon_compaction.c94
-rw-r--r--mm/compaction.c313
-rw-r--r--mm/internal.h1
-rw-r--r--mm/ksm.c4
-rw-r--r--mm/memory_hotplug.c15
-rw-r--r--mm/migrate.c255
-rw-r--r--mm/page_alloc.c5
-rw-r--r--mm/util.c6
-rw-r--r--mm/vmpressure.c10
-rw-r--r--mm/vmscan.c182
-rw-r--r--mm/vmstat.c1
-rw-r--r--mm/zsmalloc.c1426
-rw-r--r--net/socket.c4
-rwxr-xr-xscripts/checkpatch.pl24
-rw-r--r--sound/soc/codecs/Kconfig2
-rw-r--r--sound/soc/codecs/msm_sdw/msm_sdw_cdc.c151
-rw-r--r--sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c55
-rw-r--r--sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c3
-rw-r--r--sound/soc/codecs/wsa881x-regmap.c3
-rw-r--r--sound/soc/codecs/wsa881x-temp-sensor.c10
-rw-r--r--sound/soc/codecs/wsa881x.c28
-rw-r--r--sound/soc/codecs/wsa881x.h1
-rw-r--r--sound/soc/msm/Kconfig16
-rw-r--r--sound/soc/msm/msm-cpe-lsm.c61
-rw-r--r--sound/soc/msm/msm-dai-fe.c2
-rw-r--r--sound/soc/msm/msm8998.c31
-rw-r--r--sound/soc/msm/qdsp6v2/Makefile1
-rw-r--r--sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c1092
-rw-r--r--sound/soc/msm/qdsp6v2/msm-lsm-client.c91
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c34
-rw-r--r--sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c17
-rw-r--r--sound/soc/msm/qdsp6v2/q6afe.c32
-rw-r--r--sound/soc/msm/sdm660-common.c383
-rw-r--r--sound/soc/msm/sdm660-ext-dai-links.c42
-rw-r--r--sound/soc/msm/sdm660-external.c2
-rw-r--r--sound/soc/msm/sdm660-internal.c31
437 files changed, 26706 insertions, 9000 deletions
diff --git a/Documentation/devicetree/bindings/clock/qcom,mmcc.txt b/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
index 4b1057288b2f..f7b1bfd257fd 100644
--- a/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
+++ b/Documentation/devicetree/bindings/clock/qcom,mmcc.txt
@@ -11,6 +11,7 @@ Required properties :
"qcom,mmcc-msm8974"
"qcom,mmcc-msm8996"
"qcom,mmcc-sdm660"
+ "qcom,mmcc-sdm630"
- reg : shall contain base register location and length
- #clock-cells : shall contain 1
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt
index 379ee2ea9a3d..a0615ac9d73e 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt
@@ -3,6 +3,7 @@ Qualcomm adreno/snapdragon hdmi output
Required properties:
- compatible: one of the following
* "qcom,hdmi-tx-8996"
+ * "qcom,hdmi-tx-8998"
* "qcom,hdmi-tx-8994"
* "qcom,hdmi-tx-8084"
* "qcom,hdmi-tx-8974"
@@ -21,6 +22,7 @@ Required properties:
Optional properties:
- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
+- qcom,hdmi-tx-hpd5v-gpio: hdmi 5v boost pin
- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
- power-domains: reference to the power domain(s), if available.
- pinctrl-names: the pin control state names; should contain "default"
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt b/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt
new file mode 100644
index 000000000000..aaa3722659ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt
@@ -0,0 +1,59 @@
+Qualcomm Technologies, Inc. Adreno/Snapdragon hdmi display manager
+
+Required properties:
+- compatible: "qcom,hdmi-display"
+- label: label of this display manager
+
+Optional properties:
+- qcom,display-type: display type of this manager. It could be "primary",
+ "secondary", "tertiary", etc.
+- qcom,non-pluggable: Boolean to indicate if display is non pluggable.
+- qcom,customize-modes: Customized modes when it's non pluggable display.
+- qcom,customize-mode-id: Customized mode node.
+- qcom,mode-name: String which indicates the mode name which shall be used
+ by the connector in non pluggable mode. Refer the example below for details.
+ In pluggable mode, the modes shall be filled up
+ after edid parsing.
+- qcom,mode-h-active: Horizontal active pixels for this mode.
+- qcom,mode-h-front-porch: Horizontal front porch in pixels for this mode.
+- qcom,mode-h-pulse-width: Horizontal sync width in pixels for this mode.
+- qcom,mode-h-back-porch: Horizontal back porch in pixels for this mode.
+- qcom,mode-h-active-high: Boolean to indicate if mode horizontal polarity is active high.
+- qcom,mode-v-active: Vertical active lines for this mode.
+- qcom,mode-v-front-porch: Vertical front porch in lines for this mode.
+- qcom,mode-v-pulse-width: Vertical sync width in lines for this mode.
+- qcom,mode-v-back-porch: Vertical back porch in lines for this mode.
+- qcom,mode-v-active-high: Boolean to indicate if mode vertical polarity is active high.
+- qcom,mode-refersh-rate: Mode refresh rate in hertz.
+- qcom,mode-clock-in-khz: Mode pixel clock in KHz.
+
+Example:
+
+/ {
+ ...
+
+ hdmi_display: qcom,hdmi-display {
+ compatible = "qcom,hdmi-display";
+ label = "hdmi_display";
+ qcom,display-type = "secondary";
+ qcom,non-pluggable;
+ qcom,customize-modes {
+ qcom,customize-mode-id@0 {
+ qcom,mode-name = "3840x2160@30Hz";
+ qcom,mode-h-active = <3840>;
+ qcom,mode-h-front-porch = <176>;
+ qcom,mode-h-pulse-width = <88>;
+ qcom,mode-h-back-porch = <296>;
+ qcom,mode-h-active-high;
+ qcom,mode-v-active = <2160>;
+ qcom,mode-v-front-porch = <8>;
+ qcom,mode-v-pulse-width = <10>;
+ qcom,mode-v-back-porch = <72>;
+ qcom,mode-v-active-high;
+ qcom,mode-refersh-rate = <30>;
+ qcom,mode-clock-in-khz = <297000>;
+ };
+ };
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/input/touchscreen/msg21xx-ts.txt b/Documentation/devicetree/bindings/input/touchscreen/msg21xx-ts.txt
deleted file mode 100644
index 7315aef62493..000000000000
--- a/Documentation/devicetree/bindings/input/touchscreen/msg21xx-ts.txt
+++ /dev/null
@@ -1,71 +0,0 @@
-Mstar touch controller
-
-The mstar controller is connected to host processor
-via i2c. The controller generates interrupts when the
-user touches the panel. The host controller is expected
-to read the touch coordinates over i2c and pass the coordinates
-to the rest of the system.
-
-Required properties:
-
- - compatible : should be "mstar,msg21xx".
- - reg : i2c slave address of the device.
- - interrupt-parent : parent of interrupt.
- - interrupts : touch sample interrupt to indicate presense or release
- of fingers on the panel.
- - vdd-supply : Power supply needed to power up the device.
- - vcc_i2c-supply : Power source required to power up i2c bus.
- - mstar,irq-gpio : irq gpio which is to provide interrupts to host,
- same as "interrupts" node. It will also
- contain active low or active high information.
- - mstar,reset-gpio : reset gpio to control the reset of chip.
- - mstar,display-coords : display coords in pixels. It is a four
- tuple consisting of min x, min y, max x and
- max y values.
- - pinctrl-names : This should be defined if a target uses pinctrl framework.
- See "pinctrl" in Documentation/devicetree/bindings/pinctrl/msm-pinctrl.txt.
- Specify the names of the configs that pinctrl can install in driver.
- Following are the pinctrl configs that can be installed:
- "pmx_ts_active" : Active configuration of pins, this should specify active
- config defined in pin groups of interrupt and reset gpio.
- "pmx_ts_suspend" : Disabled configuration of pins, this should specify sleep
- config defined in pin groups of interrupt and reset gpio.
- "pmx_ts_release" : Release configuration of pins, this should specify
- release config defined in pin groups of interrupt and reset gpio.
- - mstar,num-max-touches: It defines the maximum number of touch supported by the controller.
- - mstar,hard-reset-delay-ms : hard reset delay in ms
- - mstar,post-hard-reset-delay-ms : post hard reset delay in ms
-
-Optional properties:
-
- - mstar,button-map : button map of key codes. It is a three tuple consisting of key codes.
- - mstar,panel-coords : panel coords for the chip in pixels.
- It is a four tuple consisting of min x,
- min y, max x and max y values.
- - mstar,ic-type : It defines the ic-type of the controller. Values are as folows:
- 1 -> msg2133.
- 2 -> msg21xxA.
- 3 -> msg26xxM.
-
-Example:
- i2c@78b9000 { /* BLSP1 QUP5 */
- mstar@26 {
- compatible = "mstar,msg21xx";
- reg = <0x26>;
- interrupt-parent = <&msm_gpio>;
- interrupts = <13 0x2008>;
- mstar,irq-gpio = <&msm_gpio 13 0x00000001>;
- mstar,reset-gpio = <&msm_gpio 12 0x0>;
- vdd-supply = <&pm8916_l17>;
- vcc_i2c-supply = <&pm8916_l6>;
- mstar,display-coords = <0 0 480 854>;
- pinctrl-names = "pmx_ts_active","pmx_ts_suspend";
- pinctrl-0 = <&ts_int_active &ts_reset_active>;
- pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
- mstar,button-map = <172 139 158>;
- mstar,ic-type = <2>;
- mstar,num_max_touches = <2>;
- mstar,hard-reset-delay-ms = <100>;
- mstar,post-hard-reset-delay-ms = <100>;
- };
- };
diff --git a/Documentation/devicetree/bindings/media/video/laser-sensor.txt b/Documentation/devicetree/bindings/media/video/laser-sensor.txt
new file mode 100644
index 000000000000..1bcb0b93cb10
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/laser-sensor.txt
@@ -0,0 +1,28 @@
+Laser Sensor Device Tree Bindings.
+========================================
+
+Boards with the Laser Sensor connected to CCI shall have the following
+properties:
+
+Required node properties:
+ - cell-index: cci hardware core index
+ - compatible:
+	- "st,stmvl53l0" : STMicroelectronics VL53L0 Laser sensor.
+ - reg : offset and length of the register set for the device
+ - qcom, cci-master: cci master the sensor connected to
+ - cam_cci-supply : cci voltage regulator used
+ - cam_laser-supply: laser sensor voltage regulator
+ - qcom,cam-vreg-name: voltage regulators name
+ - qcom, cam-vreg-min-voltage: specify minimum voltage level for
+ regulators used
+ - qcom, cam-vreg-max-voltage: specify maximum voltage level for
+ regulators used
+ - pinctrl-names : should specify the pin control groups followed by
+ the definition of each group
+ - gpios : should contain phandle to gpio controller node and array of
+ #gpio-cells specifying specific gpio (controller specific)
+ - qcom,gpio-req-tbl-num : contains index to gpios specific to the sensor
+ - qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+ - qcom,gpio-req-tbl-label : should contain name of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
diff --git a/Documentation/devicetree/bindings/media/video/msm-cam.txt b/Documentation/devicetree/bindings/media/video/msm-cam.txt
index 9763bf8e4051..371447cfb64e 100644
--- a/Documentation/devicetree/bindings/media/video/msm-cam.txt
+++ b/Documentation/devicetree/bindings/media/video/msm-cam.txt
@@ -6,10 +6,14 @@ Required properties:
- reg : offset and length of msm camera device registers.
- reg-names : should specify relevant names for each reg property defined.
+Optional properties:
+- qcom,gpu-limit : valid kgsl frequency.
+
Example:
qcom,msm-cam@fd8c0000 {
compatible = "qcom,msm-cam";
reg = <0xfd8C0000 0x10000>;
reg-names = "msm-cam";
+ qcom,gpu-limit = <700000000>;
};
diff --git a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
index be5633024986..47a6fdd300ca 100644
--- a/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
+++ b/Documentation/devicetree/bindings/pil/pil-q6v5-mss.txt
@@ -89,6 +89,13 @@ Optional properties:
- qcom,cx-ipeak-vote: Boolean- Present if we need to set bit 5 of cxip_lm_vote_clear
during modem shutdown
+One child node to represent the MBA image may be specified, when the MBA image
+needs to be loaded in a specifically carved out memory region.
+
+Required properties:
+- compatible: Must be "qcom,pil-mba-mem"
+- memory-region: A phandle that points to a reserved memory where the MBA image will be loaded.
+
Example:
qcom,mss@fc880000 {
compatible = "qcom,pil-q6v5-mss";
@@ -128,4 +135,9 @@ Example:
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
qcom,ssctl-instance-id = <12>;
qcom,sysmon-id = <0>;
+
+ qcom,mba-mem@0 {
+ compatible = "qcom,pil-mba-mem";
+ memory-region = <&peripheral_mem>;
+ };
};
diff --git a/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
index ab0ac32e444e..c200f9423384 100644
--- a/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
+++ b/Documentation/devicetree/bindings/power/supply/qcom/smb1351-charger.txt
@@ -69,6 +69,8 @@ Optional Properties:
via pin in a parallel-charger configuration.
0 - Active low and 1 - Active high.
If not specified the default value is active-low.
+- qcom,parallel-external-current-sense If present specifies external rsense is
+ used for charge current sensing.
Example for standalone charger:
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
index d08ca957c954..c9cfc889faba 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
@@ -149,6 +149,8 @@ LAB subnode optional properties:
already. If it is not specified, then
output voltage can be configured to
any value in the allowed limit.
+- qcom,notify-lab-vreg-ok-sts: A boolean property which upon set will
+ poll and notify the lab_vreg_ok status.
Following properties are available only for PM660A:
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
index 8b3a38da0834..63da8ecbfa4c 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
@@ -26,6 +26,13 @@ First Level Node - LCDB module
Value type: <prop-encoded-array>
Definition: Base address of the LCDB SPMI peripheral.
+- qcom,force-module-reenable
+ Usage: required if using SW mode for module enable
+ Value type: <bool>
+ Definition: This enables the workaround to force enable
+ the vph_pwr_2p5_ok signal required for
+ turning on the LCDB module.
+
Touch-to-wake (TTW) properties:
TTW supports 2 modes of operation - HW and SW. In the HW mode the enable/disable
@@ -59,7 +66,6 @@ main node.
Definition: ON time (in mS) for the VDISP/VDISN signals.
Possible values are 4, 8, 16, 32.
-
========================================
Second Level Nodes - LDO/NCP/BOOST block
========================================
diff --git a/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
new file mode 100644
index 000000000000..d7aefbf9c19c
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
@@ -0,0 +1,30 @@
+QPNP PBS
+
+QPNP (Qualcomm Technologies, Inc. Plug N Play) PBS is a programmable boot sequence,
+and this driver helps client drivers trigger such a sequence
+to be configured in PMIC.
+
+This document describes the bindings for QPNP PBS driver.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: should be "qcom,qpnp-pbs".
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Base address of the PBS registers.
+
+
+=======
+Example
+=======
+ pm660l_pbs: qcom,pbs@7300 {
+ compatible = "qcom,qpnp-pbs";
+ reg = <0x7300 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index b7a81efb768c..5b5d43a6323c 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -514,10 +514,6 @@ Required properties:
which is also existing driver WSA881x that represents
soundwire slave devices.
-Optional Properties:
- - qcom,cache-always : Boolean. This property is used in WSA slave
- device to use cacheable for all registers.
-
Example:
msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
@@ -535,7 +531,6 @@ msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
compatible = "qcom,wsa881x";
reg = <0x00 0x20170212>;
qcom,spkr-sd-n-gpio = <&tlmm 80 0>;
- qcom,cache-always;
};
};
};
diff --git a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
index a25961c6e7de..8b99dbce871b 100644
--- a/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
+++ b/Documentation/devicetree/bindings/ufs/ufshcd-pltfrm.txt
@@ -141,6 +141,9 @@ enabled and functional in the driver:
- qcom,pm-qos-default-cpu: PM QoS voting is based on the cpu associated with each IO request by the block layer.
This defined the default cpu used for PM QoS voting in case a specific cpu value is not available.
+- qcom,vddp-ref-clk-supply : reference clock to ufs device. Controlled by the host driver.
+- qcom,vddp-ref-clk-max-microamp : specifies max. load that can be drawn for
+ ref-clk supply.
Example:
ufshc@0xfc598000 {
...
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 06d443450f21..539ac9c78ccf 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -197,7 +197,9 @@ prototypes:
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+ bool (*isolate_page) (struct page *, isolate_mode_t);
int (*migratepage)(struct address_space *, struct page *, struct page *);
+ void (*putback_page) (struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
@@ -221,7 +223,9 @@ invalidatepage: yes
releasepage: yes
freepage: yes
direct_IO:
+isolate_page: yes
migratepage: yes (both)
+putback_page: yes
launder_page: yes
is_partially_uptodate: yes
error_remove_page: yes
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 8c6f07ad373a..66ddf1e8fe28 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -593,9 +593,14 @@ struct address_space_operations {
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+ /* isolate a page for migration */
+ bool (*isolate_page) (struct page *, isolate_mode_t);
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct page *, struct page *);
+ /* put migration-failed page back to right list */
+ void (*putback_page) (struct page *);
int (*launder_page) (struct page *);
+
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
void (*is_dirty_writeback) (struct page *, bool *, bool *);
@@ -748,6 +753,10 @@ struct address_space_operations {
and transfer data directly between the storage and the
application's address space.
+ isolate_page: Called by the VM when isolating a movable non-lru page.
+ If page is successfully isolated, VM marks the page as PG_isolated
+ via __SetPageIsolated.
+
migrate_page: This is used to compact the physical memory usage.
If the VM wants to relocate a page (maybe off a memory card
that is signalling imminent failure) it will pass a new page
@@ -755,6 +764,8 @@ struct address_space_operations {
transfer any private data across and update any references
that it has to the page.
+ putback_page: Called by the VM when isolated page's migration fails.
+
launder_page: Called before freeing a page - it writes back the dirty page. To
prevent redirtying the page, it is kept locked during the whole
operation.
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index fea5c0864170..94bd9c11c4e0 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -142,5 +142,111 @@ Steps:
20. The new page is moved to the LRU and can be scanned by the swapper
etc again.
-Christoph Lameter, May 8, 2006.
+C. Non-LRU page migration
+-------------------------
+
+Although original migration aimed for reducing the latency of memory access
+for NUMA, compaction who want to create high-order page is also main customer.
+
+Current problem of the implementation is that it is designed to migrate only
+*LRU* pages. However, there are potential non-lru pages which can be migrated
+in drivers, for example, zsmalloc, virtio-balloon pages.
+
+For virtio-balloon pages, some parts of migration code path have been hooked
+up and added virtio-balloon specific functions to intercept migration logics.
+It's too specific to a driver so other drivers who want to make their pages
+movable would have to add own specific hooks in migration path.
+
+To overcome the problem, VM supports non-LRU page migration which provides
+generic functions for non-LRU movable pages without driver specific hooks
+in the migration path.
+
+If a driver wants to make its own pages movable, it should define three functions
+which are function pointers of struct address_space_operations.
+
+1. bool (*isolate_page) (struct page *page, isolate_mode_t mode);
+
+What VM expects on isolate_page function of driver is to return *true*
+if driver isolates page successfully. On returning true, VM marks the page
+as PG_isolated so concurrent isolation in several CPUs skip the page
+for isolation. If a driver cannot isolate the page, it should return *false*.
+
+Once page is successfully isolated, VM uses page.lru fields so driver
+shouldn't expect to preserve values in those fields.
+
+2. int (*migratepage) (struct address_space *mapping,
+ struct page *newpage, struct page *oldpage, enum migrate_mode);
+
+After isolation, VM calls migratepage of driver with isolated page.
+The function of migratepage is to move content of the old page to new page
+and set up fields of struct page newpage. Keep in mind that you should
+indicate to the VM the oldpage is no longer movable via __ClearPageMovable()
+under page_lock if you migrated the oldpage successfully and return
+MIGRATEPAGE_SUCCESS. If driver cannot migrate the page at the moment, driver
+can return -EAGAIN. On -EAGAIN, VM will retry page migration in a short time
+because VM interprets -EAGAIN as "temporary migration failure". On returning
+any error except -EAGAIN, VM will give up the page migration without retrying
+in this time.
+
+Driver shouldn't touch the page.lru field the VM uses in these functions.
+
+3. void (*putback_page)(struct page *);
+
+If migration fails on isolated page, VM should return the isolated page
+to the driver so VM calls driver's putback_page with migration failed page.
+In this function, driver should put the isolated page back to the own data
+structure.
+4. non-lru movable page flags
+
+There are two page flags for supporting non-lru movable page.
+
+* PG_movable
+
+Driver should use the below function to make page movable under page_lock.
+
+ void __SetPageMovable(struct page *page, struct address_space *mapping)
+
+It needs argument of address_space for registering migration family functions
+which will be called by VM. Exactly speaking, PG_movable is not a real flag of
+struct page. Rather, VM reuses page->mapping's lower bits to represent it.
+
+ #define PAGE_MAPPING_MOVABLE 0x2
+ page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
+
+so driver shouldn't access page->mapping directly. Instead, driver should
+use page_mapping, which masks off the low two bits of page->mapping under
+page lock so it can get right struct address_space.
+
+For testing of non-lru movable page, VM supports __PageMovable function.
+However, it doesn't guarantee to identify non-lru movable page because
+page->mapping field is unified with other variables in struct page.
+As well, if driver releases the page after isolation by VM, page->mapping
+doesn't have stable value although it has PAGE_MAPPING_MOVABLE
+(Look at __ClearPageMovable). But __PageMovable is cheap to catch whether
+page is LRU or non-lru movable once the page has been isolated. Because
+LRU pages never can have PAGE_MAPPING_MOVABLE in page->mapping. It is also
+good for just peeking to test non-lru movable pages before more expensive
+checking with lock_page in pfn scanning to select victim.
+
+For guaranteeing non-lru movable page, VM provides PageMovable function.
+Unlike __PageMovable, the PageMovable function validates page->mapping and
+mapping->a_ops->isolate_page under lock_page. The lock_page prevents sudden
+destroying of page->mapping.
+
+Driver using __SetPageMovable should clear the flag via __ClearPageMovable
+under page_lock before releasing the page.
+
+* PG_isolated
+
+To prevent concurrent isolation among several CPUs, VM marks isolated page
+as PG_isolated under lock_page. So if a CPU encounters PG_isolated non-lru
+movable page, it can skip it. Driver doesn't need to manipulate the flag
+because VM will set/clear it automatically. Keep in mind that if driver
+sees PG_isolated page, it means the page has been isolated by VM so it
+shouldn't touch page.lru field.
+PG_isolated is an alias of the PG_reclaim flag so driver shouldn't use the flag
+for own purpose.
+
+Christoph Lameter, May 8, 2006.
+Minchan Kim, Mar 28, 2016.
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index d572568eb94e..db37dc6f31bb 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -164,6 +164,7 @@ dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
sdm660-headset-jacktype-no-rcm.dtb \
sdm660-pm660a-headset-jacktype-no-cdp.dtb \
sdm660-pm660a-headset-jacktype-no-rcm.dtb \
+ sdm660-usbc-audio-mtp.dtb \
sdm658-mtp.dtb \
sdm658-cdp.dtb \
sdm658-rcm.dtb \
@@ -188,6 +189,7 @@ dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
sdm630-pm660a-rumi.dtb \
sdm630-mtp.dtb \
+ sdm630-usbc-audio-mtp.dtb \
sdm630-cdp.dtb \
sdm630-rcm.dtb \
sdm630-internal-codec-mtp.dtb \
@@ -205,7 +207,11 @@ dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
sda630-rcm.dtb \
sda630-pm660a-mtp.dtb \
sda630-pm660a-cdp.dtb \
- sda630-pm660a-rcm.dtb
+ sda630-pm660a-rcm.dtb \
+ sdm630-headset-jacktype-no-cdp.dtb \
+ sdm630-headset-jacktype-no-rcm.dtb \
+ sdm630-pm660a-headset-jacktype-no-cdp.dtb \
+ sdm630-pm660a-headset-jacktype-no-rcm.dtb
ifeq ($(CONFIG_ARM64),y)
always := $(dtb-y)
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts
index b434140dc88d..5e2886f7e615 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts
index d017949248a5..2ff517a2521b 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts
@@ -78,8 +78,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts
index 2fbb98c26e2c..0b74b1b2ca6c 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts
index eae754dbd3c5..1c9d4ea56d47 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts
index 17d7717587f2..7c58c05f5c61 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts
index ab51e673bc7d..71ef7617b47f 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts
@@ -80,8 +80,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts
index 3e4d11122e01..4d136091806a 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts
@@ -80,8 +80,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts
index c5c1fe0115f7..8fda04fbfe92 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts
@@ -80,8 +80,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
index daf43d49e079..00e3d0e42427 100644
--- a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
+++ b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
@@ -26,9 +26,14 @@
};
&mdss_mdp {
+ status = "disabled";
qcom,mdss-pref-prim-intf = "hdmi";
};
+&sde_hdmi {
+ qcom,display-type = "primary";
+};
+
&slim_aud {
tasha_codec {
wsa_spkr_sd1: msm_cdc_pinctrll {
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
index e3f60de3c3eb..49afd34c50e7 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-nt35695b-truly-fhd-cmd.dtsi
@@ -185,5 +185,6 @@
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-tx-eot-append;
qcom,mdss-dsi-post-init-delay = <1>;
+ qcom,ulps-enabled;
};
};
diff --git a/arch/arm/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi b/arch/arm/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi
index 51bb891f98d6..42e44cf838ab 100644
--- a/arch/arm/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi
+++ b/arch/arm/boot/dts/qcom/dsi-panel-truly-1080p-cmd.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -90,5 +90,6 @@
qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
qcom,mdss-dsi-reset-sequence = <1 10>, <0 10>, <1 10>;
qcom,mdss-dsi-post-init-delay = <1>;
+ qcom,ulps-enabled;
};
};
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index a8a174300c7e..42cf30c789d9 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -102,6 +102,11 @@
compatible = "qcom,msm-pcm-afe";
};
+ dai_dp: qcom,msm-dai-q6-dp {
+ compatible = "qcom,msm-dai-q6-hdmi";
+ qcom,msm-dai-q6-dev-id = <24608>;
+ };
+
loopback: qcom,msm-pcm-loopback {
compatible = "qcom,msm-pcm-loopback";
};
@@ -592,6 +597,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
qcom,audio-routing =
"AIF4 VI", "MCLK",
@@ -640,7 +646,8 @@
"msm-pcm-routing", "msm-cpe-lsm",
"msm-compr-dsp", "msm-pcm-dsp-noirq",
"msm-cpe-lsm.3";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+ <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
@@ -657,7 +664,8 @@
<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+ "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
@@ -679,8 +687,9 @@
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912",
"msm-dai-q6-tdm.36913";
- asoc-codec = <&stub_codec>;
- asoc-codec-names = "msm-stub-codec.1";
+ asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+ asoc-codec-names = "msm-stub-codec.1",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
<&wsa881x_213>, <&wsa881x_214>;
@@ -694,6 +703,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
qcom,audio-routing =
"AIF4 VI", "MCLK",
@@ -728,6 +738,8 @@
qcom,hph-en0-gpio = <&tavil_hph_en0>;
qcom,hph-en1-gpio = <&tavil_hph_en1>;
qcom,msm-mclk-freq = <9600000>;
+ qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
+ qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -739,7 +751,8 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-cpe-lsm",
"msm-compr-dsp", "msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+ <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
@@ -756,7 +769,8 @@
<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+ "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
@@ -778,8 +792,9 @@
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912",
"msm-dai-q6-tdm.36913";
- asoc-codec = <&stub_codec>;
- asoc-codec-names = "msm-stub-codec.1";
+ asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+ asoc-codec-names = "msm-stub-codec.1",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
<&wsa881x_0213>, <&wsa881x_0214>;
@@ -794,6 +809,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
qcom,msm-mclk-freq = <9600000>;
qcom,msm-mbhc-hphl-swh = <1>;
@@ -823,7 +839,13 @@
"DMIC4", "MIC BIAS External",
"MIC BIAS External", "Digital Mic4",
"SpkrLeft IN", "SPK1 OUT",
- "SpkrRight IN", "SPK2 OUT";
+ "SpkrRight IN", "SPK2 OUT",
+ "PDM_IN_RX1", "PDM_OUT_RX1",
+ "PDM_IN_RX2", "PDM_OUT_RX2",
+ "PDM_IN_RX3", "PDM_OUT_RX3",
+ "ADC1_IN", "ADC1_OUT",
+ "ADC2_IN", "ADC2_OUT",
+ "ADC3_IN", "ADC3_OUT";
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
@@ -836,7 +858,8 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+ <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
<&dai_int_mi2s0>, <&dai_int_mi2s1>,
<&dai_int_mi2s2>, <&dai_int_mi2s3>,
@@ -853,7 +876,8 @@
<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+ "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
"msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
@@ -872,9 +896,11 @@
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
asoc-codec = <&stub_codec>, <&msm_digital_codec>,
- <&pmic_analog_codec>, <&msm_sdw_codec>;
+ <&pmic_analog_codec>, <&msm_sdw_codec>,
+ <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
- "analog-codec", "msm_sdw_codec";
+ "analog-codec", "msm_sdw_codec",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
diff --git a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
index cdb3662a1a56..20016c7ac6ec 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
@@ -389,6 +389,7 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0xec00 0x100>;
+ qcom,force-module-reenable;
lcdb_ldo_vreg: ldo {
label = "ldo";
diff --git a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
index 8edc1fc61830..ea4f05069aab 100644
--- a/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-smb138x.dtsi
@@ -95,18 +95,22 @@
#address-cells = <1>;
#size-cells = <1>;
interrupt-parent = <&smb138x>;
- io-channels = <&smb138x_tadc 2>,
- <&smb138x_tadc 3>,
- <&smb138x_tadc 14>,
- <&smb138x_tadc 15>,
- <&smb138x_tadc 16>,
- <&smb138x_tadc 17>;
- io-channel-names = "charger_temp",
- "batt_i",
- "connector_temp_thr1",
- "connector_temp_thr2",
- "connector_temp_thr3",
- "charger_temp_max";
+ io-channels =
+ <&smb138x_tadc 1>,
+ <&smb138x_tadc 2>,
+ <&smb138x_tadc 3>,
+ <&smb138x_tadc 14>,
+ <&smb138x_tadc 15>,
+ <&smb138x_tadc 16>,
+ <&smb138x_tadc 17>;
+ io-channel-names =
+ "connector_temp",
+ "charger_temp",
+ "batt_i",
+ "connector_temp_thr1",
+ "connector_temp_thr2",
+ "connector_temp_thr3",
+ "charger_temp_max";
qcom,chgr@1000 {
reg = <0x1000 0x100>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi
index 2cb08e1709a5..e7a61f42dff1 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -78,6 +78,26 @@
status = "disabled";
};
+ tof0:qcom,tof@0{
+ cell-index = <0>;
+ reg = <0x29>;
+ compatible = "st,stmvl53l0";
+ qcom,cci-master = <0>;
+ cam_cci-supply = <&pm8998_lvs1>;
+ cam_laser-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_cci", "cam_laser";
+ qcom,cam-vreg-min-voltage = <0 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_tof_active>;
+ pinctrl-1 = <&cam_tof_suspend>;
+ gpios = <&tlmm 27 0>,
+ <&tlmm 126 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <0 0>;
+ qcom,gpio-req-tbl-label = "CAM_TOF", "CAM_CE";
+ };
+
eeprom0: qcom,eeprom@0 {
cell-index = <0>;
reg = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
index 0a41383ba874..c5384eaf17a1 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -61,7 +61,24 @@
pinctrl-0 = <&cam_actuator_vaf_active>;
pinctrl-1 = <&cam_actuator_vaf_suspend>;
};
-
+ tof0:qcom,tof@0{
+ cell-index = <0>;
+ reg = <0x29>;
+ compatible = "st,stmvl53l0";
+ qcom,cci-master = <0>;
+ cam_cci-supply = <&pm8998_lvs1>;
+ cam_laser-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_cci", "cam_laser";
+ qcom,cam-vreg-min-voltage = <0 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_tof_active>;
+ pinctrl-1 = <&cam_tof_suspend>;
+ gpios = <&tlmm 27 0>, <&tlmm 126 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <0 0>;
+ qcom,gpio-req-tbl-label = "CAM_TOF", "CAM_CE";
+ };
ois0: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
new file mode 100644
index 000000000000..4b3748e5a40a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-camera-sensor-skuk-evt3.dtsi"
+
+&tlmm{
+ cam_sensor_mclk1_active: cam_sensor_mclk1_active {
+ /* MCLK1 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ cam_sensor_mclk2_active: cam_sensor_mclk2_active {
+ /* MCLK2 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-disable; /* No PULL */
+ drive-strength = <8>; /* 2 MA */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi
index c9522154ae7d..f4ff7c401a98 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi
@@ -55,6 +55,13 @@
/delete-property/gpios;
};
+&tof0 {
+ /delete-property/cam_cci-supply;
+ /delete-property/cam_laser-supply;
+ /delete-property/gpios;
+};
+
+
&cci {
/delete-node/qcom,camera@1;
/delete-node/qcom,camera@2;
diff --git a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
index 19b227f1b60f..4914363b414a 100644
--- a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
@@ -954,6 +954,32 @@
};
};
+ cam_tof_active: cam_tof_active {
+ mux {
+ pins = "gpio27", "gpio126";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27", "gpio126";
+ bias-disable;
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_tof_suspend: cam_tof_suspend {
+ mux {
+ pins = "gpio27", "gpio126";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27", "gpio126";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
cam_sensor_mclk0_active: cam_sensor_mclk0_active {
/* MCLK0 */
mux {
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
index 3c1c49edcc82..aa95872da385 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
@@ -156,6 +156,25 @@
};
};
+&i2c_5 {
+ status = "okay";
+ st_fts@49 {
+ compatible = "st,fts";
+ reg = <0x49>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <125 0x2008>;
+ vdd-supply = <&pm8998_l6>;
+ avdd-supply = <&pm8998_l28>;
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ st,irq-gpio = <&tlmm 125 0x2008>;
+ st,reset-gpio = <&tlmm 89 0x00>;
+ st,regulator_dvdd = "vdd";
+ st,regulator_avdd = "avdd";
+ };
+};
+
&soc {
gpio_keys {
compatible = "gpio-keys";
@@ -448,3 +467,7 @@
qcom,mdss-dsi-bl-max-level = <255>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
+
+&wil6210 {
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi
new file mode 100644
index 000000000000..6098a96db206
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+ sde_wb: qcom,wb-display@0 {
+ compatible = "qcom,wb-display";
+ cell-index = <0>;
+ label = "wb_display";
+ };
+
+ msm_ext_disp: qcom,msm_ext_disp {
+ compatible = "qcom,msm-ext-disp";
+
+ ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx {
+ compatible = "qcom,msm-ext-disp-audio-codec-rx";
+ qcom,msm_ext_disp = <&msm_ext_disp>;
+ };
+ };
+
+ sde_hdmi: qcom,hdmi-display {
+ compatible = "qcom,hdmi-display";
+ label = "sde_hdmi";
+ qcom,display-type = "secondary";
+ qcom,msm_ext_disp = <&msm_ext_disp>;
+ };
+
+};
+
+&sde_kms {
+ connectors = <&sde_hdmi_tx &sde_hdmi &sde_wb>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-sde.dtsi b/arch/arm/boot/dts/qcom/msm8998-sde.dtsi
new file mode 100644
index 000000000000..ec70d0367160
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-sde.dtsi
@@ -0,0 +1,215 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ sde_kms: qcom,sde_kms@c900000 {
+ compatible = "qcom,sde-kms";
+ reg = <0x0c900000 0x90000>,
+ <0x0c9b0000 0x1040>;
+ reg-names = "mdp_phys", "vbif_phys";
+
+ /* clock and supply entries */
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+ <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+ <&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+ <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_axi_clk>,
+ <&clock_mmss clk_mdp_clk_src>,
+ <&clock_mmss clk_mmss_mdss_mdp_clk>,
+ <&clock_mmss clk_mmss_mdss_vsync_clk>,
+ <&clock_mmss clk_mmss_mdss_mdp_lut_clk>;
+ clock-names = "mmss_noc_axi_clk",
+ "mmss_noc_ahb_clk",
+ "mmss_smmu_ahb_clk",
+ "mmss_smmu_axi_clk",
+ "mnoc_clk", "iface_clk", "bus_clk",
+ "core_clk_src", "core_clk", "vsync_clk",
+ "lut_clk";
+ clock-rate = <0 0 0 0 0 0 0 330000000 0 0 0 0>;
+ clock-max-rate = <0 0 0 0 0 0 412500000 412500000 0 0 0 0>;
+ qcom,sde-max-bw-low-kbps = <6700000>;
+ qcom,sde-max-bw-high-kbps = <6700000>;
+
+ /* interrupt config */
+ interrupt-parent = <&intc>;
+ interrupts = <0 83 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ iommus = <&mmss_smmu 0>;
+
+ /* hw blocks */
+ qcom,sde-off = <0x1000>;
+ qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+ 0x2600 0x2800>;
+ qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+ 0x48000 0x49000 0x4a000>;
+ qcom,sde-dspp-off = <0x55000 0x57000>;
+ qcom,sde-wb-off = <0x66000>;
+ qcom,sde-wb-id = <2>;
+ qcom,sde-wb-xin-id = <6>;
+ qcom,sde-wb-clk-ctrl = <0x2bc 0x10>;
+ qcom,sde-intf-off = <0x6b000 0x6b800
+ 0x6c000 0x6c800>;
+ qcom,sde-intf-type = "dp", "dsi", "dsi", "hdmi";
+ qcom,sde-pp-off = <0x71000 0x71800
+ 0x72000 0x72800>;
+ qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0>;
+ qcom,sde-cdm-off = <0x7a200>;
+ qcom,sde-dsc-off = <0x10000 0x10000 0x0 0x0>;
+ qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+ qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
+ "dma", "dma", "dma", "dma",
+ "cursor", "cursor";
+
+ qcom,sde-sspp-off = <0x5000 0x7000 0x9000 0xb000
+ 0x25000 0x27000 0x29000 0x2b000
+ 0x35000 0x37000>;
+
+ qcom,sde-sspp-xin-id = <0 4 8 12 1 5 9 13 2 10>;
+
+ /* offsets are relative to "mdp_phys" + "qcom,sde-off" */
+ qcom,sde-sspp-clk-ctrl = <0x2ac 0x8>, <0x2b4 0x8>,
+ <0x2c4 0x8>, <0x2c4 0xc>, <0x3a8 0x10>,
+ <0x3b0 0x10>;
+
+ qcom,sde-qseed-type = "qseedv3";
+ qcom,sde-mixer-linewidth = <2560>;
+ qcom,sde-sspp-linewidth = <2560>;
+ qcom,sde-mixer-blendstages = <0x7>;
+ qcom,sde-highest-bank-bit = <0x2>;
+ qcom,sde-panic-per-pipe;
+ qcom,sde-has-cdp;
+ qcom,sde-has-src-split;
+ qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+ qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+ qcom,sde-vbif-off = <0>;
+ qcom,sde-vbif-id = <0>;
+ qcom,sde-vbif-default-ot-rd-limit = <32>;
+ qcom,sde-vbif-default-ot-wr-limit = <32>;
+ qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+ qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+
+ vdd-supply = <&gdsc_mdss>;
+ gdsc-mmagic-mdss-supply = <&gdsc_bimc_smmu>;
+ qcom,sde-csc-type = "csc-10bit";
+
+ qcom,sde-sspp-vig-blocks {
+ qcom,sde-vig-csc-off = <0x1a00>;
+ qcom,sde-vig-qseed-off = <0xa00>;
+ };
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc-mmagic-mdss";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+
+ qcom,platform-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+ compatible = "qcom,smmu_mdp_unsec";
+ iommus = <&mmss_smmu 0>;
+ };
+
+ /* data and reg bus scale settings */
+ qcom,sde-data-bus {
+ qcom,msm-bus,name = "mdss_sde";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>;
+ };
+ qcom,sde-reg-bus {
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+ };
+
+ sde_hdmi_tx: qcom,hdmi_tx_8998@c9a0000 {
+ cell-index = <0>;
+ compatible = "qcom,hdmi-tx-8998";
+ reg = <0xc9a0000 0x50c>,
+ <0x780000 0x621c>,
+ <0xc9e0000 0x28>;
+ reg-names = "core_physical", "qfprom_physical", "hdcp_physical";
+ interrupt-parent = <&sde_kms>;
+ interrupts = <8 0>;
+ qcom,hdmi-tx-ddc-clk-gpio = <&tlmm 32 0>;
+ qcom,hdmi-tx-ddc-data-gpio = <&tlmm 33 0>;
+ qcom,hdmi-tx-hpd-gpio = <&tlmm 34 0>;
+ qcom,hdmi-tx-hpd5v-gpio = <&tlmm 133 0>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active
+ &mdss_hdmi_cec_active
+ &mdss_hdmi_5v_active>;
+ pinctrl-1 = <&mdss_hdmi_hpd_suspend
+ &mdss_hdmi_ddc_suspend
+ &mdss_hdmi_cec_suspend
+ &mdss_hdmi_5v_suspend>;
+ hpd-gdsc-supply = <&gdsc_mdss>;
+ qcom,supply-names = "hpd-gdsc";
+ qcom,min-voltage-level = <0>;
+ qcom,max-voltage-level = <0>;
+ qcom,enable-load = <0>;
+ qcom,disable-load = <0>;
+
+ clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_hdmi_clk>,
+ <&clock_mmss clk_mmss_mdss_mdp_clk>,
+ <&clock_mmss clk_mmss_mdss_hdmi_dp_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_extpclk_clk>,
+ <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_misc_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_axi_clk>;
+ clock-names = "hpd_mnoc_clk", "hpd_iface_clk",
+ "hpd_core_clk", "hpd_mdp_core_clk",
+ "hpd_alt_iface_clk", "core_extp_clk",
+ "mnoc_clk","hpd_misc_ahb_clk",
+ "hpd_bus_clk";
+
+ /*qcom,mdss-fb-map = <&mdss_fb2>;*/
+ qcom,pluggable;
+ };
+};
+#include "msm8998-sde-display.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts b/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts
index ace7fc169703..73debc374fda 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts
+++ b/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts
@@ -15,7 +15,7 @@
#include "msm8998-v2.dtsi"
#include "msm8998-qrd-skuk.dtsi"
-#include "msm8998-camera-sensor-skuk.dtsi"
+#include "msm8998-camera-sensor-skuk-hdk.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM 8998 SKUK HDK";
@@ -23,6 +23,12 @@
qcom,board-id = <0x06000b 0x10>;
};
+&soc {
+ sound-tavil {
+ qcom,msm-mbhc-hphl-swh = <0>;
+ };
+};
+
&pmx_mdss {
mdss_dsi_active: mdss_dsi_active {
mux {
@@ -114,3 +120,29 @@
qcom,mdss-dsi-bl-max-level = <255>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
+
+&i2c_5 {
+ status = "okay";
+ st_fts@49 {
+ compatible = "st,fts";
+ reg = <0x49>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <125 0x2008>;
+ vdd-supply = <&pm8998_l6>;
+ avdd-supply = <&pm8998_l28>;
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ st,irq-gpio = <&tlmm 125 0x2008>;
+ st,reset-gpio = <&tlmm 89 0x00>;
+ st,regulator_dvdd = "vdd";
+ st,regulator_avdd = "avdd";
+ };
+};
+
+&soc {
+ /* HDK835 does not use improveTouch. If this node is not removed,
+ * the legacy touch driver cannot work.
+ */
+ /delete-node/hbtp;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
index b6ddd549efe5..348faf9b69c3 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
@@ -366,7 +366,7 @@
&pm8998_s13 {
regulator-min-microvolt = <568000>;
- regulator-max-microvolt = <1056000>;
+ regulator-max-microvolt = <1136000>;
};
&pcie0 {
diff --git a/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi b/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi
index 3e7cacac9d43..05a8816677ed 100644
--- a/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-vidc.dtsi
@@ -117,7 +117,7 @@
qcom,bus-master = <MSM_BUS_MASTER_VIDEO_P0>;
qcom,bus-slave = <MSM_BUS_SLAVE_EBI_CH0>;
qcom,bus-governor = "msm-vidc-ddr";
- qcom,bus-range-kbps = <1000 3388000>;
+ qcom,bus-range-kbps = <1000 4946000>;
};
venus_bus_vmem {
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 697b049235bd..f33b8bc2a8a8 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -1342,6 +1342,18 @@
qcom,irq-mask = <0x200>;
interrupts = <0 157 1>;
label = "lpass";
+ qcom,qos-config = <&glink_qos_adsp>;
+ qcom,ramp-time = <0xaf>;
+ };
+
+ glink_qos_adsp: qcom,glink-qos-config-adsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
};
qcom,glink-smem-native-xprt-dsps@86000000 {
@@ -1750,6 +1762,7 @@
tx-fifo-resize;
snps,nominal-elastic-buffer;
snps,disable-clk-gating;
+ snps,has-lpm-erratum;
snps,hird-threshold = /bits/ 8 <0x10>;
snps,num-gsi-evt-buffs = <0x3>;
};
@@ -3285,3 +3298,4 @@
#include "msm8998-blsp.dtsi"
#include "msm8998-audio.dtsi"
#include "msm-smb138x.dtsi"
+#include "msm8998-sde.dtsi"
diff --git a/arch/arm/boot/dts/qcom/sdm630-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-cdp.dtsi
new file mode 100644
index 000000000000..94158834eee6
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-cdp.dtsi
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash0: qcom,camera-flash@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ qcom,torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ qcom,switch-source = <&pm660l_switch0>;
+ status = "ok";
+ };
+
+ led_flash1: qcom,camera-flash@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pm660l_flash2>;
+ qcom,torch-source = <&pm660l_torch2>;
+ qcom,switch-source = <&pm660l_switch1>;
+ status = "ok";
+ };
+};
+
+&cci {
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <3400000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ actuator1: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <3400000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ actuator2: qcom,actuator@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <3400000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ ois0: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 50 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ status = "disabled";
+ };
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_vaf";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 32 0>,
+ <&tlmm 46 0>,
+ <&pm660l_gpios 4 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK0_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK0_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom1: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_vaf";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 34 0>,
+ <&tlmm 48 0>,
+ <&pm660l_gpios 3 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK2_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK2_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom2: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_vaf";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 33 0>,
+ <&tlmm 47 0>,
+ <&pm660_gpios 3 0>,
+ <&tlmm 44 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK1_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK1_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <270>;
+ qcom,led-flash-src = <&led_flash0>;
+ qcom,actuator-src = <&actuator0>;
+ qcom,ois-src = <&ois0>;
+ qcom,eeprom-src = <&eeprom0>;
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 32 0>,
+ <&tlmm 46 0>,
+ <&pm660l_gpios 4 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK0_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK0_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <90>;
+ qcom,actuator-src = <&actuator1>;
+ qcom,eeprom-src = <&eeprom1>;
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 34 0>,
+ <&tlmm 48 0>,
+ <&pm660l_gpios 3 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK2_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK2_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@2 {
+ cell-index = <2>;
+ compatible = "qcom,camera";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <90>;
+ qcom,actuator-src = <&actuator2>;
+ qcom,eeprom-src = <&eeprom2>;
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 33 0>,
+ <&tlmm 47 0>,
+ <&pm660l_gpios 3 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK1_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK1_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+};
+
+&pm660l_gpios {
+ gpio@c300 { /* GPIO4 -CAMERA SENSOR 0 VDIG*/
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+
+ gpio@c200 { /* GPIO3 -CAMERA SENSOR 2 VDIG*/
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
new file mode 100644
index 000000000000..94158834eee6
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-camera-sensor-mtp.dtsi
@@ -0,0 +1,370 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ led_flash0: qcom,camera-flash@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pm660l_flash0 &pm660l_flash1>;
+ qcom,torch-source = <&pm660l_torch0 &pm660l_torch1>;
+ qcom,switch-source = <&pm660l_switch0>;
+ status = "ok";
+ };
+
+ led_flash1: qcom,camera-flash@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera-flash";
+ qcom,flash-source = <&pm660l_flash2>;
+ qcom,torch-source = <&pm660l_torch2>;
+ qcom,switch-source = <&pm660l_switch1>;
+ status = "ok";
+ };
+};
+
+&cci {
+ actuator0: qcom,actuator@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <0>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <3400000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ actuator1: qcom,actuator@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <3400000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ actuator2: qcom,actuator@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,actuator";
+ qcom,cci-master = <1>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vaf";
+ qcom,cam-vreg-min-voltage = <2800000>;
+ qcom,cam-vreg-max-voltage = <3400000>;
+ qcom,cam-vreg-op-mode = <100000>;
+ };
+
+ ois0: qcom,ois@0 {
+ cell-index = <0>;
+ reg = <0x0>;
+ compatible = "qcom,ois";
+ qcom,cci-master = <0>;
+ gpios = <&tlmm 50 0>;
+ qcom,gpio-vaf = <0>;
+ qcom,gpio-req-tbl-num = <0>;
+ qcom,gpio-req-tbl-flags = <0>;
+ qcom,gpio-req-tbl-label = "CAM_VAF";
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_actuator_vaf_active>;
+ pinctrl-1 = <&cam_actuator_vaf_suspend>;
+ status = "disabled";
+ };
+
+ eeprom0: qcom,eeprom@0 {
+ cell-index = <0>;
+ reg = <0>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_vaf";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 32 0>,
+ <&tlmm 46 0>,
+ <&pm660l_gpios 4 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK0_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK0_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom1: qcom,eeprom@1 {
+ cell-index = <1>;
+ reg = <0x1>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ cam_vaf-supply = <&pm660l_l8>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_vaf";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 34 0>,
+ <&tlmm 48 0>,
+ <&pm660l_gpios 3 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK2_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK2_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ eeprom2: qcom,eeprom@2 {
+ cell-index = <2>;
+ reg = <0x2>;
+ compatible = "qcom,eeprom";
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig",
+ "cam_vaf";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000 2800000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000 3400000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000 100000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 33 0>,
+ <&tlmm 47 0>,
+ <&pm660_gpios 3 0>,
+ <&tlmm 44 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK1_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK1_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@0 {
+ cell-index = <0>;
+ compatible = "qcom,camera";
+ reg = <0x0>;
+ qcom,csiphy-sd-index = <0>;
+ qcom,csid-sd-index = <0>;
+ qcom,mount-angle = <270>;
+ qcom,led-flash-src = <&led_flash0>;
+ qcom,actuator-src = <&actuator0>;
+ qcom,ois-src = <&ois0>;
+ qcom,eeprom-src = <&eeprom0>;
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk0_active
+ &cam_sensor_rear_active>;
+ pinctrl-1 = <&cam_sensor_mclk0_suspend
+ &cam_sensor_rear_suspend>;
+ gpios = <&tlmm 32 0>,
+ <&tlmm 46 0>,
+ <&pm660l_gpios 4 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK0",
+ "CAM_RESET0",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <0>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK0_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK0_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@1 {
+ cell-index = <1>;
+ compatible = "qcom,camera";
+ reg = <0x1>;
+ qcom,csiphy-sd-index = <1>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <90>;
+ qcom,actuator-src = <&actuator1>;
+ qcom,eeprom-src = <&eeprom1>;
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk2_active
+ &cam_sensor_rear2_active>;
+ pinctrl-1 = <&cam_sensor_mclk2_suspend
+ &cam_sensor_rear2_suspend>;
+ gpios = <&tlmm 34 0>,
+ <&tlmm 48 0>,
+ <&pm660l_gpios 3 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK",
+ "CAM_RESET",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <0>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK2_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK2_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+
+ qcom,camera@2 {
+ cell-index = <2>;
+ compatible = "qcom,camera";
+ reg = <0x02>;
+ qcom,csiphy-sd-index = <2>;
+ qcom,csid-sd-index = <2>;
+ qcom,mount-angle = <90>;
+ qcom,actuator-src = <&actuator2>;
+ qcom,eeprom-src = <&eeprom2>;
+ cam_vio-supply = <&pm660_l11>;
+ cam_vana-supply = <&pm660l_bob>;
+ cam_vdig-supply = <&pm660_s5>;
+ qcom,cam-vreg-name = "cam_vio", "cam_vana", "cam_vdig";
+ qcom,cam-vreg-min-voltage = <1780000 3300000 1350000>;
+ qcom,cam-vreg-max-voltage = <1950000 3600000 1350000>;
+ qcom,cam-vreg-op-mode = <105000 80000 105000>;
+ qcom,gpio-no-mux = <0>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_sensor_mclk1_active
+ &cam_sensor_front_active>;
+ pinctrl-1 = <&cam_sensor_mclk1_suspend
+ &cam_sensor_front_suspend>;
+ gpios = <&tlmm 33 0>,
+ <&tlmm 47 0>,
+ <&pm660l_gpios 3 0>,
+ <&tlmm 51 0>;
+ qcom,gpio-reset = <1>;
+ qcom,gpio-vdig = <2>;
+ qcom,gpio-vana = <3>;
+ qcom,gpio-req-tbl-num = <0 1 2 3>;
+ qcom,gpio-req-tbl-flags = <1 0 0 0>;
+ qcom,gpio-req-tbl-label = "CAMIF_MCLK2",
+ "CAM_RESET2",
+ "CAM_VDIG",
+ "CAM_VANA";
+ qcom,sensor-position = <1>;
+ qcom,sensor-mode = <0>;
+ qcom,cci-master = <1>;
+ status = "ok";
+ clocks = <&clock_mmss MCLK1_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_MCLK1_CLK>;
+ clock-names = "cam_src_clk", "cam_clk";
+ qcom,clock-rates = <24000000 0>;
+ };
+};
+
+&pm660l_gpios {
+ gpio@c300 { /* GPIO4 - CAMERA SENSOR 0 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+
+ gpio@c200 { /* GPIO3 - CAMERA SENSOR 2 VDIG */
+ qcom,mode = <1>; /* Output */
+ qcom,pull = <5>; /* No Pull */
+ qcom,vin-sel = <0>; /* VIN1 GPIO_LV */
+ qcom,src-sel = <0>; /* GPIO */
+ qcom,invert = <0>; /* No Invert */
+ qcom,master-en = <1>; /* Enable GPIO */
+ status = "ok";
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-camera.dtsi b/arch/arm/boot/dts/qcom/sdm630-camera.dtsi
new file mode 100644
index 000000000000..56210dc6892b
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-camera.dtsi
@@ -0,0 +1,865 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ qcom,msm-cam@ca00000 {
+ compatible = "qcom,msm-cam";
+ reg = <0xca00000 0x4000>;
+ reg-names = "msm-cam";
+ status = "ok";
+ bus-vectors = "suspend", "svs", "nominal", "turbo";
+ qcom,bus-votes = <0 150000000 320000000 320000000>;
+ };
+
+ qcom,csiphy@c824000 {
+ cell-index = <0>;
+ compatible = "qcom,csiphy-v3.5", "qcom,csiphy";
+ reg = <0xc824000 0x1000>,
+ <0xca00120 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
+ interrupts = <0 78 0>;
+ interrupt-names = "csiphy";
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss CSI0_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI0_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPHY_CSID0_CLK>,
+ <&clock_mmss CSI0PHYTIMER_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI0PHYTIMER_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSIPHY_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSIPHY0_CLK>,
+ <&clock_mmss MMSS_CSIPHY_AHB2CRIF_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
+ "bmic_smmu_ahb", "bmic_smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "csi_src_clk", "csi_clk", "cphy_csid_clk",
+ "csiphy_timer_src_clk", "csiphy_timer_clk",
+ "camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk",
+ "csiphy_ahb2crif";
+ qcom,clock-rates = <0 0 0 0 0 0 384000000 0 0 269333333 0
+ 0 384000000 0 0>;
+ status = "ok";
+ };
+
+ qcom,csiphy@c825000 {
+ cell-index = <1>;
+ compatible = "qcom,csiphy-v3.5", "qcom,csiphy";
+ reg = <0xc825000 0x1000>,
+ <0xca00124 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
+ interrupts = <0 79 0>;
+ interrupt-names = "csiphy";
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss CSI1_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI1_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPHY_CSID1_CLK>,
+ <&clock_mmss CSI1PHYTIMER_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI1PHYTIMER_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSIPHY_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSIPHY1_CLK>,
+ <&clock_mmss MMSS_CSIPHY_AHB2CRIF_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
+ "bmic_smmu_ahb", "bmic_smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "csi_src_clk", "csi_clk", "cphy_csid_clk",
+ "csiphy_timer_src_clk", "csiphy_timer_clk",
+ "camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk",
+ "csiphy_ahb2crif";
+ qcom,clock-rates = <0 0 0 0 0 0 384000000 0 0 269333333 0
+ 0 384000000 0 0>;
+ status = "ok";
+ };
+
+ qcom,csiphy@c826000 {
+ cell-index = <2>;
+ compatible = "qcom,csiphy-v3.5", "qcom,csiphy";
+ reg = <0xc826000 0x1000>,
+ <0xca00128 0x4>;
+ reg-names = "csiphy", "csiphy_clk_mux";
+ interrupts = <0 80 0>;
+ interrupt-names = "csiphy";
+ gdscr-supply = <&gdsc_camss_top>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "gdscr", "bimc_smmu";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss CSI2_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI2_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPHY_CSID2_CLK>,
+ <&clock_mmss CSI2PHYTIMER_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI2PHYTIMER_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSIPHY_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSIPHY2_CLK>,
+ <&clock_mmss MMSS_CSIPHY_AHB2CRIF_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
+ "bmic_smmu_ahb", "bmic_smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "csi_src_clk", "csi_clk", "cphy_csid_clk",
+ "csiphy_timer_src_clk", "csiphy_timer_clk",
+ "camss_ispif_ahb_clk", "csiphy_clk_src", "csiphy_clk",
+ "csiphy_ahb2crif";
+ qcom,clock-rates = <0 0 0 0 0 0 384000000 0 0 269333333 0
+ 0 384000000 0 0>;
+ status = "ok";
+ };
+
+ qcom,csid@ca30000 {
+ cell-index = <0>;
+ compatible = "qcom,csid-v5.0", "qcom,csid";
+ reg = <0xca30000 0x400>;
+ reg-names = "csid";
+ interrupts = <0 296 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm660_l1>;
+ gdscr-supply = <&gdsc_camss_top>;
+ vdd_sec-supply = <&pm660l_l1>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ qcom,cam-vreg-min-voltage = <925000 0 0>;
+ qcom,cam-vreg-max-voltage = <925000 0 0>;
+ qcom,cam-vreg-op-mode = <0 0 0>;
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSI0_CLK_SRC>,
+ <&clock_mmss CSIPHY_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI0_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI0_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI0RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI0PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPHY_CSID0_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
+ "bmic_smmu_ahb", "bmic_smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 384000000 384000000
+ 0 0 0 0 0>;
+ status = "ok";
+ };
+
+ qcom,csid@ca30400 {
+ cell-index = <1>;
+ compatible = "qcom,csid-v5.0", "qcom,csid";
+ reg = <0xca30400 0x400>;
+ reg-names = "csid";
+ interrupts = <0 297 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm660_l1>;
+ gdscr-supply = <&gdsc_camss_top>;
+ vdd_sec-supply = <&pm660l_l1>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ qcom,cam-vreg-min-voltage = <925000 0 0>;
+ qcom,cam-vreg-max-voltage = <925000 0 0>;
+ qcom,cam-vreg-op-mode = <0 0 0>;
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSI1_CLK_SRC>,
+ <&clock_mmss CSIPHY_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI1_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI1_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI1RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI1PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPHY_CSID1_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
+ "bmic_smmu_ahb", "bmic_smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ 0 0 0 0 0>;
+ status = "ok";
+ };
+
+ qcom,csid@ca30800 {
+ cell-index = <2>;
+ compatible = "qcom,csid-v5.0", "qcom,csid";
+ reg = <0xca30800 0x400>;
+ reg-names = "csid";
+ interrupts = <0 298 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm660_l1>;
+ gdscr-supply = <&gdsc_camss_top>;
+ vdd_sec-supply = <&pm660l_l1>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ qcom,cam-vreg-min-voltage = <925000 0 0>;
+ qcom,cam-vreg-max-voltage = <925000 0 0>;
+ qcom,cam-vreg-op-mode = <0 0 0>;
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSI2_CLK_SRC>,
+ <&clock_mmss CSIPHY_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI2_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI2_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI2RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI2PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPHY_CSID2_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
+ "bmic_smmu_ahb", "bmic_smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ 0 0 0 0 0>;
+ status = "ok";
+ };
+
+ qcom,csid@ca30c00 {
+ cell-index = <3>;
+ compatible = "qcom,csid-v5.0", "qcom,csid";
+ reg = <0xca30c00 0x400>;
+ reg-names = "csid";
+ interrupts = <0 299 0>;
+ interrupt-names = "csid";
+ qcom,csi-vdd-voltage = <1200000>;
+ qcom,mipi-csi-vdd-supply = <&pm660_l1>;
+ gdscr-supply = <&gdsc_camss_top>;
+ vdd_sec-supply = <&pm660l_l1>;
+ bimc_smmu-supply = <&gdsc_bimc_smmu>;
+ qcom,cam-vreg-name = "vdd_sec", "gdscr", "bimc_smmu";
+ qcom,cam-vreg-min-voltage = <925000 0 0>;
+ qcom,cam-vreg-max-voltage = <925000 0 0>;
+ qcom,cam-vreg-op-mode = <0 0 0>;
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSI3_CLK_SRC>,
+ <&clock_mmss CSIPHY_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI3_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI3_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI3RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI3PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPHY_CSID3_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb",
+ "bmic_smmu_ahb", "bmic_smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "ispif_ahb_clk", "csi_src_clk", "csiphy_clk_src",
+ "csi_clk", "csi_ahb_clk", "csi_rdi_clk",
+ "csi_pix_clk", "cphy_csid_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 256000000 256000000
+ 0 0 0 0 0>;
+ status = "ok";
+ };
+
+ qcom,cam_smmu {
+ compatible = "qcom,msm-cam-smmu";
+ status = "ok";
+
+ msm_cam_smmu_cb1 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&mmss_bimc_smmu 0xc00>,
+ <&mmss_bimc_smmu 0xc01>,
+ <&mmss_bimc_smmu 0xc02>,
+ <&mmss_bimc_smmu 0xc03>;
+ label = "vfe";
+ qcom,scratch-buf-support;
+ };
+
+ msm_cam_smmu_cb2 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&mmss_bimc_smmu 0xa00>;
+ label = "cpp";
+ };
+
+ msm_cam_smmu_cb4 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&mmss_bimc_smmu 0x800>;
+ label = "jpeg_enc0";
+ };
+
+ msm_cam_smmu_cb5 {
+ compatible = "qcom,msm-cam-smmu-cb";
+ iommus = <&mmss_bimc_smmu 0x801>;
+ label = "jpeg_dma";
+ };
+ };
+
+ qcom,cpp@ca04000 {
+ cell-index = <0>;
+ compatible = "qcom,cpp";
+ reg = <0xca04000 0x100>,
+ <0xca80000 0x3000>,
+ <0xca18000 0x3000>,
+ <0xc8c36D4 0x4>;
+ reg-names = "cpp", "cpp_vbif", "cpp_hw", "camss_cpp";
+ interrupts = <0 294 0>;
+ interrupt-names = "cpp";
+ smmu-vdd-supply = <&gdsc_bimc_smmu>;
+ camss-vdd-supply = <&gdsc_camss_top>;
+ vdd-supply = <&gdsc_cpp>;
+ qcom,vdd-names = "smmu-vdd", "camss-vdd", "vdd";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss CPP_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CPP_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPP_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_MICRO_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CPP_VBIF_AHB_CLK>;
+ clock-names = "mmssnoc_axi_clk",
+ "mnoc_ahb_clk",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "cpp_src_clk",
+ "cpp_core_clk", "camss_cpp_ahb_clk",
+ "camss_cpp_axi_clk", "micro_iface_clk",
+ "mmss_smmu_axi_clk", "cpp_vbif_ahb_clk";
+ qcom,clock-rates = <0 0 0 0 200000000 200000000 0 0 0 0 0>;
+ qcom,min-clock-rate = <200000000>;
+ qcom,bus-master = <1>;
+ qcom,vbif-qos-setting = <0x550 0x55555555>,
+ <0x554 0x55555555>,
+ <0x558 0x55555555>,
+ <0x55c 0x55555555>,
+ <0x560 0x55555555>,
+ <0x564 0x55555555>,
+ <0x568 0x55555555>,
+ <0x56c 0x55555555>,
+ <0x570 0x55555555>,
+ <0x574 0x55555555>,
+ <0x578 0x55555555>,
+ <0x57c 0x55555555>,
+ <0x580 0x55555555>,
+ <0x584 0x55555555>,
+ <0x588 0x55555555>,
+ <0x58c 0x55555555>;
+ status = "ok";
+ qcom,msm-bus,name = "msm_camera_cpp";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <106 512 0 0>,
+ <106 512 0 0>;
+ qcom,msm-bus-vector-dyn-vote;
+ resets = <&clock_mmss CAMSS_MICRO_BCR>;
+ reset-names = "micro_iface_reset";
+ qcom,src-clock-rates = <120000000 256000000 384000000
+ 480000000 540000000 576000000>;
+ qcom,cpp-fw-payload-info {
+ qcom,stripe-base = <790>;
+ qcom,plane-base = <715>;
+ qcom,stripe-size = <63>;
+ qcom,plane-size = <25>;
+ qcom,fe-ptr-off = <11>;
+ qcom,we-ptr-off = <23>;
+ qcom,ref-fe-ptr-off = <17>;
+ qcom,ref-we-ptr-off = <36>;
+ qcom,we-meta-ptr-off = <42>;
+ qcom,fe-mmu-pf-ptr-off = <7>;
+ qcom,ref-fe-mmu-pf-ptr-off = <10>;
+ qcom,we-mmu-pf-ptr-off = <13>;
+ qcom,dup-we-mmu-pf-ptr-off = <18>;
+ qcom,ref-we-mmu-pf-ptr-off = <23>;
+ qcom,set-group-buffer-len = <135>;
+ qcom,dup-frame-indicator-off = <70>;
+ };
+ };
+
+ qcom,ispif@ca31000 {
+ cell-index = <0>;
+ compatible = "qcom,ispif-v3.0", "qcom,ispif";
+ reg = <0xca31000 0xc00>,
+ <0xca00020 0x4>;
+ reg-names = "ispif", "csi_clk_mux";
+ interrupts = <0 309 0>;
+ interrupt-names = "ispif";
+ qcom,num-isps = <0x2>;
+ camss-vdd-supply = <&gdsc_camss_top>;
+ vfe0-vdd-supply = <&gdsc_vfe0>;
+ vfe1-vdd-supply = <&gdsc_vfe1>;
+ qcom,vdd-names = "camss-vdd", "vfe0-vdd",
+ "vfe1-vdd";
+ qcom,clock-cntl-support;
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_ISPIF_AHB_CLK>,
+ <&clock_mmss CSI0_CLK_SRC>,
+ <&clock_mmss CSI1_CLK_SRC>,
+ <&clock_mmss CSI2_CLK_SRC>,
+ <&clock_mmss CSI3_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI0RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI1RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI2RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI3RDI_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI0PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI1PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI2PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI3PIX_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI0_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI1_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI2_CLK>,
+ <&clock_mmss MMSS_CAMSS_CSI3_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE0_CLK>,
+ <&clock_mmss VFE0_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI_VFE0_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE1_CLK>,
+ <&clock_mmss VFE1_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI_VFE1_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+ "camss_ahb_clk",
+ "camss_top_ahb_clk", "ispif_ahb_clk",
+ "csi0_src_clk", "csi1_src_clk",
+ "csi2_src_clk", "csi3_src_clk",
+ "csi0_rdi_clk", "csi1_rdi_clk",
+ "csi2_rdi_clk", "csi3_rdi_clk",
+ "csi0_pix_clk", "csi1_pix_clk",
+ "csi2_pix_clk", "csi3_pix_clk",
+ "camss_csi0_clk", "camss_csi1_clk",
+ "camss_csi2_clk", "camss_csi3_clk",
+ "camss_vfe_vfe0_clk",
+ "vfe0_clk_src", "camss_csi_vfe0_clk",
+ "camss_vfe_vfe1_clk",
+ "vfe1_clk_src", "camss_csi_vfe1_clk";
+ qcom,clock-rates = <0 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0 0
+ 0 0 0
+ 0 0 0>;
+ qcom,clock-control = "INIT_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE",
+ "INIT_RATE", "INIT_RATE",
+ "INIT_RATE", "INIT_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE", "NO_SET_RATE",
+ "NO_SET_RATE",
+ "INIT_RATE", "NO_SET_RATE",
+ "NO_SET_RATE",
+ "INIT_RATE", "NO_SET_RATE";
+ status = "ok";
+ };
+
+ vfe0: qcom,vfe0@ca10000 {
+ cell-index = <0>;
+ compatible = "qcom,vfe48";
+ reg = <0xca10000 0x4000>,
+ <0xca40000 0x3000>;
+ reg-names = "vfe", "vfe_vbif";
+ interrupts = <0 314 0>;
+ interrupt-names = "vfe";
+ vdd-supply = <&gdsc_vfe0>;
+ camss-vdd-supply = <&gdsc_camss_top>;
+ smmu-vdd-supply = <&gdsc_bimc_smmu>;
+ qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE0_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE0_STREAM_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE0_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE_VBIF_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE_VBIF_AXI_CLK>,
+ <&clock_mmss VFE0_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI_VFE0_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+ "bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "camss_vfe_clk", "camss_vfe_stream_clk",
+ "camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
+ "camss_vfe_vbif_axi_clk", "vfe_clk_src",
+ "camss_csi_vfe_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 256000000 0
+ 0 0 0 0 0 0 0 0 0 0 0 480000000 0
+ 0 0 0 0 0 0 0 0 0 0 0 576000000 0>;
+ status = "ok";
+ qos-entries = <8>;
+ qos-regs = <0x404 0x408 0x40c 0x410 0x414 0x418
+ 0x41c 0x420>;
+ qos-settings = <0xaaa5aaa5
+ 0xaaa5aaa5
+ 0xaaa5aaa5
+ 0xaa55aaa5
+ 0xaa55aa55
+ 0xaa55aa55
+ 0xaa55aa55
+ 0x0005aa55>;
+ vbif-entries = <3>;
+ vbif-regs = <0x124 0xac 0xd0>;
+ vbif-settings = <0x3 0x40 0x1010>;
+ ds-entries = <17>;
+ ds-regs = <0x424 0x428 0x42c 0x430 0x434
+ 0x438 0x43c 0x440 0x444 0x448 0x44c
+ 0x450 0x454 0x458 0x45c 0x460 0x464>;
+ ds-settings = <0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0x110>;
+ qcom,msm-bus,name = "msm_camera_vfe";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <29 512 0 0>,
+ <29 512 100000000 100000000>;
+ qcom,msm-bus-vector-dyn-vote;
+ };
+
+ vfe1: qcom,vfe1@ca14000 {
+ cell-index = <1>;
+ compatible = "qcom,vfe48";
+ reg = <0xca14000 0x4000>,
+ <0xca40000 0x3000>;
+ reg-names = "vfe", "vfe_vbif";
+ interrupts = <0 315 0>;
+ interrupt-names = "vfe";
+ vdd-supply = <&gdsc_vfe1>;
+ camss-vdd-supply = <&gdsc_camss_top>;
+ smmu-vdd-supply = <&gdsc_bimc_smmu>;
+ qcom,vdd-names = "vdd", "camss-vdd", "smmu-vdd";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE1_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE1_STREAM_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE1_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE_VBIF_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_VFE_VBIF_AXI_CLK>,
+ <&clock_mmss VFE1_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CSI_VFE1_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb_clk",
+ "bimc_smmu_ahb_clk", "bimc_smmu_axi_clk",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "camss_vfe_clk", "camss_vfe_stream_clk",
+ "camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
+ "camss_vfe_vbif_axi_clk", "vfe_clk_src",
+ "camss_csi_vfe_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 256000000 0
+ 0 0 0 0 0 0 0 0 0 0 0 480000000 0
+ 0 0 0 0 0 0 0 0 0 0 0 576000000 0>;
+ status = "ok";
+ qos-entries = <8>;
+ qos-regs = <0x404 0x408 0x40c 0x410 0x414 0x418
+ 0x41c 0x420>;
+ qos-settings = <0xaaa5aaa5
+ 0xaaa5aaa5
+ 0xaaa5aaa5
+ 0xaa55aaa5
+ 0xaa55aa55
+ 0xaa55aa55
+ 0xaa55aa55
+ 0x0005aa55>;
+ vbif-entries = <3>;
+ vbif-regs = <0x124 0xac 0xd0>;
+ vbif-settings = <0x3 0x40 0x1010>;
+ ds-entries = <17>;
+ ds-regs = <0x424 0x428 0x42c 0x430 0x434
+ 0x438 0x43c 0x440 0x444 0x448 0x44c
+ 0x450 0x454 0x458 0x45c 0x460 0x464>;
+ ds-settings = <0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0xcccc1111
+ 0x110>;
+ qcom,msm-bus,name = "msm_camera_vfe";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps =
+ <29 512 0 0>,
+ <29 512 100000000 100000000>;
+ qcom,msm-bus-vector-dyn-vote;
+ };
+
+ qcom,vfe {
+ compatible = "qcom,vfe";
+ num_child = <2>;
+ };
+
+ cci: qcom,cci@ca0c000 {
+ cell-index = <0>;
+ compatible = "qcom,cci";
+ reg = <0xca0c000 0x4000>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg-names = "cci";
+ interrupts = <0 295 0>;
+ interrupt-names = "cci";
+ status = "ok";
+ mmagic-supply = <&gdsc_bimc_smmu>;
+ gdscr-supply = <&gdsc_camss_top>;
+ qcom,cam-vreg-name = "mmagic", "gdscr";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss CCI_CLK_SRC>,
+ <&clock_mmss MMSS_CAMSS_CCI_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_CCI_CLK>;
+ clock-names = "mmssnoc_axi", "mnoc_ahb", "smmu_ahb", "smmu_axi",
+ "camss_ahb_clk", "camss_top_ahb_clk",
+ "cci_src_clk", "cci_ahb_clk", "camss_cci_clk";
+ qcom,clock-rates = <0 0 0 0 0 0 19200000 0 0>,
+ <0 0 0 0 0 0 37500000 0 0>;
+ pinctrl-names = "cci_default", "cci_suspend";
+ pinctrl-0 = <&cci0_active &cci1_active>;
+ pinctrl-1 = <&cci0_suspend &cci1_suspend>;
+ gpios = <&tlmm 36 0>,
+ <&tlmm 37 0>,
+ <&tlmm 38 0>,
+ <&tlmm 39 0>;
+ qcom,gpio-tbl-num = <0 1 2 3>;
+ qcom,gpio-tbl-flags = <1 1 1 1>;
+ qcom,gpio-tbl-label = "CCI_I2C_DATA0",
+ "CCI_I2C_CLK0",
+ "CCI_I2C_DATA1",
+ "CCI_I2C_CLK1";
+ i2c_freq_100Khz: qcom,i2c_standard_mode {
+ status = "disabled";
+ };
+ i2c_freq_400Khz: qcom,i2c_fast_mode {
+ status = "disabled";
+ };
+ i2c_freq_custom: qcom,i2c_custom_mode {
+ status = "disabled";
+ };
+ i2c_freq_1Mhz: qcom,i2c_fast_plus_mode {
+ status = "disabled";
+ };
+ };
+
+ qcom,jpeg@ca1c000 {
+ cell-index = <0>;
+ compatible = "qcom,jpeg";
+ reg = <0xca1c000 0x4000>,
+ <0xca60000 0x3000>;
+ reg-names = "jpeg_hw", "jpeg_vbif";
+ interrupts = <0 316 0>;
+ interrupt-names = "jpeg";
+ smmu-vdd-supply = <&gdsc_bimc_smmu>;
+ camss-vdd-supply = <&gdsc_camss_top>;
+ qcom,vdd-names = "smmu-vdd", "camss-vdd";
+ clock-names = "mmssnoc_axi",
+ "mmss_mnoc_ahb_clk",
+ "mmss_bimc_smmu_ahb_clk",
+ "mmss_bimc_smmu_axi_clk",
+ "mmss_camss_ahb_clk",
+ "mmss_camss_top_ahb_clk",
+ "core_clk",
+ "mmss_camss_jpeg_ahb_clk",
+ "mmss_camss_jpeg_axi_clk";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_JPEG0_VOTE_CLK>,
+ <&clock_mmss MMSS_CAMSS_JPEG_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_JPEG_AXI_CLK >;
+ qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0>;
+ qcom,vbif-reg-settings = <0x4 0x1>;
+ qcom,prefetch-reg-settings = <0x30c 0x1111>,
+ <0x318 0x31>,
+ <0x324 0x31>,
+ <0x330 0x31>,
+ <0x33c 0x0>;
+ qcom,msm-bus,name = "msm_camera_jpeg0";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+ <62 512 1920000 2880000>;
+ status = "ok";
+ };
+
+ qcom,jpeg@caa0000 {
+ cell-index = <3>;
+ compatible = "qcom,jpegdma";
+ reg = <0xcaa0000 0x4000>,
+ <0xca60000 0x3000>;
+ reg-names = "jpeg_hw", "jpeg_vbif";
+ interrupts = <0 304 0>;
+ interrupt-names = "jpeg";
+ smmu-vdd-supply = <&gdsc_bimc_smmu>;
+ camss-vdd-supply = <&gdsc_camss_top>;
+ qcom,vdd-names = "smmu-vdd", "camss-vdd";
+ clock-names = "mmssnoc_axi",
+ "mmss_mnoc_ahb_clk",
+ "mmss_bimc_smmu_ahb_clk",
+ "mmss_bimc_smmu_axi_clk",
+ "mmss_camss_ahb_clk",
+ "mmss_camss_top_ahb_clk",
+ "core_clk",
+ "mmss_camss_jpeg_ahb_clk",
+ "mmss_camss_jpeg_axi_clk";
+ clocks = <&clock_rpmcc MMSSNOC_AXI_CLK>,
+ <&clock_mmss MMSS_MNOC_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AHB_CLK>,
+ <&clock_mmss MMSS_BIMC_SMMU_AXI_CLK>,
+ <&clock_mmss MMSS_CAMSS_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_TOP_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_JPEG0_DMA_VOTE_CLK>,
+ <&clock_mmss MMSS_CAMSS_JPEG_AHB_CLK>,
+ <&clock_mmss MMSS_CAMSS_JPEG_AXI_CLK>;
+ qcom,clock-rates = <0 0 0 0 0 0 480000000 0 0>;
+ qcom,vbif-reg-settings = <0x4 0x1>;
+ qcom,prefetch-reg-settings = <0x18c 0x11>,
+ <0x1a0 0x31>,
+ <0x1b0 0x31>;
+ qcom,msm-bus,name = "msm_camera_jpeg_dma";
+ qcom,msm-bus,num-cases = <2>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,vectors-KBps = <62 512 0 0>,
+ <62 512 1920000 2880000>;
+ qcom,max-ds-factor = <128>;
+ status = "ok";
+ };
+};
+
+&i2c_freq_100Khz {
+ qcom,hw-thigh = <201>;
+ qcom,hw-tlow = <174>;
+ qcom,hw-tsu-sto = <204>;
+ qcom,hw-tsu-sta = <231>;
+ qcom,hw-thd-dat = <22>;
+ qcom,hw-thd-sta = <162>;
+ qcom,hw-tbuf = <227>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+};
+
+&i2c_freq_400Khz {
+ qcom,hw-thigh = <38>;
+ qcom,hw-tlow = <56>;
+ qcom,hw-tsu-sto = <40>;
+ qcom,hw-tsu-sta = <40>;
+ qcom,hw-thd-dat = <22>;
+ qcom,hw-thd-sta = <35>;
+ qcom,hw-tbuf = <62>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+};
+
+&i2c_freq_custom {
+ qcom,hw-thigh = <38>;
+ qcom,hw-tlow = <56>;
+ qcom,hw-tsu-sto = <40>;
+ qcom,hw-tsu-sta = <40>;
+ qcom,hw-thd-dat = <22>;
+ qcom,hw-thd-sta = <35>;
+ qcom,hw-tbuf = <62>;
+ qcom,hw-scl-stretch-en = <1>;
+ qcom,hw-trdhld = <6>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+};
+
+&i2c_freq_1Mhz {
+ qcom,hw-thigh = <16>;
+ qcom,hw-tlow = <22>;
+ qcom,hw-tsu-sto = <17>;
+ qcom,hw-tsu-sta = <18>;
+ qcom,hw-thd-dat = <16>;
+ qcom,hw-thd-sta = <15>;
+ qcom,hw-tbuf = <24>;
+ qcom,hw-scl-stretch-en = <0>;
+ qcom,hw-trdhld = <3>;
+ qcom,hw-tsp = <3>;
+ qcom,cci-clk-src = <37500000>;
+ status = "ok";
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
index d223197ad878..37fce82adb17 100644
--- a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
@@ -11,7 +11,7 @@
*/
#include "sdm660-pinctrl.dtsi"
-#include "sdm660-camera-sensor-cdp.dtsi"
+#include "sdm630-camera-sensor-cdp.dtsi"
/ {
};
@@ -47,6 +47,9 @@
};
&soc {
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
};
&pm660_charger {
diff --git a/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi
index eec0862e5ee3..e9c8e0456abc 100644
--- a/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-gpu.dtsi
@@ -90,7 +90,7 @@
/* GPU to BIMC bus width, VBIF data transfer in 1 cycle */
qcom,bus-width = <32>;
qcom,msm-bus,name = "grp3d";
- qcom,msm-bus,num-cases = <14>;
+ qcom,msm-bus,num-cases = <12>;
qcom,msm-bus,num-paths = <1>;
qcom,msm-bus,vectors-KBps =
<26 512 0 0>,
diff --git a/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts
new file mode 100644
index 000000000000..4db377dc755a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-cdp.dts
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-cdp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660L, Headset Jacktype NO, CDP";
+ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp";
+ qcom,board-id = <1 2>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-rcm.dts b/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-rcm.dts
new file mode 100644
index 000000000000..be30eb172176
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-headset-jacktype-no-rcm.dts
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-cdp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660L, Headset Jacktype NO, RCM";
+ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp";
+ qcom,board-id = <21 2>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi
index a07118486322..3c19dc70e7c4 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mdss-panels.dtsi
@@ -22,7 +22,7 @@
qcom,panel-supply-entry@0 {
reg = <0>;
qcom,supply-name = "wqhd-vddio";
- qcom,supply-min-voltage = <1880000>;
+ qcom,supply-min-voltage = <1800000>;
qcom,supply-max-voltage = <1950000>;
qcom,supply-enable-load = <32000>;
qcom,supply-disable-load = <80>;
diff --git a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
index d7a0c33019f2..3fb6993d71d6 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
@@ -63,7 +63,7 @@
<2 2400000>; /* Camera */
qcom,max-clk-rate = <412500000>;
qcom,mdss-default-ot-rd-limit = <32>;
- qcom,mdss-default-ot-wr-limit = <40>;
+ qcom,mdss-default-ot-wr-limit = <32>;
qcom,mdss-dram-channels = <2>;
/* Bandwidth limit settings */
@@ -180,6 +180,19 @@
qcom,mdss-prefill-post-scaler-buffer-pixels = <2560>;
qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
+ qcom,mdss-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+
qcom,mdss-pp-offsets {
qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
qcom,mdss-sspp-vig-pcc-off = <0x1b00>;
@@ -481,7 +494,7 @@
qcom,mdss-rot-xin-id = <14 15>;
qcom,mdss-default-ot-rd-limit = <32>;
- qcom,mdss-default-ot-wr-limit = <40>;
+ qcom,mdss-default-ot-wr-limit = <32>;
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
index cdcea7654e3e..d13e1dbfe824 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
@@ -11,7 +11,7 @@
*/
#include "sdm660-pinctrl.dtsi"
-#include "sdm660-camera-sensor-mtp.dtsi"
+#include "sdm630-camera-sensor-mtp.dtsi"
/ {
mtp_batterydata: qcom,battery-data {
qcom,batt-id-range-pct = <15>;
@@ -56,6 +56,9 @@
};
&soc {
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
};
&pm660_fg {
diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts
new file mode 100644
index 000000000000..15936f47da7b
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-cdp.dts
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-cdp.dtsi"
+#include "msm-pm660a.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A, Headset Jacktype NO, CDP";
+ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp";
+ qcom,board-id = <1 2>;
+ qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-rcm.dts b/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-rcm.dts
new file mode 100644
index 000000000000..aeba192834f3
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-pm660a-headset-jacktype-no-rcm.dts
@@ -0,0 +1,26 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-cdp.dtsi"
+#include "msm-pm660a.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660A, Headset Jacktype NO, RCM";
+ compatible = "qcom,sdm630-cdp", "qcom,sdm630", "qcom,cdp";
+ qcom,board-id = <21 2>;
+ qcom,pmic-id = <0x0001001b 0x0001011a 0x0 0x0>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
index 59afb993fb83..52df459cedfb 100644
--- a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
@@ -10,8 +10,19 @@
* GNU General Public License for more details.
*/
+#include "msm-pm660a.dtsi"
#include "sdm660-pinctrl.dtsi"
/ {
+ qrd_batterydata: qcom,battery-data {
+ qcom,batt-id-range-pct = <15>;
+
+ #include "fg-gen3-batterydata-qrd-skuk-4v4-3000mah.dtsi"
+ };
+};
+
+&pm660_fg {
+ qcom,battery-data = <&qrd_batterydata>;
+ qcom,fg-jeita-thresholds = <0 5 55 55>;
};
&uartblsp1dm1 {
@@ -91,7 +102,7 @@
1 &intc 0 0 221 0
2 &tlmm 54 0>;
interrupt-names = "hc_irq", "pwr_irq", "status_irq";
- cd-gpios = <&tlmm 54 0x1>;
+ cd-gpios = <&tlmm 54 0x0>;
qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
200000000>;
@@ -100,5 +111,53 @@
status = "ok";
};
+&pm660l_gpios {
+ /* GPIO 7 for VOL_UP */
+ gpio@c600 {
+ status = "ok";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+};
+
+&pm660_gpios {
+ /* GPIO 11 for home key */
+ gpio@ca00 {
+ status = "ok";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+};
+
&soc {
+ gpio_keys {
+ compatible = "gpio-keys";
+ input-name = "gpio-keys";
+ status = "ok";
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pm660l_gpios 7 0x1>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+
+ home {
+ label = "home";
+ gpios = <&pm660_gpios 11 0x1>;
+ linux,input-type = <1>;
+ linux,code = <102>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
+
+ };
};
diff --git a/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi b/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi
index 13cf99ef0b40..c16c83050f31 100644
--- a/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi
@@ -383,7 +383,7 @@
rpm-regulator-ldob8 {
status = "okay";
pm660l_l8: regulator-l8 {
- regulator-min-microvolt = <3200000>;
+ regulator-min-microvolt = <2800000>;
regulator-max-microvolt = <3400000>;
status = "okay";
};
@@ -528,7 +528,7 @@
/* CPR controller regulators */
/* MMSS CPR Controller node */
gfx_cpr: cpr4-ctrl@05061000 {
- compatible = "qcom,cpr4-sdm660-mmss-ldo-regulator";
+ compatible = "qcom,cpr4-sdm630-mmss-ldo-regulator";
reg = <0x05061000 0x4000>, <0x00784000 0x1000>;
reg-names = "cpr_ctrl", "fuse_base";
clocks = <&clock_gpu GPUCC_RBCPR_CLK>,
@@ -681,20 +681,20 @@
regulator-min-microvolt = <1>;
regulator-max-microvolt = <8>;
- qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-corners = <3>;
qcom,cpr-fuse-combos = <24>;
qcom,cpr-speed-bins = <3>;
qcom,cpr-speed-bin-corners = <8 8 8>;
qcom,cpr-corners = <8>;
- qcom,cpr-corner-fmax-map = <2 4 5 8>;
+ qcom,cpr-corner-fmax-map = <2 4 8>;
qcom,cpr-voltage-ceiling =
- <724000 724000 724000 788000 868000
- 924000 988000 1068000>;
+ <724000 724000 788000 788000
+ 1068000 1068000 1068000 1068000>;
qcom,cpr-voltage-floor =
- <588000 588000 596000 652000 712000
- 744000 784000 844000>;
+ <588000 588000 596000 652000
+ 712000 744000 784000 844000>;
qcom,corner-frequencies =
<300000000 614400000 883200000
@@ -755,7 +755,7 @@
regulator-min-microvolt = <1>;
regulator-max-microvolt = <11>;
- qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-corners = <5>;
qcom,cpr-fuse-combos = <24>;
qcom,cpr-speed-bins = <3>;
qcom,cpr-speed-bin-corners = <10 10 11>;
@@ -771,29 +771,29 @@
qcom,cpr-corner-fmax-map =
/* Speed bin 0 */
- <2 4 6 10>,
+ <2 4 6 9 10>,
/* Speed bin 1 */
- <2 4 6 10>,
+ <2 4 6 9 10>,
/* Speed bin 2 */
- <2 4 6 11>;
+ <2 4 6 9 11>;
qcom,cpr-voltage-ceiling =
/* Speed bin 0 */
<724000 724000 724000 788000
- 868000 868000 924000 988000
+ 868000 868000 988000 988000
988000 1068000>,
/* Speed bin 1 */
<724000 724000 724000 788000
- 868000 868000 924000 988000
+ 868000 868000 988000 988000
988000 1068000>,
/* Speed bin 2 */
<724000 724000 724000 788000
- 868000 868000 924000 988000
- 988000 1068000 1140000>;
+ 868000 868000 988000 988000
+ 988000 1140000 1140000>;
qcom,cpr-voltage-floor =
/* Speed bin 0 */
@@ -828,7 +828,7 @@
<300000000 787200000 1113600000
1344000000 1516800000 1670400000
1881600000 2016000000 2150400000
- 2208000000 2515200000>;
+ 2380800000 2515200000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
diff --git a/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts
new file mode 100644
index 000000000000..eb089b524fef
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-mtp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660L, USBC Audio MTP";
+ compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp";
+ qcom,board-id = <8 2>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+ qcom,msm-mbhc-moist-cfg = <0>, <0>, <3>;
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 206628149f7d..cd895a067f65 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -52,6 +52,7 @@
reg = <0x0 0x100>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile0>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea0>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -80,6 +81,7 @@
reg = <0x0 0x101>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile1>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea1>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -102,6 +104,7 @@
reg = <0x0 0x102>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile2>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea2>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -124,6 +127,7 @@
reg = <0x0 0x103>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile3>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea3>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -146,6 +150,7 @@
reg = <0x0 0x0>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea4>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -174,6 +179,7 @@
reg = <0x0 0x1>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea5>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -196,6 +202,7 @@
reg = <0x0 0x2>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea6>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -218,6 +225,7 @@
reg = <0x0 0x3>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea7>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -712,6 +720,8 @@
qcom,synchronous-cluster-map = <0 4 &CPU4 &CPU5 &CPU6 &CPU7>,
<1 4 &CPU0 &CPU1 &CPU2 &CPU3>;
+ clock-names = "osm";
+ clocks = <&clock_cpu PERFCL_CLK>;
qcom,cxip-lm-enable = <0>;
qcom,vdd-restriction-temp = <5>;
qcom,vdd-restriction-temp-hysteresis = <10>;
@@ -800,7 +810,6 @@
};
wdog: qcom,wdt@17817000 {
- status = "disabled";
compatible = "qcom,msm-watchdog";
reg = <0x17817000 0x1000>;
reg-names = "wdt-base";
@@ -953,7 +962,7 @@
};
clock_mmss: clock-controller@c8c0000 {
- compatible = "qcom,mmcc-sdm660";
+ compatible = "qcom,mmcc-sdm630";
reg = <0xc8c0000 0x40000>;
vdd_mx_mmss-supply = <&pm660l_s5_level>;
vdd_dig_mmss-supply = <&pm660l_s3_level>;
@@ -1109,7 +1118,7 @@
qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,target-dev = <&memlat_cpu0>;
qcom,core-dev-table =
- < 787200 762 >,
+ < 1113600 762 >,
< 1344000 2086 >,
< 1670400 2929 >,
< 2150400 3879 >,
@@ -1144,8 +1153,8 @@
msm_cpufreq: qcom,msm-cpufreq {
compatible = "qcom,msm-cpufreq";
clock-names = "cpu0_clk", "cpu4_clk";
- clocks = <&clock_cpu PWRCL_CLK>,
- <&clock_cpu PERFCL_CLK>;
+ clocks = <&clock_cpu PERFCL_CLK>,
+ <&clock_cpu PWRCL_CLK>;
qcom,governor-per-policy;
@@ -1153,11 +1162,14 @@
< 787200 >,
< 1113600 >,
< 1344000 >,
+ < 1516800 >,
< 1670400 >,
< 1881600 >,
+ < 2016000 >,
< 2150400 >,
+ < 2208000 >,
< 2380800 >,
- < 2515000 >;
+ < 2515200 >;
qcom,cpufreq-table-4 =
< 614400 >,
@@ -1166,8 +1178,7 @@
< 1382400 >,
< 1536000 >,
< 1728000 >,
- < 1843200 >,
- < 2000000 >;
+ < 1843200 >;
};
ipa_hw: qcom,ipa@14780000 {
@@ -1247,13 +1258,15 @@
clock_cpu: qcom,clk-cpu-630@179c0000 {
compatible = "qcom,clk-cpu-osm-sdm630";
- status = "disabled";
reg = <0x179c0000 0x4000>, <0x17916000 0x1000>,
<0x17816000 0x1000>, <0x179d1000 0x1000>,
<0x00784130 0x8>;
reg-names = "osm", "pwrcl_pll", "perfcl_pll",
"apcs_common", "perfcl_efuse";
+ vdd-pwrcl-supply = <&apc0_pwrcl_vreg>;
+ vdd-perfcl-supply = <&apc1_perfcl_vreg>;
+
interrupts = <GIC_SPI 35 IRQ_TYPE_EDGE_RISING>,
<GIC_SPI 36 IRQ_TYPE_EDGE_RISING>;
interrupt-names = "pwrcl-irq", "perfcl-irq";
@@ -1593,11 +1606,13 @@
};
qcom,icnss@18800000 {
- status = "disabled";
compatible = "qcom,icnss";
reg = <0x18800000 0x800000>,
- <0x10ac000 0x20>;
- reg-names = "membase", "mpm_config";
+ <0xa0000000 0x10000000>,
+ <0xb0000000 0x10000>;
+ reg-names = "membase", "smmu_iova_base", "smmu_iova_ipa";
+ iommus = <&anoc2_smmu 0x1a00>,
+ <&anoc2_smmu 0x1a01>;
interrupts = <0 413 0>, /* CE0 */
<0 414 0>, /* CE1 */
<0 415 0>, /* CE2 */
@@ -1752,9 +1767,9 @@
reg = <0x10 8>;
};
- dload_type@18 {
+ dload_type@1c {
compatible = "qcom,msm-imem-dload-type";
- reg = <0x18 4>;
+ reg = <0x1c 4>;
};
restart_reason@65c {
@@ -2066,7 +2081,7 @@
#include "msm-audio.dtsi"
#include "sdm660-audio.dtsi"
#include "sdm630-gpu.dtsi"
-#include "sdm660-camera.dtsi"
+#include "sdm630-camera.dtsi"
#include "sdm630-pm.dtsi"
#include "sdm660-vidc.dtsi"
#include "sdm630-mdss.dtsi"
@@ -2139,10 +2154,34 @@
status = "ok";
};
+&clock_cpu {
+ lmh_dcvs0: qcom,limits-dcvs@0 {
+ compatible = "qcom,msm-hw-limits";
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ lmh_dcvs1: qcom,limits-dcvs@1 {
+ compatible = "qcom,msm-hw-limits";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
&blsp2_uart1_hs {
status = "ok";
};
+&pm660l_gpios {
+ /* GPIO 7 for VOL_UP */
+ gpio@c600 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+};
+
&soc {
gpio_keys {
status = "okay";
@@ -2167,5 +2206,14 @@
linux,code = <0x2fe>;
debounce-interval = <15>;
};
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pm660l_gpios 7 0x1>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
index e1244a497201..0b216a16c7e8 100644
--- a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
@@ -94,6 +94,20 @@
qcom,max-frequency = <24000000>;
qcom,mem-base-addr = <0x100000>;
};
+
+ wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl_usbc_audio_en1 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+ };
+
+ wcd_usbc_analog_en2n_gpio: msm_cdc_pinctrl_usbc_audio_en2 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2n_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2n_idle>;
+ };
};
};
@@ -102,6 +116,8 @@
status = "disabled";
compatible = "qcom,pmic-analog-codec";
reg = <0xf000 0x200>;
+ #address-cells = <2>;
+ #size-cells = <0>;
interrupt-parent = <&spmi_bus>;
interrupts = <0x3 0xf0 0x0 IRQ_TYPE_NONE>,
<0x3 0xf0 0x1 IRQ_TYPE_NONE>,
@@ -228,28 +244,24 @@
compatible = "qcom,wsa881x";
reg = <0x0 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
- qcom,cache-always;
};
wsa881x_212_en: wsa881x_en@20170212 {
compatible = "qcom,wsa881x";
reg = <0x0 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
- qcom,cache-always;
};
wsa881x_213_en: wsa881x_en@21170213 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
- qcom,cache-always;
};
wsa881x_214_en: wsa881x_en@21170214 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170214>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
- qcom,cache-always;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-camera.dtsi b/arch/arm/boot/dts/qcom/sdm660-camera.dtsi
index 16127bfccf35..10afca1c56ed 100644
--- a/arch/arm/boot/dts/qcom/sdm660-camera.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-camera.dtsi
@@ -19,6 +19,7 @@
status = "ok";
bus-vectors = "suspend", "svs", "nominal", "turbo";
qcom,bus-votes = <0 150000000 320000000 320000000>;
+ qcom,gpu-limit = <700000000>;
};
qcom,csiphy@c824000 {
@@ -529,7 +530,7 @@
"camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
"camss_vfe_vbif_axi_clk", "vfe_clk_src",
"camss_csi_vfe_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 256000000 0
+ qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 404000000 0
0 0 0 0 0 0 0 0 0 0 0 480000000 0
0 0 0 0 0 0 0 0 0 0 0 576000000 0>;
status = "ok";
@@ -609,7 +610,7 @@
"camss_vfe_ahb_clk", "camss_vfe_vbif_ahb_clk",
"camss_vfe_vbif_axi_clk", "vfe_clk_src",
"camss_csi_vfe_clk";
- qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 256000000 0
+ qcom,clock-rates = <0 0 0 0 0 0 0 0 0 0 0 404000000 0
0 0 0 0 0 0 0 0 0 0 0 480000000 0
0 0 0 0 0 0 0 0 0 0 0 576000000 0>;
status = "ok";
diff --git a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
index 1145bfa63cba..33303f1e2a74 100644
--- a/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-cdp.dtsi
@@ -24,11 +24,8 @@
&ufsphy1 {
vdda-phy-supply = <&pm660l_l1>;
vdda-pll-supply = <&pm660_l10>;
- vddp-ref-clk-supply = <&pm660_l1>;
vdda-phy-max-microamp = <51400>;
vdda-pll-max-microamp = <14200>;
- vddp-ref-clk-max-microamp = <100>;
- vddp-ref-clk-always-on;
status = "ok";
};
@@ -39,6 +36,9 @@
vccq2-supply = <&pm660_l8>;
vcc-max-microamp = <500000>;
vccq2-max-microamp = <600000>;
+ qcom,vddp-ref-clk-supply = <&pm660_l1>;
+ qcom,vddp-ref-clk-max-microamp = <100>;
+
status = "ok";
};
@@ -143,16 +143,12 @@
};
&dsi_dual_nt35597_video {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
&dsi_dual_nt35597_cmd {
- qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_wled";
- qcom,mdss-dsi-bl-min-level = <1>;
- qcom,mdss-dsi-bl-max-level = <4095>;
+ qcom,mdss-dsi-bl-pmic-control-type = "bl_ctrl_dcs";
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
qcom,partial-update-enabled = "single_roi";
qcom,panel-roi-alignment = <720 128 720 128 1440 128>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
index 05b7973f2457..dc57ee62a867 100644
--- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
@@ -194,6 +194,7 @@
tx-fifo-resize;
snps,nominal-elastic-buffer;
snps,disable-clk-gating;
+ snps,has-lpm-erratum;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
index e5cf0b1534ec..a47a788874fa 100644
--- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
@@ -119,8 +119,8 @@
vdd-supply = <&gdsc_gpu_gx>;
/* CPU latency parameter */
- qcom,pm-qos-active-latency = <349>;
- qcom,pm-qos-wakeup-latency = <349>;
+ qcom,pm-qos-active-latency = <518>;
+ qcom,pm-qos-wakeup-latency = <518>;
/* Quirks */
qcom,gpu-quirk-dp2clockgating-disable;
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
index 2f71f2407891..9c3862ce6a1f 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss-panels.dtsi
@@ -133,10 +133,6 @@
23 18 07 08 04 03 04 a0];
qcom,esd-check-enabled;
qcom,mdss-dsi-panel-status-check-mode = "bta_check";
- qcom,mdss-dsi-min-refresh-rate = <53>;
- qcom,mdss-dsi-max-refresh-rate = <60>;
- qcom,mdss-dsi-pan-enable-dynamic-fps;
- qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
};
&dsi_dual_nt35597_truly_cmd {
@@ -180,6 +176,8 @@
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
&dsi_nt35597_truly_dsc_cmd {
@@ -221,6 +219,8 @@
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
&dsi_nt35695b_truly_fhd_cmd {
@@ -229,6 +229,8 @@
24 1e 08 09 05 03 04 a0
24 1e 08 09 05 03 04 a0
24 1a 08 09 05 03 04 a0];
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
&dsi_truly_1080_vid {
@@ -241,6 +243,8 @@
qcom,mdss-dsi-max-refresh-rate = <60>;
qcom,mdss-dsi-pan-enable-dynamic-fps;
qcom,mdss-dsi-pan-fps-update = "dfps_immediate_porch_mode_vfp";
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
&dsi_truly_1080_cmd {
@@ -249,4 +253,6 @@
23 1e 08 09 05 03 04 a0
23 1e 08 09 05 03 04 a0
23 1a 08 09 05 03 04 a0];
+ qcom,esd-check-enabled;
+ qcom,mdss-dsi-panel-status-check-mode = "bta_check";
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
index 7f82a00e9f19..6e32c1282d3e 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
@@ -62,7 +62,7 @@
qcom,max-bandwidth-per-pipe-kbps = <3100000>;
qcom,max-clk-rate = <412500000>;
qcom,mdss-default-ot-rd-limit = <32>;
- qcom,mdss-default-ot-wr-limit = <40>;
+ qcom,mdss-default-ot-wr-limit = <32>;
qcom,mdss-dram-channels = <2>;
/* Bandwidth limit settings */
@@ -195,6 +195,19 @@
qcom,mdss-prefill-post-scaler-buffer-pixels = <2560>;
qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
+ qcom,mdss-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+
qcom,mdss-pp-offsets {
qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
qcom,mdss-sspp-vig-pcc-off = <0x1b00>;
@@ -578,7 +591,7 @@
qcom,mdss-rot-xin-id = <14 15>;
qcom,mdss-default-ot-rd-limit = <32>;
- qcom,mdss-default-ot-wr-limit = <40>;
+ qcom,mdss-default-ot-wr-limit = <32>;
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
index 23514467ba73..941fe808188c 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mtp.dtsi
@@ -29,11 +29,8 @@
&ufsphy1 {
vdda-phy-supply = <&pm660l_l1>;
vdda-pll-supply = <&pm660_l10>;
- vddp-ref-clk-supply = <&pm660_l1>;
vdda-phy-max-microamp = <51400>;
vdda-pll-max-microamp = <14200>;
- vddp-ref-clk-max-microamp = <100>;
- vddp-ref-clk-always-on;
status = "ok";
};
@@ -44,6 +41,9 @@
vccq2-supply = <&pm660_l8>;
vcc-max-microamp = <500000>;
vccq2-max-microamp = <600000>;
+ vddp-ref-clk-supply = <&pm660_l1>;
+ vddp-ref-clk-max-microamp = <100>;
+
status = "ok";
};
@@ -251,6 +251,6 @@
qcom,parallel-charger;
qcom,float-voltage-mv = <4400>;
qcom,recharge-mv = <100>;
- qcom,parallel-en-pin-polarity = <0>;
+ qcom,parallel-en-pin-polarity = <1>;
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
index 7ecab691dadf..c6e2f431a7c9 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
@@ -744,6 +744,66 @@
};
};
};
+ /* USB C analog configuration */
+ wcd_usbc_analog_en1 {
+ wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+ mux {
+ pins = "gpio80";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+ mux {
+ pins = "gpio80";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd_usbc_analog_en2n {
+ wcd_usbc_analog_en2n_idle: wcd_usbc_ana_en2n_idle {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ wcd_usbc_analog_en2n_active: wcd_usbc_ana_en2n_active {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+ };
sdw_clk_pin {
sdw_clk_sleep: sdw_clk_sleep {
diff --git a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
index 6a9fc5bde361..1624975028c5 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,20 +62,20 @@
reg = <0>;
label = "system-wfi";
qcom,psci-mode = <0x0>;
- qcom,latency-us = <100>;
- qcom,ss-power = <725>;
- qcom,energy-overhead = <85000>;
- qcom,time-overhead = <120>;
+ qcom,latency-us = <1654>;
+ qcom,ss-power = <219>;
+ qcom,energy-overhead = <98750>;
+ qcom,time-overhead = <2294>;
};
qcom,pm-cluster-level@1{ /* E3 */
reg = <1>;
label = "system-pc";
qcom,psci-mode = <0x3>;
- qcom,latency-us = <350>;
- qcom,ss-power = <530>;
- qcom,energy-overhead = <160000>;
- qcom,time-overhead = <550>;
+ qcom,latency-us = <4506>;
+ qcom,ss-power = <88>;
+ qcom,energy-overhead = <1228536>;
+ qcom,time-overhead = <15337>;
qcom,min-child-idx = <3>;
qcom,is-reset;
qcom,notify-rpm;
@@ -95,19 +95,19 @@
reg = <0>;
label = "pwr-l2-wfi";
qcom,psci-mode = <0x1>;
- qcom,latency-us = <40>;
- qcom,ss-power = <740>;
- qcom,energy-overhead = <65000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <51>;
+ qcom,ss-power = <250>;
+ qcom,energy-overhead = <83452>;
+ qcom,time-overhead = <89>;
};
qcom,pm-cluster-level@1{ /* D2D */
reg = <1>;
label = "pwr-l2-dynret";
qcom,psci-mode = <0x2>;
- qcom,latency-us = <60>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <85000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <421>;
+ qcom,ss-power = <235>;
+ qcom,energy-overhead = <219416>;
+ qcom,time-overhead = <781>;
qcom,min-child-idx = <1>;
};
@@ -115,10 +115,10 @@
reg = <2>;
label = "pwr-l2-ret";
qcom,psci-mode = <0x3>;
- qcom,latency-us = <100>;
- qcom,ss-power = <640>;
- qcom,energy-overhead = <135000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <517>;
+ qcom,ss-power = <226>;
+ qcom,energy-overhead= <299405>;
+ qcom,time-overhead = <922>;
qcom,min-child-idx = <2>;
};
@@ -126,10 +126,10 @@
reg = <3>;
label = "pwr-l2-pc";
qcom,psci-mode = <0x4>;
- qcom,latency-us = <700>;
- qcom,ss-power = <450>;
- qcom,energy-overhead = <210000>;
- qcom,time-overhead = <11500>;
+ qcom,latency-us = <2118>;
+ qcom,ss-power = <210>;
+ qcom,energy-overhead = <833056>;
+ qcom,time-overhead = <2918>;
qcom,min-child-idx = <2>;
qcom,is-reset;
};
@@ -144,30 +144,30 @@
reg = <0>;
qcom,spm-cpu-mode = "wfi";
qcom,psci-cpu-mode = <0x1>;
- qcom,latency-us = <20>;
- qcom,ss-power = <750>;
- qcom,energy-overhead = <32000>;
- qcom,time-overhead = <60>;
+ qcom,latency-us = <42>;
+ qcom,ss-power = <250>;
+ qcom,energy-overhead = <30562>;
+ qcom,time-overhead = <91>;
};
qcom,pm-cpu-level@1 { /* C2D */
reg = <1>;
qcom,psci-cpu-mode = <2>;
qcom,spm-cpu-mode = "ret";
- qcom,latency-us = <40>;
- qcom,ss-power = <730>;
- qcom,energy-overhead = <85500>;
- qcom,time-overhead = <110>;
+ qcom,latency-us = <63>;
+ qcom,ss-power = <245>;
+ qcom,energy-overhead = <49239>;
+ qcom,time-overhead = <172>;
};
qcom,pm-cpu-level@2 { /* C3 */
reg = <2>;
qcom,spm-cpu-mode = "pc";
qcom,psci-cpu-mode = <0x3>;
- qcom,latency-us = <80>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <126480>;
- qcom,time-overhead = <160>;
+ qcom,latency-us = <376>;
+ qcom,ss-power = <237>;
+ qcom,energy-overhead = <181018>;
+ qcom,time-overhead = <666>;
qcom,is-reset;
};
};
@@ -187,20 +187,20 @@
reg = <0>;
label = "perf-l2-wfi";
qcom,psci-mode = <0x1>;
- qcom,latency-us = <40>;
- qcom,ss-power = <740>;
- qcom,energy-overhead = <70000>;
- qcom,time-overhead = <80>;
+ qcom,latency-us = <51>;
+ qcom,ss-power = <283>;
+ qcom,energy-overhead = <83083>;
+ qcom,time-overhead = <89>;
};
qcom,pm-cluster-level@1{ /* D2D */
reg = <1>;
label = "perf-l2-dynret";
qcom,psci-mode = <2>;
- qcom,latency-us = <60>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <85000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <345>;
+ qcom,ss-power = <254>;
+ qcom,energy-overhead = <198349>;
+ qcom,time-overhead = <659>;
qcom,min-child-idx = <1>;
};
@@ -208,10 +208,10 @@
reg = <2>;
label = "perf-l2-ret";
qcom,psci-mode = <3>;
- qcom,latency-us = <100>;
- qcom,ss-power = <640>;
- qcom,energy-overhead = <135000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <419>;
+ qcom,ss-power = <244>;
+ qcom,energy-overhead = <281921>;
+ qcom,time-overhead = <737>;
qcom,min-child-idx = <2>;
};
@@ -219,10 +219,10 @@
reg = <3>;
label = "perf-l2-pc";
qcom,psci-mode = <0x4>;
- qcom,latency-us = <800>;
- qcom,ss-power = <450>;
- qcom,energy-overhead = <240000>;
- qcom,time-overhead = <11500>;
+ qcom,latency-us = <1654>;
+ qcom,ss-power = <219>;
+ qcom,energy-overhead = <815573>;
+ qcom,time-overhead = <2294>;
qcom,min-child-idx = <2>;
qcom,is-reset;
};
@@ -237,30 +237,30 @@
reg = <0>;
qcom,spm-cpu-mode = "wfi";
qcom,psci-cpu-mode = <0x1>;
- qcom,latency-us = <25>;
- qcom,ss-power = <750>;
- qcom,energy-overhead = <37000>;
- qcom,time-overhead = <50>;
+ qcom,latency-us = <39>;
+ qcom,ss-power = <292>;
+ qcom,energy-overhead = <37558>;
+ qcom,time-overhead = <68>;
};
qcom,pm-cpu-level@1 { /* C2D */
reg = <1>;
qcom,psci-cpu-mode = <2>;
qcom,spm-cpu-mode = "ret";
- qcom,latency-us = <40>;
- qcom,ss-power = <730>;
- qcom,energy-overhead = <85500>;
- qcom,time-overhead = <110>;
+ qcom,latency-us = <60>;
+ qcom,ss-power = <275>;
+ qcom,energy-overhead = <70737>;
+ qcom,time-overhead = <181>;
};
qcom,pm-cpu-level@2 { /* C3 */
reg = <2>;
qcom,spm-cpu-mode = "pc";
qcom,psci-cpu-mode = <0x3>;
- qcom,latency-us = <80>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <136480>;
- qcom,time-overhead = <160>;
+ qcom,latency-us = <324>;
+ qcom,ss-power = <263>;
+ qcom,energy-overhead = <213213>;
+ qcom,time-overhead = <621>;
qcom,is-reset;
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
index 49b58c8be8d5..b2f6ac722dfc 100644
--- a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
@@ -107,6 +107,9 @@
};
&soc {
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
};
&pm660_gpios {
@@ -235,6 +238,18 @@
qcom,fg-cutoff-voltage = <3700>;
};
+&i2c_2 {
+ status = "ok";
+ smb1351-charger@1d {
+ compatible = "qcom,smb1351-charger";
+ reg = <0x1d>;
+ qcom,parallel-charger;
+ qcom,float-voltage-mv = <4400>;
+ qcom,recharge-mv = <100>;
+ qcom,parallel-en-pin-polarity = <0>;
+ };
+};
+
&tasha_snd {
qcom,model = "sdm660-tasha-skus-snd-card";
qcom,audio-routing =
@@ -245,10 +260,10 @@
"MIC BIAS2", "Headset Mic",
"DMIC0", "MIC BIAS1",
"MIC BIAS1", "Digital Mic0",
- "DMIC2", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic2",
- "DMIC4", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic4",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic5",
"SpkrLeft IN", "SPK1 OUT";
qcom,msm-mbhc-hphl-swh = <1>;
/delete-property/ qcom,us-euro-gpios;
diff --git a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
index 44796eeb7059..47db57b3dc0a 100644
--- a/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-regulator.dtsi
@@ -672,6 +672,9 @@
qcom,cpr-panic-reg-name-list =
"PWR_CPRH_STATUS", "APCLUS0_L2_SAW4_PMIC_STS";
+ qcom,cpr-enable;
+ qcom,cpr-hw-closed-loop;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -707,6 +710,33 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-ro-scaling-factor =
+ <3600 3600 3830 2430 2520 2700 1790
+ 1760 1970 1880 2110 2010 2510 4900
+ 4370 4780>,
+ <3600 3600 3830 2430 2520 2700 1790
+ 1760 1970 1880 2110 2010 2510 4900
+ 4370 4780>,
+ <3600 3600 3830 2430 2520 2700 1790
+ 1760 1970 1880 2110 2010 2510 4900
+ 4370 4780>,
+ <3600 3600 3830 2430 2520 2700 1790
+ 1760 1970 1880 2110 2010 2510 4900
+ 4370 4780>,
+ <3600 3600 3830 2430 2520 2700 1790
+ 1760 1970 1880 2110 2010 2510 4900
+ 4370 4780>;
+
+ qcom,cpr-open-loop-voltage-fuse-adjustment =
+ <13000 31000 27000 37000 21000>;
+
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <0 0 0 0 8000>;
+
+ qcom,cpr-floor-to-ceiling-max-range =
+ <32000 32000 32000 40000 40000
+ 40000 40000 40000>;
};
};
};
@@ -747,6 +777,9 @@
qcom,cpr-panic-reg-name-list =
"PERF_CPRH_STATUS", "APCLUS1_L2_SAW4_PMIC_STS";
+ qcom,cpr-enable;
+ qcom,cpr-hw-closed-loop;
+
thread@0 {
qcom,cpr-thread-id = <0>;
qcom,cpr-consecutive-up = <0>;
@@ -788,6 +821,33 @@
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
qcom,cpr-scaled-open-loop-voltage-as-ceiling;
+
+ qcom,cpr-ro-scaling-factor =
+ <4040 4230 0000 2210 2560 2450 2230
+ 2220 2410 2300 2560 2470 1600 3120
+ 2620 2280>,
+ <4040 4230 0000 2210 2560 2450 2230
+ 2220 2410 2300 2560 2470 1600 3120
+ 2620 2280>,
+ <4040 4230 0000 2210 2560 2450 2230
+ 2220 2410 2300 2560 2470 1600 3120
+ 2620 2280>,
+ <4040 4230 0000 2210 2560 2450 2230
+ 2220 2410 2300 2560 2470 1600 3120
+ 2620 2280>,
+ <4040 4230 0000 2210 2560 2450 2230
+ 2220 2410 2300 2560 2470 1600 3120
+ 2620 2280>;
+
+ qcom,cpr-open-loop-voltage-fuse-adjustment =
+ <31000 39000 53000 40000 29000>;
+
+ qcom,cpr-closed-loop-voltage-fuse-adjustment =
+ <0 3000 19000 23000 28000>;
+
+ qcom,cpr-floor-to-ceiling-max-range =
+ <40000 40000 40000 40000
+ 40000 40000 40000>;
};
};
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts
new file mode 100644
index 000000000000..dff55d8e9cf8
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm660.dtsi"
+#include "sdm660-mtp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660L, USBC Audio MTP";
+ compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp";
+ qcom,board-id = <8 2>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+ qcom,msm-mbhc-moist-cfg = <0>, <0>, <3>;
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi b/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi
index 82e6a5be4d10..b1ca93b9f613 100644
--- a/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-vidc.dtsi
@@ -28,6 +28,7 @@
qcom,firmware-name = "venus";
qcom,never-unload-fw;
qcom,sw-power-collapse;
+ qcom,max-secure-instances = <5>;
qcom,reg-presets =
<0x80010 0x001f001f>,
<0x80018 0x00000156>,
@@ -43,7 +44,7 @@
qcom,dcvs-tbl =
/* Dec UHD@30 All decoder - NOM to SVS+ */
- <897600 734400 979200 0x3f00000c>,
+ <897600 783360 979200 0x3f00000c>,
/* Dec DCI@24 HEVC - NOM to SVS+ */
<816000 734400 829440 0x0c000000>,
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index 8633d89821ec..2d44c69ea827 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -152,7 +152,7 @@
qcom,limits-info = <&mitigation_profile1>;
qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea4>;
- efficiency = <1536>;
+ efficiency = <1638>;
next-level-cache = <&L2_1>;
L2_1: l2-cache {
compatible = "arm,arch-cache";
@@ -179,7 +179,7 @@
qcom,limits-info = <&mitigation_profile2>;
qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea5>;
- efficiency = <1536>;
+ efficiency = <1638>;
next-level-cache = <&L2_1>;
L1_I_101: l1-icache {
compatible = "arm,arch-cache";
@@ -202,7 +202,7 @@
qcom,limits-info = <&mitigation_profile3>;
qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea6>;
- efficiency = <1536>;
+ efficiency = <1638>;
next-level-cache = <&L2_1>;
L1_I_102: l1-icache {
compatible = "arm,arch-cache";
@@ -225,7 +225,7 @@
qcom,limits-info = <&mitigation_profile4>;
qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea7>;
- efficiency = <1536>;
+ efficiency = <1638>;
next-level-cache = <&L2_1>;
L1_I_103: l1-icache {
compatible = "arm,arch-cache";
@@ -320,10 +320,16 @@
reg = <0x0 0x92a00000 0x0 0x1e00000>;
};
- cdsp_fw_mem: cdsp_fw_region@94800000 {
+ pil_mba_mem: pil_mba_region@94800000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0x0 0x94800000 0x0 0x600000>;
+ reg = <0x0 0x94800000 0x0 0x200000>;
+ };
+
+ cdsp_fw_mem: cdsp_fw_region@94a00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x94a00000 0x0 0x600000>;
};
venus_fw_mem: venus_fw_region {
@@ -619,7 +625,7 @@
interrupts = <0 184 0>, <0 430 0>;
interrupt-names = "tsens-upper-lower", "tsens-critical";
qcom,client-id = <0 1 2 3 4 5 6 7 8 9 10 11 12 13>;
- qcom,sensor-id = <0 10 11 4 5 6 7 8 13 1 3 12 9 2>;
+ qcom,sensor-id = <0 10 11 4 5 6 7 8 13 2 3 12 9 1>;
qcom,sensors = <14>;
qcom,slope = <3200 3200 3200 3200 3200 3200 3200 3200
3200 3200 3200 3200 3200 3200>;
@@ -1166,7 +1172,7 @@
qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,target-dev = <&memlat_cpu0>;
qcom,core-dev-table =
- < 633600 762 >,
+ < 902400 762 >,
< 1401600 2086 >,
< 1881600 3879 >;
};
@@ -1966,6 +1972,10 @@
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
status = "ok";
+ qcom,mba-mem@0 {
+ compatible = "qcom,pil-mba-mem";
+ memory-region = <&pil_mba_mem>;
+ };
};
qcom,msm-rtb {
@@ -1991,9 +2001,9 @@
reg = <0x10 8>;
};
- dload_type@18 {
+ dload_type@1c {
compatible = "qcom,msm-imem-dload-type";
- reg = <0x18 4>;
+ reg = <0x1c 4>;
};
restart_reason@65c {
@@ -2219,6 +2229,7 @@
<0 0>;
lanes-per-direction = <1>;
+ spm-level = <5>;
qcom,msm-bus,name = "ufs1";
qcom,msm-bus,num-cases = <12>;
diff --git a/arch/arm/configs/msmcortex_defconfig b/arch/arm/configs/msmcortex_defconfig
index 2cfe647855c3..1658cc992ee6 100644
--- a/arch/arm/configs/msmcortex_defconfig
+++ b/arch/arm/configs/msmcortex_defconfig
@@ -217,6 +217,7 @@ CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=40
CONFIG_ZRAM=y
diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig
index 7df369de029a..515aab3ccefb 100644
--- a/arch/arm/configs/sdm660-perf_defconfig
+++ b/arch/arm/configs/sdm660-perf_defconfig
@@ -62,6 +62,7 @@ CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
@@ -509,6 +510,7 @@ CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
@@ -619,6 +621,8 @@ CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_QPDI=y
@@ -637,4 +641,5 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig
index 3a757a7dc7b5..574f3808704b 100644
--- a/arch/arm/configs/sdm660_defconfig
+++ b/arch/arm/configs/sdm660_defconfig
@@ -61,6 +61,7 @@ CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
CONFIG_BUILD_ARM_APPENDED_DTB_IMAGE=y
@@ -509,6 +510,7 @@ CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
@@ -679,4 +681,5 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_XZ_DEC=y
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 6236b3e1297c..a96db6bdaf4b 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -122,6 +122,9 @@ config KERNEL_TEXT_RDONLY
If unsure, say N.
+config ARM64_STRICT_BREAK_BEFORE_MAKE
+ bool "Enforce strict break-before-make on page table updates "
+
source "drivers/hwtracing/coresight/Kconfig"
endmenu
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index f64ca9a07c6e..5c07a8b80354 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -287,6 +287,8 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
@@ -628,6 +630,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index dd1c4ffe3dc5..f5b163eaec0b 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -289,6 +289,8 @@ CONFIG_INPUT_TOUCHSCREEN=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_CORE_v21=y
CONFIG_TOUCHSCREEN_SYNAPTICS_DSX_FW_UPDATE_v21=y
CONFIG_SECURE_TOUCH=y
+CONFIG_TOUCHSCREEN_ST=y
+CONFIG_TOUCHSCREEN_ST_I2C=y
CONFIG_INPUT_MISC=y
CONFIG_INPUT_HBTP_INPUT=y
CONFIG_INPUT_QPNP_POWER_ON=y
@@ -548,7 +550,6 @@ CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
CONFIG_ICNSS=y
-CONFIG_ICNSS_DEBUG=y
CONFIG_MSM_GLADIATOR_ERP_V2=y
CONFIG_PANIC_ON_GLADIATOR_ERROR_V2=y
CONFIG_MSM_GLADIATOR_HANG_DETECT=y
@@ -667,6 +668,7 @@ CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
@@ -695,6 +697,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig
index 967edf184f1b..27055fc4b9d5 100644
--- a/arch/arm64/configs/msmcortex_mediabox_defconfig
+++ b/arch/arm64/configs/msmcortex_mediabox_defconfig
@@ -349,7 +349,6 @@ CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_WCD9335_CODEC=y
CONFIG_WCD934X_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_QPNP=y
@@ -408,14 +407,8 @@ CONFIG_DVB_MPQ_DEMUX=m
CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX=y
CONFIG_TSPP=m
CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_DP_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_DRM=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -497,6 +490,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -545,7 +539,6 @@ CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -608,7 +601,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
CONFIG_ECRYPT_FS=y
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
index d00772c12626..b0bdabf58d62 100644
--- a/arch/arm64/configs/sdm660-perf_defconfig
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -527,6 +527,7 @@ CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
@@ -627,6 +628,8 @@ CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_QPDI=y
@@ -645,6 +648,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig
index af8902054b8a..229de3920276 100644
--- a/arch/arm64/configs/sdm660_defconfig
+++ b/arch/arm64/configs/sdm660_defconfig
@@ -59,6 +59,7 @@ CONFIG_CLEANCACHE=y
CONFIG_CMA=y
CONFIG_CMA_DEBUGFS=y
CONFIG_ZSMALLOC=y
+CONFIG_BALANCE_ANON_FILE_RECLAIM=y
CONFIG_FORCE_ALLOC_FROM_DMA_ZONE=y
CONFIG_PROCESS_RECLAIM=y
CONFIG_SECCOMP=y
@@ -232,6 +233,7 @@ CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -539,6 +541,7 @@ CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
@@ -713,6 +716,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9a09ccf7122d..72b1c3fa1576 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -19,6 +19,7 @@
#include <asm/bug.h>
#include <asm/proc-fns.h>
+#include <asm/bug.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -229,6 +230,34 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
+#ifdef CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE
+ pteval_t old = pte_val(*ptep);
+ pteval_t new = pte_val(pte);
+
+ /* Only problematic if valid -> valid */
+ if (!(old & new & PTE_VALID))
+ goto pte_ok;
+
+ /* Changing attributes should go via an invalid entry */
+ if (WARN_ON((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK)))
+ goto pte_bad;
+
+ /* Change of OA is only an issue if one mapping is writable */
+ if (!(old & new & PTE_RDONLY) &&
+ WARN_ON(pte_pfn(*ptep) != pte_pfn(pte)))
+ goto pte_bad;
+
+ goto pte_ok;
+
+pte_bad:
+ *ptep = __pte(0);
+ dsb(ishst);
+ asm("tlbi vmalle1is");
+ dsb(ish);
+ isb();
+pte_ok:
+#endif
+
*ptep = pte;
/*
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 85aea381fbf6..cb3eec8e8e50 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -69,6 +69,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 8);
+ kasan_enable_current();
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk && tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
@@ -112,8 +114,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
}
}
- kasan_enable_current();
-
return 0;
}
diff --git a/certs/Makefile b/certs/Makefile
index 28ac694dd11a..f30d601f410c 100644
--- a/certs/Makefile
+++ b/certs/Makefile
@@ -17,6 +17,10 @@ AFLAGS_system_certificates.o := -I$(srctree)
quiet_cmd_extract_certs = EXTRACT_CERTS $(patsubst "%",%,$(2))
cmd_extract_certs = scripts/extract-cert $(2) $@ || ( rm $@; exit 1)
+ifeq ($(CONFIG_SYSTEM_TRUSTED_KEYS),"verity.x509.pem")
+SYSTEM_TRUSTED_KEYS_SRCPREFIX := $(srctree)/certs/
+endif
+
targets += x509_certificate_list
$(obj)/x509_certificate_list: scripts/extract-cert $(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(SYSTEM_TRUSTED_KEYS_FILENAME) FORCE
$(call if_changed,extract_certs,$(SYSTEM_TRUSTED_KEYS_SRCPREFIX)$(CONFIG_SYSTEM_TRUSTED_KEYS))
diff --git a/certs/verity.x509.pem b/certs/verity.x509.pem
new file mode 100644
index 000000000000..86399c3c1dd7
--- /dev/null
+++ b/certs/verity.x509.pem
@@ -0,0 +1,24 @@
+-----BEGIN CERTIFICATE-----
+MIID/TCCAuWgAwIBAgIJAJcPmDkJqolJMA0GCSqGSIb3DQEBBQUAMIGUMQswCQYD
+VQQGEwJVUzETMBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4g
+VmlldzEQMA4GA1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UE
+AwwHQW5kcm9pZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTAe
+Fw0xNDExMDYxOTA3NDBaFw00MjAzMjQxOTA3NDBaMIGUMQswCQYDVQQGEwJVUzET
+MBEGA1UECAwKQ2FsaWZvcm5pYTEWMBQGA1UEBwwNTW91bnRhaW4gVmlldzEQMA4G
+A1UECgwHQW5kcm9pZDEQMA4GA1UECwwHQW5kcm9pZDEQMA4GA1UEAwwHQW5kcm9p
+ZDEiMCAGCSqGSIb3DQEJARYTYW5kcm9pZEBhbmRyb2lkLmNvbTCCASIwDQYJKoZI
+hvcNAQEBBQADggEPADCCAQoCggEBAOjreE0vTVSRenuzO9vnaWfk0eQzYab0gqpi
+6xAzi6dmD+ugoEKJmbPiuE5Dwf21isZ9uhUUu0dQM46dK4ocKxMRrcnmGxydFn6o
+fs3ODJMXOkv2gKXL/FdbEPdDbxzdu8z3yk+W67udM/fW7WbaQ3DO0knu+izKak/3
+T41c5uoXmQ81UNtAzRGzGchNVXMmWuTGOkg6U+0I2Td7K8yvUMWhAWPPpKLtVH9r
+AL5TzjYNR92izdKcz3AjRsI3CTjtpiVABGeX0TcjRSuZB7K9EK56HV+OFNS6I1NP
+jdD7FIShyGlqqZdUOkAUZYanbpgeT5N7QL6uuqcGpoTOkalu6kkCAwEAAaNQME4w
+HQYDVR0OBBYEFH5DM/m7oArf4O3peeKO0ZIEkrQPMB8GA1UdIwQYMBaAFH5DM/m7
+oArf4O3peeKO0ZIEkrQPMAwGA1UdEwQFMAMBAf8wDQYJKoZIhvcNAQEFBQADggEB
+AHO3NSvDE5jFvMehGGtS8BnFYdFKRIglDMc4niWSzhzOVYRH4WajxdtBWc5fx0ix
+NF/+hVKVhP6AIOQa+++sk+HIi7RvioPPbhjcsVlZe7cUEGrLSSveGouQyc+j0+m6
+JF84kszIl5GGNMTnx0XRPO+g8t6h5LWfnVydgZfpGRRg+WHewk1U2HlvTjIceb0N
+dcoJ8WKJAFWdcuE7VIm4w+vF/DYX/A2Oyzr2+QRhmYSv1cusgAeC1tvH4ap+J1Lg
+UnOu5Kh/FqPLLSwNVQp4Bu7b9QFfqK8Moj84bj88NqRGZgDyqzuTrFxn6FW7dmyA
+yttuAJAEAymk1mipd9+zp38=
+-----END CERTIFICATE-----
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
index 87c743995a50..5fa3c6bdeea0 100644
--- a/drivers/base/dma-removed.c
+++ b/drivers/base/dma-removed.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2000-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
@@ -296,6 +296,7 @@ void removed_free(struct device *dev, size_t size, void *cpu_addr,
attrs);
struct removed_region *dma_mem = dev->removed_mem;
+ size = PAGE_ALIGN(size);
if (!no_kernel_mapping)
iounmap(cpu_addr);
mutex_lock(&dma_mem->lock);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 87a48268b663..1c6e4da01e69 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -307,8 +307,7 @@ static const char * const fw_path[] = {
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
- "/lib/firmware",
- "/firmware/image"
+ "/lib/firmware"
};
/*
diff --git a/drivers/base/regmap/Kconfig b/drivers/base/regmap/Kconfig
index 5b83a06ee1a9..45fc564bf949 100644
--- a/drivers/base/regmap/Kconfig
+++ b/drivers/base/regmap/Kconfig
@@ -36,3 +36,8 @@ config REGMAP_SWR
config REGMAP_ALLOW_WRITE_DEBUGFS
depends on REGMAP && DEBUG_FS
bool "Allow REGMAP debugfs write"
+ default n
+ help
+ Say 'y' here to allow the regmap debugfs write. Regmap debugfs write
+ could be risky when accessing some essential hardwares, so it is not
+ recommended to enable this option on any production device.
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 72d088b1863c..3c59417b485a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -521,7 +521,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
goto out_error;
}
- meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
+ meta->mem_pool = zs_create_pool(pool_name);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
goto out_error;
@@ -725,7 +725,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = uncmem;
}
- handle = zs_malloc(meta->mem_pool, clen);
+ handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (!handle) {
if (printk_timed_ratelimit(&zram_rs_time,
ALLOC_ERROR_LOG_RATE_MS))
diff --git a/drivers/bluetooth/btfm_slim.c b/drivers/bluetooth/btfm_slim.c
index 37cc6284cf97..969f755f5dc4 100644
--- a/drivers/bluetooth/btfm_slim.c
+++ b/drivers/bluetooth/btfm_slim.c
@@ -127,7 +127,7 @@ int btfm_slim_enable_ch(struct btfmslim *btfmslim, struct btfmslim_ch *ch,
if (!btfmslim || !ch)
return -EINVAL;
- BTFMSLIM_DBG("port:%d", ch->port);
+ BTFMSLIM_DBG("port: %d ch: %d", ch->port, ch->ch);
/* Define the channel with below parameters */
prop.prot = SLIM_AUTO_ISO;
diff --git a/drivers/bluetooth/btfm_slim.h b/drivers/bluetooth/btfm_slim.h
index dbb4c563d802..5d105fba2193 100644
--- a/drivers/bluetooth/btfm_slim.h
+++ b/drivers/bluetooth/btfm_slim.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -13,7 +13,7 @@
#define BTFM_SLIM_H
#include <linux/slimbus/slimbus.h>
-#define BTFMSLIM_DBG(fmt, arg...) pr_debug("%s: " fmt "\n", __func__, ## arg)
+#define BTFMSLIM_DBG(fmt, arg...) pr_debug(fmt "\n", ## arg)
#define BTFMSLIM_INFO(fmt, arg...) pr_info("%s: " fmt "\n", __func__, ## arg)
#define BTFMSLIM_ERR(fmt, arg...) pr_err("%s: " fmt "\n", __func__, ## arg)
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index d7d24ff07801..061177173ab8 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,8 +54,8 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
int ret;
struct btfmslim *btfmslim = dai->dev->platform_data;
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
+ BTFMSLIM_DBG("substream = %s stream = %d dai->name = %s",
+ substream->name, substream->stream, dai->name);
ret = btfm_slim_hw_init(btfmslim);
return ret;
}
@@ -63,33 +63,12 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct btfmslim *btfmslim = dai->dev->platform_data;
-
- BTFMSLIM_DBG("substream = %s stream = %d",
- substream->name, substream->stream);
- btfm_slim_hw_deinit(btfmslim);
-}
-
-static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- BTFMSLIM_DBG("dai_name = %s DAI-ID %x rate %d num_ch %d",
- dai->name, dai->id, params_rate(params),
- params_channels(params));
-
- return 0;
-}
-
-int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- int i, ret = -EINVAL;
+ int i;
struct btfmslim *btfmslim = dai->dev->platform_data;
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -110,7 +89,7 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
case BTFM_SLIM_NUM_CODEC_DAIS:
default:
BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
- return ret;
+ return;
}
/* Search for dai->id matched port handler */
@@ -122,14 +101,25 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
BTFMSLIM_ERR("ch is invalid!!");
- return ret;
+ return;
}
- ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
- return ret;
+ btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+ btfm_slim_hw_deinit(btfmslim);
+}
+
+static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ BTFMSLIM_DBG("dai->name = %s DAI-ID %x rate %d num_ch %d",
+ dai->name, dai->id, params_rate(params),
+ params_channels(params));
+
+ return 0;
}
-int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
+int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int i, ret = -EINVAL;
@@ -137,7 +127,7 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai->name:%s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -172,7 +162,8 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
BTFMSLIM_ERR("ch is invalid!!");
return ret;
}
- ret = btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+
+ ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
return ret;
}
@@ -315,7 +306,6 @@ static struct snd_soc_dai_ops btfmslim_dai_ops = {
.shutdown = btfm_slim_dai_shutdown,
.hw_params = btfm_slim_dai_hw_params,
.prepare = btfm_slim_dai_prepare,
- .hw_free = btfm_slim_dai_hw_free,
.set_channel_map = btfm_slim_dai_set_channel_map,
.get_channel_map = btfm_slim_dai_get_channel_map,
};
@@ -384,7 +374,7 @@ static struct snd_soc_dai_driver btfmslim_dai[] = {
static struct snd_soc_codec_driver btfmslim_codec = {
.probe = btfm_slim_codec_probe,
.remove = btfm_slim_codec_remove,
- .read = btfm_slim_codec_read,
+ .read = btfm_slim_codec_read,
.write = btfm_slim_codec_write,
};
diff --git a/drivers/bluetooth/btfm_slim_wcn3990.c b/drivers/bluetooth/btfm_slim_wcn3990.c
index 7d7bd2441c6e..72e28da4bd3b 100644
--- a/drivers/bluetooth/btfm_slim_wcn3990.c
+++ b/drivers/bluetooth/btfm_slim_wcn3990.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -76,7 +76,7 @@ int btfm_slim_chrk_enable_port(struct btfmslim *btfmslim, uint8_t port_num,
uint8_t reg_val = 0;
uint16_t reg;
- BTFMSLIM_DBG("enable(%d)", enable);
+ BTFMSLIM_DBG("port(%d) enable(%d)", port_num, enable);
if (rxport) {
/* Port enable */
reg = CHRK_SB_PGD_PORT_RX_CFGN(port_num - 0x10);
diff --git a/drivers/char/diag/diag_dci.c b/drivers/char/diag/diag_dci.c
index 2c7662a24cd1..4748012a37cd 100644
--- a/drivers/char/diag/diag_dci.c
+++ b/drivers/char/diag/diag_dci.c
@@ -133,6 +133,35 @@ void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
void diag_dci_record_traffic(int read_bytes, uint8_t ch_type,
uint8_t peripheral, uint8_t proc) { }
#endif
+
+static int check_peripheral_dci_support(int peripheral_id, int dci_proc_id)
+{
+ int dci_peripheral_list = 0;
+
+ if (dci_proc_id < 0 || dci_proc_id >= NUM_DCI_PROC) {
+ pr_err("diag:In %s,not a supported DCI proc id\n", __func__);
+ return 0;
+ }
+ if (peripheral_id < 0 || peripheral_id >= NUM_PERIPHERALS) {
+ pr_err("diag:In %s,not a valid peripheral id\n", __func__);
+ return 0;
+ }
+ dci_peripheral_list = dci_ops_tbl[dci_proc_id].peripheral_status;
+
+ if (dci_peripheral_list <= 0 || dci_peripheral_list > DIAG_CON_ALL) {
+ pr_err("diag:In %s,not a valid dci peripheral mask\n",
+ __func__);
+ return 0;
+ }
+ /* Remove APSS bit mask information */
+ dci_peripheral_list = dci_peripheral_list >> 1;
+
+ if ((1 << peripheral_id) & (dci_peripheral_list))
+ return 1;
+ else
+ return 0;
+}
+
static void create_dci_log_mask_tbl(unsigned char *mask, uint8_t dirty)
{
unsigned char *temp = mask;
@@ -1433,7 +1462,7 @@ void diag_dci_channel_open_work(struct work_struct *work)
void diag_dci_notify_client(int peripheral_mask, int data, int proc)
{
- int stat;
+ int stat = 0;
struct siginfo info;
struct list_head *start, *temp;
struct diag_dci_client_tbl *entry = NULL;
@@ -2372,10 +2401,12 @@ int diag_send_dci_event_mask(int token)
* is down. It may also mean that the peripheral doesn't
* support DCI.
*/
- err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
- header_size + DCI_EVENT_MASK_SIZE);
- if (err != DIAG_DCI_NO_ERROR)
- ret = DIAG_DCI_SEND_DATA_FAIL;
+ if (check_peripheral_dci_support(i, DCI_LOCAL_PROC)) {
+ err = diag_dci_write_proc(i, DIAG_CNTL_TYPE, buf,
+ header_size + DCI_EVENT_MASK_SIZE);
+ if (err != DIAG_DCI_NO_ERROR)
+ ret = DIAG_DCI_SEND_DATA_FAIL;
+ }
}
mutex_unlock(&event_mask.lock);
@@ -2557,11 +2588,13 @@ int diag_send_dci_log_mask(int token)
}
write_len = dci_fill_log_mask(buf, log_mask_ptr);
for (j = 0; j < NUM_PERIPHERALS && write_len; j++) {
- err = diag_dci_write_proc(j, DIAG_CNTL_TYPE, buf,
- write_len);
- if (err != DIAG_DCI_NO_ERROR) {
- updated = 0;
- ret = DIAG_DCI_SEND_DATA_FAIL;
+ if (check_peripheral_dci_support(j, DCI_LOCAL_PROC)) {
+ err = diag_dci_write_proc(j, DIAG_CNTL_TYPE,
+ buf, write_len);
+ if (err != DIAG_DCI_NO_ERROR) {
+ updated = 0;
+ ret = DIAG_DCI_SEND_DATA_FAIL;
+ }
}
}
if (updated)
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index facdb0f40e44..335064352789 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -2606,7 +2606,8 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
info = diag_md_session_get_pid(current->tgid);
ret = diag_process_apps_pkt(user_space_data, len, info);
if (ret == 1)
- diag_send_error_rsp((void *)(user_space_data), len);
+ diag_send_error_rsp((void *)(user_space_data), len,
+ info);
}
fail:
diagmem_free(driver, user_space_data, mempool);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index a7069bc0edf3..99a16dd47cd4 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -239,10 +239,11 @@ void chk_logging_wakeup(void)
}
}
-static void pack_rsp_and_send(unsigned char *buf, int len)
+static void pack_rsp_and_send(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
int err;
- int retry_count = 0;
+ int retry_count = 0, i, rsp_ctxt;
uint32_t write_len = 0;
unsigned long flags;
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
@@ -257,6 +258,15 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
return;
}
+ if (info && info->peripheral_mask) {
+ for (i = 0; i <= NUM_PERIPHERALS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+
/*
* Keep trying till we get the buffer back. It should probably
* take one or two iterations. When this loops till UINT_MAX, it
@@ -298,8 +308,7 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
*(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
write_len += sizeof(uint8_t);
- err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len,
- driver->rsp_buf_ctxt);
+ err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len, rsp_ctxt);
if (err) {
pr_err("diag: In %s, unable to write to mux, err: %d\n",
__func__, err);
@@ -309,12 +318,13 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
}
}
-static void encode_rsp_and_send(unsigned char *buf, int len)
+static void encode_rsp_and_send(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
- int err, retry_count = 0;
+ int err, i, rsp_ctxt, retry_count = 0;
unsigned long flags;
if (!rsp_ptr || !buf)
@@ -326,6 +336,15 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
return;
}
+ if (info && info->peripheral_mask) {
+ for (i = 0; i <= NUM_PERIPHERALS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+
/*
* Keep trying till we get the buffer back. It should probably
* take one or two iterations. When this loops till UINT_MAX, it
@@ -369,7 +388,7 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
diag_hdlc_encode(&send, &enc);
driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
- driver->rsp_buf_ctxt);
+ rsp_ctxt);
if (err) {
pr_err("diag: In %s, Unable to write to device, err: %d\n",
__func__, err);
@@ -380,21 +399,22 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
memset(buf, '\0', DIAG_MAX_RSP_SIZE);
}
-void diag_send_rsp(unsigned char *buf, int len)
+void diag_send_rsp(unsigned char *buf, int len, struct diag_md_session_t *info)
{
struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled;
- session_info = diag_md_session_get_peripheral(APPS_DATA);
+ session_info = (info) ? info :
+ diag_md_session_get_peripheral(APPS_DATA);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled)
- pack_rsp_and_send(buf, len);
+ pack_rsp_and_send(buf, len, session_info);
else
- encode_rsp_and_send(buf, len);
+ encode_rsp_and_send(buf, len, session_info);
}
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
@@ -865,7 +885,8 @@ static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
return write_len;
}
-void diag_send_error_rsp(unsigned char *buf, int len)
+void diag_send_error_rsp(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
/* -1 to accomodate the first byte 0x13 */
if (len > (DIAG_MAX_RSP_SIZE - 1)) {
@@ -875,7 +896,7 @@ void diag_send_error_rsp(unsigned char *buf, int len)
*(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
- diag_send_rsp(driver->apps_rsp_buf, len + 1);
+ diag_send_rsp(driver->apps_rsp_buf, len + 1, info);
}
int diag_process_apps_pkt(unsigned char *buf, int len,
@@ -895,7 +916,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
/* Check if the command is a supported mask command */
mask_ret = diag_process_apps_masks(buf, len, info);
if (mask_ret > 0) {
- diag_send_rsp(driver->apps_rsp_buf, mask_ret);
+ diag_send_rsp(driver->apps_rsp_buf, mask_ret, info);
return 0;
}
@@ -917,7 +938,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
@@ -933,7 +954,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
} else {
if (MD_PERIPHERAL_MASK(reg_item->proc) &
driver->logging_mask)
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
else
write_len = diag_send_data(reg_item, buf, len);
}
@@ -949,13 +970,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 4; i++)
*(driver->apps_rsp_buf+i) = *(buf+i);
*(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
- diag_send_rsp(driver->apps_rsp_buf, 8);
+ diag_send_rsp(driver->apps_rsp_buf, 8, info);
return 0;
} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
if (len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, len);
+ diag_send_rsp(driver->apps_rsp_buf, len, info);
return 0;
}
return len;
@@ -968,7 +989,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
/* Check for time sync switch command */
@@ -979,14 +1000,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
/* Check for download command */
else if ((chk_apps_master()) && (*buf == 0x3A)) {
/* send response back */
driver->apps_rsp_buf[0] = *buf;
- diag_send_rsp(driver->apps_rsp_buf, 1);
+ diag_send_rsp(driver->apps_rsp_buf, 1, info);
msleep(5000);
/* call download API */
msm_set_restart_mode(RESTART_DLOAD);
@@ -1006,7 +1027,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 13; i++)
driver->apps_rsp_buf[i+3] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 16);
+ diag_send_rsp(driver->apps_rsp_buf, 16, info);
return 0;
}
}
@@ -1015,7 +1036,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
(*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_enabled;
- diag_send_rsp(driver->apps_rsp_buf, 5);
+ diag_send_rsp(driver->apps_rsp_buf, 5, info);
return 0;
}
/* Wrap the Delayed Rsp ID */
@@ -1024,7 +1045,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
wrap_enabled = true;
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_count;
- diag_send_rsp(driver->apps_rsp_buf, 6);
+ diag_send_rsp(driver->apps_rsp_buf, 6, info);
return 0;
}
/* Mobile ID Rsp */
@@ -1035,7 +1056,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
}
@@ -1055,7 +1076,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 55; i++)
driver->apps_rsp_buf[i] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 55);
+ diag_send_rsp(driver->apps_rsp_buf, 55, info);
return 0;
}
/* respond to 0x7c command */
@@ -1068,14 +1089,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
chk_config_get_id();
*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
- diag_send_rsp(driver->apps_rsp_buf, 14);
+ diag_send_rsp(driver->apps_rsp_buf, 14, info);
return 0;
}
}
write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
@@ -1087,7 +1108,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
* before disabling HDLC encoding on Apps processor.
*/
mutex_lock(&driver->hdlc_disable_mutex);
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
/*
* Set the value of hdlc_disabled after sending the response to
* the tools. This is required since the tools is expecting a
@@ -1107,7 +1128,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
/* We have now come to the end of the function. */
if (chk_apps_only())
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
return 0;
}
@@ -1190,7 +1211,7 @@ fail:
* recovery algorithm. Send an error response if the
* packet is not in expected format.
*/
- diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len);
+ diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, info);
driver->hdlc_buf_len = 0;
end:
mutex_unlock(&driver->diag_hdlc_mutex);
@@ -1446,7 +1467,7 @@ start:
if (actual_pkt->start != CONTROL_CHAR) {
diag_hdlc_start_recovery(buf, len, info);
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
goto end;
}
@@ -1528,15 +1549,14 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
case TYPE_CMD:
if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
diagfwd_write_done(peripheral, type, num);
- } else if (peripheral == APPS_DATA) {
+ }
+ if (peripheral == APPS_DATA ||
+ ctxt == DIAG_MEMORY_DEVICE_MODE) {
spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
driver->rsp_buf_busy = 0;
driver->encoded_rsp_len = 0;
spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
flags);
- } else {
- pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
- peripheral, __func__, type);
}
break;
default:
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 0023e0638aa2..4c6d86fc36ae 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,7 +45,8 @@ void diag_update_userspace_clients(unsigned int type);
void diag_update_sleeping_process(int process_id, int data_type);
int diag_process_apps_pkt(unsigned char *buf, int len,
struct diag_md_session_t *info);
-void diag_send_error_rsp(unsigned char *buf, int len);
+void diag_send_error_rsp(unsigned char *buf, int len,
+ struct diag_md_session_t *info);
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
void diag_md_hdlc_reset_timer_func(unsigned long pid);
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index e761e6ec4e39..37f3bd2626c8 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -462,6 +462,31 @@ static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
return err;
}
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ connect_work);
+ if (!glink_info || glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ remote_disconnect_work);
+ if (!glink_info || glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
static void diag_glink_transport_notify_state(void *handle, const void *priv,
unsigned event)
{
@@ -475,9 +500,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel connect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 1);
- diagfwd_channel_open(glink_info->fwd_ctxt);
- diagfwd_late_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &glink_info->connect_work);
break;
case GLINK_LOCAL_DISCONNECTED:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -489,9 +512,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel remote disconnect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 0);
- diagfwd_channel_close(glink_info->fwd_ctxt);
- atomic_set(&glink_info->tx_intent_ready, 0);
+ queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
break;
default:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -641,6 +662,9 @@ static void __diag_glink_init(struct diag_glink_info *glink_info)
INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+ INIT_WORK(&(glink_info->remote_disconnect_work),
+ diag_glink_remote_disconnect_work_fn);
link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
link_info.transport = NULL;
link_info.edge = glink_info->edge;
@@ -681,6 +705,8 @@ int diag_glink_init(void)
struct diag_glink_info *glink_info = NULL;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
glink_info = &glink_cntl[peripheral];
__diag_glink_init(glink_info);
diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
@@ -719,6 +745,8 @@ void diag_glink_early_exit(void)
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_cntl[peripheral]);
glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
}
@@ -729,6 +757,8 @@ void diag_glink_exit(void)
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_data[peripheral]);
__diag_glink_exit(&glink_cmd[peripheral]);
__diag_glink_exit(&glink_dci[peripheral]);
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index bad4629b5ab8..5c1abeffd498 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@ struct diag_glink_info {
struct work_struct open_work;
struct work_struct close_work;
struct work_struct read_work;
+ struct work_struct connect_work;
+ struct work_struct remote_disconnect_work;
struct diagfwd_info *fwd_ctxt;
};
diff --git a/drivers/clk/msm/clock-gcc-8998.c b/drivers/clk/msm/clock-gcc-8998.c
index 8b7efbd093b5..f9d713a22c76 100644
--- a/drivers/clk/msm/clock-gcc-8998.c
+++ b/drivers/clk/msm/clock-gcc-8998.c
@@ -239,28 +239,6 @@ static struct pll_vote_clk gpll4 = {
};
DEFINE_EXT_CLK(gpll4_out_main, &gpll4.c);
-static struct clk_freq_tbl ftbl_hmss_ahb_clk_src[] = {
- F( 19200000, cxo_clk_src_ao, 1, 0, 0),
- F( 50000000, gpll0_out_main, 12, 0, 0),
- F( 100000000, gpll0_out_main, 6, 0, 0),
- F_END
-};
-
-static struct rcg_clk hmss_ahb_clk_src = {
- .cmd_rcgr_reg = GCC_HMSS_AHB_CMD_RCGR,
- .set_rate = set_rate_hid,
- .freq_tbl = ftbl_hmss_ahb_clk_src,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_base,
- .c = {
- .dbg_name = "hmss_ahb_clk_src",
- .ops = &clk_ops_rcg,
- VDD_DIG_FMAX_MAP3_AO(LOWER, 19200000, LOW, 50000000,
- NOMINAL, 100000000),
- CLK_INIT(hmss_ahb_clk_src.c),
- },
-};
-
static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = {
F( 19200000, cxo_clk_src, 1, 0, 0),
F( 60000000, gpll0_out_main, 10, 0, 0),
@@ -1732,20 +1710,6 @@ static struct branch_clk gcc_gpu_iref_clk = {
},
};
-static struct local_vote_clk gcc_hmss_ahb_clk = {
- .cbcr_reg = GCC_HMSS_AHB_CBCR,
- .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
- .en_mask = BIT(21),
- .base = &virt_base,
- .c = {
- .dbg_name = "gcc_hmss_ahb_clk",
- .always_on = true,
- .parent = &hmss_ahb_clk_src.c,
- .ops = &clk_ops_vote,
- CLK_INIT(gcc_hmss_ahb_clk.c),
- },
-};
-
static struct branch_clk gcc_hmss_dvm_bus_clk = {
.cbcr_reg = GCC_HMSS_DVM_BUS_CBCR,
.has_sibling = 1,
@@ -2408,7 +2372,6 @@ static struct mux_clk gcc_debug_mux = {
{ &gcc_ce1_ahb_m_clk.c, 0x0099 },
{ &measure_only_bimc_hmss_axi_clk.c, 0x00bb },
{ &gcc_bimc_gfx_clk.c, 0x00ac },
- { &gcc_hmss_ahb_clk.c, 0x00ba },
{ &gcc_hmss_rbcpr_clk.c, 0x00bc },
{ &gcc_gp1_clk.c, 0x00df },
{ &gcc_gp2_clk.c, 0x00e0 },
@@ -2531,7 +2494,6 @@ static struct clk_lookup msm_clocks_gcc_8998[] = {
CLK_LIST(gcc_gpu_gpll0_div_clk),
CLK_LIST(gpll4),
CLK_LIST(gpll4_out_main),
- CLK_LIST(hmss_ahb_clk_src),
CLK_LIST(usb30_master_clk_src),
CLK_LIST(pcie_aux_clk_src),
CLK_LIST(ufs_axi_clk_src),
@@ -2629,7 +2591,6 @@ static struct clk_lookup msm_clocks_gcc_8998[] = {
CLK_LIST(gcc_gpu_bimc_gfx_clk),
CLK_LIST(gcc_gpu_cfg_ahb_clk),
CLK_LIST(gcc_gpu_iref_clk),
- CLK_LIST(gcc_hmss_ahb_clk),
CLK_LIST(gcc_hmss_dvm_bus_clk),
CLK_LIST(gcc_hmss_rbcpr_clk),
CLK_LIST(gcc_mmss_noc_cfg_ahb_clk),
@@ -2742,12 +2703,12 @@ static int msm_gcc_8998_probe(struct platform_device *pdev)
}
/*
- * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
- * turned off by hardware during certain apps low power modes.
+ * Clear the HMSS_AHB_CLK_ENA bit to allow the gcc_hmss_ahb_clk clock
+ * to be gated by RPM during VDD_MIN.
*/
- regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
- regval |= BIT(21);
- writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
+ regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+ regval &= ~BIT(21);
+ writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0])) {
diff --git a/drivers/clk/msm/clock-generic.c b/drivers/clk/msm/clock-generic.c
index eeb940e434cf..69e47a103402 100644
--- a/drivers/clk/msm/clock-generic.c
+++ b/drivers/clk/msm/clock-generic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -491,6 +491,9 @@ static long __slave_div_round_rate(struct clk *c, unsigned long rate,
if (best_div)
*best_div = div;
+ if (d->data.is_half_divider)
+ p_rate *= 2;
+
return p_rate / div;
}
@@ -530,9 +533,16 @@ static int slave_div_set_rate(struct clk *c, unsigned long rate)
static unsigned long slave_div_get_rate(struct clk *c)
{
struct div_clk *d = to_div_clk(c);
+ unsigned long rate;
+
if (!d->data.div)
return 0;
- return clk_get_rate(c->parent) / d->data.div;
+
+ rate = clk_get_rate(c->parent) / d->data.div;
+ if (d->data.is_half_divider)
+ rate *= 2;
+
+ return rate;
}
struct clk_ops clk_ops_slave_div = {
diff --git a/drivers/clk/msm/clock-mmss-8998.c b/drivers/clk/msm/clock-mmss-8998.c
index 1661878fc650..6ebb3ed6ed91 100644
--- a/drivers/clk/msm/clock-mmss-8998.c
+++ b/drivers/clk/msm/clock-mmss-8998.c
@@ -664,8 +664,8 @@ static struct rcg_clk byte0_clk_src = {
.parent = &ext_byte0_clk_src.c,
.ops = &clk_ops_byte_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 240000000,
- NOMINAL, 357140000),
+ VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+ NOMINAL, 312500000),
CLK_INIT(byte0_clk_src.c),
},
};
@@ -681,8 +681,8 @@ static struct rcg_clk byte1_clk_src = {
.parent = &ext_byte1_clk_src.c,
.ops = &clk_ops_byte_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 240000000,
- NOMINAL, 357140000),
+ VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+ NOMINAL, 312500000),
CLK_INIT(byte1_clk_src.c),
},
};
@@ -722,8 +722,8 @@ static struct rcg_clk pclk0_clk_src = {
.parent = &ext_pclk0_clk_src.c,
.ops = &clk_ops_pixel_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 184000000, LOW, 295000000,
- NOMINAL, 610000000),
+ VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+ NOMINAL, 416670000),
CLK_INIT(pclk0_clk_src.c),
},
};
@@ -739,8 +739,8 @@ static struct rcg_clk pclk1_clk_src = {
.parent = &ext_pclk1_clk_src.c,
.ops = &clk_ops_pixel_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 184000000, LOW, 295000000,
- NOMINAL, 610000000),
+ VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+ NOMINAL, 416670000),
CLK_INIT(pclk1_clk_src.c),
},
};
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 1639b1b7f94b..9d9aa61c480a 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -89,6 +89,7 @@ enum clk_osm_trace_packet_id {
#define SINGLE_CORE 1
#define MAX_CORE_COUNT 4
#define LLM_SW_OVERRIDE_CNT 3
+#define OSM_SEQ_MINUS_ONE 0xff
#define ENABLE_REG 0x1004
#define INDEX_REG 0x1150
@@ -367,8 +368,10 @@ struct clk_osm {
u32 cluster_num;
u32 irq;
u32 apm_crossover_vc;
+ u32 apm_threshold_pre_vc;
u32 apm_threshold_vc;
u32 mem_acc_crossover_vc;
+ u32 mem_acc_threshold_pre_vc;
u32 mem_acc_threshold_vc;
u32 cycle_counter_reads;
u32 cycle_counter_delay;
@@ -1646,10 +1649,26 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
if (corner_volt >= apm_threshold) {
c->apm_threshold_vc = c->osm_table[i].virtual_corner;
+ /*
+ * Handle case where VC 0 has open-loop
+ * greater than or equal to APM threshold voltage.
+ */
+ c->apm_threshold_pre_vc = c->apm_threshold_vc ?
+ c->apm_threshold_vc - 1 : OSM_SEQ_MINUS_ONE;
break;
}
}
+ /*
+ * This assumes the OSM table uses corners
+ * 0 to MAX_VIRTUAL_CORNER - 1.
+ */
+ if (!c->apm_threshold_vc &&
+ c->apm_threshold_pre_vc != OSM_SEQ_MINUS_ONE) {
+ c->apm_threshold_vc = MAX_VIRTUAL_CORNER;
+ c->apm_threshold_pre_vc = c->apm_threshold_vc - 1;
+ }
+
/* Determine MEM ACC threshold virtual corner */
if (mem_acc_threshold) {
for (i = 0; i < OSM_TABLE_SIZE; i++) {
@@ -1660,6 +1679,15 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
if (corner_volt >= mem_acc_threshold) {
c->mem_acc_threshold_vc
= c->osm_table[i].virtual_corner;
+ /*
+ * Handle case where VC 0 has open-loop
+ * greater than or equal to MEM-ACC threshold
+ * voltage.
+ */
+ c->mem_acc_threshold_pre_vc =
+ c->mem_acc_threshold_vc ?
+ c->mem_acc_threshold_vc - 1 :
+ OSM_SEQ_MINUS_ONE;
break;
}
}
@@ -1668,9 +1696,13 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
* This assumes the OSM table uses corners
* 0 to MAX_VIRTUAL_CORNER - 1.
*/
- if (!c->mem_acc_threshold_vc)
+ if (!c->mem_acc_threshold_vc && c->mem_acc_threshold_pre_vc
+ != OSM_SEQ_MINUS_ONE) {
c->mem_acc_threshold_vc =
MAX_VIRTUAL_CORNER;
+ c->mem_acc_threshold_pre_vc =
+ c->mem_acc_threshold_vc - 1;
+ }
}
return 0;
@@ -2021,13 +2053,20 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* highest MEM ACC threshold if it is specified instead of the
* fixed mapping in the LUT.
*/
- if (c->mem_acc_threshold_vc) {
- threshold_vc[2] = c->mem_acc_threshold_vc - 1;
+ if (c->mem_acc_threshold_vc || c->mem_acc_threshold_pre_vc
+ == OSM_SEQ_MINUS_ONE) {
+ threshold_vc[2] = c->mem_acc_threshold_pre_vc;
threshold_vc[3] = c->mem_acc_threshold_vc;
- if (threshold_vc[1] >= threshold_vc[2])
- threshold_vc[1] = threshold_vc[2] - 1;
- if (threshold_vc[0] >= threshold_vc[1])
- threshold_vc[0] = threshold_vc[1] - 1;
+
+ if (c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE) {
+ threshold_vc[1] = threshold_vc[0] =
+ c->mem_acc_threshold_pre_vc;
+ } else {
+ if (threshold_vc[1] >= threshold_vc[2])
+ threshold_vc[1] = threshold_vc[2] - 1;
+ if (threshold_vc[0] >= threshold_vc[1])
+ threshold_vc[0] = threshold_vc[1] - 1;
+ }
}
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(55),
@@ -2045,7 +2084,8 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* Program L_VAL corresponding to the first virtual
* corner with MEM ACC level 3.
*/
- if (c->mem_acc_threshold_vc)
+ if (c->mem_acc_threshold_vc ||
+ c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE)
for (i = 0; i < c->num_entries; i++)
if (c->mem_acc_threshold_vc == table[i].virtual_corner)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
@@ -2365,8 +2405,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
SEQ_REG(8));
clk_osm_write_reg(c, c->apm_threshold_vc,
SEQ_REG(15));
- clk_osm_write_reg(c, c->apm_threshold_vc != 0 ?
- c->apm_threshold_vc - 1 : 0xff,
+ clk_osm_write_reg(c, c->apm_threshold_pre_vc,
SEQ_REG(31));
clk_osm_write_reg(c, 0x3b | c->apm_threshold_vc << 6,
SEQ_REG(73));
@@ -2390,8 +2429,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(15),
c->apm_threshold_vc);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31),
- c->apm_threshold_vc != 0 ?
- c->apm_threshold_vc - 1 : 0xff);
+ c->apm_threshold_pre_vc);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(76),
0x39 | c->apm_threshold_vc << 6);
}
diff --git a/drivers/clk/qcom/clk-alpha-pll.c b/drivers/clk/qcom/clk-alpha-pll.c
index e6054444599c..1d9085cff82c 100644
--- a/drivers/clk/qcom/clk-alpha-pll.c
+++ b/drivers/clk/qcom/clk-alpha-pll.c
@@ -876,5 +876,6 @@ const struct clk_ops clk_alpha_pll_slew_ops = {
.recalc_rate = clk_alpha_pll_recalc_rate,
.round_rate = clk_alpha_pll_round_rate,
.set_rate = clk_alpha_pll_slew_set_rate,
+ .list_registers = clk_alpha_pll_list_registers,
};
EXPORT_SYMBOL_GPL(clk_alpha_pll_slew_ops);
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index e88d70f07a1c..6b00bee337a1 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1952,16 +1952,21 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
}
}
+ threshold_vc[0] = mem_acc_level_map[0];
+ threshold_vc[1] = mem_acc_level_map[0] + 1;
+ threshold_vc[2] = mem_acc_level_map[1];
+ threshold_vc[3] = mem_acc_level_map[1] + 1;
+
if (c->secure_init) {
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(51));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(2), SEQ_REG(52));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(3), SEQ_REG(53));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(4), SEQ_REG(54));
clk_osm_write_reg(c, MEM_ACC_APM_READ_MASK, SEQ_REG(59));
- clk_osm_write_reg(c, mem_acc_level_map[0], SEQ_REG(55));
- clk_osm_write_reg(c, mem_acc_level_map[0] + 1, SEQ_REG(56));
- clk_osm_write_reg(c, mem_acc_level_map[1], SEQ_REG(57));
- clk_osm_write_reg(c, mem_acc_level_map[1] + 1, SEQ_REG(58));
+ clk_osm_write_reg(c, threshold_vc[0], SEQ_REG(55));
+ clk_osm_write_reg(c, threshold_vc[1], SEQ_REG(56));
+ clk_osm_write_reg(c, threshold_vc[2], SEQ_REG(57));
+ clk_osm_write_reg(c, threshold_vc[3], SEQ_REG(58));
clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(28),
SEQ_REG(49));
@@ -1977,11 +1982,6 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(88),
c->mem_acc_crossover_vc);
- threshold_vc[0] = mem_acc_level_map[0];
- threshold_vc[1] = mem_acc_level_map[0] + 1;
- threshold_vc[2] = mem_acc_level_map[1];
- threshold_vc[3] = mem_acc_level_map[1] + 1;
-
/*
* Use dynamic MEM ACC threshold voltage based value for the
* highest MEM ACC threshold if it is specified instead of the
@@ -2011,11 +2011,10 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* Program L_VAL corresponding to the first virtual
* corner with MEM ACC level 3.
*/
- if (c->mem_acc_threshold_vc)
- for (i = 0; i < c->num_entries; i++)
- if (c->mem_acc_threshold_vc == table[i].virtual_corner)
- scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
- L_VAL(table[i].freq_data));
+ for (i = 0; i < c->num_entries; i++)
+ if (threshold_vc[3] == table[i].virtual_corner)
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
+ L_VAL(table[i].freq_data));
}
void clk_osm_setup_sequencer(struct clk_osm *c)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a07deb902577..9e5c0b6f7a0e 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -307,7 +307,18 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg, mask, old_cfg;
struct clk_hw *hw = &rcg->clkr.hw;
- int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ int ret, index;
+
+ /*
+ * In case the frequency table of cxo_f is used, the src in parent_map
+ * and the source in cxo_f.src could be different. Update the index to
+ * '0' since it's assumed that CXO is always fed to port 0 of RCGs HLOS
+ * controls.
+ */
+ if (f == &cxo_f)
+ index = 0;
+ else
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
if (index < 0)
return index;
@@ -507,6 +518,15 @@ static int clk_rcg2_enable(struct clk_hw *hw)
if (!f)
return -EINVAL;
+ /*
+ * If CXO is not listed as a supported frequency in the frequency
+ * table, the above API would return the lowest supported frequency
+ * instead. This will lead to incorrect configuration of the RCG.
+ * Check if the RCG rate is CXO and configure it accordingly.
+ */
+ if (rate == cxo_f.freq)
+ f = &cxo_f;
+
clk_rcg_set_force_enable(hw);
clk_rcg2_configure(rcg, f);
clk_rcg_clear_force_enable(hw);
diff --git a/drivers/clk/qcom/clk-smd-rpm.c b/drivers/clk/qcom/clk-smd-rpm.c
index 095530e72b78..0d76b6b28985 100644
--- a/drivers/clk/qcom/clk-smd-rpm.c
+++ b/drivers/clk/qcom/clk-smd-rpm.c
@@ -570,8 +570,6 @@ static DEFINE_CLK_VOTER(pnoc_msmbus_clk, pnoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_msmbus_a_clk, pnoc_a_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_pm_clk, pnoc_clk, LONG_MAX);
static DEFINE_CLK_VOTER(pnoc_sps_clk, pnoc_clk, 0);
-static DEFINE_CLK_VOTER(mmssnoc_a_cpu_clk, mmssnoc_axi_a_clk,
- 19200000);
/* Voter Branch clocks */
static DEFINE_CLK_BRANCH_VOTER(cxo_dwc3_clk, cxo);
@@ -740,7 +738,6 @@ static struct clk_hw *sdm660_clks[] = {
[CXO_PIL_LPASS_CLK] = &cxo_pil_lpass_clk.hw,
[CXO_PIL_CDSP_CLK] = &cxo_pil_cdsp_clk.hw,
[CNOC_PERIPH_KEEPALIVE_A_CLK] = &cnoc_periph_keepalive_a_clk.hw,
- [MMSSNOC_A_CLK_CPU_VOTE] = &mmssnoc_a_cpu_clk.hw
};
static const struct rpm_smd_clk_desc rpm_clk_sdm660 = {
@@ -861,7 +858,6 @@ static int rpm_smd_clk_probe(struct platform_device *pdev)
/* Hold an active set vote for the cnoc_periph resource */
clk_set_rate(cnoc_periph_keepalive_a_clk.hw.clk, 19200000);
clk_prepare_enable(cnoc_periph_keepalive_a_clk.hw.clk);
- clk_prepare_enable(mmssnoc_a_cpu_clk.hw.clk);
}
dev_info(&pdev->dev, "Registered RPM clocks\n");
diff --git a/drivers/clk/qcom/gcc-sdm660.c b/drivers/clk/qcom/gcc-sdm660.c
index 3413859a56ef..c282253da584 100644
--- a/drivers/clk/qcom/gcc-sdm660.c
+++ b/drivers/clk/qcom/gcc-sdm660.c
@@ -730,32 +730,6 @@ static struct clk_rcg2 gp3_clk_src = {
},
};
-static const struct freq_tbl ftbl_hmss_ahb_clk_src[] = {
- F(19200000, P_XO, 1, 0, 0),
- F(37500000, P_GPLL0_OUT_MAIN, 16, 0, 0),
- F(75000000, P_GPLL0_OUT_MAIN, 8, 0, 0),
- F(100000000, P_GPLL0_OUT_MAIN, 6, 0, 0),
- { }
-};
-
-static struct clk_rcg2 hmss_ahb_clk_src = {
- .cmd_rcgr = 0x48014,
- .mnd_width = 0,
- .hid_width = 5,
- .parent_map = gcc_parent_map_1,
- .freq_tbl = ftbl_hmss_ahb_clk_src,
- .clkr.hw.init = &(struct clk_init_data){
- .name = "hmss_ahb_clk_src",
- .parent_names = gcc_parent_names_ao_1,
- .num_parents = 3,
- .ops = &clk_rcg2_ops,
- VDD_DIG_FMAX_MAP3_AO(
- LOWER, 19200000,
- LOW, 50000000,
- NOMINAL, 100000000),
- },
-};
-
static const struct freq_tbl ftbl_hmss_gpll0_clk_src[] = {
F(600000000, P_GPLL0_OUT_MAIN, 1, 0, 0),
{ }
@@ -1823,24 +1797,6 @@ static struct clk_branch gcc_gpu_gpll0_div_clk = {
},
};
-static struct clk_branch gcc_hmss_ahb_clk = {
- .halt_reg = 0x48000,
- .halt_check = BRANCH_HALT_VOTED,
- .clkr = {
- .enable_reg = 0x52004,
- .enable_mask = BIT(21),
- .hw.init = &(struct clk_init_data){
- .name = "gcc_hmss_ahb_clk",
- .parent_names = (const char *[]){
- "hmss_ahb_clk_src",
- },
- .num_parents = 1,
- .flags = CLK_SET_RATE_PARENT | CLK_IGNORE_UNUSED,
- .ops = &clk_branch2_ops,
- },
- },
-};
-
static struct clk_branch gcc_hmss_dvm_bus_clk = {
.halt_reg = 0x4808c,
.halt_check = BRANCH_HALT,
@@ -2493,14 +2449,15 @@ static struct clk_branch gcc_usb3_phy_aux_clk = {
},
};
-static struct clk_gate2 gcc_usb3_phy_pipe_clk = {
- .udelay = 50,
+static struct clk_branch gcc_usb3_phy_pipe_clk = {
+ .halt_reg = 0x50004,
+ .halt_check = BRANCH_HALT_DELAY,
.clkr = {
.enable_reg = 0x50004,
.enable_mask = BIT(0),
.hw.init = &(struct clk_init_data){
.name = "gcc_usb3_phy_pipe_clk",
- .ops = &clk_gate2_ops,
+ .ops = &clk_branch2_ops,
},
},
};
@@ -2640,7 +2597,6 @@ static struct clk_regmap *gcc_660_clocks[] = {
[GCC_GPU_CFG_AHB_CLK] = &gcc_gpu_cfg_ahb_clk.clkr,
[GCC_GPU_GPLL0_CLK] = &gcc_gpu_gpll0_clk.clkr,
[GCC_GPU_GPLL0_DIV_CLK] = &gcc_gpu_gpll0_div_clk.clkr,
- [GCC_HMSS_AHB_CLK] = &gcc_hmss_ahb_clk.clkr,
[GCC_HMSS_DVM_BUS_CLK] = &gcc_hmss_dvm_bus_clk.clkr,
[GCC_HMSS_RBCPR_CLK] = &gcc_hmss_rbcpr_clk.clkr,
[GCC_MMSS_GPLL0_CLK] = &gcc_mmss_gpll0_clk.clkr,
@@ -2690,7 +2646,6 @@ static struct clk_regmap *gcc_660_clocks[] = {
[GPLL1] = &gpll1_out_main.clkr,
[GPLL4] = &gpll4_out_main.clkr,
[HLOS1_VOTE_LPASS_ADSP_SMMU_CLK] = &hlos1_vote_lpass_adsp_smmu_clk.clkr,
- [HMSS_AHB_CLK_SRC] = &hmss_ahb_clk_src.clkr,
[HMSS_GPLL0_CLK_SRC] = &hmss_gpll0_clk_src.clkr,
[HMSS_GPLL4_CLK_SRC] = &hmss_gpll4_clk_src.clkr,
[HMSS_RBCPR_CLK_SRC] = &hmss_rbcpr_clk_src.clkr,
@@ -2764,12 +2719,6 @@ static int gcc_660_probe(struct platform_device *pdev)
if (IS_ERR(regmap))
return PTR_ERR(regmap);
- /*
- * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
- * turned off by hardware during certain apps low power modes.
- */
- regmap_update_bits(regmap, 0x52008, BIT(21), BIT(21));
-
vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0])) {
if (!(PTR_ERR(vdd_dig.regulator[0]) == -EPROBE_DEFER))
@@ -2882,7 +2831,6 @@ static const char *const debug_mux_parent_names[] = {
"gcc_gp3_clk",
"gcc_gpu_bimc_gfx_clk",
"gcc_gpu_cfg_ahb_clk",
- "gcc_hmss_ahb_clk",
"gcc_hmss_dvm_bus_clk",
"gcc_hmss_rbcpr_clk",
"gcc_mmss_noc_cfg_ahb_clk",
@@ -3061,7 +3009,6 @@ static struct clk_debug_mux gcc_debug_mux = {
{ "gcc_gp3_clk", 0x0E1 },
{ "gcc_gpu_bimc_gfx_clk", 0x13F },
{ "gcc_gpu_cfg_ahb_clk", 0x13B },
- { "gcc_hmss_ahb_clk", 0x0BA },
{ "gcc_hmss_dvm_bus_clk", 0x0BF },
{ "gcc_hmss_rbcpr_clk", 0x0BC },
{ "gcc_mmss_noc_cfg_ahb_clk", 0x020 },
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index aec73d62bc18..87e6f8c6be0a 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -529,7 +529,7 @@ static struct clk_rcg2 ahb_clk_src = {
.hid_width = 5,
.parent_map = mmcc_parent_map_10,
.freq_tbl = ftbl_ahb_clk_src,
- .flags = FORCE_ENABLE_RCGR,
+ .enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
.parent_names = mmcc_parent_names_10,
@@ -3002,6 +3002,7 @@ static const struct qcom_cc_desc mmcc_660_desc = {
static const struct of_device_id mmcc_660_match_table[] = {
{ .compatible = "qcom,mmcc-sdm660" },
+ { .compatible = "qcom,mmcc-sdm630" },
{ }
};
MODULE_DEVICE_TABLE(of, mmcc_660_match_table);
@@ -3010,11 +3011,15 @@ static int mmcc_660_probe(struct platform_device *pdev)
{
int ret = 0;
struct regmap *regmap;
+ bool is_sdm630 = 0;
regmap = qcom_cc_map(pdev, &mmcc_660_desc);
if (IS_ERR(regmap))
return PTR_ERR(regmap);
+ is_sdm630 = of_device_is_compatible(pdev->dev.of_node,
+ "qcom,mmcc-sdm630");
+
/* PLLs connected on Mx rails of MMSS_CC */
vdd_mx.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_mx_mmss");
if (IS_ERR(vdd_mx.regulator[0])) {
@@ -3048,6 +3053,17 @@ static int mmcc_660_probe(struct platform_device *pdev)
clk_alpha_pll_configure(&mmpll8_pll_out_main, regmap, &mmpll8_config);
clk_alpha_pll_configure(&mmpll10_pll_out_main, regmap, &mmpll10_config);
+ if (is_sdm630) {
+ mmcc_660_desc.clks[BYTE1_CLK_SRC] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_BYTE1_CLK] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_BYTE1_INTF_DIV_CLK] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_BYTE1_INTF_CLK] = 0;
+ mmcc_660_desc.clks[ESC1_CLK_SRC] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_ESC1_CLK] = 0;
+ mmcc_660_desc.clks[PCLK1_CLK_SRC] = 0;
+ mmcc_660_desc.clks[MMSS_MDSS_PCLK1_CLK] = 0;
+ }
+
ret = qcom_cc_really_probe(pdev, &mmcc_660_desc, regmap);
if (ret) {
dev_err(&pdev->dev, "Failed to register MMSS clocks\n");
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 5ce87a6edcc3..7459401979fe 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1490,6 +1490,11 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all dst length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+ pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.dst[i].len);
+ goto error;
+ }
if (req->vbuf.dst[i].len >= U32_MAX - total) {
pr_err("%s: Integer overflow on total req dst vbuf length\n",
__func__);
@@ -1504,6 +1509,11 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+ pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.src[i].len);
+ goto error;
+ }
if (req->vbuf.src[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req src vbuf length\n",
__func__);
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index afd94a1e85d3..5838545468f8 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,8 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select BACKLIGHT_CLASS_DEVICE
+ select MSM_EXT_DISPLAY
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -88,3 +90,9 @@ config DRM_SDE_WB
help
Choose this option for writeback connector support.
+config DRM_SDE_HDMI
+ bool "Enable HDMI driver support in DRM SDE driver"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option if HDMI connector support is needed in SDE driver.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 4ca16fc01e1c..79ea5a9f90ea 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,4 +1,6 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi-staging
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
@@ -45,14 +47,19 @@ msm_drm-y := \
sde/sde_backlight.o \
sde/sde_color_processing.o \
sde/sde_vbif.o \
- sde_dbg_evtlog.o
+ sde_dbg_evtlog.o \
+ sde_io_util.o \
# use drm gpu driver only if qcom_kgsl driver not available
ifneq ($(CONFIG_QCOM_KGSL),y)
msm_drm-y += adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
- adreno/a4xx_gpu.o
+ adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
+ adreno/a5xx_preempt.o \
+ adreno/a5xx_snapshot.o
endif
msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
@@ -90,6 +97,12 @@ msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
dsi-staging/dsi_panel.o \
dsi-staging/dsi_display_test.o
+msm_drm-$(CONFIG_DRM_SDE_HDMI) += \
+ hdmi-staging/sde_hdmi.o \
+ hdmi-staging/sde_hdmi_bridge.o \
+ hdmi-staging/sde_hdmi_audio.o \
+ hdmi-staging/sde_hdmi_edid.o
+
msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
dsi/pll/dsi_pll_28nm.o
@@ -121,12 +134,14 @@ msm_drm-$(CONFIG_DRM_MSM) += \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
msm_smmu.o \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
- msm_prop.o
+ msm_prop.o \
+ msm_snapshot.o
obj-$(CONFIG_DRM_MSM) += msm_drm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9e2aceb4ffe6..8d16b21ef8a5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 97dc1c6ec107..d521b13bdf9f 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -111,10 +113,14 @@ enum a3xx_vtx_fmt {
VFMT_8_8_SNORM = 53,
VFMT_8_8_8_SNORM = 54,
VFMT_8_8_8_8_SNORM = 55,
- VFMT_10_10_10_2_UINT = 60,
- VFMT_10_10_10_2_UNORM = 61,
- VFMT_10_10_10_2_SINT = 62,
- VFMT_10_10_10_2_SNORM = 63,
+ VFMT_10_10_10_2_UINT = 56,
+ VFMT_10_10_10_2_UNORM = 57,
+ VFMT_10_10_10_2_SINT = 58,
+ VFMT_10_10_10_2_SNORM = 59,
+ VFMT_2_10_10_10_UINT = 60,
+ VFMT_2_10_10_10_UNORM = 61,
+ VFMT_2_10_10_10_SINT = 62,
+ VFMT_2_10_10_10_SNORM = 63,
};
enum a3xx_tex_fmt {
@@ -124,10 +130,14 @@ enum a3xx_tex_fmt {
TFMT_Z16_UNORM = 9,
TFMT_X8Z24_UNORM = 10,
TFMT_Z32_FLOAT = 11,
- TFMT_NV12_UV_TILED = 17,
- TFMT_NV12_Y_TILED = 19,
- TFMT_NV12_UV = 21,
- TFMT_NV12_Y = 23,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
TFMT_I420_Y = 24,
TFMT_I420_U = 26,
TFMT_I420_V = 27,
@@ -138,10 +148,12 @@ enum a3xx_tex_fmt {
TFMT_DXT1 = 36,
TFMT_DXT3 = 37,
TFMT_DXT5 = 38,
+ TFMT_2_10_10_10_UNORM = 40,
TFMT_10_10_10_2_UNORM = 41,
TFMT_9_9_9_E5_FLOAT = 42,
TFMT_11_11_10_FLOAT = 43,
TFMT_A8_UNORM = 44,
+ TFMT_L8_UNORM = 45,
TFMT_L8_A8_UNORM = 47,
TFMT_8_UNORM = 48,
TFMT_8_8_UNORM = 49,
@@ -183,6 +195,8 @@ enum a3xx_tex_fmt {
TFMT_32_SINT = 92,
TFMT_32_32_SINT = 93,
TFMT_32_32_32_32_SINT = 95,
+ TFMT_2_10_10_10_UINT = 96,
+ TFMT_10_10_10_2_UINT = 97,
TFMT_ETC2_RG11_SNORM = 112,
TFMT_ETC2_RG11_UNORM = 113,
TFMT_ETC2_R11_SNORM = 114,
@@ -215,6 +229,9 @@ enum a3xx_color_fmt {
RB_R8_UINT = 14,
RB_R8_SINT = 15,
RB_R10G10B10A2_UNORM = 16,
+ RB_A2R10G10B10_UNORM = 17,
+ RB_R10G10B10A2_UINT = 18,
+ RB_A2R10G10B10_UINT = 19,
RB_A8_UNORM = 20,
RB_R8_UNORM = 21,
RB_R16_FLOAT = 24,
@@ -244,38 +261,273 @@ enum a3xx_color_fmt {
RB_R32G32B32A32_UINT = 59,
};
+enum a3xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_AHB_PFPTRANS_WAIT = 3,
+ CP_AHB_NRTTRANS_WAIT = 6,
+ CP_CSF_NRT_READ_WAIT = 8,
+ CP_CSF_I1_FIFO_FULL = 9,
+ CP_CSF_I2_FIFO_FULL = 10,
+ CP_CSF_ST_FIFO_FULL = 11,
+ CP_RESERVED_12 = 12,
+ CP_CSF_RING_ROQ_FULL = 13,
+ CP_CSF_I1_ROQ_FULL = 14,
+ CP_CSF_I2_ROQ_FULL = 15,
+ CP_CSF_ST_ROQ_FULL = 16,
+ CP_RESERVED_17 = 17,
+ CP_MIU_TAG_MEM_FULL = 18,
+ CP_MIU_NRT_WRITE_STALLED = 22,
+ CP_MIU_NRT_READ_STALLED = 23,
+ CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+ CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ CP_ME_MICRO_RB_STARVED = 30,
+ CP_AHB_RBBM_DWORD_SENT = 40,
+ CP_ME_BUSY_CLOCKS = 41,
+ CP_ME_WAIT_CONTEXT_AVAIL = 42,
+ CP_PFP_TYPE0_PACKET = 43,
+ CP_PFP_TYPE3_PACKET = 44,
+ CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+ CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+ CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+ CP_CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a3xx_gras_tse_perfcounter_select {
+ GRAS_TSEPERF_INPUT_PRIM = 0,
+ GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+ GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+ GRAS_TSEPERF_CLIPPED_PRIM = 3,
+ GRAS_TSEPERF_NEW_PRIM = 4,
+ GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+ GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+ GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+ GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+ GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+ GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+ GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+ GRAS_TSEPERF_WORKING_CYCLES = 12,
+ GRAS_TSEPERF_PC_STARVE = 13,
+ GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+ GRAS_RASPERF_16X16_TILES = 0,
+ GRAS_RASPERF_8X8_TILES = 1,
+ GRAS_RASPERF_4X4_TILES = 2,
+ GRAS_RASPERF_WORKING_CYCLES = 3,
+ GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+ GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+ GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+ HLSQ_PERF_SP_VS_CONSTANT = 0,
+ HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+ HLSQ_PERF_SP_FS_CONSTANT = 2,
+ HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+ HLSQ_PERF_TP_STATE = 4,
+ HLSQ_PERF_QUADS = 5,
+ HLSQ_PERF_PIXELS = 6,
+ HLSQ_PERF_VERTICES = 7,
+ HLSQ_PERF_FS8_THREADS = 8,
+ HLSQ_PERF_FS16_THREADS = 9,
+ HLSQ_PERF_FS32_THREADS = 10,
+ HLSQ_PERF_VS8_THREADS = 11,
+ HLSQ_PERF_VS16_THREADS = 12,
+ HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+ HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+ HLSQ_PERF_ACTIVE_CYCLES = 15,
+ HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+ HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+ HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+ HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+ HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+ HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+ HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+ HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+ HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+ PC_PCPERF_VISIBILITY_STREAMS = 0,
+ PC_PCPERF_TOTAL_INSTANCES = 1,
+ PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+ PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+ PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+ PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+ PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+ PC_PCPERF_VERTICES_TO_VFD = 7,
+ PC_PCPERF_REUSED_VERTICES = 8,
+ PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+ PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+ PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+ PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+ RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+ RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+ RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+ RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+ RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+ RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+ RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+ RB_RBPERF_RB_MARB_DATA = 7,
+ RB_RBPERF_SP_RB_QUAD = 8,
+ RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+ RB_RBPERF_GMEM_CH0_READ = 10,
+ RB_RBPERF_GMEM_CH1_READ = 11,
+ RB_RBPERF_GMEM_CH0_WRITE = 12,
+ RB_RBPERF_GMEM_CH1_WRITE = 13,
+ RB_RBPERF_CP_CONTEXT_DONE = 14,
+ RB_RBPERF_CP_CACHE_FLUSH = 15,
+ RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+ RBBM_ALAWYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TEX_BUSY = 12,
+ RBBM_ANY_USP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_RBBM_STATUS_MASKED = 22,
+};
+
enum a3xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_UCHE_LOAD_INSTRUCTIONS = 3,
+ SP_UCHE_STORE_INSTRUCTIONS = 4,
+ SP_UCHE_ATOMICS = 5,
+ SP_VS_TEX_INSTRUCTIONS = 6,
+ SP_VS_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_EFU_INSTRUCTIONS = 8,
+ SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_TEX_INSTRUCTIONS = 11,
SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_EFU_INSTRUCTIONS = 13,
SP_FS_FULL_ALU_INSTRUCTIONS = 14,
- SP0_ICL1_MISSES = 26,
+ SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+ SP_FS_BARY_INSTRUCTIONS = 16,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
SP_ALU_ACTIVE_CYCLES = 29,
+ SP_EFU_ACTIVE_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_ACTIVE_CYCLES_ANY = 35,
+ SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+ TPL1_TPPERF_L1_REQUESTS = 0,
+ TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+ TPL1_TPPERF_TP0_L1_MISSES = 2,
+ TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+ TPL1_TPPERF_TP1_L1_MISSES = 4,
+ TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+ TPL1_TPPERF_TP2_L1_MISSES = 6,
+ TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+ TPL1_TPPERF_TP3_L1_MISSES = 8,
+ TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+ TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+ TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+ TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+ TPL1_TPPERF_BILINEAR_OPS = 13,
+ TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+ TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+ TPL1_TPPERF_QUADS_ARRAY = 16,
+ TPL1_TPPERF_QUADS_PROJECTION = 17,
+ TPL1_TPPERF_QUADS_GRADIENT = 18,
+ TPL1_TPPERF_QUADS_1D2D = 19,
+ TPL1_TPPERF_QUADS_3DCUBE = 20,
+ TPL1_TPPERF_ZERO_LOD = 21,
+ TPL1_TPPERF_OUTPUT_TEXELS = 22,
+ TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+ TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+ TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+ TPL1_TPPERF_LATENCY = 26,
+ TPL1_TPPERF_LATENCY_TRANS = 27,
+};
+
+enum a3xx_vfd_perfcounter_select {
+ VFD_PERF_UCHE_BYTE_FETCHED = 0,
+ VFD_PERF_UCHE_TRANS = 1,
+ VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+ VFD_PERF_FETCH_INSTRUCTIONS = 3,
+ VFD_PERF_DECODE_INSTRUCTIONS = 4,
+ VFD_PERF_ACTIVE_CYCLES = 5,
+ VFD_PERF_STALL_CYCLES_UCHE = 6,
+ VFD_PERF_STALL_CYCLES_HLSQ = 7,
+ VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
};
-enum a3xx_rop_code {
- ROP_CLEAR = 0,
- ROP_NOR = 1,
- ROP_AND_INVERTED = 2,
- ROP_COPY_INVERTED = 3,
- ROP_AND_REVERSE = 4,
- ROP_INVERT = 5,
- ROP_XOR = 6,
- ROP_NAND = 7,
- ROP_AND = 8,
- ROP_EQUIV = 9,
- ROP_NOOP = 10,
- ROP_OR_INVERTED = 11,
- ROP_COPY = 12,
- ROP_OR_REVERSE = 13,
- ROP_OR = 14,
- ROP_SET = 15,
+enum a3xx_vpc_perfcounter_select {
+ VPC_PERF_SP_LM_PRIMITIVES = 0,
+ VPC_PERF_COMPONENTS_FROM_SP = 1,
+ VPC_PERF_SP_LM_COMPONENTS = 2,
+ VPC_PERF_ACTIVE_CYCLES = 3,
+ VPC_PERF_STALL_CYCLES_LM = 4,
+ VPC_PERF_STALL_CYCLES_RAS = 5,
};
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
+enum a3xx_uche_perfcounter_select {
+ UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+ UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+ UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+ UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+ UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+ UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+ UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+ UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+ UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+ UCHE_UCHEPERF_EVICTS = 16,
+ UCHE_UCHEPERF_FLUSHES = 17,
+ UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+ UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+ UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
enum a3xx_intp_mode {
@@ -1138,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1217,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
@@ -1429,15 +1682,23 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
@@ -1451,17 +1712,39 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
-#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
-#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
@@ -1478,13 +1761,13 @@ static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1498,13 +1781,13 @@ static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1518,13 +1801,13 @@ static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1532,13 +1815,13 @@ static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1620,12 +1903,24 @@ static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
}
#define REG_A3XX_VFD_CONTROL_1 0x00002241
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
{
return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
}
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
@@ -2008,24 +2303,19 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2033,8 +2323,6 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
-#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -2075,7 +2363,8 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
}
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
{
@@ -2085,24 +2374,26 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
}
-#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
@@ -2113,25 +2404,25 @@ static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
{
@@ -2139,6 +2430,12 @@ static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
}
#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2155,8 +2452,38 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
@@ -2182,24 +2509,22 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2235,7 +2560,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
}
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
{
@@ -2243,6 +2568,12 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
}
#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2259,8 +2590,38 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index fd266ed963b6..c4f886fd6037 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -40,10 +40,11 @@
extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -64,8 +65,8 @@ static void a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a3xx_idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,9 +295,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
- a3xx_me_init(gpu);
-
- return 0;
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a3xx_recover(struct msm_gpu *gpu)
@@ -330,17 +329,22 @@ static void a3xx_destroy(struct msm_gpu *gpu)
kfree(a3xx_gpu);
}
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
- A3XX_RBBM_STATUS_GPU_BUSY)))
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
}
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -419,91 +423,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A3XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A3XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A3XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A3XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A3XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A3XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A3XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A3XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A3XX_SP_VS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A3XX_SP_FS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_A3XX_RBBM_PM_OVERRIDE2),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_A3XX_SQ_GPR_MANAGEMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_A3XX_SQ_INST_STORE_MANAGMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A3XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
@@ -514,9 +440,10 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
.last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a3xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -564,7 +491,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
@@ -583,7 +510,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 99de8271dba8..1004d4ecf8fe 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,13 +47,18 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
- RB4_R5G6R5_UNORM = 14,
+ RB4_R5G6B5_UNORM = 14,
RB4_R8G8_UNORM = 15,
RB4_R8G8_SNORM = 16,
RB4_R8G8_UINT = 17,
RB4_R8G8_SINT = 18,
+ RB4_R16_UNORM = 19,
+ RB4_R16_SNORM = 20,
RB4_R16_FLOAT = 21,
RB4_R16_UINT = 22,
RB4_R16_SINT = 23,
@@ -63,12 +70,16 @@ enum a4xx_color_fmt {
RB4_R10G10B10A2_UNORM = 31,
RB4_R10G10B10A2_UINT = 34,
RB4_R11G11B10_FLOAT = 39,
+ RB4_R16G16_UNORM = 40,
+ RB4_R16G16_SNORM = 41,
RB4_R16G16_FLOAT = 42,
RB4_R16G16_UINT = 43,
RB4_R16G16_SINT = 44,
RB4_R32_FLOAT = 45,
RB4_R32_UINT = 46,
RB4_R32_SINT = 47,
+ RB4_R16G16B16A16_UNORM = 52,
+ RB4_R16G16B16A16_SNORM = 53,
RB4_R16G16B16A16_FLOAT = 54,
RB4_R16G16B16A16_UINT = 55,
RB4_R16G16B16A16_SINT = 56,
@@ -82,17 +93,10 @@ enum a4xx_color_fmt {
enum a4xx_tile_mode {
TILE4_LINEAR = 0,
+ TILE4_2 = 2,
TILE4_3 = 3,
};
-enum a4xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a4xx_vtx_fmt {
VFMT4_32_FLOAT = 1,
VFMT4_32_32_FLOAT = 2,
@@ -106,6 +110,7 @@ enum a4xx_vtx_fmt {
VFMT4_32_32_FIXED = 10,
VFMT4_32_32_32_FIXED = 11,
VFMT4_32_32_32_32_FIXED = 12,
+ VFMT4_11_11_10_FLOAT = 13,
VFMT4_16_SINT = 16,
VFMT4_16_16_SINT = 17,
VFMT4_16_16_16_SINT = 18,
@@ -146,52 +151,76 @@ enum a4xx_vtx_fmt {
VFMT4_8_8_SNORM = 53,
VFMT4_8_8_8_SNORM = 54,
VFMT4_8_8_8_8_SNORM = 55,
- VFMT4_10_10_10_2_UINT = 60,
- VFMT4_10_10_10_2_UNORM = 61,
- VFMT4_10_10_10_2_SINT = 62,
- VFMT4_10_10_10_2_SNORM = 63,
+ VFMT4_10_10_10_2_UINT = 56,
+ VFMT4_10_10_10_2_UNORM = 57,
+ VFMT4_10_10_10_2_SINT = 58,
+ VFMT4_10_10_10_2_SNORM = 59,
+ VFMT4_2_10_10_10_UINT = 60,
+ VFMT4_2_10_10_10_UNORM = 61,
+ VFMT4_2_10_10_10_SINT = 62,
+ VFMT4_2_10_10_10_SNORM = 63,
};
enum a4xx_tex_fmt {
- TFMT4_5_6_5_UNORM = 11,
- TFMT4_5_5_5_1_UNORM = 10,
- TFMT4_4_4_4_4_UNORM = 8,
- TFMT4_X8Z24_UNORM = 71,
- TFMT4_10_10_10_2_UNORM = 33,
TFMT4_A8_UNORM = 3,
- TFMT4_L8_A8_UNORM = 13,
TFMT4_8_UNORM = 4,
- TFMT4_8_8_UNORM = 14,
- TFMT4_8_8_8_8_UNORM = 28,
TFMT4_8_SNORM = 5,
- TFMT4_8_8_SNORM = 15,
- TFMT4_8_8_8_8_SNORM = 29,
TFMT4_8_UINT = 6,
- TFMT4_8_8_UINT = 16,
- TFMT4_8_8_8_8_UINT = 30,
TFMT4_8_SINT = 7,
+ TFMT4_4_4_4_4_UNORM = 8,
+ TFMT4_5_5_5_1_UNORM = 9,
+ TFMT4_5_6_5_UNORM = 11,
+ TFMT4_L8_A8_UNORM = 13,
+ TFMT4_8_8_UNORM = 14,
+ TFMT4_8_8_SNORM = 15,
+ TFMT4_8_8_UINT = 16,
TFMT4_8_8_SINT = 17,
- TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_16_UNORM = 18,
+ TFMT4_16_SNORM = 19,
+ TFMT4_16_FLOAT = 20,
TFMT4_16_UINT = 21,
- TFMT4_16_16_UINT = 41,
- TFMT4_16_16_16_16_UINT = 54,
TFMT4_16_SINT = 22,
+ TFMT4_8_8_8_8_UNORM = 28,
+ TFMT4_8_8_8_8_SNORM = 29,
+ TFMT4_8_8_8_8_UINT = 30,
+ TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_9_9_9_E5_FLOAT = 32,
+ TFMT4_10_10_10_2_UNORM = 33,
+ TFMT4_10_10_10_2_UINT = 34,
+ TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_16_16_UNORM = 38,
+ TFMT4_16_16_SNORM = 39,
+ TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_UINT = 41,
TFMT4_16_16_SINT = 42,
- TFMT4_16_16_16_16_SINT = 55,
+ TFMT4_32_FLOAT = 43,
TFMT4_32_UINT = 44,
- TFMT4_32_32_UINT = 57,
- TFMT4_32_32_32_32_UINT = 64,
TFMT4_32_SINT = 45,
- TFMT4_32_32_SINT = 58,
- TFMT4_32_32_32_32_SINT = 65,
- TFMT4_16_FLOAT = 20,
- TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_16_16_UNORM = 51,
+ TFMT4_16_16_16_16_SNORM = 52,
TFMT4_16_16_16_16_FLOAT = 53,
- TFMT4_32_FLOAT = 43,
+ TFMT4_16_16_16_16_UINT = 54,
+ TFMT4_16_16_16_16_SINT = 55,
TFMT4_32_32_FLOAT = 56,
+ TFMT4_32_32_UINT = 57,
+ TFMT4_32_32_SINT = 58,
+ TFMT4_32_32_32_FLOAT = 59,
+ TFMT4_32_32_32_UINT = 60,
+ TFMT4_32_32_32_SINT = 61,
TFMT4_32_32_32_32_FLOAT = 63,
- TFMT4_9_9_9_E5_FLOAT = 32,
- TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_32_32_32_32_UINT = 64,
+ TFMT4_32_32_32_32_SINT = 65,
+ TFMT4_X8Z24_UNORM = 71,
+ TFMT4_DXT1 = 86,
+ TFMT4_DXT3 = 87,
+ TFMT4_DXT5 = 88,
+ TFMT4_RGTC1_UNORM = 90,
+ TFMT4_RGTC1_SNORM = 91,
+ TFMT4_RGTC2_UNORM = 94,
+ TFMT4_RGTC2_SNORM = 95,
+ TFMT4_BPTC_UFLOAT = 97,
+ TFMT4_BPTC_FLOAT = 98,
+ TFMT4_BPTC = 99,
TFMT4_ATC_RGB = 100,
TFMT4_ATC_RGBA_EXPLICIT = 101,
TFMT4_ATC_RGBA_INTERPOLATED = 102,
@@ -240,6 +269,545 @@ enum a4xx_tess_spacing {
EVEN_SPACING = 3,
};
+enum a4xx_ccu_perfcounter_select {
+ CCU_BUSY_CYCLES = 0,
+ CCU_RB_DEPTH_RETURN_STALL = 2,
+ CCU_RB_COLOR_RETURN_STALL = 3,
+ CCU_DEPTH_BLOCKS = 6,
+ CCU_COLOR_BLOCKS = 7,
+ CCU_DEPTH_BLOCK_HIT = 8,
+ CCU_COLOR_BLOCK_HIT = 9,
+ CCU_DEPTH_FLAG1_COUNT = 10,
+ CCU_DEPTH_FLAG2_COUNT = 11,
+ CCU_DEPTH_FLAG3_COUNT = 12,
+ CCU_DEPTH_FLAG4_COUNT = 13,
+ CCU_COLOR_FLAG1_COUNT = 14,
+ CCU_COLOR_FLAG2_COUNT = 15,
+ CCU_COLOR_FLAG3_COUNT = 16,
+ CCU_COLOR_FLAG4_COUNT = 17,
+ CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_BUSY = 1,
+ CP_PFP_IDLE = 2,
+ CP_PFP_BUSY_WORKING = 3,
+ CP_PFP_STALL_CYCLES_ANY = 4,
+ CP_PFP_STARVE_CYCLES_ANY = 5,
+ CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+ CP_PFP_STALLED_PER_STORE_ADDR = 7,
+ CP_PFP_PC_PROFILE = 8,
+ CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ CP_PFP_COND_INDIRECT_DISCARDED = 10,
+ CP_LONG_RESUMPTIONS = 11,
+ CP_RESUME_CYCLES = 12,
+ CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+ CP_LONG_PREEMPTIONS = 14,
+ CP_PREEMPT_CYCLES = 15,
+ CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+ CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+ CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+ CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+ CP_ME_FIFO_FULL_ME_BUSY = 20,
+ CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+ CP_ME_WAITING_FOR_PACKETS = 22,
+ CP_ME_BUSY_WORKING = 23,
+ CP_ME_STARVE_CYCLES_ANY = 24,
+ CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+ CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+ CP_ME_PC_PROFILE = 27,
+ CP_RCIU_FIFO_EMPTY = 28,
+ CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+ CP_RCIU_FIFO_FULL = 30,
+ CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+ CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+ CP_RCIU_FIFO_FULL_OTHER = 33,
+ CP_AHB_IDLE = 34,
+ CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+ CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+ CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+ CP_AHB_BUSY_WORKING = 38,
+ CP_AHB_BUSY_STALL_ON_HRDY = 39,
+ CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+ RAS_SUPER_TILES = 0,
+ RAS_8X8_TILES = 1,
+ RAS_4X4_TILES = 2,
+ RAS_BUSY_CYCLES = 3,
+ RAS_STALL_CYCLES_BY_RB = 4,
+ RAS_STALL_CYCLES_BY_VSC = 5,
+ RAS_STARVE_CYCLES_BY_TSE = 6,
+ RAS_SUPERTILE_CYCLES = 7,
+ RAS_TILE_CYCLES = 8,
+ RAS_FULLY_COVERED_SUPER_TILES = 9,
+ RAS_FULLY_COVERED_8X8_TILES = 10,
+ RAS_4X4_PRIM = 11,
+ RAS_8X4_4X8_PRIM = 12,
+ RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+ TSE_INPUT_PRIM = 0,
+ TSE_INPUT_NULL_PRIM = 1,
+ TSE_TRIVAL_REJ_PRIM = 2,
+ TSE_CLIPPED_PRIM = 3,
+ TSE_NEW_PRIM = 4,
+ TSE_ZERO_AREA_PRIM = 5,
+ TSE_FACENESS_CULLED_PRIM = 6,
+ TSE_ZERO_PIXEL_PRIM = 7,
+ TSE_OUTPUT_NULL_PRIM = 8,
+ TSE_OUTPUT_VISIBLE_PRIM = 9,
+ TSE_PRE_CLIP_PRIM = 10,
+ TSE_POST_CLIP_PRIM = 11,
+ TSE_BUSY_CYCLES = 12,
+ TSE_PC_STARVE = 13,
+ TSE_RAS_STALL = 14,
+ TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+ TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+ HLSQ_SP_VS_STAGE_CONSTANT = 0,
+ HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+ HLSQ_SP_FS_STAGE_CONSTANT = 2,
+ HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+ HLSQ_TP_STATE = 4,
+ HLSQ_QUADS = 5,
+ HLSQ_PIXELS = 6,
+ HLSQ_VERTICES = 7,
+ HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+ HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+ HLSQ_BUSY_CYCLES = 15,
+ HLSQ_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+ HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+ HLSQ_STALL_CYCLES_UCHE = 19,
+ HLSQ_RBBM_LOAD_CYCLES = 20,
+ HLSQ_DI_TO_VS_START_SP = 21,
+ HLSQ_DI_TO_FS_START_SP = 22,
+ HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+ HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+ HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+ HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+ HLSQ_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_UCHE_LATENCY_COUNT = 28,
+ HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+ PC_VIS_STREAMS_LOADED = 0,
+ PC_VPC_PRIMITIVES = 2,
+ PC_DEAD_PRIM = 3,
+ PC_LIVE_PRIM = 4,
+ PC_DEAD_DRAWCALLS = 5,
+ PC_LIVE_DRAWCALLS = 6,
+ PC_VERTEX_MISSES = 7,
+ PC_STALL_CYCLES_VFD = 9,
+ PC_STALL_CYCLES_TSE = 10,
+ PC_STALL_CYCLES_UCHE = 11,
+ PC_WORKING_CYCLES = 12,
+ PC_IA_VERTICES = 13,
+ PC_GS_PRIMITIVES = 14,
+ PC_HS_INVOCATIONS = 15,
+ PC_DS_INVOCATIONS = 16,
+ PC_DS_PRIMITIVES = 17,
+ PC_STARVE_CYCLES_FOR_INDEX = 20,
+ PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+ PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+ PC_STALL_CYCLES_TESS = 23,
+ PC_STARVE_CYCLES_FOR_POSITION = 24,
+ PC_MODE0_DRAWCALL = 25,
+ PC_MODE1_DRAWCALL = 26,
+ PC_MODE2_DRAWCALL = 27,
+ PC_MODE3_DRAWCALL = 28,
+ PC_MODE4_DRAWCALL = 29,
+ PC_PREDICATED_DEAD_DRAWCALL = 30,
+ PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+ PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+ PC_VPC_POS_DATA_TRANSACTION = 33,
+ PC_BUSY_CYCLES = 34,
+ PC_STARVE_CYCLES_DI = 35,
+ PC_STALL_CYCLES_VPC = 36,
+ TESS_WORKING_CYCLES = 37,
+ TESS_NUM_CYCLES_SETUP_WORKING = 38,
+ TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+ TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+ TESS_BUSY_CYCLES = 41,
+ TESS_STARVE_CYCLES_PC = 42,
+ TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+ PWR_CORE_CLOCK_CYCLES = 0,
+ PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+ RB_BUSY_CYCLES = 0,
+ RB_BUSY_CYCLES_BINNING = 1,
+ RB_BUSY_CYCLES_RENDERING = 2,
+ RB_BUSY_CYCLES_RESOLVE = 3,
+ RB_STARVE_CYCLES_BY_SP = 4,
+ RB_STARVE_CYCLES_BY_RAS = 5,
+ RB_STARVE_CYCLES_BY_MARB = 6,
+ RB_STALL_CYCLES_BY_MARB = 7,
+ RB_STALL_CYCLES_BY_HLSQ = 8,
+ RB_RB_RB_MARB_DATA = 9,
+ RB_SP_RB_QUAD = 10,
+ RB_RAS_RB_Z_QUADS = 11,
+ RB_GMEM_CH0_READ = 12,
+ RB_GMEM_CH1_READ = 13,
+ RB_GMEM_CH0_WRITE = 14,
+ RB_GMEM_CH1_WRITE = 15,
+ RB_CP_CONTEXT_DONE = 16,
+ RB_CP_CACHE_FLUSH = 17,
+ RB_CP_ZPASS_DONE = 18,
+ RB_STALL_FIFO0_FULL = 19,
+ RB_STALL_FIFO1_FULL = 20,
+ RB_STALL_FIFO2_FULL = 21,
+ RB_STALL_FIFO3_FULL = 22,
+ RB_RB_HLSQ_TRANSACTIONS = 23,
+ RB_Z_READ = 24,
+ RB_Z_WRITE = 25,
+ RB_C_READ = 26,
+ RB_C_WRITE = 27,
+ RB_C_READ_LATENCY = 28,
+ RB_Z_READ_LATENCY = 29,
+ RB_STALL_BY_UCHE = 30,
+ RB_MARB_UCHE_TRANSACTIONS = 31,
+ RB_CACHE_STALL_MISS = 32,
+ RB_CACHE_STALL_FIFO_FULL = 33,
+ RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+ RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+ RB_SAMPLER_UNITS_ACTIVE = 36,
+ RB_TOTAL_PASS = 38,
+ RB_Z_PASS = 39,
+ RB_Z_FAIL = 40,
+ RB_S_FAIL = 41,
+ RB_POWER0 = 42,
+ RB_POWER1 = 43,
+ RB_POWER2 = 44,
+ RB_POWER3 = 45,
+ RB_POWER4 = 46,
+ RB_POWER5 = 47,
+ RB_POWER6 = 48,
+ RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+ RBBM_ALWAYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TPL1_BUSY = 12,
+ RBBM_ANY_SP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_STATUS_MASKED = 22,
+ RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+ RBBM_TESS_BUSY = 24,
+ RBBM_COM_BUSY = 25,
+ RBBM_DCOM_BUSY = 32,
+ RBBM_ANY_CCU_BUSY = 33,
+ RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_GM_LOAD_INSTRUCTIONS = 3,
+ SP_GM_STORE_INSTRUCTIONS = 4,
+ SP_GM_ATOMICS = 5,
+ SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+ SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+ SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+ SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+ SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_WORKING_CYCLES = 29,
+ SP_EFU_WORKING_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_BUSY_CYCLES = 35,
+ SP_HS_INSTRUCTIONS = 36,
+ SP_DS_INSTRUCTIONS = 37,
+ SP_GS_INSTRUCTIONS = 38,
+ SP_CS_INSTRUCTIONS = 39,
+ SP_SCHEDULER_NON_WORKING = 40,
+ SP_WAVE_CONTEXTS = 41,
+ SP_WAVE_CONTEXT_CYCLES = 42,
+ SP_POWER0 = 43,
+ SP_POWER1 = 44,
+ SP_POWER2 = 45,
+ SP_POWER3 = 46,
+ SP_POWER4 = 47,
+ SP_POWER5 = 48,
+ SP_POWER6 = 49,
+ SP_POWER7 = 50,
+ SP_POWER8 = 51,
+ SP_POWER9 = 52,
+ SP_POWER10 = 53,
+ SP_POWER11 = 54,
+ SP_POWER12 = 55,
+ SP_POWER13 = 56,
+ SP_POWER14 = 57,
+ SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+ TP_L1_REQUESTS = 0,
+ TP_L1_MISSES = 1,
+ TP_QUADS_OFFSET = 8,
+ TP_QUAD_SHADOW = 9,
+ TP_QUADS_ARRAY = 10,
+ TP_QUADS_GRADIENT = 11,
+ TP_QUADS_1D2D = 12,
+ TP_QUADS_3DCUBE = 13,
+ TP_BUSY_CYCLES = 16,
+ TP_STALL_CYCLES_BY_ARB = 17,
+ TP_STATE_CACHE_REQUESTS = 20,
+ TP_STATE_CACHE_MISSES = 21,
+ TP_POWER0 = 22,
+ TP_POWER1 = 23,
+ TP_POWER2 = 24,
+ TP_POWER3 = 25,
+ TP_POWER4 = 26,
+ TP_POWER5 = 27,
+ TP_POWER6 = 28,
+ TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+ UCHE_VBIF_READ_BEATS_TP = 0,
+ UCHE_VBIF_READ_BEATS_VFD = 1,
+ UCHE_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_VBIF_READ_BEATS_MARB = 3,
+ UCHE_VBIF_READ_BEATS_SP = 4,
+ UCHE_READ_REQUESTS_TP = 5,
+ UCHE_READ_REQUESTS_VFD = 6,
+ UCHE_READ_REQUESTS_HLSQ = 7,
+ UCHE_READ_REQUESTS_MARB = 8,
+ UCHE_READ_REQUESTS_SP = 9,
+ UCHE_WRITE_REQUESTS_MARB = 10,
+ UCHE_WRITE_REQUESTS_SP = 11,
+ UCHE_TAG_CHECK_FAILS = 12,
+ UCHE_EVICTS = 13,
+ UCHE_FLUSHES = 14,
+ UCHE_VBIF_LATENCY_CYCLES = 15,
+ UCHE_VBIF_LATENCY_SAMPLES = 16,
+ UCHE_BUSY_CYCLES = 17,
+ UCHE_VBIF_READ_BEATS_PC = 18,
+ UCHE_READ_REQUESTS_PC = 19,
+ UCHE_WRITE_REQUESTS_VPC = 20,
+ UCHE_STALL_BY_VBIF = 21,
+ UCHE_WRITE_REQUESTS_VSC = 22,
+ UCHE_POWER0 = 23,
+ UCHE_POWER1 = 24,
+ UCHE_POWER2 = 25,
+ UCHE_POWER3 = 26,
+ UCHE_POWER4 = 27,
+ UCHE_POWER5 = 28,
+ UCHE_POWER6 = 29,
+ UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+ CYCLES_HELD_OFF_ID_0 = 86,
+ CYCLES_HELD_OFF_ID_1 = 87,
+ CYCLES_HELD_OFF_ID_2 = 88,
+ CYCLES_HELD_OFF_ID_3 = 89,
+ CYCLES_HELD_OFF_ID_4 = 90,
+ CYCLES_HELD_OFF_ID_5 = 91,
+ CYCLES_HELD_OFF_ID_6 = 92,
+ CYCLES_HELD_OFF_ID_7 = 93,
+ CYCLES_HELD_OFF_ID_8 = 94,
+ CYCLES_HELD_OFF_ID_9 = 95,
+ CYCLES_HELD_OFF_ID_10 = 96,
+ CYCLES_HELD_OFF_ID_11 = 97,
+ CYCLES_HELD_OFF_ID_12 = 98,
+ CYCLES_HELD_OFF_ID_13 = 99,
+ CYCLES_HELD_OFF_ID_14 = 100,
+ CYCLES_HELD_OFF_ID_15 = 101,
+ AXI_READ_REQUEST_HELD_OFF = 102,
+ AXI_WRITE_REQUEST_HELD_OFF = 103,
+ AXI_REQUEST_HELD_OFF = 104,
+ AXI_WRITE_DATA_HELD_OFF = 105,
+ OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+ OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+ OCMEM_AXI_REQUEST_HELD_OFF = 108,
+ OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+ ELAPSED_CYCLES_DDR = 110,
+ ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+ VFD_UCHE_BYTE_FETCHED = 0,
+ VFD_UCHE_TRANS = 1,
+ VFD_FETCH_INSTRUCTIONS = 3,
+ VFD_BUSY_CYCLES = 5,
+ VFD_STALL_CYCLES_UCHE = 6,
+ VFD_STALL_CYCLES_HLSQ = 7,
+ VFD_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_STALL_CYCLES_VPC_ALLOC = 9,
+ VFD_MODE_0_FIBERS = 13,
+ VFD_MODE_1_FIBERS = 14,
+ VFD_MODE_2_FIBERS = 15,
+ VFD_MODE_3_FIBERS = 16,
+ VFD_MODE_4_FIBERS = 17,
+ VFD_BFIFO_STALL = 18,
+ VFD_NUM_VERTICES_TOTAL = 19,
+ VFD_PACKER_FULL = 20,
+ VFD_UCHE_REQUEST_FIFO_FULL = 21,
+ VFD_STARVE_CYCLES_PC = 22,
+ VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+ VPC_SP_LM_COMPONENTS = 2,
+ VPC_SP0_LM_BYTES = 3,
+ VPC_SP1_LM_BYTES = 4,
+ VPC_SP2_LM_BYTES = 5,
+ VPC_SP3_LM_BYTES = 6,
+ VPC_WORKING_CYCLES = 7,
+ VPC_STALL_CYCLES_LM = 8,
+ VPC_STARVE_CYCLES_RAS = 9,
+ VPC_STREAMOUT_CYCLES = 10,
+ VPC_UCHE_TRANSACTIONS = 12,
+ VPC_STALL_CYCLES_UCHE = 13,
+ VPC_BUSY_CYCLES = 14,
+ VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+ VSC_BUSY_CYCLES = 0,
+ VSC_WORKING_CYCLES = 1,
+ VSC_STALL_CYCLES_UCHE = 2,
+ VSC_STARVE_CYCLES_RAS = 3,
+ VSC_EOT_NUM = 4,
+};
+
enum a4xx_tex_filter {
A4XX_TEX_NEAREST = 0,
A4XX_TEX_LINEAR = 1,
@@ -326,6 +894,12 @@ static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1
+
#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
@@ -363,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -400,8 +975,13 @@ static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4
#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
-#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
-#define A4XX_RB_MRT_CONTROL_B11 0x00000800
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -461,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -479,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -490,13 +1070,19 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
}
-#define REG_A4XX_RB_BLEND_RED 0x000020f3
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED 0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -504,13 +1090,27 @@ static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_GREEN 0x000020f4
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -518,13 +1118,27 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_BLUE 0x000020f5
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -532,13 +1146,27 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -546,6 +1174,14 @@ static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
@@ -568,7 +1204,7 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
{
return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
}
-#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
@@ -734,8 +1370,9 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
@@ -996,8 +1633,386 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x
#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000
+
#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
+
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
@@ -1046,6 +2061,10 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { r
static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
+
#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
@@ -1060,6 +2079,14 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179
+
#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
@@ -1099,6 +2126,11 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8
+
#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
@@ -1167,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
-
static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -1191,6 +2235,20 @@ static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240
#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507
+
#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
@@ -1201,6 +2259,28 @@ static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578
#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece
+
#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
@@ -1226,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -1374,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -1699,6 +2779,12 @@ static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
+
#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
#define REG_A4XX_VPC_ATTR 0x00002140
@@ -1811,6 +2897,20 @@ static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0
#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
+
#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
@@ -1967,6 +3067,20 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
+
#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
@@ -2021,9 +3135,25 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
+
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
+
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
@@ -2114,6 +3244,7 @@ static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
@@ -2285,6 +3416,20 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
+
#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
@@ -2295,6 +3440,22 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
+
#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
@@ -2545,14 +3706,42 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
#define REG_A4XX_PC_BIN_BASE 0x000021c0
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
@@ -2564,7 +3753,20 @@ static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
-#define REG_A4XX_UNKNOWN_21C5 0x000021c5
+#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
@@ -2602,12 +3804,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
{
return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
}
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A4XX_VBIF_VERSION 0x00003000
@@ -2646,20 +3844,6 @@ static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
#define REG_A4XX_UNKNOWN_20EF 0x000020ef
-#define REG_A4XX_UNKNOWN_20F0 0x000020f0
-
-#define REG_A4XX_UNKNOWN_20F1 0x000020f1
-
-#define REG_A4XX_UNKNOWN_20F2 0x000020f2
-
-#define REG_A4XX_UNKNOWN_20F7 0x000020f7
-#define A4XX_UNKNOWN_20F7__MASK 0xffffffff
-#define A4XX_UNKNOWN_20F7__SHIFT 0
-static inline uint32_t A4XX_UNKNOWN_20F7(float val)
-{
- return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK;
-}
-
#define REG_A4XX_UNKNOWN_2152 0x00002152
#define REG_A4XX_UNKNOWN_2153 0x00002153
@@ -2720,6 +3904,12 @@ static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
{
return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
}
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
#define REG_A4XX_TEX_SAMP_1 0x00000001
#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
@@ -2728,6 +3918,7 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val
{
return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
}
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
@@ -2796,7 +3987,7 @@ static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
{
return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
}
-#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000
#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
{
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a53f1be05f75..534a7c3fbdca 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -31,6 +31,7 @@
extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
/*
* a4xx_enable_hwcg() - Program the clock control registers
@@ -102,14 +103,20 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+ /* Early A430's have a timing issue with SP/TP power collapse;
+ disabling HW clock gating prevents it. */
+ if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+ else
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}
-static void a4xx_me_init(struct msm_gpu *gpu)
+
+static bool a4xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -130,8 +137,8 @@ static void a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a4xx_idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -141,7 +148,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- if (adreno_is_a4xx(adreno_gpu)) {
+ if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -150,6 +157,13 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else {
BUG();
}
@@ -161,6 +175,10 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+ if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+ }
+
/* Enable the RBBM error reporting bits */
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
@@ -183,6 +201,14 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+ /* use the first CP counter for timestamp queries.. userspace may set
+ * this as well but it selects the same counter/countable:
+ */
+ gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+ if (adreno_is_a430(adreno_gpu))
+ gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
/* Disable L2 bypass to avoid UCHE out of bounds errors */
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
@@ -190,6 +216,15 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+ /* On A430 enable SP regfile sleep for power savings */
+ /* TODO downstream does this for !420, so maybe applies for 405 too? */
+ if (!adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+ 0x00000441);
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+ 0x00000441);
+ }
+
a4xx_enable_hwcg(gpu);
/*
@@ -204,10 +239,6 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
}
- ret = adreno_hw_init(gpu);
- if (ret)
- return ret;
-
/* setup access protection: */
gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
@@ -262,8 +293,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
- a4xx_me_init(gpu);
- return 0;
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a4xx_recover(struct msm_gpu *gpu)
@@ -297,17 +327,21 @@ static void a4xx_destroy(struct msm_gpu *gpu)
kfree(a4xx_gpu);
}
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
- A4XX_RBBM_STATUS_GPU_BUSY)))
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ return true;
}
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -422,87 +456,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A4XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A4XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A4XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A4XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A4XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A4XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A4XX_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A4XX_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A4XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A4XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_VS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_FS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A4XX_SP_VS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A4XX_SP_FS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A4XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A4XX_UCHE_INVALIDATE0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static void a4xx_dump(struct msm_gpu *gpu)
@@ -512,23 +472,68 @@ static void a4xx_dump(struct msm_gpu *gpu)
adreno_dump(gpu);
}
+static int a4xx_pm_resume(struct msm_gpu *gpu) {
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ unsigned int reg;
+ /* Set the default register values; set SW_COLLAPSE to 0 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+ do {
+ udelay(5);
+ reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+ } while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
+ }
+ return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu) {
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ /* Set the default register values; set SW_COLLAPSE to 1 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+ }
+ return 0;
+}
+
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.hw_init = a4xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
.last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a4xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
.show = a4xx_show,
#endif
},
+ .get_timestamp = a4xx_get_timestamp,
};
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
@@ -563,7 +568,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
@@ -582,7 +587,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000000..56dad2217289
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,3497 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /local3/projects/drm/envytools/rnndb//adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a5xx.xml ( 81546 bytes, from 2016-10-31 16:38:41)
+- /local3/projects/drm/envytools/rnndb//adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_R8_UNORM = 3,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R16G16B16A16_FLOAT = 98,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+ TFETCH5_1_BYTE = 0,
+ TFETCH5_2_BYTE = 1,
+ TFETCH5_4_BYTE = 2,
+ TFETCH5_8_BYTE = 3,
+ TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_debugbus {
+ A5XX_RBBM_DBGBUS_CP = 1,
+ A5XX_RBBM_DBGBUS_RBBM = 2,
+ A5XX_RBBM_DBGBUS_VBIF = 3,
+ A5XX_RBBM_DBGBUS_HLSQ = 4,
+ A5XX_RBBM_DBGBUS_UCHE = 5,
+ A5XX_RBBM_DBGBUS_DPM = 6,
+ A5XX_RBBM_DBGBUS_TESS = 7,
+ A5XX_RBBM_DBGBUS_PC = 8,
+ A5XX_RBBM_DBGBUS_VFDP = 9,
+ A5XX_RBBM_DBGBUS_VPC = 10,
+ A5XX_RBBM_DBGBUS_TSE = 11,
+ A5XX_RBBM_DBGBUS_RAS = 12,
+ A5XX_RBBM_DBGBUS_VSC = 13,
+ A5XX_RBBM_DBGBUS_COM = 14,
+ A5XX_RBBM_DBGBUS_DCOM = 15,
+ A5XX_RBBM_DBGBUS_LRZ = 16,
+ A5XX_RBBM_DBGBUS_A2D_DSP = 17,
+ A5XX_RBBM_DBGBUS_CCUFCHE = 18,
+ A5XX_RBBM_DBGBUS_GPMU = 19,
+ A5XX_RBBM_DBGBUS_RBP = 20,
+ A5XX_RBBM_DBGBUS_HM = 21,
+ A5XX_RBBM_DBGBUS_RBBM_CFG = 22,
+ A5XX_RBBM_DBGBUS_VBIF_CX = 23,
+ A5XX_RBBM_DBGBUS_GPC = 29,
+ A5XX_RBBM_DBGBUS_LARC = 30,
+ A5XX_RBBM_DBGBUS_HLSQ_SPTP = 31,
+ A5XX_RBBM_DBGBUS_RB_0 = 32,
+ A5XX_RBBM_DBGBUS_RB_1 = 33,
+ A5XX_RBBM_DBGBUS_RB_2 = 34,
+ A5XX_RBBM_DBGBUS_RB_3 = 35,
+ A5XX_RBBM_DBGBUS_CCU_0 = 40,
+ A5XX_RBBM_DBGBUS_CCU_1 = 41,
+ A5XX_RBBM_DBGBUS_CCU_2 = 42,
+ A5XX_RBBM_DBGBUS_CCU_3 = 43,
+ A5XX_RBBM_DBGBUS_A2D_RAS_0 = 48,
+ A5XX_RBBM_DBGBUS_A2D_RAS_1 = 49,
+ A5XX_RBBM_DBGBUS_A2D_RAS_2 = 50,
+ A5XX_RBBM_DBGBUS_A2D_RAS_3 = 51,
+ A5XX_RBBM_DBGBUS_VFD_0 = 56,
+ A5XX_RBBM_DBGBUS_VFD_1 = 57,
+ A5XX_RBBM_DBGBUS_VFD_2 = 58,
+ A5XX_RBBM_DBGBUS_VFD_3 = 59,
+ A5XX_RBBM_DBGBUS_SP_0 = 64,
+ A5XX_RBBM_DBGBUS_SP_1 = 65,
+ A5XX_RBBM_DBGBUS_SP_2 = 66,
+ A5XX_RBBM_DBGBUS_SP_3 = 67,
+ A5XX_RBBM_DBGBUS_TPL1_0 = 72,
+ A5XX_RBBM_DBGBUS_TPL1_1 = 73,
+ A5XX_RBBM_DBGBUS_TPL1_2 = 74,
+ A5XX_RBBM_DBGBUS_TPL1_3 = 75,
+};
+
+enum a5xx_shader_blocks {
+ A5XX_TP_W_MEMOBJ = 1,
+ A5XX_TP_W_SAMPLER = 2,
+ A5XX_TP_W_MIPMAP_BASE = 3,
+ A5XX_TP_W_MEMOBJ_TAG = 4,
+ A5XX_TP_W_SAMPLER_TAG = 5,
+ A5XX_TP_S_3D_MEMOBJ = 6,
+ A5XX_TP_S_3D_SAMPLER = 7,
+ A5XX_TP_S_3D_MEMOBJ_TAG = 8,
+ A5XX_TP_S_3D_SAMPLER_TAG = 9,
+ A5XX_TP_S_CS_MEMOBJ = 10,
+ A5XX_TP_S_CS_SAMPLER = 11,
+ A5XX_TP_S_CS_MEMOBJ_TAG = 12,
+ A5XX_TP_S_CS_SAMPLER_TAG = 13,
+ A5XX_SP_W_INSTR = 14,
+ A5XX_SP_W_CONST = 15,
+ A5XX_SP_W_UAV_SIZE = 16,
+ A5XX_SP_W_CB_SIZE = 17,
+ A5XX_SP_W_UAV_BASE = 18,
+ A5XX_SP_W_CB_BASE = 19,
+ A5XX_SP_W_INST_TAG = 20,
+ A5XX_SP_W_STATE = 21,
+ A5XX_SP_S_3D_INSTR = 22,
+ A5XX_SP_S_3D_CONST = 23,
+ A5XX_SP_S_3D_CB_BASE = 24,
+ A5XX_SP_S_3D_CB_SIZE = 25,
+ A5XX_SP_S_3D_UAV_BASE = 26,
+ A5XX_SP_S_3D_UAV_SIZE = 27,
+ A5XX_SP_S_CS_INSTR = 28,
+ A5XX_SP_S_CS_CONST = 29,
+ A5XX_SP_S_CS_CB_BASE = 30,
+ A5XX_SP_S_CS_CB_SIZE = 31,
+ A5XX_SP_S_CS_UAV_BASE = 32,
+ A5XX_SP_S_CS_UAV_SIZE = 33,
+ A5XX_SP_S_3D_INSTR_DIRTY = 34,
+ A5XX_SP_S_3D_CONST_DIRTY = 35,
+ A5XX_SP_S_3D_CB_BASE_DIRTY = 36,
+ A5XX_SP_S_3D_CB_SIZE_DIRTY = 37,
+ A5XX_SP_S_3D_UAV_BASE_DIRTY = 38,
+ A5XX_SP_S_3D_UAV_SIZE_DIRTY = 39,
+ A5XX_SP_S_CS_INSTR_DIRTY = 40,
+ A5XX_SP_S_CS_CONST_DIRTY = 41,
+ A5XX_SP_S_CS_CB_BASE_DIRTY = 42,
+ A5XX_SP_S_CS_CB_SIZE_DIRTY = 43,
+ A5XX_SP_S_CS_UAV_BASE_DIRTY = 44,
+ A5XX_SP_S_CS_UAV_SIZE_DIRTY = 45,
+ A5XX_HLSQ_ICB = 46,
+ A5XX_HLSQ_ICB_DIRTY = 47,
+ A5XX_HLSQ_ICB_CB_BASE_DIRTY = 48,
+ A5XX_SP_POWER_RESTORE_RAM = 64,
+ A5XX_SP_POWER_RESTORE_RAM_TAG = 65,
+ A5XX_TP_POWER_RESTORE_RAM = 66,
+ A5XX_TP_POWER_RESTORE_RAM_TAG = 67,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+/* Auto-generated Adreno A5XX register header (rules-ng-ng style); this hunk
+ * keeps its unified-diff '+' markers.  RBBM performance counters are 64-bit
+ * values exposed as consecutive 32-bit _LO/_HI register pairs. */
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+/* NOTE(review): the four RBBM_SEL defines below are emitted again later in
+ * this generated header with identical values (benign redefinition). */
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+/* Free-running 64-bit always-on counter, split LO/HI. */
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+/* RBBM_STATUS: per-unit busy bits, one flag per GPU sub-block. */
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+/* NOTE(review): "BUZY" (sic) is the upstream generated spelling; keep the
+ * name as-is, since renaming would break any code using this header. */
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
+/* Additional RBBM status, interrupt status, AHB error reporting, and
+ * performance-counter control/load registers. */
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+/* NOTE(review): RBBM_PERFCTR_RBBM_SEL_0..3 duplicate earlier definitions in
+ * this header.  The replacement lists are identical, so this is a legal,
+ * harmless C macro redefinition emitted twice by the header generator. */
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+/* Debug-bus configuration/trace registers, ISDB counter, and the SECVID
+ * (secure/trusted memory) control block. */
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+/* VSC (visibility stream) registers.  The inline helpers pack a field value
+ * into its bit position within the VSC_BIN_SIZE word. */
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+/* Pack bin width into bits [14:0]; values wider than the mask are truncated. */
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+/* Pack bin height into bits [30:16]. */
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+
+/* GRAS (rasterizer front-end) performance-counter select registers. */
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+/* RB (render backend) control plus perf/power counter select registers. */
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+/* PC (primitive control) registers; two offsets are still unreverse-
+ * engineered and named UNKNOWN_* by the generator. */
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+/* HLSQ (high-level sequencer) registers and debug read-back aperture. */
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK 0x0000ff00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT 8
+/* Pack the state-type selector into bits [15:8] of DBG_READ_SEL. */
+static inline uint32_t A5XX_HLSQ_DBG_READ_SEL_STATETYPE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT) & A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK;
+}
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+/* VFD (vertex fetch/decode) and VPC (varying/position cache) registers. */
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+/* UCHE (unified L2 cache) registers: GMEM range, cache invalidation ranges,
+ * trap logging, and perf/power counter selects. */
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+/* SP (shader processor) and TPL1 (texture pipe) control and counter-select
+ * registers. */
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+/* VBIF (bus interface) registers: clock/halt control, test bus, and perf/
+ * power counters.  Note the generator does not emit these strictly in
+ * ascending offset order (e.g. 0x3049 before 0x302a). */
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+#define A5XX_VBIF_CLKON_FORCE_ON 0x00000001
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN 0x00000001
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK 0x0000000f
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT 0
+/* Pack the 4-bit test-bus-1 data selector (bits [3:0]). */
+static inline uint32_t A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK 0x0000001f
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT 0
+/* Pack the 5-bit test-bus-2 data selector (bits [4:0]). */
+static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+/* GPMU (GPU power management unit) firmware RAM bases and power-collapse
+ * control registers. */
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+/* NOTE(review): GPMU_CLOCK_THROTTLE_CTRL is defined again later in this
+ * header with the same value (benign identical redefinition). */
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+/* Per-unit 64-bit power counters (_LO/_HI pairs) sampled by the GPMU, plus
+ * the counter enable/select registers. */
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+/* NOTE(review): duplicate of the GPMU_CLOCK_THROTTLE_CTRL definition given
+ * earlier in this header; identical value, so the redefinition is legal C
+ * and harmless, but the generator emits it twice. */
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+/* GPMU thermal/voltage sensing and LLM/GLM sleep handshake registers. */
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK 0x0000000f
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT 0
+/* Pack the current-sense status field into bits [3:0]. */
+static inline uint32_t A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT) & A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK;
+}
+/* NOTE(review): BCL_ENABLED (bit 1) overlaps the ISENSE_STATUS [3:0] field
+ * above — the generator's XML models them as alternative layouts. */
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_BCL_ENABLED 0x00000002
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_LLM_ENABLED 0x00000200
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_IDLE_FULL_LM 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK 0x00000030
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT 4
+/* Pack the child-state field into bits [5:4] of SLEEP_CTRL. */
+static inline uint32_t A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT) & A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK;
+}
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_IDLE_FULL_ACK 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_WAKEUP_ACK 0x00000002
+
+/* GDPM interrupt/config registers and GPU current-sensor (CS) calibration
+ * registers. */
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+/* GRAS clipper control and guardband clip adjustment (two 10-bit fields). */
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CLEAR_CNTL 0x0000e005
+#define A5XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+/* Pack the horizontal guardband adjustment into bits [9:0]. */
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+/* Pack the vertical guardband adjustment into bits [19:10]. */
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+/* Viewport transform registers.  Each helper relies on fui(), defined
+ * elsewhere in the driver — presumably it returns the raw IEEE-754 bit
+ * pattern of the float (TODO confirm against the driver's util headers);
+ * the full 32-bit value occupies the whole register. */
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+/* GRAS setup-unit registers: cull/poly-offset control, point size limits
+ * (fixed point, scaled by 16, i.e. 4 fractional bits), polygon offset
+ * (float via fui()), and depth buffer format. */
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+/* Minimum point size, x.4 fixed point in bits [15:0]. */
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+/* Maximum point size, x.4 fixed point in bits [31:16]. */
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+/* NOTE(review): unlike MINMAX above, this casts through int32_t —
+ * presumably the field is signed fixed point in the hardware; confirm
+ * against the rules-ng-ng XML before relying on negative values. */
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+/* Pack an a5xx_depth_format enum (declared elsewhere) into bits [2:0]. */
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+/*
+ * GRAS scan-converter (SC) block: MSAA control plus three banks of
+ * scissor rectangles (screen / viewport / window), each a TL + BR pair
+ * with 15-bit X in the low half-word and 15-bit Y in the high half-word,
+ * and bit 31 disabling the window offset.  Followed by the low-resolution
+ * Z (LRZ) buffer pointers.  Auto-generated -- do not hand-edit.
+ */
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+/* _0 suffix: presumably the first entry of a per-viewport array (note the
+ * e0aa -> e0ca -> e0ea spacing between banks) -- TODO confirm stride. */
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+/* Low-resolution Z (LRZ) buffer: 64-bit base split across LO/HI registers. */
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
+/*
+ * Render backend (RB) control and per-MRT (multiple render target)
+ * registers.  The MRT bank is an array: REG_A5XX_RB_MRT(i) strides by
+ * 0x7 dwords per target (control, blend control, buf info, pitch,
+ * array pitch, base lo/hi).  Auto-generated -- do not hand-edit.
+ */
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+/* val >> 5: width/height programmed in 32-pixel units (low 5 bits of the
+ * pixel dimension are discarded) -- presumably bin dimensions, confirm. */
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+/* Per-MRT register array accessors: i0 is the render-target index. */
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+/* Blend control: separate RGB and alpha src/dst factors and opcodes. */
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+/* Pitch programmed in 16-byte units (val >> 4). */
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK 0x0007ffff
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT) & A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK;
+}
+
+/* Array pitch (layer size) programmed in 64-byte units (val >> 6). */
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK 0x01ffffff
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
+/*
+ * Blend constant color registers (RED/GREEN/BLUE/ALPHA).  Each channel
+ * has one packed register carrying three encodings of the same constant
+ * -- UINT (8-bit), SINT (8-bit), FLOAT (fp16 via util_float_to_half(),
+ * helper defined elsewhere) -- plus a companion *_F32 register holding
+ * the raw IEEE-754 bits (via fui()).  Auto-generated -- do not hand-edit.
+ */
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+/*
+ * RB state: alpha test, blend enable/sample mask, depth buffer, stencil
+ * (front/back-face ops share one STENCIL_CONTROL register; *_BF fields
+ * are the back-face copies), window offset, resolve (GMEM->memory blit)
+ * rectangle, and depth flag buffer.  Auto-generated -- do not hand-edit.
+ */
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+/* ENABLE_BLEND is a bitmask: one enable bit per MRT (8 targets). */
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+/* Depth pitch/array-pitch in 32-byte units (val >> 5): the low 5 bits of
+ * the byte pitch are discarded -- callers must pass 32-byte-aligned pitches. */
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+/* STENCIL_BASE: (val >> 12) << 12 keeps only bits 31:12, i.e. the base
+ * address must be 4 KiB aligned; the low 12 bits are silently dropped. */
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c2
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A5XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E1C3 0x0000e1c3
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+/* Resolve rectangle corners (CNTL_1 = TL, CNTL_2 = BR, presumably --
+ * TODO confirm against the driver's resolve path). */
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+/*
+ * Varying/position cache (VPC) and primitive control (PC) registers,
+ * including the stream-out (SO) buffer pointers.  The REG_* inline
+ * helpers taking i0 index into per-varying register arrays (stride 0x1).
+ * Auto-generated -- do not hand-edit.
+ */
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+/* Stream-out: _0 suffix suggests the first of several buffer slots --
+ * TODO confirm slot count/stride from the rnndb XML. */
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+/*
+ * Vertex fetch/decode (VFD) registers -- per-stream fetch descriptors
+ * (stride 0x4), decode instructions (stride 0x2), destination controls
+ * (stride 0x1) -- followed by the shader processor (SP) VS control
+ * register.  Auto-generated -- do not hand-edit.
+ */
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+
+/* REGID4INST/REGID4VTX: shader register IDs that receive the instance
+ * and vertex IDs, presumably -- TODO confirm against compiler usage. */
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
+/* Per-stream vertex fetch descriptor: base lo/hi, size, stride (4 dwords). */
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd
+
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILED 0x00000001
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+ return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff00
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 8
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_LAYERSZ2__MASK 0xff800000
+#define A5XX_TEX_CONST_3_LAYERSZ2__SHIFT 23
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE__SHIFT) & A5XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000000..f5847bc60c49
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,1345 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_address_space *aspace)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_mmu *mmu = aspace->mmu;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ if (!iommu->ttbr0)
+ return;
+
+ /* Turn off protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn on APRIV mode to access critical regions */
+ OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+ OUT_RING(ring, 1);
+
+ /* Make sure the ME is synchronized before starting the update */
+ OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+ /* Execute the table update */
+ OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 3);
+ OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+ OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+ OUT_RING(ring, iommu->contextidr);
+
+ /*
+ * Write the new TTBR0 to the preemption records - this will be used to
+ * reload the pagetable if the current ring gets preempted out.
+ */
+ OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+ OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+
+ /* Also write the current contextidr (ASID) */
+ OUT_PKT7(ring, CP_MEM_WRITE, 3);
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id,
+ contextidr)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id,
+ contextidr)));
+ OUT_RING(ring, iommu->contextidr);
+
+ /* Invalidate the draw state so we start off fresh */
+ OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+ OUT_RING(ring, 0x40000);
+ OUT_RING(ring, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn off APRIV */
+ OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn protected mode back on */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+}
+
+static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+ unsigned int i, ibs = 0;
+
+ a5xx_set_pagetable(gpu, ring, submit->aspace);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Enable local preemption for finegrain preemption — NOTE(review): packet below is CP_PREEMPT_ENABLE_GLOBAL (same as above); upstream uses CP_PREEMPT_ENABLE_LOCAL here, verify */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+ /*
+ * Write the render mode to NULL (0) to indicate to the CP that the IBs
+ * are done rendering - otherwise a lucky preemption would start
+ * replaying from the last checkpoint
+ */
+ OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Turn off IB level preemptions */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->fence);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, submit->fence);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ /*
+ * If dword[2:1] are non zero, they specify an address for the CP to
+ * write the value of dword[3] to on preemption complete. Write 0 to
+ * skip the write
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* Set bit 0 to trigger an interrupt on preempt complete */
+ OUT_RING(ring, 0x01);
+
+ a5xx_flush(gpu, ring);
+
+ /* Check to see if we need to start preemption */
+ a5xx_preempt_trigger(gpu);
+
+ return 0;
+}
+
+/*
+ * Hardware clock gating (HWCG) configuration table: one {register, value}
+ * pair per write. a5xx_set_hwcg() walks this table, programming either the
+ * listed value (gating enabled) or 0 (gating disabled) into each register.
+ * Entries cover the SP/TP/UCHE/RB/CCU/RAC/RBBM blocks, four instances each
+ * where the block is per-cluster (…_SP0-3, …_TP0-3, …_RB0-3, …_CCU0-3).
+ */
+static const struct {
+ u32 offset;
+ u32 value;
+} a5xx_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+/*
+ * a5xx_set_hwcg() - enable or disable hardware clock gating
+ * @gpu: the GPU to program
+ * @state: true to enable HWCG (program the table values), false to disable
+ * (write 0 to every table register)
+ *
+ * Walks the a5xx_hwcg[] table and then programs the global clock control
+ * and ISDB count registers. The A540 has two extra GPMU clock registers
+ * that are handled separately.
+ */
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+ gpu_write(gpu, a5xx_hwcg[i].offset,
+ state ? a5xx_hwcg[i].value : 0);
+
+ /* There are a few additional registers just for A540 */
+ if (adreno_is_a540(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU,
+ state ? 0x770 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU,
+ state ? 0x004 : 0);
+ }
+
+ /* Note: ISDB count keeps bit 7 set either way (0x182 vs 0x180) */
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+}
+
+/*
+ * a5xx_me_init() - send the CP_ME_INIT packet to initialize the micro engine
+ * @gpu: the GPU to initialize
+ *
+ * Emits the 8-dword CP_ME_INIT sequence on ring 0 and waits for the GPU to
+ * go idle. Returns 0 on success or -EINVAL if the GPU did not idle.
+ */
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ /* Feature mask for the following dwords */
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a530(adreno_gpu)) {
+ /* Workaround for token end syncs
+ * Force a WFI after every direct-render 3D mode draw and every
+ * 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ /* Padding to fill out the 8-dword payload */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu, ring);
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+/*
+ * a5xx_preempt_start() - prime ring 0 for preemption and yield the floor
+ * @gpu: the GPU to set up
+ *
+ * No-op on single-ring configurations. Otherwise programs the save record
+ * for ring 0, enables local preemption/yield, and issues a yield packet so
+ * the preemption machinery starts from a known state. Returns 0 on success
+ * or -EINVAL if the GPU failed to idle afterwards.
+ */
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ /* Preemption is pointless with a single ring */
+ if (gpu->nr_rings == 1)
+ return 0;
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Global preemption off, local preemption on */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
+
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ gpu->funcs->flush(gpu, ring);
+
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+
+/*
+ * a5xx_ucode_load_bo() - copy a microcode image into a new GEM buffer
+ * @gpu: the GPU (provides the drm device and address space)
+ * @fw: firmware image; the first 4 bytes (header) are skipped
+ * @iova: optional out-parameter for the buffer's GPU address
+ *
+ * Allocates an uncached, GPU-read-only BO sized for the firmware payload,
+ * optionally pins it to get an iova, and copies the payload in. Returns the
+ * BO or an ERR_PTR; on failure the BO reference is dropped here, so the
+ * caller owns the object only on success.
+ */
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_device *drm = gpu->dev;
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ mutex_lock(&drm->struct_mutex);
+ bo = msm_gem_new(drm, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(bo))
+ return bo;
+
+ ptr = msm_gem_vaddr(bo);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (iova) {
+ int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
+
+ if (ret) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(ret);
+ }
+ }
+
+ /* Skip the 4-byte version header at the start of the image */
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+ return bo;
+}
+
+/*
+ * a5xx_ucode_init() - load PM4 and PFP microcode and point the CP at them
+ * @gpu: the GPU to program
+ *
+ * Lazily allocates the PM4/PFP BOs (they persist across hw_init calls) and
+ * writes their GPU addresses into the CP instruction base registers.
+ * Returns 0 on success or a negative errno from the BO load.
+ */
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+ &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ /* Reset so the next attempt retries the allocation */
+ a5xx_gpu->pm4_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+ &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+ REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+ REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+ return 0;
+}
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+
+/*
+ * a5xx_zap_shader_init() - load or re-initialize the secure "zap" shader
+ * @gpu: the GPU whose device tree node names the zap shader
+ *
+ * If the "qcom,zap-shader" DT property is absent no zap shader is needed.
+ * If the shader was already loaded, ask the secure world (SCM) to restore
+ * its register state - unless CPZ retention keeps it alive across power
+ * collapse. Otherwise load it through the subsystem restart framework.
+ * Failures are logged but not fatal here; a5xx_hw_init() checks the
+ * A5XX_ZAP_SHADER_LOADED flag to decide how to leave secure mode.
+ */
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ const char *name;
+ void *ptr;
+
+ /* If no zap shader was defined, we'll assume that none is needed */
+ if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,zap-shader", &name))
+ return;
+
+ /*
+ * If the zap shader has already been loaded then just ask the SCM to
+ * re-initialize the registers (not needed if CPZ retention is a thing)
+ */
+ if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+ int ret;
+ struct scm_desc desc = { 0 };
+
+ if (of_property_read_bool(GPU_OF_NODE(gpu),
+ "qcom,cpz-retention"))
+ return;
+
+ /* args = {0, 13}: magic expected by the resume SCM call */
+ desc.args[0] = 0;
+ desc.args[1] = 13;
+ desc.arginfo = SCM_ARGS(2);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0x0A), &desc);
+ if (ret)
+ DRM_ERROR(
+ "%s: zap-shader resume failed with error %d\n",
+ gpu->name, ret);
+
+ return;
+ }
+
+ /* First load: hand the named image to the PIL/SSR loader */
+ ptr = subsystem_get(name);
+
+ if (IS_ERR_OR_NULL(ptr)) {
+ DRM_ERROR("%s: Unable to load the zap shader: %ld\n", gpu->name,
+ IS_ERR(ptr) ? PTR_ERR(ptr) : -ENODEV);
+ } else {
+ set_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags);
+ }
+}
+#else
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ /*
+ * Stub used when CONFIG_MSM_SUBSYSTEM_RESTART is disabled: there is no
+ * loader available, so all we can do is warn when the device tree
+ * expects a zap shader.
+ *
+ * Bug fix: the original condition was inverted - it returned when
+ * "qcom,zap-shader" WAS present and warned when it was absent, which
+ * contradicts the warning text and the SSR variant's logic. Return
+ * silently only when the property is missing.
+ */
+ if (!of_find_property(GPU_OF_NODE(gpu), "qcom,zap-shader", NULL))
+ return;
+
+ DRM_INFO_ONCE("%s: Zap shader is defined but loader isn't available\n",
+ gpu->name);
+}
+#endif
+
+/*
+ * Interrupt sources we actually service in a5xx_irq(): RBBM errors and
+ * timeouts, CP errors, hang detect, preemption (CP_SW), retire
+ * (CACHE_FLUSH_TS), UCHE out-of-bounds and GPMU voltage droop.
+ */
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+/*
+ * a5xx_hw_init() - bring the A5xx GPU hardware up to a runnable state
+ * @gpu: the GPU to initialize
+ *
+ * Programs VBIF/UCHE/CP/RBBM configuration, loads microcode and the zap
+ * shader, starts the micro engine, leaves secure mode, initializes GPMU
+ * power features and finally primes preemption. Returns 0 on success or a
+ * negative errno from any of the sub-init steps.
+ *
+ * Bug fix: the UCHE_GMEM_RANGE_MIN write passed REG_A5XX_UCHE_GMEM_RANGE_MIN_LO
+ * as both halves of the 64-bit write; the high half must be
+ * REG_A5XX_UCHE_GMEM_RANGE_MIN_HI (mirroring the MAX pair below).
+ */
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret, bit = 0;
+
+ /* Hold a tight DMA latency bound while initializing */
+ pm_qos_update_request(&gpu->pm_qos_req_dma, 101);
+
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+ gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A5XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ /*
+ * NOTE(review): this full write clobbers the PC_DBG_ECO_CNTL value and
+ * rmw just above - looks intentional in the downstream tree but worth
+ * confirming against the register spec.
+ */
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /* Enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit if specified in the device tree */
+ if (!of_property_read_u32(pdev->dev.of_node, "qcom,highest-bank-bit",
+ &bit)) {
+ if (bit >= 13 && bit <= 16) {
+ /* The hardware encodes the bank bit relative to 13 */
+ bit -= 13;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, bit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, bit << 1);
+
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2,
+ bit);
+ }
+ }
+
+ /* Try to load and initialize the zap shader if applicable */
+ a5xx_zap_shader_init(gpu);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+ * Disable the trusted memory range - we don't actually supported secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Put the GPU into 64 bit by default */
+ gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /* Load the GPMU firmware before starting the HW init */
+ a5xx_gpmu_ucode_init(gpu);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ a5xx_preempt_hw_init(gpu);
+
+ ret = a5xx_ucode_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Enable the interrupts we service (comment previously said "disable") */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb[0], 0x0F);
+
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ }
+
+ /*
+ * If a zap shader was specified in the device tree, assume that we are
+ * on a secure device that blocks access to the RBBM_SECVID registers
+ * so we need to use the CP to switch out of secure mode. If a zap
+ * shader was NOT specified then we assume we are on an unlocked device.
+ * If we guessed wrong then the access to the register will probably
+ * cause a XPU violation.
+ */
+ if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else {
+ /* Print a warning so if we die, we know why */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ }
+
+ /* Next, start the power */
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+
+ /* Last step - yield the ringbuffer */
+ a5xx_preempt_start(gpu);
+
+ /* Relax the DMA latency bound now that init is done */
+ pm_qos_update_request(&gpu->pm_qos_req_dma, 501);
+
+ return 0;
+}
+
+/*
+ * a5xx_recover() - recover from a GPU hang
+ * @gpu: the wedged GPU
+ *
+ * Dumps state, snapshots the hardware for postmortem, performs a software
+ * reset of the GPU core, then hands off to the generic adreno recovery
+ * path to restart and replay.
+ */
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ adreno_dump_info(gpu);
+
+ msm_gpu_snapshot(gpu, gpu->snapshot);
+
+ /* Reset the GPU so it can work again */
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ /* Read back to make sure the reset write has posted before clearing */
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+
+ adreno_recover(gpu);
+}
+
+/*
+ * a5xx_destroy() - tear down the A5xx GPU instance
+ * @gpu: the GPU to destroy
+ *
+ * Releases preemption state, the PM4/PFP/GPMU microcode buffers (unpinning
+ * their iovas first where mapped), then the common adreno state and the
+ * a5xx_gpu container itself.
+ */
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ a5xx_preempt_fini(gpu);
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+/*
+ * _a5xx_check_idle() - one-shot idle check used by the spin_until() poll
+ * @gpu: the GPU to check
+ *
+ * Returns true when RBBM_STATUS shows no busy bits (ignoring HI_BUSY) and
+ * no hang-detect interrupt is pending.
+ */
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+/*
+ * a5xx_idle() - wait for the given ring to drain and the GPU to go idle
+ * @gpu: the GPU
+ * @ring: the ringbuffer to wait on; must be the currently active ring
+ *
+ * Returns true on success; false if the wrong ring was passed, the ring
+ * failed to drain, or the GPU did not idle within the poll timeout (a
+ * diagnostic is logged in that case).
+ */
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (ring != a5xx_gpu->cur_ring) {
+ WARN(1, "Tried to idle a non-current ringbuffer\n");
+ return false;
+ }
+
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR(
+ "%s: timeout waiting for GPU RB %d to idle: status %8.8X rptr/wptr: %4.4X/%4.4X irq %8.8X\n",
+ gpu->name, ring->id,
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+ return false;
+ }
+
+ return true;
+}
+
+/*
+ * a5xx_cp_err_irq() - decode and log Command Processor error interrupts
+ * @gpu: the GPU that raised the interrupt
+ *
+ * Reads CP_INTERRUPT_STATUS and emits a ratelimited diagnostic for each
+ * error class: bad opcode, HW fault, DMA error, protected-register access
+ * and AHB faults.
+ *
+ * Bug fix: access[] is declared with 16 slots but only 14 were
+ * initialized, while the index is a 4-bit field (0-15); indices 14 and 15
+ * passed NULL to a "%s" format. Fill the tail with "reserved".
+ */
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ /* Fully populate all 16 slots of the 4-bit access-type field */
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write", "reserved", "reserved" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+ (status & (1 << 31)), status);
+ }
+}
+
+/*
+ * a5xx_rbbm_err_irq() - decode and log RBBM error interrupts
+ * @gpu: the GPU that raised the interrupt
+ * @status: the RBBM_INT_0_STATUS value captured by a5xx_irq()
+ *
+ * Logs one diagnostic per asserted error bit. The AHB error is special:
+ * its interrupt is only cleared here, after the error condition itself has
+ * been cleared, to avoid an interrupt storm (see a5xx_irq()).
+ */
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
+{
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+ /* Clear the interrupt */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+/*
+ * a5xx_uche_err_irq() - log a UCHE out-of-bounds access fault
+ * @gpu: the GPU that raised the interrupt
+ *
+ * Bug fix: the HI half of the trap log was OR'ed into the address without
+ * being shifted into the upper 32 bits, so the two halves overlapped and
+ * the reported address was wrong. Shift HI left by 32 before combining.
+ */
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+/* Log a GPMU voltage droop interrupt; nothing to recover here. */
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+/*
+ * a5xx_fault_detect_irq() - handle the hang-detect interrupt
+ * @gpu: the GPU that faulted
+ *
+ * Logs a snapshot of ring/fence/RB/IB state, stops the hangcheck timer
+ * (recovery is already underway) and queues the recover work.
+ */
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, adreno_submitted_fence(gpu, ring),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* RBBM error bits routed to a5xx_rbbm_err_irq() by a5xx_irq() */
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+/*
+ * a5xx_irq() - top-level A5xx interrupt handler
+ * @gpu: the GPU that raised the interrupt
+ *
+ * Acks all pending sources except RBBM_AHB_ERROR (cleared later by
+ * a5xx_rbbm_err_irq() after the error itself is cleared) and dispatches
+ * each asserted bit to its handler. Always returns IRQ_HANDLED.
+ */
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ /*
+ * Clear all the interrupts except for RBBM_AHB_ERROR
+ * which needs to be cleared after the error condition
+ * is cleared otherwise it will storm
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu, status);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ /* CACHE_FLUSH_TS fires when a submit's fence has been written */
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ /* CP_SW is used by the CP to signal preemption completion */
+ if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+ a5xx_preempt_irq(gpu);
+
+ return IRQ_HANDLED;
+}
+
+/* Map generic adreno register identifiers to their A5xx register offsets */
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A5XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
+/*
+ * Register ranges dumped for debug/state capture. Presumably consumed as
+ * inclusive [start, end] pairs with the ~0 sentinel terminating the list
+ * (the consumer is outside this file) - TODO confirm against adreno_show.
+ */
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002b,
+ 0x002e, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00bb, 0x03a0, 0x0464, 0x0469, 0x046f, 0x04d2, 0x04d3,
+ 0x04e0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081a, 0x081f, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08a0, 0x0b00, 0x0b12, 0x0b14, 0x0b28,
+ 0x0b78, 0x0b7f, 0x0bb0, 0x0bbd, 0x0bc0, 0x0bc6, 0x0bd0, 0x0c53,
+ 0x0c60, 0x0c61, 0x0c80, 0x0c82, 0x0c84, 0x0c85, 0x0c90, 0x0c9b,
+ 0x0ca0, 0x0ca0, 0x0cb0, 0x0cb2, 0x0cc1, 0x0cc1, 0x0cc4, 0x0cc7,
+ 0x0ccc, 0x0ccc, 0x0cd0, 0x0cdb, 0x0ce0, 0x0ce5, 0x0ce8, 0x0ce8,
+ 0x0cec, 0x0cf1, 0x0cfb, 0x0d0e, 0x0d10, 0x0d17, 0x0d20, 0x0d23,
+ 0x0d30, 0x0d30, 0x0e40, 0x0e43, 0x0e4a, 0x0e4a, 0x0e50, 0x0e57,
+ 0x0e60, 0x0e7c, 0x0e80, 0x0e8e, 0x0e90, 0x0e96, 0x0ea0, 0x0eab,
+ 0x0eb0, 0x0eb2, 0x2100, 0x211e, 0x2140, 0x2145, 0x2180, 0x2185,
+ 0x2500, 0x251e, 0x2540, 0x2545, 0x2580, 0x2585, 0x3000, 0x3014,
+ 0x3018, 0x302c, 0x3030, 0x3030, 0x3034, 0x3036, 0x303c, 0x303d,
+ 0x3040, 0x3040, 0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058,
+ 0x305a, 0x3061, 0x3064, 0x3068, 0x306c, 0x306d, 0x3080, 0x3088,
+ 0x308b, 0x308c, 0x3090, 0x3094, 0x3098, 0x3098, 0x309c, 0x309c,
+ 0x3124, 0x3124, 0x340c, 0x340c, 0x3410, 0x3410, 0x3800, 0x3801,
+ 0xa800, 0xa800, 0xa820, 0xa828, 0xa840, 0xa87d, 0xa880, 0xa88d,
+ 0xa890, 0xa8a3, 0xa8a8, 0xa8aa, 0xa8c0, 0xa8c3, 0xa8c6, 0xa8ca,
+ 0xa8cc, 0xa8cf, 0xa8d1, 0xa8d8, 0xa8dc, 0xa8dc, 0xa8e0, 0xa8f5,
+ 0xac00, 0xac06, 0xac40, 0xac47, 0xac60, 0xac62, 0xac80, 0xac82,
+ 0xb800, 0xb808, 0xb80c, 0xb812, 0xb814, 0xb817, 0xb900, 0xb904,
+ 0xb906, 0xb90a, 0xb90c, 0xb90f, 0xb920, 0xb924, 0xb926, 0xb92a,
+ 0xb92c, 0xb92f, 0xb940, 0xb944, 0xb946, 0xb94a, 0xb94c, 0xb94f,
+ 0xb960, 0xb964, 0xb966, 0xb96a, 0xb96c, 0xb96f, 0xb980, 0xb984,
+ 0xb986, 0xb98a, 0xb98c, 0xb98f, 0xb9a0, 0xb9b0, 0xb9b8, 0xb9ba,
+ 0xd200, 0xd23f, 0xe000, 0xe006, 0xe010, 0xe09a, 0xe0a0, 0xe0a4,
+ 0xe0aa, 0xe0eb, 0xe100, 0xe105, 0xe140, 0xe147, 0xe150, 0xe187,
+ 0xe1a0, 0xe1a9, 0xe1b0, 0xe1b6, 0xe1c0, 0xe1c7, 0xe1d0, 0xe1d1,
+ 0xe200, 0xe201, 0xe210, 0xe21c, 0xe240, 0xe268, 0xe280, 0xe280,
+ 0xe282, 0xe2a3, 0xe2a5, 0xe2c2, 0xe380, 0xe38f, 0xe3b0, 0xe3b0,
+ 0xe400, 0xe405, 0xe408, 0xe4e9, 0xe4f0, 0xe4f0, 0xe800, 0xe806,
+ 0xe810, 0xe89a, 0xe8a0, 0xe8a4, 0xe8aa, 0xe8eb, 0xe900, 0xe905,
+ 0xe940, 0xe947, 0xe950, 0xe987, 0xe9a0, 0xe9a9, 0xe9b0, 0xe9b6,
+ 0xe9c0, 0xe9c7, 0xe9d0, 0xe9d1, 0xea00, 0xea01, 0xea10, 0xea1c,
+ 0xea40, 0xea68, 0xea80, 0xea80, 0xea82, 0xeaa3, 0xeaa5, 0xeac2,
+ 0xeb80, 0xeb8f, 0xebb0, 0xebb0, 0xec00, 0xec05, 0xec08, 0xece9,
+ 0xecf0, 0xecf0, 0xf400, 0xf400, 0xf800, 0xf807,
+ ~0
+};
+
+/*
+ * a5xx_pm_resume() - power up the GPU
+ * @gpu: the GPU to resume
+ *
+ * Turns on the core power, then the RBCCU and SP power domains in that
+ * order, polling bit 20 of each domain's PWR_CLK_STATUS register for the
+ * GDSC to come up. Returns 0 on success or a negative errno on timeout.
+ */
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ /* Turn on the RBCCU domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+/*
+ * a5xx_pm_suspend() - power down the GPU
+ * @gpu: the GPU to suspend
+ *
+ * Halts and drains the VBIF, applies the A530-only VBIF reset workaround,
+ * then lets the common msm code drop power. Returns msm_gpu_pm_suspend()'s
+ * result.
+ */
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* Clear the VBIF pipe before shutting down */
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issue with FIFO
+ * entries
+ */
+
+ if (adreno_is_a530(adreno_gpu)) {
+ /* These only need to be done for A530 */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
+
+ return msm_gpu_pm_suspend(gpu);
+}
+
+/* Report the 64-bit CP_0 performance counter as the GPU timestamp. */
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ uint64_t ticks;
+
+ ticks = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+ *value = ticks;
+
+ return 0;
+}
+
#ifdef CONFIG_DEBUG_FS
/* debugfs: print the RBBM status register and the generic adreno state */
static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
	/* The GPU must be powered for the register read below to be valid */
	gpu->funcs->pm_resume(gpu);

	seq_printf(m, "status: %08x\n",
		gpu_read(gpu, REG_A5XX_RBBM_STATUS));
	gpu->funcs->pm_suspend(gpu);

	adreno_show(gpu, m);
}
#endif
+
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ return a5xx_gpu->cur_ring;
+}
+
/* a5xx hooks plugged into the generic msm/adreno GPU core */
static const struct adreno_gpu_funcs funcs = {
	.base = {
		.get_param = adreno_get_param,
		.hw_init = a5xx_hw_init,
		.pm_suspend = a5xx_pm_suspend,
		.pm_resume = a5xx_pm_resume,
		.recover = a5xx_recover,
		.last_fence = adreno_last_fence,
		.submitted_fence = adreno_submitted_fence,
		.submit = a5xx_submit,
		.flush = a5xx_flush,
		.active_ring = a5xx_active_ring,
		.irq = a5xx_irq,
		.destroy = a5xx_destroy,
#ifdef CONFIG_DEBUG_FS
		.show = a5xx_show,
#endif
		.snapshot = a5xx_snapshot,
	},
	.get_timestamp = a5xx_get_timestamp,
};
+
+/* Read the limits management leakage from the efuses */
+static void a530_efuse_leakage(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu, void *base,
+ size_t size)
+{
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int row0, row2;
+ unsigned int leakage_pwr_on, coeff;
+
+ if (size < 0x148)
+ return;
+
+ /* Leakage */
+ row0 = readl_relaxed(base + 0x134);
+ row2 = readl_relaxed(base + 0x144);
+
+ /* Read barrier to get the previous two reads */
+ rmb();
+
+ /* Get the leakage coefficient from device tree */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,base-leakage-coefficent", &coeff))
+ return;
+
+ leakage_pwr_on = ((row2 >> 2) & 0xFF) * (1 << (row0 >> 1) & 0x03);
+ a5xx_gpu->lm_leakage = (leakage_pwr_on << 16) |
+ ((leakage_pwr_on * coeff) / 100);
+}
+
+/* Read the speed bin from the efuses */
+static void a530_efuse_bin(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu, void *base,
+ size_t size)
+{
+ uint32_t speed_bin[3];
+ uint32_t val;
+
+ if (of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,gpu-speed-bin", speed_bin, 3))
+ return;
+
+ if (size < speed_bin[0] + 4)
+ return;
+
+ val = readl_relaxed(base + speed_bin[0]);
+
+ adreno_gpu->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
/*
 * Read target specific configuration from the efuses (A530 only).
 *
 * Maps the "qfprom_memory" resource and lets the speed-bin and leakage
 * helpers parse it. All failures are silently ignored - the efuse data
 * only refines defaults and is not required for bring-up.
 */
static void a5xx_efuses_read(struct platform_device *pdev,
		struct adreno_gpu *adreno_gpu)
{
	struct adreno_platform_config *config = pdev->dev.platform_data;
	const struct adreno_info *info = adreno_info(config->rev);
	struct resource *res;
	void *base;

	/*
	 * The adreno_gpu->revn mechanism isn't set up yet so we need to check
	 * it directly here
	 */
	if (info->revn != 530)
		return;

	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
		"qfprom_memory");
	if (!res)
		return;

	base = ioremap(res->start, resource_size(res));
	if (!base)
		return;

	a530_efuse_bin(pdev, adreno_gpu, base, resource_size(res));
	a530_efuse_leakage(pdev, adreno_gpu, base, resource_size(res));

	iounmap(base);
}
+
/*
 * Allocate and initialize the a5xx GPU instance.
 *
 * Returns the embedded msm_gpu on success or an ERR_PTR on failure; on
 * error the partially constructed object is torn down via a5xx_destroy().
 */
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
{
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct a5xx_gpu *a5xx_gpu = NULL;
	struct adreno_gpu *adreno_gpu;
	struct msm_gpu *gpu;
	int ret;

	if (!pdev) {
		dev_err(dev->dev, "No A5XX device is defined\n");
		return ERR_PTR(-ENXIO);
	}

	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
	if (!a5xx_gpu)
		return ERR_PTR(-ENOMEM);

	adreno_gpu = &a5xx_gpu->base;
	gpu = &adreno_gpu->base;

	a5xx_gpu->pdev = pdev;
	adreno_gpu->registers = a5xx_registers;
	adreno_gpu->reg_offsets = a5xx_register_offsets;

	/* Default leakage value; the efuse read below may override it */
	a5xx_gpu->lm_leakage = 0x4E001A;

	/* Check the efuses for some configuration */
	a5xx_efuses_read(pdev, adreno_gpu);

	/* NOTE(review): the trailing 4 presumably requests 4 rings - confirm
	 * against the adreno_gpu_init() signature */
	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
	if (ret) {
		a5xx_destroy(&(a5xx_gpu->base.base));
		return ERR_PTR(ret);
	}

	/* Set up the preemption specific bits and pieces for each ringbuffer */
	a5xx_preempt_init(gpu);

	return gpu;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000000..3de14fe42a1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
/* Bit values for a5xx_gpu::flags (set and tested outside this chunk) */
enum {
	A5XX_ZAP_SHADER_LOADED = 1,
};
+
struct a5xx_gpu {
	/* A5XX_* flag bits (e.g. A5XX_ZAP_SHADER_LOADED) */
	unsigned long flags;

	struct adreno_gpu base;
	struct platform_device *pdev;

	/* PM4 ucode BO + GPU address (managed outside this chunk) */
	struct drm_gem_object *pm4_bo;
	uint64_t pm4_iova;

	/* PFP ucode BO + GPU address (managed outside this chunk) */
	struct drm_gem_object *pfp_bo;
	uint64_t pfp_iova;

	/* GPMU command stream built by a5xx_gpmu_ucode_init(); dwords == 0
	 * means no GPMU firmware is loaded */
	struct drm_gem_object *gpmu_bo;
	uint64_t gpmu_iova;
	uint32_t gpmu_dwords;

	/* Leakage value programmed into REG_A5XX_GPMU_BASE_LEAKAGE */
	uint32_t lm_leakage;

	/* Ring currently owned by the hardware / ring being preempted to */
	struct msm_ringbuffer *cur_ring;
	struct msm_ringbuffer *next_ring;

	/* Per-ring preemption records shared with the CP */
	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];

	/* Preemption state machine (enum preempt_state) and watchdog timer */
	atomic_t preempt_state;
	struct timer_list preempt_timer;

	/* Pagetable info block handed to the CP for preemption switches */
	struct a5xx_smmu_info *smmu_info;
	struct drm_gem_object *smmu_info_bo;
	uint64_t smmu_info_iova;
};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
+ * recovery. Next state: N/A
+ * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next state: FAULTED, NONE.
+ */
+
/* Preemption state machine values - see the state diagram above */
enum preempt_state {
	PREEMPT_NONE = 0,
	PREEMPT_START,
	PREEMPT_TRIGGERED,
	PREEMPT_FAULTED,
	PREEMPT_PENDING,
};
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
struct a5xx_preempt_record {
	uint32_t magic;		/* must be A5XX_PREEMPT_RECORD_MAGIC */
	uint32_t info;
	uint32_t data;
	uint32_t cntl;
	uint32_t rptr;
	uint32_t wptr;
	uint64_t rptr_addr;
	uint64_t rbase;
	uint64_t counter;	/* GPU address of the perf counter save area */
	/*
	 * NOTE(review): this layout is shared with the CP microcode (see the
	 * field descriptions above) - do not reorder or resize fields.
	 */
};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it onto the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+/*
+ * This is a global structure that the preemption code uses to switch in the
+ * pagetable for the preempted process - the code switches in whatever
+ * pagetable we set up here after preempting to a new ring.
+ */
struct a5xx_smmu_info {
	uint32_t magic;		/* must be A5XX_SMMU_INFO_MAGIC */
	uint32_t _pad4;		/* pad so ttbr0 is 8-byte aligned */
	uint64_t ttbr0;		/* pagetable base for the incoming ring */
	uint32_t asid;		/* not written in this chunk - TODO confirm producer */
	uint32_t contextidr;	/* CONTEXTIDR value for the incoming ring */
};
+
+#define A5XX_SMMU_INFO_MAGIC 0x3618CDA3UL
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+ return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE);
+}
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000000..e04feaadefb9
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,509 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are by convention with the GPMU
+ * firmware and not bound to any specific hardware design
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
/*
 * Opaque GPMU power sequencer programming for A530, written register by
 * register from a530_lm_setup(). The values are hardware tuning data.
 */
static struct {
	uint32_t reg;
	uint32_t value;
} a5xx_sequence_regs[] = {
	{ 0xB9A1, 0x00010303 },
	{ 0xB9A2, 0x13000000 },
	{ 0xB9A3, 0x00460020 },
	{ 0xB9A4, 0x10000000 },
	{ 0xB9A5, 0x040A1707 },
	{ 0xB9A6, 0x00010000 },
	{ 0xB9A7, 0x0E000904 },
	{ 0xB9A8, 0x10000000 },
	{ 0xB9A9, 0x01165000 },
	{ 0xB9AA, 0x000E0002 },
	{ 0xB9AB, 0x03884141 },
	{ 0xB9AC, 0x10000840 },
	{ 0xB9AD, 0x572A5000 },
	{ 0xB9AE, 0x00000003 },
	{ 0xB9AF, 0x00000000 },
	{ 0xB9B0, 0x10000000 },
	{ 0xB828, 0x6C204010 },
	{ 0xB829, 0x6C204011 },
	{ 0xB82A, 0x6C204012 },
	{ 0xB82B, 0x6C204013 },
	{ 0xB82C, 0x6C204014 },
	{ 0xB90F, 0x00000004 },
	{ 0xB910, 0x00000002 },
	{ 0xB911, 0x00000002 },
	{ 0xB912, 0x00000002 },
	{ 0xB913, 0x00000002 },
	{ 0xB92F, 0x00000004 },
	{ 0xB930, 0x00000005 },
	{ 0xB931, 0x00000005 },
	{ 0xB932, 0x00000005 },
	{ 0xB933, 0x00000005 },
	{ 0xB96F, 0x00000001 },
	{ 0xB970, 0x00000003 },
	{ 0xB94F, 0x00000004 },
	{ 0xB950, 0x0000000B },
	{ 0xB951, 0x0000000B },
	{ 0xB952, 0x0000000B },
	{ 0xB953, 0x0000000B },
	{ 0xB907, 0x00000019 },
	{ 0xB927, 0x00000019 },
	{ 0xB947, 0x00000019 },
	{ 0xB967, 0x00000019 },
	{ 0xB987, 0x00000019 },
	{ 0xB906, 0x00220001 },
	{ 0xB926, 0x00220001 },
	{ 0xB946, 0x00220001 },
	{ 0xB966, 0x00220001 },
	{ 0xB986, 0x00300000 },
	{ 0xAC40, 0x0340FF41 },
	{ 0xAC41, 0x03BEFED0 },
	{ 0xAC42, 0x00331FED },
	{ 0xAC43, 0x021FFDD3 },
	{ 0xAC44, 0x5555AAAA },
	{ 0xAC45, 0x5555AAAA },
	{ 0xB9BA, 0x00000008 },
};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
/*
 * Setup thermal limit management for A540.
 *
 * Builds the AGC init message in GPMU data RAM: the DT-provided max
 * power, one voltage/frequency level (the currently active one), and an
 * LM config with GPMU-side throttling and the battery current limiter
 * disabled. The magic write at the end presumably signals the GPMU that
 * the message is complete.
 */
static void a540_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	u32 max_power = 0;
	u32 rate = gpu->gpufreq[gpu->active_level];
	u32 config;

	/* The battery current limiter isn't enabled for A540 */
	config = AGC_LM_CONFIG_BCL_DISABLED;
	config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;

	/* For now disable GPMU side throttling */
	config |= AGC_LM_CONFIG_THROTTLE_DISABLE;

	/* Get the max-power from the device tree */
	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);

	gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
		PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));

	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
+
/*
 * Setup thermal limit management for A530: program the GPMU power
 * sequencer table, the temperature sensor, the fused leakage value and a
 * single voltage/frequency level for the AGC init message.
 */
static void a530_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	uint32_t rate = gpu->gpufreq[gpu->active_level];
	uint32_t tsens = 0;
	uint32_t max_power = 0;
	unsigned int i;

	/* Write the block of sequence registers */
	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
		gpu_write(gpu, a5xx_sequence_regs[i].reg,
			a5xx_sequence_regs[i].value);

	/* GPMU temperature sensor ID from DT (stays 0 if absent) */
	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);

	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);

	/* Leakage value read from the efuses (or the default) */
	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);

	/*
	 * NOTE(review): these two registers are written again with identical
	 * values below (0x00201FF1 == 0x201FF1) - the duplication looks
	 * redundant but is kept as-is
	 */
	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Write the voltage table */

	/* Get the max-power from the device tree */
	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);

	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);

	gpu_write(gpu, AGC_MSG_STATE, 1);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}
+
/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu, ring);
+
+ /* This is "fatal" because the CP is left in a bad state */
+ if (!a5xx_idle(gpu, ring)) {
+ DRM_ERROR("%s: Unable to load GPMU firmwaren",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ /* Clock gating setup for A530 targets */
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+ * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE)) {
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+ return 0;
+ }
+
+ if (!adreno_is_a530(adreno_gpu)) {
+ u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+ if (val)
+ DRM_ERROR("%s: GPMU firmare initialization failed: %d\n",
+ gpu->name, val);
+ }
+
+ /* FIXME: Clear GPMU interrupts? */
+ return 0;
+}
+
/* Enable limits management interrupts and throttling (A530 only) */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	/* This init sequence only applies to A530 */
	if (!adreno_is_a530(adreno_gpu))
		return;

	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);

	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
}
+
/*
 * Bring up the GPU power features: limits management configuration,
 * SP/TP power collapse, the GPMU microcontroller, and finally the LM
 * interrupts. Returns 0 on success or the error from a5xx_gpmu_init().
 */
int a5xx_power_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;
	/* Default power threshold; overridable via "qcom,lm-limit" in DT */
	u32 lm_limit = 6000;

	/*
	 * Set up the limit management
	 * first, do some generic setup:
	 */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);

	/* Now do the target specific setup */
	if (adreno_is_a530(adreno_gpu))
		a530_lm_setup(gpu);
	else
		a540_lm_setup(gpu);

	/* Set up SP/TP power collapse */
	a5xx_pc_init(gpu);

	/* Start the GPMU */
	ret = a5xx_gpmu_init(gpu);
	if (ret)
		return ret;

	/* Start the limits management */
	a5xx_lm_enable(gpu);

	return 0;
}
+
/*
 * Parse the version header block of the GPMU firmware.
 *
 * @data: pointer to the header; the first dword is the payload size in
 *        dwords, not counting itself
 * @fwsize: number of bytes available at @data
 * @major: filled with the firmware major version if present (type 1)
 * @minor: filled with the firmware minor version if present (type 2)
 *
 * The payload is (type, value) pairs; unknown types are ignored.
 * Returns 0 on success or -EINVAL if the header is malformed or would
 * extend past @fwsize.
 */
static int _read_header(unsigned int *data, uint32_t fwsize,
		unsigned int *major, unsigned int *minor)
{
	uint32_t size;
	unsigned int i;

	/* First dword of the header is the header size */
	if (fwsize < 4)
		return -EINVAL;

	size = data[0];

	/*
	 * Make sure the header isn't too big and is a multiple of two.
	 * Since 'size' excludes the size dword itself, the header occupies
	 * data[0..size] - a total of size + 1 dwords. Fix: the original
	 * check (size > fwsize >> 2) allowed size == fwsize >> 2, letting
	 * the last pair's value be read one dword past the end of the
	 * buffer; reject that case with '>='.
	 */
	if ((size % 2) || (size > 10) || size >= (fwsize >> 2))
		return -EINVAL;

	/* Read the values in pairs */
	for (i = 1; i < size; i += 2) {
		switch (data[i]) {
		case 1:
			*major = data[i + 1];
			break;
		case 2:
			*minor = data[i + 1];
			break;
		default:
			/* Invalid values are non fatal */
			break;
		}
	}

	return 0;
}
+
/*
 * Return true when the loaded firmware version (cur_major.cur_minor) is
 * at least the required minimum (min_major.min_minor).
 */
static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
	uint32_t min_major, uint32_t min_minor)
{
	if (cur_major != min_major)
		return cur_major > min_major;

	return cur_minor >= min_minor;
}
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ const char *name;
+ const struct firmware *fw;
+ uint32_t version[2] = { 0, 0 };
+ uint32_t dwords = 0, offset = 0;
+ uint32_t major = 0, minor = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ /*
+ * Read the firmware name from the device tree - if it doesn't exist
+ * then don't initialize the GPMU for this target
+ */
+ if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
+ &name))
+ return;
+
+ /*
+ * The version isn't mandatory, but if it exists, we need to enforce
+ * that the version of the GPMU firmware matches or is newer than the
+ * value
+ */
+ of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
+ version, 2);
+
+ /* Get the firmware */
+ if (request_firmware(&fw, name, drm->dev)) {
+ DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return;
+ }
+
+ data = (unsigned int *) fw->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it matches the actual size of
+ * the firmware that we read
+ */
+
+ if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+ goto out;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ goto out;
+
+ /* Read the header and get the major/minor of the read firmware */
+ if (_read_header(&data[2], fw->size - 8, &major, &minor))
+ goto out;
+
+ if (!_check_gpmu_version(major, minor, version[0], version[1])) {
+ DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
+ gpu->name, major, minor);
+ goto out;
+ }
+
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
+
+ /*
+ * A single type4 opcode can only have so many values attached so
+ * add enough opcodes to load the all the commands
+ */
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+ mutex_lock(&drm->struct_mutex);
+ a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(a5xx_gpu->gpmu_bo))
+ goto err;
+
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
+ &a5xx_gpu->gpmu_iova))
+ goto err;
+
+ ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
+ if (!ptr)
+ goto err;
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ a5xx_gpu->gpmu_dwords = dwords;
+
+ goto out;
+
+err:
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ if (a5xx_gpu->gpmu_bo)
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+ a5xx_gpu->gpmu_bo = NULL;
+ a5xx_gpu->gpmu_iova = 0;
+ a5xx_gpu->gpmu_dwords = 0;
+
+out:
+ /* No need to keep that firmware laying around anymore */
+ release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 000000000000..648494c75abc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,383 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
/*
 * Allocate a kernel-owned GEM buffer, pin its GPU address and return its
 * kernel mapping.
 *
 * On success *bo and *iova (when non-NULL) receive the object and GPU
 * address. Returns the kernel vaddr, or an ERR_PTR on failure - in which
 * case the object has already been released.
 */
static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
	size_t size, uint32_t flags, struct drm_gem_object **bo,
	u64 *iova)
{
	struct drm_gem_object *_bo;
	u64 _iova;
	void *ptr;
	int ret;

	mutex_lock(&drm->struct_mutex);
	_bo = msm_gem_new(drm, size, flags);
	mutex_unlock(&drm->struct_mutex);

	if (IS_ERR(_bo))
		return _bo;

	ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova);
	if (ret)
		goto out;

	ptr = msm_gem_vaddr(_bo);
	if (!ptr) {
		ret = -ENOMEM;
		goto out;
	}

	if (bo)
		*bo = _bo;
	if (iova)
		*iova = _iova;

	return ptr;
out:
	drm_gem_object_unreference_unlocked(_bo);
	return ERR_PTR(ret);
}
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
/*
 * Force the preemption state to the specified state. This is used in cases
 * where the current state is known and won't change
 */
static inline void set_preempt_state(struct a5xx_gpu *gpu,
	enum preempt_state new)
{
	/*
	 * preempt_state may be read by other cores trying to trigger a
	 * preemption or in the interrupt handler so barriers are needed
	 * before...
	 */
	smp_mb__before_atomic();
	atomic_set(&gpu->preempt_state, new);
	/* ... and after */
	smp_mb__after_atomic();
}
+
/* Write the most recent wptr for the given ring into the hardware */
static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
	unsigned long flags;
	uint32_t wptr;

	if (!ring)
		return;

	/* Snapshot the wptr under the ring lock so it can't move under us */
	spin_lock_irqsave(&ring->lock, flags);
	wptr = get_wptr(ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
}
+
/* Return the highest priority ringbuffer with something in it */
static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	unsigned long flags;
	int i;

	/* Higher array index means higher priority; scan from the top down */
	for (i = gpu->nr_rings - 1; i >= 0; i--) {
		bool empty;
		struct msm_ringbuffer *ring = gpu->rb[i];

		/* A ring is empty when the CP has consumed up to the wptr */
		spin_lock_irqsave(&ring->lock, flags);
		empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
		spin_unlock_irqrestore(&ring->lock, flags);

		if (!empty)
			return ring;
	}

	return NULL;
}
+
/* Watchdog: fires if a triggered preemption never completes */
static void a5xx_preempt_timer(unsigned long data)
{
	struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
	struct msm_gpu *gpu = &a5xx_gpu->base.base;
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	/* Only act if the preemption is still outstanding */
	if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
		return;

	dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
	queue_work(priv->wq, &gpu->recover_work);
}
+
/*
 * Try to trigger a preemption switch to the highest priority non-empty
 * ring. No-op when preemption is disabled (single ring) or a preemption
 * is already in flight.
 */
void a5xx_preempt_trigger(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned long flags;
	struct msm_ringbuffer *ring;

	/* Preemption is disabled when only one ring is configured */
	if (gpu->nr_rings == 1)
		return;

	/*
	 * Try to start preemption by moving from NONE to START. If
	 * unsuccessful, a preemption is already in flight
	 */
	if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
		return;

	/* Get the next ring to preempt to */
	ring = get_next_ring(gpu);

	/*
	 * If no ring is populated or the highest priority ring is the current
	 * one do nothing except to update the wptr to the latest and greatest
	 */
	if (!ring || (a5xx_gpu->cur_ring == ring)) {
		update_wptr(gpu, ring);

		/* Set the state back to NONE */
		set_preempt_state(a5xx_gpu, PREEMPT_NONE);
		return;
	}

	/* Make sure the wptr doesn't update while we're in motion */
	spin_lock_irqsave(&ring->lock, flags);
	a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
	spin_unlock_irqrestore(&ring->lock, flags);

	/* Do read barrier to make sure we have updated pagetable info */
	rmb();

	/* Set the SMMU info for the preemption */
	if (a5xx_gpu->smmu_info) {
		a5xx_gpu->smmu_info->ttbr0 =
			adreno_gpu->memptrs->ttbr0[ring->id];
		a5xx_gpu->smmu_info->contextidr =
			adreno_gpu->memptrs->contextidr[ring->id];
	}

	/* Set the address of the incoming preemption record */
	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
		REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
		a5xx_gpu->preempt_iova[ring->id]);

	a5xx_gpu->next_ring = ring;

	/* Start a timer to catch a stuck preemption */
	mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));

	/* Set the preemption state to triggered */
	set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);

	/* Make sure everything is written before hitting the button */
	wmb();

	/* And actually start the preemption */
	gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
}
+
/*
 * Handle the CP context-switch-done interrupt: verify the switch really
 * completed, promote next_ring to cur_ring and re-arm the state machine.
 */
void a5xx_preempt_irq(struct msm_gpu *gpu)
{
	uint32_t status;
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;

	/* Ignore spurious interrupts - a preemption must be outstanding */
	if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
		return;

	/* Delete the preemption watchdog timer */
	del_timer(&a5xx_gpu->preempt_timer);

	/*
	 * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
	 * firing the interrupt, but there is a non zero chance of a hardware
	 * condition or a software race that could set it again before we have a
	 * chance to finish. If that happens, log and go for recovery
	 */
	status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
	if (unlikely(status)) {
		set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
		dev_err(dev->dev, "%s: Preemption failed to complete\n",
			gpu->name);
		queue_work(priv->wq, &gpu->recover_work);
		return;
	}

	a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
	a5xx_gpu->next_ring = NULL;

	/* Push any wptr updates that arrived while the switch was in flight */
	update_wptr(gpu, a5xx_gpu->cur_ring);

	set_preempt_state(a5xx_gpu, PREEMPT_NONE);
}
+
/* Reset preemption bookkeeping after every hardware (re)initialization */
void a5xx_preempt_hw_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring;
	int i;

	if (gpu->nr_rings > 1) {
		/* Clear the preemption records */
		FOR_EACH_RING(gpu, ring, i) {
			if (ring) {
				a5xx_gpu->preempt[ring->id]->wptr = 0;
				a5xx_gpu->preempt[ring->id]->rptr = 0;
				a5xx_gpu->preempt[ring->id]->rbase = ring->iova;
			}
		}
	}

	/* Tell the CP where to find the smmu_info buffer */
	gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
		REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
		a5xx_gpu->smmu_info_iova);

	/* Reset the preemption state */
	set_preempt_state(a5xx_gpu, PREEMPT_NONE);

	/* Always come up on rb 0 */
	a5xx_gpu->cur_ring = gpu->rb[0];
}
+
/*
 * Allocate and prime the CP-shared preemption record for one ring. The
 * buffer also carries the performance counter save area, appended after
 * the 64k record. Returns 0 or a negative error code.
 */
static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
	struct msm_ringbuffer *ring)
{
	struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
	struct msm_gpu *gpu = &adreno_gpu->base;
	struct a5xx_preempt_record *ptr;
	struct drm_gem_object *bo;
	u64 iova;

	ptr = alloc_kernel_bo(gpu->dev, gpu,
		A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
		MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
		&bo, &iova);

	if (IS_ERR(ptr))
		return PTR_ERR(ptr);

	a5xx_gpu->preempt_bo[ring->id] = bo;
	a5xx_gpu->preempt_iova[ring->id] = iova;
	a5xx_gpu->preempt[ring->id] = ptr;

	/* Set up the defaults on the preemption record */

	ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
	ptr->info = 0;
	ptr->data = 0;
	ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
	ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
	/* Counter save area lives right after the 64k record */
	ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;

	return 0;
}
+
/* Release all per-ring preemption records and the smmu_info block */
void a5xx_preempt_fini(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring;
	int i;

	FOR_EACH_RING(gpu, ring, i) {
		if (!ring || !a5xx_gpu->preempt_bo[i])
			continue;

		if (a5xx_gpu->preempt_iova[i])
			msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);

		drm_gem_object_unreference_unlocked(a5xx_gpu->preempt_bo[i]);

		a5xx_gpu->preempt_bo[i] = NULL;
	}

	if (a5xx_gpu->smmu_info_bo) {
		if (a5xx_gpu->smmu_info_iova)
			msm_gem_put_iova(a5xx_gpu->smmu_info_bo, gpu->aspace);
		drm_gem_object_unreference_unlocked(a5xx_gpu->smmu_info_bo);
		a5xx_gpu->smmu_info_bo = NULL;
	}
}
+
/*
 * Set up preemption: one record per ring, the optional smmu_info block
 * (only when the IOMMU supports dynamic pagetables), and the watchdog
 * timer. On any failure everything is freed and nr_rings is forced to 1,
 * which disables preemption.
 */
void a5xx_preempt_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring;
	struct a5xx_smmu_info *ptr;
	struct drm_gem_object *bo;
	uint64_t iova;
	int i;

	/* No preemption if we only have one ring */
	if (gpu->nr_rings <= 1)
		return;

	FOR_EACH_RING(gpu, ring, i) {
		if (!ring)
			continue;

		if (preempt_init_ring(a5xx_gpu, ring))
			goto fail;
	}

	if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
		ptr = alloc_kernel_bo(gpu->dev, gpu,
			sizeof(struct a5xx_smmu_info),
			MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
			&bo, &iova);

		if (IS_ERR(ptr))
			goto fail;

		ptr->magic = A5XX_SMMU_INFO_MAGIC;

		a5xx_gpu->smmu_info_bo = bo;
		a5xx_gpu->smmu_info_iova = iova;
		a5xx_gpu->smmu_info = ptr;
	}

	setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
		(unsigned long) a5xx_gpu);

	return;
fail:
	/*
	 * On any failure our adventure is over. Clean up and
	 * set nr_rings to 1 to force preemption off
	 */
	a5xx_preempt_fini(gpu);
	gpu->nr_rings = 1;
}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
new file mode 100644
index 000000000000..5a2edb0ea518
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
@@ -0,0 +1,796 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+#include "msm_snapshot_api.h"
+
+#define A5XX_NR_SHADER_BANKS 4
+
+/*
+ * These are a list of the registers that need to be read through the HLSQ
+ * aperture through the crashdumper. These are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xE00, 0x32 }, /* HSLQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xE780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xEF80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0EC0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xE580, 0x180 }, /* SP 3D context 0 */
+ { 0x3c, 0xED80, 0x180 }, /* SP 3D context 1 */
+ { 0x3a, 0x0F00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xE700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xEF00, 0x80 }, /* TP 3D context 1 */
+};
+
+/*
+ * The debugbus registers contain device state that presumably makes
+ * sense to the hardware designers. 'count' is the number of indexes to read,
+ * each index value is 64 bits
+ */
+static const struct {
+ enum a5xx_debugbus id;
+ u32 count;
+} a5xx_debugbus_blocks[] = {
+ { A5XX_RBBM_DBGBUS_CP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ, 0x100, },
+ { A5XX_RBBM_DBGBUS_UCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_DPM, 0x100, },
+ { A5XX_RBBM_DBGBUS_TESS, 0x100, },
+ { A5XX_RBBM_DBGBUS_PC, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFDP, 0x100, },
+ { A5XX_RBBM_DBGBUS_VPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_TSE, 0x100, },
+ { A5XX_RBBM_DBGBUS_RAS, 0x100, },
+ { A5XX_RBBM_DBGBUS_VSC, 0x100, },
+ { A5XX_RBBM_DBGBUS_COM, 0x100, },
+ { A5XX_RBBM_DBGBUS_DCOM, 0x100, },
+ { A5XX_RBBM_DBGBUS_LRZ, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_DSP, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCUFCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPMU, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBP, 0x100, },
+ { A5XX_RBBM_DBGBUS_HM, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM_CFG, 0x100, },
+ { A5XX_RBBM_DBGBUS_VBIF_CX, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_LARC, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ_SPTP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_3, 0x100, },
+};
+
+/*
+ * The shader blocks are read from the HLSQ aperture - each one has its own
+ * identifier for the aperture read
+ */
+static const struct {
+ enum a5xx_shader_blocks id;
+ u32 size;
+} a5xx_shader_blocks[] = {
+ {A5XX_TP_W_MEMOBJ, 0x200},
+ {A5XX_TP_W_MIPMAP_BASE, 0x3C0},
+ {A5XX_TP_W_SAMPLER_TAG, 0x40},
+ {A5XX_TP_S_3D_SAMPLER, 0x80},
+ {A5XX_TP_S_3D_SAMPLER_TAG, 0x20},
+ {A5XX_TP_S_CS_SAMPLER, 0x40},
+ {A5XX_TP_S_CS_SAMPLER_TAG, 0x10},
+ {A5XX_SP_W_CONST, 0x800},
+ {A5XX_SP_W_CB_SIZE, 0x30},
+ {A5XX_SP_W_CB_BASE, 0xF0},
+ {A5XX_SP_W_STATE, 0x1},
+ {A5XX_SP_S_3D_CONST, 0x800},
+ {A5XX_SP_S_3D_CB_SIZE, 0x28},
+ {A5XX_SP_S_3D_UAV_SIZE, 0x80},
+ {A5XX_SP_S_CS_CONST, 0x400},
+ {A5XX_SP_S_CS_CB_SIZE, 0x8},
+ {A5XX_SP_S_CS_UAV_SIZE, 0x80},
+ {A5XX_SP_S_3D_CONST_DIRTY, 0x12},
+ {A5XX_SP_S_3D_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_3D_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_CONST_DIRTY, 0xA},
+ {A5XX_SP_S_CS_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB_DIRTY, 0xB},
+ {A5XX_SP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_W_SAMPLER, 0x80},
+ {A5XX_TP_W_MEMOBJ_TAG, 0x40},
+ {A5XX_TP_S_3D_MEMOBJ, 0x200},
+ {A5XX_TP_S_3D_MEMOBJ_TAG, 0x20},
+ {A5XX_TP_S_CS_MEMOBJ, 0x100},
+ {A5XX_TP_S_CS_MEMOBJ_TAG, 0x10},
+ {A5XX_SP_W_INSTR, 0x800},
+ {A5XX_SP_W_UAV_SIZE, 0x80},
+ {A5XX_SP_W_UAV_BASE, 0x80},
+ {A5XX_SP_W_INST_TAG, 0x40},
+ {A5XX_SP_S_3D_INSTR, 0x800},
+ {A5XX_SP_S_3D_CB_BASE, 0xC8},
+ {A5XX_SP_S_3D_UAV_BASE, 0x80},
+ {A5XX_SP_S_CS_INSTR, 0x400},
+ {A5XX_SP_S_CS_CB_BASE, 0x28},
+ {A5XX_SP_S_CS_UAV_BASE, 0x80},
+ {A5XX_SP_S_3D_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_3D_CB_BASE_DIRTY, 0x5},
+ {A5XX_SP_S_3D_UAV_BASE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_CS_CB_BASE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_BASE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB, 0x200},
+ {A5XX_HLSQ_ICB_CB_BASE_DIRTY, 0x4},
+ {A5XX_SP_POWER_RESTORE_RAM, 0x140},
+ {A5XX_TP_POWER_RESTORE_RAM, 0x40},
+};
+
+/*
+ * The A5XX architecture has a built-in engine to asynchronously dump
+ * registers from the GPU. It is used to accelerate the copy of hundreds
+ * (thousands) of registers and as a safe way to access registers that might
+ * have secure data in them (if the GPU is in secure, the crashdumper returns
+ * bogus values for those registers). On a fully secured device the CPU will be
+ * blocked from accessing those registers directly and so the crashdump is the
+ * only way that we can access context registers and the shader banks for debug
+ * purposes.
+ *
+ * The downside of the crashdump is that it requires access to GPU accessible
+ * memory (so the VBIF and the bus and the SMMU need to be up and working) and
+ * you need enough memory to write the script for the crashdumper and to store
+ * the data that you are dumping so there is a balancing act between the work to
+ * set up a crash dumper and the value we get out of it.
+ */
+
+/*
+ * The crashdump uses a pseudo-script format to read and write registers. Each
+ * operation is two 64 bit values.
+ *
+ * READ:
+ * [qword 0] [64:00] - The absolute IOVA address target for the register value
+ * [qword 1] [63:44] - the dword address of the register offset to read
+ * [15:00] - Number of dwords to read at once
+ *
+ * WRITE:
+ * [qword 0] [31:0] 32 bit value to write to the register
+ * [qword 1] [63:44] - the dword address of the register offset to write
+ * [21:21] - set 1 to write
+ * [15:00] - Number of dwords to write (usually 1)
+ *
+ * At the bottom of the script, write quadword zeros to trigger the end.
+ */
+struct crashdump {
+ struct drm_gem_object *bo;
+ void *ptr;
+ u64 iova;
+ u32 index;
+};
+
+#define CRASHDUMP_BO_SIZE (SZ_1M)
+#define CRASHDUMP_SCRIPT_SIZE (256 * SZ_1K)
+#define CRASHDUMP_DATA_SIZE (CRASHDUMP_BO_SIZE - CRASHDUMP_SCRIPT_SIZE)
+
+/*
+ * Allocate and map the single BO that holds both the crashdumper script
+ * (first CRASHDUMP_SCRIPT_SIZE bytes) and its output data (remainder).
+ * Returns 0 on success; on failure the BO reference is dropped and
+ * crashdump->bo is left NULL so callers can fall back to CPU reads.
+ */
+static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ struct drm_device *drm = gpu->dev;
+ int ret = -ENOMEM;
+
+ crashdump->bo = msm_gem_new(drm, CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED);
+ if (IS_ERR(crashdump->bo)) {
+ ret = PTR_ERR(crashdump->bo);
+ crashdump->bo = NULL;
+ return ret;
+ }
+
+ /* If the vmap fails, ret keeps its initial -ENOMEM for the out path */
+ crashdump->ptr = msm_gem_vaddr_locked(crashdump->bo);
+ if (!crashdump->ptr)
+ goto out;
+
+ ret = msm_gem_get_iova_locked(crashdump->bo, gpu->aspace,
+ &crashdump->iova);
+
+out:
+ if (ret) {
+ drm_gem_object_unreference(crashdump->bo);
+ crashdump->bo = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Point the CP at the script IOVA, kick off the crashdumper (CNTL bit 0)
+ * and spin until the hardware reports completion (CNTL bit 2).  Returns
+ * -EINVAL if no script has been built, or the spin_until() result.
+ */
+static int crashdump_run(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ if (!crashdump->ptr || !crashdump->index)
+ return -EINVAL;
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ lower_32_bits(crashdump->iova));
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_HI,
+ upper_32_bits(crashdump->iova));
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return spin_until(gpu_read(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL) & 0x04);
+}
+
+/*
+ * Undo crashdump_init(): drop the IOVA mapping and the BO reference,
+ * then zero the whole struct so a stale pointer cannot be reused.
+ * A never-initialized (bo == NULL) crashdump is a no-op.
+ */
+static void crashdump_destroy(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ if (!crashdump->bo)
+ return;
+
+ if (crashdump->iova)
+ msm_gem_put_iova(crashdump->bo, gpu->aspace);
+
+ drm_gem_object_unreference(crashdump->bo);
+
+ memset(crashdump, 0, sizeof(*crashdump));
+}
+
+/*
+ * Append a register WRITE op (two qwords) to the script: qword 0 is the
+ * 32-bit value, qword 1 packs the register dword address (bits 63:44),
+ * the write flag (bit 21) and a count of 1.
+ *
+ * NOTE(review): the `>=` bound rejects a script that exactly fills the
+ * buffer; `>` would permit the final slot - confirm intent.
+ */
+static inline void CRASHDUMP_SCRIPT_WRITE(struct crashdump *crashdump,
+ u32 reg, u32 val)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ /* This is the value to write */
+ ptr[0] = (u64) val;
+
+ /*
+ * This triggers a write to the specified register. 1 is the size of
+ * the write in dwords
+ */
+ ptr[1] = (((u64) reg) << 44) | (1 << 21) | 1;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+/*
+ * Append a register READ op: qword 0 is the absolute IOVA the dumper
+ * writes the result to (script base + data section + offset), qword 1
+ * packs the register dword address (bits 63:44) and the dword count.
+ * Both the script slot and the destination data range are bounds-checked.
+ */
+static inline void CRASHDUMP_SCRIPT_READ(struct crashdump *crashdump,
+ u32 reg, u32 count, u32 offset)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ if (WARN_ON(offset + (count * sizeof(u32)) >= CRASHDUMP_DATA_SIZE))
+ return;
+
+ ptr[0] = (u64) crashdump->iova + CRASHDUMP_SCRIPT_SIZE + offset;
+ ptr[1] = (((u64) reg) << 44) | count;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+/* CPU pointer into the data section, or NULL if offset is out of range */
+static inline void *CRASHDUMP_DATA_PTR(struct crashdump *crashdump, u32 offset)
+{
+ if (WARN_ON(!crashdump->ptr || offset >= CRASHDUMP_DATA_SIZE))
+ return NULL;
+
+ return crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset;
+}
+
+/*
+ * Read one dword from the data section.
+ * NOTE(review): CRASHDUMP_DATA_PTR() can return NULL (after a WARN_ON)
+ * and this dereferences it unconditionally - confirm callers only pass
+ * validated offsets.
+ */
+static inline u32 CRASHDUMP_DATA_READ(struct crashdump *crashdump, u32 offset)
+{
+ return *((u32 *) CRASHDUMP_DATA_PTR(crashdump, offset));
+}
+
+/* Rewind the script so a new program can be built in the same BO */
+static inline void CRASHDUMP_RESET(struct crashdump *crashdump)
+{
+ crashdump->index = 0;
+}
+
+/* Terminate the script: a pair of zero qwords stops the crashdumper */
+static inline void CRASHDUMP_END(struct crashdump *crashdump)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON((crashdump->index + (2 * sizeof(u64)))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ ptr[0] = 0;
+ ptr[1] = 0;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+/*
+ * Script two ops: select an HLSQ aperture statetype/bank, then read
+ * 'count' dwords from the aperture into the data section at 'offset'.
+ * Returns the number of data bytes the read will consume, so callers
+ * can accumulate the running offset.
+ */
+static u32 _crashdump_read_hlsq_aperture(struct crashdump *crashdump,
+ u32 offset, u32 statetype, u32 bank,
+ u32 count)
+{
+ CRASHDUMP_SCRIPT_WRITE(crashdump, REG_A5XX_HLSQ_DBG_READ_SEL,
+ A5XX_HLSQ_DBG_READ_SEL_STATETYPE(statetype) | bank);
+
+ CRASHDUMP_SCRIPT_READ(crashdump, REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE,
+ count, offset);
+
+ return count * sizeof(u32);
+}
+
+/*
+ * Copy one register group from the crashdump data section into the
+ * snapshot, preceded by a (count << 16 | reg) group header dword.
+ * Returns the number of data bytes consumed.
+ */
+static u32 _copy_registers(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 reg, u32 count,
+ u32 offset)
+{
+ int i;
+ u32 *ptr = (u32 *) (crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset);
+ /*
+ * Write the offset of the first register of the group and the number of
+ * registers in the group
+ */
+ SNAPSHOT_WRITE_U32(snapshot, ((count << 16) | reg));
+
+ /* Followed by each register value in the group */
+ for (i = 0; i < count; i++)
+ SNAPSHOT_WRITE_U32(snapshot, ptr[i]);
+
+ return count * sizeof(u32);
+}
+
+/*
+ * Return the number of registers in each register group from the
+ * adreno_gpu->registers table (pairs of inclusive start/end offsets)
+ */
+static inline u32 REG_COUNT(const unsigned int *ptr)
+{
+ return (ptr[1] - ptr[0]) + 1;
+}
+
+/*
+ * Capture what registers we can from the CPU in case the crashdumper is
+ * unavailable or broken. This will omit the SP,TP and HLSQ registers, but
+ * you'll get everything else and that ain't bad
+ */
+/*
+ * Capture what registers we can from the CPU in case the crashdumper is
+ * unavailable or broken. This will omit the SP,TP and HLSQ registers, but
+ * you'll get everything else and that ain't bad
+ */
+static void a5xx_snapshot_registers_cpu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_regs header;
+ u32 regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * Before we write the section we need to figure out how big our data
+ * section will be.  The registers table is a ~0-terminated list of
+ * (start, end) offset pairs.
+ */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ regcount += REG_COUNT(&(adreno_gpu->registers[i]));
+ groups++;
+ }
+
+ header.count = groups;
+
+ /*
+ * We need one dword for each group and then one dword for each register
+ * value in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ regcount + groups))
+ return;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 count = REG_COUNT(&(adreno_gpu->registers[i]));
+ u32 reg = adreno_gpu->registers[i];
+ int j;
+
+ /* Write the offset and count for the group */
+ SNAPSHOT_WRITE_U32(snapshot, (count << 16) | reg);
+
+ /* Write each value in the group */
+ for (j = 0; j < count; j++)
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, reg++));
+ }
+}
+
+/*
+ * Snapshot all GPU registers: the CPU-readable ones directly, then the
+ * HLSQ/SP/TP aperture registers via the crashdumper (when available).
+ * snapshot->priv carries the crashdump context, or NULL when init failed.
+ */
+static void a5xx_snapshot_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_regs header;
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0, regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * First snapshot all the registers that we can from the CPU. Do this
+ * because the crashdumper has a tendency to "taint" the value of some
+ * of the registers (because the GPU implements the crashdumper) so we
+ * only want to use the crash dump facility if we have to
+ */
+ a5xx_snapshot_registers_cpu(gpu, snapshot);
+
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* HLSQ and context registers behind the aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 count = a5xx_hlsq_aperture_regs[i].count;
+
+ /* offset advances by the bytes each read deposits */
+ offset += _crashdump_read_hlsq_aperture(crashdump, offset,
+ a5xx_hlsq_aperture_regs[i].type, 0, count);
+ regcount += count;
+
+ groups++;
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ header.count = groups;
+
+ /*
+ * The size of the data will be one dword for each "group" of registers,
+ * and then one dword for each of the registers in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ groups + regcount))
+ return;
+
+ /* Copy the registers to the snapshot, walking the same offsets */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ offset += _copy_registers(snapshot, crashdump,
+ a5xx_hlsq_aperture_regs[i].regoffset,
+ a5xx_hlsq_aperture_regs[i].count, offset);
+}
+
+/*
+ * Emit one shader bank as its own snapshot section, copying 'size'
+ * dwords from the crashdump data section at 'offset'.  Skips the copy
+ * (but not the header) when the offset is out of range.
+ */
+static void _a5xx_snapshot_shader_bank(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 block, u32 bank,
+ u32 size, u32 offset)
+{
+ void *src;
+
+ struct msm_snapshot_shader header = {
+ .type = block,
+ .index = bank,
+ .size = size,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_SHADER, size))
+ return;
+
+ src = CRASHDUMP_DATA_PTR(crashdump, offset);
+
+ if (src)
+ SNAPSHOT_MEMCPY(snapshot, src, size * sizeof(u32));
+}
+
+/*
+ * Dump every bank of every shader block through the HLSQ aperture via
+ * the crashdumper, then emit each bank as a separate snapshot section.
+ * The second loop recomputes the same byte offsets the first produced.
+ */
+static void a5xx_snapshot_shader_memory(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0;
+ int i;
+
+ /* We can only get shader memory through the crashdump */
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* For each shader block */
+ for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ /* For each block, dump 4 banks */
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++)
+ offset += _crashdump_read_hlsq_aperture(crashdump,
+ offset, a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size);
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ /* If the crashdump fails we can't get shader memory any other way */
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ /* Each bank of each shader gets its own snapshot section */
+ for (offset = 0, i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++) {
+ _a5xx_snapshot_shader_bank(snapshot, crashdump,
+ a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size, offset);
+ offset += a5xx_shader_blocks[i].size * sizeof(u32);
+ }
+ }
+}
+
+#define A5XX_NUM_AXI_ARB_BLOCKS 2
+#define A5XX_NUM_XIN_BLOCKS 4
+/* Total dwords emitted by the three VBIF test-bus loops below */
+#define VBIF_DATA_SIZE ((16 * A5XX_NUM_AXI_ARB_BLOCKS) + \
+ (18 * A5XX_NUM_XIN_BLOCKS) + (12 * A5XX_NUM_XIN_BLOCKS))
+
+/*
+ * Dump the VBIF debug bus.  Unlike the other blocks this is read via
+ * the VBIF test-bus registers: force the test-bus clock on, then walk
+ * the AXI arbiter blocks on bus 2, the XIN blocks on bus 2, and the
+ * XIN blocks on bus 1, one selected data word at a time.
+ */
+static void a5xx_snapshot_debugbus_vbif(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = A5XX_RBBM_DBGBUS_VBIF,
+ .count = VBIF_DATA_SIZE,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ VBIF_DATA_SIZE))
+ return;
+
+ gpu_rmw(gpu, REG_A5XX_VBIF_CLKON, A5XX_VBIF_CLKON_FORCE_ON_TESTBUS,
+ A5XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 0);
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS_OUT_CTRL,
+ A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN);
+
+ /* AXI arbiter blocks: 16 data words each */
+ for (i = 0; i < A5XX_NUM_AXI_ARB_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << (i + 16));
+ for (j = 0; j < 16; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ /* XIN blocks on test bus 2: 18 data words each */
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
+ for (j = 0; j < 18; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ /* XIN blocks on test bus 1: 12 data words each */
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
+ for (j = 0; j < 12; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL1,
+ A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+}
+
+/*
+ * Dump one debugbus block: for each of 'count' indexes, program the
+ * four select registers with the same (index, block) value and read
+ * back the two trace-buffer halves (each entry is 64 bits).
+ */
+static void a5xx_snapshot_debugbus_block(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 block, u32 count)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = block,
+ .count = count * 2, /* Each value is 2 dwords */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ (count * 2)))
+ return;
+
+ for (i = 0; i < count; i++) {
+ u32 reg = A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(i) |
+ A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_D, reg);
+
+ /* Each debugbus entry is a quad word */
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1));
+ }
+}
+
+/*
+ * Dump every debugbus block in a5xx_debugbus_blocks, then the VBIF
+ * bus which has its own access mechanism.
+ */
+static void a5xx_snapshot_debugbus(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_CNTLM,
+ A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(0xF));
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_debugbus_blocks); i++)
+ a5xx_snapshot_debugbus_block(gpu, snapshot,
+ a5xx_debugbus_blocks[i].id,
+ a5xx_debugbus_blocks[i].count);
+
+ /* VBIF is special and not in a good way */
+ a5xx_snapshot_debugbus_vbif(gpu, snapshot);
+}
+
+/*
+ * Dump the 64-entry CP MERCIU queue; the debug address auto-increments
+ * so each entry is read as a DATA_1/DATA_2 dword pair.
+ */
+static void a5xx_snapshot_cp_merciu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ unsigned int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MERCIU,
+ .size = 64 << 1, /* Data size is 2 dwords per entry */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64 << 1))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++) {
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_1));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_2));
+ }
+}
+
+/* Dump the 512-dword CP ROQ via its auto-incrementing debug port */
+static void a5xx_snapshot_cp_roq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_ROQ,
+ .size = 512,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 512))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+ for (i = 0; i < 512; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA));
+}
+
+/* Dump the 64-dword CP ME queue via its auto-incrementing debug port */
+static void a5xx_snapshot_cp_meq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MEQ,
+ .size = 64,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+}
+
+/*
+ * Dump 'count' values from an indexed register pair: write each index
+ * to 'addr' and read the corresponding value from 'data'.
+ */
+static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 addr, u32 data,
+ u32 count)
+{
+ unsigned int i;
+ struct msm_snapshot_indexed_regs header = {
+ .index_reg = addr,
+ .data_reg = data,
+ .start = 0,
+ .count = count,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_INDEXED_REGS,
+ count))
+ return;
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, addr, i);
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, data));
+ }
+}
+
+/*
+ * Top-level A5XX snapshot entry point.  Builds a (stack-local)
+ * crashdump context that the sub-dumpers find via snapshot->priv; if
+ * crashdump_init() fails priv stays NULL and the dumpers fall back to
+ * CPU-only reads.  HW clock gating is disabled for the duration so
+ * register reads are accurate, and re-enabled before returning.
+ * Always returns 0.
+ */
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct crashdump crashdump = { 0 };
+
+ if (!crashdump_init(gpu, &crashdump))
+ snapshot->priv = &crashdump;
+
+ /* To accurately read all registers, disable hardware clock gating */
+ a5xx_set_hwcg(gpu, false);
+
+ /* Kick it up to the generic level */
+ adreno_snapshot(gpu, snapshot);
+
+ /* Read the GPU registers */
+ a5xx_snapshot_registers(gpu, snapshot);
+
+ /* Read the shader memory banks */
+ a5xx_snapshot_shader_memory(gpu, snapshot);
+
+ /* Read the debugbus registers */
+ a5xx_snapshot_debugbus(gpu, snapshot);
+
+ /* PFP data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_STAT_ADDR, REG_A5XX_CP_PFP_STAT_DATA, 36);
+
+ /* ME data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_STAT_ADDR, REG_A5XX_CP_ME_STAT_DATA, 29);
+
+ /* DRAW_STATE data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_DRAW_STATE_ADDR, REG_A5XX_CP_DRAW_STATE_DATA,
+ 256);
+
+ /* ME cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_UCODE_DBG_ADDR, REG_A5XX_CP_ME_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* PFP cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_UCODE_DBG_ADDR, REG_A5XX_CP_PFP_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* ME queue */
+ a5xx_snapshot_cp_meq(gpu, snapshot);
+
+ /* CP ROQ */
+ a5xx_snapshot_cp_roq(gpu, snapshot);
+
+ /* CP MERCIU */
+ a5xx_snapshot_cp_merciu(gpu, snapshot);
+
+ /* Drop the stack-local context before it goes out of scope */
+ crashdump_destroy(gpu, &crashdump);
+ snapshot->priv = NULL;
+
+ /* Re-enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index c304468cf2bd..1cf84479e447 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -119,6 +121,25 @@ enum adreno_rb_copy_control_mode {
RB_COPY_DEPTH_STENCIL = 5,
};
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_XOR = 6,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
enum a3xx_render_mode {
RB_RENDERING_PASS = 0,
RB_TILING_PASS = 1,
@@ -154,6 +175,14 @@ enum a3xx_color_swap {
XYZW = 3,
};
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 1ea2df524fac..a498a60cd52d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -19,10 +19,6 @@
#include "adreno_gpu.h"
-#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
-# include <mach/kgsl.h>
-#endif
-
#define ANY_ID 0xff
bool hang_debug = false;
@@ -31,6 +27,7 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
static const struct adreno_info gpulist[] = {
{
@@ -73,6 +70,30 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 3, 0, ANY_ID),
+ .revn = 430,
+ .name = "A430",
+ .pm4fw = "a420_pm4.fw",
+ .pfpfw = "a420_pfp.fw",
+ .gmem = (SZ_1M + SZ_512K),
+ .init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .revn = 530,
+ .name = "A530",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
+ .revn = 540,
+ .name = "A540",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
},
};
@@ -82,6 +103,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
+/* A530/A540 firmware: must match the gpulist .pm4fw/.pfpfw names */
+MODULE_FIRMWARE("a530_pm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -144,12 +167,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
mutex_unlock(&dev->struct_mutex);
+
+ disable_irq(gpu->irq);
+
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
+ enable_irq(gpu->irq);
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
@@ -168,12 +195,16 @@ static void set_gpu_pdev(struct drm_device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
-#ifdef CONFIG_OF
- struct device_node *child, *node = dev->of_node;
- u32 val;
+ uint32_t val = 0;
int ret;
- ret = of_property_read_u32(node, "qcom,chipid", &val);
+ /*
+ * Read the chip ID from the device tree at bind time - we use this
+ * information to load the correct functions. All the rest of the
+ * (extensive) device tree probing should happen in the GPU specific
+ * code
+ */
+ ret = of_property_read_u32(dev->of_node, "qcom,chipid", &val);
if (ret) {
dev_err(dev, "could not find chipid: %d\n", ret);
return ret;
@@ -182,76 +213,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
config.rev = ADRENO_REV((val >> 24) & 0xff,
(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
- /* find clock rates: */
- config.fast_rate = 0;
- config.slow_rate = ~0;
- for_each_child_of_node(node, child) {
- if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
- struct device_node *pwrlvl;
- for_each_child_of_node(child, pwrlvl) {
- ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
- if (ret) {
- dev_err(dev, "could not find gpu-freq: %d\n", ret);
- return ret;
- }
- config.fast_rate = max(config.fast_rate, val);
- config.slow_rate = min(config.slow_rate, val);
- }
- }
- }
-
- if (!config.fast_rate) {
- dev_err(dev, "could not find clk rates\n");
- return -ENXIO;
- }
-
-#else
- struct kgsl_device_platform_data *pdata = dev->platform_data;
- uint32_t version = socinfo_get_version();
- if (cpu_is_apq8064ab()) {
- config.fast_rate = 450000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
- config.rev = ADRENO_REV(3, 2, 1, 0);
- } else if (cpu_is_apq8064()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MAJOR(version) == 2)
- config.rev = ADRENO_REV(3, 2, 0, 2);
- else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 1))
- config.rev = ADRENO_REV(3, 2, 0, 1);
- else
- config.rev = ADRENO_REV(3, 2, 0, 0);
-
- } else if (cpu_is_msm8960ab()) {
- config.fast_rate = 400000000;
- config.slow_rate = 320000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MINOR(version) == 0)
- config.rev = ADRENO_REV(3, 2, 1, 0);
- else
- config.rev = ADRENO_REV(3, 2, 1, 1);
-
- } else if (cpu_is_msm8930()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 3;
-
- if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 2))
- config.rev = ADRENO_REV(3, 0, 5, 2);
- else
- config.rev = ADRENO_REV(3, 0, 5, 0);
-
- }
-# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- config.bus_scale_table = pdata->bus_scale_table;
-# endif
-#endif
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index a3b54cc76495..f1883825354e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -17,12 +17,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/utsname.h>
#include "adreno_gpu.h"
+#include "msm_snapshot.h"
#include "msm_gem.h"
#include "msm_mmu.h"
-#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -35,96 +35,147 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = 0x100000;
+ return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
(adreno_gpu->rev.minor << 8) |
(adreno_gpu->rev.major << 16) |
(adreno_gpu->rev.core << 24);
return 0;
+ case MSM_PARAM_MAX_FREQ:
+ *value = gpu->gpufreq[gpu->active_level];
+ return 0;
+ case MSM_PARAM_TIMESTAMP:
+ if (adreno_gpu->funcs->get_timestamp)
+ return adreno_gpu->funcs->get_timestamp(gpu, value);
+ return -EINVAL;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int ret;
+ int i;
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
- return ret;
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int ret = msm_gem_get_iova(gpu->rb[i]->bo, gpu->aspace,
+ &gpu->rb[i]->iova);
+ if (ret) {
+ gpu->rb[i]->iova = 0;
+ dev_err(gpu->dev->dev,
+ "could not map ringbuffer %d: %d\n", i, ret);
+ return ret;
+ }
}
- /* Setup REG_CP_RB_CNTL: */
+ /*
+ * Setup REG_CP_RB_CNTL. The same value is used across targets (with
+ * the exception of A430 that disables the RPTR shadow) - the calculation
+ * for the ringbuffer size and block size is moved to msm_gpu.h for the
+ * pre-processor to deal with and the A430 variant is ORed in here
+ */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- /* size is log2(quad-words): */
- AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
-
- /* Setup ringbuffer address: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
+ MSM_GPU_RB_CNTL_DEFAULT |
+ (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
- /* Setup scratch/timestamp: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
- rbmemptr(adreno_gpu, fence));
+ /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
return 0;
}
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
+{
+ if (adreno_is_a430(adreno_gpu)) {
+ /*
+ * If index is anything but 0 this will probably break horribly,
+ * but I think that we have enough infrastructure in place to
+ * ensure that it won't be. If not then this is why your
+ * a430 stopped working.
+ */
+ return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
+ adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+ } else
+ return adreno_gpu->memptrs->rptr[ring->id];
+}
+
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
- return ring->cur - ring->start;
+ return gpu->rb[0];
}
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring)
+{
+ if (!ring)
+ return 0;
+
+ return ring->submitted_fence;
+}
+
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- return adreno_gpu->memptrs->fence;
+
+ if (!ring)
+ return 0;
+
+ return adreno_gpu->memptrs->fence[ring->id];
}
void adreno_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
- int ret;
+ struct msm_ringbuffer *ring;
+ int ret, i;
gpu->funcs->pm_suspend(gpu);
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
+ /* reset ringbuffer(s): */
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
- /* reset completed fence seqno, just discard anything pending: */
- adreno_gpu->memptrs->fence = gpu->submitted_fence;
- adreno_gpu->memptrs->rptr = 0;
- adreno_gpu->memptrs->wptr = 0;
+ /* No need for a lock here, nobody else is peeking in */
+ ring->cur = ring->start;
+ ring->next = ring->start;
+
+ /* reset completed fence seqno, discard anything pending: */
+ adreno_gpu->memptrs->fence[ring->id] =
+ adreno_submitted_fence(gpu, ring);
+ adreno_gpu->memptrs->rptr[ring->id] = 0;
+ }
gpu->funcs->pm_resume(gpu);
+
+ disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
+ enable_irq(gpu->irq);
}
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
unsigned i, ibs = 0;
for (i = 0; i < submit->nr_cmds; i++) {
@@ -133,12 +184,11 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- /* ignore if there has not been a ctx switch: */
- if (priv->lastctx == ctx)
break;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
- OUT_RING(ring, submit->cmd[i].iova);
+ OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+ CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
ibs++;
break;
@@ -169,7 +219,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
OUT_RING(ring, submit->fence);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -196,15 +246,25 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
#endif
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
return 0;
}
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr;
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /*
+ * Mask the wptr value that we calculate to fit in the HW range. This is
+ * to account for the possibility that the last command fit exactly into
+ * the ringbuffer and rb->next hasn't wrapped to zero yet
+ */
+ wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -212,22 +272,27 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
- if (spin_until(adreno_gpu->memptrs->rptr == wptr))
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
+ return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
+ return false;
}
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
@@ -235,11 +300,18 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
- seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
- seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ seq_printf(m, "rb %d: fence: %d/%d\n", i,
+ adreno_last_fence(gpu, ring),
+ adreno_submitted_fence(gpu, ring));
+
+ seq_printf(m, " rptr: %d\n",
+ get_rptr(adreno_gpu, ring));
+ seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ }
gpu->funcs->pm_resume(gpu);
@@ -268,22 +340,29 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
*/
void adreno_dump_info(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
- printk("revision: %d (%d.%d.%d.%d)\n",
+ dev_err(dev->dev, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
- printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
- printk("rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ dev_err(dev->dev, " ring %d: fence %d/%d rptr/wptr %x/%x\n", i,
+ adreno_last_fence(gpu, ring),
+ adreno_submitted_fence(gpu, ring),
+ get_rptr(adreno_gpu, ring),
+ get_wptr(ring));
+ }
for (i = 0; i < 8; i++) {
- printk("CP_SCRATCH_REG%d: %u\n", i,
+ pr_err("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
}
@@ -308,30 +387,146 @@ void adreno_dump(struct msm_gpu *gpu)
}
}
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t size = gpu->rb->size / 4;
- uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = adreno_gpu->memptrs->rptr;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
return (rptr + (size - 1) - wptr) % size;
}
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
- if (spin_until(ring_freewords(gpu) >= ndwords))
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_ERROR("%s: timeout waiting for space in ringbuffer %d\n",
+ ring->gpu->name, ring->id);
}
static const char *iommu_ports[] = {
- "gfx3d_user", "gfx3d_priv",
- "gfx3d1_user", "gfx3d1_priv",
+ "gfx3d_user",
+};
+
+/* Read the set of powerlevels */
+static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
+{
+ struct device_node *child;
+
+ gpu->active_level = 1;
+
+ /* The device tree will tell us the best clock to initialize with */
+ of_property_read_u32(node, "qcom,initial-pwrlevel", &gpu->active_level);
+
+ if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
+ gpu->active_level = 1;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+
+ if (of_property_read_u32(child, "reg", &index))
+ return -EINVAL;
+
+ if (index >= ARRAY_SIZE(gpu->gpufreq))
+ continue;
+
+ gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);
+
+ of_property_read_u32(child, "qcom,gpu-freq",
+ &gpu->gpufreq[index]);
+ of_property_read_u32(child, "qcom,bus-freq",
+ &gpu->busfreq[index]);
+ }
+
+ DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+ gpu->gpufreq[gpu->active_level],
+ gpu->gpufreq[gpu->nr_pwrlevels - 1],
+ gpu->busfreq[gpu->active_level]);
+
+ return 0;
+}
+
+/*
+ * Escape valve for targets that don't define the binning nodes. Get the
+ * first powerlevel node and parse it
+ */
+static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
+ struct device_node *parent)
+{
+ struct device_node *child;
+
+ child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
+ if (child)
+ return _adreno_get_pwrlevels(gpu, child);
+
+ dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
+ return -EINVAL;
+}
+
+/* Get the powerlevels for the target */
+static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node, *child;
+
+ /* See if the target has defined a number of power bins */
+ node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
+ if (!node) {
+ /* If not look for the qcom,gpu-pwrlevels node */
+ return adreno_get_legacy_pwrlevels(gpu, parent);
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int bin;
+
+ if (of_property_read_u32(child, "qcom,speed-bin", &bin))
+ continue;
+
+ /*
+ * If the bin matches the bin specified by the fuses, then we
+ * have a winner - parse it
+ */
+ if (adreno_gpu->speed_bin == bin)
+ return _adreno_get_pwrlevels(gpu, child);
+ }
+
+ return -ENODEV;
+}
+
+static const struct {
+ const char *str;
+ uint32_t flag;
+} quirks[] = {
+ { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+ { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
};
+/* Parse the statistics from the device tree */
+static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node = pdev->dev.of_node;
+ int i, ret;
+
+ /* Probe the powerlevels */
+ ret = adreno_get_pwrlevels(gpu, node);
+ if (ret)
+ return ret;
+
+ /* Check to see if any quirks were specified in the device tree */
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if (of_property_read_bool(node, quirks[i].str))
+ adreno_gpu->quirks |= quirks[i].flag;
+
+ return 0;
+}
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs, int nr_rings)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_mmu *mmu;
int ret;
@@ -342,19 +537,29 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
- gpu->fast_rate = config->fast_rate;
- gpu->slow_rate = config->slow_rate;
- gpu->bus_freq = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- gpu->bus_scale_table = config->bus_scale_table;
-#endif
+ /* Get the rest of the target configuration from the device tree */
+ adreno_of_parse(pdev, gpu);
- DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
- gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
+ adreno_gpu_config.irqname = "kgsl_3d0_irq";
+ adreno_gpu_config.nr_rings = nr_rings;
+
+ adreno_gpu_config.va_start = SZ_16M;
+ adreno_gpu_config.va_end = 0xffffffff;
+
+ if (adreno_gpu->revn >= 500) {
+ /* 5XX targets use a 64 bit region */
+ adreno_gpu_config.va_start = 0x800000000;
+ adreno_gpu_config.va_end = 0x8ffffffff;
+ } else {
+ adreno_gpu_config.va_start = 0x300000;
+ adreno_gpu_config.va_end = 0xffffffff;
+ }
+
+ adreno_gpu_config.nr_rings = nr_rings;
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
- RB_SIZE);
+ adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;
@@ -372,7 +577,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return ret;
}
- mmu = gpu->mmu;
+ mmu = gpu->aspace->mmu;
if (mmu) {
ret = mmu->funcs->attach(mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
@@ -397,7 +602,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return -ENOMEM;
}
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+ ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -409,12 +614,98 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
void adreno_gpu_cleanup(struct adreno_gpu *gpu)
{
+ struct msm_gem_address_space *aspace = gpu->base.aspace;
+
if (gpu->memptrs_bo) {
if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ msm_gem_put_iova(gpu->memptrs_bo, aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
release_firmware(gpu->pfp);
+
msm_gpu_cleanup(&gpu->base);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+}
+
+static void adreno_snapshot_os(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_linux header;
+
+ memset(&header, 0, sizeof(header));
+
+ header.osid = SNAPSHOT_OS_LINUX_V3;
+ strlcpy(header.release, utsname()->release, sizeof(header.release));
+ strlcpy(header.version, utsname()->version, sizeof(header.version));
+
+ header.seconds = get_seconds();
+ header.ctxtcount = 0;
+
+ SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_OS, 0);
+}
+
+static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_ringbuffer header;
+ unsigned int i, end = 0;
+ unsigned int *data = ring->start;
+
+ memset(&header, 0, sizeof(header));
+
+ /*
+ * We only want to copy the active contents of each ring, so find the
+ * last valid entry in the ringbuffer
+ */
+ for (i = 0; i < MSM_GPU_RINGBUFFER_SZ >> 2; i++) {
+ if (data[i])
+ end = i;
+ }
+
+ /* The dump always starts at 0 */
+ header.start = 0;
+ header.end = end;
+
+ /* This is the number of dwords being dumped */
+ header.count = end + 1;
+
+ /* This is the size of the actual ringbuffer */
+ header.rbsize = MSM_GPU_RINGBUFFER_SZ >> 2;
+
+ header.id = ring->id;
+ header.gpuaddr = ring->iova;
+ header.rptr = get_rptr(adreno_gpu, ring);
+ header.wptr = get_wptr(ring);
+ header.timestamp_queued = adreno_submitted_fence(gpu, ring);
+ header.timestamp_retired = adreno_last_fence(gpu, ring);
+
+ /* Write the header even if the ringbuffer data is empty */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2,
+ header.count))
+ return;
+
+ SNAPSHOT_MEMCPY(snapshot, ring->start, header.count * sizeof(u32));
+}
+
+static void adreno_snapshot_ringbuffers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_ringbuffer *ring;
+ int i;
+
+ /* Write a new section for each ringbuffer */
+ FOR_EACH_RING(gpu, ring, i)
+ adreno_snapshot_ringbuffer(gpu, snapshot, ring);
+}
+
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ adreno_snapshot_os(gpu, snapshot);
+ adreno_snapshot_ringbuffers(gpu, snapshot);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0a312e9d3afd..30461115281c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -24,10 +24,17 @@
#include "msm_gpu.h"
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
@@ -35,73 +42,21 @@
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
- REG_ADRENO_CP_DEBUG,
- REG_ADRENO_CP_ME_RAM_WADDR,
- REG_ADRENO_CP_ME_RAM_DATA,
- REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_PROTECT_CTRL,
- REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_CP_IB1_BASE,
- REG_ADRENO_CP_IB1_BUFSZ,
- REG_ADRENO_CP_IB2_BASE,
- REG_ADRENO_CP_IB2_BUFSZ,
- REG_ADRENO_CP_TIMESTAMP,
- REG_ADRENO_CP_ME_RAM_RADDR,
- REG_ADRENO_CP_ROQ_ADDR,
- REG_ADRENO_CP_ROQ_DATA,
- REG_ADRENO_CP_MERCIU_ADDR,
- REG_ADRENO_CP_MERCIU_DATA,
- REG_ADRENO_CP_MERCIU_DATA2,
- REG_ADRENO_CP_MEQ_ADDR,
- REG_ADRENO_CP_MEQ_DATA,
- REG_ADRENO_CP_HW_FAULT,
- REG_ADRENO_CP_PROTECT_STATUS,
- REG_ADRENO_SCRATCH_ADDR,
- REG_ADRENO_SCRATCH_UMSK,
- REG_ADRENO_SCRATCH_REG2,
- REG_ADRENO_RBBM_STATUS,
- REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_ADRENO_RBBM_INT_0_MASK,
- REG_ADRENO_RBBM_INT_0_STATUS,
- REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_ADRENO_RBBM_AHB_CMD,
- REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_ADRENO_RBBM_CLOCK_CTL,
- REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_ADRENO_VFD_CONTROL_0,
- REG_ADRENO_VFD_INDEX_MAX,
- REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_ADRENO_PA_SC_AA_CONFIG,
- REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_ADRENO_TP0_CHICKEN,
- REG_ADRENO_RBBM_RBBM_CTL,
- REG_ADRENO_UCHE_INVALIDATE0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
+enum adreno_quirks {
+ ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+ ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -114,6 +69,7 @@ struct adreno_rev {
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
+ int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
struct adreno_info {
@@ -127,10 +83,20 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
+#define _sizeof(member) \
+ sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
+
+#define _base(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
+#define rbmemptr(adreno_gpu, index, member) \
+ (_base((adreno_gpu), member) + ((index) * _sizeof(member)))
+
struct adreno_rbmemptrs {
- volatile uint32_t rptr;
- volatile uint32_t wptr;
- volatile uint32_t fence;
+ volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
+ volatile uint32_t fence[MSM_GPU_MAX_RINGS];
+ volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS];
+ volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
};
struct adreno_gpu {
@@ -152,7 +118,7 @@ struct adreno_gpu {
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
- uint32_t memptrs_iova;
+ uint64_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
@@ -160,16 +126,15 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
+
+ uint32_t quirks;
+ uint32_t speed_bin;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, slow_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
-#endif
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -186,6 +151,9 @@ struct adreno_platform_config {
__ret; \
})
+#define GPU_OF_NODE(_g) \
+ (((struct msm_drm_private *) \
+ ((_g)->dev->dev_private))->gpu_pdev->dev.of_node)
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -228,32 +196,51 @@ static inline int adreno_is_a420(struct adreno_gpu *gpu)
return gpu->revn == 420;
}
+static inline int adreno_is_a430(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 430;
+}
+
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 540;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
@@ -261,19 +248,49 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
- adreno_wait_ring(ring->gpu, 1);
+ adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
/*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
@@ -284,6 +301,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}
+
+ /*
+ * REG_SKIP is a special value that tell us that the register in
+ * question isn't implemented on target but don't trigger a BUG(). This
+ * is used to cleanly implement adreno_gpu_write64() and
+ * adreno_gpu_read64() in a generic fashion
+ */
+ if (gpu->reg_offsets[offset_name] == REG_SKIP)
+ return false;
+
return true;
}
@@ -305,4 +332,40 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+ enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+ adreno_gpu_write(gpu, lo, lower_32_bits(data));
+ adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
+
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+ ((1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index a22fef569499..9911a181f9c2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -57,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
+ STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -81,7 +84,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES = 34,
};
enum pc_di_src_sel {
@@ -109,11 +111,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
};
enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
@@ -162,6 +168,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,
@@ -172,6 +179,26 @@ enum adreno_pm4_type3_packets {
CP_UNKNOWN_1A = 26,
CP_UNKNOWN_4E = 78,
CP_WIDE_REG_WRITE = 116,
+ CP_SCRATCH_TO_REG = 77,
+ CP_REG_TO_SCRATCH = 74,
+ CP_WAIT_MEM_WRITES = 18,
+ CP_COND_REG_EXEC = 71,
+ CP_MEM_TO_REG = 66,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -190,6 +217,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
};
enum adreno_state_type {
@@ -199,7 +227,11 @@ enum adreno_state_type {
enum adreno_state_src {
SS_DIRECT = 0,
+ SS_INVALID_ALL_IC = 2,
+ SS_INVALID_PART_IC = 3,
SS_INDIRECT = 4,
+ SS_INDIRECT_TCM = 5,
+ SS_INDIRECT_STM = 6,
};
enum a4xx_index_size {
@@ -227,7 +259,7 @@ static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
{
return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
}
-#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000
#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
{
@@ -379,7 +411,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -499,5 +536,102 @@ static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
}
+#define REG_CP_REG_TO_MEM_0 0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK 0x3ff80000
+#define CP_REG_TO_MEM_0_CNT__SHIFT 19
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_1 0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index b8520aadbc0c..fb9617ce572a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1186,7 +1186,7 @@ static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
- u32 iova = 0;
+ u64 iova = 0;
dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
SZ_4K,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 5f5a3732cdf6..4cb4764e7492 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -89,7 +89,7 @@ int msm_dsi_manager_phy_enable(int id,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
@@ -143,7 +143,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
- u32 iova, u32 len);
+ u64 iova, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index b2b5f3dd1b4c..4958594d5266 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c49868efcda..4580a6e3c877 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -836,7 +836,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
int ret;
- u32 iova;
+ u64 iova;
mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
@@ -974,7 +974,7 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
int ret;
- u32 iova;
+ uint64_t iova;
bool triggered;
ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
@@ -1750,11 +1750,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return ret;
}
-void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u64 iova, u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ /* FIXME: Verify that the iova < 32 bits? */
+ dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0455ff75074a..2091b748abbb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -774,7 +774,7 @@ restore_host0:
return ret;
}
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 80ec65e47468..2d999494cdea 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 7d7662e69e11..506434fac993 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index 90bf5ed46746..f1072c18c81e 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
new file mode 100644
index 000000000000..347b78886b24
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -0,0 +1,1352 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "sde-hdmi:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_hdmi.h"
+
+static DEFINE_MUTEX(sde_hdmi_list_lock);
+static LIST_HEAD(sde_hdmi_list);
+
+static const struct of_device_id sde_hdmi_dt_match[] = {
+ {.compatible = "qcom,hdmi-display"},
+ {}
+};
+
+static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_1K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf, SZ_4K, "name = %s\n", display->name);
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+
+static const struct file_operations dump_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_dump_info_read,
+};
+
+static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
+{
+ int rc = 0;
+ struct dentry *dir, *dump_file;
+
+ dir = debugfs_create_dir(display->name, NULL);
+ if (!dir) {
+ rc = -ENOMEM;
+ SDE_ERROR("[%s]debugfs create dir failed, rc = %d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ dump_file = debugfs_create_file("dump_info",
+ 0444,
+ dir,
+ display,
+ &dump_info_fops);
+ if (IS_ERR_OR_NULL(dump_file)) {
+ rc = PTR_ERR(dump_file);
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ display->root = dir;
+ return rc;
+error_remove_dir:
+ debugfs_remove(dir);
+error:
+ return rc;
+}
+
+static void _sde_hdmi_debugfs_deinit(struct sde_hdmi *display)
+{
+ debugfs_remove(display->root);
+}
+
+static void _sde_hdmi_phy_reset(struct hdmi *hdmi)
+{
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+}
+
+static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ if (on) {
+ if (config->ddc_clk_gpio != -1) {
+ ret = gpio_request(config->ddc_clk_gpio,
+ "HDMI_DDC_CLK");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio,
+ ret);
+ goto error_ddc_clk_gpio;
+ }
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+ }
+
+ if (config->ddc_data_gpio != -1) {
+ ret = gpio_request(config->ddc_data_gpio,
+ "HDMI_DDC_DATA");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio,
+ ret);
+ goto error_ddc_data_gpio;
+ }
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+ }
+
+ ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD", config->hpd_gpio, ret);
+ goto error_hpd_gpio;
+ }
+ gpio_direction_output(config->hpd_gpio, 1);
+ if (config->hpd5v_gpio != -1) {
+ ret = gpio_request(config->hpd5v_gpio, "HDMI_HPD_5V");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD_5V",
+ config->hpd5v_gpio,
+ ret);
+ goto error_hpd5v_gpio;
+ }
+ gpio_set_value_cansleep(config->hpd5v_gpio, 1);
+ }
+
+ if (config->mux_en_gpio != -1) {
+ ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_EN", config->mux_en_gpio,
+ ret);
+ goto error_en_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_en_gpio, 1);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ ret = gpio_request(config->mux_sel_gpio,
+ "HDMI_MUX_SEL");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_SEL", config->mux_sel_gpio,
+ ret);
+ goto error_sel_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_sel_gpio, 0);
+ }
+
+ if (config->mux_lpm_gpio != -1) {
+ ret = gpio_request(config->mux_lpm_gpio,
+ "HDMI_MUX_LPM");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_LPM",
+ config->mux_lpm_gpio, ret);
+ goto error_lpm_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+ }
+ SDE_DEBUG("gpio on");
+ } else {
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+
+ gpio_free(config->hpd_gpio);
+
+ if (config->mux_en_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_en_gpio, 0);
+ gpio_free(config->mux_en_gpio);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+ gpio_free(config->mux_sel_gpio);
+ }
+
+ if (config->mux_lpm_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+ gpio_free(config->mux_lpm_gpio);
+ }
+ SDE_DEBUG("gpio off");
+ }
+
+ return 0;
+
+error_lpm_gpio:
+ if (config->mux_sel_gpio != -1)
+ gpio_free(config->mux_sel_gpio);
+error_sel_gpio:
+ if (config->mux_en_gpio != -1)
+ gpio_free(config->mux_en_gpio);
+error_en_gpio:
+ gpio_free(config->hpd5v_gpio);
+error_hpd5v_gpio:
+ gpio_free(config->hpd_gpio);
+error_hpd_gpio:
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+error_ddc_data_gpio:
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+error_ddc_clk_gpio:
+ return ret;
+}
+
+static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ uint32_t hpd_ctrl;
+ int i, ret;
+ unsigned long flags;
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret) {
+ SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+ goto fail;
+ }
+
+ ret = _sde_hdmi_gpio_config(hdmi, true);
+ if (ret) {
+ SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ pr_warn("failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+ }
+
+ sde_hdmi_set_mode(hdmi, false);
+ _sde_hdmi_phy_reset(hdmi);
+ sde_hdmi_set_mode(hdmi, true);
+
+ hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+ /* enable HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_CONNECT |
+ HDMI_HPD_INT_CTRL_INT_EN);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int i, ret = 0;
+
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+ sde_hdmi_set_mode(hdmi, false);
+
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+
+ ret = _sde_hdmi_gpio_config(hdmi, false);
+ if (ret)
+ pr_warn("failed to unconfigure GPIOs: %d\n", ret);
+
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ pr_warn("pinctrl state chg failed: %d\n", ret);
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret)
+ pr_warn("failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ }
+}
+
+static void _sde_hdmi_hotplug_work(struct work_struct *work)
+{
+ struct sde_hdmi *sde_hdmi =
+ container_of(work, struct sde_hdmi, hpd_work);
+ struct drm_connector *connector;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl ||
+ !sde_hdmi->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ sde_hdmi);
+ return;
+ }
+
+ connector = sde_hdmi->ctrl.ctrl->connector;
+
+ if (sde_hdmi->connected)
+ sde_hdmi_get_edid(connector, sde_hdmi);
+ else
+ sde_hdmi_free_edid(sde_hdmi);
+
+ sde_hdmi_notify_clients(connector, sde_hdmi->connected);
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
+static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ uint32_t hpd_int_status, hpd_int_ctrl;
+
+ /* Process HPD: */
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+ if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+ (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+ sde_hdmi->connected = !!(hpd_int_status &
+ HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+ /* ack & disable (temporarily) HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DRM_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
+ hpd_int_ctrl);
+
+ /* detect disconnect if we are connected or visa versa: */
+ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+ if (!sde_hdmi->connected)
+ hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+ if (!sde_hdmi->non_pluggable)
+ queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+ }
+}
+
+static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
+{
+ struct sde_hdmi *sde_hdmi = dev_id;
+ struct hdmi *hdmi;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+ SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+ return IRQ_NONE;
+ }
+ hdmi = sde_hdmi->ctrl.ctrl;
+ /* Process HPD: */
+ _sde_hdmi_connector_irq(sde_hdmi);
+
+ /* Process DDC: */
+ hdmi_i2c_irq(hdmi->i2c);
+
+ /* Process HDCP: */
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
+
+ /* TODO audio.. */
+
+ return IRQ_HANDLED;
+}
+
+static int _sde_hdmi_audio_info_setup(struct platform_device *pdev,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ int rc = -EPERM;
+ struct sde_hdmi *display = NULL;
+ struct hdmi *hdmi = NULL;
+
+ display = platform_get_drvdata(pdev);
+
+ if (!display || !params) {
+ SDE_ERROR("invalid param(s), display %pK, params %pK\n",
+ display, params);
+ return -ENODEV;
+ }
+
+ hdmi = display->ctrl.ctrl;
+
+ if (hdmi->hdmi_mode)
+ rc = sde_hdmi_audio_on(hdmi, params);
+
+ return rc;
+}
+
+/*
+ * _sde_hdmi_get_audio_edid_blk() - hand the audio codec pointers to the
+ * cached EDID audio and speaker-allocation data blocks.
+ *
+ * The blocks themselves live in display->edid and were filled during
+ * EDID parsing; no copy is made here.
+ */
+static int _sde_hdmi_get_audio_edid_blk(struct platform_device *pdev,
+	struct msm_ext_disp_audio_edid_blk *blk)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+	struct hdmi_edid_ctrl *edid;
+
+	if (!display || !blk) {
+		SDE_ERROR("invalid param(s), display %pK, blk %pK\n",
+			display, blk);
+		return -ENODEV;
+	}
+
+	edid = &display->edid;
+
+	blk->audio_data_blk = edid->audio_data_block;
+	blk->audio_data_blk_size = edid->adb_size;
+	blk->spk_alloc_data_blk = edid->spkr_alloc_data_block;
+	blk->spk_alloc_data_blk_size = edid->sadb_size;
+
+	return 0;
+}
+
+/*
+ * _sde_hdmi_get_cable_status() - ext-display codec callback reporting
+ * whether the HDMI link is powered up and the cable is attached.
+ * @vote: unused by this implementation (part of the codec-ops ABI).
+ *
+ * Returns 1 when powered and connected, 0 otherwise, -ENODEV on error.
+ */
+static int _sde_hdmi_get_cable_status(struct platform_device *pdev, u32 vote)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+	struct hdmi *hdmi;
+
+	if (!display) {
+		SDE_ERROR("invalid param(s), display %pK\n", display);
+		return -ENODEV;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	/*
+	 * ctrl.ctrl is only populated at bind time; the codec can call in
+	 * earlier than that, so guard against a NULL controller handle
+	 * instead of dereferencing it unconditionally.
+	 */
+	if (!hdmi)
+		return -ENODEV;
+
+	return hdmi->power_on && display->connected;
+}
+
+/*
+ * _sde_hdmi_ext_disp_init() - register this display with the external
+ * display (audio) framework.
+ *
+ * Fills the codec-ops callbacks, resolves the qcom,msm_ext_disp phandle
+ * to its platform device, and registers the interface with it.
+ *
+ * Returns 0 on success or a negative errno.
+ */
+static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
+{
+	int rc = 0;
+	struct device_node *pd_np;
+	const char *phandle = "qcom,msm_ext_disp";
+
+	if (!display) {
+		/*
+		 * display is NULL here, so it must not be dereferenced for
+		 * the log message (the old code read display->name).
+		 */
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	display->ext_audio_data.type = EXT_DISPLAY_TYPE_HDMI;
+	display->ext_audio_data.pdev = display->pdev;
+	display->ext_audio_data.codec_ops.audio_info_setup =
+		_sde_hdmi_audio_info_setup;
+	display->ext_audio_data.codec_ops.get_audio_edid_blk =
+		_sde_hdmi_get_audio_edid_blk;
+	display->ext_audio_data.codec_ops.cable_status =
+		_sde_hdmi_get_cable_status;
+
+	if (!display->pdev->dev.of_node) {
+		SDE_ERROR("[%s]cannot find sde_hdmi of_node\n", display->name);
+		return -ENODEV;
+	}
+
+	pd_np = of_parse_phandle(display->pdev->dev.of_node, phandle, 0);
+	if (!pd_np) {
+		SDE_ERROR("[%s]cannot find %s device node\n",
+			display->name, phandle);
+		return -ENODEV;
+	}
+
+	display->ext_pdev = of_find_device_by_node(pd_np);
+	/* of_parse_phandle() took a reference on the node; drop it. */
+	of_node_put(pd_np);
+	if (!display->ext_pdev) {
+		SDE_ERROR("[%s]cannot find %s platform device\n",
+			display->name, phandle);
+		return -ENODEV;
+	}
+
+	rc = msm_ext_disp_register_intf(display->ext_pdev,
+			&display->ext_audio_data);
+	if (rc)
+		SDE_ERROR("[%s]failed to register disp\n", display->name);
+
+	return rc;
+}
+
+/*
+ * sde_hdmi_notify_clients() - push a cable connect/disconnect event to
+ * the external-display (audio) framework.
+ * @connector: DRM connector backing this HDMI display.
+ * @connected: true for cable connect, false for disconnect.
+ *
+ * Audio notification is only requested when the sink is in HDMI mode;
+ * DVI sinks get a video-only hotplug flag.
+ */
+void sde_hdmi_notify_clients(struct drm_connector *connector,
+ bool connected)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ int state = connected ?
+ EXT_DISPLAY_CABLE_CONNECT : EXT_DISPLAY_CABLE_DISCONNECT;
+
+ /* hpd op is only populated once _sde_hdmi_ext_disp_init() ran */
+ if (display && display->ext_audio_data.intf_ops.hpd) {
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ u32 flags = MSM_EXT_DISP_HPD_VIDEO;
+
+ if (hdmi->hdmi_mode)
+ flags |= MSM_EXT_DISP_HPD_AUDIO;
+
+ display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
+ display->ext_audio_data.type, state, flags);
+ }
+}
+
+/*
+ * sde_hdmi_ack_state() - acknowledge a connector status change to the
+ * external-display framework so the audio side can settle.
+ * @connector: DRM connector backing this HDMI display.
+ * @status: the drm connector status being acknowledged.
+ */
+void sde_hdmi_ack_state(struct drm_connector *connector,
+	enum drm_connector_status status)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+	struct hdmi *hdmi;
+
+	if (!display)
+		return;
+
+	hdmi = display->ctrl.ctrl;
+	/* Only HDMI sinks (not DVI) carry audio, so only they get acks. */
+	if (hdmi->hdmi_mode && display->ext_audio_data.intf_ops.notify)
+		display->ext_audio_data.intf_ops.notify(display->ext_pdev,
+				status);
+}
+
+/*
+ * sde_hdmi_set_mode() - enable/disable the HDMI core and select
+ * HDMI vs DVI signalling in REG_HDMI_CTRL.
+ * @hdmi: controller handle.
+ * @power_on: true to enable the core, false to disable.
+ *
+ * Performed under reg_lock since REG_HDMI_CTRL is shared with other
+ * register writers.
+ */
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+ uint32_t ctrl = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ if (power_on) {
+ ctrl |= HDMI_CTRL_ENABLE;
+ if (!hdmi->hdmi_mode) {
+ /*
+ * NOTE(review): for DVI sinks the HDMI bit is pulsed --
+ * written set once, then cleared by the final write below.
+ * Presumably a controller handshake requirement; confirm
+ * against the HW programming guide before restructuring.
+ */
+ ctrl |= HDMI_CTRL_HDMI;
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ ctrl &= ~HDMI_CTRL_HDMI;
+ } else {
+ ctrl |= HDMI_CTRL_HDMI;
+ }
+ } else {
+ /* power off: leave ENABLE as read, clear HDMI signalling */
+ ctrl &= ~HDMI_CTRL_HDMI;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ DRM_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+ power_on ? "Enable" : "Disable", ctrl);
+}
+
+/*
+ * sde_hdmi_get_info() - report the display properties to the caller.
+ * @info: out-parameter receiving the properties.
+ * @display: the struct sde_hdmi handle.
+ *
+ * Non-pluggable panels are forced to "connected" and HDMI mode;
+ * pluggable panels report hot-plug/EDID capabilities instead.
+ *
+ * Returns 0 on success, -EINVAL on bad arguments.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+	void *display)
+{
+	int rc = 0;
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct hdmi *hdmi;
+
+	if (!display || !info) {
+		SDE_ERROR("display=%p or info=%p is NULL\n", display, info);
+		return -EINVAL;
+	}
+
+	/*
+	 * Fetch the controller only after validating 'display': the old
+	 * code dereferenced hdmi_display->ctrl.ctrl before the NULL check.
+	 */
+	hdmi = hdmi_display->ctrl.ctrl;
+
+	mutex_lock(&hdmi_display->display_lock);
+
+	info->intf_type = DRM_MODE_CONNECTOR_HDMIA;
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = 0;
+	if (hdmi_display->non_pluggable) {
+		info->capabilities = MSM_DISPLAY_CAP_VID_MODE;
+		hdmi_display->connected = true;
+		hdmi->hdmi_mode = true;
+	} else {
+		info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG |
+			MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
+	}
+	info->is_connected = hdmi_display->connected;
+	info->max_width = 1920;
+	info->max_height = 1080;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+	mutex_unlock(&hdmi_display->display_lock);
+	return rc;
+}
+
+/*
+ * sde_hdmi_get_num_of_displays() - count the probed HDMI displays.
+ *
+ * Walks the global sde_hdmi_list under its lock and returns the number
+ * of entries.
+ */
+u32 sde_hdmi_get_num_of_displays(void)
+{
+	u32 n = 0;
+	struct sde_hdmi *entry;
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry(entry, &sde_hdmi_list, list)
+		n++;
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	return n;
+}
+
+/*
+ * sde_hdmi_get_displays() - copy up to @max_display_count display
+ * handles from the global list into @display_array.
+ *
+ * Returns the number of handles copied (0 on bad arguments).
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count)
+{
+	struct sde_hdmi *entry;
+	int count = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!display_array || !max_display_count) {
+		if (!display_array)
+			SDE_ERROR("invalid param\n");
+		return 0;
+	}
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry(entry, &sde_hdmi_list, list) {
+		if (count >= max_display_count)
+			break;
+		display_array[count++] = entry;
+	}
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	return count;
+}
+
+/*
+ * sde_hdmi_connector_pre_deinit() - connector teardown hook; turns off
+ * HPD detection before the connector goes away.
+ * @connector: DRM connector (unused here).
+ * @display: struct sde_hdmi handle.
+ *
+ * Returns 0 on success, -EINVAL on a bad handle.
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display)
+{
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+ SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+ return -EINVAL;
+ }
+
+ /* note: helper name is spelled "hdp" in this driver, not "hpd" */
+ _sde_hdmi_hdp_disable(sde_hdmi);
+
+ return 0;
+}
+
+/*
+ * sde_hdmi_connector_post_init() - connector setup hook; publishes the
+ * display type, wires the connector back-pointer, and arms HPD.
+ * @connector: DRM connector being initialized.
+ * @info: optional sde_kms_info blob to publish properties into.
+ * @display: struct sde_hdmi handle.
+ *
+ * Returns 0 on success or the error from _sde_hdmi_hpd_enable().
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ int rc = 0;
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+
+ if (!sde_hdmi) {
+ SDE_ERROR("sde_hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ hdmi = sde_hdmi->ctrl.ctrl;
+ if (!hdmi) {
+ SDE_ERROR("hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ /* info is optional; only publish DISPLAY_TYPE when given */
+ if (info)
+ sde_kms_info_add_keystr(info,
+ "DISPLAY_TYPE",
+ sde_hdmi->display_type);
+
+ hdmi->connector = connector;
+ INIT_WORK(&sde_hdmi->hpd_work, _sde_hdmi_hotplug_work);
+
+ /* Enable HPD detection */
+ rc = _sde_hdmi_hpd_enable(sde_hdmi);
+ if (rc)
+ SDE_ERROR("failed to enable HPD: %d\n", rc);
+
+ return rc;
+}
+
+/*
+ * sde_hdmi_connector_detect() - DRM detect hook.
+ *
+ * Hot-pluggable panels report the live cable state from
+ * sde_hdmi_get_info(); fixed (non-pluggable) panels always report
+ * connected. Also mirrors the physical dimensions into the connector.
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	struct msm_display_info info;
+	enum drm_connector_status status = connector_status_unknown;
+	int rc;
+
+	if (!connector || !display) {
+		SDE_ERROR("connector=%p or display=%p is NULL\n",
+			connector, display);
+		return status;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* get display dsi_info */
+	memset(&info, 0x0, sizeof(info));
+	rc = sde_hdmi_get_info(&info, display);
+	if (rc) {
+		SDE_ERROR("failed to get display info, rc=%d\n", rc);
+		return connector_status_disconnected;
+	}
+
+	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+		status = info.is_connected ? connector_status_connected :
+			connector_status_disconnected;
+	else
+		status = connector_status_connected;
+
+	connector->display_info.width_mm = info.width_mm;
+	connector->display_info.height_mm = info.height_mm;
+
+	return status;
+}
+
+/*
+ * _sde_hdmi_update_modes() - refresh the connector's EDID property and
+ * (re)populate its mode list from the cached EDID.
+ *
+ * Returns the number of modes added, or 0 when no EDID is cached (in
+ * which case the EDID property is cleared).
+ */
+int _sde_hdmi_update_modes(struct drm_connector *connector,
+	struct sde_hdmi *display)
+{
+	struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
+
+	if (!edid_ctrl->edid) {
+		/* nothing cached: drop any stale EDID from the connector */
+		drm_mode_connector_update_edid_property(connector, NULL);
+		return 0;
+	}
+
+	drm_mode_connector_update_edid_property(connector, edid_ctrl->edid);
+	return drm_add_edid_modes(connector, edid_ctrl->edid);
+}
+
+/*
+ * sde_hdmi_connector_get_modes() - DRM get_modes hook.
+ *
+ * Non-pluggable panels use the DT-provided custom mode list; pluggable
+ * panels use the cached EDID via _sde_hdmi_update_modes().
+ *
+ * Returns the number of modes actually added to the connector.
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct drm_display_mode *mode, *m;
+	int ret = 0;
+
+	if (!connector || !display) {
+		SDE_ERROR("connector=%p or display=%p is NULL\n",
+			connector, display);
+		return 0;
+	}
+
+	SDE_DEBUG("\n");
+
+	if (hdmi_display->non_pluggable) {
+		list_for_each_entry(mode, &hdmi_display->mode_list, head) {
+			m = drm_mode_duplicate(connector->dev, mode);
+			if (!m) {
+				SDE_ERROR("failed to add hdmi mode %dx%d\n",
+					mode->hdisplay, mode->vdisplay);
+				break;
+			}
+			drm_mode_probed_add(connector, m);
+			/*
+			 * Count only modes actually probed: the old code
+			 * returned num_of_modes even when duplication
+			 * failed part-way through.
+			 */
+			ret++;
+		}
+	} else {
+		ret = _sde_hdmi_update_modes(connector, display);
+	}
+
+	return ret;
+}
+
+/*
+ * sde_hdmi_mode_valid() - DRM mode_valid hook.
+ *
+ * Accepts a mode only when the KMS clock driver can produce exactly the
+ * requested pixel clock; otherwise rejects it with MODE_CLOCK_RANGE.
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct hdmi *hdmi;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	long actual, requested;
+
+	if (!connector || !display || !mode) {
+		SDE_ERROR("connector=%p or display=%p or mode=%p is NULL\n",
+			connector, display, mode);
+		/*
+		 * Reject on bad arguments. The old 'return 0' silently
+		 * meant MODE_OK, accepting a mode we never validated.
+		 */
+		return MODE_BAD;
+	}
+
+	SDE_DEBUG("\n");
+
+	hdmi = hdmi_display->ctrl.ctrl;
+	priv = connector->dev->dev_private;
+	kms = priv->kms;
+	requested = 1000 * mode->clock; /* mode->clock is in kHz */
+	actual = kms->funcs->round_pixclk(kms,
+			requested, hdmi->encoder);
+
+	SDE_DEBUG("requested=%ld, actual=%ld", requested, actual);
+
+	if (actual != requested)
+		return MODE_CLOCK_RANGE;
+
+	return MODE_OK;
+}
+
+/*
+ * sde_hdmi_dev_init() - per-device init hook.
+ *
+ * No resources are acquired yet; this only validates the handle.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_hdmi_dev_deinit() - per-device deinit hook.
+ *
+ * Mirror of sde_hdmi_dev_init(); nothing to release yet.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_hdmi_bind() - component bind: wire the display to the DRM device,
+ * set up debugfs, the ext-display (audio) interface and EDID state.
+ *
+ * Returns 0 on success or a negative errno; partial setup is rolled
+ * back on failure.
+ */
+static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+	int rc = 0;
+	struct sde_hdmi_ctrl *display_ctrl = NULL;
+	struct sde_hdmi *display = NULL;
+	struct drm_device *drm = NULL;
+	struct msm_drm_private *priv = NULL;
+	struct platform_device *pdev = to_platform_device(dev);
+
+	SDE_DEBUG("E\n");
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+			dev, pdev, master);
+		return -EINVAL;
+	}
+
+	drm = dev_get_drvdata(master);
+	display = platform_get_drvdata(pdev);
+	if (!drm || !display) {
+		pr_err("invalid param(s), drm %pK, display %pK\n",
+			drm, display);
+		return -EINVAL;
+	}
+
+	priv = drm->dev_private;
+	mutex_lock(&display->display_lock);
+
+	rc = _sde_hdmi_debugfs_init(display);
+	if (rc) {
+		SDE_ERROR("[%s]Debugfs init failed, rc=%d\n",
+			display->name, rc);
+		goto debug_error;
+	}
+
+	rc = _sde_hdmi_ext_disp_init(display);
+	if (rc) {
+		SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n",
+			display->name, rc);
+		goto error;
+	}
+
+	rc = sde_hdmi_edid_init(display);
+	if (rc) {
+		SDE_ERROR("[%s]EDID init failed, rc=%d\n",
+			display->name, rc);
+		goto error;
+	}
+
+	display_ctrl = &display->ctrl;
+	display_ctrl->ctrl = priv->hdmi;
+	SDE_DEBUG("display_ctrl->ctrl=%p\n", display_ctrl->ctrl);
+	display->drm_dev = drm;
+
+	/*
+	 * Success: return here. The old code fell through into the error
+	 * path and tore down the debugfs entries it had just created.
+	 */
+	mutex_unlock(&display->display_lock);
+	return 0;
+
+error:
+	(void)_sde_hdmi_debugfs_deinit(display);
+debug_error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+
+/*
+ * sde_hdmi_unbind() - component unbind: undo sde_hdmi_bind().
+ *
+ * Tears down debugfs and EDID state and drops the DRM device pointer,
+ * all under the display lock.
+ */
+static void sde_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sde_hdmi *display = NULL;
+
+ if (!dev) {
+ SDE_ERROR("invalid params\n");
+ return;
+ }
+
+ display = platform_get_drvdata(to_platform_device(dev));
+ if (!display) {
+ SDE_ERROR("Invalid display device\n");
+ return;
+ }
+ mutex_lock(&display->display_lock);
+ /* best-effort teardown; failures here are not actionable */
+ (void)_sde_hdmi_debugfs_deinit(display);
+ (void)sde_hdmi_edid_deinit(display);
+ display->drm_dev = NULL;
+ mutex_unlock(&display->display_lock);
+}
+
+/* Component framework hooks; invoked when the master DRM device binds. */
+static const struct component_ops sde_hdmi_comp_ops = {
+ .bind = sde_hdmi_bind,
+ .unbind = sde_hdmi_unbind,
+};
+
+/*
+ * _sde_hdmi_parse_dt_modes() - build a drm_display_mode list from the
+ * qcom,customize-modes DT subtree (child node or phandle).
+ * @np: the sde_hdmi device node.
+ * @head: list to append successfully parsed modes to.
+ * @num_of_modes: optional out-parameter for the parsed mode count.
+ *
+ * A malformed child node is skipped (its mode is freed); parsing
+ * continues with the next child. Returns the rc of the last child
+ * processed, 0 when the property is absent.
+ */
+static int _sde_hdmi_parse_dt_modes(struct device_node *np,
+					struct list_head *head,
+					u32 *num_of_modes)
+{
+	int rc = 0;
+	struct drm_display_mode *mode;
+	u32 mode_count = 0;
+	struct device_node *node = NULL;
+	struct device_node *root_node = NULL;
+	const char *name;
+	u32 h_front_porch, h_pulse_width, h_back_porch;
+	u32 v_front_porch, v_pulse_width, v_back_porch;
+	bool h_active_high, v_active_high;
+	u32 flags;
+
+	root_node = of_get_child_by_name(np, "qcom,customize-modes");
+	if (!root_node) {
+		root_node = of_parse_phandle(np, "qcom,customize-modes", 0);
+		if (!root_node) {
+			DRM_INFO("No entry present for qcom,customize-modes");
+			goto end;
+		}
+	}
+	for_each_child_of_node(root_node, node) {
+		rc = 0;
+		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+		if (!mode) {
+			SDE_ERROR("Out of memory\n");
+			rc = -ENOMEM;
+			continue;
+		}
+
+		rc = of_property_read_string(node, "qcom,mode-name",
+			&name);
+		if (rc) {
+			SDE_ERROR("failed to read qcom,mode-name, rc=%d\n", rc);
+			goto fail;
+		}
+		strlcpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+		rc = of_property_read_u32(node, "qcom,mode-h-active",
+			&mode->hdisplay);
+		if (rc) {
+			SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+			&h_front_porch);
+		if (rc) {
+			SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+			&h_pulse_width);
+		if (rc) {
+			SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+			&h_back_porch);
+		if (rc) {
+			SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		h_active_high = of_property_read_bool(node,
+						"qcom,mode-h-active-high");
+
+		rc = of_property_read_u32(node, "qcom,mode-v-active",
+			&mode->vdisplay);
+		if (rc) {
+			SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+			&v_front_porch);
+		if (rc) {
+			SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+			&v_pulse_width);
+		if (rc) {
+			SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+			&v_back_porch);
+		if (rc) {
+			SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		v_active_high = of_property_read_bool(node,
+						"qcom,mode-v-active-high");
+
+		/* "refersh" is a typo, but it is the published DT property */
+		rc = of_property_read_u32(node, "qcom,mode-refersh-rate",
+			&mode->vrefresh);
+		if (rc) {
+			SDE_ERROR("failed to read refersh-rate, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+			&mode->clock);
+		if (rc) {
+			SDE_ERROR("failed to read clock, rc=%d\n", rc);
+			goto fail;
+		}
+
+		mode->hsync_start = mode->hdisplay + h_front_porch;
+		mode->hsync_end = mode->hsync_start + h_pulse_width;
+		mode->htotal = mode->hsync_end + h_back_porch;
+		mode->vsync_start = mode->vdisplay + v_front_porch;
+		mode->vsync_end = mode->vsync_start + v_pulse_width;
+		mode->vtotal = mode->vsync_end + v_back_porch;
+		/*
+		 * Reset the flags for every mode. Previously 'flags' was
+		 * only OR-ed across iterations, so e.g. a PHSYNC flag from
+		 * one mode leaked into every mode parsed after it.
+		 */
+		flags = 0;
+		if (h_active_high)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+		if (v_active_high)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+		mode->flags = flags;
+
+		if (!rc) {
+			mode_count++;
+			list_add_tail(&mode->head, head);
+		}
+
+		SDE_DEBUG("mode[%d] h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+			mode_count - 1, mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal, mode->vdisplay,
+			mode->vsync_start, mode->vsync_end, mode->vtotal,
+			mode->vrefresh, mode->flags, mode->clock);
+fail:
+		if (rc) {
+			kfree(mode);
+			continue;
+		}
+	}
+
+	if (num_of_modes)
+		*num_of_modes = mode_count;
+
+end:
+	return rc;
+}
+
+/*
+ * _sde_hdmi_parse_dt() - read the display's DT properties: label,
+ * display type, pluggability, and any custom mode list.
+ *
+ * Returns the result of mode-list parsing; label/type are optional and
+ * fall back to "unknown".
+ */
+static int _sde_hdmi_parse_dt(struct device_node *node,
+ struct sde_hdmi *display)
+{
+ int rc = 0;
+
+ display->name = of_get_property(node, "label", NULL);
+
+ display->display_type = of_get_property(node,
+ "qcom,display-type", NULL);
+ if (!display->display_type)
+ display->display_type = "unknown";
+
+ display->non_pluggable = of_property_read_bool(node,
+ "qcom,non-pluggable");
+
+ rc = _sde_hdmi_parse_dt_modes(node, &display->mode_list,
+ &display->num_of_modes);
+ if (rc)
+ SDE_ERROR("parse_dt_modes failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/*
+ * _sde_hdmi_dev_probe() - platform probe: allocate the display, parse
+ * DT, register it on the global list and add the component.
+ *
+ * Returns 0 on success or a negative errno; all registration is undone
+ * on failure.
+ */
+static int _sde_hdmi_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct sde_hdmi *display;
+	int ret = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!pdev || !pdev->dev.of_node) {
+		SDE_ERROR("pdev not found\n");
+		return -ENODEV;
+	}
+
+	display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+	if (!display)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&display->mode_list);
+	/* DT parse failure is non-fatal: probe continues without modes */
+	rc = _sde_hdmi_parse_dt(pdev->dev.of_node, display);
+	if (rc)
+		SDE_ERROR("parse dt failed, rc=%d\n", rc);
+
+	mutex_init(&display->display_lock);
+	display->pdev = pdev;
+	platform_set_drvdata(pdev, display);
+	mutex_lock(&sde_hdmi_list_lock);
+	list_add(&display->list, &sde_hdmi_list);
+	mutex_unlock(&sde_hdmi_list_lock);
+	if (!sde_hdmi_dev_init(display)) {
+		ret = component_add(&pdev->dev, &sde_hdmi_comp_ops);
+		if (ret) {
+			pr_err("component add failed\n");
+			goto out;
+		}
+	}
+	return 0;
+
+out:
+	/*
+	 * Undo the registration: the old code tested 'rc' (the DT parse
+	 * result, usually 0) here and returned it, which both leaked the
+	 * stale list entry and masked the component_add() failure.
+	 */
+	mutex_lock(&sde_hdmi_list_lock);
+	list_del(&display->list);
+	mutex_unlock(&sde_hdmi_list_lock);
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, display);
+	return ret;
+}
+
+/*
+ * _sde_hdmi_dev_remove() - platform remove: unregister from the global
+ * list, free DT-parsed modes and release the display.
+ */
+static int _sde_hdmi_dev_remove(struct platform_device *pdev)
+{
+	struct sde_hdmi *display;
+	struct sde_hdmi *pos, *tmp;
+	struct drm_display_mode *mode, *n;
+
+	if (!pdev) {
+		SDE_ERROR("Invalid device\n");
+		return -EINVAL;
+	}
+
+	display = platform_get_drvdata(pdev);
+	/*
+	 * Guard against a NULL drvdata (e.g. remove racing a failed
+	 * probe): the old code walked and dereferenced it unchecked.
+	 */
+	if (!display) {
+		SDE_ERROR("Invalid display device\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry_safe(pos, tmp, &sde_hdmi_list, list) {
+		if (pos == display) {
+			list_del(&display->list);
+			break;
+		}
+	}
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	/* free any custom modes parsed from DT */
+	list_for_each_entry_safe(mode, n, &display->mode_list, head) {
+		list_del(&mode->head);
+		kfree(mode);
+	}
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, display);
+	return 0;
+}
+
+/* Platform driver; matched against the sde_hdmi_dt_match OF table. */
+static struct platform_driver sde_hdmi_driver = {
+ .probe = _sde_hdmi_dev_probe,
+ .remove = _sde_hdmi_dev_remove,
+ .driver = {
+ .name = "sde_hdmi",
+ .of_match_table = sde_hdmi_dt_match,
+ },
+};
+
+/*
+ * sde_hdmi_drm_init() - create the DRM-side objects for this display:
+ * the HDMI bridge, the interrupt handler, and the encoder wiring.
+ * @display: struct sde_hdmi handle (must already be bound).
+ * @enc: the DRM encoder driving this display.
+ *
+ * Returns 0 on success or a negative errno; the bridge is destroyed on
+ * the error path since DRM never learned about it.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
+{
+	int rc = 0;
+	struct msm_drm_private *priv = NULL;
+	struct hdmi *hdmi;
+	struct platform_device *pdev;
+
+	DBG("");
+	if (!display || !display->drm_dev || !enc) {
+		SDE_ERROR("display=%p or enc=%p or drm_dev is NULL\n",
+			display, enc);
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+	priv = display->drm_dev->dev_private;
+	hdmi = display->ctrl.ctrl;
+
+	if (!priv || !hdmi) {
+		SDE_ERROR("priv=%p or hdmi=%p is NULL\n",
+			priv, hdmi);
+		mutex_unlock(&display->display_lock);
+		return -EINVAL;
+	}
+
+	pdev = hdmi->pdev;
+	hdmi->dev = display->drm_dev;
+	hdmi->encoder = enc;
+
+	hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+	hdmi->bridge = sde_hdmi_bridge_init(hdmi);
+	if (IS_ERR(hdmi->bridge)) {
+		rc = PTR_ERR(hdmi->bridge);
+		SDE_ERROR("failed to create HDMI bridge: %d\n", rc);
+		hdmi->bridge = NULL;
+		goto error;
+	}
+	hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	/*
+	 * irq_of_parse_and_map() returns an unsigned value and 0 on
+	 * failure, so the old '< 0' test could never trigger.
+	 */
+	if (hdmi->irq == 0) {
+		rc = -EINVAL;
+		SDE_ERROR("failed to get irq: %d\n", rc);
+		goto error;
+	}
+
+	rc = devm_request_irq(&pdev->dev, hdmi->irq,
+		_sde_hdmi_irq, IRQF_TRIGGER_HIGH,
+		"sde_hdmi_isr", display);
+	if (rc < 0) {
+		SDE_ERROR("failed to request IRQ%u: %d\n",
+			hdmi->irq, rc);
+		goto error;
+	}
+
+	enc->bridge = hdmi->bridge;
+	priv->bridges[priv->num_bridges++] = hdmi->bridge;
+
+	mutex_unlock(&display->display_lock);
+	return 0;
+
+error:
+	/* bridge is normally destroyed by drm: */
+	if (hdmi->bridge) {
+		hdmi_bridge_destroy(hdmi->bridge);
+		hdmi->bridge = NULL;
+	}
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * sde_hdmi_drm_deinit() - DRM-side teardown hook.
+ *
+ * The bridge and irq are released by the DRM core / devm; this only
+ * validates the handle.
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Module entry point: register the platform driver. */
+static int __init sde_hdmi_register(void)
+{
+	DBG("");
+	return platform_driver_register(&sde_hdmi_driver);
+}
+
+/* Module exit point: mirror of sde_hdmi_register(). */
+static void __exit sde_hdmi_unregister(void)
+{
+	platform_driver_unregister(&sde_hdmi_driver);
+}
+
+module_init(sde_hdmi_register);
+module_exit(sde_hdmi_unregister);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
new file mode 100644
index 000000000000..869d1bebf9db
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_H_
+#define _SDE_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/msm_ext_display.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "hdmi.h"
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+
+/**
+ * struct sde_hdmi_info - defines hdmi display properties
+ * @display_type: Display type as defined by device tree.
+ * @is_hot_pluggable: Can panel be hot plugged.
+ * @is_connected: Is panel connected.
+ * @is_edid_supported: Does panel support reading EDID information.
+ * @width_mm: Physical width of panel in millimeters.
+ * @height_mm: Physical height of panel in millimeters.
+ */
+struct sde_hdmi_info {
+ const char *display_type;
+
+ /* HPD */
+ bool is_hot_pluggable;
+ bool is_connected;
+ bool is_edid_supported;
+
+ /* Physical properties */
+ u32 width_mm;
+ u32 height_mm;
+};
+
+/**
+ * struct sde_hdmi_ctrl - hdmi ctrl/phy information for the display
+ * @ctrl: Handle to the HDMI controller device.
+ * @ctrl_of_node: pHandle to the HDMI controller device.
+ * @hdmi_ctrl_idx: HDMI controller instance id.
+ */
+struct sde_hdmi_ctrl {
+ /* controller info */
+ struct hdmi *ctrl;
+ struct device_node *ctrl_of_node;
+ u32 hdmi_ctrl_idx;
+};
+
+/**
+ * struct hdmi_edid_ctrl - cached EDID state for the display
+ * @edid: parsed EDID, NULL when none has been read.
+ * @audio_data_block: raw CEA audio data blocks copied from the EDID.
+ * @adb_size: number of valid bytes in @audio_data_block.
+ * @spkr_alloc_data_block: raw CEA speaker allocation data block.
+ * @sadb_size: number of valid bytes in @spkr_alloc_data_block.
+ */
+struct hdmi_edid_ctrl {
+ struct edid *edid;
+ u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+ int adb_size;
+ u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+ int sadb_size;
+};
+
+/**
+ * struct sde_hdmi - hdmi display information
+ * @pdev: Pointer to platform device.
+ * @drm_dev: DRM device associated with the display.
+ * @name: Name of the display.
+ * @display_type: Display type as defined in device tree.
+ * @list: List pointer.
+ * @display_lock: Mutex for sde_hdmi interface.
+ * @ctrl: Controller information for HDMI display.
+ * @ext_pdev: Platform device of the qcom,msm_ext_disp node.
+ * @ext_audio_data: External display (audio) interface data.
+ * @edid: Cached EDID state.
+ * @non_pluggable: If HDMI display is non pluggable
+ * @num_of_modes: Number of modes supported by display if non pluggable.
+ * @mode_list: Mode list if non pluggable.
+ * @connected: If HDMI display is connected.
+ * @is_tpg_enabled: TPG state.
+ * @hpd_work: HPD work structure.
+ * @root: Debug fs root entry.
+ */
+struct sde_hdmi {
+ struct platform_device *pdev;
+ struct drm_device *drm_dev;
+
+ const char *name;
+ const char *display_type;
+ struct list_head list;
+ struct mutex display_lock;
+
+ struct sde_hdmi_ctrl ctrl;
+
+ struct platform_device *ext_pdev;
+ struct msm_ext_disp_init_data ext_audio_data;
+ struct hdmi_edid_ctrl edid;
+
+ bool non_pluggable;
+ u32 num_of_modes;
+ struct list_head mode_list;
+ bool connected;
+ bool is_tpg_enabled;
+
+ struct work_struct hpd_work;
+
+ /* DEBUG FS */
+ struct dentry *root;
+};
+
+#ifdef CONFIG_DRM_SDE_HDMI
+/**
+ * sde_hdmi_get_num_of_displays() - returns number of display devices
+ * supported.
+ *
+ * Return: number of displays.
+ */
+u32 sde_hdmi_get_num_of_displays(void);
+
+/**
+ * sde_hdmi_get_displays() - returns the display list that's available.
+ * @display_array: Pointer to display list
+ * @max_display_count: Number of maximum displays in the list
+ *
+ * Return: number of available displays.
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count);
+
+/**
+ * sde_hdmi_connector_pre_deinit()- perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_connector_post_init()- perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * sde_hdmi_connector_detect()- determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display);
+
+/**
+ * sde_hdmi_connector_get_modes - add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Number of modes added
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_mode_valid - determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+/**
+ * sde_hdmi_dev_init() - Initializes the display device
+ * @display: Handle to the display.
+ *
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_dev_deinit() - Desinitializes the display device
+ * @display: Handle to the display.
+ *
+ * All the resources acquired during device init will be released.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_drm_init() - initializes DRM objects for the display device.
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ * display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc);
+
+/**
+ * sde_hdmi_drm_deinit() - destroys DRM objects assosciated with the display
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_info() - returns the display properties
+ * @info: Pointer to the structure where info is stored.
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display);
+
+/**
+ * sde_hdmi_bridge_init() - init sde hdmi bridge
+ * @hdmi: Handle to the hdmi.
+ *
+ * Return: struct drm_bridge *.
+ */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_set_mode() - Set HDMI mode API.
+ * @hdmi: Handle to the hdmi.
+ * @power_on: Power on/off request.
+ *
+ * Return: void.
+ */
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+
+/**
+ * sde_hdmi_audio_on() - enable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ * @params: audio setup parameters from codec.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params);
+
+/**
+ * sde_hdmi_audio_off() - disable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ *
+ * Return: void.
+ */
+void sde_hdmi_audio_off(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_config_avmute() - mute hdmi.
+ * @hdmi: Handle to the hdmi.
+ * @set: enable/disable avmute.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set);
+
+/**
+ * sde_hdmi_notify_clients() - notify hdmi clients of the connection status.
+ * @connector: Handle to the drm_connector.
+ * @connected: connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_notify_clients(struct drm_connector *connector,
+ bool connected);
+
+/**
+ * sde_hdmi_ack_state() - acknowledge the connection status.
+ * @connector: Handle to the drm_connector.
+ * @status: connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_ack_state(struct drm_connector *connector,
+ enum drm_connector_status status);
+
+/**
+ * sde_hdmi_edid_init() - init edid structure.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_edid_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_edid_deinit() - deinit edid structure.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_edid_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_edid() - get edid info.
+ * @connector: Handle to the drm_connector.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: void.
+ */
+void sde_hdmi_get_edid(struct drm_connector *connector,
+ struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_free_edid() - free edid structure.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_free_edid(struct sde_hdmi *display);
+
+#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
+
+/*
+ * Stub implementations used when CONFIG_DRM_SDE_HDMI is disabled:
+ * every entry point compiles away to a benign no-op (zero displays,
+ * disconnected status, MODE_OK) so callers need no #ifdef guards.
+ */
+static inline u32 sde_hdmi_get_num_of_displays(void)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_displays(void **display_array,
+ u32 max_display_count)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ return connector_status_disconnected;
+}
+
+static inline int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_mode_status sde_hdmi_mode_valid(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ return MODE_OK;
+}
+
+static inline int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display)
+{
+ return 0;
+}
+#endif /*#else of CONFIG_DRM_SDE_HDMI*/
+#endif /* _SDE_HDMI_H_ */
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
new file mode 100644
index 000000000000..13ea49cfa42d
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
@@ -0,0 +1,393 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+/* CEA-861 Audio InfoFrame framing: type 0x84, version 1, payload 10 bytes */
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+/* HDMI spec ACR relationship: N / CTS = (128 * fs) / pclk */
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+ AUDIO_CHANNEL_2 = 2,
+ AUDIO_CHANNEL_3,
+ AUDIO_CHANNEL_4,
+ AUDIO_CHANNEL_5,
+ AUDIO_CHANNEL_6,
+ AUDIO_CHANNEL_7,
+ AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+ u32 n;
+ u32 cts;
+};
+
+/* Index values used by the ACR register-selection switch, not Hz values */
+enum hdmi_audio_sample_rates {
+ AUDIO_SAMPLE_RATE_32KHZ,
+ AUDIO_SAMPLE_RATE_44_1KHZ,
+ AUDIO_SAMPLE_RATE_48KHZ,
+ AUDIO_SAMPLE_RATE_88_2KHZ,
+ AUDIO_SAMPLE_RATE_96KHZ,
+ AUDIO_SAMPLE_RATE_176_4KHZ,
+ AUDIO_SAMPLE_RATE_192KHZ,
+ AUDIO_SAMPLE_RATE_MAX
+};
+
+/* Per-call audio context: controller handle, setup params, pixel clock */
+struct sde_hdmi_audio {
+ struct hdmi *hdmi;
+ struct msm_ext_disp_audio_setup_params params;
+ u32 pclk;
+};
+
+/*
+ * _sde_hdmi_audio_get_audio_sample_rate() - map a rate in Hz to its
+ * enum hdmi_audio_sample_rates index, in place.
+ * @sample_rate_hz: in: rate in Hz; out: AUDIO_SAMPLE_RATE_* index.
+ *
+ * Unrecognized rates are logged and left unchanged so the caller's
+ * switch falls through to its default register set.
+ */
+static void _sde_hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+ static const u32 rate_map[][2] = {
+ { 32000, AUDIO_SAMPLE_RATE_32KHZ },
+ { 44100, AUDIO_SAMPLE_RATE_44_1KHZ },
+ { 48000, AUDIO_SAMPLE_RATE_48KHZ },
+ { 88200, AUDIO_SAMPLE_RATE_88_2KHZ },
+ { 96000, AUDIO_SAMPLE_RATE_96KHZ },
+ { 176400, AUDIO_SAMPLE_RATE_176_4KHZ },
+ { 192000, AUDIO_SAMPLE_RATE_192KHZ },
+ };
+ u32 i;
+
+ for (i = 0; i < sizeof(rate_map) / sizeof(rate_map[0]); i++) {
+ if (rate_map[i][0] == *sample_rate_hz) {
+ *sample_rate_hz = rate_map[i][1];
+ return;
+ }
+ }
+
+ SDE_ERROR("%d unchanged\n", *sample_rate_hz);
+}
+
+/*
+ * _sde_hdmi_audio_get_acr_param() - compute ACR N and CTS values.
+ * @pclk: pixel clock (units chosen by caller; only the ratio matters).
+ * @fs: audio sample rate in Hz.
+ * @acr: output; filled with the reduced and scaled N/CTS pair.
+ *
+ * Reduces (128*fs)/pclk to lowest terms, then scales both values by a
+ * common multiplier chosen so 128*fs/N lands near the spec target of
+ * 1000 (allowed range 300..1500).
+ */
+static void _sde_hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+ struct hdmi_audio_acr *acr)
+{
+ u32 div, mul;
+
+ if (!acr) {
+ SDE_ERROR("invalid data\n");
+ return;
+ }
+
+ /*
+ * as per HDMI specification, N/CTS = (128*fs)/pclk.
+ * get the ratio using this formula.
+ */
+ acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+ acr->cts = pclk;
+
+ /* get the greatest common divisor for the ratio */
+ div = gcd(acr->n, acr->cts);
+
+ /* get the n and cts values wrt N/CTS formula */
+ acr->n /= div;
+ acr->cts /= div;
+
+ /*
+ * as per HDMI specification, 300 <= 128*fs/N <= 1500
+ * with a target of 128*fs/N = 1000. To get closest
+ * value without truncating fractional values, find
+ * the corresponding multiplier
+ */
+ mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+ + (acr->n - 1)) / acr->n;
+
+ acr->n *= mul;
+ acr->cts *= mul;
+}
+
+/*
+ * _sde_hdmi_audio_acr_enable() - program Audio Clock Regeneration regs.
+ * @audio: audio context (controller, params, pixel clock).
+ *
+ * Computes the N/CTS pair for the configured sample rate, selects the
+ * matching 32/44.1/48 kHz register bank (higher rates reuse the base
+ * bank with an N_MULTIPLE scale factor), then enables continuous ACR
+ * packet transmission.  Register write order (CTS/N before packet
+ * control) is preserved as-is.
+ */
+static void _sde_hdmi_audio_acr_enable(struct sde_hdmi_audio *audio)
+{
+ struct hdmi_audio_acr acr;
+ struct msm_ext_disp_audio_setup_params *params;
+ u32 pclk, layout, multiplier = 1, sample_rate;
+ u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+ struct hdmi *hdmi;
+
+ hdmi = audio->hdmi;
+ params = &audio->params;
+ pclk = audio->pclk;
+ sample_rate = params->sample_rate_hz;
+
+ _sde_hdmi_audio_get_acr_param(pclk, sample_rate, &acr);
+ /* converts sample_rate from Hz to an AUDIO_SAMPLE_RATE_* index */
+ _sde_hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+ /* layout 0 = 2-channel, layout 1 = multi-channel */
+ layout = (params->num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+
+ SDE_DEBUG("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+ /* AUDIO_PRIORITY | SOURCE */
+ acr_pkt_ctl = BIT(31) | BIT(8);
+
+ switch (sample_rate) {
+ case AUDIO_SAMPLE_RATE_44_1KHZ:
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_48KHZ:
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_192KHZ:
+ /* 4x 48 kHz: scale N down, signal multiplier to hardware */
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_176_4KHZ:
+ /* 4x 44.1 kHz */
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_96KHZ:
+ /* 2x 48 kHz */
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_88_2KHZ:
+ /* 2x 44.1 kHz */
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ default:
+ /* 32 kHz and any unrecognized rate use the 32 kHz bank */
+ multiplier = 1;
+
+ acr_pkt_ctl |= 0x1 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_32_0;
+ acr_reg_n = HDMI_ACR_32_1;
+ break;
+ }
+
+ aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+ /* N_MULTIPLE(multiplier) */
+ acr_pkt_ctl &= ~(7 << 16);
+ acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+ /* SEND | CONT */
+ acr_pkt_ctl |= BIT(0) | BIT(1);
+
+ hdmi_write(hdmi, acr_reg_cts, acr.cts);
+ hdmi_write(hdmi, acr_reg_n, acr.n);
+ hdmi_write(hdmi, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+/*
+ * _sde_hdmi_audio_acr_setup() - enable or tear down ACR transmission.
+ * @audio: audio context.
+ * @on: true to program and enable ACR, false to disable it.
+ *
+ * Tear-down just clears the ACR packet control register.
+ */
+static void _sde_hdmi_audio_acr_setup(struct sde_hdmi_audio *audio, bool on)
+{
+ if (!on) {
+ hdmi_write(audio->hdmi, HDMI_ACR_PKT_CTRL, 0);
+ return;
+ }
+
+ _sde_hdmi_audio_acr_enable(audio);
+}
+
+/*
+ * _sde_hdmi_audio_infoframe_setup() - build and send the Audio InfoFrame.
+ * @audio: audio context; params describe channels/allocation/levels.
+ * @enabled: true to construct and enable the infoframe, false to
+ * disable transmission and clear the payload registers.
+ *
+ * On disable, the function jumps to "end" with all payload values still
+ * zero, so the final writes both clear the registers and mask the
+ * transmit-enable bits in HDMI_INFOFRAME_CTRL0.
+ */
+static void _sde_hdmi_audio_infoframe_setup(struct sde_hdmi_audio *audio,
+ bool enabled)
+{
+ struct hdmi *hdmi = audio->hdmi;
+ u32 channels, channel_allocation, level_shift, down_mix, layout;
+ u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+ u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+ u32 check_sum, sample_present;
+
+ /* preserve other fields; 0xF0 holds the audio infoframe enables */
+ audio_info_ctrl_reg = hdmi_read(hdmi, HDMI_INFOFRAME_CTRL0);
+ audio_info_ctrl_reg &= ~0xF0;
+
+ if (!enabled)
+ goto end;
+
+ /* CEA-861 encodes channel count as (channels - 1) */
+ channels = audio->params.num_of_channels - 1;
+ channel_allocation = audio->params.channel_allocation;
+ level_shift = audio->params.level_shift;
+ down_mix = audio->params.down_mix;
+ sample_present = audio->params.sample_present;
+
+ layout = (audio->params.num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+ aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+ audio_info_1_reg |= channel_allocation & 0xFF;
+ audio_info_1_reg |= ((level_shift & 0xF) << 11);
+ audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
+ /* standard InfoFrame checksum: bytes sum to 0 modulo 256 */
+ check_sum = 0;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+ check_sum += channels;
+ check_sum += channel_allocation;
+ check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+ check_sum &= 0xFF;
+ check_sum = (u8) (256 - check_sum);
+
+ audio_info_0_reg |= check_sum & 0xFF;
+ audio_info_0_reg |= ((channels & 0x7) << 8);
+
+ /* Enable Audio InfoFrame Transmission */
+ audio_info_ctrl_reg |= 0xF0;
+
+ if (layout) {
+ /* Set the Layout bit */
+ hdmi_debug_reg |= BIT(4);
+
+ /* Set the Sample Present bits */
+ hdmi_debug_reg |= sample_present & 0xF;
+ }
+end:
+ hdmi_write(hdmi, HDMI_DEBUG, hdmi_debug_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO0, audio_info_0_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO1, audio_info_1_reg);
+ hdmi_write(hdmi, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
+
+/*
+ * sde_hdmi_audio_on() - enable HDMI audio (ACR + Audio InfoFrame).
+ * @hdmi: HDMI controller handle.
+ * @params: audio setup parameters; copied, caller retains ownership.
+ *
+ * If no channel count is supplied, defaults to 2 channels at 48 kHz.
+ *
+ * Return: 0 on success, -ENODEV on invalid arguments.
+ */
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ struct sde_hdmi_audio audio;
+ int rc = 0;
+
+ /* params is dereferenced below, so guard it along with hdmi */
+ if (!hdmi || !params) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ audio.pclk = hdmi->pixclock;
+ audio.params = *params;
+ audio.hdmi = hdmi;
+
+ if (!audio.params.num_of_channels) {
+ audio.params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+ audio.params.num_of_channels = AUDIO_CHANNEL_2;
+ }
+
+ _sde_hdmi_audio_acr_setup(&audio, true);
+ _sde_hdmi_audio_infoframe_setup(&audio, true);
+
+ SDE_DEBUG("HDMI Audio: Enabled\n");
+end:
+ return rc;
+}
+
+/*
+ * sde_hdmi_audio_off() - disable HDMI audio infoframe and ACR packets.
+ * @hdmi: HDMI controller handle; invalid handles are logged and ignored.
+ */
+void sde_hdmi_audio_off(struct hdmi *hdmi)
+{
+ struct sde_hdmi_audio audio;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ return;
+ }
+
+ /* only .hdmi is consumed on the disable path */
+ audio.hdmi = hdmi;
+
+ _sde_hdmi_audio_infoframe_setup(&audio, false);
+ _sde_hdmi_audio_acr_setup(&audio, false);
+
+ SDE_DEBUG("HDMI Audio: Disabled\n");
+}
+
+/*
+ * sde_hdmi_config_avmute() - set or clear AV mute via the GC packet.
+ * @hdmi: HDMI controller handle.
+ * @set: true to assert AV mute, false to clear it.
+ *
+ * Only touches the VBI packet control when the mute state actually
+ * changes.
+ *
+ * Return: 0 on success, -ENODEV on invalid handle.
+ */
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set)
+{
+ u32 av_mute_status;
+ bool av_pkt_en = false;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ return -ENODEV;
+ }
+
+ av_mute_status = hdmi_read(hdmi, HDMI_GC);
+
+ if (set) {
+ if (!(av_mute_status & BIT(0))) {
+ hdmi_write(hdmi, HDMI_GC, av_mute_status | BIT(0));
+ av_pkt_en = true;
+ }
+ } else {
+ if (av_mute_status & BIT(0)) {
+ hdmi_write(hdmi, HDMI_GC, av_mute_status & ~BIT(0));
+ av_pkt_en = true;
+ }
+ }
+
+ /*
+ * Enable AV Mute transmission here.  Note: the mask must be
+ * BIT(4) | BIT(5); the previous BIT(4) & BIT(5) evaluates to 0
+ * and never set the GC packet send/cont bits.
+ */
+ if (av_pkt_en)
+ hdmi_write(hdmi, HDMI_VBI_PKT_CTRL,
+ hdmi_read(hdmi, HDMI_VBI_PKT_CTRL) | (BIT(4) | BIT(5)));
+
+ SDE_DEBUG("AVMUTE %s\n", set ? "set" : "cleared");
+
+ return 0;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
new file mode 100644
index 000000000000..681dca501f9b
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "sde_hdmi.h"
+#include "hdmi.h"
+
+/* drm_bridge wrapper that carries the MSM HDMI controller handle */
+struct sde_hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_bridge(x) container_of(x, struct sde_hdmi_bridge, base)
+
+/* for AVI program */
+#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
+#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6)
+#define HDMI_SPD_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE)
+#define HDMI_DEFAULT_VENDOR_NAME "unknown"
+#define HDMI_DEFAULT_PRODUCT_NAME "msm"
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+#define HDMI_AVI_IFRAME_LINE_NUMBER 1
+#define HDMI_VENDOR_IFRAME_LINE_NUMBER 3
+
+/* Bridge owns no extra resources (devm-allocated); nothing to free. */
+void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+/*
+ * _sde_hdmi_bridge_power_on() - enable HDMI power regulators and clocks.
+ * @bridge: bridge whose controller is being powered.
+ *
+ * Sets the pixel clock rate (pwr_clks[0]) before enabling any clocks.
+ * Individual enable failures are logged but not unwound; bring-up
+ * continues best-effort.
+ */
+static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->pwr_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+
+ if (config->pwr_clk_cnt > 0) {
+ DRM_DEBUG("pixclock: %lu", hdmi->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+ if (ret) {
+ SDE_ERROR("failed to set pixel clk: %s (%d)\n",
+ config->pwr_clk_names[0], ret);
+ }
+ }
+
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ }
+ }
+}
+
+/*
+ * _sde_hdmi_bridge_power_off() - disable HDMI clocks and regulators.
+ * @bridge: bridge whose controller is being powered down.
+ *
+ * Clocks are released before regulators, mirroring power-on order.
+ */
+static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ /* Wait for vsync */
+ msleep(20);
+
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->pwr_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to disable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+}
+
+/*
+ * _sde_hdmi_bridge_pre_enable() - power up the link before enable.
+ * @bridge: bridge being enabled.
+ *
+ * Order: rails/clocks -> PHY -> controller enable -> HDCP (if
+ * supported) -> ack cable-connect to the audio side.
+ */
+static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DRM_DEBUG("power up");
+
+ /* power_on flag keeps the rails reference-free but idempotent */
+ if (!hdmi->power_on) {
+ _sde_hdmi_bridge_power_on(bridge);
+ hdmi->power_on = true;
+ }
+
+ if (phy)
+ phy->funcs->powerup(phy, hdmi->pixclock);
+
+ sde_hdmi_set_mode(hdmi, true);
+
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
+
+ sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_CONNECT);
+}
+
+/*
+ * sde_hdmi_force_update_audio() - push an HPD state to the audio driver.
+ * @connector: connector whose sde_hdmi display is consulted.
+ * @status: cable state to report.
+ *
+ * Only needed for non-pluggable panels, which never generate a real
+ * HPD event for the external-display audio interface.
+ */
+static void sde_hdmi_force_update_audio(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ if (display && display->non_pluggable) {
+ display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
+ display->ext_audio_data.type,
+ status,
+ MSM_EXT_DISP_HPD_AUDIO);
+ }
+}
+
+/* Bridge .enable: notify audio of (forced) cable connect. */
+static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+
+ /* force update audio ops when there's no HPD event */
+ sde_hdmi_force_update_audio(hdmi->connector,
+ EXT_DISPLAY_CABLE_CONNECT);
+}
+
+/* Bridge .disable: notify audio of (forced) cable disconnect. */
+static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+
+ /* force update audio ops when there's no HPD event */
+ sde_hdmi_force_update_audio(hdmi->connector,
+ EXT_DISPLAY_CABLE_DISCONNECT);
+}
+
+/*
+ * _sde_hdmi_bridge_post_disable() - tear down the link after disable.
+ * @bridge: bridge being disabled.
+ *
+ * Reverse of pre_enable: HDCP off -> audio off -> controller disable
+ * -> PHY powerdown -> rails/clocks -> ack cable-disconnect.
+ */
+static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
+
+ sde_hdmi_audio_off(hdmi);
+
+ DRM_DEBUG("power down");
+ sde_hdmi_set_mode(hdmi, false);
+
+ if (phy)
+ phy->funcs->powerdown(phy);
+
+ if (hdmi->power_on) {
+ _sde_hdmi_bridge_power_off(bridge);
+ hdmi->power_on = false;
+ }
+
+ sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_DISCONNECT);
+}
+
+/*
+ * _sde_hdmi_bridge_set_avi_infoframe() - build and enable the AVI
+ * InfoFrame for the current mode.
+ * @hdmi: controller handle.
+ * @mode: display mode the infoframe describes.
+ *
+ * Packs the DRM-generated infoframe into the four 32-bit AVI_INFO
+ * registers; the checksum byte goes first, the version byte
+ * (avi_iframe[1]) is folded into the top byte of the last word.
+ */
+static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+ u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+ u8 checksum;
+ u32 reg_val;
+ struct hdmi_avi_infoframe info;
+
+ drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+ hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+ checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+
+ reg_val = checksum |
+ LEFT_SHIFT_BYTE(avi_frame[0]) |
+ LEFT_SHIFT_WORD(avi_frame[1]) |
+ LEFT_SHIFT_24BITS(avi_frame[2]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), reg_val);
+
+ reg_val = avi_frame[3] |
+ LEFT_SHIFT_BYTE(avi_frame[4]) |
+ LEFT_SHIFT_WORD(avi_frame[5]) |
+ LEFT_SHIFT_24BITS(avi_frame[6]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), reg_val);
+
+ reg_val = avi_frame[7] |
+ LEFT_SHIFT_BYTE(avi_frame[8]) |
+ LEFT_SHIFT_WORD(avi_frame[9]) |
+ LEFT_SHIFT_24BITS(avi_frame[10]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(2), reg_val);
+
+ reg_val = avi_frame[11] |
+ LEFT_SHIFT_BYTE(avi_frame[12]) |
+ LEFT_SHIFT_24BITS(avi_iframe[1]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(3), reg_val);
+
+ /* AVI InfoFrame enable (every frame) */
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+ hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+
+ /* send on scanline HDMI_AVI_IFRAME_LINE_NUMBER */
+ reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ reg_val &= ~0x3F;
+ reg_val |= HDMI_AVI_IFRAME_LINE_NUMBER;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/*
+ * _sde_hdmi_bridge_set_vs_infoframe() - build and enable the HDMI
+ * Vendor-Specific InfoFrame.
+ * @hdmi: controller handle.
+ * @mode: display mode; modes with no VS infoframe (non-4K/3D) are
+ * silently skipped.
+ */
+static void _sde_hdmi_bridge_set_vs_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 vs_iframe[HDMI_VS_INFOFRAME_BUFFER_SIZE] = {0};
+ u32 reg_val;
+ struct hdmi_vendor_infoframe info;
+ int rc = 0;
+
+ /* returns <0 when the mode needs no vendor infoframe */
+ rc = drm_hdmi_vendor_infoframe_from_display_mode(&info, mode);
+ if (rc < 0) {
+ SDE_DEBUG("don't send vendor infoframe\n");
+ return;
+ }
+ hdmi_vendor_infoframe_pack(&info, vs_iframe, sizeof(vs_iframe));
+
+ reg_val = (info.s3d_struct << 24) | (info.vic << 16) |
+ (vs_iframe[3] << 8) | (vs_iframe[7] << 5) |
+ vs_iframe[2];
+ hdmi_write(hdmi, REG_HDMI_VENSPEC_INFO0, reg_val);
+
+ /* vendor specific info-frame enable (every frame) */
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+ hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+
+ /* send on scanline HDMI_VENDOR_IFRAME_LINE_NUMBER */
+ reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ reg_val &= ~0x3F000000;
+ reg_val |= (HDMI_VENDOR_IFRAME_LINE_NUMBER << 24);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/*
+ * _sde_hdmi_bridge_set_spd_infoframe() - send the Source Product
+ * Description InfoFrame via the GENERIC1 packet engine.
+ * @hdmi: controller handle.
+ * @mode: unused; kept for signature parity with the other
+ * infoframe setters.
+ */
+static void _sde_hdmi_bridge_set_spd_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 spd_iframe[HDMI_SPD_INFOFRAME_BUFFER_SIZE] = {0};
+ u32 packet_payload, packet_control, packet_header;
+ struct hdmi_spd_infoframe info;
+ int i;
+
+ /* Need to query vendor and product name from platform setup */
+ hdmi_spd_infoframe_init(&info, HDMI_DEFAULT_VENDOR_NAME,
+ HDMI_DEFAULT_PRODUCT_NAME);
+ hdmi_spd_infoframe_pack(&info, spd_iframe, sizeof(spd_iframe));
+
+ /* each byte is masked to 7 bits before packing into the regs */
+ packet_header = spd_iframe[0]
+ | LEFT_SHIFT_BYTE(spd_iframe[1] & 0x7f)
+ | LEFT_SHIFT_WORD(spd_iframe[2] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR, packet_header);
+
+ for (i = 0; i < MAX_REG_HDMI_GENERIC1_INDEX; i++) {
+ packet_payload = spd_iframe[3 + i * 4]
+ | LEFT_SHIFT_BYTE(spd_iframe[4 + i * 4] & 0x7f)
+ | LEFT_SHIFT_WORD(spd_iframe[5 + i * 4] & 0x7f)
+ | LEFT_SHIFT_24BITS(spd_iframe[6 + i * 4] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(i), packet_payload);
+ }
+
+ /* trailing two payload bytes go into the last GENERIC1 register */
+ packet_payload = (spd_iframe[27] & 0x7f)
+ | LEFT_SHIFT_BYTE(spd_iframe[28] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(MAX_REG_HDMI_GENERIC1_INDEX),
+ packet_payload);
+
+ /*
+ * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+ * Setup HDMI TX generic packet control
+ * Enable this packet to transmit every frame
+ * Enable HDMI TX engine to transmit Generic packet 1
+ */
+ packet_control = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+/*
+ * _sde_hdmi_bridge_mode_set() - program HDMI timing registers for a mode.
+ * @bridge: bridge being configured.
+ * @mode: requested mode (unused; adjusted_mode is authoritative).
+ * @adjusted_mode: mode actually driven; used for all timings.
+ *
+ * Derives sync start/end positions relative to htotal/vtotal, programs
+ * the F2 (field 2) registers only for interlaced modes, sets sync
+ * polarity flags, and finally emits AVI/VS/SPD infoframes when the
+ * sink is an HDMI (not DVI) monitor.
+ */
+static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ int hstart, hend, vstart, vend;
+ uint32_t frame_ctrl;
+
+ mode = adjusted_mode;
+
+ /* mode->clock is in kHz; hardware wants Hz */
+ hdmi->pixclock = mode->clock * 1000;
+
+ hstart = mode->htotal - mode->hsync_start;
+ hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+ vstart = mode->vtotal - mode->vsync_start - 1;
+ vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+ DRM_DEBUG(
+ "htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+ mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+ hdmi_write(hdmi, REG_HDMI_TOTAL,
+ HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+ HDMI_ACTIVE_HSYNC_START(hstart) |
+ HDMI_ACTIVE_HSYNC_END(hend));
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+ HDMI_ACTIVE_VSYNC_START(vstart) |
+ HDMI_ACTIVE_VSYNC_END(vend));
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+ HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(0) |
+ HDMI_VSYNC_ACTIVE_F2_END(0));
+ }
+
+ frame_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+ DRM_DEBUG("frame_ctrl=%08x\n", frame_ctrl);
+ hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+ /*
+ * Setup info frame
+ * Current drm_edid driver doesn't have all CEA formats defined in
+ * latest CEA-861(CTA-861) spec. So, don't check if mode is CEA mode
+ * in here. Once core framework is updated, the check needs to be
+ * added back.
+ */
+ if (hdmi->hdmi_mode) {
+ _sde_hdmi_bridge_set_avi_infoframe(hdmi, mode);
+ _sde_hdmi_bridge_set_vs_infoframe(hdmi, mode);
+ _sde_hdmi_bridge_set_spd_infoframe(hdmi, mode);
+ DRM_DEBUG("hdmi setup info frame\n");
+ }
+}
+
+static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
+ .pre_enable = _sde_hdmi_bridge_pre_enable,
+ .enable = _sde_hdmi_bridge_enable,
+ .disable = _sde_hdmi_bridge_disable,
+ .post_disable = _sde_hdmi_bridge_post_disable,
+ .mode_set = _sde_hdmi_bridge_mode_set,
+};
+
+
+/*
+ * sde_hdmi_bridge_init() - allocate and attach the HDMI drm_bridge.
+ * @hdmi: controller the bridge drives.
+ *
+ * Allocation is devm-managed, so the failure path only needs the
+ * (empty) destroy hook.  Returns the bridge or ERR_PTR on failure.
+ */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi)
+{
+ struct drm_bridge *bridge = NULL;
+ struct sde_hdmi_bridge *sde_hdmi_bridge;
+ int ret;
+
+ sde_hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
+ sizeof(*sde_hdmi_bridge), GFP_KERNEL);
+ if (!sde_hdmi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sde_hdmi_bridge->hdmi = hdmi;
+
+ bridge = &sde_hdmi_bridge->base;
+ bridge->funcs = &_sde_hdmi_bridge_funcs;
+
+ ret = drm_bridge_attach(hdmi->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ _sde_hdmi_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c
new file mode 100644
index 000000000000..57c79e2aa812
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c
@@ -0,0 +1,227 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+
+/* TODO: copy from drm_edid.c and mdss_hdmi_edid.c. remove if using ELD */
+/* first data block starts at byte 4 of a CEA extension block */
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+/* CEA-861 data block tag codes (bits 7:5 of the block header byte) */
+enum data_block_types {
+ RESERVED,
+ AUDIO_DATA_BLOCK,
+ VIDEO_DATA_BLOCK,
+ VENDOR_SPECIFIC_DATA_BLOCK,
+ SPEAKER_ALLOCATION_DATA_BLOCK,
+ VESA_DTC_DATA_BLOCK,
+ RESERVED2,
+ USE_EXTENDED_TAG
+};
+
+/*
+ * Return a pointer to the first CEA extension block inside @edid, or
+ * NULL when the EDID has no extensions or none is a CEA block.
+ */
+static u8 *_sde_hdmi_edid_find_cea_extension(struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+/*
+ * _sde_hdmi_edid_find_block() - find a CEA data block by tag.
+ * @in_buf: CEA extension block (128 bytes); byte 2 is the DTD offset,
+ * which also bounds the data block collection.
+ * @start_offset: offset to start scanning from.
+ * @type: data_block_types tag to match.
+ * @len: out: payload length of the found block (header excluded).
+ *
+ * Return: pointer to the block's header byte, or NULL if not found.
+ */
+static const u8 *_sde_hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
+ u8 type, u8 *len)
+{
+ /* the start of data block collection, start of Video Data Block */
+ u32 offset = start_offset;
+ u32 dbc_offset = in_buf[2];
+
+ /*
+ * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
+ * collection present.
+ * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
+ * collection present and no DTD data present.
+ */
+ if ((dbc_offset == 0) || (dbc_offset == 4)) {
+ SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+ return NULL;
+ }
+
+ while (offset < dbc_offset) {
+ /* header byte: tag in bits 7:5, payload length in bits 4:0 */
+ u8 block_len = in_buf[offset] & 0x1F;
+
+ if ((offset + block_len <= dbc_offset) &&
+ (in_buf[offset] >> 5) == type) {
+ *len = block_len;
+ SDE_DEBUG("EDID: block=%d found @ 0x%x w/ len=%d\n",
+ type, offset, block_len);
+
+ return in_buf + offset;
+ }
+ offset += 1 + block_len;
+ }
+
+ return NULL;
+}
+
+/*
+ * _sde_hdmi_extract_audio_data_blocks() - cache CEA Audio Data Blocks.
+ * @edid_ctrl: EDID parser context holding the raw EDID.
+ *
+ * Walks the CEA extension's data block collection, copying up to
+ * MAX_NUMBER_ADB audio data block payloads into
+ * edid_ctrl->audio_data_block and accumulating their total size in
+ * edid_ctrl->adb_size.
+ */
+static void _sde_hdmi_extract_audio_data_blocks(
+ struct hdmi_edid_ctrl *edid_ctrl)
+{
+ u8 len = 0;
+ u8 adb_max = 0;
+ const u8 *adb = NULL;
+ u32 offset = DBC_START_OFFSET;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+
+ cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ edid_ctrl->adb_size = 0;
+
+ memset(edid_ctrl->audio_data_block, 0,
+ sizeof(edid_ctrl->audio_data_block));
+
+ do {
+ len = 0;
+ adb = _sde_hdmi_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+ &len);
+
+ if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+ adb_max >= MAX_NUMBER_ADB)) {
+ if (!edid_ctrl->adb_size)
+ SDE_DEBUG("No/Invalid Audio Data Block\n");
+ /*
+ * Must break, not continue: with a non-NULL but
+ * oversized block (or the ADB cap reached),
+ * "continue" left offset unchanged and the
+ * while (adb) condition true, re-finding the
+ * same block forever.
+ */
+ break;
+ }
+
+ memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+ adb + 1, len);
+ offset = (adb - cea) + 1 + len;
+
+ edid_ctrl->adb_size += len;
+ adb_max++;
+ } while (adb);
+}
+
+/*
+ * _sde_hdmi_extract_speaker_allocation_data() - cache the CEA Speaker
+ * Allocation Data Block from the EDID.
+ * @edid_ctrl: EDID parser context; spkr_alloc_data_block/sadb_size are
+ * filled on success, left untouched otherwise.
+ */
+static void _sde_hdmi_extract_speaker_allocation_data(
+ struct hdmi_edid_ctrl *edid_ctrl)
+{
+ u8 len;
+ const u8 *sadb = NULL;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+
+ cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ /* the SADB payload has a fixed length per CEA-861 */
+ sadb = _sde_hdmi_edid_find_block(cea, DBC_START_OFFSET,
+ SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+ if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+ SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+ return;
+ }
+
+ memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+ edid_ctrl->sadb_size = len;
+
+ SDE_DEBUG("EDID: speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+ sadb[1],
+ (sadb[1] & BIT(0)) ? "FL/FR," : "",
+ (sadb[1] & BIT(1)) ? "LFE," : "",
+ (sadb[1] & BIT(2)) ? "FC," : "",
+ (sadb[1] & BIT(3)) ? "RL/RR," : "",
+ (sadb[1] & BIT(4)) ? "RC," : "",
+ (sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+ (sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+}
+
+/*
+ * sde_hdmi_edid_init() - reset the display's EDID parser state.
+ * @display: HDMI display handle.
+ *
+ * Return: 0 on success, -EINVAL if @display is NULL.
+ */
+int sde_hdmi_edid_init(struct sde_hdmi *display)
+{
+ if (!display) {
+ /*
+ * Do not touch display here: the old error message
+ * printed display->name, dereferencing the NULL
+ * pointer it had just tested for.
+ */
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ memset(&display->edid, 0, sizeof(display->edid));
+
+ return 0;
+}
+
+/*
+ * sde_hdmi_free_edid() - release the cached EDID blob.
+ * @display: HDMI display handle.
+ *
+ * Return: 0 on success, -EINVAL if @display is NULL.
+ */
+int sde_hdmi_free_edid(struct sde_hdmi *display)
+{
+ struct hdmi_edid_ctrl *edid_ctrl;
+
+ /* guard like the sibling init/get helpers do */
+ if (!display) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ edid_ctrl = &display->edid;
+
+ /* kfree(NULL) is a no-op; clear the pointer to prevent reuse */
+ kfree(edid_ctrl->edid);
+ edid_ctrl->edid = NULL;
+
+ return 0;
+}
+
+/* Thin wrapper: EDID teardown currently only frees the cached blob. */
+int sde_hdmi_edid_deinit(struct sde_hdmi *display)
+{
+ return sde_hdmi_free_edid(display);
+}
+
+/*
+ * sde_hdmi_get_edid() - read the sink's EDID over DDC and parse it.
+ * @connector: DRM connector used for the DDC transaction.
+ * @display: HDMI display whose edid cache is populated.
+ *
+ * The controller must be enabled for DDC access, so HDMI_CTRL_ENABLE
+ * is set for the read and the previous register value restored after.
+ * On success, also records HDMI-vs-DVI sink type and caches the audio
+ * and speaker-allocation data blocks.
+ */
+void sde_hdmi_get_edid(struct drm_connector *connector,
+ struct sde_hdmi *display)
+{
+ u32 hdmi_ctrl;
+ struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
+ struct hdmi *hdmi = display->ctrl.ctrl;
+
+ /* Read EDID */
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+ edid_ctrl->edid = drm_get_edid(connector, hdmi->i2c);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ if (edid_ctrl->edid) {
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid_ctrl->edid);
+
+ _sde_hdmi_extract_audio_data_blocks(edid_ctrl);
+ _sde_hdmi_extract_speaker_allocation_data(edid_ctrl);
+ }
+}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
new file mode 100644
index 000000000000..f1bff0f08051
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
@@ -0,0 +1,300 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HDMI_REGS_H
+#define _SDE_HDMI_REGS_H
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL (0x00000000)
+#define HDMI_TEST_PATTERN (0x00000010)
+#define HDMI_RANDOM_PATTERN (0x00000014)
+#define HDMI_PKT_BLK_CTRL (0x00000018)
+#define HDMI_STATUS (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL (0x00000020)
+#define HDMI_ACR_PKT_CTRL (0x00000024)
+#define HDMI_VBI_PKT_CTRL (0x00000028)
+#define HDMI_INFOFRAME_CTRL0 (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1 (0x00000030)
+#define HDMI_GEN_PKT_CTRL (0x00000034)
+#define HDMI_ACP (0x0000003C)
+#define HDMI_GC (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2 (0x00000044)
+#define HDMI_ISRC1_0 (0x00000048)
+#define HDMI_ISRC1_1 (0x0000004C)
+#define HDMI_ISRC1_2 (0x00000050)
+#define HDMI_ISRC1_3 (0x00000054)
+#define HDMI_ISRC1_4 (0x00000058)
+#define HDMI_ISRC2_0 (0x0000005C)
+#define HDMI_ISRC2_1 (0x00000060)
+#define HDMI_ISRC2_2 (0x00000064)
+#define HDMI_ISRC2_3 (0x00000068)
+#define HDMI_AVI_INFO0 (0x0000006C)
+#define HDMI_AVI_INFO1 (0x00000070)
+#define HDMI_AVI_INFO2 (0x00000074)
+#define HDMI_AVI_INFO3 (0x00000078)
+#define HDMI_MPEG_INFO0 (0x0000007C)
+#define HDMI_MPEG_INFO1 (0x00000080)
+#define HDMI_GENERIC0_HDR (0x00000084)
+#define HDMI_GENERIC0_0 (0x00000088)
+#define HDMI_GENERIC0_1 (0x0000008C)
+#define HDMI_GENERIC0_2 (0x00000090)
+#define HDMI_GENERIC0_3 (0x00000094)
+#define HDMI_GENERIC0_4 (0x00000098)
+#define HDMI_GENERIC0_5 (0x0000009C)
+#define HDMI_GENERIC0_6 (0x000000A0)
+#define HDMI_GENERIC1_HDR (0x000000A4)
+#define HDMI_GENERIC1_0 (0x000000A8)
+#define HDMI_GENERIC1_1 (0x000000AC)
+#define HDMI_GENERIC1_2 (0x000000B0)
+#define HDMI_GENERIC1_3 (0x000000B4)
+#define HDMI_GENERIC1_4 (0x000000B8)
+#define HDMI_GENERIC1_5 (0x000000BC)
+#define HDMI_GENERIC1_6 (0x000000C0)
+#define HDMI_ACR_32_0 (0x000000C4)
+#define HDMI_ACR_32_1 (0x000000C8)
+#define HDMI_ACR_44_0 (0x000000CC)
+#define HDMI_ACR_44_1 (0x000000D0)
+#define HDMI_ACR_48_0 (0x000000D4)
+#define HDMI_ACR_48_1 (0x000000D8)
+#define HDMI_ACR_STATUS_0 (0x000000DC)
+#define HDMI_ACR_STATUS_1 (0x000000E0)
+#define HDMI_AUDIO_INFO0 (0x000000E4)
+#define HDMI_AUDIO_INFO1 (0x000000E8)
+#define HDMI_CS_60958_0 (0x000000EC)
+#define HDMI_CS_60958_1 (0x000000F0)
+#define HDMI_RAMP_CTRL0 (0x000000F8)
+#define HDMI_RAMP_CTRL1 (0x000000FC)
+#define HDMI_RAMP_CTRL2 (0x00000100)
+#define HDMI_RAMP_CTRL3 (0x00000104)
+#define HDMI_CS_60958_2 (0x00000108)
+#define HDMI_HDCP_CTRL2 (0x0000010C)
+#define HDMI_HDCP_CTRL (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL (0x00000114)
+#define HDMI_HDCP_INT_CTRL (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0 (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1 (0x00000124)
+#define HDMI_HDCP_DDC_STATUS (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0 (0x0000012C)
+#define HDMI_HDCP_RESET (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0 (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1 (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0 (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1 (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3 (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4 (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5 (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6 (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7 (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8 (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9 (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10 (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11 (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12 (0x00000168)
+#define HDMI_VENSPEC_INFO0 (0x0000016C)
+#define HDMI_VENSPEC_INFO1 (0x00000170)
+#define HDMI_VENSPEC_INFO2 (0x00000174)
+#define HDMI_VENSPEC_INFO3 (0x00000178)
+#define HDMI_VENSPEC_INFO4 (0x0000017C)
+#define HDMI_VENSPEC_INFO5 (0x00000180)
+#define HDMI_VENSPEC_INFO6 (0x00000184)
+#define HDMI_HDCP_DEBUG (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01 (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23 (0x000001AC)
+#define HDMI_TMDS_DEBUG (0x000001B4)
+#define HDMI_TMDS_CTL_BITS (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN (0x000001CC)
+#define HDMI_AUDIO_CFG (0x000001D0)
+#define HDMI_DEBUG (0x00000204)
+#define HDMI_USEC_REFTIMER (0x00000208)
+#define HDMI_DDC_CTRL (0x0000020C)
+#define HDMI_DDC_ARBITRATION (0x00000210)
+#define HDMI_DDC_INT_CTRL (0x00000214)
+#define HDMI_DDC_SW_STATUS (0x00000218)
+#define HDMI_DDC_HW_STATUS (0x0000021C)
+#define HDMI_DDC_SPEED (0x00000220)
+#define HDMI_DDC_SETUP (0x00000224)
+#define HDMI_DDC_TRANS0 (0x00000228)
+#define HDMI_DDC_TRANS1 (0x0000022C)
+#define HDMI_DDC_TRANS2 (0x00000230)
+#define HDMI_DDC_TRANS3 (0x00000234)
+#define HDMI_DDC_DATA (0x00000238)
+#define HDMI_HDCP_SHA_CTRL (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS (0x00000240)
+#define HDMI_HDCP_SHA_DATA (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0 (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1 (0x0000024C)
+#define HDMI_HPD_INT_STATUS (0x00000250)
+#define HDMI_HPD_INT_CTRL (0x00000254)
+#define HDMI_HPD_CTRL (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1 (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN (0x00000264)
+#define HDMI_CRC_CTRL (0x00000268)
+#define HDMI_VID_CRC (0x0000026C)
+#define HDMI_AUD_CRC (0x00000270)
+#define HDMI_VBI_CRC (0x00000274)
+#define HDMI_DDC_REF (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV (0x00000288)
+#define HDMI_CEC_CTRL (0x0000028C)
+#define HDMI_CEC_WR_DATA (0x00000290)
+#define HDMI_CEC_RETRANSMIT (0x00000294)
+#define HDMI_CEC_STATUS (0x00000298)
+#define HDMI_CEC_INT (0x0000029C)
+#define HDMI_CEC_ADDR (0x000002A0)
+#define HDMI_CEC_TIME (0x000002A4)
+#define HDMI_CEC_REFTIMER (0x000002A8)
+#define HDMI_CEC_RD_DATA (0x000002AC)
+#define HDMI_CEC_RD_FILTER (0x000002B0)
+#define HDMI_ACTIVE_H (0x000002B4)
+#define HDMI_ACTIVE_V (0x000002B8)
+#define HDMI_ACTIVE_V_F2 (0x000002BC)
+#define HDMI_TOTAL (0x000002C0)
+#define HDMI_V_TOTAL_F2 (0x000002C4)
+#define HDMI_FRAME_CTRL (0x000002C8)
+#define HDMI_AUD_INT (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL (0x000002D0)
+#define HDMI_PHY_CTRL (0x000002D4)
+#define HDMI_CEC_WR_RANGE (0x000002DC)
+#define HDMI_CEC_RD_RANGE (0x000002E0)
+#define HDMI_VERSION (0x000002E4)
+#define HDMI_BIST_ENABLE (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN (0x000002F8)
+#define HDMI_INTF_CONFIG (0x000002FC)
+#define HDMI_HSYNC_CTL (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0 (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1 (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0 (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1 (0x00000310)
+#define HDMI_DISPLAY_V_START_F0 (0x00000314)
+#define HDMI_DISPLAY_V_START_F1 (0x00000318)
+#define HDMI_DISPLAY_V_END_F0 (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1 (0x00000320)
+#define HDMI_ACTIVE_V_START_F0 (0x00000324)
+#define HDMI_ACTIVE_V_START_F1 (0x00000328)
+#define HDMI_ACTIVE_V_END_F0 (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1 (0x00000330)
+#define HDMI_DISPLAY_HCTL (0x00000334)
+#define HDMI_ACTIVE_HCTL (0x00000338)
+#define HDMI_HSYNC_SKEW (0x0000033C)
+#define HDMI_POLARITY_CTL (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS (0x0000034C)
+#define HDMI_TPG_RECTANGLE (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES (0x00000358)
+#define HDMI_TPG_RGB_MAPPING (0x0000035C)
+#define HDMI_CEC_COMPL_CTL (0x00000360)
+#define HDMI_CEC_RD_START_RANGE (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG (0x00000370)
+#define HDMI_INTERNAL_TIMING_MODE (0x00000374)
+#define HDMI_CTRL_SW_RESET (0x00000378)
+#define HDMI_CTRL_AUDIO_RESET (0x0000037C)
+#define HDMI_SCRATCH (0x00000380)
+#define HDMI_CLK_CTRL (0x00000384)
+#define HDMI_CLK_ACTIVE (0x00000388)
+#define HDMI_VBI_CFG (0x0000038C)
+#define HDMI_DDC_INT_CTRL0 (0x00000430)
+#define HDMI_DDC_INT_CTRL1 (0x00000434)
+#define HDMI_DDC_INT_CTRL2 (0x00000438)
+#define HDMI_DDC_INT_CTRL3 (0x0000043C)
+#define HDMI_DDC_INT_CTRL4 (0x00000440)
+#define HDMI_DDC_INT_CTRL5 (0x00000444)
+#define HDMI_HDCP2P2_DDC_CTRL (0x0000044C)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL (0x00000450)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL2 (0x00000454)
+#define HDMI_HDCP2P2_DDC_STATUS (0x00000458)
+#define HDMI_SCRAMBLER_STATUS_DDC_CTRL (0x00000464)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL (0x00000468)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 (0x0000046C)
+#define HDMI_SCRAMBLER_STATUS_DDC_STATUS (0x00000470)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS (0x00000474)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 (0x00000478)
+#define HDMI_HW_DDC_CTRL (0x000004CC)
+#define HDMI_HDCP2P2_DDC_SW_TRIGGER (0x000004D0)
+#define HDMI_HDCP_STATUS (0x00000500)
+#define HDMI_HDCP_INT_CTRL2 (0x00000504)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0 (0x00000000)
+#define HDMI_PHY_ANA_CFG1 (0x00000004)
+#define HDMI_PHY_PD_CTRL0 (0x00000010)
+#define HDMI_PHY_PD_CTRL1 (0x00000014)
+#define HDMI_PHY_BIST_CFG0 (0x00000034)
+#define HDMI_PHY_BIST_PATN0 (0x0000003C)
+#define HDMI_PHY_BIST_PATN1 (0x00000040)
+#define HDMI_PHY_BIST_PATN2 (0x00000044)
+#define HDMI_PHY_BIST_PATN3 (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB (0x000000FC)
+#define QFPROM_RAW_VERSION_4 (0x000000A8)
+#define SEC_CTRL_HW_VERSION (0x00006000)
+#define HDCP_KSV_LSB (0x000060D8)
+#define HDCP_KSV_MSB (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION (0x50010000)
+
+#define LPASS_LPAIF_RDDMA_CTL0 (0xFE152000)
+#define LPASS_LPAIF_RDDMA_PER_CNT0 (0x00000014)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+
+/* TX major versions */
+#define HDMI_TX_VERSION_4 4
+#define HDMI_TX_VERSION_3 3
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_UPDATE_1 0x11
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS_0 0x40
+#define HDMI_SCDC_STATUS_FLAGS_1 0x41
+#define HDMI_SCDC_ERR_DET_0_L 0x50
+#define HDMI_SCDC_ERR_DET_0_H 0x51
+#define HDMI_SCDC_ERR_DET_1_L 0x52
+#define HDMI_SCDC_ERR_DET_1_H 0x53
+#define HDMI_SCDC_ERR_DET_2_L 0x54
+#define HDMI_SCDC_ERR_DET_2_H 0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56
+
+/* HDCP secure registers directly accessible to HLOS since HDMI controller
+ * version major version 4.0
+ */
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x00000004)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x00000008)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x0000000C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x00000010)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x00000014)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x00000018)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x0000001C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x00000020)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL (0x00000024)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA (0x00000028)
+
+#endif /* _SDE_HDMI_REGS_H */
+
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ba5921149ac3..7915562057d6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -234,6 +234,11 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
hdmi->hdcp_ctrl = NULL;
}
+ /* Making it false currently to avoid ifdefs;
+  * will get rid of this flag when HDCP SW
+  * support gets added to the HDMI DRM driver.
+  */
+ hdmi->is_hdcp_supported = false;
return hdmi;
@@ -389,7 +394,34 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
+/* TODO */
+static const char *pwr_reg_names_8x98[] = {"core-vdda", "core-vcc"};
+/* TODO */
+static const char *hpd_reg_names_8x98[] = {"hpd-gdsc", "hpd-5v"};
+
+static const char *pwr_clk_names_8x98[] = {"core_extp_clk",
+ "hpd_alt_iface_clk"};
+
+static const char *hpd_clk_names_8x98[] = {"hpd_iface_clk",
+ "hpd_core_clk",
+ "hpd_mdp_core_clk",
+ "mnoc_clk",
+ "hpd_misc_ahb_clk",
+ "hpd_bus_clk"};
+
+static unsigned long hpd_clk_freq_8x98[] = {0, 19200000, 0, 0, 0, 0};
+
+static struct hdmi_platform_config hdmi_tx_8998_config = {
+ .phy_init = NULL,
+ HDMI_CFG(pwr_reg, 8x98),
+ HDMI_CFG(hpd_reg, 8x98),
+ HDMI_CFG(pwr_clk, 8x98),
+ HDMI_CFG(hpd_clk, 8x98),
+ .hpd_freq = hpd_clk_freq_8x98,
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8998_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -425,7 +457,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
const struct of_device_id *match;
-
match = of_match_node(dt_match, of_node);
if (match && match->data) {
hdmi_cfg = (struct hdmi_platform_config *)match->data;
@@ -443,12 +474,13 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
-
+ hdmi_cfg->hpd5v_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd5v");
#else
static struct hdmi_platform_config config = {};
static const char *hpd_clk_names[] = {
"core_clk", "master_iface_clk", "slave_iface_clk",
};
+
if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index e22ddcd51248..9ce8ff513210 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -70,7 +70,7 @@ struct hdmi {
struct drm_encoder *encoder;
bool hdmi_mode; /* are we in hdmi mode? */
-
+ bool is_hdcp_supported;
int irq;
struct workqueue_struct *workq;
@@ -110,7 +110,9 @@ struct hdmi_platform_config {
int pwr_clk_cnt;
/* gpio's: */
- int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+ int ddc_clk_gpio, ddc_data_gpio;
+ int hpd_gpio, mux_en_gpio;
+ int mux_sel_gpio, hpd5v_gpio;
int mux_lpm_gpio;
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 10c45700aefe..aef20f76bf02 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -110,6 +111,8 @@ static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+#define REG_HDMI_INFOFRAME_CTRL1 0x00000030
+
#define REG_HDMI_GEN_PKT_CTRL 0x00000034
#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
@@ -149,6 +152,7 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
#define REG_HDMI_GENERIC1_HDR 0x000000a4
+#define MAX_REG_HDMI_GENERIC1_INDEX 6
static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbd9cc4daf2e..6eab7d0cf6b5 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index d5d94575fa1b..6688e79cc88e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 6ac9aa165768..caad2be87ae4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -40,7 +40,7 @@ struct mdp4_crtc {
uint32_t x, y;
/* next cursor to scan-out: */
- uint32_t next_iova;
+ uint64_t next_iova;
struct drm_gem_object *next_bo;
/* current cursor being scanned out: */
@@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -387,25 +387,28 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
- uint32_t iova = mdp4_crtc->cursor.next_iova;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+ &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ /* FIXME: Make sure iova < 32 bits */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ lower_32_bits(iova));
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
- mdp4_kms->blank_cursor_iova);
+ lower_32_bits(mdp4_kms->blank_cursor_iova));
}
/* and drop the iova ref + obj rev when done scanning out: */
@@ -432,7 +435,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
- uint32_t iova;
+ uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -449,7 +452,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index f0f66ac4b2fb..b6cddee0cf34 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
@@ -177,18 +178,35 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
unsigned i;
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
for (i = 0; i < priv->num_crtcs; i++)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_destroy(aspace);
+ }
}
static void mdp4_destroy(struct msm_kms *kms)
{
+ struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
- struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
if (mdp4_kms->blank_cursor_bo)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu,
+ iommu_ports, ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(aspace);
+ }
+
kfree(mdp4_kms);
}
@@ -408,7 +426,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -497,26 +515,30 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
+ struct msm_mmu *mmu = msm_iommu_new(&pdev->dev, config->iommu);
+
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ mmu, "mdp4", 0x1000, 0xffffffff);
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret)
goto fail;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
- }
-
- mdp4_kms->id = msm_register_mmu(dev, mmu);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
+ aspace = NULL;
}
ret = modeset_init(mdp4_kms);
@@ -535,7 +557,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
@@ -562,6 +584,7 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
/* TODO */
config.max_clk = 266667000;
config.iommu = iommu_domain_alloc(msm_iommu_get_bus(&dev->dev));
+
#else
if (cpu_is_apq8064())
config.max_clk = 266667000;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..5cf03e58a4f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,8 +33,6 @@ struct mdp4_kms {
int rev;
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
-
void __iomem *mmio;
struct regulator *dsi_pll_vdda;
@@ -45,12 +43,13 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
- uint32_t blank_cursor_iova;
+ uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 30d57e74c42f..bc1ece2a5b7e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}
@@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c37da9c61e29..b275ce11b24b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index bb1225aa2f75..89305ad3cde2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -547,7 +547,7 @@ fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
- return NULL;
+ return ERR_PTR(ret);
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 7f9f4ac88029..422a9a78f3b2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -509,7 +509,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, cursor_addr, stride;
+ uint32_t blendcfg, stride;
+ uint64_t cursor_addr;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
@@ -536,7 +537,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index f7aebf5516ce..e4e69ebd116e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -18,6 +18,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
@@ -130,13 +131,13 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
mdp5_irq_domain_fini(mdp5_kms);
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
}
if (mdp5_kms->ctlm)
@@ -474,7 +475,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_cfg *config;
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
uint32_t major, minor;
int i, ret;
@@ -595,34 +596,34 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_smmu_new(&pdev->dev,
+ struct msm_mmu *mmu = msm_smmu_new(&pdev->dev,
MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
iommu_domain_free(config->platform.iommu);
+ }
+
+ aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+ mmu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
+ mdp5_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
ARRAY_SIZE(iommu_ports));
if (ret) {
- dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
- mmu->funcs->destroy(mmu);
+ dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ ret);
goto fail;
}
} else {
- dev_info(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- mmu = NULL;
- }
- mdp5_kms->mmu = mmu;
-
- mdp5_kms->id = msm_register_mmu(dev, mmu);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
+ dev_info(&pdev->dev,
+ "no iommu, fallback to phys contig buffers for scanout\n");
+ aspace = NULL;
}
ret = modeset_init(mdp5_kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 84f65d415598..c1aa86d21416 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -36,8 +36,7 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 81cd49045ffc..873ab11d34d2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -260,7 +260,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -274,7 +274,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -400,13 +400,13 @@ static void set_scanout_locked(struct drm_plane *plane,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 0aec1ac1f6d0..452e3518f98b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 5d04b0384215..f821a81c53a6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -23,6 +23,8 @@
#include "sde_wb.h"
#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+#include "msm_gem.h"
+#include "msm_mmu.h"
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
@@ -38,42 +40,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = msm_atomic_commit,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
-
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
- return -EINVAL;
-
- priv->mmus[idx] = mmu;
-
- return idx;
-}
-
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx;
-
- if (priv->num_mmus <= 0) {
- dev_err(dev->dev, "invalid num mmus %d\n", priv->num_mmus);
- return;
- }
-
- idx = priv->num_mmus - 1;
-
- /* only support reverse-order deallocation */
- if (priv->mmus[idx] != mmu) {
- dev_err(dev->dev, "unexpected mmu at idx %d\n", idx);
- return;
- }
-
- --priv->num_mmus;
- priv->mmus[idx] = 0;
-}
-
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -588,29 +554,65 @@ static void load_gpu(struct drm_device *dev)
}
#endif
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
{
struct msm_file_private *ctx;
+ if (!priv || !priv->gpu)
+ return NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->aspace = msm_gem_address_space_create_instance(
+ priv->gpu->aspace->mmu, "gpu", 0x100000000, 0x1ffffffff);
+
+ if (IS_ERR(ctx->aspace)) {
+ int ret = PTR_ERR(ctx->aspace);
+
+ /*
+ * If dynamic domains are not supported, everybody uses the
+ * same pagetable
+ */
+ if (ret != -EOPNOTSUPP) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
+ ctx->aspace = priv->gpu->aspace;
+ }
+
+ ctx->aspace->mmu->funcs->attach(ctx->aspace->mmu, NULL, 0);
+ return ctx;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx = NULL;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!dev || !dev->dev_private)
+ return -ENODEV;
+
+ priv = dev->dev_private;
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = setup_pagetable(priv);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
file->driver_priv = ctx;
- if (dev && dev->dev_private) {
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms;
+ kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->postopen)
+ kms->funcs->postopen(kms, file);
- kms = priv->kms;
- if (kms && kms->funcs && kms->funcs->postopen)
- kms->funcs->postopen(kms, file);
- }
return 0;
}
@@ -633,8 +635,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
kms->funcs->postclose(kms, file);
mutex_lock(&dev->struct_mutex);
- if (ctx == priv->lastctx)
- priv->lastctx = NULL;
+ if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
+ ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
+ msm_gem_address_space_put(ctx->aspace);
+ }
mutex_unlock(&dev->struct_mutex);
kfree(ctx);
@@ -833,6 +837,13 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
+static int msm_snapshot_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ return msm_snapshot_write(priv->gpu, m);
+}
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -897,11 +908,22 @@ static int show_locked(struct seq_file *m, void *arg)
return ret;
}
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+
+ return show(dev, m);
+}
+
static struct drm_info_list msm_debugfs_list[] = {
{"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
+ { "snapshot", show_unlocked, 0, msm_snapshot_show },
};
static int late_init_minor(struct drm_minor *minor)
@@ -975,14 +997,23 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout , bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int index = FENCE_RING(fence);
+ uint32_t submitted;
int ret;
- if (!priv->gpu)
- return 0;
+ if (!gpu)
+ return -ENXIO;
+
+ if (index > MSM_GPU_MAX_RINGS || index >= gpu->nr_rings ||
+ !gpu->rb[index])
+ return -EINVAL;
+
+ submitted = gpu->funcs->submitted_fence(gpu, gpu->rb[index]);
- if (fence > priv->gpu->submitted_fence) {
+ if (fence > submitted) {
DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, priv->gpu->submitted_fence);
+ fence, submitted);
return -EINVAL;
}
@@ -1012,7 +1043,7 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
if (ret == 0) {
DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
+ fence, priv->completed_fence[index]);
ret = -ETIMEDOUT;
} else if (ret != -ERESTARTSYS) {
ret = 0;
@@ -1026,12 +1057,13 @@ int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ int index = FENCE_RING(fence);
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
- } else if (fence > priv->completed_fence) {
+ } else if (fence > priv->completed_fence[index]) {
cb->fence = fence;
list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
@@ -1046,21 +1078,21 @@ int msm_queue_fence_cb(struct drm_device *dev,
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fence_cb *cb, *tmp;
+ int index = FENCE_RING(fence);
- mutex_lock(&dev->struct_mutex);
- priv->completed_fence = max(fence, priv->completed_fence);
-
- while (!list_empty(&priv->fence_cbs)) {
- struct msm_fence_cb *cb;
-
- cb = list_first_entry(&priv->fence_cbs,
- struct msm_fence_cb, work.entry);
+ if (index >= MSM_GPU_MAX_RINGS)
+ return;
- if (cb->fence > priv->completed_fence)
- break;
+ mutex_lock(&dev->struct_mutex);
+ priv->completed_fence[index] = max(fence, priv->completed_fence[index]);
- list_del_init(&cb->work.entry);
- queue_work(priv->wq, &cb->work);
+ list_for_each_entry_safe(cb, tmp, &priv->fence_cbs, work.entry) {
+ if (COMPARE_FENCE_LTE(cb->fence,
+ priv->completed_fence[index])) {
+ list_del_init(&cb->work.entry);
+ queue_work(priv->wq, &cb->work);
+ }
}
mutex_unlock(&dev->struct_mutex);
@@ -1165,16 +1197,28 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
+ struct msm_file_private *ctx = file->driver_priv;
int ret = 0;
- if (args->pad)
+ if (args->flags & ~MSM_INFO_FLAGS)
+ return -EINVAL;
+
+ if (!ctx || !ctx->aspace)
return -EINVAL;
obj = drm_gem_object_lookup(dev, file, args->handle);
if (!obj)
return -ENOENT;
- args->offset = msm_gem_mmap_offset(obj);
+ if (args->flags & MSM_INFO_IOVA) {
+ uint64_t iova;
+
+ ret = msm_gem_get_iova(obj, ctx->aspace, &iova);
+ if (!ret)
+ args->offset = iova;
+ } else {
+ args->offset = msm_gem_mmap_offset(obj);
+ }
drm_gem_object_unreference_unlocked(obj);
@@ -1185,13 +1229,24 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_wait_fence *args = data;
- ktime_t timeout = to_ktime(args->timeout);
+ ktime_t timeout;
+
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
+ /*
+ * Special case - if the user passes a timeout of 0.0 just return the
+ * current fence status (0 for retired, -EBUSY for active) with no
+ * accompanying kernel logs. This can be a poor man's way of
+ * determining the status of a fence.
+ */
+ if (args->timeout.tv_sec == 0 && args->timeout.tv_nsec == 0)
+ return msm_wait_fence(dev, args->fence, NULL, true);
+
+ timeout = to_ktime(args->timeout);
return msm_wait_fence(dev, args->fence, &timeout, true);
}
@@ -1777,7 +1832,13 @@ static int msm_pdev_probe(struct platform_device *pdev)
component_match_add(&pdev->dev, &match, compare_dev, dev);
}
#endif
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+ /* on all devices that I am aware of, iommu's which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
ret = msm_add_master_component(&pdev->dev, match);
return ret;
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index a2678882d57a..d8a4c34e9be0 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,7 +32,8 @@
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
-#include <linux/mdss_io_util.h>
+#include <linux/of_device.h>
+#include <linux/sde_io_util.h>
#include <asm/sizes.h>
#include <linux/kthread.h>
@@ -63,6 +64,8 @@ struct msm_mmu;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct msm_gem_address_space;
+struct msm_gem_vma;
#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
#define MAX_CRTCS 8
@@ -72,11 +75,7 @@ struct msm_gem_submit;
#define MAX_CONNECTORS 8
struct msm_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
- int dummy;
+ struct msm_gem_address_space *aspace;
};
enum msm_mdp_plane_property {
@@ -248,6 +247,8 @@ struct msm_drm_commit {
struct kthread_worker worker;
};
+#define MSM_GPU_MAX_RINGS 4
+
struct msm_drm_private {
struct msm_kms *kms;
@@ -274,11 +275,12 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
- struct msm_file_private *lastctx;
struct drm_fb_helper *fbdev;
- uint32_t next_fence, completed_fence;
+ uint32_t next_fence[MSM_GPU_MAX_RINGS];
+ uint32_t completed_fence[MSM_GPU_MAX_RINGS];
+
wait_queue_head_t fence_event;
struct msm_rd_state *rd;
@@ -296,9 +298,13 @@ struct msm_drm_private {
uint32_t pending_crtcs;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces.. currently this is fixed per # of
+ * iommu's. Ie. one for display block and one for gpu block.
+ * Eventually, to do per-process gpu pagetables, we'll want one
+ * of these per-process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
struct drm_plane *planes[MAX_PLANES];
@@ -345,6 +351,31 @@ struct msm_format {
uint32_t pixel_format;
};
+/*
+ * Some GPU targets can support multiple ringbuffers and preempt between them.
+ * In order to do this without massive API changes we will steal two bits from
+ * the top of the fence and use them to identify the ringbuffer, (0x00000001 for
+ * ring 0, 0x40000001 for ring 1, 0x80000001 for ring 2, etc). If you are going
+ * to do a fence comparison you have to make sure you are only comparing
+ * against fences from the same ring, but since fences within a ringbuffer are
+ * still contiguous you can still use straight comparisons (i.e. 0x40000001 is
+ * older than 0x40000002). Mathematically there will be 0x3FFFFFFF timestamps
+ * per ring or ~103 days of 120 interrupts per second (two interrupts per frame
+ * at 60 FPS).
+ */
+#define FENCE_RING(_fence) ((_fence >> 30) & 3)
+#define FENCE(_ring, _fence) ((((_ring) & 3) << 30) | ((_fence) & 0x3FFFFFFF))
+
+static inline bool COMPARE_FENCE_LTE(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a <= b);
+}
+
+static inline bool COMPARE_FENCE_LT(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a < b);
+}
+
/* callback from wq once fence has passed: */
struct msm_fence_cb {
struct work_struct work;
@@ -362,15 +393,34 @@ void __msm_fence_worker(struct work_struct *work);
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end);
+
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name);
+
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -379,13 +429,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -416,9 +469,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -502,7 +558,8 @@ u32 msm_readl(const void __iomem *addr);
static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
- return priv->completed_fence >= fence;
+
+ return priv->completed_fence[FENCE_RING(fence)] >= fence;
}
static inline int align_pitch(int width, int bpp)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index dca4de382581..a3f0392c2f88 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -92,15 +92,16 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
- uint32_t iova;
+ uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
- DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
@@ -108,21 +109,30 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+/* FIXME: Leave this as a uint32_t and just return the lower 32 bits? */
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ uint64_t iova;
+
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+
+ iova = msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+
+ /* FIXME: Make sure it is < 32 bits */
+ return lower_32_bits(iova);
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3f6ec077b51d..cf127250c0d0 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -85,7 +85,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- uint32_t paddr;
+ uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
@@ -160,11 +160,12 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
- dev->mode_config.fb_base = paddr;
+ /* FIXME: Verify paddr < 32 bits? */
+ dev->mode_config.fb_base = lower_32_bits(paddr);
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
- fbi->fix.smem_start = paddr;
+ fbi->fix.smem_start = lower_32_bits(paddr);
fbi->fix.smem_len = fbdev->bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 351985327214..63128d11767e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -24,6 +24,11 @@
#include "msm_gpu.h"
#include "msm_mmu.h"
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+ return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -268,6 +273,67 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
+static void
+put_iova(struct drm_gem_object *obj)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain, *tmp;
+
+ WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ if (iommu_present(&platform_bus_type)) {
+ msm_gem_unmap_vma(domain->aspace, domain,
+ msm_obj->sgt, get_dmabuf_ptr(obj));
+ }
+
+ obj_remove_domain(domain);
+ }
+}
+
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
+ }
+
+ return NULL;
+}
+
+#ifndef IOMMU_PRIV
+#define IOMMU_PRIV 0
+#endif
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -275,69 +341,64 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
int ret = 0;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- if (obj->import_attach && mmu->funcs->map_dma_buf) {
- ret = mmu->funcs->map_dma_buf(mmu, msm_obj->sgt,
- obj->import_attach->dmabuf,
- DMA_BIDIRECTIONAL);
- if (ret) {
- DRM_ERROR("Unable to map dma buf\n");
- return ret;
- }
- } else {
- ret = mmu->funcs->map_sg(mmu, msm_obj->sgt,
- DMA_BIDIRECTIONAL);
- }
-
- if (!ret)
- msm_obj->domain[id].iova =
- sg_dma_address(msm_obj->sgt->sgl);
- } else {
- WARN_ONCE(1, "physical address being used\n");
- msm_obj->domain[id].iova = physaddr(obj);
+ *iova = (uint64_t) physaddr(obj);
+ return 0;
+ }
+
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ return PTR_ERR(pages);
}
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ get_dmabuf_ptr(obj), msm_obj->flags);
}
if (!ret)
- *iova = msm_obj->domain[id].iova;
+ *iova = domain->iova;
+ else
+ obj_remove_domain(domain);
return ret;
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
int ret;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+ if (domain) {
+ *iova = domain->iova;
return 0;
}
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
@@ -345,14 +406,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+ WARN_ON(!domain);
+
+ return domain ? domain->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -486,14 +551,21 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
uint64_t off = drm_vma_node_start(&obj->vma_node);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+ seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
msm_obj->read_fence, msm_obj->write_fence,
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size);
+ off, msm_obj->vaddr);
+
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
+
+ seq_puts(m, "\n");
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -517,9 +589,7 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
@@ -528,22 +598,12 @@ void msm_gem_free_object(struct drm_gem_object *obj)
list_del(&msm_obj->mm_list);
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- if (obj->import_attach && mmu->funcs->unmap_dma_buf) {
- mmu->funcs->unmap_dma_buf(mmu, msm_obj->sgt,
- obj->import_attach->dmabuf,
- DMA_BIDIRECTIONAL);
- } else
- mmu->funcs->unmap_sg(mmu, msm_obj->sgt,
- DMA_BIDIRECTIONAL);
- }
- }
+ put_iova(obj);
if (obj->import_attach) {
if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
+ dma_buf_vunmap(obj->import_attach->dmabuf,
+ msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
@@ -597,7 +657,6 @@ static int msm_gem_new_impl(struct drm_device *dev,
{
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -619,16 +678,16 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
-
- msm_obj = kzalloc(sz, GFP_KERNEL);
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
if (!msm_obj)
return -ENOMEM;
- if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
+
+ if (!IS_ERR(domain))
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
@@ -636,6 +695,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
reservation_object_init(msm_obj->resv);
INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->domains);
+
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 2e4ae6b1c5d0..ac46c473791f 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -18,12 +18,38 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+struct msm_gem_aspace_ops {
+ int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
+ struct sg_table *sgt, void *priv, unsigned int flags);
+
+ void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
+ struct sg_table *sgt, void *priv);
+
+ void (*destroy)(struct msm_gem_address_space *);
+};
+
+struct msm_gem_address_space {
+ const char *name;
+ struct msm_mmu *mmu;
+ const struct msm_gem_aspace_ops *ops;
+ struct kref kref;
+};
+
+struct msm_gem_vma {
+ /* Node used by the GPU address space, but not the SDE address space */
+ struct drm_mm_node node;
+ struct msm_gem_address_space *aspace;
+ uint64_t iova;
+ struct list_head list;
+};
+
struct msm_gem_object {
struct drm_gem_object base;
@@ -52,9 +78,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct {
- dma_addr_t iova;
- } domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -94,24 +118,25 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
*/
struct msm_gem_submit {
struct drm_device *dev;
- struct msm_gpu *gpu;
+ struct msm_gem_address_space *aspace;
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
uint32_t fence;
+ int ring;
bool valid;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
uint32_t type;
uint32_t size; /* in dwords */
- uint32_t iova;
+ uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
} cmd[MAX_CMDS];
struct {
uint32_t flags;
struct msm_gem_object *obj;
- uint32_t iova;
+ uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1847f83b1e33..0566cefaae81 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,7 +34,7 @@ static inline void __user *to_user_ptr(u64 address)
}
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, int nr)
+ struct msm_gem_address_space *aspace, int nr)
{
struct msm_gem_submit *submit;
int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
@@ -42,7 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
- submit->gpu = gpu;
+ submit->aspace = aspace;
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
@@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -141,7 +142,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -162,7 +163,7 @@ retry:
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
if (slow_locked == i)
slow_locked = -1;
@@ -180,7 +181,7 @@ retry:
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->aspace, &iova);
/* this would break the logic in the fail path.. there is no
* reason for this to happen, but just to be on the safe side
@@ -229,7 +230,7 @@ fail:
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -275,7 +276,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
to_user_ptr(relocs + (i * sizeof(submit_reloc)));
- uint32_t iova, off;
+ uint64_t iova;
+ uint32_t off;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -347,17 +349,19 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
- if (args->pipe != MSM_PIPE_3D0)
+ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
return -EINVAL;
gpu = priv->gpu;
+ if (!gpu)
+ return -ENXIO;
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
- submit = submit_create(dev, gpu, args->nr_bos);
+ submit = submit_create(dev, ctx->aspace, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
goto out;
@@ -376,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
void __user *userptr =
to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
- uint32_t iova;
+ uint64_t iova;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
@@ -408,8 +412,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- if ((submit_cmd.size + submit_cmd.submit_offset) >=
- msm_obj->base.size) {
+ if (!(submit_cmd.size) ||
+ ((submit_cmd.size + submit_cmd.submit_offset) >
+ msm_obj->base.size)) {
DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;
@@ -431,7 +436,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- ret = msm_gpu_submit(gpu, submit, ctx);
+ /* Clamp the user submitted ring to the range of available rings */
+ submit->ring = clamp_t(uint32_t,
+ (args->flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT,
+ 0, gpu->nr_rings - 1);
+
+ ret = msm_gpu_submit(gpu, submit);
args->fence = submit->fence;
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 000000000000..7ca96831a9b3
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+ struct msm_gem_address_space *aspace = container_of(kref,
+ struct msm_gem_address_space, kref);
+
+ if (aspace->ops->destroy)
+ aspace->ops->destroy(aspace);
+
+ kfree(aspace);
+}
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+ if (aspace)
+ kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
+/* SDE address space operations */
+static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv)
+{
+ struct dma_buf *buf = priv;
+
+ if (buf)
+ aspace->mmu->funcs->unmap_dma_buf(aspace->mmu,
+ sgt, buf, DMA_BIDIRECTIONAL);
+ else
+ aspace->mmu->funcs->unmap_sg(aspace->mmu, sgt,
+ DMA_BIDIRECTIONAL);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
+}
+
+
+static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ struct dma_buf *buf = priv;
+ int ret;
+
+ if (buf)
+ ret = aspace->mmu->funcs->map_dma_buf(aspace->mmu, sgt, buf,
+ DMA_BIDIRECTIONAL);
+ else
+ ret = aspace->mmu->funcs->map_sg(aspace->mmu, sgt,
+ DMA_BIDIRECTIONAL);
+
+ if (!ret)
+ vma->iova = sg_dma_address(sgt->sgl);
+
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
+
+ return ret;
+}
+
+static const struct msm_gem_aspace_ops smmu_aspace_ops = {
+ .map = smmu_aspace_map_vma,
+ .unmap = smmu_aspace_unmap_vma,
+};
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name)
+{
+ struct msm_gem_address_space *aspace;
+
+ if (!mmu)
+ return ERR_PTR(-EINVAL);
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ aspace->name = name;
+ aspace->mmu = mmu;
+ aspace->ops = &smmu_aspace_ops;
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
+
+/* GPU address space operations */
+struct msm_iommu_aspace {
+ struct msm_gem_address_space base;
+ struct drm_mm mm;
+};
+
+#define to_iommu_aspace(aspace) \
+ ((struct msm_iommu_aspace *) \
+ container_of(aspace, struct msm_iommu_aspace, base))
+
+static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+ if (!vma->iova)
+ return;
+
+ if (aspace->mmu)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt);
+
+ drm_mm_remove_node(&vma->node);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
+}
+
+static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
+ unsigned int flags)
+{
+ struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+ size_t size = 0;
+ struct scatterlist *sg;
+ int ret, i;
+ int iommu_flags = IOMMU_READ;
+
+ if (!(flags & MSM_BO_GPU_READONLY))
+ iommu_flags |= IOMMU_WRITE;
+
+ if (flags & MSM_BO_PRIVILEGED)
+ iommu_flags |= IOMMU_PRIV;
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node)))
+ return 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ size += sg->length + sg->offset;
+
+ ret = drm_mm_insert_node(&local->mm, &vma->node, size >> PAGE_SHIFT,
+ 0, DRM_MM_SEARCH_DEFAULT);
+ if (ret)
+ return ret;
+
+ vma->iova = vma->node.start << PAGE_SHIFT;
+
+ if (aspace->mmu)
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ iommu_flags);
+
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
+
+ return ret;
+}
+
+static void iommu_aspace_destroy(struct msm_gem_address_space *aspace)
+{
+ struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
+
+ drm_mm_takedown(&local->mm);
+ aspace->mmu->funcs->destroy(aspace->mmu);
+}
+
+static const struct msm_gem_aspace_ops msm_iommu_aspace_ops = {
+ .map = iommu_aspace_map_vma,
+ .unmap = iommu_aspace_unmap_vma,
+ .destroy = iommu_aspace_destroy,
+};
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+ uint64_t start, uint64_t end)
+{
+ struct msm_iommu_aspace *local;
+
+ if (!mmu)
+ return ERR_PTR(-EINVAL);
+
+ local = kzalloc(sizeof(*local), GFP_KERNEL);
+ if (!local)
+ return ERR_PTR(-ENOMEM);
+
+ drm_mm_init(&local->mm, (start >> PAGE_SHIFT),
+ (end >> PAGE_SHIFT) - 1);
+
+ local->base.name = name;
+ local->base.mmu = mmu;
+ local->base.ops = &msm_iommu_aspace_ops;
+
+ kref_init(&local->base.kref);
+
+ return &local->base;
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ if (aspace && aspace->ops->map)
+ return aspace->ops->map(aspace, vma, sgt, priv, flags);
+
+ return -EINVAL;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv)
+{
+ if (aspace && aspace->ops->unmap)
+ aspace->ops->unmap(aspace, vma, sgt, priv);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ const char *name)
+{
+ struct msm_mmu *mmu = msm_iommu_new(dev, domain);
+
+ if (IS_ERR(mmu))
+ return (struct msm_gem_address_space *) mmu;
+
+ return msm_gem_address_space_new(mmu, name,
+ domain->geometry.aperture_start,
+ domain->geometry.aperture_end);
+}
+
+/* Create a new dynamic instance */
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end)
+{
+ struct msm_mmu *child = msm_iommu_new_dynamic(parent);
+
+ if (IS_ERR(child))
+ return (struct msm_gem_address_space *) child;
+
+ return msm_gem_address_space_new(child, name, start, end);
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6b02ada6579a..3176f301e7a8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -90,21 +90,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->active_level];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
- clk_prepare(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
- if (rate_clk && gpu->fast_rate)
- clk_set_rate(rate_clk, gpu->fast_rate);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 19200000);
+
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ clk_prepare(gpu->grp_clks[i]);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -113,24 +112,23 @@ static int enable_clk(struct msm_gpu *gpu)
static int disable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->nr_pwrlevels - 1];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
- if (rate_clk && gpu->slow_rate)
- clk_set_rate(rate_clk, gpu->slow_rate);
-
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
+
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 0);
+
return 0;
}
@@ -138,8 +136,9 @@ static int enable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_prepare_enable(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, gpu->bus_freq);
+
+ if (gpu->busfreq[gpu->active_level])
+ bs_set(gpu, gpu->busfreq[gpu->active_level]);
return 0;
}
@@ -147,7 +146,8 @@ static int disable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_disable_unprepare(gpu->ebi1_clk);
- if (gpu->bus_freq)
+
+ if (gpu->busfreq[gpu->active_level])
bs_set(gpu, 0);
return 0;
}
@@ -155,6 +155,8 @@ static int disable_axi(struct msm_gpu *gpu)
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -167,6 +169,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (WARN_ON(gpu->active_cnt <= 0))
return -EINVAL;
+ WARN_ON(pm_runtime_get_sync(&pdev->dev) < 0);
+
ret = enable_pwrrail(gpu);
if (ret)
return ret;
@@ -185,6 +189,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -209,6 +215,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
+ pm_runtime_put(&pdev->dev);
return 0;
}
@@ -277,17 +284,35 @@ static void recover_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
if (msm_gpu_active(gpu)) {
struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
-
- /* retire completed submits, plus the one that hung: */
- retire_submits(gpu, fence + 1);
+ struct msm_ringbuffer *ring;
+ int i;
inactive_cancel(gpu);
+
+ FOR_EACH_RING(gpu, ring, i) {
+ uint32_t fence;
+
+ if (!ring)
+ continue;
+
+ fence = gpu->funcs->last_fence(gpu, ring);
+
+ /*
+ * Retire the faulting command on the active ring and
+ * make sure the other rings are cleaned up
+ */
+ if (ring == gpu->funcs->active_ring(gpu))
+ retire_submits(gpu, fence + 1);
+ else
+ retire_submits(gpu, fence);
+ }
+
+ /* Recover the GPU */
gpu->funcs->recover(gpu);
- /* replay the remaining submits after the one that hung: */
+ /* replay the remaining submits for all rings: */
list_for_each_entry(submit, &gpu->submit_list, node) {
- gpu->funcs->submit(gpu, submit, NULL);
+ gpu->funcs->submit(gpu, submit);
}
}
mutex_unlock(&dev->struct_mutex);
@@ -307,25 +332,28 @@ static void hangcheck_handler(unsigned long data)
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ uint32_t fence = gpu->funcs->last_fence(gpu, ring);
+ uint32_t submitted = gpu->funcs->submitted_fence(gpu, ring);
- if (fence != gpu->hangcheck_fence) {
+ if (fence != gpu->hangcheck_fence[ring->id]) {
/* some progress has been made.. ya! */
- gpu->hangcheck_fence = fence;
- } else if (fence < gpu->submitted_fence) {
+ gpu->hangcheck_fence[ring->id] = fence;
+ } else if (fence < submitted) {
/* no progress and not done.. hung! */
- gpu->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
- gpu->name);
+ gpu->hangcheck_fence[ring->id] = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->submitted_fence);
+ gpu->name, submitted);
+
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->submitted_fence > gpu->hangcheck_fence)
+ if (submitted > gpu->hangcheck_fence[ring->id])
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -434,54 +462,66 @@ out:
static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
{
struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- while (!list_empty(&gpu->submit_list)) {
- struct msm_gem_submit *submit;
-
- submit = list_first_entry(&gpu->submit_list,
- struct msm_gem_submit, node);
+ /*
+ * Find and retire all the submits in the same ring that are older than
+ * or equal to 'fence'
+ */
- if (submit->fence <= fence) {
+ list_for_each_entry_safe(submit, tmp, &gpu->submit_list, node) {
+ if (COMPARE_FENCE_LTE(submit->fence, fence)) {
list_del(&submit->node);
kfree(submit);
- } else {
- break;
}
}
}
-static void retire_worker(struct work_struct *work)
+static bool _fence_signaled(struct msm_gem_object *obj, uint32_t fence)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
- struct drm_device *dev = gpu->dev;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ if (obj->write_fence & 0x3FFFFFFF)
+ return COMPARE_FENCE_LTE(obj->write_fence, fence);
- msm_update_fence(gpu->dev, fence);
+ return COMPARE_FENCE_LTE(obj->read_fence, fence);
+}
- mutex_lock(&dev->struct_mutex);
+static void _retire_ring(struct msm_gpu *gpu, uint32_t fence)
+{
+ struct msm_gem_object *obj, *tmp;
retire_submits(gpu, fence);
- while (!list_empty(&gpu->active_list)) {
- struct msm_gem_object *obj;
-
- obj = list_first_entry(&gpu->active_list,
- struct msm_gem_object, mm_list);
-
- if ((obj->read_fence <= fence) &&
- (obj->write_fence <= fence)) {
- /* move to inactive: */
+ list_for_each_entry_safe(obj, tmp, &gpu->active_list, mm_list) {
+ if (_fence_signaled(obj, fence)) {
msm_gem_move_to_inactive(&obj->base);
- msm_gem_put_iova(&obj->base, gpu->id);
+ msm_gem_put_iova(&obj->base, gpu->aspace);
drm_gem_object_unreference(&obj->base);
- } else {
- break;
}
}
+}
- mutex_unlock(&dev->struct_mutex);
+static void retire_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+ struct drm_device *dev = gpu->dev;
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ uint32_t fence;
+
+ if (!ring)
+ continue;
+
+ fence = gpu->funcs->last_fence(gpu, ring);
+ msm_update_fence(gpu->dev, fence);
+
+ mutex_lock(&dev->struct_mutex);
+ _retire_ring(gpu, fence);
+ mutex_unlock(&dev->struct_mutex);
+ }
if (!msm_gpu_active(gpu))
inactive_start(gpu);
@@ -496,18 +536,16 @@ void msm_gpu_retire(struct msm_gpu *gpu)
}
/* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
int i, ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- submit->fence = ++priv->next_fence;
-
- gpu->submitted_fence = submit->fence;
+ submit->fence = FENCE(submit->ring, ++priv->next_fence[submit->ring]);
inactive_cancel(gpu);
@@ -515,7 +553,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_rd_dump_submit(submit);
- gpu->submitted_fence = submit->fence;
+ ring->submitted_fence = submit->fence;
update_sw_cntrs(gpu);
@@ -528,12 +566,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
if (!is_active(msm_obj)) {
- uint32_t iova;
+ uint64_t iova;
/* ring takes a reference to the bo and iova: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->aspace, &iova);
}
if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -543,8 +581,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
- ret = gpu->funcs->submit(gpu, submit, ctx);
- priv->lastctx = ctx;
+ ret = gpu->funcs->submit(gpu, submit);
hangcheck_timer_reset(gpu);
@@ -561,17 +598,54 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static const char *clk_names[] = {
- "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
- "alt_mem_iface_clk",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+ struct clk *clk = devm_clk_get(dev, name);
+
+ DBG("clks[%s]: %p", name, clk);
+
+ return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct device *dev = &pdev->dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+ if (gpu->nr_clocks < 1) {
+ gpu->nr_clocks = 0;
+ return 0;
+ }
+
+ gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
+ GFP_KERNEL);
+ if (!gpu->grp_clks)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ gpu->grp_clks[i] = get_clock(dev, name);
+
+ /* Remember the key clocks that we need to control later */
+ if (!strcmp(name, "core_clk"))
+ gpu->core_clk = gpu->grp_clks[i];
+ else if (!strcmp(name, "rbbmtimer_clk"))
+ gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+ ++i;
+ }
+
+ return 0;
+}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz)
+ const char *name, struct msm_gpu_config *config)
{
struct iommu_domain *iommu;
- int i, ret;
+ int i, ret, nr_rings;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -595,17 +669,16 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
spin_lock_init(&gpu->perf_lock);
- BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
- gpu->mmio = msm_ioremap(pdev, ioname, name);
+ gpu->mmio = msm_ioremap(pdev, config->ioname, name);
if (IS_ERR(gpu->mmio)) {
ret = PTR_ERR(gpu->mmio);
goto fail;
}
/* Get Interrupt: */
- gpu->irq = platform_get_irq_byname(pdev, irqname);
+ gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
dev_err(drm->dev, "failed to get irq: %d\n", ret);
@@ -619,13 +692,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- /* Acquire clocks: */
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
- DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
- if (IS_ERR(gpu->grp_clks[i]))
- gpu->grp_clks[i] = NULL;
- }
+ pm_runtime_enable(&pdev->dev);
+
+ ret = get_clocks(pdev, gpu);
+ if (ret)
+ goto fail;
gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -649,12 +720,17 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
*/
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
+ /* TODO 32b vs 64b address space.. */
+ iommu->geometry.aperture_start = config->va_start;
+ iommu->geometry.aperture_end = config->va_end;
+
dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
+ gpu->aspace = msm_gem_address_space_create(&pdev->dev,
+ iommu, "gpu");
+ if (IS_ERR(gpu->aspace)) {
+ ret = PTR_ERR(gpu->aspace);
dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
+ gpu->aspace = NULL;
iommu_domain_free(iommu);
goto fail;
}
@@ -662,42 +738,80 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
+ nr_rings = config->nr_rings;
- /* Create ringbuffer: */
- mutex_lock(&drm->struct_mutex);
- gpu->rb = msm_ringbuffer_new(gpu, ringsz);
- mutex_unlock(&drm->struct_mutex);
- if (IS_ERR(gpu->rb)) {
- ret = PTR_ERR(gpu->rb);
- gpu->rb = NULL;
- dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
- goto fail;
+ if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+ WARN(1, "Only creating %lu ringbuffers\n", ARRAY_SIZE(gpu->rb));
+ nr_rings = ARRAY_SIZE(gpu->rb);
}
+ /* Create ringbuffer(s): */
+ for (i = 0; i < nr_rings; i++) {
+ mutex_lock(&drm->struct_mutex);
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(gpu->rb[i])) {
+ ret = PTR_ERR(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ dev_err(drm->dev,
+ "could not create ringbuffer %d: %d\n", i, ret);
+ goto fail;
+ }
+ }
+
+ gpu->nr_rings = nr_rings;
+
+#ifdef CONFIG_SMP
+ gpu->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+ gpu->pm_qos_req_dma.irq = gpu->irq;
+#endif
+
+ pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
bs_init(gpu);
+ gpu->snapshot = msm_snapshot_new(gpu);
+ if (IS_ERR(gpu->snapshot))
+ gpu->snapshot = NULL;
+
return 0;
fail:
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ if (gpu->rb[i])
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ }
+
+ pm_runtime_disable(&pdev->dev);
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int i;
+
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
bs_fini(gpu);
- if (gpu->rb) {
- if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
- msm_ringbuffer_destroy(gpu->rb);
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ if (!gpu->rb[i])
+ continue;
+
+ if (gpu->rb[i]->iova)
+ msm_gem_put_iova(gpu->rb[i]->bo, gpu->aspace);
+
+ msm_ringbuffer_destroy(gpu->rb[i]);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
+ msm_snapshot_destroy(gpu, gpu->snapshot);
+ pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2bbe85a3d6f6..06dfaabbfcfe 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,14 +19,24 @@
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
#include "msm_ringbuffer.h"
+#include "msm_snapshot.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_config {
+ const char *ioname;
+ const char *irqname;
+ int nr_rings;
+ uint64_t va_start;
+ uint64_t va_end;
+};
+
/* So far, with hardware that I've seen to date, we can have:
* + zero, one, or two z180 2d cores
* + a3xx or a2xx 3d core, which share a common CP (the firmware
@@ -46,18 +56,21 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
- int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
- void (*flush)(struct msm_gpu *gpu);
- void (*idle)(struct msm_gpu *gpu);
+ int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+ void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
- uint32_t (*last_fence)(struct msm_gpu *gpu);
+ uint32_t (*last_fence)(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
+ uint32_t (*submitted_fence)(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
#endif
+ int (*snapshot)(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
};
struct msm_gpu {
@@ -77,14 +90,12 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
- struct msm_ringbuffer *rb;
- uint32_t rb_iova;
+ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+ int nr_rings;
/* list of GEM active objects: */
struct list_head active_list;
- uint32_t submitted_fence;
-
/* is gpu powered/active? */
int active_cnt;
bool inactive;
@@ -95,13 +106,20 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
- int id;
+ struct msm_gem_address_space *aspace;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk *ebi1_clk, *grp_clks[6];
- uint32_t fast_rate, slow_rate, bus_freq;
+ struct clk **grp_clks;
+ struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+ int nr_clocks;
+
+ uint32_t gpufreq[10];
+ uint32_t busfreq[10];
+ uint32_t nr_pwrlevels;
+ uint32_t active_level;
+
+ struct pm_qos_request pm_qos_req_dma;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
@@ -117,15 +135,44 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
- uint32_t hangcheck_fence;
+ uint32_t hangcheck_fence[MSM_GPU_MAX_RINGS];
struct work_struct recover_work;
struct list_head submit_list;
+
+ struct msm_snapshot *snapshot;
};
+/* It turns out that all targets use the same ringbuffer size. */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+ (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
+static inline struct msm_ringbuffer *__get_ring(struct msm_gpu *gpu, int index)
+{
+ return (index < ARRAY_SIZE(gpu->rb) ? gpu->rb[index] : NULL);
+}
+
+#define FOR_EACH_RING(gpu, ring, index) \
+ for (index = 0, ring = (gpu)->rb[0]; \
+ index < (gpu)->nr_rings && index < ARRAY_SIZE((gpu)->rb); \
+ index++, ring = __get_ring(gpu, index))
+
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (gpu->funcs->submitted_fence(gpu, ring) >
+ gpu->funcs->last_fence(gpu, ring))
+ return true;
+ }
+
+ return false;
}
/* Perf-Counters:
@@ -151,6 +198,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
return msm_readl(gpu->mmio + (reg << 2));
}
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ uint32_t val = gpu_read(gpu, reg);
+
+ val &= ~mask;
+ gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+ * wanted to, but really is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+ val = (u64) msm_readl(gpu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
+
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
@@ -160,12 +246,12 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz);
+ const char *name, struct msm_gpu_config *config);
+
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f1997e4a..3c16222b8890 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -15,41 +15,189 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_platform.h>
#include "msm_drv.h"
-#include "msm_mmu.h"
-
-struct msm_iommu {
- struct msm_mmu base;
- struct iommu_domain *domain;
-};
-#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+#include "msm_iommu.h"
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
- pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+ pr_warn_ratelimited("*** fault: iova=%16llX, flags=%d\n", (u64) iova, flags);
return 0;
}
+/*
+ * Get and enable the IOMMU clocks so that we can make
+ * sure they stay on the entire duration so that we can
+ * safely change the pagetable from the GPU
+ */
+static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct device *dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ if (WARN_ON(!pdev))
+ return;
+
+ dev = &pdev->dev;
+
+ iommu->nr_clocks =
+ of_property_count_strings(dev->of_node, "clock-names");
+
+ if (iommu->nr_clocks < 0) {
+ iommu->nr_clocks = 0;
+ return;
+ }
+
+ if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
+ iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ if (i == iommu->nr_clocks)
+ break;
+
+ iommu->clocks[i] = clk_get(dev, name);
+ if (iommu->clocks[i])
+ clk_prepare_enable(iommu->clocks[i]);
+
+ i++;
+ }
+}
+
+static int _attach_iommu_device(struct msm_mmu *mmu,
+ struct iommu_domain *domain, const char **names, int cnt)
+{
+ int i;
+
+	/* See if there is an iommus member in the current device. If not, look
+ * for the names and see if there is one in there.
+ */
+
+ if (of_find_property(mmu->dev->of_node, "iommus", NULL))
+ return iommu_attach_device(domain, mmu->dev);
+
+ /* Look through the list of names for a target */
+ for (i = 0; i < cnt; i++) {
+ struct device_node *node =
+ of_find_node_by_name(mmu->dev->of_node, names[i]);
+
+ if (!node)
+ continue;
+
+ if (of_find_property(node, "iommus", NULL)) {
+ struct platform_device *pdev;
+
+ /* Get the platform device for the node */
+ of_platform_populate(node->parent, NULL, NULL,
+ mmu->dev);
+
+ pdev = of_find_device_by_node(node);
+
+ if (!pdev)
+ continue;
+
+ _get_iommu_clocks(mmu,
+ of_find_device_by_node(node->parent));
+
+ mmu->dev = &pdev->dev;
+
+ return iommu_attach_device(domain, mmu->dev);
+ }
+ }
+
+ dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
+ return -ENODEV;
+}
+
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int val = 1, ret;
+
+ /* Hope springs eternal */
+ iommu->allow_dynamic = true;
+
+ /* per-instance pagetables need TTBR1 support in the IOMMU driver */
+ ret = iommu_domain_set_attr(iommu->domain,
+ DOMAIN_ATTR_ENABLE_TTBR1, &val);
+ if (ret)
+ iommu->allow_dynamic = false;
+
+ /* Attach the device to the domain */
+ ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the context bank for the base domain; this will be shared with
+ * the children.
+ */
+ iommu->cb = -1;
+ if (iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &iommu->cb))
+ iommu->allow_dynamic = false;
+
+ return 0;
+}
+
+static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ static unsigned int procid;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int ret;
+ unsigned int id;
+
+ /* Assign a unique procid for the domain to cut down on TLB churn */
+ id = ++procid;
+
+ iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_PROCID, &id);
+
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the TTBR0 and the CONTEXTIDR - these will be used by the GPU to
+ * switch the pagetable on its own.
+ */
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_TTBR0,
+ &iommu->ttbr0);
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXTIDR,
+ &iommu->contextidr);
+
+ return 0;
+}
+
+static void msm_iommu_detach(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ iommu_detach_device(iommu->domain, mmu->dev);
+
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_disable(iommu->clocks[i]);
+ }
}
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_detach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
unsigned int i, j;
int ret;
@@ -57,10 +205,10 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
return -EINVAL;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
+ phys_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -82,13 +230,13 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -99,7 +247,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
@@ -124,7 +272,16 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+static const struct msm_mmu_funcs dynamic_funcs = {
+ .attach = msm_iommu_attach_dynamic,
+ .detach = msm_iommu_detach_dynamic,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
+ const struct msm_mmu_funcs *funcs)
{
struct msm_iommu *iommu;
@@ -133,8 +290,54 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
- msm_mmu_init(&iommu->base, dev, &funcs);
+ msm_mmu_init(&iommu->base, dev, funcs);
iommu_set_fault_handler(domain, msm_fault_handler, dev);
return &iommu->base;
}
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+{
+ return _msm_iommu_new(dev, domain, &funcs);
+}
+
+/*
+ * Given a base domain that is attached to an IOMMU device try to create a
+ * dynamic domain that is also attached to the same device but allocates a new
+ * pagetable. This is used to allow multiple pagetables to be attached to the
+ * same device.
+ */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
+{
+ struct msm_iommu *base_iommu = to_msm_iommu(base);
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
+ int ret, val = 1;
+
+ /* Don't continue if the base domain didn't have the support we need */
+ if (!base || base_iommu->allow_dynamic == false)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return ERR_PTR(-ENODEV);
+
+ mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
+
+ if (IS_ERR(mmu)) {
+ if (domain)
+ iommu_domain_free(domain);
+ return mmu;
+ }
+
+ ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &val);
+ if (ret) {
+ msm_iommu_destroy(mmu);
+ return ERR_PTR(ret);
+ }
+
+ /* Set the context bank to match the base domain */
+ iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &base_iommu->cb);
+
+ return mmu;
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.h b/drivers/gpu/drm/msm/msm_iommu.h
new file mode 100644
index 000000000000..d005cfb9758f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_IOMMU_H_
+#define _MSM_IOMMU_H_
+
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+ int cb;
+ phys_addr_t ttbr0;
+ uint32_t contextidr;
+ bool allow_dynamic;
+
+ struct clk *clocks[5];
+ int nr_clocks;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static inline bool msm_iommu_allow_dynamic(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return iommu->allow_dynamic;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index cbf0d4593522..501f12bef00d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -21,7 +21,6 @@
#include <linux/iommu.h>
struct msm_mmu;
-struct msm_gpu;
enum msm_mmu_domain_type {
MSM_SMMU_DOMAIN_UNSECURE,
@@ -33,11 +32,10 @@ enum msm_mmu_domain_type {
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
- int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned len);
+ void (*detach)(struct msm_mmu *mmu);
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+ int prot);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
@@ -62,8 +60,8 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
}
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 9a78c48817c6..6dbb516cd4dc 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -307,7 +307,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_cmds; i++) {
uint32_t idx = submit->cmd[i].idx;
- uint32_t iova = submit->cmd[i].iova;
+ uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
@@ -315,7 +315,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, szd * 4 }, 8);
+ (uint64_t[2]) { iova, szd * 4 }, 16);
rd_write_section(rd, RD_BUFFER_CONTENTS,
buf, szd * 4);
@@ -329,7 +329,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
- (uint32_t[2]){ iova, szd }, 8);
+ (uint64_t[2]) { iova, szd }, 16);
break;
}
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b908b221..14a16c4578d9 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,12 +18,13 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
{
struct msm_ringbuffer *ring;
int ret;
- size = ALIGN(size, 4); /* size should be dword aligned */
+	/* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
@@ -32,7 +33,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
- ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+ ring->id = id;
+ ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, MSM_BO_WC);
if (IS_ERR(ring->bo)) {
ret = PTR_ERR(ring->bo);
ring->bo = NULL;
@@ -40,10 +42,11 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->start = msm_gem_vaddr_locked(ring->bo);
- ring->end = ring->start + (size / 4);
+ ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+ ring->next = ring->start;
ring->cur = ring->start;
- ring->size = size;
+ spin_lock_init(&ring->lock);
return ring;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 6e0e1049fa4f..1e84905073bf 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -22,12 +22,15 @@
struct msm_ringbuffer {
struct msm_gpu *gpu;
- int size;
+ int id;
struct drm_gem_object *bo;
- uint32_t *start, *end, *cur;
+ uint32_t *start, *end, *cur, *next;
+ uint64_t iova;
+ uint32_t submitted_fence;
+ spinlock_t lock;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +38,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
- if (ring->cur == ring->end)
- ring->cur = ring->start;
- *(ring->cur++) = data;
+ /*
+ * ring->next points to the current command being written - it won't be
+ * committed as ring->cur until the flush
+ */
+ if (ring->next == ring->end)
+ ring->next = ring->start;
+ *(ring->next++) = data;
}
#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index f29c1df46691..c99f51e09700 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -86,7 +86,7 @@ static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
return 0;
}
-static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_smmu_detach(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -104,14 +104,14 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
dev_dbg(client->dev, "iommu domain detached\n");
}
-static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, int prot)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
unsigned int i, j;
int ret;
@@ -126,7 +126,7 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
u32 pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -172,14 +172,14 @@ static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len)
+static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
int i;
if (!client)
@@ -197,7 +197,7 @@ static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
WARN_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_snapshot.c b/drivers/gpu/drm/msm/msm_snapshot.c
new file mode 100644
index 000000000000..30f3e5c64ebd
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_snapshot_api.h"
+
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+
+ if (!snapshot)
+ return;
+
+ dma_free_coherent(&pdev->dev, SZ_1M, snapshot->ptr,
+ snapshot->physaddr);
+
+ kfree(snapshot);
+}
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+ if (!snapshot)
+ return ERR_PTR(-ENOMEM);
+
+ snapshot->ptr = dma_alloc_coherent(&pdev->dev, SZ_1M,
+ &snapshot->physaddr, GFP_KERNEL);
+
+ if (!snapshot->ptr) {
+ kfree(snapshot);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ return snapshot;
+}
+
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ int ret;
+ struct msm_snapshot_header header;
+ uint64_t val;
+
+ if (!snapshot)
+ return -ENOMEM;
+
+ /*
+ * For now, blow away the snapshot and take a new one - the most
+ * interesting hang is the last one we saw
+ */
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ header.magic = SNAPSHOT_MAGIC;
+ gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+ header.gpuid = lower_32_bits(val);
+
+ gpu->funcs->get_param(gpu, MSM_PARAM_CHIP_ID, &val);
+ header.chipid = lower_32_bits(val);
+
+ seq_buf_putmem(&snapshot->buf, &header, sizeof(header));
+
+ ret = gpu->funcs->snapshot(gpu, snapshot);
+
+ if (!ret) {
+ struct msm_snapshot_section_header end;
+
+ end.magic = SNAPSHOT_SECTION_MAGIC;
+ end.id = SNAPSHOT_SECTION_END;
+ end.size = sizeof(end);
+
+ seq_buf_putmem(&snapshot->buf, &end, sizeof(end));
+
+ dev_info(gpu->dev->dev, "GPU snapshot created [0x%pa (%d bytes)]\n",
+ &snapshot->physaddr, seq_buf_used(&snapshot->buf));
+ }
+
+ return ret;
+}
+
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m)
+{
+ if (gpu && gpu->snapshot)
+ seq_write(m, gpu->snapshot->ptr,
+ seq_buf_used(&gpu->snapshot->buf));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/msm_snapshot.h b/drivers/gpu/drm/msm/msm_snapshot.h
new file mode 100644
index 000000000000..247e1358c885
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_H_
+#define MSM_SNAPSHOT_H_
+
+#include <linux/string.h>
+#include <linux/seq_buf.h>
+#include "msm_snapshot_api.h"
+
+struct msm_snapshot {
+ void *ptr;
+ struct seq_buf buf;
+ phys_addr_t physaddr;
+ uint32_t index;
+ uint32_t remain;
+ unsigned long timestamp;
+ void *priv;
+};
+
+/* Write a uint32_t value to the next position in the snapshot buffer */
+static inline void SNAPSHOT_WRITE_U32(struct msm_snapshot *snapshot,
+ uint32_t value)
+{
+ seq_buf_putmem(&snapshot->buf, &value, sizeof(value));
+}
+
+/* Copy a block of memory to the next position in the snapshot buffer */
+static inline void SNAPSHOT_MEMCPY(struct msm_snapshot *snapshot, void *src,
+ uint32_t size)
+{
+ if (size)
+ seq_buf_putmem(&snapshot->buf, src, size);
+}
+
+static inline bool _snapshot_header(struct msm_snapshot *snapshot,
+ struct msm_snapshot_section_header *header,
+ u32 headsz, u32 datasz, u32 id)
+{
+ u32 size = headsz + datasz;
+
+ if (seq_buf_buffer_left(&snapshot->buf) <= size)
+ return false;
+
+ /* Write the section header */
+ header->magic = SNAPSHOT_SECTION_MAGIC;
+ header->id = id;
+ header->size = headsz + datasz;
+
+ /* Write the section header */
+ seq_buf_putmem(&snapshot->buf, header, headsz);
+
+ /* The caller will fill in the data from here */
+ return true;
+}
+
+/* SNAPSHOT_HEADER
+ * _snapshot: pointer to struct msm_snapshot
+ * _header: Local variable containing the sub-section header
+ * _id: Section ID to write
+ * _dwords: Size of the data section (in dwords)
+ */
+#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
+ _snapshot_header((_snapshot), \
+ (struct msm_snapshot_section_header *) &(header), \
+ sizeof(header), (_dwords) << 2, (_id))
+
+struct msm_gpu;
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu);
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m);
+
+#endif
+
diff --git a/drivers/gpu/drm/msm/msm_snapshot_api.h b/drivers/gpu/drm/msm/msm_snapshot_api.h
new file mode 100644
index 000000000000..9f0adb9ee784
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot_api.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_API_H_
+#define MSM_SNAPSHOT_API_H_
+
+#include <linux/types.h>
+
+/* High word is the magic, low word is the snapshot header version */
+#define SNAPSHOT_MAGIC 0x504D0002
+
+struct msm_snapshot_header {
+ __u32 magic;
+ __u32 gpuid;
+ __u32 chipid;
+} __packed;
+
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct msm_snapshot_section_header {
+ __u16 magic;
+ __u16 id;
+ __u32 size;
+} __packed;
+
+/* Section identifiers */
+#define SNAPSHOT_SECTION_OS 0x0101
+#define SNAPSHOT_SECTION_REGS_V2 0x0202
+#define SNAPSHOT_SECTION_RB_V2 0x0302
+#define SNAPSHOT_SECTION_IB_V2 0x0402
+#define SNAPSHOT_SECTION_INDEXED_REGS 0x0501
+#define SNAPSHOT_SECTION_DEBUG 0x0901
+#define SNAPSHOT_SECTION_DEBUGBUS 0x0A01
+#define SNAPSHOT_SECTION_GPU_OBJECT_V2 0x0B02
+#define SNAPSHOT_SECTION_MEMLIST_V2 0x0E02
+#define SNAPSHOT_SECTION_SHADER 0x1201
+#define SNAPSHOT_SECTION_END 0xFFFF
+
+#define SNAPSHOT_OS_LINUX_V3 0x00000202
+
+struct msm_snapshot_linux {
+ struct msm_snapshot_section_header header;
+ int osid;
+ __u32 seconds;
+ __u32 power_flags;
+ __u32 power_level;
+ __u32 power_interval_timeout;
+ __u32 grpclk;
+ __u32 busclk;
+ __u64 ptbase;
+ __u32 pid;
+ __u32 current_context;
+ __u32 ctxtcount;
+ unsigned char release[32];
+ unsigned char version[32];
+ unsigned char comm[16];
+} __packed;
+
+struct msm_snapshot_ringbuffer {
+ struct msm_snapshot_section_header header;
+ int start;
+ int end;
+ int rbsize;
+ int wptr;
+ int rptr;
+ int count;
+ __u32 timestamp_queued;
+ __u32 timestamp_retired;
+ __u64 gpuaddr;
+ __u32 id;
+} __packed;
+
+struct msm_snapshot_regs {
+ struct msm_snapshot_section_header header;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_indexed_regs {
+ struct msm_snapshot_section_header header;
+ __u32 index_reg;
+ __u32 data_reg;
+ __u32 start;
+ __u32 count;
+} __packed;
+
+#define SNAPSHOT_DEBUG_CP_MEQ 7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM 8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM 9
+#define SNAPSHOT_DEBUG_CP_ROQ 10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY 11
+#define SNAPSHOT_DEBUG_CP_MERCIU 12
+
+struct msm_snapshot_debug {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 size;
+} __packed;
+
+struct msm_snapshot_debugbus {
+ struct msm_snapshot_section_header header;
+ __u32 id;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_shader {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 index;
+ __u32 size;
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index ac9997c238cd..31cf25ab5691 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -65,8 +65,12 @@ static void sde_connector_destroy(struct drm_connector *connector)
c_conn = to_sde_connector(connector);
+ if (c_conn->ops.pre_deinit)
+ c_conn->ops.pre_deinit(connector, c_conn->display);
+
if (c_conn->blob_caps)
drm_property_unreference_blob(c_conn->blob_caps);
+
msm_property_destroy(&c_conn->property_info);
drm_connector_unregister(connector);
@@ -88,8 +92,7 @@ static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
return;
}
- msm_framebuffer_cleanup(c_state->out_fb,
- c_state->mmu_id);
+ msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
drm_framebuffer_unreference(c_state->out_fb);
c_state->out_fb = NULL;
@@ -193,7 +196,7 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
if (c_state->out_fb) {
drm_framebuffer_reference(c_state->out_fb);
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("failed to prepare fb, %d\n", rc);
}
@@ -241,14 +244,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
rc = -EFAULT;
} else {
if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
else
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("prep fb failed, %d\n", rc);
}
@@ -492,18 +495,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
c_conn->panel = panel;
c_conn->display = display;
- /* cache mmu_id's for later */
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->vbif[VBIF_NRT]) {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
if (ops)
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 9580282291db..3f26ee7d5965 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,15 @@ struct sde_connector_ops {
void *display);
/**
+ * pre_deinit - perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+ int (*pre_deinit)(struct drm_connector *connector,
+ void *display);
+
+ /**
* detect - determine if connector is connected
* @connector: Pointer to drm connector structure
* @force: Force detect setting from drm framework
@@ -140,7 +149,7 @@ struct sde_connector {
struct drm_panel *panel;
void *display;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
char name[SDE_CONNECTOR_NAME_SIZE];
@@ -195,13 +204,13 @@ struct sde_connector {
* struct sde_connector_state - private connector status structure
* @base: Base drm connector structure
* @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
* @property_values: Local cache of current connector property values
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
uint64_t property_values[CONNECTOR_PROP_COUNT];
};
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index b2853e874d92..dbfc2dd11a17 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -432,6 +432,99 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
sde_kms->irq_obj.total_irqs = 0;
}
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms;
+
+ if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+ SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+ return;
+ }
+ sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms;
+
+ if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+ SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+ return;
+ }
+ sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_hw_irq_chip = {
+ .name = "sde",
+ .irq_mask = sde_hw_irq_mask,
+ .irq_unmask = sde_hw_irq_unmask,
+};
+
+static int sde_hw_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct sde_kms *sde_kms;
+ int rc;
+
+ if (!domain || !domain->host_data) {
+ SDE_ERROR("invalid parameters domain %d\n", domain != 0);
+ return -EINVAL;
+ }
+ sde_kms = domain->host_data;
+
+ irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+ rc = irq_set_chip_data(irq, sde_kms);
+
+ return rc;
+}
+
+static struct irq_domain_ops sde_hw_irqdomain_ops = {
+ .map = sde_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ if (!sde_kms->dev || !sde_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return -EINVAL;
+ }
+
+ dev = sde_kms->dev->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &sde_hw_irqdomain_ops, sde_kms);
+ if (!domain) {
+ pr_err("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ sde_kms->irq_controller.enabled_mask = 0;
+ sde_kms->irq_controller.domain = domain;
+
+ return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+ if (sde_kms->irq_controller.domain) {
+ irq_domain_remove(sde_kms->irq_controller.domain);
+ sde_kms->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
{
/*
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 92642e73daa8..ee1b9bd1d32b 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,20 @@ int sde_core_irq_postinstall(struct sde_kms *sde_kms);
void sde_core_irq_uninstall(struct sde_kms *sde_kms);
/**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
* sde_core_irq - core IRQ handler
* @sde_kms: SDE handle
* @return: interrupt handling status
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
index 05e6da14cec0..82da4c1bc86c 100644
--- a/drivers/gpu/drm/msm/sde/sde_crtc.c
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -358,7 +358,17 @@ bool sde_crtc_is_rt(struct drm_crtc *crtc)
return to_sde_crtc_state(crtc->state)->is_rt;
}
-/* if file!=NULL, this is preclose potential cancel-flip path */
+/**
+ * _sde_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However PAGE_FLIP events are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the sde_crtc->event.
+ * if file!=NULL, this is preclose potential cancel-flip path
+ * @crtc: Pointer to drm crtc structure
+ * @file: Pointer to drm file
+ */
static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
struct drm_file *file)
{
@@ -395,7 +405,7 @@ static void sde_crtc_vblank_cb(void *data)
sde_crtc->vblank_cb_time = ktime_get();
else
sde_crtc->vblank_cb_count++;
-
+ _sde_crtc_complete_flip(crtc, NULL);
drm_crtc_handle_vblank(crtc);
DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
SDE_EVT32_IRQ(DRMID(crtc));
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index ed4b7be34281..2205dd98a927 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -264,7 +264,7 @@ struct sde_encoder_phys_cmd {
* @wb_fmt: Writeback pixel format
* @frame_count: Counter of completed writeback operations
* @kickoff_count: Counter of issued writeback operations
- * @mmu_id: mmu identifier for non-secure/secure domain
+ * @aspace: address space identifier for non-secure/secure domain
* @wb_dev: Pointer to writeback device
* @start_time: Start time of writeback latest request
* @end_time: End time of writeback latest request
@@ -285,7 +285,7 @@ struct sde_encoder_phys_wb {
const struct sde_format *wb_fmt;
u32 frame_count;
u32 kickoff_count;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
struct sde_wb_device *wb_dev;
ktime_t start_time;
ktime_t end_time;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 9943e3906df0..9368c4974126 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -180,7 +180,8 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
struct sde_hw_wb *hw_wb;
struct sde_hw_wb_cfg *wb_cfg;
const struct msm_format *format;
- int ret, mmu_id;
+ int ret;
+ struct msm_gem_address_space *aspace;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@@ -193,9 +194,9 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
- mmu_id = (wb_cfg->is_secure) ?
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ aspace = (wb_cfg->is_secure) ?
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
@@ -217,7 +218,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->roi = *wb_roi;
if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
- ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+ ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
if (ret) {
SDE_DEBUG("failed to populate layout %d\n", ret);
return;
@@ -226,7 +227,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->dest.height = fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
} else {
- ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+ ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
&wb_cfg->dest);
if (ret) {
/* this error should be detected during atomic_check */
@@ -1017,15 +1018,15 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
phys_enc = &wb_enc->base;
if (p->sde_kms->vbif[VBIF_NRT]) {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 41180f5dec12..dc7827872276 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -102,169 +102,169 @@ flg, fm, np) \
static const struct sde_format sde_format_map[] = {
INTERLEAVED_RGB_FMT(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 3, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 3, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
@@ -366,13 +366,13 @@ static const struct sde_format sde_format_map[] = {
PLANAR_YUV_FMT(YUV420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C0_G_Y, C1_B_Cb, C2_R_Cr,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
SDE_FETCH_LINEAR, 3),
PLANAR_YUV_FMT(YVU420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C0_G_Y, C2_R_Cr, C1_B_Cb,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
SDE_FETCH_LINEAR, 3),
};
@@ -384,19 +384,19 @@ static const struct sde_format sde_format_map[] = {
* the data will be passed by user-space.
*/
static const struct sde_format sde_format_map_ubwc[] = {
- INTERLEAVED_RGB_FMT(RGB565,
+ INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBA8888,
+ INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBX8888,
+ INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, 0,
@@ -513,14 +513,15 @@ static int _sde_format_get_plane_sizes_ubwc(
ALIGN(DIV_ROUND_UP(height / 2, uv_tile_height), 16),
4096);
- } else if (fmt->base.pixel_format == DRM_FORMAT_RGBA8888 ||
- fmt->base.pixel_format == DRM_FORMAT_RGBX8888 ||
- fmt->base.pixel_format == DRM_FORMAT_RGBA1010102 ||
- fmt->base.pixel_format == DRM_FORMAT_RGBX1010102 ||
- fmt->base.pixel_format == DRM_FORMAT_RGB565) {
+ } else if (fmt->base.pixel_format == DRM_FORMAT_ABGR8888 ||
+ fmt->base.pixel_format == DRM_FORMAT_XBGR8888 ||
+ fmt->base.pixel_format == DRM_FORMAT_BGRA1010102 ||
+ fmt->base.pixel_format == DRM_FORMAT_BGRX1010102 ||
+ fmt->base.pixel_format == DRM_FORMAT_BGR565) {
+
uint32_t stride_alignment, aligned_bitstream_width;
- if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
+ if (fmt->base.pixel_format == DRM_FORMAT_BGR565)
stride_alignment = 128;
else
stride_alignment = 64;
@@ -630,7 +631,7 @@ static int _sde_format_get_plane_sizes(
}
static int _sde_format_populate_addrs_ubwc(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -641,7 +642,7 @@ static int _sde_format_populate_addrs_ubwc(
return -EINVAL;
}
- base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
if (!base_addr) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -711,7 +712,7 @@ static int _sde_format_populate_addrs_ubwc(
}
static int _sde_format_populate_addrs_linear(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -728,7 +729,7 @@ static int _sde_format_populate_addrs_linear(
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i) {
- layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
if (!layout->plane_addr[i]) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -739,7 +740,7 @@ static int _sde_format_populate_addrs_linear(
}
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -770,9 +771,9 @@ int sde_format_populate_layout(
/* Populate the addresses given the fb */
if (SDE_FORMAT_IS_UBWC(layout->format))
- ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
/* check if anything changed */
if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -814,14 +815,14 @@ static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
}
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *layout)
{
int ret;
- ret = sde_format_populate_layout(mmu_id, fb, layout);
+ ret = sde_format_populate_layout(aspace, fb, layout);
if (ret || !roi)
return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 5dcdfbb653ed..0de081d619b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#define _SDE_FORMATS_H
#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
#include "sde_hw_mdss.h"
/**
@@ -76,7 +77,7 @@ int sde_format_check_modified_format(
/**
* sde_format_populate_layout - populate the given format layout based on
* mmu, fb, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @fmtl: format layout structure to populate
*
@@ -84,14 +85,14 @@ int sde_format_check_modified_format(
* are the same as before or 0 if new addresses were populated
*/
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *fmtl);
/**
* sde_format_populate_layout_with_roi - populate the given format layout
* based on mmu, fb, roi, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @roi: region of interest (optional)
* @fmtl: format layout structure to populate
@@ -99,7 +100,7 @@ int sde_format_populate_layout(
* Return: error code on failure, 0 on success
*/
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *fmtl);
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
index 31a6d985c38f..519288f0dda2 100644
--- a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -1830,12 +1830,20 @@ static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
else if (!rc && !strcmp(type, "qseedv2"))
cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
+ else if (rc) {
+ SDE_DEBUG("qseed property not found\n");
+ rc = 0;
+ }
rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
if (!rc && !strcmp(type, "csc"))
cfg->csc_type = SDE_SSPP_CSC;
else if (!rc && !strcmp(type, "csc-10bit"))
cfg->csc_type = SDE_SSPP_CSC_10BIT;
+ else if (rc) {
+ SDE_DEBUG("CSC property not found\n");
+ rc = 0;
+ }
cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
end:
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index 909d6df38260..eeb7a0002eab 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,86 +49,14 @@ irqreturn_t sde_irq(struct msm_kms *kms)
return IRQ_HANDLED;
}
-static void sde_hw_irq_mask(struct irq_data *irqd)
-{
- struct sde_kms *sde_kms;
-
- if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
- SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
- return;
- }
- sde_kms = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void sde_hw_irq_unmask(struct irq_data *irqd)
-{
- struct sde_kms *sde_kms;
-
- if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
- SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
- return;
- }
- sde_kms = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_hw_irq_chip = {
- .name = "sde",
- .irq_mask = sde_hw_irq_mask,
- .irq_unmask = sde_hw_irq_unmask,
-};
-
-static int sde_hw_irqdomain_map(struct irq_domain *domain,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct sde_kms *sde_kms;
- int rc;
-
- if (!domain || !domain->host_data) {
- SDE_ERROR("invalid parameters domain %d\n", domain != 0);
- return -EINVAL;
- }
- sde_kms = domain->host_data;
-
- irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
- rc = irq_set_chip_data(irq, sde_kms);
-
- return rc;
-}
-
-static struct irq_domain_ops sde_hw_irqdomain_ops = {
- .map = sde_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
void sde_irq_preinstall(struct msm_kms *kms)
{
struct sde_kms *sde_kms = to_sde_kms(kms);
- struct device *dev;
- struct irq_domain *domain;
if (!sde_kms->dev || !sde_kms->dev->dev) {
pr_err("invalid device handles\n");
return;
}
- dev = sde_kms->dev->dev;
-
- domain = irq_domain_add_linear(dev->of_node, 32,
- &sde_hw_irqdomain_ops, sde_kms);
- if (!domain) {
- pr_err("failed to add irq_domain\n");
- return;
- }
-
- sde_kms->irq_controller.enabled_mask = 0;
- sde_kms->irq_controller.domain = domain;
sde_core_irq_preinstall(sde_kms);
}
@@ -158,9 +86,5 @@ void sde_irq_uninstall(struct msm_kms *kms)
}
sde_core_irq_uninstall(sde_kms);
-
- if (sde_kms->irq_controller.domain) {
- irq_domain_remove(sde_kms->irq_controller.domain);
- sde_kms->irq_controller.domain = NULL;
- }
+ sde_core_irq_domain_fini(sde_kms);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index afe90d16e31d..581918da183f 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -27,6 +27,7 @@
#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
+#include "sde_hdmi.h"
#include "sde_kms.h"
#include "sde_core_irq.h"
@@ -504,8 +505,30 @@ static int _sde_kms_get_displays(struct sde_kms *sde_kms)
wb_display_get_displays(sde_kms->wb_displays,
sde_kms->wb_display_count);
}
+
+ /* hdmi */
+ sde_kms->hdmi_displays = NULL;
+ sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
+ SDE_DEBUG("hdmi display count=%d", sde_kms->hdmi_display_count);
+ if (sde_kms->hdmi_display_count) {
+ sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (!sde_kms->hdmi_displays) {
+ SDE_ERROR("failed to allocate hdmi displays\n");
+ goto exit_deinit_hdmi;
+ }
+ sde_kms->hdmi_display_count =
+ sde_hdmi_get_displays(sde_kms->hdmi_displays,
+ sde_kms->hdmi_display_count);
+ }
+
return 0;
+exit_deinit_hdmi:
+ sde_kms->hdmi_display_count = 0;
+ sde_kms->hdmi_displays = NULL;
+
exit_deinit_wb:
kfree(sde_kms->wb_displays);
sde_kms->wb_display_count = 0;
@@ -528,6 +551,9 @@ static void _sde_kms_release_displays(struct sde_kms *sde_kms)
SDE_ERROR("invalid sde kms\n");
return;
}
+ kfree(sde_kms->hdmi_displays);
+ sde_kms->hdmi_display_count = 0;
+ sde_kms->hdmi_displays = NULL;
kfree(sde_kms->wb_displays);
sde_kms->wb_displays = NULL;
@@ -565,18 +591,30 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.set_property = sde_wb_connector_set_property,
.get_info = sde_wb_get_info,
};
- struct msm_display_info info;
+ static const struct sde_connector_ops hdmi_ops = {
+ .pre_deinit = sde_hdmi_connector_pre_deinit,
+ .post_init = sde_hdmi_connector_post_init,
+ .detect = sde_hdmi_connector_detect,
+ .get_modes = sde_hdmi_connector_get_modes,
+ .mode_valid = sde_hdmi_mode_valid,
+ .get_info = sde_hdmi_get_info,
+ };
+ struct msm_display_info info = {0};
struct drm_encoder *encoder;
void *display, *connector;
int i, max_encoders;
int rc = 0;
+ int connector_poll;
if (!dev || !priv || !sde_kms) {
SDE_ERROR("invalid argument(s)\n");
return -EINVAL;
}
- max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+ max_encoders = sde_kms->dsi_display_count +
+ sde_kms->wb_display_count +
+ sde_kms->hdmi_display_count;
+
if (max_encoders > ARRAY_SIZE(priv->encoders)) {
max_encoders = ARRAY_SIZE(priv->encoders);
SDE_ERROR("capping number of displays to %d", max_encoders);
@@ -666,6 +704,57 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
}
}
+ /* hdmi */
+ for (i = 0; i < sde_kms->hdmi_display_count &&
+ priv->num_encoders < max_encoders; ++i) {
+ display = sde_kms->hdmi_displays[i];
+ encoder = NULL;
+
+ memset(&info, 0x0, sizeof(info));
+ rc = sde_hdmi_dev_init(display);
+ if (rc) {
+ SDE_ERROR("hdmi dev_init %d failed\n", i);
+ continue;
+ }
+ rc = sde_hdmi_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("hdmi get_info %d failed\n", i);
+ continue;
+ }
+ if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+ connector_poll = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector_poll = 0;
+ encoder = sde_encoder_init(dev, &info);
+ if (IS_ERR_OR_NULL(encoder)) {
+ SDE_ERROR("encoder init failed for hdmi %d\n", i);
+ continue;
+ }
+
+ rc = sde_hdmi_drm_init(display, encoder);
+ if (rc) {
+ SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
+ connector = sde_connector_init(dev,
+ encoder,
+ 0,
+ display,
+ &hdmi_ops,
+ connector_poll,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (connector) {
+ priv->encoders[priv->num_encoders++] = encoder;
+ } else {
+ SDE_ERROR("hdmi %d connector init failed\n", i);
+ sde_hdmi_dev_deinit(display);
+ sde_hdmi_drm_deinit(display);
+ sde_encoder_destroy(encoder);
+ }
+ }
+
return 0;
}
@@ -726,6 +815,9 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
priv = dev->dev_private;
catalog = sde_kms->catalog;
+ ret = sde_core_irq_domain_add(sde_kms);
+ if (ret)
+ goto fail_irq;
/*
* Query for underlying display drivers, and create connectors,
* bridges and encoders for them.
@@ -784,6 +876,8 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
return 0;
fail:
_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+ sde_core_irq_domain_fini(sde_kms);
return ret;
}
@@ -940,17 +1034,16 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
struct msm_mmu *mmu;
int i;
- for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
- if (!sde_kms->mmu[i])
+ for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+ if (!sde_kms->aspace[i])
continue;
- mmu = sde_kms->mmu[i];
- msm_unregister_mmu(sde_kms->dev, mmu);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
- sde_kms->mmu[i] = 0;
- sde_kms->mmu_id[i] = 0;
+ mmu = sde_kms->aspace[i]->mmu;
+
+ mmu->funcs->detach(mmu);
+ msm_gem_address_space_put(sde_kms->aspace[i]);
+
+ sde_kms->aspace[i] = NULL;
}
return 0;
@@ -962,6 +1055,8 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
int i, ret;
for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+ struct msm_gem_address_space *aspace;
+
mmu = msm_smmu_new(sde_kms->dev->dev, i);
if (IS_ERR(mmu)) {
/* MMU's can be optional depending on platform */
@@ -971,25 +1066,24 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
continue;
}
- ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret) {
- SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+ aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+ mmu, "sde");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
mmu->funcs->destroy(mmu);
goto fail;
}
- sde_kms->mmu_id[i] = msm_register_mmu(sde_kms->dev, mmu);
- if (sde_kms->mmu_id[i] < 0) {
- ret = sde_kms->mmu_id[i];
- SDE_ERROR("failed to register sde iommu %d: %d\n",
- i, ret);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ sde_kms->aspace[i] = aspace;
+
+ ret = mmu->funcs->attach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+ msm_gem_address_space_put(aspace);
goto fail;
}
- sde_kms->mmu[i] = mmu;
}
return 0;
@@ -1134,6 +1228,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto perf_err;
}
+ sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+ if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+ rc = PTR_ERR(sde_kms->hw_intr);
+ SDE_ERROR("hw_intr init failed: %d\n", rc);
+ sde_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
/*
* _sde_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1159,21 +1261,12 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
- if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
- rc = PTR_ERR(sde_kms->hw_intr);
- SDE_ERROR("hw_intr init failed: %d\n", rc);
- sde_kms->hw_intr = NULL;
- goto hw_intr_init_err;
- }
-
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
return 0;
-hw_intr_init_err:
- _sde_kms_drm_obj_destroy(sde_kms);
drm_obj_init_err:
sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
perf_err:
power_error:
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index bf127ffe9eb6..44f6be959ac9 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -22,6 +22,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_mmu.h"
+#include "msm_gem.h"
#include "sde_dbg.h"
#include "sde_hw_catalog.h"
#include "sde_hw_ctl.h"
@@ -121,8 +122,7 @@ struct sde_kms {
int core_rev;
struct sde_mdss_cfg *catalog;
- struct msm_mmu *mmu[MSM_SMMU_DOMAIN_MAX];
- int mmu_id[MSM_SMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
struct sde_power_client *core_client;
/* directory entry for debugfs */
@@ -154,8 +154,9 @@ struct sde_kms {
void **dsi_displays;
int wb_display_count;
void **wb_displays;
-
bool has_danger_ctrl;
+ void **hdmi_displays;
+ int hdmi_display_count;
};
struct vsync_info {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index b3de45302dea..114acfd7a173 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -86,7 +86,7 @@ enum sde_plane_qos {
struct sde_plane {
struct drm_plane base;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
struct mutex lock;
@@ -580,7 +580,7 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
return;
}
- ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+ ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
else if (ret)
@@ -1285,7 +1285,7 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
return 0;
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
- return msm_framebuffer_prepare(fb, psde->mmu_id);
+ return msm_framebuffer_prepare(fb, psde->aspace);
}
static void sde_plane_cleanup_fb(struct drm_plane *plane,
@@ -1294,11 +1294,11 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
- if (!fb)
+ if (!fb || !psde)
return;
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
- msm_framebuffer_cleanup(fb, psde->mmu_id);
+ msm_framebuffer_cleanup(fb, psde->aspace);
}
static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
@@ -2384,7 +2384,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &psde->base;
psde->pipe = pipe;
- psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+ psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
/* initialize underlying h/w driver */
psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
new file mode 100644
index 000000000000..70a42254a909
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/sde_io_util.h>
+
+#define MAX_I2C_CMDS 16
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+ u32 in_val;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ writel_relaxed(value, io->base + offset);
+ if (debug) {
+ in_val = readl_relaxed(io->base + offset);
+ DEV_DBG("[%08x] => %08x [%08x]\n",
+ (u32)(unsigned long)(io->base + offset),
+ value, in_val);
+ }
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+ u32 value;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ value = readl_relaxed(io->base + offset);
+ if (debug)
+ DEV_DBG("[%08x] <= %08x\n",
+ (u32)(unsigned long)(io->base + offset), value);
+
+ return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+ u32 debug)
+{
+ if (debug)
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+ (void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+ unsigned int type, const char *name)
+{
+ struct resource *res = NULL;
+
+ res = platform_get_resource_byname(pdev, type, name);
+ if (!res)
+ DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+ return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+ struct dss_io_data *io_data, const char *name)
+{
+ struct resource *res = NULL;
+
+ if (!pdev || !io_data) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+ __builtin_return_address(0), __func__, name);
+ return -ENODEV;
+ }
+
+ io_data->len = (u32)resource_size(res);
+ io_data->base = ioremap(res->start, io_data->len);
+ if (!io_data->base) {
+ DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+ __builtin_return_address(0), __func__, name);
+ return -EIO;
+ }
+
+ return 0;
+} /* msm_dss_ioremap_byname */
+EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+ if (!io_data) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (io_data->base) {
+ iounmap(io_data->base);
+ io_data->base = NULL;
+ }
+ io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+ int num_vreg, int config)
+{
+ int i = 0, rc = 0;
+ struct dss_vreg *curr_vreg = NULL;
+ enum dss_vreg_type type;
+
+ if (!in_vreg || !num_vreg)
+ return rc;
+
+ if (config) {
+ for (i = 0; i < num_vreg; i++) {
+ curr_vreg = &in_vreg[i];
+ curr_vreg->vreg = regulator_get(dev,
+ curr_vreg->vreg_name);
+ rc = PTR_RET(curr_vreg->vreg);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ curr_vreg->vreg_name, rc);
+ curr_vreg->vreg = NULL;
+ goto vreg_get_fail;
+ }
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ rc = regulator_set_voltage(
+ curr_vreg->vreg,
+ curr_vreg->min_voltage,
+ curr_vreg->max_voltage);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s set vltg fail\n",
+ __builtin_return_address(0),
+ __func__,
+ curr_vreg->vreg_name);
+ goto vreg_set_voltage_fail;
+ }
+ }
+ }
+ } else {
+ for (i = num_vreg-1; i >= 0; i--) {
+ curr_vreg = &in_vreg[i];
+ if (curr_vreg->vreg) {
+ type = (regulator_count_voltages(
+ curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ regulator_set_voltage(curr_vreg->vreg,
+ 0, curr_vreg->max_voltage);
+ }
+ regulator_put(curr_vreg->vreg);
+ curr_vreg->vreg = NULL;
+ }
+ }
+ }
+ return 0;
+
+vreg_unconfig:
+if (type == DSS_REG_LDO)
+ regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+ regulator_put(curr_vreg->vreg);
+ curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+ for (i--; i >= 0; i--) {
+ curr_vreg = &in_vreg[i];
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ goto vreg_unconfig;
+ }
+ return rc;
+} /* msm_dss_config_vreg */
+EXPORT_SYMBOL(msm_dss_config_vreg);
+
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+ int i = 0, rc = 0;
+ bool need_sleep;
+
+ if (enable) {
+ for (i = 0; i < num_vreg; i++) {
+ rc = PTR_RET(in_vreg[i].vreg);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name, rc);
+ goto vreg_set_opt_mode_fail;
+ }
+ need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+ if (in_vreg[i].pre_on_sleep && need_sleep)
+ usleep_range(in_vreg[i].pre_on_sleep * 1000,
+ in_vreg[i].pre_on_sleep * 1000);
+ rc = regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].enable_load);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s set opt m fail\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name);
+ goto vreg_set_opt_mode_fail;
+ }
+ rc = regulator_enable(in_vreg[i].vreg);
+ if (in_vreg[i].post_on_sleep && need_sleep)
+ usleep_range(in_vreg[i].post_on_sleep * 1000,
+ in_vreg[i].post_on_sleep * 1000);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s enable failed\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name);
+ goto disable_vreg;
+ }
+ }
+ } else {
+ for (i = num_vreg-1; i >= 0; i--) {
+ if (in_vreg[i].pre_off_sleep)
+ usleep_range(in_vreg[i].pre_off_sleep * 1000,
+ in_vreg[i].pre_off_sleep * 1000);
+ regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].disable_load);
+ regulator_disable(in_vreg[i].vreg);
+ if (in_vreg[i].post_off_sleep)
+ usleep_range(in_vreg[i].post_off_sleep * 1000,
+ in_vreg[i].post_off_sleep * 1000);
+ }
+ }
+ return rc;
+
+disable_vreg:
+ regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+ for (i--; i >= 0; i--) {
+ if (in_vreg[i].pre_off_sleep)
+ usleep_range(in_vreg[i].pre_off_sleep * 1000,
+ in_vreg[i].pre_off_sleep * 1000);
+ regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].disable_load);
+ regulator_disable(in_vreg[i].vreg);
+ if (in_vreg[i].post_off_sleep)
+ usleep_range(in_vreg[i].post_off_sleep * 1000,
+ in_vreg[i].post_off_sleep * 1000);
+ }
+
+ return rc;
+} /* msm_dss_enable_vreg */
+EXPORT_SYMBOL(msm_dss_enable_vreg);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+ int i = 0, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_gpio; i++) {
+ DEV_DBG("%pS->%s: %s enable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+
+ rc = gpio_request(in_gpio[i].gpio,
+ in_gpio[i].gpio_name);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s enable failed\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+ goto disable_gpio;
+ }
+ gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+ }
+ } else {
+ for (i = num_gpio-1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: %s disable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+ if (in_gpio[i].gpio)
+ gpio_free(in_gpio[i].gpio);
+ }
+ }
+ return rc;
+
+disable_gpio:
+ for (i--; i >= 0; i--)
+ if (in_gpio[i].gpio)
+ gpio_free(in_gpio[i].gpio);
+
+ return rc;
+} /* msm_dss_enable_gpio */
+EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_RET(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ msm_dss_put_clk(clk_arry, num_clk);
+
+ return rc;
+} /* msm_dss_get_clk */
+EXPORT_SYMBOL(msm_dss_get_clk);
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
+
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf)
+{
+ struct i2c_msg msgs[2];
+ int ret = -1;
+
+ pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].buf = &reg_offset;
+ msgs[0].len = 1;
+
+ msgs[1].addr = slave_addr >> 1;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = read_buf;
+ msgs[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 1) {
+ pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+ return -EACCES;
+ }
+ pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+ return 0;
+}
+EXPORT_SYMBOL(sde_i2c_byte_read);
+
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value)
+{
+ struct i2c_msg msgs[1];
+ uint8_t data[2];
+ int status = -EACCES;
+
+ pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ data[0] = reg_offset;
+ data[1] = *value;
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = data;
+
+ status = i2c_transfer(client->adapter, msgs, 1);
+ if (status < 1) {
+ pr_err("I2C WRITE FAILED=[%d]\n", status);
+ return -EACCES;
+ }
+ pr_debug("%s: I2C write status=%x\n", __func__, status);
+ return status;
+}
+EXPORT_SYMBOL(sde_i2c_byte_write);
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 3c82a261e3fb..ab65283ceafc 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -24,7 +24,7 @@
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
-#include <linux/mdss_io_util.h>
+#include <linux/sde_io_util.h>
#include "sde_power_handle.h"
#include "sde_trace.h"
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 973884c2c5e7..b58391adf3ab 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -161,6 +161,7 @@ static const struct {
{ adreno_is_a530, a530_efuse_speed_bin },
{ adreno_is_a505, a530_efuse_speed_bin },
{ adreno_is_a512, a530_efuse_speed_bin },
+ { adreno_is_a508, a530_efuse_speed_bin },
};
static void a5xx_check_features(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index fffe08038bcd..5306303b8d15 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -168,6 +168,7 @@ static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
* KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC so it is ok to cross the streams here.
*/
static const struct flag_entry context_priv[] = {
+ { KGSL_CONTEXT_PRIV_SUBMITTED, "submitted"},
{ KGSL_CONTEXT_PRIV_DETACHED, "detached"},
{ KGSL_CONTEXT_PRIV_INVALID, "invalid"},
{ KGSL_CONTEXT_PRIV_PAGEFAULT, "pagefault"},
diff --git a/drivers/gpu/msm/adreno_dispatch.c b/drivers/gpu/msm/adreno_dispatch.c
index 0d068e9c5805..89218b62bb7d 100644
--- a/drivers/gpu/msm/adreno_dispatch.c
+++ b/drivers/gpu/msm/adreno_dispatch.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2327,10 +2327,6 @@ static int adreno_dispatch_process_drawqueue(struct adreno_device *adreno_dev,
if (adreno_drawqueue_is_empty(drawqueue))
return count;
- /* Don't update the drawqueue timeout if we are about to preempt out */
- if (!adreno_in_preempt_state(adreno_dev, ADRENO_PREEMPT_NONE))
- return count;
-
/* Don't update the drawqueue timeout if it isn't active */
if (!drawqueue_is_current(drawqueue))
return count;
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 1cbd2ef4b6b4..21a6399ba38e 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -217,10 +217,12 @@ static int adreno_drawctxt_wait_rb(struct adreno_device *adreno_dev,
BUG_ON(!mutex_is_locked(&device->mutex));
/*
- * If the context is invalid then return immediately - we may end up
- * waiting for a timestamp that will never come
+ * If the context is invalid (OR) not submitted commands to GPU
+ * then return immediately - we may end up waiting for a timestamp
+ * that will never come
*/
- if (kgsl_context_invalid(context))
+ if (kgsl_context_invalid(context) ||
+ !test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
goto done;
trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index fc0602a60ac1..161b718b8a38 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -959,6 +959,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
drawobj->timestamp, time);
if (!ret) {
+ set_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv);
cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 2b227f2c3a6c..601e7a23101b 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4379,8 +4379,9 @@ kgsl_get_unmapped_area(struct file *file, unsigned long addr,
val = _get_svm_area(private, entry, addr, len, flags);
if (IS_ERR_VALUE(val))
KGSL_MEM_ERR(device,
- "_get_svm_area: pid %d addr %lx pgoff %lx len %ld failed error %d\n",
- private->pid, addr, pgoff, len, (int) val);
+ "_get_svm_area: pid %d mmap_base %lx addr %lx pgoff %lx len %ld failed error %d\n",
+ private->pid, current->mm->mmap_base, addr,
+ pgoff, len, (int) val);
}
put:
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index cb7ffd51460a..0a2c39b82781 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -322,6 +322,7 @@ struct kgsl_device {
/**
* enum bits for struct kgsl_context.priv
+ * @KGSL_CONTEXT_PRIV_SUBMITTED - The context has submitted commands to gpu.
* @KGSL_CONTEXT_PRIV_DETACHED - The context has been destroyed by userspace
* and is no longer using the gpu.
* @KGSL_CONTEXT_PRIV_INVALID - The context has been destroyed by the kernel
@@ -331,7 +332,8 @@ struct kgsl_device {
* reserved for devices specific use.
*/
enum kgsl_context_priv {
- KGSL_CONTEXT_PRIV_DETACHED = 0,
+ KGSL_CONTEXT_PRIV_SUBMITTED = 0,
+ KGSL_CONTEXT_PRIV_DETACHED,
KGSL_CONTEXT_PRIV_INVALID,
KGSL_CONTEXT_PRIV_PAGEFAULT,
KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC = 16,
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 1caa673db6ff..7de43dd27ffe 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -156,8 +156,10 @@ static size_t snapshot_os(struct kgsl_device *device,
header->osid = KGSL_SNAPSHOT_OS_LINUX_V3;
/* Get the kernel build information */
- strlcpy(header->release, utsname()->release, sizeof(header->release));
- strlcpy(header->version, utsname()->version, sizeof(header->version));
+ strlcpy(header->release, init_utsname()->release,
+ sizeof(header->release));
+ strlcpy(header->version, init_utsname()->version,
+ sizeof(header->version));
/* Get the Unix time for the timestamp */
header->seconds = get_seconds();
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ed70a980d9ac..6e72cda433db 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -136,6 +136,17 @@ config CORESIGHT_CTI
hardware component to another. It can also be used to pass
software generated events.
+config CORESIGHT_CTI_SAVE_DISABLE
+ bool "Turn off CTI save and restore"
+ depends on CORESIGHT_CTI
+ help
+ Turns off CoreSight CTI save and restore support for cpu CTIs. This
+ avoids voting for the clocks during probe as well as the associated
+ save and restore latency at the cost of breaking cpu CTI support on
+ targets where cpu CTIs have to be preserved across power collapse.
+
+ If unsure, say 'N' here to avoid breaking cpu CTI support.
+
config CORESIGHT_TPDA
bool "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
help
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 04b1b62f85c3..bf2a1dd7cf15 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -50,6 +50,8 @@ static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
static int i2c_msm_pm_resume(struct device *dev);
static void i2c_msm_pm_suspend(struct device *dev);
static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl);
+static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
+ bool runtime_active);
/* string table for enum i2c_msm_xfer_mode_id */
const char * const i2c_msm_mode_str_tbl[] = {
@@ -2157,27 +2159,54 @@ static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl)
return true;
}
-static void i2c_msm_pm_clk_disable_unprepare(struct i2c_msm_ctrl *ctrl)
+static void i2c_msm_pm_clk_unprepare(struct i2c_msm_ctrl *ctrl)
{
- clk_disable_unprepare(ctrl->rsrcs.core_clk);
- clk_disable_unprepare(ctrl->rsrcs.iface_clk);
+ clk_unprepare(ctrl->rsrcs.core_clk);
+ clk_unprepare(ctrl->rsrcs.iface_clk);
}
-static int i2c_msm_pm_clk_prepare_enable(struct i2c_msm_ctrl *ctrl)
+static int i2c_msm_pm_clk_prepare(struct i2c_msm_ctrl *ctrl)
{
int ret;
- ret = clk_prepare_enable(ctrl->rsrcs.iface_clk);
+ ret = clk_prepare(ctrl->rsrcs.iface_clk);
if (ret) {
dev_err(ctrl->dev,
- "error on clk_prepare_enable(iface_clk):%d\n", ret);
+ "error on clk_prepare(iface_clk):%d\n", ret);
return ret;
}
- ret = clk_prepare_enable(ctrl->rsrcs.core_clk);
+ ret = clk_prepare(ctrl->rsrcs.core_clk);
+ if (ret) {
+ clk_unprepare(ctrl->rsrcs.iface_clk);
+ dev_err(ctrl->dev,
+ "error clk_prepare(core_clk):%d\n", ret);
+ }
+ return ret;
+}
+
+static void i2c_msm_pm_clk_disable(struct i2c_msm_ctrl *ctrl)
+{
+ clk_disable(ctrl->rsrcs.core_clk);
+ clk_disable(ctrl->rsrcs.iface_clk);
+}
+
+static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
+{
+ int ret;
+
+ ret = clk_enable(ctrl->rsrcs.iface_clk);
+ if (ret) {
+ dev_err(ctrl->dev,
+ "error on clk_enable(iface_clk):%d\n", ret);
+ i2c_msm_pm_clk_unprepare(ctrl);
+ return ret;
+ }
+ ret = clk_enable(ctrl->rsrcs.core_clk);
if (ret) {
- clk_disable_unprepare(ctrl->rsrcs.iface_clk);
+ clk_disable(ctrl->rsrcs.iface_clk);
+ i2c_msm_pm_clk_unprepare(ctrl);
dev_err(ctrl->dev,
- "error clk_prepare_enable(core_clk):%d\n", ret);
+ "error clk_enable(core_clk):%d\n", ret);
}
return ret;
}
@@ -2198,6 +2227,7 @@ static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
return -EIO;
}
+ i2c_msm_pm_pinctrl_state(ctrl, true);
pm_runtime_get_sync(ctrl->dev);
/*
* if runtime PM callback was not invoked (when both runtime-pm
@@ -2208,7 +2238,7 @@ static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
i2c_msm_pm_resume(ctrl->dev);
}
- ret = i2c_msm_pm_clk_prepare_enable(ctrl);
+ ret = i2c_msm_pm_clk_enable(ctrl);
if (ret) {
mutex_unlock(&ctrl->xfer.mtx);
return ret;
@@ -2235,13 +2265,14 @@ static void i2c_msm_pm_xfer_end(struct i2c_msm_ctrl *ctrl)
if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
i2c_msm_dma_free_channels(ctrl);
- i2c_msm_pm_clk_disable_unprepare(ctrl);
+ i2c_msm_pm_clk_disable(ctrl);
if (!pm_runtime_enabled(ctrl->dev))
i2c_msm_pm_suspend(ctrl->dev);
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put_autosuspend(ctrl->dev);
+ i2c_msm_pm_pinctrl_state(ctrl, false);
mutex_unlock(&ctrl->xfer.mtx);
}
@@ -2663,7 +2694,7 @@ static void i2c_msm_pm_suspend(struct device *dev)
return;
}
i2c_msm_dbg(ctrl, MSM_DBG, "suspending...");
- i2c_msm_pm_pinctrl_state(ctrl, false);
+ i2c_msm_pm_clk_unprepare(ctrl);
i2c_msm_clk_path_unvote(ctrl);
/*
@@ -2690,7 +2721,7 @@ static int i2c_msm_pm_resume(struct device *dev)
i2c_msm_dbg(ctrl, MSM_DBG, "resuming...");
i2c_msm_clk_path_vote(ctrl);
- i2c_msm_pm_pinctrl_state(ctrl, true);
+ i2c_msm_pm_clk_prepare(ctrl);
ctrl->pwr_state = I2C_MSM_PM_RT_ACTIVE;
return 0;
}
@@ -2870,9 +2901,13 @@ static int i2c_msm_probe(struct platform_device *pdev)
/* vote for clock to enable reading the version number off the HW */
i2c_msm_clk_path_vote(ctrl);
- ret = i2c_msm_pm_clk_prepare_enable(ctrl);
+ ret = i2c_msm_pm_clk_prepare(ctrl);
+ if (ret)
+ goto clk_err;
+
+ ret = i2c_msm_pm_clk_enable(ctrl);
if (ret) {
- dev_err(ctrl->dev, "error in enabling clocks:%d\n", ret);
+ i2c_msm_pm_clk_unprepare(ctrl);
goto clk_err;
}
@@ -2884,7 +2919,8 @@ static int i2c_msm_probe(struct platform_device *pdev)
if (ret)
dev_err(ctrl->dev, "error error on qup software reset\n");
- i2c_msm_pm_clk_disable_unprepare(ctrl);
+ i2c_msm_pm_clk_disable(ctrl);
+ i2c_msm_pm_clk_unprepare(ctrl);
i2c_msm_clk_path_unvote(ctrl);
ret = i2c_msm_rsrcs_gpio_pinctrl_init(ctrl);
diff --git a/drivers/input/touchscreen/Kconfig b/drivers/input/touchscreen/Kconfig
index f13017cd9d46..99de4002275e 100644
--- a/drivers/input/touchscreen/Kconfig
+++ b/drivers/input/touchscreen/Kconfig
@@ -1128,17 +1128,6 @@ config TOUCHSCREEN_FT5X06_GESTURE
If unsure, say N.
-config TOUCHSCREEN_MSTAR21XX
- tristate "Mstar touchscreens"
- depends on I2C
- help
- Say Y here if you have a mstar touchscreen.
-
- If unsure, say N.
-
- To compile this driver as a module, choose M here: the
- module will be called msg21xx_ts.
-
config TOUCHSCREEN_ROHM_BU21023
tristate "ROHM BU21023/24 Dual touch support resistive touchscreens"
depends on I2C
diff --git a/drivers/input/touchscreen/Makefile b/drivers/input/touchscreen/Makefile
index 7606fe53fcff..a32132cffe92 100644
--- a/drivers/input/touchscreen/Makefile
+++ b/drivers/input/touchscreen/Makefile
@@ -98,6 +98,5 @@ obj-$(CONFIG_TOUCHSCREEN_TPS6507X) += tps6507x-ts.o
obj-$(CONFIG_TOUCHSCREEN_ZFORCE) += zforce_ts.o
obj-$(CONFIG_TOUCHSCREEN_COLIBRI_VF50) += colibri-vf50-ts.o
obj-$(CONFIG_TOUCHSCREEN_ROHM_BU21023) += rohm_bu21023.o
-obj-$(CONFIG_TOUCHSCREEN_MSTAR21XX) += msg21xx_ts.o
obj-$(CONFIG_TOUCHSCREEN_GT9XX) += gt9xx/
obj-$(CONFIG_TOUCHSCREEN_ST) += st/
diff --git a/drivers/input/touchscreen/msg21xx_ts.c b/drivers/input/touchscreen/msg21xx_ts.c
deleted file mode 100644
index fe8c6e1647f8..000000000000
--- a/drivers/input/touchscreen/msg21xx_ts.c
+++ /dev/null
@@ -1,2260 +0,0 @@
-/*
- * MStar MSG21XX touchscreen driver
- *
- * Copyright (c) 2006-2012 MStar Semiconductor, Inc.
- *
- * Copyright (C) 2012 Bruce Ding <bruce.ding@mstarsemi.com>
- *
- * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- */
-
-#include <linux/module.h>
-#include <linux/kernel.h>
-#include <linux/input.h>
-#include <linux/input/mt.h>
-#include <linux/interrupt.h>
-#include <linux/i2c.h>
-#include <linux/gpio.h>
-#include <linux/of_gpio.h>
-#include <linux/sysfs.h>
-#include <linux/init.h>
-#include <linux/mutex.h>
-#include <linux/delay.h>
-#include <linux/firmware.h>
-#include <linux/debugfs.h>
-#include <linux/regulator/consumer.h>
-
-#if defined(CONFIG_FB)
-#include <linux/notifier.h>
-#include <linux/fb.h>
-#endif
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-#include <linux/input/vir_ps.h>
-#endif
-
-/* Macro Definition */
-
-#define TOUCH_DRIVER_DEBUG 0
-#if (TOUCH_DRIVER_DEBUG == 1)
-#define DBG(fmt, arg...) pr_info(fmt, ##arg)
-#else
-#define DBG(fmt, arg...)
-#endif
-
-/* Constant Value & Variable Definition */
-
-#define MSTAR_VTG_MIN_UV 2800000
-#define MSTAR_VTG_MAX_UV 3300000
-#define MSTAR_I2C_VTG_MIN_UV 1800000
-#define MSTAR_I2C_VTG_MAX_UV 1800000
-
-#define MAX_BUTTONS 4
-#define FT_COORDS_ARR_SIZE 4
-#define MSTAR_FW_NAME_MAX_LEN 50
-
-#define MSTAR_CHIPTOP_REGISTER_BANK 0x1E
-#define MSTAR_CHIPTOP_REGISTER_ICTYPE 0xCC
-#define MSTAR_INIT_SW_ID 0x7FF
-#define MSTAR_DEBUG_DIR_NAME "ts_debug"
-
-#define MSG_FW_FILE_MAJOR_VERSION(x) \
- (((x)->data[0x7f4f] << 8) + ((x)->data[0x7f4e]))
-
-#define MSG_FW_FILE_MINOR_VERSION(x) \
- (((x)->data[0x7f51] << 8) + ((x)->data[0x7f50]))
-
-/*
- * Note.
- * Please do not change the below setting.
- */
-#define TPD_WIDTH (2048)
-#define TPD_HEIGHT (2048)
-
-#ifdef FIRMWARE_AUTOUPDATE
-enum {
- SWID_START = 1,
- SWID_TRULY = SWID_START,
- SWID_NULL,
-};
-
-static unsigned char MSG_FIRMWARE[1][33*1024] = { {
- #include "msg21xx_truly_update_bin.h"
- }
-};
-#endif
-
-#define CONFIG_TP_HAVE_KEY
-#define PINCTRL_STATE_ACTIVE "pmx_ts_active"
-#define PINCTRL_STATE_SUSPEND "pmx_ts_suspend"
-#define PINCTRL_STATE_RELEASE "pmx_ts_release"
-
-#define SLAVE_I2C_ID_DBBUS (0xC4>>1)
-
-#define DEMO_MODE_PACKET_LENGTH (8)
-
-#define TP_PRINT
-
-static char *fw_version; /* customer firmware version */
-static unsigned short fw_version_major;
-static unsigned short fw_version_minor;
-static unsigned char temp[94][1024];
-static unsigned int crc32_table[256];
-
-static unsigned short fw_file_major, fw_file_minor;
-static unsigned short main_sw_id = MSTAR_INIT_SW_ID;
-static unsigned short info_sw_id = MSTAR_INIT_SW_ID;
-static unsigned int bin_conf_crc32;
-
-struct msg21xx_ts_platform_data {
- const char *name;
- char fw_name[MSTAR_FW_NAME_MAX_LEN];
- u8 fw_version_major;
- u8 fw_version_minor;
- u32 irq_gpio;
- u32 irq_gpio_flags;
- u32 reset_gpio;
- u32 reset_gpio_flags;
- u32 x_max;
- u32 y_max;
- u32 x_min;
- u32 y_min;
- u32 panel_minx;
- u32 panel_miny;
- u32 panel_maxx;
- u32 panel_maxy;
- u32 num_max_touches;
- bool no_force_update;
- bool i2c_pull_up;
- bool ignore_id_check;
- int (*power_init)(bool);
- int (*power_on)(bool);
- int (*power_init)(bool);
- int (*power_on)(bool);
- u8 ic_type;
- u32 button_map[MAX_BUTTONS];
- u32 num_buttons;
- u32 hard_reset_delay_ms;
- u32 post_hard_reset_delay_ms;
- bool updating_fw;
-};
-
-/* Touch Data Type Definition */
-struct touchPoint_t {
- unsigned short x;
- unsigned short y;
-};
-
-struct touchInfo_t {
- struct touchPoint_t *point;
- unsigned char count;
- unsigned char keycode;
-};
-
-struct msg21xx_ts_data {
- struct i2c_client *client;
- struct input_dev *input_dev;
- struct msg21xx_ts_platform_data *pdata;
- struct regulator *vdd;
- struct regulator *vcc_i2c;
- bool suspended;
-#if defined(CONFIG_FB)
- struct notifier_block fb_notif;
-#endif
- struct pinctrl *ts_pinctrl;
- struct pinctrl_state *pinctrl_state_active;
- struct pinctrl_state *pinctrl_state_suspend;
- struct pinctrl_state *pinctrl_state_release;
- struct mutex ts_mutex;
- struct touchInfo_t info;
-};
-
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data);
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-static unsigned char bEnableTpProximity;
-static unsigned char bFaceClosingTp;
-#endif
-
-#ifdef TP_PRINT
-static int tp_print_proc_read(struct msg21xx_ts_data *ts_data);
-static void tp_print_create_entry(struct msg21xx_ts_data *ts_data);
-#endif
-
-static void _ReadBinConfig(struct msg21xx_ts_data *ts_data);
-static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data);
-
-static struct mutex msg21xx_mutex;
-
-enum EMEM_TYPE_t {
- EMEM_ALL = 0,
- EMEM_MAIN,
- EMEM_INFO,
-};
-
-/* Function Definition */
-
-static unsigned int _CRC_doReflect(unsigned int ref, signed char ch)
-{
- unsigned int value = 0;
- unsigned int i = 0;
-
- for (i = 1; i < (ch + 1); i++) {
- if (ref & 1)
- value |= 1 << (ch - i);
- ref >>= 1;
- }
-
- return value;
-}
-
-static unsigned int _CRC_getValue(unsigned int text, unsigned int prevCRC)
-{
- unsigned int ulCRC = prevCRC;
-
- ulCRC = (ulCRC >> 8) ^ crc32_table[(ulCRC & 0xFF) ^ text];
-
- return ulCRC;
-}
-
-static void _CRC_initTable(void)
-{
- unsigned int magic_number = 0x04c11db7;
- unsigned int i, j;
-
- for (i = 0; i <= 0xFF; i++) {
- crc32_table[i] = _CRC_doReflect(i, 8) << 24;
- for (j = 0; j < 8; j++)
- crc32_table[i] = (crc32_table[i] << 1) ^
- (crc32_table[i] & (0x80000000L) ?
- magic_number : 0);
- crc32_table[i] = _CRC_doReflect(crc32_table[i], 32);
- }
-}
-
-static void msg21xx_reset_hw(struct msg21xx_ts_platform_data *pdata)
-{
- gpio_direction_output(pdata->reset_gpio, 1);
- gpio_set_value_cansleep(pdata->reset_gpio, 0);
- /* Note that the RST must be in LOW 10ms at least */
- usleep(pdata->hard_reset_delay_ms * 1000);
- gpio_set_value_cansleep(pdata->reset_gpio, 1);
- /* Enable the interrupt service thread/routine for INT after 50ms */
- usleep(pdata->post_hard_reset_delay_ms * 1000);
-}
-
-static int read_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr,
- unsigned char *buf, unsigned short size)
-{
- int rc = 0;
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- .flags = I2C_M_RD, /* read flag */
- .len = size,
- .buf = buf,
- },
- };
-
- /* If everything went ok (i.e. 1 msg transmitted), return #bytes
- * transmitted, else error code.
- */
- if (ts_data->client != NULL) {
- rc = i2c_transfer(ts_data->client->adapter, msgs, 1);
- if (rc < 0)
- dev_err(&ts_data->client->dev,
- "%s error %d\n", __func__, rc);
- } else {
- dev_err(&ts_data->client->dev, "ts_data->client is NULL\n");
- }
-
- return rc;
-}
-
-static int write_i2c_seq(struct msg21xx_ts_data *ts_data, unsigned char addr,
- unsigned char *buf, unsigned short size)
-{
- int rc = 0;
- struct i2c_msg msgs[] = {
- {
- .addr = addr,
- /*
- * if read flag is undefined,
- * then it means write flag.
- */
- .flags = 0,
- .len = size,
- .buf = buf,
- },
- };
-
- /*
- * If everything went ok (i.e. 1 msg transmitted), return #bytes
- * transmitted, else error code.
- */
- if (ts_data->client != NULL) {
- rc = i2c_transfer(ts_data->client->adapter, msgs, 1);
- if (rc < 0)
- dev_err(&ts_data->client->dev,
- "%s error %d\n", __func__, rc);
- } else {
- dev_err(&ts_data->client->dev, "ts_data->client is NULL\n");
- }
-
- return rc;
-}
-
-static unsigned short read_reg(struct msg21xx_ts_data *ts_data,
- unsigned char bank, unsigned char addr)
-{
- unsigned char tx_data[3] = {0x10, bank, addr};
- unsigned char rx_data[2] = {0};
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
- read_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, rx_data, sizeof(rx_data));
-
- return rx_data[1] << 8 | rx_data[0];
-}
-
-static void write_reg(struct msg21xx_ts_data *ts_data, unsigned char bank,
- unsigned char addr,
- unsigned short data)
-{
- unsigned char tx_data[5] = {0x10, bank, addr, data & 0xFF, data >> 8};
-
- write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 5);
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
-}
-
-static void write_reg_8bit(struct msg21xx_ts_data *ts_data, unsigned char bank,
- unsigned char addr,
- unsigned char data)
-{
- unsigned char tx_data[4] = {0x10, bank, addr, data};
-
- write_i2c_seq(SLAVE_I2C_ID_DBBUS, &tx_data[0], 4);
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, tx_data, sizeof(tx_data));
-}
-
-static void dbbusDWIICEnterSerialDebugMode(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[5];
-
- /* Enter the Serial Debug Mode */
- data[0] = 0x53;
- data[1] = 0x45;
- data[2] = 0x52;
- data[3] = 0x44;
- data[4] = 0x42;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICStopMCU(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[1];
-
- /* Stop the MCU */
- data[0] = 0x37;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICIICUseBus(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[1];
-
- /* IIC Use Bus */
- data[0] = 0x35;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static void dbbusDWIICIICReshape(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[1];
-
- /* IIC Re-shape */
- data[0] = 0x71;
-
- write_i2c_seq(ts_data, SLAVE_I2C_ID_DBBUS, data, sizeof(data));
-}
-
-static unsigned char msg21xx_get_ic_type(struct msg21xx_ts_data *ts_data)
-{
- unsigned char ic_type = 0;
- unsigned char bank;
- unsigned char addr;
-
- msg21xx_reset_hw(ts_data->pdata);
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(300);
-
- /* stop mcu */
- write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01);
- /* disable watch dog */
- write_reg(ts_data, 0x3C, 0x60, 0xAA55);
- /* get ic type */
- bank = MSTAR_CHIPTOP_REGISTER_BANK;
- addr = MSTAR_CHIPTOP_REGISTER_ICTYPE;
- ic_type = (0xff)&(read_reg(ts_data, bank, addr));
-
- if (ic_type != ts_data->pdata->ic_type)
- ic_type = 0;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- return ic_type;
-}
-
-static int msg21xx_read_firmware_id(struct msg21xx_ts_data *ts_data)
-{
- unsigned char command[3] = { 0x53, 0x00, 0x2A};
- unsigned char response[4] = { 0 };
-
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr, command, sizeof(command));
- read_i2c_seq(ts_data, ts_data->client->addr, response,
- sizeof(response));
- mutex_unlock(&msg21xx_mutex);
- ts_data->pdata->fw_version_major = (response[1]<<8) + response[0];
- ts_data->pdata->fw_version_minor = (response[3]<<8) + response[2];
-
- dev_info(&ts_data->client->dev, "major num = %d, minor num = %d\n",
- ts_data->pdata->fw_version_major,
- ts_data->pdata->fw_version_minor);
-
- return 0;
-}
-
-static int firmware_erase_c33(struct msg21xx_ts_data *ts_data,
- enum EMEM_TYPE_t emem_type)
-{
- /* stop mcu */
- write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
- /* disable watch dog */
- write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
- write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
- /* set PROGRAM password */
- write_reg_8bit(ts_data, 0x16, 0x1A, 0xBA);
- write_reg_8bit(ts_data, 0x16, 0x1B, 0xAB);
-
- write_reg_8bit(ts_data, 0x16, 0x18, 0x80);
-
- if (emem_type == EMEM_ALL)
- write_reg_8bit(ts_data, 0x16, 0x08, 0x10);
-
- write_reg_8bit(ts_data, 0x16, 0x18, 0x40);
- msleep(20);
-
- /* clear pce */
- write_reg_8bit(0x16, 0x18, 0x80);
-
- /* erase trigger */
- if (emem_type == EMEM_MAIN)
- write_reg_8bit(ts_data, 0x16, 0x0E, 0x04); /* erase main */
- else
- write_reg_8bit(0x16, 0x0E, 0x08); /* erase all block */
-
- return 1;
-}
-
-static void _ReadBinConfig(void);
-static unsigned int _CalMainCRC32(void);
-
-static int check_fw_update(void)
-{
- int ret = 0;
-
- msg21xx_read_firmware_id();
- _ReadBinConfig();
- if (main_sw_id == info_sw_id) {
- if (_CalMainCRC32() == bin_conf_crc32) {
- /*check upgrading*/
- if ((update_bin_major == pdata->fw_version_major) &&
- (update_bin_minor > pdata->fw_version_minor)) {
- ret = 1;
- }
- }
- }
- return ret;
- write_reg_8bit(ts_data, 0x16, 0x0E, 0x08); /* erase all block */
-
- return 0;
-}
-
-static ssize_t firmware_update_c33(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size,
- enum EMEM_TYPE_t emem_type,
- bool isForce) {
- unsigned int i, j;
- unsigned int crc_main, crc_main_tp;
- unsigned int crc_info, crc_info_tp;
- unsigned short reg_data = 0;
- int update_pass = 1;
- bool fw_upgrade = false;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- crc_main = 0xffffffff;
- crc_info = 0xffffffff;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- msg21xx_read_firmware_id(ts_data);
- _ReadBinConfig(ts_data);
- if ((main_sw_id == info_sw_id) &&
- (_CalMainCRC32(ts_data) == bin_conf_crc32) &&
- (fw_file_major == ts_data->pdata->fw_version_major) &&
- (fw_file_minor > ts_data->pdata->fw_version_minor)) {
- fw_upgrade = true;
- }
-
- if (!fw_upgrade && !isForce) {
- dev_dbg(dev, "no need to update\n");
- msg21xx_reset_hw(ts_data->pdata);
- return size;
- }
- msg21xx_reset_hw(ts_data->pdata);
- msleep(300);
-
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(300);
-
- /* erase main */
- firmware_erase_c33(ts_data, EMEM_MAIN);
- msleep(1000);
-
- msg21xx_reset_hw(ts_data->pdata);
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(300);
- /*
- * Program
- */
- /* polling 0x3CE4 is 0x1C70 */
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x1C70);
- }
-
- switch (emem_type) {
- case EMEM_ALL:
- write_reg(ts_data, 0x3C, 0xE4, 0xE38F); /* for all-blocks */
- break;
- case EMEM_MAIN:
- write_reg(ts_data, 0x3C, 0xE4, 0x7731); /* for main block */
- break;
- case EMEM_INFO:
- write_reg(ts_data, 0x3C, 0xE4, 0x7731); /* for info block */
-
- write_reg_8bit(ts_data, 0x0F, 0xE6, 0x01);
-
- write_reg_8bit(ts_data, 0x3C, 0xE4, 0xC5);
- write_reg_8bit(ts_data, 0x3C, 0xE5, 0x78);
-
- write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
- 0x04, 0x9F);
- write_reg_8bit(ts_data, MSTAR_CHIPTOP_REGISTER_BANK,
- 0x05, 0x82);
-
- write_reg_8bit(ts_data, 0x0F, 0xE6, 0x00);
- msleep(100);
- break;
- }
-
- /* polling 0x3CE4 is 0x2F43 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x2F43);
-
- /* calculate CRC 32 */
- _CRC_initTable();
-
- /* total 32 KB : 2 byte per R/W */
- for (i = 0; i < 32; i++) {
- if (i == 31) {
- fw_bin_data[i][1014] = 0x5A;
- fw_bin_data[i][1015] = 0xA5;
-
- for (j = 0; j < 1016; j++)
- crc_main = _CRC_getValue(fw_bin_data[i][j],
- crc_main);
- } else {
- for (j = 0; j < 1024; j++)
- crc_main = _CRC_getValue(fw_bin_data[i][j],
- crc_main);
- }
-
- for (j = 0; j < 8; j++)
- write_i2c_seq(ts_data, ts_data->client->addr,
- &fw_bin_data[i][j * 128], 128);
- msleep(100);
-
- /* polling 0x3CE4 is 0xD0BC */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0xD0BC);
-
- write_reg(ts_data, 0x3C, 0xE4, 0x2F43);
- }
-
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- /* write file done and check crc */
- write_reg(ts_data, 0x3C, 0xE4, 0x1380);
- }
- msleep(20);
-
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- /* polling 0x3CE4 is 0x9432 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x9432);
- }
-
- crc_main = crc_main ^ 0xffffffff;
- crc_info = crc_info ^ 0xffffffff;
-
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- /* CRC Main from TP */
- crc_main_tp = read_reg(ts_data, 0x3C, 0x80);
- crc_main_tp = (crc_main_tp << 16) |
- read_reg(ts_data, 0x3C, 0x82);
-
- /* CRC Info from TP */
- crc_info_tp = read_reg(ts_data, 0x3C, 0xA0);
- crc_info_tp = (crc_info_tp << 16) |
- read_reg(ts_data, 0x3C, 0xA2);
- }
-
- update_pass = 1;
- if ((emem_type == EMEM_ALL) || (emem_type == EMEM_MAIN)) {
- if (crc_main_tp != crc_main)
- update_pass = 0;
- }
-
- if (!update_pass) {
- dev_err(dev, "update_C33 failed\n");
- msg21xx_reset_hw(ts_data->pdata);
- return 0;
- }
-
- dev_dbg(dev, "update_C33 OK\n");
- msg21xx_reset_hw(ts_data->pdata);
- return size;
-}
-
-static unsigned int _CalMainCRC32(struct msg21xx_ts_data *ts_data)
-{
- unsigned int ret = 0;
- unsigned short reg_data = 0;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(100);
-
- /* Stop MCU */
- write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
- /* Stop Watchdog */
- write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
- write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
- /* cmd */
- write_reg(ts_data, 0x3C, 0xE4, 0xDF4C);
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60);
- /* TP SW reset */
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F);
-
- /* MCU run */
- write_reg(ts_data, 0x0F, 0xE6, 0x0000);
-
- /* polling 0x3CE4 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x9432);
-
- /* Cal CRC Main from TP */
- ret = read_reg(ts_data, 0x3C, 0x80);
- ret = (ret << 16) | read_reg(ts_data, 0x3C, 0x82);
-
- dev_dbg(&ts_data->client->dev,
- "[21xxA]:Current main crc32=0x%x\n", ret);
- return ret;
-}
-
-static void _ReadBinConfig(struct msg21xx_ts_data *ts_data)
-{
- unsigned char dbbus_tx_data[5] = {0};
- unsigned char dbbus_rx_data[4] = {0};
- unsigned short reg_data = 0;
-
- msg21xx_reset_hw(ts_data->pdata);
-
- dbbusDWIICEnterSerialDebugMode(ts_data);
- dbbusDWIICStopMCU(ts_data);
- dbbusDWIICIICUseBus(ts_data);
- dbbusDWIICIICReshape(ts_data);
- msleep(100);
-
- /* Stop MCU */
- write_reg(ts_data, 0x0F, 0xE6, 0x0001);
-
- /* Stop Watchdog */
- write_reg_8bit(ts_data, 0x3C, 0x60, 0x55);
- write_reg_8bit(ts_data, 0x3C, 0x61, 0xAA);
-
- /* cmd */
- write_reg(ts_data, 0x3C, 0xE4, 0xA4AB);
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x7d60);
-
- /* TP SW reset */
- write_reg(ts_data, MSTAR_CHIPTOP_REGISTER_BANK, 0x04, 0x829F);
-
- /* MCU run */
- write_reg(ts_data, 0x0F, 0xE6, 0x0000);
-
- /* polling 0x3CE4 */
- do {
- reg_data = read_reg(ts_data, 0x3C, 0xE4);
- } while (reg_data != 0x5B58);
-
- dbbus_tx_data[0] = 0x72;
- dbbus_tx_data[1] = 0x7F;
- dbbus_tx_data[2] = 0x55;
- dbbus_tx_data[3] = 0x00;
- dbbus_tx_data[4] = 0x04;
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39)
- && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39)
- && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) {
- main_sw_id = (dbbus_rx_data[0] - 0x30) * 100 +
- (dbbus_rx_data[1] - 0x30) * 10 +
- (dbbus_rx_data[2] - 0x30);
- }
-
- dbbus_tx_data[0] = 0x72;
- dbbus_tx_data[1] = 0x7F;
- dbbus_tx_data[2] = 0xFC;
- dbbus_tx_data[3] = 0x00;
- dbbus_tx_data[4] = 0x04;
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- bin_conf_crc32 = (dbbus_rx_data[0] << 24) |
- (dbbus_rx_data[1] << 16) |
- (dbbus_rx_data[2] << 8) |
- (dbbus_rx_data[3]);
-
- dbbus_tx_data[0] = 0x72;
- dbbus_tx_data[1] = 0x83;
- dbbus_tx_data[2] = 0x00;
- dbbus_tx_data[3] = 0x00;
- dbbus_tx_data[4] = 0x04;
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 5);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- if ((dbbus_rx_data[0] >= 0x30 && dbbus_rx_data[0] <= 0x39)
- && (dbbus_rx_data[1] >= 0x30 && dbbus_rx_data[1] <= 0x39)
- && (dbbus_rx_data[2] >= 0x31 && dbbus_rx_data[2] <= 0x39)) {
- info_sw_id = (dbbus_rx_data[0] - 0x30) * 100 +
- (dbbus_rx_data[1] - 0x30) * 10 +
- (dbbus_rx_data[2] - 0x30);
- }
-
- dev_dbg(&ts_data->client->dev,
- "[21xxA]:main_sw_id = %d, info_sw_id = %d, bin_conf_crc32 = 0x%x\n",
- main_sw_id, info_sw_id, bin_conf_crc32);
-}
-
-static ssize_t firmware_update_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- return snprintf(buf, 3, "%d\n", ts_data->pdata->updating_fw);
-}
-
-static ssize_t firmware_update_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ts_data->pdata->updating_fw = true;
- disable_irq(ts_data->client->irq);
-
- size = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false);
-
- enable_irq(ts_data->client->irq);
- ts_data->pdata->updating_fw = false;
-
- return size;
-}
-
-static DEVICE_ATTR(update, (S_IRUGO | S_IWUSR),
- firmware_update_show,
- firmware_update_store);
-
-static int prepare_fw_data(struct device *dev)
-{
- int count;
- int i;
- int ret;
- const struct firmware *fw = NULL;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ret = request_firmware(&fw, ts_data->pdata->fw_name, dev);
- if (ret < 0) {
- dev_err(dev, "Request firmware failed - %s (%d)\n",
- ts_data->pdata->fw_name, ret);
- return ret;
- }
-
- count = fw->size / 1024;
-
- for (i = 0; i < count; i++)
- memcpy(fw_bin_data[i], fw->data + (i * 1024), 1024);
-
- fw_file_major = MSG_FW_FILE_MAJOR_VERSION(fw);
- fw_file_minor = MSG_FW_FILE_MINOR_VERSION(fw);
- dev_dbg(dev, "New firmware: %d.%d",
- fw_file_major, fw_file_minor);
-
- return fw->size;
-}
-
-static ssize_t firmware_update_smart_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- int ret;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ret = prepare_fw_data(dev);
- if (ret < 0) {
- dev_err(dev, "Request firmware failed -(%d)\n", ret);
- return ret;
- }
- ts_data->pdata->updating_fw = true;
- disable_irq(ts_data->client->irq);
-
- ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, false);
- if (ret == 0)
- dev_err(dev, "firmware_update_c33 ret = %d\n", ret);
-
- enable_irq(ts_data->client->irq);
- ts_data->pdata->updating_fw = false;
-
- return ret;
-}
-
-static ssize_t firmware_force_update_smart_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- int ret;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- ret = prepare_fw_data(dev);
- if (ret < 0) {
- dev_err(dev, "Request firmware failed -(%d)\n", ret);
- return ret;
- }
- ts_data->pdata->updating_fw = true;
- disable_irq(ts_data->client->irq);
-
- ret = firmware_update_c33(dev, attr, buf, size, EMEM_MAIN, true);
- if (ret == 0)
- dev_err(dev, "firmware_update_c33 et = %d\n", ret);
-
- enable_irq(ts_data->client->irq);
- ts_data->pdata->updating_fw = false;
-
- return ret;
-}
-
-static DEVICE_ATTR(update_fw, (S_IRUGO | S_IWUSR),
- firmware_update_show,
- firmware_update_smart_store);
-
-static DEVICE_ATTR(force_update_fw, (S_IRUGO | S_IWUSR),
- firmware_update_show,
- firmware_force_update_smart_store);
-
-static ssize_t firmware_version_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- msg21xx_read_firmware_id(ts_data);
- return snprintf(buf, sizeof(char) * 8, "%03d%03d\n",
- ts_data->pdata->fw_version_major,
- ts_data->pdata->fw_version_minor);
-}
-
-static DEVICE_ATTR(version, S_IRUGO,
- firmware_version_show,
- NULL);
-
-
-static ssize_t msg21xx_fw_name_show(struct device *dev,
- struct device_attribute *attr, char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- return snprintf(buf, MSTAR_FW_NAME_MAX_LEN - 1,
- "%s\n", ts_data->pdata->fw_name);
-}
-
-static ssize_t msg21xx_fw_name_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- if (size > MSTAR_FW_NAME_MAX_LEN - 1)
- return -EINVAL;
-
- strlcpy(ts_data->pdata->fw_name, buf, size);
- if (ts_data->pdata->fw_name[size - 1] == '\n')
- ts_data->pdata->fw_name[size - 1] = 0;
-
- return size;
-}
-
-static DEVICE_ATTR(fw_name, (S_IRUGO | S_IWUSR),
- msg21xx_fw_name_show, msg21xx_fw_name_store);
-
-static ssize_t firmware_data_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- int count = size / 1024;
- int i;
-
- for (i = 0; i < count; i++)
- memcpy(fw_bin_data[i], buf + (i * 1024), 1024);
-
- if (buf != NULL)
- dev_dbg(dev, "buf[0] = %c\n", buf[0]);
-
- return size;
-}
-
-static DEVICE_ATTR(data, S_IWUSR, NULL, firmware_data_store);
-
-static ssize_t tp_print_show(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- tp_print_proc_read(ts_data);
-
- return snprintf(buf, 3, "%d\n", ts_data->suspended);
-}
-
-static ssize_t tp_print_store(struct device *dev,
- struct device_attribute *attr,
- const char *buf,
- size_t size)
-{
- return size;
-}
-
-static DEVICE_ATTR(tpp, (S_IRUGO | S_IWUSR),
- tp_print_show, tp_print_store);
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
-static void _msg_enable_proximity(void)
-{
- unsigned char tx_data[4] = {0};
-
- tx_data[0] = 0x52;
- tx_data[1] = 0x00;
- tx_data[2] = 0x47;
- tx_data[3] = 0xa0;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
- mutex_unlock(&msg21xx_mutex);
-
- bEnableTpProximity = 1;
-}
-
-static void _msg_disable_proximity(void)
-{
- unsigned char tx_data[4] = {0};
-
- tx_data[0] = 0x52;
- tx_data[1] = 0x00;
- tx_data[2] = 0x47;
- tx_data[3] = 0xa1;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data->client->addr, &tx_data[0], 4);
- mutex_unlock(&msg21xx_mutex);
-
- bEnableTpProximity = 0;
- bFaceClosingTp = 0;
-}
-
-static void tsps_msg21xx_enable(int en)
-{
- if (en)
- _msg_enable_proximity();
- else
- _msg_disable_proximity();
-}
-
-static int tsps_msg21xx_data(void)
-{
- return bFaceClosingTp;
-}
-#endif
-
-static int msg21xx_pinctrl_init(struct msg21xx_ts_data *ts_data)
-{
- int retval;
-
- /* Get pinctrl if target uses pinctrl */
- ts_data->ts_pinctrl = devm_pinctrl_get(&(ts_data->client->dev));
- if (IS_ERR_OR_NULL(ts_data->ts_pinctrl)) {
- retval = PTR_ERR(ts_data->ts_pinctrl);
- dev_dbg(&ts_data->client->dev,
- "Target does not use pinctrl %d\n", retval);
- goto err_pinctrl_get;
- }
-
- ts_data->pinctrl_state_active = pinctrl_lookup_state(
- ts_data->ts_pinctrl, PINCTRL_STATE_ACTIVE);
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_active)) {
- retval = PTR_ERR(ts_data->pinctrl_state_active);
- dev_dbg(&ts_data->client->dev,
- "Can't lookup %s pinstate %d\n",
- PINCTRL_STATE_ACTIVE, retval);
- goto err_pinctrl_lookup;
- }
-
- ts_data->pinctrl_state_suspend = pinctrl_lookup_state(
- ts_data->ts_pinctrl, PINCTRL_STATE_SUSPEND);
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_suspend)) {
- retval = PTR_ERR(ts_data->pinctrl_state_suspend);
- dev_dbg(&ts_data->client->dev,
- "Can't lookup %s pinstate %d\n",
- PINCTRL_STATE_SUSPEND, retval);
- goto err_pinctrl_lookup;
- }
-
- ts_data->pinctrl_state_release = pinctrl_lookup_state(
- ts_data->ts_pinctrl, PINCTRL_STATE_RELEASE);
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
- retval = PTR_ERR(ts_data->pinctrl_state_release);
- dev_dbg(&ts_data->client->dev,
- "Can't lookup %s pinstate %d\n",
- PINCTRL_STATE_RELEASE, retval);
- }
-
- return 0;
-
-err_pinctrl_lookup:
- devm_pinctrl_put(ts_data->ts_pinctrl);
-err_pinctrl_get:
- ts_data->ts_pinctrl = NULL;
- return retval;
-}
-
-static unsigned char calculate_checksum(unsigned char *msg, int length)
-{
- int checksum = 0, i;
-
- for (i = 0; i < length; i++)
- checksum += msg[i];
-
- return (unsigned char)((-checksum) & 0xFF);
-}
-
-static int parse_info(struct msg21xx_ts_data *ts_data)
-{
- unsigned char data[DEMO_MODE_PACKET_LENGTH] = {0};
- unsigned char checksum = 0;
- unsigned int x = 0, y = 0;
- unsigned int x2 = 0, y2 = 0;
- unsigned int delta_x = 0, delta_y = 0;
-
- mutex_lock(&msg21xx_mutex);
- read_i2c_seq(ts_data, ts_data->client->addr, &data[0],
- DEMO_MODE_PACKET_LENGTH);
- mutex_unlock(&msg21xx_mutex);
- checksum = calculate_checksum(&data[0], (DEMO_MODE_PACKET_LENGTH-1));
- dev_dbg(&ts_data->client->dev, "check sum: [%x] == [%x]?\n",
- data[DEMO_MODE_PACKET_LENGTH-1], checksum);
-
- if (data[DEMO_MODE_PACKET_LENGTH-1] != checksum) {
- dev_err(&ts_data->client->dev, "WRONG CHECKSUM\n");
- return -EINVAL;
- }
-
- if (data[0] != 0x52) {
- dev_err(&ts_data->client->dev, "WRONG HEADER\n");
- return -EINVAL;
- }
-
- ts_data->info.keycode = 0xFF;
- if ((data[1] == 0xFF) && (data[2] == 0xFF) &&
- (data[3] == 0xFF) && (data[4] == 0xFF) &&
- (data[6] == 0xFF)) {
- if ((data[5] == 0xFF) || (data[5] == 0)) {
- ts_data->info.keycode = 0xFF;
- } else if ((data[5] == 1) || (data[5] == 2) ||
- (data[5] == 4) || (data[5] == 8)) {
- ts_data->info.keycode = data[5] >> 1;
-
- dev_dbg(&ts_data->client->dev,
- "ts_data->info.keycode index %d\n",
- ts_data->info.keycode);
- }
- #ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
- else if (bEnableTpProximity && ((data[5] == 0x80) ||
- (data[5] == 0x40))) {
- if (data[5] == 0x80)
- bFaceClosingTp = 1;
- else if (data[5] == 0x40)
- bFaceClosingTp = 0;
-
- return -EINVAL;
- }
- #endif
- else {
- dev_err(&ts_data->client->dev, "WRONG KEY\n");
- return -EINVAL;
- }
- } else {
- x = (((data[1] & 0xF0) << 4) | data[2]);
- y = (((data[1] & 0x0F) << 8) | data[3]);
- delta_x = (((data[4] & 0xF0) << 4) | data[5]);
- delta_y = (((data[4] & 0x0F) << 8) | data[6]);
-
- if ((delta_x == 0) && (delta_y == 0)) {
- ts_data->info.point[0].x =
- x * ts_data->pdata->x_max / TPD_WIDTH;
- ts_data->info.point[0].y =
- y * ts_data->pdata->y_max / TPD_HEIGHT;
- ts_data->info.count = 1;
- } else {
- if (delta_x > 2048)
- delta_x -= 4096;
-
- if (delta_y > 2048)
- delta_y -= 4096;
-
- x2 = (unsigned int)((signed short)x +
- (signed short)delta_x);
- y2 = (unsigned int)((signed short)y +
- (signed short)delta_y);
- ts_data->info.point[0].x =
- x * ts_data->pdata->x_max / TPD_WIDTH;
- ts_data->info.point[0].y =
- y * ts_data->pdata->y_max / TPD_HEIGHT;
- ts_data->info.point[1].x =
- x2 * ts_data->pdata->x_max / TPD_WIDTH;
- ts_data->info.point[1].y =
- y2 * ts_data->pdata->y_max / TPD_HEIGHT;
- ts_data->info.count = ts_data->pdata->num_max_touches;
- }
- }
-
- return 0;
-}
-
-static void touch_driver_touch_released(struct msg21xx_ts_data *ts_data)
-{
- int i;
-
- for (i = 0; i < ts_data->pdata->num_max_touches; i++) {
- input_mt_slot(ts_data->input_dev, i);
- input_mt_report_slot_state(ts_data->input_dev,
- MT_TOOL_FINGER, 0);
- }
-
- input_report_key(ts_data->input_dev, BTN_TOUCH, 0);
- input_report_key(ts_data->input_dev, BTN_TOOL_FINGER, 0);
- input_sync(ts_data->input_dev);
-}
-
-/* read data through I2C then report data to input
- *sub-system when interrupt occurred
- */
-static irqreturn_t msg21xx_ts_interrupt(int irq, void *dev_id)
-{
- int i = 0;
- static int last_keycode = 0xFF;
- static int last_count;
- struct msg21xx_ts_data *ts_data = dev_id;
-
- ts_data->info.count = 0;
- if (parse_info(ts_data) == 0) {
- if (ts_data->info.keycode != 0xFF) { /* key touch pressed */
- if (ts_data->info.keycode <
- ts_data->pdata->num_buttons) {
- if (ts_data->info.keycode != last_keycode) {
- dev_dbg(&ts_data->client->dev,
- "key touch pressed");
-
- input_report_key(ts_data->input_dev,
- BTN_TOUCH, 1);
- input_report_key(ts_data->input_dev,
- ts_data->pdata->button_map[
- ts_data->info.keycode], 1);
-
- last_keycode = ts_data->info.keycode;
- } else {
- /* pass duplicate key-pressing */
- dev_dbg(&ts_data->client->dev,
- "REPEATED KEY\n");
- }
- } else {
- dev_dbg(&ts_data->client->dev, "WRONG KEY\n");
- }
- } else { /* key touch released */
- if (last_keycode != 0xFF) {
- dev_dbg(&ts_data->client->dev, "key touch released");
-
- input_report_key(ts_data->input_dev,
- BTN_TOUCH, 0);
- input_report_key(ts_data->input_dev,
- ts_data->pdata->button_map[last_keycode],
- 0);
-
- last_keycode = 0xFF;
- }
- }
-
- if (ts_data->info.count > 0) { /* point touch pressed */
- for (i = 0; i < ts_data->info.count; i++) {
- input_mt_slot(ts_data->input_dev, i);
- input_mt_report_slot_state(ts_data->input_dev,
- MT_TOOL_FINGER, 1);
- input_report_abs(ts_data->input_dev,
- ABS_MT_TOUCH_MAJOR, 1);
- input_report_abs(ts_data->input_dev,
- ABS_MT_POSITION_X,
- ts_data->info.point[i].x);
- input_report_abs(ts_data->input_dev,
- ABS_MT_POSITION_Y,
- ts_data->info.point[i].y);
- }
- }
-
- if (last_count > info.count) {
- for (i = info.count; i < MAX_TOUCH_NUM; i++) {
- input_mt_slot(input_dev, i);
- input_mt_report_slot_state(input_dev,
- }
-
- if (last_count > ts_data->info.count) {
- for (i = ts_data->info.count;
- i < ts_data->pdata->num_max_touches;
- i++) {
- input_mt_slot(ts_data->input_dev, i);
- input_mt_report_slot_state(ts_data->input_dev,
- MT_TOOL_FINGER, 0);
- }
- }
- last_count = ts_data->info.count;
-
- input_report_key(ts_data->input_dev, BTN_TOUCH,
- ts_data->info.count > 0);
- input_report_key(ts_data->input_dev, BTN_TOOL_FINGER,
- ts_data->info.count > 0);
-
- input_sync(ts_data->input_dev);
- }
-
- return IRQ_HANDLED;
-}
-
-static int msg21xx_ts_power_init(struct msg21xx_ts_data *ts_data, bool init)
-{
- int rc;
-
- if (init) {
- ts_data->vdd = regulator_get(&ts_data->client->dev,
- "vdd");
- if (IS_ERR(ts_data->vdd)) {
- rc = PTR_ERR(ts_data->vdd);
- dev_err(&ts_data->client->dev,
- "Regulator get failed vdd rc=%d\n", rc);
- return rc;
- }
-
- if (regulator_count_voltages(ts_data->vdd) > 0) {
- rc = regulator_set_voltage(ts_data->vdd,
- MSTAR_VTG_MIN_UV,
- MSTAR_VTG_MAX_UV);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator set_vtg failed vdd rc=%d\n",
- rc);
- goto reg_vdd_put;
- }
- }
-
- ts_data->vcc_i2c = regulator_get(&ts_data->client->dev,
- "vcc_i2c");
- if (IS_ERR(ts_data->vcc_i2c)) {
- rc = PTR_ERR(ts_data->vcc_i2c);
- dev_err(&ts_data->client->dev,
- "Regulator get failed vcc_i2c rc=%d\n", rc);
- goto reg_vdd_set_vtg;
- }
-
- if (regulator_count_voltages(ts_data->vcc_i2c) > 0) {
- rc = regulator_set_voltage(ts_data->vcc_i2c,
- MSTAR_I2C_VTG_MIN_UV,
- MSTAR_I2C_VTG_MAX_UV);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator set_vtg failed vcc_i2c rc=%d\n", rc);
- goto reg_vcc_i2c_put;
- }
- }
- } else {
- if (regulator_count_voltages(ts_data->vdd) > 0)
- regulator_set_voltage(ts_data->vdd, 0,
- MSTAR_VTG_MAX_UV);
-
- regulator_put(ts_data->vdd);
-
- if (regulator_count_voltages(ts_data->vcc_i2c) > 0)
- regulator_set_voltage(ts_data->vcc_i2c, 0,
- MSTAR_I2C_VTG_MAX_UV);
-
- regulator_put(ts_data->vcc_i2c);
- }
-
- return 0;
-
-reg_vcc_i2c_put:
- regulator_put(ts_data->vcc_i2c);
-reg_vdd_set_vtg:
- if (regulator_count_voltages(ts_data->vdd) > 0)
- regulator_set_voltage(ts_data->vdd, 0, MSTAR_VTG_MAX_UV);
-reg_vdd_put:
- regulator_put(ts_data->vdd);
- return rc;
-}
-
-static int msg21xx_ts_power_on(struct msg21xx_ts_data *ts_data, bool on)
-{
- int rc;
-
- if (!on)
- goto power_off;
-
- rc = regulator_enable(ts_data->vdd);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vdd enable failed rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_enable(ts_data->vcc_i2c);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vcc_i2c enable failed rc=%d\n", rc);
- regulator_disable(ts_data->vdd);
- }
-
- return rc;
-
- DBG("*** %s ***\n", __func__);
- rc = regulator_disable(vdd);
-power_off:
- rc = regulator_disable(ts_data->vdd);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vdd disable failed rc=%d\n", rc);
- return rc;
- }
-
- rc = regulator_disable(ts_data->vcc_i2c);
- if (rc) {
- dev_err(&ts_data->client->dev,
- "Regulator vcc_i2c disable failed rc=%d\n", rc);
- rc = regulator_enable(ts_data->vdd);
- }
-
- return rc;
-}
-
-static int msg21xx_ts_gpio_configure(struct msg21xx_ts_data *ts_data, bool on)
-{
- int ret = 0;
-
- if (!on)
- goto pwr_deinit;
-
- if (gpio_is_valid(ts_data->pdata->irq_gpio)) {
- ret = gpio_request(ts_data->pdata->irq_gpio,
- "msg21xx_irq_gpio");
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to request GPIO[%d], %d\n",
- ts_data->pdata->irq_gpio, ret);
- goto err_irq_gpio_req;
- }
- ret = gpio_direction_input(ts_data->pdata->irq_gpio);
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to set direction for gpio[%d], %d\n",
- ts_data->pdata->irq_gpio, ret);
- goto err_irq_gpio_dir;
- }
- gpio_set_value_cansleep(ts_data->pdata->irq_gpio, 1);
- } else {
- dev_err(&ts_data->client->dev, "irq gpio not provided\n");
- goto err_irq_gpio_req;
- }
-
- if (gpio_is_valid(ts_data->pdata->reset_gpio)) {
- ret = gpio_request(ts_data->pdata->reset_gpio,
- "msg21xx_reset_gpio");
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to request GPIO[%d], %d\n",
- ts_data->pdata->reset_gpio, ret);
- goto err_reset_gpio_req;
- }
-
- } else {
- if (gpio_is_valid(pdata->irq_gpio))
- gpio_free(pdata->irq_gpio);
- if (gpio_is_valid(pdata->reset_gpio)) {
- gpio_set_value_cansleep(pdata->reset_gpio, 0);
- ret = gpio_direction_input(pdata->reset_gpio);
- if (ret)
- dev_err(&i2c_client->dev,
- "Unable to set direction for gpio [%d]\n",
- pdata->reset_gpio);
- gpio_free(pdata->reset_gpio);
- }
- }
- return 0;
- /* power on TP */
- ret = gpio_direction_output(
- ts_data->pdata->reset_gpio, 1);
- if (ret) {
- dev_err(&ts_data->client->dev,
- "Failed to set direction for GPIO[%d], %d\n",
- ts_data->pdata->reset_gpio, ret);
- goto err_reset_gpio_dir;
- }
- msleep(100);
- gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0);
- msleep(20);
- gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 1);
- msleep(200);
- } else {
- dev_err(&ts_data->client->dev, "reset gpio not provided\n");
- goto err_reset_gpio_req;
- }
-
- return 0;
-
-err_reset_gpio_dir:
- if (gpio_is_valid(ts_data->pdata->reset_gpio))
- gpio_free(ts_data->pdata->irq_gpio);
-err_reset_gpio_req:
-err_irq_gpio_dir:
- if (gpio_is_valid(ts_data->pdata->irq_gpio))
- gpio_free(ts_data->pdata->irq_gpio);
-err_irq_gpio_req:
- return ret;
-
-pwr_deinit:
- if (gpio_is_valid(ts_data->pdata->irq_gpio))
- gpio_free(ts_data->pdata->irq_gpio);
- if (gpio_is_valid(ts_data->pdata->reset_gpio)) {
- gpio_set_value_cansleep(ts_data->pdata->reset_gpio, 0);
- ret = gpio_direction_input(ts_data->pdata->reset_gpio);
- if (ret)
- dev_err(&ts_data->client->dev,
- "Unable to set direction for gpio [%d]\n",
- ts_data->pdata->reset_gpio);
- gpio_free(ts_data->pdata->reset_gpio);
- }
- return 0;
-}
-
-#ifdef CONFIG_PM
-static int msg21xx_ts_resume(struct device *dev)
-{
- int retval;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- if (!ts_data->suspended) {
- dev_info(dev, "msg21xx_ts already in resume\n");
- return 0;
- }
-
- mutex_lock(&ts_data->ts_mutex);
-
- retval = msg21xx_ts_power_on(ts_data, true);
- if (retval) {
- dev_err(dev, "msg21xx_ts power on failed");
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- if (ts_data->ts_pinctrl) {
- retval = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_active);
- if (retval < 0) {
- dev_err(dev, "Cannot get active pinctrl state\n");
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
- }
-
- retval = msg21xx_ts_gpio_configure(ts_data, true);
- if (retval) {
- dev_err(dev, "Failed to put gpios in active state %d",
- retval);
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- enable_irq(ts_data->client->irq);
- ts_data->suspended = false;
-
- mutex_unlock(&ts_data->ts_mutex);
-
- return 0;
-}
-
-static int msg21xx_ts_suspend(struct device *dev)
-{
- int retval;
- struct msg21xx_ts_data *ts_data = dev_get_drvdata(dev);
-
- if (ts_data->pdata->updating_fw) {
- dev_info(dev, "Firmware loading in progress\n");
- return 0;
- }
-
- if (ts_data->suspended) {
- dev_info(dev, "msg21xx_ts already in suspend\n");
- return 0;
- }
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
- if (bEnableTpProximity) {
- dev_dbg(dev, "suspend bEnableTpProximity=%d\n",
- bEnableTpProximity);
- return 0;
- }
-#endif
-
- mutex_lock(&ts_data->ts_mutex);
-
- disable_irq(ts_data->client->irq);
-
- touch_driver_touch_released(ts_data);
-
- if (ts_data->ts_pinctrl) {
- retval = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_suspend);
- if (retval < 0) {
- dev_err(dev, "Cannot get idle pinctrl state %d\n",
- retval);
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
- }
-
- retval = msg21xx_ts_gpio_configure(ts_data, false);
- if (retval) {
- dev_err(dev, "Failed to put gpios in idle state %d",
- retval);
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- retval = msg21xx_ts_power_on(ts_data, false);
- if (retval) {
- dev_err(dev, "msg21xx_ts power off failed");
- mutex_unlock(&ts_data->ts_mutex);
- return retval;
- }
-
- ts_data->suspended = true;
-
- mutex_unlock(&ts_data->ts_mutex);
-
- return 0;
-}
-#else
-static int msg21xx_ts_resume(struct device *dev)
-{
- return 0;
-}
-static int msg21xx_ts_suspend(struct device *dev)
-{
- return 0;
-}
-#endif
-
-static int msg21xx_debug_suspend_set(void *_data, u64 val)
-{
- struct msg21xx_ts_data *data = _data;
-
- mutex_lock(&data->input_dev->mutex);
-
- if (val)
- msg21xx_ts_suspend(&data->client->dev);
- else
- msg21xx_ts_resume(&data->client->dev);
-
- mutex_unlock(&data->input_dev->mutex);
-
- return 0;
-}
-
-static int msg21xx_debug_suspend_get(void *_data, u64 *val)
-{
- struct msg21xx_ts_data *data = _data;
-
- mutex_lock(&data->input_dev->mutex);
- *val = data->suspended;
- mutex_unlock(&data->input_dev->mutex);
-
- return 0;
-}
-
-DEFINE_SIMPLE_ATTRIBUTE(debug_suspend_fops, msg21xx_debug_suspend_get,
- msg21xx_debug_suspend_set, "%lld\n");
-
-
-#if defined(CONFIG_FB)
-static int fb_notifier_callback(struct notifier_block *self,
- unsigned long event, void *data)
-{
- struct fb_event *evdata = data;
- int *blank;
- struct msg21xx_ts_data *ts_data =
- container_of(self, struct msg21xx_ts_data, fb_notif);
-
- if (evdata && evdata->data && event == FB_EVENT_BLANK) {
- blank = evdata->data;
- if (*blank == FB_BLANK_UNBLANK)
- msg21xx_ts_resume(&ts_data->client->dev);
- else if (*blank == FB_BLANK_POWERDOWN)
- msg21xx_ts_suspend(&ts_data->client->dev);
- }
-
- return 0;
-}
-#endif
-
-static int msg21xx_get_dt_coords(struct device *dev, char *name,
- struct msg21xx_ts_platform_data *pdata)
-{
- u32 coords[FT_COORDS_ARR_SIZE];
- struct property *prop;
- struct device_node *np = dev->of_node;
- int coords_size, rc;
-
- prop = of_find_property(np, name, NULL);
- if (!prop)
- return -EINVAL;
- if (!prop->value)
- return -ENODATA;
-
- coords_size = prop->length / sizeof(u32);
- if (coords_size != FT_COORDS_ARR_SIZE) {
- dev_err(dev, "invalid %s\n", name);
- return -EINVAL;
- }
-
- rc = of_property_read_u32_array(np, name, coords, coords_size);
- if (rc && (rc != -EINVAL)) {
- dev_err(dev, "Unable to read %s\n", name);
- return rc;
- }
-
- if (!strcmp(name, "mstar,panel-coords")) {
- pdata->panel_minx = coords[0];
- pdata->panel_miny = coords[1];
- pdata->panel_maxx = coords[2];
- pdata->panel_maxy = coords[3];
- } else if (!strcmp(name, "mstar,display-coords")) {
- pdata->x_min = coords[0];
- pdata->y_min = coords[1];
- pdata->x_max = coords[2];
- pdata->y_max = coords[3];
- } else {
- dev_err(dev, "unsupported property %s\n", name);
- return -EINVAL;
- }
-
- return 0;
-}
-
-static int msg21xx_parse_dt(struct device *dev,
- struct msg21xx_ts_platform_data *pdata)
-{
- int rc;
- struct device_node *np = dev->of_node;
- struct property *prop;
- u32 temp_val;
-
- rc = msg21xx_get_dt_coords(dev, "mstar,panel-coords", pdata);
- if (rc && (rc != -EINVAL))
- return rc;
-
- rc = msg21xx_get_dt_coords(dev, "mstar,display-coords", pdata);
- if (rc)
- return rc;
-
- rc = of_property_read_u32(np, "mstar,hard-reset-delay-ms",
- &temp_val);
- if (!rc)
- pdata->hard_reset_delay_ms = temp_val;
- else
- return rc;
-
- rc = of_property_read_u32(np, "mstar,post-hard-reset-delay-ms",
- &temp_val);
- if (!rc)
- pdata->post_hard_reset_delay_ms = temp_val;
- else
- return rc;
-
- /* reset, irq gpio info */
- pdata->reset_gpio = of_get_named_gpio_flags(np, "mstar,reset-gpio",
- 0, &pdata->reset_gpio_flags);
- if (pdata->reset_gpio < 0)
- return pdata->reset_gpio;
-
- pdata->irq_gpio = of_get_named_gpio_flags(np, "mstar,irq-gpio",
- 0, &pdata->irq_gpio_flags);
- if (pdata->irq_gpio < 0)
- return pdata->irq_gpio;
-
- rc = of_property_read_u32(np, "mstar,ic-type", &temp_val);
- if (rc && (rc != -EINVAL))
- return rc;
-
- pdata->ic_type = temp_val;
-
- rc = of_property_read_u32(np, "mstar,num-max-touches", &temp_val);
- if (!rc)
- pdata->num_max_touches = temp_val;
- else
- return rc;
-
- prop = of_find_property(np, "mstar,button-map", NULL);
- if (prop) {
- pdata->num_buttons = prop->length / sizeof(temp_val);
- if (pdata->num_buttons > MAX_BUTTONS)
- return -EINVAL;
-
- rc = of_property_read_u32_array(np,
- "mstar,button-map", pdata->button_map,
- pdata->num_buttons);
- if (rc) {
- dev_err(dev, "Unable to read key codes\n");
- return rc;
- }
- }
-
- return 0;
-}
-
-/* probe function is used for matching and initializing input device */
-static int msg21xx_ts_probe(struct i2c_client *client,
- const struct i2c_device_id *id) {
-
- int ret = 0, i;
- struct dentry *temp, *dir;
- struct input_dev *input_dev;
- struct msg21xx_ts_data *ts_data;
- struct msg21xx_ts_platform_data *pdata;
-
- if (client->dev.of_node) {
- pdata = devm_kzalloc(&client->dev,
- sizeof(struct msg21xx_ts_platform_data), GFP_KERNEL);
- if (!pdata)
- return -ENOMEM;
-
- ret = msg21xx_parse_dt(&client->dev, pdata);
- if (ret) {
- dev_err(&client->dev, "DT parsing failed\n");
- return ret;
- }
- } else
- pdata = client->dev.platform_data;
-
- if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
- dev_err(&client->dev, "I2C not supported\n");
- return -ENODEV;
- }
-
- ts_data = devm_kzalloc(&client->dev,
- sizeof(struct msg21xx_ts_data), GFP_KERNEL);
- if (!ts_data)
- return -ENOMEM;
-
- ts_data->client = client;
- ts_data->info.point = devm_kzalloc(&client->dev,
- sizeof(struct touchPoint_t) * pdata->num_max_touches,
- GFP_KERNEL);
- if (!ts_data->info.point) {
- dev_err(&client->dev, "Not enough memory\n");
- return -ENOMEM;
- }
-
- /* allocate an input device */
- input_dev = input_allocate_device();
- if (!input_dev) {
- ret = -ENOMEM;
- dev_err(&client->dev, "input device allocation failed\n");
- goto err_input_allocate_dev;
- }
-
- input_dev->name = client->name;
- input_dev->phys = "I2C";
- input_dev->dev.parent = &client->dev;
- input_dev->id.bustype = BUS_I2C;
-
- ts_data->input_dev = input_dev;
- ts_data->client = client;
- ts_data->pdata = pdata;
-
- input_set_drvdata(input_dev, ts_data);
- i2c_set_clientdata(client, ts_data);
-
- ret = msg21xx_ts_power_init(ts_data, true);
- if (ret) {
- dev_err(&client->dev, "Mstar power init failed\n");
- return ret;
- }
-
- ret = msg21xx_ts_power_on(ts_data, true);
- if (ret) {
- dev_err(&client->dev, "Mstar power on failed\n");
- goto exit_deinit_power;
- }
-
- ret = msg21xx_pinctrl_init(ts_data);
- if (!ret && ts_data->ts_pinctrl) {
- /*
- * Pinctrl handle is optional. If pinctrl handle is found
- * let pins to be configured in active state. If not
- * found continue further without error.
- */
- ret = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_active);
- if (ret < 0)
- dev_err(&client->dev,
- "Failed to select %s pinatate %d\n",
- PINCTRL_STATE_ACTIVE, ret);
- }
-
- ret = msg21xx_ts_gpio_configure(ts_data, true);
- if (ret) {
- dev_err(&client->dev, "Failed to configure gpio %d\n", ret);
- goto exit_gpio_config;
- }
-
- if (msg21xx_get_ic_type(ts_data) == 0) {
- dev_err(&client->dev, "The current IC is not Mstar\n");
- ret = -1;
- goto err_wrong_ic_type;
- }
-
- mutex_init(&msg21xx_mutex);
- mutex_init(&ts_data->ts_mutex);
-
- /* set the supported event type for input device */
- set_bit(EV_ABS, input_dev->evbit);
- set_bit(EV_SYN, input_dev->evbit);
- set_bit(EV_KEY, input_dev->evbit);
- set_bit(BTN_TOUCH, input_dev->keybit);
- set_bit(BTN_TOOL_FINGER, input_dev->keybit);
- set_bit(INPUT_PROP_DIRECT, input_dev->propbit);
-
- for (i = 0; i < pdata->num_buttons; i++)
- input_set_capability(input_dev, EV_KEY, pdata->button_map[i]);
-
- input_set_drvdata(input_dev, ts_data);
- i2c_set_clientdata(client, ts_data);
-
-#ifdef CONFIG_TP_HAVE_KEY
- {
- int i;
-
- for (i = 0; i < num_buttons; i++)
- input_set_capability(input_dev, EV_KEY, button_map[i]);
- }
-#endif
-
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR,
- 0, 2, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_TOUCH_MAJOR, 0, 2, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_X,
- 0, pdata->x_max, 0, 0);
- input_set_abs_params(input_dev, ABS_MT_POSITION_Y,
- 0, pdata->y_max, 0, 0);
- ret = input_mt_init_slots(input_dev, pdata->num_max_touches, 0);
- if (ret) {
- dev_err(&client->dev,
- "Error %d initialising slots\n", ret);
- goto err_free_mem;
- }
-
- /* register the input device to input sub-system */
- ret = input_register_device(input_dev);
- if (ret < 0) {
- dev_err(&client->dev,
- "Unable to register ms-touchscreen input device\n");
- goto err_input_reg_dev;
- }
-
- /* version */
- if (device_create_file(&client->dev, &dev_attr_version) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_version.attr.name);
- goto err_create_fw_ver_file;
- }
- /* update */
- if (device_create_file(&client->dev, &dev_attr_update) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_update.attr.name);
- goto err_create_fw_update_file;
- }
- /* data */
- if (device_create_file(&client->dev, &dev_attr_data) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_data.attr.name);
- goto err_create_fw_data_file;
- }
- /* fw name */
- if (device_create_file(&client->dev, &dev_attr_fw_name) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_fw_name.attr.name);
- goto err_create_fw_name_file;
- }
- /* smart fw update */
- if (device_create_file(&client->dev, &dev_attr_update_fw) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_update_fw.attr.name);
- goto err_create_update_fw_file;
- }
- /* smart fw force update */
- if (device_create_file(&client->dev,
- &dev_attr_force_update_fw) < 0) {
- dev_err(&client->dev,
- "Failed to create device file(%s)!\n",
- dev_attr_force_update_fw.attr.name);
- goto err_create_force_update_fw_file;
- }
- dir = debugfs_create_dir(MSTAR_DEBUG_DIR_NAME, NULL);
- temp = debugfs_create_file("suspend", S_IRUSR | S_IWUSR, dir,
- ts_data, &debug_suspend_fops);
- if (temp == NULL || IS_ERR(temp)) {
- dev_err(&client->dev,
- "debugfs_create_file failed: rc=%ld\n", PTR_ERR(temp));
- goto free_debug_dir;
- }
-
-#ifdef TP_PRINT
- tp_print_create_entry(ts_data);
-#endif
-
- ret = request_threaded_irq(client->irq, NULL,
- msg21xx_ts_interrupt,
- pdata->irq_gpio_flags | IRQF_ONESHOT,
- "msg21xx", ts_data);
- if (ret)
- goto err_req_irq;
-
- disable_irq(client->irq);
-
-#if defined(CONFIG_FB)
- ts_data->fb_notif.notifier_call = fb_notifier_callback;
- ret = fb_register_client(&ts_data->fb_notif);
-#endif
-
-#ifdef CONFIG_TOUCHSCREEN_PROXIMITY_SENSOR
- tsps_assist_register_callback("msg21xx", &tsps_msg21xx_enable,
- &tsps_msg21xx_data);
-#endif
-
- dev_dbg(&client->dev, "mstar touch screen registered\n");
- enable_irq(client->irq);
- return 0;
-
-err_req_irq:
- free_irq(client->irq, ts_data);
- device_remove_file(&client->dev, &dev_attr_data);
-free_debug_dir:
- debugfs_remove_recursive(dir);
-err_create_fw_data_file:
- device_remove_file(&client->dev, &dev_attr_update);
-err_create_fw_update_file:
- device_remove_file(&client->dev, &dev_attr_version);
-err_create_fw_name_file:
- device_remove_file(&client->dev, &dev_attr_fw_name);
-err_create_update_fw_file:
- device_remove_file(&client->dev, &dev_attr_update_fw);
-err_create_force_update_fw_file:
- device_remove_file(&client->dev, &dev_attr_force_update_fw);
-err_create_fw_ver_file:
- input_unregister_device(input_dev);
-
-err_input_reg_dev:
- input_free_device(input_dev);
- input_dev = NULL;
-err_input_allocate_dev:
- mutex_destroy(&msg21xx_mutex);
- mutex_destroy(&ts_data->ts_mutex);
-
-err_wrong_ic_type:
- msg21xx_ts_gpio_configure(ts_data, false);
-exit_gpio_config:
- if (ts_data->ts_pinctrl) {
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
- devm_pinctrl_put(ts_data->ts_pinctrl);
- ts_data->ts_pinctrl = NULL;
- } else {
- ret = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_release);
- if (ret < 0)
- dev_err(&ts_data->client->dev,
- "Cannot get release pinctrl state\n");
- }
- }
- msg21xx_ts_power_on(ts_data, false);
-exit_deinit_power:
- msg21xx_ts_power_init(ts_data, false);
-err_free_mem:
- input_free_device(input_dev);
-
- return ret;
-}
-
-/* remove function is triggered when the input device is removed
- *from input sub-system
- */
-static int touch_driver_remove(struct i2c_client *client)
-{
- int retval = 0;
- struct msg21xx_ts_data *ts_data = i2c_get_clientdata(client);
-
- free_irq(ts_data->client->irq, ts_data);
- gpio_free(ts_data->pdata->irq_gpio);
- gpio_free(ts_data->pdata->reset_gpio);
-
- if (ts_data->ts_pinctrl) {
- if (IS_ERR_OR_NULL(ts_data->pinctrl_state_release)) {
- devm_pinctrl_put(ts_data->ts_pinctrl);
- ts_data->ts_pinctrl = NULL;
- } else {
- retval = pinctrl_select_state(ts_data->ts_pinctrl,
- ts_data->pinctrl_state_release);
- if (retval < 0)
- dev_err(&ts_data->client->dev,
- "Cannot get release pinctrl state\n");
- }
- }
-
- input_unregister_device(ts_data->input_dev);
- mutex_destroy(&msg21xx_mutex);
- mutex_destroy(&ts_data->ts_mutex);
-
- return retval;
-}
-
-/* The I2C device list is used for matching I2C device
- *and I2C device driver.
- */
-static const struct i2c_device_id touch_device_id[] = {
- {"msg21xx", 0},
- {}, /* should not omitted */
-};
-
-static const struct of_device_id msg21xx_match_table[] = {
- { .compatible = "mstar,msg21xx", },
- { },
-};
-
-MODULE_DEVICE_TABLE(i2c, touch_device_id);
-
-static struct i2c_driver touch_device_driver = {
- .driver = {
- .name = "ms-msg21xx",
- .owner = THIS_MODULE,
- .of_match_table = msg21xx_match_table,
- },
- .probe = msg21xx_ts_probe,
- .remove = touch_driver_remove,
- .id_table = touch_device_id,
-};
-
-module_i2c_driver(touch_device_driver);
-
-#ifdef TP_PRINT
-#include <linux/proc_fs.h>
-
-static unsigned short InfoAddr = 0x0F, PoolAddr = 0x10, TransLen = 256;
-static unsigned char row, units, cnt;
-
-static int tp_print_proc_read(struct msg21xx_ts_data *ts_data)
-{
- unsigned short i, j;
- unsigned short left, offset = 0;
- unsigned char dbbus_tx_data[3] = {0};
- unsigned char u8Data;
- signed short s16Data;
- int s32Data;
- char *buf = NULL;
-
- left = cnt*row*units;
- if ((ts_data->suspended == 0) &&
- (InfoAddr != 0x0F) &&
- (PoolAddr != 0x10) &&
- (left > 0)) {
- buf = kmalloc(left, GFP_KERNEL);
- if (buf != NULL) {
-
- while (left > 0) {
- dbbus_tx_data[0] = 0x53;
- dbbus_tx_data[1] = ((PoolAddr + offset) >> 8)
- & 0xFF;
- dbbus_tx_data[2] = (PoolAddr + offset) & 0xFF;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr,
- &dbbus_tx_data[0], 3);
- read_i2c_seq(ts_data, ts_data->client->addr,
- &buf[offset],
- left > TransLen ? TransLen : left);
- mutex_unlock(&msg21xx_mutex);
-
- if (left > TransLen) {
- left -= TransLen;
- offset += TransLen;
- } else {
- left = 0;
- }
- }
-
- for (i = 0; i < cnt; i++) {
- for (j = 0; j < row; j++) {
- if (units == 1) {
- u8Data = buf[i * row * units +
- j * units];
- } else if (units == 2) {
- s16Data = buf[i * row * units +
- j * units] +
- (buf[i * row * units +
- j * units + 1] << 8);
- } else if (units == 4) {
- s32Data = buf[i * row * units +
- j * units] +
- (buf[i * row * units +
- j * units + 1] << 8) +
- (buf[i * row * units +
- j * units + 2] << 16) +
- (buf[i * row * units +
- j * units + 3] << 24);
- }
- }
- }
-
- kfree(buf);
- }
- }
-
- return 0;
-}
-
-static void tp_print_create_entry(struct msg21xx_ts_data *ts_data)
-{
- unsigned char dbbus_tx_data[3] = {0};
- unsigned char dbbus_rx_data[8] = {0};
-
- dbbus_tx_data[0] = 0x53;
- dbbus_tx_data[1] = 0x00;
- dbbus_tx_data[2] = 0x58;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr, &dbbus_tx_data[0], 3);
- read_i2c_seq(ts_data, ts_data->client->addr, &dbbus_rx_data[0], 4);
- mutex_unlock(&msg21xx_mutex);
- InfoAddr = (dbbus_rx_data[1]<<8) + dbbus_rx_data[0];
- PoolAddr = (dbbus_rx_data[3]<<8) + dbbus_rx_data[2];
-
- if ((InfoAddr != 0x0F) && (PoolAddr != 0x10)) {
- msleep(20);
- dbbus_tx_data[0] = 0x53;
- dbbus_tx_data[1] = (InfoAddr >> 8) & 0xFF;
- dbbus_tx_data[2] = InfoAddr & 0xFF;
- mutex_lock(&msg21xx_mutex);
- write_i2c_seq(ts_data, ts_data->client->addr,
- &dbbus_tx_data[0], 3);
- read_i2c_seq(ts_data, ts_data->client->addr,
- &dbbus_rx_data[0], 8);
- mutex_unlock(&msg21xx_mutex);
-
- units = dbbus_rx_data[0];
- row = dbbus_rx_data[1];
- cnt = dbbus_rx_data[2];
- TransLen = (dbbus_rx_data[7]<<8) + dbbus_rx_data[6];
-
- if (device_create_file(&ts_data->client->dev,
- &dev_attr_tpp) < 0)
- dev_err(&ts_data->client->dev, "Failed to create device file(%s)!\n",
- dev_attr_tpp.attr.name);
- }
-}
-#endif
-
-MODULE_AUTHOR("MStar Semiconductor, Inc.");
-MODULE_LICENSE("GPL v2");
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ce15e150277e..ce1eb562be36 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -249,17 +249,6 @@
#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)
-#define TTBCR2_SEP_SHIFT 15
-#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
-
-#define TTBCR2_SEP_31 0
-#define TTBCR2_SEP_35 1
-#define TTBCR2_SEP_39 2
-#define TTBCR2_SEP_41 3
-#define TTBCR2_SEP_43 4
-#define TTBCR2_SEP_47 5
-#define TTBCR2_SEP_NOSIGN 7
-
#define TTBRn_ASID_SHIFT 48
#define FSR_MULTI (1 << 31)
@@ -1614,7 +1603,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
if (smmu->version > ARM_SMMU_V1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
- reg |= TTBCR2_SEP_UPSTREAM;
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
}
} else {
@@ -1745,7 +1733,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
- unsigned long quirks = 0;
+ unsigned long quirks =
+ smmu_domain->attributes & (1 << DOMAIN_ATTR_ENABLE_TTBR1) ?
+ IO_PGTABLE_QUIRK_ARM_TTBR1 : 0;
if (smmu_domain->smmu)
goto out;
@@ -1837,6 +1827,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
};
fmt = ARM_MSM_SECURE;
} else {
+
smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
.quirks = quirks,
.pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
@@ -3140,6 +3131,12 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
ret = 0;
break;
+ case DOMAIN_ATTR_ENABLE_TTBR1:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_ENABLE_TTBR1));
+ ret = 0;
+ break;
+
default:
ret = -ENODEV;
break;
@@ -3283,6 +3280,12 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
ret = 0;
break;
}
+ case DOMAIN_ATTR_ENABLE_TTBR1:
+ if (*((int *)data))
+ smmu_domain->attributes |=
+ 1 << DOMAIN_ATTR_ENABLE_TTBR1;
+ ret = 0;
+ break;
default:
ret = -ENODEV;
break;
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0d057ca92972..5f2b66286c0c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -131,14 +131,21 @@
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+#define ARM_LPAE_TCR_TG1_16K 1ULL
+#define ARM_LPAE_TCR_TG1_4K 2ULL
+#define ARM_LPAE_TCR_TG1_64K 3ULL
+
#define ARM_LPAE_TCR_SH0_SHIFT 12
#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH1_SHIFT 28
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_ORGN1_SHIFT 26
#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_IRGN1_SHIFT 24
#define ARM_LPAE_TCR_RGN_MASK 0x3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
@@ -151,6 +158,9 @@
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
#define ARM_LPAE_TCR_SZ_MASK 0xf
+#define ARM_LPAE_TCR_T1SZ_SHIFT 16
+#define ARM_LPAE_TCR_T1SZ_MASK 0x3f
+
#define ARM_LPAE_TCR_PS_SHIFT 16
#define ARM_LPAE_TCR_PS_MASK 0x7
@@ -167,6 +177,16 @@
#define ARM_LPAE_TCR_EPD1_SHIFT 23
#define ARM_LPAE_TCR_EPD1_FAULT 1
+#define ARM_LPAE_TCR_SEP_SHIFT (15 + 32)
+
+#define ARM_LPAE_TCR_SEP_31 0ULL
+#define ARM_LPAE_TCR_SEP_35 1ULL
+#define ARM_LPAE_TCR_SEP_39 2ULL
+#define ARM_LPAE_TCR_SEP_41 3ULL
+#define ARM_LPAE_TCR_SEP_43 4ULL
+#define ARM_LPAE_TCR_SEP_47 5ULL
+#define ARM_LPAE_TCR_SEP_UPSTREAM 7ULL
+
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
@@ -206,7 +226,7 @@ struct arm_lpae_io_pgtable {
unsigned long pg_shift;
unsigned long bits_per_level;
- void *pgd;
+ void *pgd[2];
};
typedef u64 arm_lpae_iopte;
@@ -524,14 +544,26 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
return pte;
}
+static inline arm_lpae_iopte *arm_lpae_get_table(
+ struct arm_lpae_io_pgtable *data, unsigned long iova)
+{
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ return ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) &&
+ (iova & (1UL << (cfg->ias - 1)))) ?
+ data->pgd[1] : data->pgd[0];
+}
+
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int ret, lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
+ ptep = arm_lpae_get_table(data, iova);
+
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
@@ -554,7 +586,7 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
struct scatterlist *s;
@@ -563,6 +595,8 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
unsigned int min_pagesz;
struct map_state ms;
+ ptep = arm_lpae_get_table(data, iova);
+
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
goto out_err;
@@ -672,7 +706,10 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
- __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd[0]);
+ if (data->pgd[1])
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
+ data->pgd[1]);
kfree(data);
}
@@ -800,9 +837,11 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t unmapped = 0;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int lvl = ARM_LPAE_START_LVL(data);
+ ptep = arm_lpae_get_table(data, iova);
+
while (unmapped < size) {
size_t ret, size_to_unmap, remaining;
@@ -828,7 +867,10 @@ static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, int *plvl_ret,
arm_lpae_iopte *ptep_ret)
{
- arm_lpae_iopte pte, *ptep = data->pgd;
+ arm_lpae_iopte pte, *ptep;
+
+ ptep = arm_lpae_get_table(data, iova);
+
*plvl_ret = ARM_LPAE_START_LVL(data);
*ptep_ret = 0;
@@ -994,6 +1036,71 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
return data;
}
+static u64 arm64_lpae_setup_ttbr1(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+
+{
+ u64 reg;
+
+ /* If TTBR1 is disabled, disable speculative walks through the TTBR1 */
+ if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)) {
+ reg = ARM_LPAE_TCR_EPD1;
+ reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+ return reg;
+ }
+
+ if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN1_SHIFT);
+ else
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN1_SHIFT);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= (ARM_LPAE_TCR_TG1_4K << 30);
+ break;
+ case SZ_16K:
+ reg |= (ARM_LPAE_TCR_TG1_16K << 30);
+ break;
+ case SZ_64K:
+ reg |= (ARM_LPAE_TCR_TG1_64K << 30);
+ break;
+ }
+
+ /* Set T1SZ */
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T1SZ_SHIFT;
+
+ /* Set the SEP bit based on the size */
+ switch (cfg->ias) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_SEP_31 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_SEP_35 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_SEP_39 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_SEP_41 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_SEP_43 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_SEP_47 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ default:
+ reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ }
+
+ return reg;
+}
+
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
@@ -1050,8 +1157,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
- /* Disable speculative walks through TTBR1 */
- reg |= ARM_LPAE_TCR_EPD1;
+ /* Bring in the TTBR1 configuration */
+ reg |= arm64_lpae_setup_ttbr1(cfg, data);
+
cfg->arm_lpae_s1_cfg.tcr = reg;
/* MAIRs */
@@ -1066,16 +1174,33 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s1_cfg.mair[1] = 0;
/* Looking good; allocate a pgd */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
- if (!data->pgd)
+ data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+ cookie);
+ if (!data->pgd[0])
goto out_free_data;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+ data->pgd[1] = __arm_lpae_alloc_pages(data->pgd_size,
+ GFP_KERNEL, cfg, cookie);
+ if (!data->pgd[1]) {
+ __arm_lpae_free_pages(data->pgd[0], data->pgd_size, cfg,
+ cookie);
+ goto out_free_data;
+ }
+ } else {
+ data->pgd[1] = NULL;
+ }
+
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
/* TTBRs */
- cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
- cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd[0]);
+
+ if (data->pgd[1])
+ cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd[1]);
+
return &data->iop;
out_free_data:
@@ -1155,15 +1280,16 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
- if (!data->pgd)
+ data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+ cookie);
+ if (!data->pgd[0])
goto out_free_data;
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
/* VTTBR */
- cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd[0]);
return &data->iop;
out_free_data:
@@ -1261,7 +1387,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
cfg->pgsize_bitmap, cfg->ias);
pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
data->levels, data->pgd_size, data->pg_shift,
- data->bits_per_level, data->pgd);
+ data->bits_per_level, data->pgd[0]);
}
#define __FAIL(ops, i) ({ \
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index a3f366f559a7..f4533040806f 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -62,6 +62,7 @@ struct io_pgtable_cfg {
*/
#define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT (1 << 1)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 (1 << 2) /* Allocate TTBR1 PT */
int quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
diff --git a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
index 1c0a10e2fbef..a0f1d5148c94 100644
--- a/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
+++ b/drivers/media/platform/msm/camera_v2/common/cam_smmu_api.c
@@ -878,6 +878,8 @@ static int cam_smmu_detach_device(int idx)
static int cam_smmu_attach_sec_cpp(int idx)
{
+ int32_t rc = 0;
+
/*
* When switching to secure, detach CPP NS, do scm call
* with CPP SID and no need of attach again, because
@@ -889,8 +891,12 @@ static int cam_smmu_attach_sec_cpp(int idx)
return -EINVAL;
}
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_CPP);
+ if (rc != 0) {
+ pr_err("fail to set secure mode for cpp, rc %d", rc);
+ return rc;
+ }
iommu_cb_set.cb_info[idx].state = CAM_SMMU_ATTACH;
@@ -899,8 +905,14 @@ static int cam_smmu_attach_sec_cpp(int idx)
static int cam_smmu_detach_sec_cpp(int idx)
{
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
+ int32_t rc = 0;
+
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_CPP);
+ if (rc != 0) {
+ pr_err("fail to switch to non secure mode for cpp, rc %d", rc);
+ return rc;
+ }
iommu_cb_set.cb_info[idx].state = CAM_SMMU_DETACH;
@@ -917,6 +929,8 @@ static int cam_smmu_detach_sec_cpp(int idx)
static int cam_smmu_attach_sec_vfe_ns_stats(int idx)
{
+ int32_t rc = 0;
+
/*
*When switching to secure, for secure pixel and non-secure stats
*localizing scm/attach of non-secure SID's in attach secure
@@ -933,16 +947,26 @@ static int cam_smmu_attach_sec_vfe_ns_stats(int idx)
}
}
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_ISP);
+ if (rc != 0) {
+ pr_err("fail to set secure mode for vfe, rc %d", rc);
+ return rc;
+ }
return 0;
}
static int cam_smmu_detach_sec_vfe_ns_stats(int idx)
{
- msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
+ int32_t rc = 0;
+
+ rc = msm_camera_tz_set_mode(MSM_CAMERA_TZ_MODE_NON_SECURE,
MSM_CAMERA_TZ_HW_BLOCK_ISP);
+ if (rc != 0) {
+ pr_err("fail to switch to non secure mode for vfe, rc %d", rc);
+ return rc;
+ }
/*
*While exiting from secure mode for secure pixel and non-secure stats,
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
index de29692414d2..f7eb0f8ac5a8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1179,26 +1179,6 @@ int msm_isp_smmu_attach(struct msm_isp_buf_mgr *buf_mgr,
rc = msm_isp_buf_get_scratch(buf_mgr);
if (rc)
goto err2;
- } else {
- if (buf_mgr->attach_ref_cnt > 0)
- buf_mgr->attach_ref_cnt--;
- else
- pr_err("%s: Error! Invalid ref_cnt %d\n",
- __func__, buf_mgr->attach_ref_cnt);
-
- if (buf_mgr->attach_ref_cnt == 0) {
- rc = msm_isp_buf_put_scratch(buf_mgr);
- if (buf_mgr->secure_enable == SECURE_MODE)
- rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_DETACH_SEC_VFE_NS_STATS);
- else
- rc |= cam_smmu_ops(buf_mgr->iommu_hdl,
- CAM_SMMU_DETACH);
- if (rc < 0) {
- pr_err("%s: img/stats smmu detach error, rc :%d\n",
- __func__, rc);
- }
- }
}
mutex_unlock(&buf_mgr->lock);
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 840d84388a17..bb3f0dca9d92 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -588,6 +588,12 @@ int vfe_hw_probe(struct platform_device *pdev)
}
vfe_dev->hw_info =
(struct msm_vfe_hardware_info *) match_dev->data;
+ /* Cx ipeak support */
+ if (of_find_property(pdev->dev.of_node,
+ "qcom,vfe_cx_ipeak", NULL)) {
+ vfe_dev->vfe_cx_ipeak = cx_ipeak_register(
+ pdev->dev.of_node, "qcom,vfe_cx_ipeak");
+ }
} else {
vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
platform_get_device_id(pdev)->driver_data;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index f6fabc61620d..aca8e99650ba 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -29,6 +29,7 @@
#include "msm_buf_mgr.h"
#include "cam_hw_ops.h"
+#include <soc/qcom/cx_ipeak.h>
#define VFE40_8974V1_VERSION 0x10000018
#define VFE40_8974V2_VERSION 0x1001001A
@@ -767,6 +768,8 @@ struct vfe_device {
size_t num_hvx_clk;
size_t num_norm_clk;
enum cam_ahb_clk_vote ahb_vote;
+ bool turbo_vote;
+ struct cx_ipeak_client *vfe_cx_ipeak;
/* Sync variables*/
struct completion reset_complete;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index c7f3b97c83c9..d22e35431fc8 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -331,6 +331,7 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
goto ahb_vote_fail;
}
vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
+ vfe_dev->turbo_vote = 0;
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
vfe_dev->vfe_base;
@@ -2556,6 +2557,7 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
{
int rc = 0;
int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+ int ret;
rc = msm_camera_clk_set_rate(&vfe_dev->pdev->dev,
vfe_dev->vfe_clk[clk_idx], *rate);
@@ -2563,7 +2565,26 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
return rc;
*rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
vfe_dev->msm_isp_vfe_clk_rate = *rate;
-
+ if (vfe_dev->vfe_cx_ipeak) {
+ if (vfe_dev->msm_isp_vfe_clk_rate >=
+ vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_TURBO]
+ [vfe_dev->hw_info->vfe_clk_idx] &&
+ vfe_dev->turbo_vote == 0) {
+ ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, true);
+ if (ret)
+ pr_debug("%s: cx_ipeak_update failed %d\n",
+ __func__, ret);
+ else
+ vfe_dev->turbo_vote = 1;
+ } else if (vfe_dev->turbo_vote == 1) {
+ ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, false);
+ if (ret)
+ pr_debug("%s: cx_ipeak_update failed %d\n",
+ __func__, ret);
+ else
+ vfe_dev->turbo_vote = 0;
+ }
+ }
if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg(vfe_dev, NULL);
return 0;
diff --git a/drivers/media/platform/msm/camera_v2/msm.c b/drivers/media/platform/msm/camera_v2/msm.c
index c6dc4b75e479..5cf5582b55ab 100644
--- a/drivers/media/platform/msm/camera_v2/msm.c
+++ b/drivers/media/platform/msm/camera_v2/msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,8 @@ spinlock_t msm_eventq_lock;
static struct pid *msm_pid;
spinlock_t msm_pid_lock;
+static uint32_t gpu_limit;
+
/*
* It takes 20 bytes + NULL character to write the
* largest decimal value of an uint64_t
@@ -442,6 +444,14 @@ int msm_create_session(unsigned int session_id, struct video_device *vdev)
mutex_init(&session->lock);
mutex_init(&session->lock_q);
mutex_init(&session->close_lock);
+
+ if (gpu_limit) {
+ session->sysfs_pwr_limit = kgsl_pwr_limits_add(KGSL_DEVICE_3D0);
+ if (session->sysfs_pwr_limit)
+ kgsl_pwr_limits_set_freq(session->sysfs_pwr_limit,
+ gpu_limit);
+ }
+
return 0;
}
EXPORT_SYMBOL(msm_create_session);
@@ -607,6 +617,11 @@ int msm_destroy_session(unsigned int session_id)
if (!session)
return -EINVAL;
+ if (gpu_limit && session->sysfs_pwr_limit) {
+ kgsl_pwr_limits_set_default(session->sysfs_pwr_limit);
+ kgsl_pwr_limits_del(session->sysfs_pwr_limit);
+ }
+
msm_destroy_session_streams(session);
msm_remove_session_cmd_ack_q(session);
mutex_destroy(&session->lock);
@@ -1290,6 +1305,9 @@ static int msm_probe(struct platform_device *pdev)
goto v4l2_fail;
}
+ of_property_read_u32(pdev->dev.of_node,
+ "qcom,gpu-limit", &gpu_limit);
+
goto probe_end;
v4l2_fail:
diff --git a/drivers/media/platform/msm/camera_v2/msm.h b/drivers/media/platform/msm/camera_v2/msm.h
index 2b3576b8edd2..7474cb119147 100644
--- a/drivers/media/platform/msm/camera_v2/msm.h
+++ b/drivers/media/platform/msm/camera_v2/msm.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,7 @@
#include <linux/pm_qos.h>
#include <linux/msm_ion.h>
#include <linux/iommu.h>
+#include <linux/msm_kgsl.h>
#include <media/v4l2-dev.h>
#include <media/v4l2-ioctl.h>
#include <media/v4l2-device.h>
@@ -110,6 +111,7 @@ struct msm_session {
struct mutex lock;
struct mutex lock_q;
struct mutex close_lock;
+ struct kgsl_pwr_limit *sysfs_pwr_limit;
};
static inline bool msm_is_daemon_present(void)
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
index 730f8b32ff1a..76a7c6942c68 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -149,10 +149,7 @@ static int32_t msm_buf_mngr_buf_done(struct msm_buf_mngr_device *buf_mngr_dev,
list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
if ((bufs->session_id == buf_info->session_id) &&
(bufs->stream_id == buf_info->stream_id) &&
- (bufs->vb2_v4l2_buf->vb2_buf.index ==
- buf_info->index)) {
- bufs->vb2_v4l2_buf->sequence = buf_info->frame_id;
- bufs->vb2_v4l2_buf->timestamp = buf_info->timestamp;
+ (bufs->index == buf_info->index)) {
ret = buf_mngr_dev->vb2_ops.buf_done
(bufs->vb2_v4l2_buf,
buf_info->session_id,
@@ -181,7 +178,7 @@ static int32_t msm_buf_mngr_put_buf(struct msm_buf_mngr_device *buf_mngr_dev,
list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
if ((bufs->session_id == buf_info->session_id) &&
(bufs->stream_id == buf_info->stream_id) &&
- (bufs->vb2_v4l2_buf->vb2_buf.index == buf_info->index)) {
+ (bufs->index == buf_info->index)) {
ret = buf_mngr_dev->vb2_ops.put_buf(bufs->vb2_v4l2_buf,
buf_info->session_id, buf_info->stream_id);
list_del_init(&bufs->entry);
@@ -214,7 +211,7 @@ static int32_t msm_generic_buf_mngr_flush(
buf_info->session_id,
buf_info->stream_id, 0, &ts, 0);
pr_err("Bufs not flushed: str_id = %d buf_index = %d ret = %d\n",
- buf_info->stream_id, bufs->vb2_v4l2_buf->vb2_buf.index,
+ buf_info->stream_id, bufs->index,
ret);
list_del_init(&bufs->entry);
kfree(bufs);
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index 7885149440f9..064c1e8c5bab 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -834,8 +834,10 @@ static int cpp_init_mem(struct cpp_device *cpp_dev)
else
rc = cam_smmu_get_handle("cpp", &iommu_hdl);
- if (rc < 0)
+ if (rc < 0) {
+ pr_err("smmu get handle failed\n");
return -ENODEV;
+ }
cpp_dev->iommu_hdl = iommu_hdl;
cam_smmu_reg_client_page_fault_handler(
@@ -1466,10 +1468,16 @@ static int cpp_close_node(struct v4l2_subdev *sd, struct v4l2_subdev_fh *fh)
msm_cpp_clear_timer(cpp_dev);
cpp_release_hardware(cpp_dev);
if (cpp_dev->iommu_state == CPP_IOMMU_STATE_ATTACHED) {
- cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
- rc = cam_smmu_ops(cpp_dev->iommu_hdl, CAM_SMMU_DETACH);
+ if (cpp_dev->security_mode == SECURE_MODE)
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl,
+ CAM_SMMU_DETACH_SEC_CPP);
+ else
+ rc = cam_smmu_ops(cpp_dev->iommu_hdl,
+ CAM_SMMU_DETACH);
+
if (rc < 0)
pr_err("Error: Detach fail in release\n");
+ cpp_dev->iommu_state = CPP_IOMMU_STATE_DETACHED;
}
cam_smmu_destroy_handle(cpp_dev->iommu_hdl);
msm_cpp_empty_list(processing_q, list_frame);
@@ -3450,6 +3458,7 @@ STREAM_BUFF_END:
rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
ioctl_ptr);
if (rc < 0) {
+ pr_err("CPP_IOMMU_ATTACH copy from user fail");
ERR_COPY_FROM_USER();
return -EINVAL;
}
@@ -3487,6 +3496,7 @@ STREAM_BUFF_END:
rc = msm_cpp_copy_from_ioctl_ptr(&cpp_attach_info,
ioctl_ptr);
if (rc < 0) {
+ pr_err("CPP_IOMMU_DETTACH copy from user fail");
ERR_COPY_FROM_USER();
return -EINVAL;
}
@@ -3825,6 +3835,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
struct msm_cpp_frame_info32_t k32_frame_info;
struct msm_cpp_frame_info_t k64_frame_info;
uint32_t identity_k = 0;
+ bool is_copytouser_req = true;
void __user *up = (void __user *)arg;
if (sd == NULL) {
@@ -3959,9 +3970,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
break;
}
}
- if (copy_to_user(
- (void __user *)kp_ioctl.ioctl_ptr, &inst_info,
- sizeof(struct msm_cpp_frame_info32_t))) {
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &inst_info, sizeof(struct msm_cpp_frame_info32_t))) {
mutex_unlock(&cpp_dev->mutex);
return -EFAULT;
}
@@ -3997,6 +4007,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
sizeof(struct msm_cpp_stream_buff_info_t);
}
}
+ is_copytouser_req = false;
if (cmd == VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32)
cmd = VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO;
else if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32)
@@ -4011,6 +4022,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
get_user(identity_k, identity_u);
kp_ioctl.ioctl_ptr = (void *)&identity_k;
kp_ioctl.len = sizeof(uint32_t);
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO;
break;
}
@@ -4069,6 +4081,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
sizeof(struct msm_cpp_clock_settings_t);
}
}
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_SET_CLOCK;
break;
}
@@ -4094,6 +4107,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
kp_ioctl.ioctl_ptr = (void *)&k_queue_buf;
kp_ioctl.len = sizeof(struct msm_pproc_queue_buf_info);
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_QUEUE_BUF;
break;
}
@@ -4118,6 +4132,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
k64_frame_info.frame_id = k32_frame_info.frame_id;
kp_ioctl.ioctl_ptr = (void *)&k64_frame_info;
+
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_POP_STREAM_BUFFER;
break;
}
@@ -4171,13 +4187,16 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
break;
}
- up32_ioctl.id = kp_ioctl.id;
- up32_ioctl.len = kp_ioctl.len;
- up32_ioctl.trans_code = kp_ioctl.trans_code;
- up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
+ if (is_copytouser_req) {
+ up32_ioctl.id = kp_ioctl.id;
+ up32_ioctl.len = kp_ioctl.len;
+ up32_ioctl.trans_code = kp_ioctl.trans_code;
+ up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
- if (copy_to_user((void __user *)up, &up32_ioctl, sizeof(up32_ioctl)))
- return -EFAULT;
+ if (copy_to_user((void __user *)up, &up32_ioctl,
+ sizeof(up32_ioctl)))
+ return -EFAULT;
+ }
return rc;
}
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
index 07d522cb01bb..9120a4cc85ca 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -99,6 +99,7 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_1_3ph = {
{0x70C, 0xA5},
{0x38, 0xFE},
{0x81c, 0x2},
+ {0x700, 0x80},
};
struct csiphy_settings_t csiphy_combo_mode_v5_0_1 = {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
index 198d130b24fc..8591f0646080 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -100,6 +100,7 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_3ph = {
{0x70C, 0x16},
{0x38, 0xFE},
{0x81c, 0x6},
+ {0x700, 0x80},
};
struct csiphy_settings_t csiphy_combo_mode_v5_0 = {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index a7cd44636d1d..be266641a105 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
#define MSM_CSIPHY_DRV_NAME "msm_csiphy"
#define CLK_LANE_OFFSET 1
#define NUM_LANES_OFFSET 4
+#define CLOCK_LANE 0x02
#define CSI_3PHASE_HW 1
#define MAX_DPHY_DATA_LN 4
@@ -683,12 +684,21 @@ static int msm_csiphy_2phase_lane_config_v50(
csiphybase + csiphy_dev->ctrl_reg->
csiphy_3ph_reg.
mipi_csiphy_2ph_lnn_ctrl15.addr + offset);
- msm_camera_io_w(csiphy_dev->ctrl_reg->
- csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl0.data,
- csiphybase + csiphy_dev->ctrl_reg->
- csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl0.addr + offset);
+ if (mask == CLOCK_LANE)
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnck_ctrl0.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnck_ctrl0.addr);
+ else
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl0.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl0.addr +
+ offset);
msm_camera_io_w(csiphy_dev->ctrl_reg->
csiphy_3ph_reg.
mipi_csiphy_2ph_lnn_cfg1.data,
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 70462dcd3b12..c1a9748e8af5 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,7 @@ struct csiphy_reg_3ph_parms_t {
struct csiphy_reg_t mipi_csiphy_2ph_lnck_ctrl3;
struct csiphy_reg_t mipi_csiphy_2ph_lnn_ctrl14;
struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl7_cphy;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnck_ctrl0;
};
struct csiphy_ctrl_t {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
index 5cbdc03ced9d..9ba0b7d93616 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_base.h
@@ -122,6 +122,7 @@ struct sde_smmu_client {
struct sde_module_power mp;
struct reg_bus_client *reg_bus_clt;
bool domain_attached;
+ bool domain_reattach;
int domain;
};
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index a2da663e2046..f41382b5b20c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -990,11 +990,14 @@ static int sde_rotator_debug_base_release(struct inode *inode,
{
struct sde_rotator_debug_base *dbg = file->private_data;
- if (dbg && dbg->buf) {
+ if (dbg) {
+ mutex_lock(&dbg->buflock);
kfree(dbg->buf);
dbg->buf_len = 0;
dbg->buf = NULL;
+ mutex_unlock(&dbg->buflock);
}
+
return 0;
}
@@ -1026,8 +1029,10 @@ static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
if (cnt > (dbg->max_offset - off))
cnt = dbg->max_offset - off;
+ mutex_lock(&dbg->buflock);
dbg->off = off;
dbg->cnt = cnt;
+ mutex_unlock(&dbg->buflock);
SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);
@@ -1047,7 +1052,10 @@ static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
if (*ppos)
return 0; /* the end */
+ mutex_lock(&dbg->buflock);
len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+ mutex_unlock(&dbg->buflock);
+
if (len < 0 || len >= sizeof(buf))
return 0;
@@ -1086,6 +1094,8 @@ static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
if (off >= dbg->max_offset)
return -EFAULT;
+ mutex_lock(&dbg->buflock);
+
/* Enable Clock for register access */
sde_rotator_clk_ctrl(dbg->mgr, true);
@@ -1094,6 +1104,8 @@ static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
/* Disable Clock after register access */
sde_rotator_clk_ctrl(dbg->mgr, false);
+ mutex_unlock(&dbg->buflock);
+
SDEROT_DBG("addr=%zx data=%x\n", off, data);
return count;
@@ -1104,12 +1116,14 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
{
struct sde_rotator_debug_base *dbg = file->private_data;
size_t len;
+ int rc = 0;
if (!dbg) {
SDEROT_ERR("invalid handle\n");
return -ENODEV;
}
+ mutex_lock(&dbg->buflock);
if (!dbg->buf) {
char dump_buf[64];
char *ptr;
@@ -1121,7 +1135,8 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
if (!dbg->buf) {
SDEROT_ERR("not enough memory to hold reg dump\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto debug_read_error;
}
ptr = dbg->base + dbg->off;
@@ -1151,18 +1166,26 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
dbg->buf_len = tot;
}
- if (*ppos >= dbg->buf_len)
- return 0; /* done reading */
+ if (*ppos >= dbg->buf_len) {
+ rc = 0; /* done reading */
+ goto debug_read_error;
+ }
len = min(count, dbg->buf_len - (size_t) *ppos);
if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
SDEROT_ERR("failed to copy to user\n");
- return -EFAULT;
+ rc = -EFAULT;
+ goto debug_read_error;
}
*ppos += len; /* increase offset */
+ mutex_unlock(&dbg->buflock);
return len;
+
+debug_read_error:
+ mutex_unlock(&dbg->buflock);
+ return rc;
}
static const struct file_operations sde_rotator_off_fops = {
@@ -1196,6 +1219,9 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
if (!dbg)
return -ENOMEM;
+ mutex_init(&dbg->buflock);
+ mutex_lock(&dbg->buflock);
+
if (name)
strlcpy(dbg->name, name, sizeof(dbg->name));
dbg->base = io_data->base;
@@ -1217,6 +1243,7 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
dbg->base += rot_dev->mdata->regdump ?
rot_dev->mdata->regdump[0].offset : 0;
}
+ mutex_unlock(&dbg->buflock);
strlcpy(dbgname + prefix_len, "off", sizeof(dbgname) - prefix_len);
ent_off = debugfs_create_file(dbgname, 0644, debugfs_root, dbg,
@@ -1234,7 +1261,9 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
goto reg_fail;
}
+ mutex_lock(&dbg->buflock);
dbg->mgr = rot_dev->mgr;
+ mutex_unlock(&dbg->buflock);
return 0;
reg_fail:
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index c2c6f9775602..c6d0151d37de 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,7 @@ struct sde_rotator_debug_base {
char *buf;
size_t buf_len;
struct sde_rot_mgr *mgr;
+ struct mutex buflock;
};
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
index 89fde987d4c1..2c79ad7e45be 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3.c
@@ -1599,6 +1599,7 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
u32 danger_lut = 0; /* applicable for realtime client only */
u32 safe_lut = 0; /* applicable for realtime client only */
u32 flags = 0;
+ u32 rststs = 0;
struct sde_rotation_item *item;
if (!hw || !entry) {
@@ -1616,10 +1617,46 @@ static int sde_hw_rotator_config(struct sde_rot_hw_resource *hw,
return -EINVAL;
}
+ /*
+ * if Rotator HW is reset, but missing PM event notification, we
+ * need to init the SW timestamp automatically.
+ */
+ rststs = SDE_ROTREG_READ(rot->mdss_base, REGDMA_RESET_STATUS_REG);
+ if (!rot->reset_hw_ts && rststs) {
+ u32 l_ts, h_ts, swts;
+
+ swts = SDE_ROTREG_READ(rot->mdss_base, REGDMA_TIMESTAMP_REG);
+ h_ts = atomic_read(&rot->timestamp[ROT_QUEUE_HIGH_PRIORITY]);
+ l_ts = atomic_read(&rot->timestamp[ROT_QUEUE_LOW_PRIORITY]);
+ SDEROT_EVTLOG(0xbad0, rststs, swts, h_ts, l_ts);
+
+ if (ctx->q_id == ROT_QUEUE_HIGH_PRIORITY)
+ h_ts = (h_ts - 1) & SDE_REGDMA_SWTS_MASK;
+ else
+ l_ts = (l_ts - 1) & SDE_REGDMA_SWTS_MASK;
+
+ /* construct the combined timestamp */
+ swts = (h_ts & SDE_REGDMA_SWTS_MASK) |
+ ((l_ts & SDE_REGDMA_SWTS_MASK) <<
+ SDE_REGDMA_SWTS_SHIFT);
+
+ SDEROT_DBG("swts:0x%x, h_ts:0x%x, l_ts;0x%x\n",
+ swts, h_ts, l_ts);
+ SDEROT_EVTLOG(0x900d, swts, h_ts, l_ts);
+ rot->last_hw_ts = swts;
+
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
+ rot->last_hw_ts);
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
+ /* ensure write is issued to the rotator HW */
+ wmb();
+ }
+
if (rot->reset_hw_ts) {
SDEROT_EVTLOG(rot->last_hw_ts);
SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_TIMESTAMP_REG,
rot->last_hw_ts);
+ SDE_ROTREG_WRITE(rot->mdss_base, REGDMA_RESET_STATUS_REG, 0);
/* ensure write is issued to the rotator HW */
wmb();
rot->reset_hw_ts = false;
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
index fedade122b88..b721ec54229d 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_r3_hwio.h
@@ -277,5 +277,6 @@
#define REGDMA_INT_LOW_MASK 0x00000700
#define REGDMA_INT_ERR_MASK 0x000F0000
#define REGDMA_TIMESTAMP_REG ROT_SSPP_TPG_PATTERN_GEN_INIT_VAL
+#define REGDMA_RESET_STATUS_REG ROT_SSPP_TPG_RGB_MAPPING
#endif /*_SDE_ROTATOR_R3_HWIO_H */
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 9b1175d8f4a6..58cb160f118e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -45,6 +45,13 @@ struct sde_smmu_domain {
unsigned long size;
};
+#ifndef CONFIG_FB_MSM_MDSS
+int mdss_smmu_request_mappings(msm_smmu_handler_t callback)
+{
+ return 0;
+}
+#endif
+
int sde_smmu_set_dma_direction(int dir)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
@@ -247,6 +254,14 @@ int sde_smmu_attach(struct sde_rot_data_type *mdata)
goto err;
}
sde_smmu->domain_attached = true;
+ if (sde_smmu->domain_reattach) {
+ SDEROT_DBG(
+ "domain[%i] re-attach\n",
+ i);
+ /* remove extra vote */
+ sde_smmu_enable_power(sde_smmu, false);
+ sde_smmu->domain_reattach = false;
+ }
SDEROT_DBG("iommu v2 domain[%i] attached\n", i);
}
} else {
@@ -292,6 +307,12 @@ int sde_smmu_detach(struct sde_rot_data_type *mdata)
arm_iommu_detach_device(sde_smmu->dev);
SDEROT_DBG("iommu domain[%i] detached\n", i);
sde_smmu->domain_attached = false;
+
+ /*
+ * since we are leaving the clock vote, on
+ * re-attaching do not vote for clocks
+ */
+ sde_smmu->domain_reattach = true;
}
else {
sde_smmu_enable_power(sde_smmu, false);
@@ -530,11 +551,18 @@ static int sde_smmu_fault_handler(struct iommu_domain *domain,
sde_smmu = (struct sde_smmu_client *)token;
- /* trigger rotator panic and dump */
- SDEROT_ERR("trigger rotator panic and dump, iova=0x%08lx\n", iova);
+ /* trigger rotator dump */
+ SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
+ iova, flags);
+ SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);
- sde_rot_dump_panic();
+ /* generate dump, but no panic */
+ sde_rot_evtlog_tout_handler(false, __func__, "rot", "vbif_dbg_bus");
+ /*
+ * return -ENOSYS to allow smmu driver to dump out useful
+ * debug info.
+ */
return rc;
}
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index bd21f8cca2aa..460ffc79f566 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -2103,7 +2103,8 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
(rc == 0) && (rsp_buf->status == 0)) {
pr_debug("Got Auth_Stream_Ready, nothing sent to rx\n");
- if (!hdcp_lib_enable_encryption(handle)) {
+ if (!handle->authenticated &&
+ !hdcp_lib_enable_encryption(handle)) {
handle->authenticated = true;
cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 134995c9cd3c..8d03c36858b3 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -7043,7 +7043,11 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
break;
}
pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
ret = qseecom_set_client_mem_param(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
if (ret)
pr_err("failed Qqseecom_set_mem_param request: %d\n",
ret);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 52e3e9b5b778..60b02f28a8ff 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3482,15 +3482,23 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
/* RED error - Fatal: requires reset */
if (mrq->cmdq_req->resp_err) {
err = mrq->cmdq_req->resp_err;
+ goto reset;
+ }
+
+ /*
+ * TIMEOUT errors can happen because of execution error
+ * in the last command. So send cmd 13 to get device status
+ */
+ if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+ (mrq->data && (mrq->data->error == -ETIMEDOUT))) {
if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
ret = get_card_status(host->card, &status, 0);
if (ret)
pr_err("%s: CMD13 failed with err %d\n",
mmc_hostname(host), ret);
}
- pr_err("%s: Response error detected with device status 0x%08x\n",
+ pr_err("%s: Timeout error detected with device status 0x%08x\n",
mmc_hostname(host), status);
- goto reset;
}
/*
diff --git a/drivers/mmc/core/core.c b/drivers/mmc/core/core.c
index 9f5c67e850d4..26e57f3c5228 100644
--- a/drivers/mmc/core/core.c
+++ b/drivers/mmc/core/core.c
@@ -3095,7 +3095,6 @@ int mmc_resume_bus(struct mmc_host *host)
pr_debug("%s: Starting deferred resume\n", mmc_hostname(host));
spin_lock_irqsave(&host->lock, flags);
host->bus_resume_flags &= ~MMC_BUSRESUME_NEEDS_RESUME;
- host->rescan_disable = 0;
spin_unlock_irqrestore(&host->lock, flags);
mmc_bus_get(host);
@@ -4041,6 +4040,7 @@ EXPORT_SYMBOL(mmc_detect_card_removed);
void mmc_rescan(struct work_struct *work)
{
+ unsigned long flags;
struct mmc_host *host =
container_of(work, struct mmc_host, detect.work);
@@ -4049,8 +4049,12 @@ void mmc_rescan(struct work_struct *work)
host->trigger_card_event = false;
}
- if (host->rescan_disable)
+ spin_lock_irqsave(&host->lock, flags);
+ if (host->rescan_disable) {
+ spin_unlock_irqrestore(&host->lock, flags);
return;
+ }
+ spin_unlock_irqrestore(&host->lock, flags);
/* If there is a non-removable card registered, only scan once */
if ((host->caps & MMC_CAP_NONREMOVABLE) && host->rescan_entered)
@@ -4297,10 +4301,6 @@ int mmc_pm_notify(struct notifier_block *notify_block,
case PM_SUSPEND_PREPARE:
case PM_RESTORE_PREPARE:
spin_lock_irqsave(&host->lock, flags);
- if (mmc_bus_needs_resume(host)) {
- spin_unlock_irqrestore(&host->lock, flags);
- break;
- }
host->rescan_disable = 1;
spin_unlock_irqrestore(&host->lock, flags);
cancel_delayed_work_sync(&host->detect);
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index a7546471e780..96d4fbf1a823 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -33,8 +33,8 @@
#define DCMD_SLOT 31
#define NUM_SLOTS 32
-/* 1 sec */
-#define HALT_TIMEOUT_MS 1000
+/* 10 sec */
+#define HALT_TIMEOUT_MS 10000
static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
static int cmdq_halt(struct mmc_host *mmc, bool halt);
@@ -197,6 +197,10 @@ static void cmdq_dump_adma_mem(struct cmdq_host *cq_host)
static void cmdq_dumpregs(struct cmdq_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
+ int offset = 0;
+
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
MMC_TRACE(mmc,
"%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
@@ -243,7 +247,7 @@ static void cmdq_dumpregs(struct cmdq_host *cq_host)
cmdq_readl(cq_host, CQCRI),
cmdq_readl(cq_host, CQCRA));
pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
- cmdq_readl(cq_host, CQ_VENDOR_CFG));
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset));
pr_err(DRV_NAME ": ===========================================\n");
cmdq_dump_task_history(cq_host);
@@ -384,6 +388,12 @@ static int cmdq_enable(struct mmc_host *mmc)
cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
CMDQ_TASK_DESC_SZ_128;
cqcfg |= CQ_ICE_ENABLE;
+ /*
+ * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
+ * in CQ register space, due to which few CQ registers are
+ * shifted. Set offset_changed boolean to use updated address.
+ */
+ cq_host->offset_changed = true;
}
cmdq_writel(cq_host, cqcfg, CQCFG);
@@ -818,14 +828,18 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
{
struct mmc_request *mrq;
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ int offset = 0;
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
mrq = get_req_by_tag(cq_host, tag);
if (tag == cq_host->dcmd_slot)
mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);
if (mrq->cmdq_req->cmdq_req_flags & DCMD)
- cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG) |
- CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
+ cmdq_writel(cq_host,
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) |
+ CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG + offset);
cmdq_runtime_pm_put(cq_host);
if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
@@ -845,7 +859,6 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
u32 dbr_set = 0;
status = cmdq_readl(cq_host, CQIS);
- cmdq_writel(cq_host, status, CQIS);
if (!status && !err)
return IRQ_NONE;
@@ -868,6 +881,17 @@ irqreturn_t cmdq_irq(struct mmc_host *mmc, int err)
if (ret)
pr_err("%s: %s: halt failed ret=%d\n",
mmc_hostname(mmc), __func__, ret);
+
+ /*
+ * Clear the CQIS after halting in case of error. This is done
+ * because if CQIS is cleared before halting, the CQ will
+ * continue with issuing commands for the rest of requests with
+ * Doorbell rung. This will overwrite the Resp Arg register.
+ * So CQ must be halted first and then CQIS cleared in case
+ * of error.
+ */
+ cmdq_writel(cq_host, status, CQIS);
+
cmdq_dumpregs(cq_host);
if (!err_info) {
@@ -956,13 +980,16 @@ skip_cqterri:
mrq->cmdq_req->resp_err = true;
pr_err("%s: Response error (0x%08x) from card !!!",
- mmc_hostname(mmc), status);
+ mmc_hostname(mmc), cmdq_readl(cq_host, CQCRA));
+
} else {
mrq->cmdq_req->resp_idx = cmdq_readl(cq_host, CQCRI);
mrq->cmdq_req->resp_arg = cmdq_readl(cq_host, CQCRA);
}
cmdq_finish_data(mmc, tag);
+ } else {
+ cmdq_writel(cq_host, status, CQIS);
}
if (status & CQIS_TCC) {
@@ -995,6 +1022,9 @@ skip_cqterri:
if (status & CQIS_HAC) {
if (cq_host->ops->post_cqe_halt)
cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, false);
/* halt is completed, wakeup waiting thread */
complete(&cq_host->halt_comp);
}
@@ -1052,6 +1082,7 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
{
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
u32 ret = 0;
+ u32 config = 0;
int retries = 3;
cmdq_runtime_pm_get(cq_host);
@@ -1061,16 +1092,31 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
CQCTL);
ret = wait_for_completion_timeout(&cq_host->halt_comp,
msecs_to_jiffies(HALT_TIMEOUT_MS));
- if (!ret && !(cmdq_readl(cq_host, CQCTL) & HALT)) {
- retries--;
- continue;
+ if (!ret) {
+ pr_warn("%s: %s: HAC int timeout\n",
+ mmc_hostname(mmc), __func__);
+ if ((cmdq_readl(cq_host, CQCTL) & HALT)) {
+ /*
+ * Don't retry if CQE is halted but irq
+ * is not triggered in timeout period.
+ * And since we are returning error,
+ * un-halt CQE. Since irq was not fired
+ * yet, no need to set other params
+ */
+ retries = 0;
+ config = cmdq_readl(cq_host, CQCTL);
+ config &= ~HALT;
+ cmdq_writel(cq_host, config, CQCTL);
+ } else {
+ pr_warn("%s: %s: retryng halt (%d)\n",
+ mmc_hostname(mmc), __func__,
+ retries);
+ retries--;
+ continue;
+ }
} else {
MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
__func__, retries);
- /* halt done: re-enable legacy interrupts */
- if (cq_host->ops->clear_set_irqs)
- cq_host->ops->clear_set_irqs(mmc,
- false);
break;
}
}
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 05c924ae0935..6c10ab3859d1 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -143,6 +143,11 @@
#define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32)
#define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0)
+/*
+ * Add new macro for updated CQ vendor specific
+ * register address for SDHC v5.0 onwards.
+ */
+#define CQ_V5_VENDOR_CFG 0x900
#define CQ_VENDOR_CFG 0x100
#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
@@ -175,6 +180,7 @@ struct cmdq_host {
bool enabled;
bool halted;
bool init_done;
+ bool offset_changed;
u8 *desc_base;
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 127a2602aa81..15d1eface2d4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -310,6 +310,9 @@ void sdhci_msm_writel_relaxed(u32 val, struct sdhci_host *host, u32 offset)
writel_relaxed(val, base_addr + offset);
}
+/* Timeout value to avoid infinite waiting for pwr_irq */
+#define MSM_PWR_IRQ_TIMEOUT_MS 5000
+
static const u32 tuning_block_64[] = {
0x00FF0FFF, 0xCCC3CCFF, 0xFFCC3CC3, 0xEFFEFFFE,
0xDDFFDFFF, 0xFBFFFBFF, 0xFF7FFFBF, 0xEFBDF777,
@@ -2779,8 +2782,10 @@ static void sdhci_msm_check_power_status(struct sdhci_host *host, u32 req_type)
*/
if (done)
init_completion(&msm_host->pwr_irq_completion);
- else
- wait_for_completion(&msm_host->pwr_irq_completion);
+ else if (!wait_for_completion_timeout(&msm_host->pwr_irq_completion,
+ msecs_to_jiffies(MSM_PWR_IRQ_TIMEOUT_MS)))
+ __WARN_printf("%s: request(%d) timed out waiting for pwr_irq\n",
+ mmc_hostname(host->mmc), req_type);
pr_debug("%s: %s: request %d done\n", mmc_hostname(host->mmc),
__func__, req_type);
@@ -3284,6 +3289,8 @@ static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
/* registers offset changed starting from 4.2.0 */
int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+ if (cq_host->offset_changed)
+ offset += CQ_V5_VENDOR_CFG;
pr_err("---- Debug RAM dump ----\n");
pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index a55a26be4273..50d8e72a96c8 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -625,6 +625,9 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ring_full_count[rmnet_mhi_ptr->dev_index]++;
netif_stop_queue(dev);
rmnet_log(MSG_VERBOSE, "Stopping Queue\n");
+ write_unlock_irqrestore(
+ &rmnet_mhi_ptr->out_chan_full_lock,
+ flags);
goto rmnet_mhi_xmit_error_cleanup;
} else {
retry = 1;
@@ -652,7 +655,6 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
rmnet_mhi_xmit_error_cleanup:
rmnet_log(MSG_VERBOSE, "Ring full\n");
- write_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/ppp/ppp_generic.c b/drivers/net/ppp/ppp_generic.c
index fa76ca128e1b..e5bb870b5461 100644
--- a/drivers/net/ppp/ppp_generic.c
+++ b/drivers/net/ppp/ppp_generic.c
@@ -2390,8 +2390,6 @@ ppp_unregister_channel(struct ppp_channel *chan)
spin_lock_bh(&pn->all_channels_lock);
list_del(&pch->list);
spin_unlock_bh(&pn->all_channels_lock);
- put_net(pch->chan_net);
- pch->chan_net = NULL;
pch->file.dead = 1;
wake_up_interruptible(&pch->file.rwait);
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index e5213de8a686..f1ead7c28823 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -90,6 +90,20 @@ static inline u32 ath10k_ce_src_ring_read_index_get(struct ath10k *ar,
return ar->bus_read32(ar, ce_ctrl_addr + CURRENT_SRRI_ADDRESS);
}
+static inline void ath10k_ce_shadow_src_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ar->bus_write32(ar, shadow_sr_wr_ind_addr(ar, ce_ctrl_addr), n);
+}
+
+static inline void ath10k_ce_shadow_dest_ring_write_index_set(struct ath10k *ar,
+ u32 ce_ctrl_addr,
+ unsigned int n)
+{
+ ar->bus_write32(ar, shadow_dst_wr_ind_addr(ar, ce_ctrl_addr), n);
+}
+
static inline void ath10k_ce_src_ring_base_addr_set(struct ath10k *ar,
u32 ce_ctrl_addr,
unsigned int addr)
@@ -259,6 +273,72 @@ static inline void ath10k_ce_engine_int_status_clear(struct ath10k *ar,
ar->bus_write32(ar, ce_ctrl_addr + HOST_IS_ADDRESS, mask);
}
+u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr)
+{
+ u32 addr = 0;
+ u32 ce = COPY_ENGINE_ID(ctrl_addr);
+
+ switch (ce) {
+ case 0:
+ addr = SHADOW_VALUE0;
+ break;
+ case 3:
+ addr = SHADOW_VALUE3;
+ break;
+ case 4:
+ addr = SHADOW_VALUE4;
+ break;
+ case 5:
+ addr = SHADOW_VALUE5;
+ break;
+ case 7:
+ addr = SHADOW_VALUE7;
+ break;
+ default:
+ ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce);
+ WARN_ON(1);
+ }
+ return addr;
+}
+
+u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr)
+{
+ u32 addr = 0;
+ u32 ce = COPY_ENGINE_ID(ctrl_addr);
+
+ switch (ce) {
+ case 1:
+ addr = SHADOW_VALUE13;
+ break;
+ case 2:
+ addr = SHADOW_VALUE14;
+ break;
+ case 5:
+ addr = SHADOW_VALUE17;
+ break;
+ case 7:
+ addr = SHADOW_VALUE19;
+ break;
+ case 8:
+ addr = SHADOW_VALUE20;
+ break;
+ case 9:
+ addr = SHADOW_VALUE21;
+ break;
+ case 10:
+ addr = SHADOW_VALUE22;
+ break;
+ case 11:
+ addr = SHADOW_VALUE23;
+ break;
+ default:
+ ath10k_err(ar, "invalid CE ctrl_addr (CE=%d)", ce);
+ WARN_ON(1);
+ }
+
+ return addr;
+}
+
/*
* Guts of ath10k_ce_send, used by both ath10k_ce_send and
* ath10k_ce_sendlist_send.
@@ -325,8 +405,14 @@ int ath10k_ce_send_nolock(struct ath10k_ce_pipe *ce_state,
write_index = CE_RING_IDX_INCR(nentries_mask, write_index);
/* WORKAROUND */
- if (!(flags & CE_SEND_FLAG_GATHER))
- ath10k_ce_src_ring_write_index_set(ar, ctrl_addr, write_index);
+ if (!(flags & CE_SEND_FLAG_GATHER)) {
+ if (QCA_REV_WCN3990(ar))
+ ath10k_ce_shadow_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ else
+ ath10k_ce_src_ring_write_index_set(ar, ctrl_addr,
+ write_index);
+ }
src_ring->write_index = write_index;
exit:
@@ -957,6 +1043,24 @@ ath10k_ce_alloc_src_ring(struct ath10k *ar, unsigned int ce_id,
src_ring->base_addr_ce_space_unaligned,
CE_DESC_RING_ALIGN);
+ src_ring->shadow_base_unaligned = kzalloc(
+ nentries * sizeof(struct ce_desc),
+ GFP_KERNEL);
+
+ if (!src_ring->shadow_base_unaligned) {
+ dma_free_coherent(ar->dev,
+ (nentries * sizeof(struct ce_desc) +
+ CE_DESC_RING_ALIGN),
+ src_ring->base_addr_owner_space_unaligned,
+ base_addr);
+ kfree(src_ring);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ src_ring->shadow_base = (struct ce_desc *)PTR_ALIGN(
+ src_ring->shadow_base_unaligned,
+ CE_DESC_RING_ALIGN);
+
return src_ring;
}
@@ -1135,6 +1239,7 @@ void ath10k_ce_free_pipe(struct ath10k *ar, int ce_id)
((struct ath10k_ce_pipe *)ar->ce_states + ce_id);
if (ce_state->src_ring) {
+ kfree(ce_state->src_ring->shadow_base_unaligned);
dma_free_coherent(ar->dev,
(ce_state->src_ring->nentries *
sizeof(struct ce_desc) +
diff --git a/drivers/net/wireless/ath/ath10k/ce.h b/drivers/net/wireless/ath/ath10k/ce.h
index 1b49db14d387..160a13e681df 100644
--- a/drivers/net/wireless/ath/ath10k/ce.h
+++ b/drivers/net/wireless/ath/ath10k/ce.h
@@ -120,6 +120,9 @@ struct ath10k_ce_ring {
/* CE address space */
u32 base_addr_ce_space;
+ char *shadow_base_unaligned;
+ struct ce_desc *shadow_base;
+
/* keep last */
void *per_transfer_context[0];
};
@@ -143,6 +146,61 @@ struct ath10k_ce_pipe {
/* Copy Engine settable attributes */
struct ce_attr;
+#define SHADOW_VALUE0 (ar->shadow_reg_value->shadow_reg_value_0)
+#define SHADOW_VALUE1 (ar->shadow_reg_value->shadow_reg_value_1)
+#define SHADOW_VALUE2 (ar->shadow_reg_value->shadow_reg_value_2)
+#define SHADOW_VALUE3 (ar->shadow_reg_value->shadow_reg_value_3)
+#define SHADOW_VALUE4 (ar->shadow_reg_value->shadow_reg_value_4)
+#define SHADOW_VALUE5 (ar->shadow_reg_value->shadow_reg_value_5)
+#define SHADOW_VALUE6 (ar->shadow_reg_value->shadow_reg_value_6)
+#define SHADOW_VALUE7 (ar->shadow_reg_value->shadow_reg_value_7)
+#define SHADOW_VALUE8 (ar->shadow_reg_value->shadow_reg_value_8)
+#define SHADOW_VALUE9 (ar->shadow_reg_value->shadow_reg_value_9)
+#define SHADOW_VALUE10 (ar->shadow_reg_value->shadow_reg_value_10)
+#define SHADOW_VALUE11 (ar->shadow_reg_value->shadow_reg_value_11)
+#define SHADOW_VALUE12 (ar->shadow_reg_value->shadow_reg_value_12)
+#define SHADOW_VALUE13 (ar->shadow_reg_value->shadow_reg_value_13)
+#define SHADOW_VALUE14 (ar->shadow_reg_value->shadow_reg_value_14)
+#define SHADOW_VALUE15 (ar->shadow_reg_value->shadow_reg_value_15)
+#define SHADOW_VALUE16 (ar->shadow_reg_value->shadow_reg_value_16)
+#define SHADOW_VALUE17 (ar->shadow_reg_value->shadow_reg_value_17)
+#define SHADOW_VALUE18 (ar->shadow_reg_value->shadow_reg_value_18)
+#define SHADOW_VALUE19 (ar->shadow_reg_value->shadow_reg_value_19)
+#define SHADOW_VALUE20 (ar->shadow_reg_value->shadow_reg_value_20)
+#define SHADOW_VALUE21 (ar->shadow_reg_value->shadow_reg_value_21)
+#define SHADOW_VALUE22 (ar->shadow_reg_value->shadow_reg_value_22)
+#define SHADOW_VALUE23 (ar->shadow_reg_value->shadow_reg_value_23)
+#define SHADOW_ADDRESS0 (ar->shadow_reg_address->shadow_reg_address_0)
+#define SHADOW_ADDRESS1 (ar->shadow_reg_address->shadow_reg_address_1)
+#define SHADOW_ADDRESS2 (ar->shadow_reg_address->shadow_reg_address_2)
+#define SHADOW_ADDRESS3 (ar->shadow_reg_address->shadow_reg_address_3)
+#define SHADOW_ADDRESS4 (ar->shadow_reg_address->shadow_reg_address_4)
+#define SHADOW_ADDRESS5 (ar->shadow_reg_address->shadow_reg_address_5)
+#define SHADOW_ADDRESS6 (ar->shadow_reg_address->shadow_reg_address_6)
+#define SHADOW_ADDRESS7 (ar->shadow_reg_address->shadow_reg_address_7)
+#define SHADOW_ADDRESS8 (ar->shadow_reg_address->shadow_reg_address_8)
+#define SHADOW_ADDRESS9 (ar->shadow_reg_address->shadow_reg_address_9)
+#define SHADOW_ADDRESS10 (ar->shadow_reg_address->shadow_reg_address_10)
+#define SHADOW_ADDRESS11 (ar->shadow_reg_address->shadow_reg_address_11)
+#define SHADOW_ADDRESS12 (ar->shadow_reg_address->shadow_reg_address_12)
+#define SHADOW_ADDRESS13 (ar->shadow_reg_address->shadow_reg_address_13)
+#define SHADOW_ADDRESS14 (ar->shadow_reg_address->shadow_reg_address_14)
+#define SHADOW_ADDRESS15 (ar->shadow_reg_address->shadow_reg_address_15)
+#define SHADOW_ADDRESS16 (ar->shadow_reg_address->shadow_reg_address_16)
+#define SHADOW_ADDRESS17 (ar->shadow_reg_address->shadow_reg_address_17)
+#define SHADOW_ADDRESS18 (ar->shadow_reg_address->shadow_reg_address_18)
+#define SHADOW_ADDRESS19 (ar->shadow_reg_address->shadow_reg_address_19)
+#define SHADOW_ADDRESS20 (ar->shadow_reg_address->shadow_reg_address_20)
+#define SHADOW_ADDRESS21 (ar->shadow_reg_address->shadow_reg_address_21)
+#define SHADOW_ADDRESS22 (ar->shadow_reg_address->shadow_reg_address_22)
+#define SHADOW_ADDRESS23 (ar->shadow_reg_address->shadow_reg_address_23)
+
+#define SHADOW_ADDRESS(i) (SHADOW_ADDRESS0 + \
+ i * (SHADOW_ADDRESS1 - SHADOW_ADDRESS0))
+
+u32 shadow_sr_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr);
+u32 shadow_dst_wr_ind_addr(struct ath10k *ar, u32 ctrl_addr);
+
/*==================Send====================*/
/* ath10k_ce_send flags */
@@ -591,6 +649,9 @@ struct ce_attr {
& (uint64_t)(0xF00000000)) >> 32))
#endif
+#define COPY_ENGINE_ID(COPY_ENGINE_BASE_ADDRESS) ((COPY_ENGINE_BASE_ADDRESS \
+ - CE0_BASE_ADDRESS) / (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS))
+
static inline u32 ath10k_ce_base_address(struct ath10k *ar, unsigned int ce_id)
{
return CE0_BASE_ADDRESS + (CE1_BASE_ADDRESS - CE0_BASE_ADDRESS) * ce_id;
diff --git a/drivers/net/wireless/ath/ath10k/core.c b/drivers/net/wireless/ath/ath10k/core.c
index 0803a963da3c..052ebd7dd26b 100644
--- a/drivers/net/wireless/ath/ath10k/core.c
+++ b/drivers/net/wireless/ath/ath10k/core.c
@@ -2350,6 +2350,8 @@ struct ath10k *ath10k_core_create(size_t priv_size, struct device *dev,
/* WCN3990 chip set is non bmi based */
ar->is_bmi = false;
ar->fw_flags = &wcn3990_fw_flags;
+ ar->shadow_reg_value = &wcn3990_shadow_reg_value;
+ ar->shadow_reg_address = &wcn3990_shadow_reg_address;
break;
default:
ath10k_err(ar, "unsupported core hardware revision %d\n",
diff --git a/drivers/net/wireless/ath/ath10k/core.h b/drivers/net/wireless/ath/ath10k/core.h
index f2f0338696e0..bb2c5fb9a125 100644
--- a/drivers/net/wireless/ath/ath10k/core.h
+++ b/drivers/net/wireless/ath/ath10k/core.h
@@ -741,6 +741,8 @@ struct ath10k {
const struct ath10k_hw_regs *regs;
const struct ath10k_hw_values *hw_values;
+ struct ath10k_shadow_reg_value *shadow_reg_value;
+ struct ath10k_shadow_reg_address *shadow_reg_address;
struct ath10k_bmi bmi;
struct ath10k_wmi wmi;
struct ath10k_htc htc;
diff --git a/drivers/net/wireless/ath/ath10k/hw.c b/drivers/net/wireless/ath/ath10k/hw.c
index 0cd1068b0beb..1a8f3a388ce2 100644
--- a/drivers/net/wireless/ath/ath10k/hw.c
+++ b/drivers/net/wireless/ath/ath10k/hw.c
@@ -220,6 +220,60 @@ struct fw_flag wcn3990_fw_flags = {
.flags = 0x82E,
};
+struct ath10k_shadow_reg_value wcn3990_shadow_reg_value = {
+ .shadow_reg_value_0 = 0x00032000,
+ .shadow_reg_value_1 = 0x00032004,
+ .shadow_reg_value_2 = 0x00032008,
+ .shadow_reg_value_3 = 0x0003200C,
+ .shadow_reg_value_4 = 0x00032010,
+ .shadow_reg_value_5 = 0x00032014,
+ .shadow_reg_value_6 = 0x00032018,
+ .shadow_reg_value_7 = 0x0003201C,
+ .shadow_reg_value_8 = 0x00032020,
+ .shadow_reg_value_9 = 0x00032024,
+ .shadow_reg_value_10 = 0x00032028,
+ .shadow_reg_value_11 = 0x0003202C,
+ .shadow_reg_value_12 = 0x00032030,
+ .shadow_reg_value_13 = 0x00032034,
+ .shadow_reg_value_14 = 0x00032038,
+ .shadow_reg_value_15 = 0x0003203C,
+ .shadow_reg_value_16 = 0x00032040,
+ .shadow_reg_value_17 = 0x00032044,
+ .shadow_reg_value_18 = 0x00032048,
+ .shadow_reg_value_19 = 0x0003204C,
+ .shadow_reg_value_20 = 0x00032050,
+ .shadow_reg_value_21 = 0x00032054,
+ .shadow_reg_value_22 = 0x00032058,
+ .shadow_reg_value_23 = 0x0003205C
+};
+
+struct ath10k_shadow_reg_address wcn3990_shadow_reg_address = {
+ .shadow_reg_address_0 = 0x00030020,
+ .shadow_reg_address_1 = 0x00030024,
+ .shadow_reg_address_2 = 0x00030028,
+ .shadow_reg_address_3 = 0x0003002C,
+ .shadow_reg_address_4 = 0x00030030,
+ .shadow_reg_address_5 = 0x00030034,
+ .shadow_reg_address_6 = 0x00030038,
+ .shadow_reg_address_7 = 0x0003003C,
+ .shadow_reg_address_8 = 0x00030040,
+ .shadow_reg_address_9 = 0x00030044,
+ .shadow_reg_address_10 = 0x00030048,
+ .shadow_reg_address_11 = 0x0003004C,
+ .shadow_reg_address_12 = 0x00030050,
+ .shadow_reg_address_13 = 0x00030054,
+ .shadow_reg_address_14 = 0x00030058,
+ .shadow_reg_address_15 = 0x0003005C,
+ .shadow_reg_address_16 = 0x00030060,
+ .shadow_reg_address_17 = 0x00030064,
+ .shadow_reg_address_18 = 0x00030068,
+ .shadow_reg_address_19 = 0x0003006C,
+ .shadow_reg_address_20 = 0x00030070,
+ .shadow_reg_address_21 = 0x00030074,
+ .shadow_reg_address_22 = 0x00030078,
+ .shadow_reg_address_23 = 0x0003007C
+};
+
void ath10k_hw_fill_survey_time(struct ath10k *ar, struct survey_info *survey,
u32 cc, u32 rcc, u32 cc_prev, u32 rcc_prev)
{
diff --git a/drivers/net/wireless/ath/ath10k/hw.h b/drivers/net/wireless/ath/ath10k/hw.h
index b59bde40714c..ce87f8112928 100644
--- a/drivers/net/wireless/ath/ath10k/hw.h
+++ b/drivers/net/wireless/ath/ath10k/hw.h
@@ -835,4 +835,61 @@ ath10k_rx_desc_get_l3_pad_bytes(struct ath10k_hw_params *hw,
#define RTC_STATE_V_GET(x) (((x) & RTC_STATE_V_MASK) >> RTC_STATE_V_LSB)
+struct ath10k_shadow_reg_value {
+ u32 shadow_reg_value_0;
+ u32 shadow_reg_value_1;
+ u32 shadow_reg_value_2;
+ u32 shadow_reg_value_3;
+ u32 shadow_reg_value_4;
+ u32 shadow_reg_value_5;
+ u32 shadow_reg_value_6;
+ u32 shadow_reg_value_7;
+ u32 shadow_reg_value_8;
+ u32 shadow_reg_value_9;
+ u32 shadow_reg_value_10;
+ u32 shadow_reg_value_11;
+ u32 shadow_reg_value_12;
+ u32 shadow_reg_value_13;
+ u32 shadow_reg_value_14;
+ u32 shadow_reg_value_15;
+ u32 shadow_reg_value_16;
+ u32 shadow_reg_value_17;
+ u32 shadow_reg_value_18;
+ u32 shadow_reg_value_19;
+ u32 shadow_reg_value_20;
+ u32 shadow_reg_value_21;
+ u32 shadow_reg_value_22;
+ u32 shadow_reg_value_23;
+};
+
+struct ath10k_shadow_reg_address {
+ u32 shadow_reg_address_0;
+ u32 shadow_reg_address_1;
+ u32 shadow_reg_address_2;
+ u32 shadow_reg_address_3;
+ u32 shadow_reg_address_4;
+ u32 shadow_reg_address_5;
+ u32 shadow_reg_address_6;
+ u32 shadow_reg_address_7;
+ u32 shadow_reg_address_8;
+ u32 shadow_reg_address_9;
+ u32 shadow_reg_address_10;
+ u32 shadow_reg_address_11;
+ u32 shadow_reg_address_12;
+ u32 shadow_reg_address_13;
+ u32 shadow_reg_address_14;
+ u32 shadow_reg_address_15;
+ u32 shadow_reg_address_16;
+ u32 shadow_reg_address_17;
+ u32 shadow_reg_address_18;
+ u32 shadow_reg_address_19;
+ u32 shadow_reg_address_20;
+ u32 shadow_reg_address_21;
+ u32 shadow_reg_address_22;
+ u32 shadow_reg_address_23;
+};
+
+extern struct ath10k_shadow_reg_value wcn3990_shadow_reg_value;
+extern struct ath10k_shadow_reg_address wcn3990_shadow_reg_address;
+
#endif /* _HW_H_ */
diff --git a/drivers/net/wireless/ath/ath10k/snoc.c b/drivers/net/wireless/ath/ath10k/snoc.c
index be8ec97574f0..6c8797d5e5fc 100644
--- a/drivers/net/wireless/ath/ath10k/snoc.c
+++ b/drivers/net/wireless/ath/ath10k/snoc.c
@@ -395,10 +395,23 @@ static struct service_to_pipe target_service_to_ce_map_wlan[] = {
},
};
-#define ADRASTEA_SRC_WR_INDEX_OFFSET 0x3C
-#define ADRASTEA_DST_WR_INDEX_OFFSET 0x40
-
-static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = { };
+#define WCN3990_SRC_WR_INDEX_OFFSET 0x3C
+#define WCN3990_DST_WR_INDEX_OFFSET 0x40
+
+static struct ath10k_shadow_reg_cfg target_shadow_reg_cfg_map[] = {
+ { 0, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 3, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 4, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 5, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 7, WCN3990_SRC_WR_INDEX_OFFSET},
+ { 1, WCN3990_DST_WR_INDEX_OFFSET},
+ { 2, WCN3990_DST_WR_INDEX_OFFSET},
+ { 7, WCN3990_DST_WR_INDEX_OFFSET},
+ { 8, WCN3990_DST_WR_INDEX_OFFSET},
+ { 9, WCN3990_DST_WR_INDEX_OFFSET},
+ { 10, WCN3990_DST_WR_INDEX_OFFSET},
+ { 11, WCN3990_DST_WR_INDEX_OFFSET},
+};
void ath10k_snoc_write32(void *ar, u32 offset, u32 value)
{
@@ -1048,7 +1061,8 @@ static int ath10k_snoc_wlan_enable(struct ath10k *ar)
sizeof(struct ce_svc_pipe_cfg);
cfg.ce_svc_cfg = (struct ce_svc_pipe_cfg *)
&target_service_to_ce_map_wlan;
- cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map);
+ cfg.num_shadow_reg_cfg = sizeof(target_shadow_reg_cfg_map) /
+ sizeof(struct icnss_shadow_reg_cfg);
cfg.shadow_reg_cfg = (struct icnss_shadow_reg_cfg *)
&target_shadow_reg_cfg_map;
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index a0f76581e6eb..4874c5ba1e61 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -5,6 +5,7 @@ wil6210-y += netdev.o
wil6210-y += cfg80211.o
wil6210-y += pcie_bus.o
wil6210-y += debugfs.o
+wil6210-y += sysfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
diff --git a/drivers/net/wireless/ath/wil6210/cfg80211.c b/drivers/net/wireless/ath/wil6210/cfg80211.c
index ca06394c48bb..210ea654c83f 100644
--- a/drivers/net/wireless/ath/wil6210/cfg80211.c
+++ b/drivers/net/wireless/ath/wil6210/cfg80211.c
@@ -22,7 +22,7 @@
#define WIL_MAX_ROC_DURATION_MS 5000
bool disable_ap_sme;
-module_param(disable_ap_sme, bool, S_IRUGO);
+module_param(disable_ap_sme, bool, 0444);
MODULE_PARM_DESC(disable_ap_sme, " let user space handle AP mode SME");
#define CHAN60G(_channel, _flags) { \
@@ -290,7 +290,7 @@ static int wil_cfg80211_get_station(struct wiphy *wiphy,
int cid = wil_find_cid(wil, mac);
- wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ wil_dbg_misc(wil, "get_station: %pM CID %d\n", mac, cid);
if (cid < 0)
return cid;
@@ -329,7 +329,7 @@ static int wil_cfg80211_dump_station(struct wiphy *wiphy,
return -ENOENT;
ether_addr_copy(mac, wil->sta[cid].addr);
- wil_dbg_misc(wil, "%s(%pM) CID %d\n", __func__, mac, cid);
+ wil_dbg_misc(wil, "dump_station: %pM CID %d\n", mac, cid);
rc = wil_cid_fill_sinfo(wil, cid, sinfo);
@@ -346,16 +346,15 @@ wil_cfg80211_add_iface(struct wiphy *wiphy, const char *name,
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *p2p_wdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "add_iface\n");
if (type != NL80211_IFTYPE_P2P_DEVICE) {
- wil_err(wil, "%s: unsupported iftype %d\n", __func__, type);
+ wil_err(wil, "unsupported iftype %d\n", type);
return ERR_PTR(-EINVAL);
}
if (wil->p2p_wdev) {
- wil_err(wil, "%s: P2P_DEVICE interface already created\n",
- __func__);
+ wil_err(wil, "P2P_DEVICE interface already created\n");
return ERR_PTR(-EINVAL);
}
@@ -378,11 +377,10 @@ static int wil_cfg80211_del_iface(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "del_iface\n");
if (wdev != wil->p2p_wdev) {
- wil_err(wil, "%s: delete of incorrect interface 0x%p\n",
- __func__, wdev);
+ wil_err(wil, "delete of incorrect interface 0x%p\n", wdev);
return -EINVAL;
}
@@ -400,7 +398,7 @@ static int wil_cfg80211_change_iface(struct wiphy *wiphy,
struct wireless_dev *wdev = wil_to_wdev(wil);
int rc;
- wil_dbg_misc(wil, "%s() type=%d\n", __func__, type);
+ wil_dbg_misc(wil, "change_iface: type=%d\n", type);
if (netif_running(wil_to_ndev(wil)) && !wil_is_recovery_blocked(wil)) {
wil_dbg_misc(wil, "interface is up. resetting...\n");
@@ -447,8 +445,7 @@ static int wil_cfg80211_scan(struct wiphy *wiphy,
uint i, n;
int rc;
- wil_dbg_misc(wil, "%s(), wdev=0x%p iftype=%d\n",
- __func__, wdev, wdev->iftype);
+ wil_dbg_misc(wil, "scan: wdev=0x%p iftype=%d\n", wdev, wdev->iftype);
/* check we are client side */
switch (wdev->iftype) {
@@ -653,7 +650,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
int rc = 0;
enum ieee80211_bss_type bss_type = IEEE80211_BSS_TYPE_ESS;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "connect\n");
wil_print_connect_params(wil, sme);
if (test_bit(wil_status_fwconnecting, wil->status) ||
@@ -689,6 +686,7 @@ static int wil_cfg80211_connect(struct wiphy *wiphy,
goto out;
}
wil->privacy = sme->privacy;
+ wil->pbss = sme->pbss;
if (wil->privacy) {
/* For secure assoc, remove old keys */
@@ -786,12 +784,11 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
int rc;
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s(reason=%d)\n", __func__, reason_code);
+ wil_dbg_misc(wil, "disconnect: reason=%d\n", reason_code);
if (!(test_bit(wil_status_fwconnecting, wil->status) ||
test_bit(wil_status_fwconnected, wil->status))) {
- wil_err(wil, "%s: Disconnect was called while disconnected\n",
- __func__);
+ wil_err(wil, "Disconnect was called while disconnected\n");
return 0;
}
@@ -799,7 +796,7 @@ static int wil_cfg80211_disconnect(struct wiphy *wiphy,
WMI_DISCONNECT_EVENTID, NULL, 0,
WIL6210_DISCONNECT_TO_MS);
if (rc)
- wil_err(wil, "%s: disconnect error %d\n", __func__, rc);
+ wil_err(wil, "disconnect error %d\n", rc);
return rc;
}
@@ -847,7 +844,7 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
* different from currently "listened" channel and fail if it is.
*/
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "mgmt_tx\n");
print_hex_dump_bytes("mgmt tx frame ", DUMP_PREFIX_OFFSET, buf, len);
cmd = kmalloc(sizeof(*cmd) + len, GFP_KERNEL);
@@ -908,7 +905,7 @@ static enum wmi_key_usage wil_detect_key_usage(struct wil6210_priv *wil,
break;
}
}
- wil_dbg_misc(wil, "%s() -> %s\n", __func__, key_usage_str[rc]);
+ wil_dbg_misc(wil, "detect_key_usage: -> %s\n", key_usage_str[rc]);
return rc;
}
@@ -1013,13 +1010,13 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
return -EINVAL;
}
- wil_dbg_misc(wil, "%s(%pM %s[%d] PN %*phN)\n", __func__,
+ wil_dbg_misc(wil, "add_key: %pM %s[%d] PN %*phN\n",
mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
if (IS_ERR(cs)) {
- wil_err(wil, "Not connected, %s(%pM %s[%d] PN %*phN)\n",
- __func__, mac_addr, key_usage_str[key_usage], key_index,
+ wil_err(wil, "Not connected, %pM %s[%d] PN %*phN\n",
+ mac_addr, key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
}
@@ -1028,8 +1025,8 @@ static int wil_cfg80211_add_key(struct wiphy *wiphy,
if (params->seq && params->seq_len != IEEE80211_GCMP_PN_LEN) {
wil_err(wil,
- "Wrong PN len %d, %s(%pM %s[%d] PN %*phN)\n",
- params->seq_len, __func__, mac_addr,
+ "Wrong PN len %d, %pM %s[%d] PN %*phN\n",
+ params->seq_len, mac_addr,
key_usage_str[key_usage], key_index,
params->seq_len, params->seq);
return -EINVAL;
@@ -1053,11 +1050,11 @@ static int wil_cfg80211_del_key(struct wiphy *wiphy,
struct wil_sta_info *cs = wil_find_sta_by_key_usage(wil, key_usage,
mac_addr);
- wil_dbg_misc(wil, "%s(%pM %s[%d])\n", __func__, mac_addr,
+ wil_dbg_misc(wil, "del_key: %pM %s[%d]\n", mac_addr,
key_usage_str[key_usage], key_index);
if (IS_ERR(cs))
- wil_info(wil, "Not connected, %s(%pM %s[%d])\n", __func__,
+ wil_info(wil, "Not connected, %pM %s[%d]\n",
mac_addr, key_usage_str[key_usage], key_index);
if (!IS_ERR_OR_NULL(cs))
@@ -1074,7 +1071,7 @@ static int wil_cfg80211_set_default_key(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "set_default_key: entered\n");
return 0;
}
@@ -1087,8 +1084,9 @@ static int wil_remain_on_channel(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
int rc;
- wil_dbg_misc(wil, "%s() center_freq=%d, duration=%d iftype=%d\n",
- __func__, chan->center_freq, duration, wdev->iftype);
+ wil_dbg_misc(wil,
+ "remain_on_channel: center_freq=%d, duration=%d iftype=%d\n",
+ chan->center_freq, duration, wdev->iftype);
rc = wil_p2p_listen(wil, wdev, duration, chan, cookie);
return rc;
@@ -1100,7 +1098,7 @@ static int wil_cancel_remain_on_channel(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "cancel_remain_on_channel\n");
return wil_p2p_cancel_listen(wil, cookie);
}
@@ -1256,9 +1254,9 @@ static int _wil_cfg80211_start_ap(struct wiphy *wiphy,
if (pbss)
wmi_nettype = WMI_NETTYPE_P2P;
- wil_dbg_misc(wil, "%s: is_go=%d\n", __func__, is_go);
+ wil_dbg_misc(wil, "start_ap: is_go=%d\n", is_go);
if (is_go && !pbss) {
- wil_err(wil, "%s: P2P GO must be in PBSS\n", __func__);
+ wil_err(wil, "P2P GO must be in PBSS\n");
return -ENOTSUPP;
}
@@ -1315,7 +1313,7 @@ static int wil_cfg80211_change_beacon(struct wiphy *wiphy,
int rc;
u32 privacy = 0;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "change_beacon\n");
wil_print_bcon_data(bcon);
if (bcon->tail &&
@@ -1354,7 +1352,7 @@ static int wil_cfg80211_start_ap(struct wiphy *wiphy,
struct cfg80211_crypto_settings *crypto = &info->crypto;
u8 hidden_ssid;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "start_ap\n");
if (!channel) {
wil_err(wil, "AP: No channel???\n");
@@ -1405,7 +1403,7 @@ static int wil_cfg80211_stop_ap(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "stop_ap\n");
netif_carrier_off(ndev);
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
@@ -1450,7 +1448,7 @@ static int wil_cfg80211_del_station(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s(%pM, reason=%d)\n", __func__, params->mac,
+ wil_dbg_misc(wil, "del_station: %pM, reason=%d\n", params->mac,
params->reason_code);
mutex_lock(&wil->mutex);
@@ -1555,7 +1553,7 @@ void wil_probe_client_flush(struct wil6210_priv *wil)
{
struct wil_probe_client_req *req, *t;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "probe_client_flush\n");
mutex_lock(&wil->probe_client_mutex);
@@ -1575,7 +1573,7 @@ static int wil_cfg80211_probe_client(struct wiphy *wiphy,
struct wil_probe_client_req *req;
int cid = wil_find_cid(wil, peer);
- wil_dbg_misc(wil, "%s(%pM => CID %d)\n", __func__, peer, cid);
+ wil_dbg_misc(wil, "probe_client: %pM => CID %d\n", peer, cid);
if (cid < 0)
return -ENOLINK;
@@ -1603,7 +1601,7 @@ static int wil_cfg80211_change_bss(struct wiphy *wiphy,
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
if (params->ap_isolate >= 0) {
- wil_dbg_misc(wil, "%s(ap_isolate %d => %d)\n", __func__,
+ wil_dbg_misc(wil, "change_bss: ap_isolate %d => %d\n",
wil->ap_isolate, params->ap_isolate);
wil->ap_isolate = params->ap_isolate;
}
@@ -1616,7 +1614,7 @@ static int wil_cfg80211_start_p2p_device(struct wiphy *wiphy,
{
struct wil6210_priv *wil = wiphy_to_wil(wiphy);
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "start_p2p_device: entered\n");
wil->p2p.p2p_dev_started = 1;
return 0;
}
@@ -1630,7 +1628,7 @@ static void wil_cfg80211_stop_p2p_device(struct wiphy *wiphy,
if (!p2p->p2p_dev_started)
return;
- wil_dbg_misc(wil, "%s: entered\n", __func__);
+ wil_dbg_misc(wil, "stop_p2p_device: entered\n");
mutex_lock(&wil->mutex);
mutex_lock(&wil->p2p_wdev_mutex);
wil_p2p_stop_radio_operations(wil);
diff --git a/drivers/net/wireless/ath/wil6210/debugfs.c b/drivers/net/wireless/ath/wil6210/debugfs.c
index 865bb11e2a07..dc22a29349cb 100644
--- a/drivers/net/wireless/ath/wil6210/debugfs.c
+++ b/drivers/net/wireless/ath/wil6210/debugfs.c
@@ -364,13 +364,13 @@ static void wil6210_debugfs_init_offset(struct wil6210_priv *wil,
}
static const struct dbg_off isr_off[] = {
- {"ICC", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICC), doff_io32},
- {"ICR", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICR), doff_io32},
- {"ICM", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, ICM), doff_io32},
- {"ICS", S_IWUSR, offsetof(struct RGF_ICR, ICS), doff_io32},
- {"IMV", S_IRUGO | S_IWUSR, offsetof(struct RGF_ICR, IMV), doff_io32},
- {"IMS", S_IWUSR, offsetof(struct RGF_ICR, IMS), doff_io32},
- {"IMC", S_IWUSR, offsetof(struct RGF_ICR, IMC), doff_io32},
+ {"ICC", 0644, offsetof(struct RGF_ICR, ICC), doff_io32},
+ {"ICR", 0644, offsetof(struct RGF_ICR, ICR), doff_io32},
+ {"ICM", 0644, offsetof(struct RGF_ICR, ICM), doff_io32},
+ {"ICS", 0244, offsetof(struct RGF_ICR, ICS), doff_io32},
+ {"IMV", 0644, offsetof(struct RGF_ICR, IMV), doff_io32},
+ {"IMS", 0244, offsetof(struct RGF_ICR, IMS), doff_io32},
+ {"IMC", 0244, offsetof(struct RGF_ICR, IMC), doff_io32},
{},
};
@@ -390,9 +390,9 @@ static int wil6210_debugfs_create_ISR(struct wil6210_priv *wil,
}
static const struct dbg_off pseudo_isr_off[] = {
- {"CAUSE", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
- {"MASK_SW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
- {"MASK_FW", S_IRUGO, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
+ {"CAUSE", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE), doff_io32},
+ {"MASK_SW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_SW), doff_io32},
+ {"MASK_FW", 0444, HOSTADDR(RGF_DMA_PSEUDO_CAUSE_MASK_FW), doff_io32},
{},
};
@@ -411,40 +411,40 @@ static int wil6210_debugfs_create_pseudo_ISR(struct wil6210_priv *wil,
}
static const struct dbg_off lgc_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_CNT_TRSH), doff_io32},
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_CNT_DATA), doff_io32},
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_CNT_CRL), doff_io32},
{},
};
static const struct dbg_off tx_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_TRSH),
doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_DATA),
doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_CNT_CTL),
doff_io32},
- {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_TRSH),
doff_io32},
- {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_DATA),
doff_io32},
- {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_TX_IDL_CNT_CTL),
doff_io32},
{},
};
static const struct dbg_off rx_itr_cnt_off[] = {
- {"TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
+ {"TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_TRSH),
doff_io32},
- {"DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
+ {"DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_DATA),
doff_io32},
- {"CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
+ {"CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_CNT_CTL),
doff_io32},
- {"IDL_TRSH", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
+ {"IDL_TRSH", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_TRSH),
doff_io32},
- {"IDL_DATA", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
+ {"IDL_DATA", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_DATA),
doff_io32},
- {"IDL_CTL", S_IRUGO | S_IWUSR, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
+ {"IDL_CTL", 0644, HOSTADDR(RGF_DMA_ITR_RX_IDL_CNT_CTL),
doff_io32},
{},
};
@@ -819,7 +819,7 @@ static ssize_t wil_write_file_txmgmt(struct file *file, const char __user *buf,
rc = wil_cfg80211_mgmt_tx(wiphy, wdev, &params, NULL);
kfree(frame);
- wil_info(wil, "%s() -> %d\n", __func__, rc);
+ wil_info(wil, "-> %d\n", rc);
return len;
}
@@ -861,7 +861,7 @@ static ssize_t wil_write_file_wmi(struct file *file, const char __user *buf,
rc1 = wmi_send(wil, cmdid, cmd, cmdlen);
kfree(wmi);
- wil_info(wil, "%s(0x%04x[%d]) -> %d\n", __func__, cmdid, cmdlen, rc1);
+ wil_info(wil, "0x%04x[%d] -> %d\n", cmdid, cmdlen, rc1);
return rc;
}
@@ -1385,6 +1385,7 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
for (i = 0; i < ARRAY_SIZE(wil->sta); i++) {
struct wil_sta_info *p = &wil->sta[i];
char *status = "unknown";
+ u8 aid = 0;
switch (p->status) {
case wil_sta_unused:
@@ -1395,9 +1396,10 @@ __acquires(&p->tid_rx_lock) __releases(&p->tid_rx_lock)
break;
case wil_sta_connected:
status = "connected";
+ aid = p->aid;
break;
}
- seq_printf(s, "[%d] %pM %s\n", i, p->addr, status);
+ seq_printf(s, "[%d] %pM %s AID %d\n", i, p->addr, status, aid);
if (p->status == wil_sta_connected) {
spin_lock_bh(&p->tid_rx_lock);
@@ -1628,7 +1630,7 @@ static void wil6210_debugfs_init_blobs(struct wil6210_priv *wil,
blob->data = (void * __force)wil->csr + HOSTADDR(map->host);
blob->size = map->to - map->from;
snprintf(name, sizeof(name), "blob_%s", map->name);
- wil_debugfs_create_ioblob(name, S_IRUGO, dbg, wil_blob);
+ wil_debugfs_create_ioblob(name, 0444, dbg, wil_blob);
}
}
@@ -1638,29 +1640,29 @@ static const struct {
umode_t mode;
const struct file_operations *fops;
} dbg_files[] = {
- {"mbox", S_IRUGO, &fops_mbox},
- {"vrings", S_IRUGO, &fops_vring},
- {"stations", S_IRUGO, &fops_sta},
- {"desc", S_IRUGO, &fops_txdesc},
- {"bf", S_IRUGO, &fops_bf},
- {"ssid", S_IRUGO | S_IWUSR, &fops_ssid},
- {"mem_val", S_IRUGO, &fops_memread},
- {"reset", S_IWUSR, &fops_reset},
- {"rxon", S_IWUSR, &fops_rxon},
- {"tx_mgmt", S_IWUSR, &fops_txmgmt},
- {"wmi_send", S_IWUSR, &fops_wmi},
- {"back", S_IRUGO | S_IWUSR, &fops_back},
- {"pmccfg", S_IRUGO | S_IWUSR, &fops_pmccfg},
- {"pmcdata", S_IRUGO, &fops_pmcdata},
- {"temp", S_IRUGO, &fops_temp},
- {"freq", S_IRUGO, &fops_freq},
- {"link", S_IRUGO, &fops_link},
- {"info", S_IRUGO, &fops_info},
- {"recovery", S_IRUGO | S_IWUSR, &fops_recovery},
- {"led_cfg", S_IRUGO | S_IWUSR, &fops_led_cfg},
- {"led_blink_time", S_IRUGO | S_IWUSR, &fops_led_blink_time},
- {"fw_capabilities", S_IRUGO, &fops_fw_capabilities},
- {"fw_version", S_IRUGO, &fops_fw_version},
+ {"mbox", 0444, &fops_mbox},
+ {"vrings", 0444, &fops_vring},
+ {"stations", 0444, &fops_sta},
+ {"desc", 0444, &fops_txdesc},
+ {"bf", 0444, &fops_bf},
+ {"ssid", 0644, &fops_ssid},
+ {"mem_val", 0644, &fops_memread},
+ {"reset", 0244, &fops_reset},
+ {"rxon", 0244, &fops_rxon},
+ {"tx_mgmt", 0244, &fops_txmgmt},
+ {"wmi_send", 0244, &fops_wmi},
+ {"back", 0644, &fops_back},
+ {"pmccfg", 0644, &fops_pmccfg},
+ {"pmcdata", 0444, &fops_pmcdata},
+ {"temp", 0444, &fops_temp},
+ {"freq", 0444, &fops_freq},
+ {"link", 0444, &fops_link},
+ {"info", 0444, &fops_info},
+ {"recovery", 0644, &fops_recovery},
+ {"led_cfg", 0644, &fops_led_cfg},
+ {"led_blink_time", 0644, &fops_led_blink_time},
+ {"fw_capabilities", 0444, &fops_fw_capabilities},
+ {"fw_version", 0444, &fops_fw_version},
};
static void wil6210_debugfs_init_files(struct wil6210_priv *wil,
@@ -1699,31 +1701,32 @@ static void wil6210_debugfs_init_isr(struct wil6210_priv *wil,
/* fields in struct wil6210_priv */
static const struct dbg_off dbg_wil_off[] = {
- WIL_FIELD(privacy, S_IRUGO, doff_u32),
- WIL_FIELD(status[0], S_IRUGO | S_IWUSR, doff_ulong),
- WIL_FIELD(hw_version, S_IRUGO, doff_x32),
- WIL_FIELD(recovery_count, S_IRUGO, doff_u32),
- WIL_FIELD(ap_isolate, S_IRUGO, doff_u32),
- WIL_FIELD(discovery_mode, S_IRUGO | S_IWUSR, doff_u8),
- WIL_FIELD(chip_revision, S_IRUGO, doff_u8),
+ WIL_FIELD(privacy, 0444, doff_u32),
+ WIL_FIELD(status[0], 0644, doff_ulong),
+ WIL_FIELD(hw_version, 0444, doff_x32),
+ WIL_FIELD(recovery_count, 0444, doff_u32),
+ WIL_FIELD(ap_isolate, 0444, doff_u32),
+ WIL_FIELD(discovery_mode, 0644, doff_u8),
+ WIL_FIELD(chip_revision, 0444, doff_u8),
+ WIL_FIELD(abft_len, 0644, doff_u8),
{},
};
static const struct dbg_off dbg_wil_regs[] = {
- {"RGF_MAC_MTRL_COUNTER_0", S_IRUGO, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
+ {"RGF_MAC_MTRL_COUNTER_0", 0444, HOSTADDR(RGF_MAC_MTRL_COUNTER_0),
doff_io32},
- {"RGF_USER_USAGE_1", S_IRUGO, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
+ {"RGF_USER_USAGE_1", 0444, HOSTADDR(RGF_USER_USAGE_1), doff_io32},
{},
};
/* static parameters */
static const struct dbg_off dbg_statics[] = {
- {"desc_index", S_IRUGO | S_IWUSR, (ulong)&dbg_txdesc_index, doff_u32},
- {"vring_index", S_IRUGO | S_IWUSR, (ulong)&dbg_vring_index, doff_u32},
- {"mem_addr", S_IRUGO | S_IWUSR, (ulong)&mem_addr, doff_u32},
- {"vring_idle_trsh", S_IRUGO | S_IWUSR, (ulong)&vring_idle_trsh,
+ {"desc_index", 0644, (ulong)&dbg_txdesc_index, doff_u32},
+ {"vring_index", 0644, (ulong)&dbg_vring_index, doff_u32},
+ {"mem_addr", 0644, (ulong)&mem_addr, doff_u32},
+ {"vring_idle_trsh", 0644, (ulong)&vring_idle_trsh,
doff_u32},
- {"led_polarity", S_IRUGO | S_IWUSR, (ulong)&led_polarity, doff_u8},
+ {"led_polarity", 0644, (ulong)&led_polarity, doff_u8},
{},
};
diff --git a/drivers/net/wireless/ath/wil6210/ethtool.c b/drivers/net/wireless/ath/wil6210/ethtool.c
index 7053b62ca8d3..adcfef4dabf7 100644
--- a/drivers/net/wireless/ath/wil6210/ethtool.c
+++ b/drivers/net/wireless/ath/wil6210/ethtool.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,7 +27,7 @@ static int wil_ethtoolops_begin(struct net_device *ndev)
mutex_lock(&wil->mutex);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_begin\n");
return 0;
}
@@ -36,7 +36,7 @@ static void wil_ethtoolops_complete(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_complete\n");
mutex_unlock(&wil->mutex);
}
@@ -48,7 +48,7 @@ static int wil_ethtoolops_get_coalesce(struct net_device *ndev,
u32 tx_itr_en, tx_itr_val = 0;
u32 rx_itr_en, rx_itr_val = 0;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "ethtoolops_get_coalesce\n");
tx_itr_en = wil_r(wil, RGF_DMA_ITR_TX_CNT_CTL);
if (tx_itr_en & BIT_DMA_ITR_TX_CNT_CTL_EN)
@@ -68,7 +68,7 @@ static int wil_ethtoolops_set_coalesce(struct net_device *ndev,
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s(rx %d usec, tx %d usec)\n", __func__,
+ wil_dbg_misc(wil, "ethtoolops_set_coalesce: rx %d usec, tx %d usec\n",
cp->rx_coalesce_usecs, cp->tx_coalesce_usecs);
if (wil->wdev->iftype == NL80211_IFTYPE_MONITOR) {
diff --git a/drivers/net/wireless/ath/wil6210/interrupt.c b/drivers/net/wireless/ath/wil6210/interrupt.c
index 64046e0bd0a2..cab1e5c0e374 100644
--- a/drivers/net/wireless/ath/wil6210/interrupt.c
+++ b/drivers/net/wireless/ath/wil6210/interrupt.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -94,7 +94,7 @@ static void wil6210_mask_irq_rx(struct wil6210_priv *wil)
static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
{
- wil_dbg_irq(wil, "%s: mask_halp(%s)\n", __func__,
+ wil_dbg_irq(wil, "mask_irq_misc: mask_halp(%s)\n",
mask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
@@ -103,7 +103,7 @@ static void wil6210_mask_irq_misc(struct wil6210_priv *wil, bool mask_halp)
void wil6210_mask_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -111,7 +111,7 @@ void wil6210_mask_halp(struct wil6210_priv *wil)
static void wil6210_mask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_irq_pseudo\n");
wil_w(wil, RGF_DMA_PSEUDO_CAUSE_MASK_SW, WIL6210_IRQ_DISABLE);
@@ -134,7 +134,7 @@ void wil6210_unmask_irq_rx(struct wil6210_priv *wil)
static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
{
- wil_dbg_irq(wil, "%s: unmask_halp(%s)\n", __func__,
+ wil_dbg_irq(wil, "unmask_irq_misc: unmask_halp(%s)\n",
unmask_halp ? "true" : "false");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
@@ -143,7 +143,7 @@ static void wil6210_unmask_irq_misc(struct wil6210_priv *wil, bool unmask_halp)
static void wil6210_unmask_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, IMC),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -151,7 +151,7 @@ static void wil6210_unmask_halp(struct wil6210_priv *wil)
static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_irq_pseudo\n");
set_bit(wil_status_irqen, wil->status);
@@ -160,7 +160,7 @@ static void wil6210_unmask_irq_pseudo(struct wil6210_priv *wil)
void wil_mask_irq(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "mask_irq\n");
wil6210_mask_irq_tx(wil);
wil6210_mask_irq_rx(wil);
@@ -170,7 +170,7 @@ void wil_mask_irq(struct wil6210_priv *wil)
void wil_unmask_irq(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "unmask_irq\n");
wil_w(wil, RGF_DMA_EP_RX_ICR + offsetof(struct RGF_ICR, ICC),
WIL_ICR_ICC_VALUE);
@@ -187,7 +187,7 @@ void wil_unmask_irq(struct wil6210_priv *wil)
void wil_configure_interrupt_moderation(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "configure_interrupt_moderation\n");
/* disable interrupt moderation for monitor
* to get better timestamp precision
@@ -400,7 +400,7 @@ static irqreturn_t wil6210_irq_misc(int irq, void *cookie)
}
if (isr & BIT_DMA_EP_MISC_ICR_HALP) {
- wil_dbg_irq(wil, "%s: HALP IRQ invoked\n", __func__);
+ wil_dbg_irq(wil, "irq_misc: HALP IRQ invoked\n");
wil6210_mask_halp(wil);
isr &= ~BIT_DMA_EP_MISC_ICR_HALP;
complete(&wil->halp.comp);
@@ -599,7 +599,7 @@ void wil6210_clear_irq(struct wil6210_priv *wil)
void wil6210_set_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "set_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICS),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -607,7 +607,7 @@ void wil6210_set_halp(struct wil6210_priv *wil)
void wil6210_clear_halp(struct wil6210_priv *wil)
{
- wil_dbg_irq(wil, "%s()\n", __func__);
+ wil_dbg_irq(wil, "clear_halp\n");
wil_w(wil, RGF_DMA_EP_MISC_ICR + offsetof(struct RGF_ICR, ICR),
BIT_DMA_EP_MISC_ICR_HALP);
@@ -618,7 +618,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
{
int rc;
- wil_dbg_misc(wil, "%s(%s)\n", __func__, use_msi ? "MSI" : "INTx");
+ wil_dbg_misc(wil, "init_irq: %s\n", use_msi ? "MSI" : "INTx");
rc = request_threaded_irq(irq, wil6210_hardirq,
wil6210_thread_irq,
@@ -629,7 +629,7 @@ int wil6210_init_irq(struct wil6210_priv *wil, int irq, bool use_msi)
void wil6210_fini_irq(struct wil6210_priv *wil, int irq)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "fini_irq:\n");
wil_mask_irq(wil);
free_irq(irq, wil);
diff --git a/drivers/net/wireless/ath/wil6210/main.c b/drivers/net/wireless/ath/wil6210/main.c
index 32e217e7159e..75ae474367f9 100644
--- a/drivers/net/wireless/ath/wil6210/main.c
+++ b/drivers/net/wireless/ath/wil6210/main.c
@@ -27,23 +27,23 @@
#define WAIT_FOR_SCAN_ABORT_MS 1000
bool debug_fw; /* = false; */
-module_param(debug_fw, bool, S_IRUGO);
+module_param(debug_fw, bool, 0444);
MODULE_PARM_DESC(debug_fw, " do not perform card reset. For FW debug");
static bool oob_mode;
-module_param(oob_mode, bool, S_IRUGO);
+module_param(oob_mode, bool, 0444);
MODULE_PARM_DESC(oob_mode,
" enable out of the box (OOB) mode in FW, for diagnostics and certification");
bool no_fw_recovery;
-module_param(no_fw_recovery, bool, S_IRUGO | S_IWUSR);
+module_param(no_fw_recovery, bool, 0644);
MODULE_PARM_DESC(no_fw_recovery, " disable automatic FW error recovery");
/* if not set via modparam, will be set to default value of 1/8 of
* rx ring size during init flow
*/
unsigned short rx_ring_overflow_thrsh = WIL6210_RX_HIGH_TRSH_INIT;
-module_param(rx_ring_overflow_thrsh, ushort, S_IRUGO);
+module_param(rx_ring_overflow_thrsh, ushort, 0444);
MODULE_PARM_DESC(rx_ring_overflow_thrsh,
" RX ring overflow threshold in descriptors.");
@@ -73,7 +73,7 @@ static const struct kernel_param_ops mtu_max_ops = {
.get = param_get_uint,
};
-module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, S_IRUGO);
+module_param_cb(mtu_max, &mtu_max_ops, &mtu_max, 0444);
MODULE_PARM_DESC(mtu_max, " Max MTU value.");
static uint rx_ring_order = WIL_RX_RING_SIZE_ORDER_DEFAULT;
@@ -102,11 +102,11 @@ static const struct kernel_param_ops ring_order_ops = {
.get = param_get_uint,
};
-module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, S_IRUGO);
+module_param_cb(rx_ring_order, &ring_order_ops, &rx_ring_order, 0444);
MODULE_PARM_DESC(rx_ring_order, " Rx ring order; size = 1 << order");
-module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, S_IRUGO);
+module_param_cb(tx_ring_order, &ring_order_ops, &tx_ring_order, 0444);
MODULE_PARM_DESC(tx_ring_order, " Tx ring order; size = 1 << order");
-module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, S_IRUGO);
+module_param_cb(bcast_ring_order, &ring_order_ops, &bcast_ring_order, 0444);
MODULE_PARM_DESC(bcast_ring_order, " Bcast ring order; size = 1 << order");
#define RST_DELAY (20) /* msec, for loop in @wil_target_reset */
@@ -172,8 +172,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
struct wil_sta_info *sta = &wil->sta[cid];
might_sleep();
- wil_dbg_misc(wil, "%s(CID %d, status %d)\n", __func__, cid,
- sta->status);
+ wil_dbg_misc(wil, "disconnect_cid: CID %d, status %d\n",
+ cid, sta->status);
/* inform upper/lower layers */
if (sta->status != wil_sta_unused) {
if (!from_event) {
@@ -241,7 +241,7 @@ static void _wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
return;
might_sleep();
- wil_info(wil, "%s(bssid=%pM, reason=%d, ev%s)\n", __func__, bssid,
+ wil_info(wil, "bssid=%pM, reason=%d, ev%s\n", bssid,
reason_code, from_event ? "+" : "-");
/* Cases are:
@@ -352,7 +352,7 @@ static int wil_wait_for_recovery(struct wil6210_priv *wil)
void wil_set_recovery_state(struct wil6210_priv *wil, int state)
{
- wil_dbg_misc(wil, "%s(%d -> %d)\n", __func__,
+ wil_dbg_misc(wil, "set_recovery_state: %d -> %d\n",
wil->recovery_state, state);
wil->recovery_state = state;
@@ -494,7 +494,7 @@ int wil_priv_init(struct wil6210_priv *wil)
{
uint i;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "priv_init\n");
memset(wil->sta, 0, sizeof(wil->sta));
for (i = 0; i < WIL6210_MAX_CID; i++)
@@ -577,7 +577,7 @@ void wil6210_bus_request(struct wil6210_priv *wil, u32 kbps)
void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
u16 reason_code, bool from_event)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "disconnect\n");
del_timer_sync(&wil->connect_timer);
_wil6210_disconnect(wil, bssid, reason_code, from_event);
@@ -585,7 +585,7 @@ void wil6210_disconnect(struct wil6210_priv *wil, const u8 *bssid,
void wil_priv_deinit(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "priv_deinit\n");
wil_ftm_deinit(wil);
wil_set_recovery_state(wil, fw_recovery_idle);
@@ -619,7 +619,7 @@ static inline void wil_release_cpu(struct wil6210_priv *wil)
static void wil_set_oob_mode(struct wil6210_priv *wil, bool enable)
{
- wil_info(wil, "%s: enable=%d\n", __func__, enable);
+ wil_info(wil, "enable=%d\n", enable);
if (enable)
wil_s(wil, RGF_USER_USAGE_6, BIT_USER_OOB_MODE);
else
@@ -875,7 +875,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "reset\n");
WARN_ON(!mutex_is_locked(&wil->mutex));
WARN_ON(test_bit(wil_status_napi_en, wil->status));
@@ -898,9 +898,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_PRE_RESET);
if (rc)
- wil_err(wil,
- "%s: PRE_RESET platform notify failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "PRE_RESET platform notify failed, rc %d\n",
+ rc);
}
set_bit(wil_status_resetting, wil->status);
@@ -993,8 +992,7 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
/* check FW is responsive */
rc = wmi_echo(wil);
if (rc) {
- wil_err(wil, "%s: wmi_echo failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "wmi_echo failed, rc %d\n", rc);
return rc;
}
@@ -1004,9 +1002,8 @@ int wil_reset(struct wil6210_priv *wil, bool load_fw)
rc = wil->platform_ops.notify(wil->platform_handle,
WIL_PLATFORM_EVT_FW_RDY);
if (rc) {
- wil_err(wil,
- "%s: FW_RDY notify failed, rc %d\n",
- __func__, rc);
+ wil_err(wil, "FW_RDY notify failed, rc %d\n",
+ rc);
rc = 0;
}
}
@@ -1088,7 +1085,7 @@ int wil_up(struct wil6210_priv *wil)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "up\n");
mutex_lock(&wil->mutex);
rc = __wil_up(wil);
@@ -1129,7 +1126,7 @@ int wil_down(struct wil6210_priv *wil)
{
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "down\n");
wil_set_recovery_state(wil, fw_recovery_idle);
mutex_lock(&wil->mutex);
@@ -1162,25 +1159,24 @@ void wil_halp_vote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_vote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (++wil->halp.ref_cnt == 1) {
wil6210_set_halp(wil);
rc = wait_for_completion_timeout(&wil->halp.comp, to_jiffies);
if (!rc) {
- wil_err(wil, "%s: HALP vote timed out\n", __func__);
+ wil_err(wil, "HALP vote timed out\n");
/* Mask HALP as done in case the interrupt is raised */
wil6210_mask_halp(wil);
} else {
wil_dbg_irq(wil,
- "%s: HALP vote completed after %d ms\n",
- __func__,
+ "halp_vote: HALP vote completed after %d ms\n",
jiffies_to_msecs(to_jiffies - rc));
}
}
- wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_vote: end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
@@ -1192,15 +1188,15 @@ void wil_halp_unvote(struct wil6210_priv *wil)
mutex_lock(&wil->halp.lock);
- wil_dbg_irq(wil, "%s: start, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_unvote: start, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
if (--wil->halp.ref_cnt == 0) {
wil6210_clear_halp(wil);
- wil_dbg_irq(wil, "%s: HALP unvote\n", __func__);
+ wil_dbg_irq(wil, "HALP unvote\n");
}
- wil_dbg_irq(wil, "%s: end, HALP ref_cnt (%d)\n", __func__,
+ wil_dbg_irq(wil, "halp_unvote:end, HALP ref_cnt (%d)\n",
wil->halp.ref_cnt);
mutex_unlock(&wil->halp.lock);
diff --git a/drivers/net/wireless/ath/wil6210/netdev.c b/drivers/net/wireless/ath/wil6210/netdev.c
index 05ecf3a00f57..7bc6149480f2 100644
--- a/drivers/net/wireless/ath/wil6210/netdev.c
+++ b/drivers/net/wireless/ath/wil6210/netdev.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -27,10 +27,11 @@ static int wil_open(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "open\n");
- if (debug_fw) {
- wil_err(wil, "%s() while in debug_fw mode\n", __func__);
+ if (debug_fw ||
+ test_bit(WMI_FW_CAPABILITY_WMI_ONLY, wil->fw_capabilities)) {
+ wil_err(wil, "while in debug_fw or wmi_only mode\n");
return -EINVAL;
}
@@ -41,7 +42,7 @@ static int wil_stop(struct net_device *ndev)
{
struct wil6210_priv *wil = ndev_to_wil(ndev);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "stop\n");
return wil_down(wil);
}
@@ -153,7 +154,7 @@ void *wil_if_alloc(struct device *dev)
wil->wdev = wdev;
wil->radio_wdev = wdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_alloc\n");
rc = wil_priv_init(wil);
if (rc) {
@@ -200,7 +201,7 @@ void wil_if_free(struct wil6210_priv *wil)
{
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_free\n");
if (!ndev)
return;
@@ -255,7 +256,7 @@ void wil_if_remove(struct wil6210_priv *wil)
struct net_device *ndev = wil_to_ndev(wil);
struct wireless_dev *wdev = wil_to_wdev(wil);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_remove\n");
unregister_netdev(ndev);
wiphy_unregister(wdev->wiphy);
diff --git a/drivers/net/wireless/ath/wil6210/p2p.c b/drivers/net/wireless/ath/wil6210/p2p.c
index 24184cfc5da4..ccd98487ea09 100644
--- a/drivers/net/wireless/ath/wil6210/p2p.c
+++ b/drivers/net/wireless/ath/wil6210/p2p.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -69,7 +69,7 @@ void wil_p2p_discovery_timer_fn(ulong x)
{
struct wil6210_priv *wil = (void *)x;
- wil_dbg_misc(wil, "%s\n", __func__);
+ wil_dbg_misc(wil, "p2p_discovery_timer_fn\n");
schedule_work(&wil->p2p.discovery_expired_work);
}
@@ -80,27 +80,25 @@ int wil_p2p_search(struct wil6210_priv *wil,
int rc;
struct wil_p2p_info *p2p = &wil->p2p;
- wil_dbg_misc(wil, "%s: channel %d\n",
- __func__, P2P_DMG_SOCIAL_CHANNEL);
+ wil_dbg_misc(wil, "p2p_search: channel %d\n", P2P_DMG_SOCIAL_CHANNEL);
lockdep_assert_held(&wil->mutex);
if (p2p->discovery_started) {
- wil_err(wil, "%s: search failed. discovery already ongoing\n",
- __func__);
+ wil_err(wil, "search failed. discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
rc = wmi_p2p_cfg(wil, P2P_DMG_SOCIAL_CHANNEL, P2P_DEFAULT_BI);
if (rc) {
- wil_err(wil, "%s: wmi_p2p_cfg failed\n", __func__);
+ wil_err(wil, "wmi_p2p_cfg failed\n");
goto out;
}
rc = wmi_set_ssid(wil, strlen(P2P_WILDCARD_SSID), P2P_WILDCARD_SSID);
if (rc) {
- wil_err(wil, "%s: wmi_set_ssid failed\n", __func__);
+ wil_err(wil, "wmi_set_ssid failed\n");
goto out_stop;
}
@@ -108,8 +106,7 @@ int wil_p2p_search(struct wil6210_priv *wil,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_REQ,
request->ie_len, request->ie);
if (rc) {
- wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n",
- __func__);
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_REQ) failed\n");
goto out_stop;
}
@@ -119,14 +116,13 @@ int wil_p2p_search(struct wil6210_priv *wil,
rc = wmi_set_ie(wil, WMI_FRAME_PROBE_RESP,
request->ie_len, request->ie);
if (rc) {
- wil_err(wil, "%s: wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n",
- __func__);
+ wil_err(wil, "wmi_set_ie(WMI_FRAME_PROBE_RESP) failed\n");
goto out_stop;
}
rc = wmi_start_search(wil);
if (rc) {
- wil_err(wil, "%s: wmi_start_search failed\n", __func__);
+ wil_err(wil, "wmi_start_search failed\n");
goto out_stop;
}
@@ -153,12 +149,12 @@ int wil_p2p_listen(struct wil6210_priv *wil, struct wireless_dev *wdev,
if (!chan)
return -EINVAL;
- wil_dbg_misc(wil, "%s: duration %d\n", __func__, duration);
+ wil_dbg_misc(wil, "p2p_listen: duration %d\n", duration);
mutex_lock(&wil->mutex);
if (p2p->discovery_started) {
- wil_err(wil, "%s: discovery already ongoing\n", __func__);
+ wil_err(wil, "discovery already ongoing\n");
rc = -EBUSY;
goto out;
}
@@ -220,8 +216,8 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
mutex_lock(&wil->mutex);
if (cookie != p2p->cookie) {
- wil_info(wil, "%s: Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
- __func__, p2p->cookie, cookie);
+ wil_info(wil, "Cookie mismatch: 0x%016llx vs. 0x%016llx\n",
+ p2p->cookie, cookie);
mutex_unlock(&wil->mutex);
return -ENOENT;
}
@@ -231,7 +227,7 @@ int wil_p2p_cancel_listen(struct wil6210_priv *wil, u64 cookie)
mutex_unlock(&wil->mutex);
if (!started) {
- wil_err(wil, "%s: listen not started\n", __func__);
+ wil_err(wil, "listen not started\n");
return -ENOENT;
}
@@ -253,7 +249,7 @@ void wil_p2p_listen_expired(struct work_struct *work)
struct wil6210_priv, p2p);
u8 started;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "p2p_listen_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
@@ -279,7 +275,7 @@ void wil_p2p_search_expired(struct work_struct *work)
struct wil6210_priv, p2p);
u8 started;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "p2p_search_expired\n");
mutex_lock(&wil->mutex);
started = wil_p2p_stop_discovery(wil);
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index c97263b45cbd..d472e13fb9d9 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -23,7 +23,7 @@
#include <linux/rtnetlink.h>
static bool use_msi = true;
-module_param(use_msi, bool, S_IRUGO);
+module_param(use_msi, bool, 0444);
MODULE_PARM_DESC(use_msi, " Use MSI interrupt, default - true");
#ifdef CONFIG_PM
@@ -99,8 +99,10 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
*/
int msi_only = pdev->msi_enabled;
bool _use_msi = use_msi;
+ bool wmi_only = test_bit(WMI_FW_CAPABILITY_WMI_ONLY,
+ wil->fw_capabilities);
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_pcie_enable, wmi_only %d\n", wmi_only);
pdev->msi_enabled = 0;
@@ -123,9 +125,11 @@ static int wil_if_pcie_enable(struct wil6210_priv *wil)
if (rc)
goto stop_master;
- /* need reset here to obtain MAC */
+ /* need reset here to obtain MAC or in case of WMI-only FW, full reset
+ * and fw loading takes place
+ */
mutex_lock(&wil->mutex);
- rc = wil_reset(wil, false);
+ rc = wil_reset(wil, wmi_only);
mutex_unlock(&wil->mutex);
if (rc)
goto release_irq;
@@ -145,7 +149,7 @@ static int wil_if_pcie_disable(struct wil6210_priv *wil)
{
struct pci_dev *pdev = wil->pdev;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "if_pcie_disable\n");
pci_clear_master(pdev);
/* disable and release IRQ */
@@ -208,21 +212,6 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
return rc;
}
- /* device supports 48 bit addresses */
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
- if (rc) {
- dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
- rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
- if (rc) {
- dev_err(dev,
- "dma_set_mask_and_coherent(32) failed: %d\n",
- rc);
- goto if_free;
- }
- } else {
- wil->use_extended_dma_addr = 1;
- }
-
wil->pdev = pdev;
pci_set_drvdata(pdev, wil);
/* rollback to if_free */
@@ -236,6 +225,21 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
}
/* rollback to err_plat */
+ /* device supports 48bit addresses */
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
+ if (rc) {
+ dev_err(dev, "dma_set_mask_and_coherent(48) failed: %d\n", rc);
+ rc = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
+ if (rc) {
+ dev_err(dev,
+ "dma_set_mask_and_coherent(32) failed: %d\n",
+ rc);
+ goto err_plat;
+ }
+ } else {
+ wil->use_extended_dma_addr = 1;
+ }
+
rc = pci_enable_device(pdev);
if (rc) {
wil_err(wil,
@@ -299,7 +303,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
#endif /* CONFIG_PM */
wil6210_debugfs_init(wil);
-
+ wil6210_sysfs_init(wil);
return 0;
@@ -325,7 +329,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
struct wil6210_priv *wil = pci_get_drvdata(pdev);
void __iomem *csr = wil->csr;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "pcie_remove\n");
#ifdef CONFIG_PM
#ifdef CONFIG_PM_SLEEP
@@ -333,6 +337,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
+ wil6210_sysfs_remove(wil);
wil6210_debugfs_remove(wil);
rtnl_lock();
wil_p2p_wdev_free(wil);
@@ -363,8 +368,7 @@ static int wil6210_suspend(struct device *dev, bool is_runtime)
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
rc = wil_can_suspend(wil, is_runtime);
if (rc)
@@ -390,8 +394,7 @@ static int wil6210_resume(struct device *dev, bool is_runtime)
struct pci_dev *pdev = to_pci_dev(dev);
struct wil6210_priv *wil = pci_get_drvdata(pdev);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
/* allow master */
pci_set_master(pdev);
@@ -411,7 +414,7 @@ static int wil6210_pm_notify(struct notifier_block *notify_block,
int rc = 0;
enum wil_platform_event evt;
- wil_dbg_pm(wil, "%s: mode (%ld)\n", __func__, mode);
+ wil_dbg_pm(wil, "pm_notify: mode (%ld)\n", mode);
switch (mode) {
case PM_HIBERNATION_PREPARE:
diff --git a/drivers/net/wireless/ath/wil6210/pm.c b/drivers/net/wireless/ath/wil6210/pm.c
index 11ee24d509e5..a0acb2d0cb79 100644
--- a/drivers/net/wireless/ath/wil6210/pm.c
+++ b/drivers/net/wireless/ath/wil6210/pm.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014,2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -21,8 +21,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct wireless_dev *wdev = wil->wdev;
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "can_suspend: %s\n", is_runtime ? "runtime" : "system");
if (!netif_running(wil_to_ndev(wil))) {
/* can always sleep when down */
@@ -59,7 +58,7 @@ int wil_can_suspend(struct wil6210_priv *wil, bool is_runtime)
}
out:
- wil_dbg_pm(wil, "%s(%s) => %s (%d)\n", __func__,
+ wil_dbg_pm(wil, "can_suspend: %s => %s (%d)\n",
is_runtime ? "runtime" : "system", rc ? "No" : "Yes", rc);
return rc;
@@ -70,8 +69,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "suspend: %s\n", is_runtime ? "runtime" : "system");
/* if netif up, hardware is alive, shut it down */
if (ndev->flags & IFF_UP) {
@@ -86,7 +84,7 @@ int wil_suspend(struct wil6210_priv *wil, bool is_runtime)
rc = wil->platform_ops.suspend(wil->platform_handle);
out:
- wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ wil_dbg_pm(wil, "suspend: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
@@ -96,8 +94,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
int rc = 0;
struct net_device *ndev = wil_to_ndev(wil);
- wil_dbg_pm(wil, "%s(%s)\n", __func__,
- is_runtime ? "runtime" : "system");
+ wil_dbg_pm(wil, "resume: %s\n", is_runtime ? "runtime" : "system");
if (wil->platform_ops.resume) {
rc = wil->platform_ops.resume(wil->platform_handle);
@@ -115,7 +112,7 @@ int wil_resume(struct wil6210_priv *wil, bool is_runtime)
rc = wil_up(wil);
out:
- wil_dbg_pm(wil, "%s(%s) => %d\n", __func__,
+ wil_dbg_pm(wil, "resume: %s => %d\n",
is_runtime ? "runtime" : "system", rc);
return rc;
}
diff --git a/drivers/net/wireless/ath/wil6210/pmc.c b/drivers/net/wireless/ath/wil6210/pmc.c
index b6765d4a4602..b067fdf086d4 100644
--- a/drivers/net/wireless/ath/wil6210/pmc.c
+++ b/drivers/net/wireless/ath/wil6210/pmc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -60,7 +60,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
if (wil_is_pmc_allocated(pmc)) {
/* sanity check */
- wil_err(wil, "%s: ERROR pmc is already allocated\n", __func__);
+ wil_err(wil, "ERROR pmc is already allocated\n");
goto no_release_err;
}
if ((num_descriptors <= 0) || (descriptor_size <= 0)) {
@@ -90,21 +90,20 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
pmc->num_descriptors = num_descriptors;
pmc->descriptor_size = descriptor_size;
- wil_dbg_misc(wil, "%s: %d descriptors x %d bytes each\n",
- __func__, num_descriptors, descriptor_size);
+ wil_dbg_misc(wil, "pmc_alloc: %d descriptors x %d bytes each\n",
+ num_descriptors, descriptor_size);
/* allocate descriptors info list in pmc context*/
pmc->descriptors = kcalloc(num_descriptors,
sizeof(struct desc_alloc_info),
GFP_KERNEL);
if (!pmc->descriptors) {
- wil_err(wil, "%s: ERROR allocating pmc skb list\n", __func__);
+ wil_err(wil, "ERROR allocating pmc skb list\n");
goto no_release_err;
}
- wil_dbg_misc(wil,
- "%s: allocated descriptors info list %p\n",
- __func__, pmc->descriptors);
+ wil_dbg_misc(wil, "pmc_alloc: allocated descriptors info list %p\n",
+ pmc->descriptors);
/* Allocate pring buffer and descriptors.
* vring->va should be aligned on its size rounded up to power of 2
@@ -131,15 +130,14 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
dma_set_mask_and_coherent(dev, DMA_BIT_MASK(48));
wil_dbg_misc(wil,
- "%s: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
- __func__,
+ "pmc_alloc: allocated pring %p => %pad. %zd x %d = total %zd bytes\n",
pmc->pring_va, &pmc->pring_pa,
sizeof(struct vring_tx_desc),
num_descriptors,
sizeof(struct vring_tx_desc) * num_descriptors);
if (!pmc->pring_va) {
- wil_err(wil, "%s: ERROR allocating pmc pring\n", __func__);
+ wil_err(wil, "ERROR allocating pmc pring\n");
goto release_pmc_skb_list;
}
@@ -158,9 +156,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
GFP_KERNEL);
if (unlikely(!pmc->descriptors[i].va)) {
- wil_err(wil,
- "%s: ERROR allocating pmc descriptor %d",
- __func__, i);
+ wil_err(wil, "ERROR allocating pmc descriptor %d", i);
goto release_pmc_skbs;
}
@@ -180,21 +176,21 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
*_d = *d;
}
- wil_dbg_misc(wil, "%s: allocated successfully\n", __func__);
+ wil_dbg_misc(wil, "pmc_alloc: allocated successfully\n");
pmc_cmd.op = WMI_PMC_ALLOCATE;
pmc_cmd.ring_size = cpu_to_le16(pmc->num_descriptors);
pmc_cmd.mem_base = cpu_to_le64(pmc->pring_pa);
- wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with ALLOCATE op\n", __func__);
+ wil_dbg_misc(wil, "pmc_alloc: send WMI_PMC_CMD with ALLOCATE op\n");
pmc->last_cmd_status = wmi_send(wil,
WMI_PMC_CMDID,
&pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
- "%s: WMI_PMC_CMD with ALLOCATE op failed with status %d",
- __func__, pmc->last_cmd_status);
+ "WMI_PMC_CMD with ALLOCATE op failed with status %d",
+ pmc->last_cmd_status);
goto release_pmc_skbs;
}
@@ -203,7 +199,7 @@ void wil_pmc_alloc(struct wil6210_priv *wil,
return;
release_pmc_skbs:
- wil_err(wil, "%s: exit on error: Releasing skbs...\n", __func__);
+ wil_err(wil, "exit on error: Releasing skbs...\n");
for (i = 0; pmc->descriptors[i].va && i < num_descriptors; i++) {
dma_free_coherent(dev,
descriptor_size,
@@ -212,7 +208,7 @@ release_pmc_skbs:
pmc->descriptors[i].va = NULL;
}
- wil_err(wil, "%s: exit on error: Releasing pring...\n", __func__);
+ wil_err(wil, "exit on error: Releasing pring...\n");
dma_free_coherent(dev,
sizeof(struct vring_tx_desc) * num_descriptors,
@@ -222,8 +218,7 @@ release_pmc_skbs:
pmc->pring_va = NULL;
release_pmc_skb_list:
- wil_err(wil, "%s: exit on error: Releasing descriptors info list...\n",
- __func__);
+ wil_err(wil, "exit on error: Releasing descriptors info list...\n");
kfree(pmc->descriptors);
pmc->descriptors = NULL;
@@ -247,24 +242,23 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
pmc->last_cmd_status = 0;
if (!wil_is_pmc_allocated(pmc)) {
- wil_dbg_misc(wil, "%s: Error, can't free - not allocated\n",
- __func__);
+ wil_dbg_misc(wil,
+ "pmc_free: Error, can't free - not allocated\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return;
}
if (send_pmc_cmd) {
- wil_dbg_misc(wil, "%s: send WMI_PMC_CMD with RELEASE op\n",
- __func__);
+ wil_dbg_misc(wil, "send WMI_PMC_CMD with RELEASE op\n");
pmc_cmd.op = WMI_PMC_RELEASE;
pmc->last_cmd_status =
wmi_send(wil, WMI_PMC_CMDID, &pmc_cmd,
sizeof(pmc_cmd));
if (pmc->last_cmd_status) {
wil_err(wil,
- "%s WMI_PMC_CMD with RELEASE op failed, status %d",
- __func__, pmc->last_cmd_status);
+ "WMI_PMC_CMD with RELEASE op failed, status %d",
+ pmc->last_cmd_status);
/* There's nothing we can do with this error.
* Normally, it should never occur.
* Continue to freeing all memory allocated for pmc.
@@ -276,8 +270,8 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
size_t buf_size = sizeof(struct vring_tx_desc) *
pmc->num_descriptors;
- wil_dbg_misc(wil, "%s: free pring va %p\n",
- __func__, pmc->pring_va);
+ wil_dbg_misc(wil, "pmc_free: free pring va %p\n",
+ pmc->pring_va);
dma_free_coherent(dev, buf_size, pmc->pring_va, pmc->pring_pa);
pmc->pring_va = NULL;
@@ -296,11 +290,11 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
pmc->descriptors[i].pa);
pmc->descriptors[i].va = NULL;
}
- wil_dbg_misc(wil, "%s: free descriptor info %d/%d\n",
- __func__, i, pmc->num_descriptors);
+ wil_dbg_misc(wil, "pmc_free: free descriptor info %d/%d\n", i,
+ pmc->num_descriptors);
wil_dbg_misc(wil,
- "%s: free pmc descriptors info list %p\n",
- __func__, pmc->descriptors);
+ "pmc_free: free pmc descriptors info list %p\n",
+ pmc->descriptors);
kfree(pmc->descriptors);
pmc->descriptors = NULL;
} else {
@@ -316,7 +310,7 @@ void wil_pmc_free(struct wil6210_priv *wil, int send_pmc_cmd)
*/
int wil_pmc_last_cmd_status(struct wil6210_priv *wil)
{
- wil_dbg_misc(wil, "%s: status %d\n", __func__,
+ wil_dbg_misc(wil, "pmc_last_cmd_status: status %d\n",
wil->pmc.last_cmd_status);
return wil->pmc.last_cmd_status;
@@ -339,7 +333,7 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
mutex_lock(&pmc->lock);
if (!wil_is_pmc_allocated(pmc)) {
- wil_err(wil, "%s: error, pmc is not allocated!\n", __func__);
+ wil_err(wil, "error, pmc is not allocated!\n");
pmc->last_cmd_status = -EPERM;
mutex_unlock(&pmc->lock);
return -EPERM;
@@ -348,8 +342,8 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
pmc_size = pmc->descriptor_size * pmc->num_descriptors;
wil_dbg_misc(wil,
- "%s: size %u, pos %lld\n",
- __func__, (unsigned)count, *f_pos);
+ "pmc_read: size %u, pos %lld\n",
+ (u32)count, *f_pos);
pmc->last_cmd_status = 0;
@@ -358,15 +352,16 @@ ssize_t wil_pmc_read(struct file *filp, char __user *buf, size_t count,
offset = *f_pos - (idx * pmc->descriptor_size);
if (*f_pos >= pmc_size) {
- wil_dbg_misc(wil, "%s: reached end of pmc buf: %lld >= %u\n",
- __func__, *f_pos, (unsigned)pmc_size);
+ wil_dbg_misc(wil,
+ "pmc_read: reached end of pmc buf: %lld >= %u\n",
+ *f_pos, (u32)pmc_size);
pmc->last_cmd_status = -ERANGE;
goto out;
}
wil_dbg_misc(wil,
- "%s: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
- __func__, *f_pos, idx, offset, count);
+ "pmc_read: read from pos %lld (descriptor %llu, offset %llu) %zu bytes\n",
+ *f_pos, idx, offset, count);
/* if no errors, return the copied byte count */
retval = simple_read_from_buffer(buf,
diff --git a/drivers/net/wireless/ath/wil6210/rx_reorder.c b/drivers/net/wireless/ath/wil6210/rx_reorder.c
index 19ed127d4d05..7404b6f39c6a 100644
--- a/drivers/net/wireless/ath/wil6210/rx_reorder.c
+++ b/drivers/net/wireless/ath/wil6210/rx_reorder.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2014-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -349,8 +349,8 @@ __acquires(&sta->tid_rx_lock) __releases(&sta->tid_rx_lock)
rc = wmi_addba_rx_resp(wil, cid, tid, dialog_token, status,
agg_amsdu, agg_wsize, agg_timeout);
if (rc || (status != WLAN_STATUS_SUCCESS)) {
- wil_err(wil, "%s: do not apply ba, rc(%d), status(%d)\n",
- __func__, rc, status);
+ wil_err(wil, "do not apply ba, rc(%d), status(%d)\n", rc,
+ status);
goto out;
}
@@ -387,7 +387,7 @@ int wil_addba_tx_request(struct wil6210_priv *wil, u8 ringid, u16 wsize)
txdata->addba_in_progress = true;
rc = wmi_addba(wil, ringid, agg_wsize, agg_timeout);
if (rc) {
- wil_err(wil, "%s: wmi_addba failed, rc (%d)", __func__, rc);
+ wil_err(wil, "wmi_addba failed, rc (%d)", rc);
txdata->addba_in_progress = false;
}
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
new file mode 100644
index 000000000000..0faa26ca41c9
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_tof_get_tx_rx_offset_event evt;
+ } __packed reply;
+ int rc;
+ ssize_t len;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_TOF_GET_TX_RX_OFFSET_CMDID, NULL, 0,
+ WMI_TOF_GET_TX_RX_OFFSET_EVENTID,
+ &reply, sizeof(reply), 100);
+ if (rc < 0)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get_tof_tx_rx_offset failed, error %d\n",
+ reply.evt.status);
+ return -EIO;
+ }
+ len = snprintf(buf, PAGE_SIZE, "%u %u\n",
+ le32_to_cpu(reply.evt.tx_offset),
+ le32_to_cpu(reply.evt.rx_offset));
+ return len;
+}
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ struct wmi_tof_set_tx_rx_offset_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_tof_set_tx_rx_offset_event evt;
+ } __packed reply;
+ unsigned int tx_offset, rx_offset;
+ int rc;
+
+ if (sscanf(buf, "%u %u", &tx_offset, &rx_offset) != 2)
+ return -EINVAL;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.tx_offset = cpu_to_le32(tx_offset);
+ cmd.rx_offset = cpu_to_le32(rx_offset);
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_TOF_SET_TX_RX_OFFSET_CMDID, &cmd, sizeof(cmd),
+ WMI_TOF_SET_TX_RX_OFFSET_EVENTID,
+ &reply, sizeof(reply), 100);
+ if (rc < 0)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "set_tof_tx_rx_offset failed, error %d\n",
+ reply.evt.status);
+ return -EIO;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(ftm_txrx_offset, 0644,
+ wil_ftm_txrx_offset_sysfs_show,
+ wil_ftm_txrx_offset_sysfs_store);
+
+static struct attribute *wil6210_sysfs_entries[] = {
+ &dev_attr_ftm_txrx_offset.attr,
+ NULL
+};
+
+static struct attribute_group wil6210_attribute_group = {
+ .name = "wil6210",
+ .attrs = wil6210_sysfs_entries,
+};
+
+int wil6210_sysfs_init(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+ int err;
+
+ err = sysfs_create_group(&dev->kobj, &wil6210_attribute_group);
+ if (err) {
+ wil_err(wil, "failed to create sysfs group: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void wil6210_sysfs_remove(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ sysfs_remove_group(&dev->kobj, &wil6210_attribute_group);
+}
diff --git a/drivers/net/wireless/ath/wil6210/txrx.c b/drivers/net/wireless/ath/wil6210/txrx.c
index b0166d224508..8b5411e4dc34 100644
--- a/drivers/net/wireless/ath/wil6210/txrx.c
+++ b/drivers/net/wireless/ath/wil6210/txrx.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016 Qualcomm Atheros, Inc.
+ * Copyright (c) 2012-2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -29,12 +29,12 @@
#include "trace.h"
static bool rtap_include_phy_info;
-module_param(rtap_include_phy_info, bool, S_IRUGO);
+module_param(rtap_include_phy_info, bool, 0444);
MODULE_PARM_DESC(rtap_include_phy_info,
" Include PHY info in the radiotap header, default - no");
bool rx_align_2;
-module_param(rx_align_2, bool, S_IRUGO);
+module_param(rx_align_2, bool, 0444);
MODULE_PARM_DESC(rx_align_2, " align Rx buffers on 4*n+2, default - no");
static inline uint wil_rx_snaplen(void)
@@ -112,7 +112,7 @@ static int wil_vring_alloc(struct wil6210_priv *wil, struct vring *vring)
size_t sz = vring->size * sizeof(vring->va[0]);
uint i;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "vring_alloc:\n");
BUILD_BUG_ON(sizeof(vring->va[0]) != 32);
@@ -762,7 +762,7 @@ void wil_rx_handle(struct wil6210_priv *wil, int *quota)
wil_err(wil, "Rx IRQ while Rx not yet initialized\n");
return;
}
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "rx_handle\n");
while ((*quota > 0) && (NULL != (skb = wil_vring_reap_rx(wil, v)))) {
(*quota)--;
@@ -785,7 +785,7 @@ int wil_rx_init(struct wil6210_priv *wil, u16 size)
struct vring *vring = &wil->vring_rx;
int rc;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "rx_init\n");
if (vring->va) {
wil_err(wil, "Rx ring already allocated\n");
@@ -816,7 +816,7 @@ void wil_rx_fini(struct wil6210_priv *wil)
{
struct vring *vring = &wil->vring_rx;
- wil_dbg_misc(wil, "%s()\n", __func__);
+ wil_dbg_misc(wil, "rx_fini\n");
if (vring->va)
wil_vring_free(wil, vring, 0);
@@ -868,7 +868,7 @@ int wil_vring_init_tx(struct wil6210_priv *wil, int id, int size,
struct vring *vring = &wil->vring_tx[id];
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
- wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ wil_dbg_misc(wil, "vring_init_tx: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@@ -948,7 +948,7 @@ int wil_vring_init_bcast(struct wil6210_priv *wil, int id, int size)
struct vring *vring = &wil->vring_tx[id];
struct vring_tx_data *txdata = &wil->vring_tx_data[id];
- wil_dbg_misc(wil, "%s() max_mpdu_size %d\n", __func__,
+ wil_dbg_misc(wil, "vring_init_bcast: max_mpdu_size %d\n",
cmd.vring_cfg.tx_sw_ring.max_mpdu_size);
lockdep_assert_held(&wil->mutex);
@@ -1010,7 +1010,7 @@ void wil_vring_fini_tx(struct wil6210_priv *wil, int id)
if (!vring->va)
return;
- wil_dbg_misc(wil, "%s() id=%d\n", __func__, id);
+ wil_dbg_misc(wil, "vring_fini_tx: id=%d\n", id);
spin_lock_bh(&txdata->lock);
txdata->dot1x_open = false;
@@ -1049,12 +1049,14 @@ static struct vring *wil_find_tx_ucast(struct wil6210_priv *wil,
struct vring *v = &wil->vring_tx[i];
struct vring_tx_data *txdata = &wil->vring_tx_data[i];
- wil_dbg_txrx(wil, "%s(%pM) -> [%d]\n",
- __func__, eth->h_dest, i);
+ wil_dbg_txrx(wil, "find_tx_ucast: (%pM) -> [%d]\n",
+ eth->h_dest, i);
if (v->va && txdata->enabled) {
return v;
} else {
- wil_dbg_txrx(wil, "vring[%d] not valid\n", i);
+ wil_dbg_txrx(wil,
+ "find_tx_ucast: vring[%d] not valid\n",
+ i);
return NULL;
}
}
@@ -1210,17 +1212,6 @@ found:
return v;
}
-static struct vring *wil_find_tx_bcast(struct wil6210_priv *wil,
- struct sk_buff *skb)
-{
- struct wireless_dev *wdev = wil->wdev;
-
- if (wdev->iftype != NL80211_IFTYPE_AP)
- return wil_find_tx_bcast_2(wil, skb);
-
- return wil_find_tx_bcast_1(wil, skb);
-}
-
static int wil_tx_desc_map(struct vring_tx_desc *d, dma_addr_t pa, u32 len,
int vring_index)
{
@@ -1390,8 +1381,8 @@ static int __wil_tx_vring_tso(struct wil6210_priv *wil, struct vring *vring,
int gso_type;
int rc = -EINVAL;
- wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
- __func__, skb->len, vring_index);
+ wil_dbg_txrx(wil, "tx_vring_tso: %d bytes to vring %d\n", skb->len,
+ vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1660,8 +1651,8 @@ static int __wil_tx_vring(struct wil6210_priv *wil, struct vring *vring,
bool mcast = (vring_index == wil->bcast_vring);
uint len = skb_headlen(skb);
- wil_dbg_txrx(wil, "%s() %d bytes to vring %d\n",
- __func__, skb->len, vring_index);
+ wil_dbg_txrx(wil, "tx_vring: %d bytes to vring %d\n", skb->len,
+ vring_index);
if (unlikely(!txdata->enabled))
return -EINVAL;
@@ -1901,7 +1892,7 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
static bool pr_once_fw;
int rc;
- wil_dbg_txrx(wil, "%s()\n", __func__);
+ wil_dbg_txrx(wil, "start_xmit\n");
if (unlikely(!test_bit(wil_status_fwready, wil->status))) {
if (!pr_once_fw) {
wil_err(wil, "FW not ready\n");
@@ -1920,12 +1911,26 @@ netdev_tx_t wil_start_xmit(struct sk_buff *skb, struct net_device *ndev)
pr_once_fw = false;
/* find vring */
- if (wil->wdev->iftype == NL80211_IFTYPE_STATION) {
- /* in STA mode (ESS), all to same VRING */
+ if (wil->wdev->iftype == NL80211_IFTYPE_STATION && !wil->pbss) {
+ /* in STA mode (ESS), all to same VRING (to AP) */
vring = wil_find_tx_vring_sta(wil, skb);
- } else { /* direct communication, find matching VRING */
- vring = bcast ? wil_find_tx_bcast(wil, skb) :
- wil_find_tx_ucast(wil, skb);
+ } else if (bcast) {
+ if (wil->pbss)
+ /* in pbss, no bcast VRING - duplicate skb in
+ * all stations VRINGs
+ */
+ vring = wil_find_tx_bcast_2(wil, skb);
+ else if (wil->wdev->iftype == NL80211_IFTYPE_AP)
+ /* AP has a dedicated bcast VRING */
+ vring = wil_find_tx_bcast_1(wil, skb);
+ else
+ /* unexpected combination, fallback to duplicating
+ * the skb in all stations VRINGs
+ */
+ vring = wil_find_tx_bcast_2(wil, skb);
+ } else {
+ /* unicast, find specific VRING by dest. address */
+ vring = wil_find_tx_ucast(wil, skb);
}
if (unlikely(!vring)) {
wil_dbg_txrx(wil, "No Tx VRING found for %pM\n", eth->h_dest);
@@ -1999,7 +2004,7 @@ int wil_tx_complete(struct wil6210_priv *wil, int ringid)
return 0;
}
- wil_dbg_txrx(wil, "%s(%d)\n", __func__, ringid);
+ wil_dbg_txrx(wil, "tx_complete: (%d)\n", ringid);
used_before_complete = wil_vring_used_tx(vring);
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 19de0ebb48f7..fcfbcf7fbd7d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -528,6 +528,7 @@ struct wil_sta_info {
unsigned long tid_rx_stop_requested[BITS_TO_LONGS(WIL_STA_TID_NUM)];
struct wil_tid_crypto_rx tid_crypto_rx[WIL_STA_TID_NUM];
struct wil_tid_crypto_rx group_crypto_rx;
+ u8 aid; /* 1-254; 0 if unknown/not reported */
};
enum {
@@ -672,6 +673,7 @@ struct wil6210_priv {
struct dentry *debug;
struct wil_blob_wrapper blobs[ARRAY_SIZE(fw_mapping)];
u8 discovery_mode;
+ u8 abft_len;
void *platform_handle;
struct wil_platform_ops platform_ops;
@@ -891,6 +893,8 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
int wil6210_debugfs_init(struct wil6210_priv *wil);
void wil6210_debugfs_remove(struct wil6210_priv *wil);
+int wil6210_sysfs_init(struct wil6210_priv *wil);
+void wil6210_sysfs_remove(struct wil6210_priv *wil);
int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
struct station_info *sinfo);
diff --git a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
index d051eea47a54..e53cf0cf7031 100644
--- a/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
+++ b/drivers/net/wireless/ath/wil6210/wil_crash_dump.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015 Qualcomm Atheros, Inc.
+ * Copyright (c) 2015,2017 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
@@ -62,13 +62,13 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
u32 host_min, dump_size, offset, len;
if (wil_fw_get_crash_dump_bounds(wil, &dump_size, &host_min)) {
- wil_err(wil, "%s: fail to obtain crash dump size\n", __func__);
+ wil_err(wil, "fail to obtain crash dump size\n");
return -EINVAL;
}
if (dump_size > size) {
- wil_err(wil, "%s: not enough space for dump. Need %d have %d\n",
- __func__, dump_size, size);
+ wil_err(wil, "not enough space for dump. Need %d have %d\n",
+ dump_size, size);
return -EINVAL;
}
@@ -83,8 +83,9 @@ int wil_fw_copy_crash_dump(struct wil6210_priv *wil, void *dest, u32 size)
len = map->to - map->from;
offset = map->host - host_min;
- wil_dbg_misc(wil, "%s() - dump %s, size %d, offset %d\n",
- __func__, fw_mapping[i].name, len, offset);
+ wil_dbg_misc(wil,
+ "fw_copy_crash_dump: - dump %s, size %d, offset %d\n",
+ fw_mapping[i].name, len, offset);
wil_memcpy_fromio_32((void * __force)(dest + offset),
(const void __iomem * __force)data, len);
@@ -99,7 +100,7 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
u32 fw_dump_size;
if (wil_fw_get_crash_dump_bounds(wil, &fw_dump_size, NULL)) {
- wil_err(wil, "%s: fail to get fw dump size\n", __func__);
+ wil_err(wil, "fail to get fw dump size\n");
return;
}
@@ -115,6 +116,5 @@ void wil_fw_core_dump(struct wil6210_priv *wil)
* after 5 min
*/
dev_coredumpv(wil_to_dev(wil), fw_dump_data, fw_dump_size, GFP_KERNEL);
- wil_info(wil, "%s: fw core dumped, size %d bytes\n", __func__,
- fw_dump_size);
+ wil_info(wil, "fw core dumped, size %d bytes\n", fw_dump_size);
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.c b/drivers/net/wireless/ath/wil6210/wmi.c
index ef6472fb1f7e..bccd27dcb42b 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.c
+++ b/drivers/net/wireless/ath/wil6210/wmi.c
@@ -25,16 +25,16 @@
#include "ftm.h"
static uint max_assoc_sta = WIL6210_MAX_CID;
-module_param(max_assoc_sta, uint, S_IRUGO | S_IWUSR);
+module_param(max_assoc_sta, uint, 0644);
MODULE_PARM_DESC(max_assoc_sta, " Max number of stations associated to the AP");
int agg_wsize; /* = 0; */
-module_param(agg_wsize, int, S_IRUGO | S_IWUSR);
+module_param(agg_wsize, int, 0644);
MODULE_PARM_DESC(agg_wsize, " Window size for Tx Block Ack after connect;"
" 0 - use default; < 0 - don't auto-establish");
u8 led_id = WIL_LED_INVALID_ID;
-module_param(led_id, byte, S_IRUGO);
+module_param(led_id, byte, 0444);
MODULE_PARM_DESC(led_id,
" 60G device led enablement. Set the led ID (0-2) to enable");
@@ -495,8 +495,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
}
ch = evt->channel + 1;
- wil_info(wil, "Connect %pM channel [%d] cid %d\n",
- evt->bssid, ch, evt->cid);
+ wil_info(wil, "Connect %pM channel [%d] cid %d aid %d\n",
+ evt->bssid, ch, evt->cid, evt->aid);
wil_hex_dump_wmi("connect AI : ", DUMP_PREFIX_OFFSET, 16, 1,
evt->assoc_info, len - sizeof(*evt), true);
@@ -539,8 +539,8 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
} else if ((wdev->iftype == NL80211_IFTYPE_AP) ||
(wdev->iftype == NL80211_IFTYPE_P2P_GO)) {
if (wil->sta[evt->cid].status != wil_sta_unused) {
- wil_err(wil, "%s: AP: Invalid status %d for CID %d\n",
- __func__, wil->sta[evt->cid].status, evt->cid);
+ wil_err(wil, "AP: Invalid status %d for CID %d\n",
+ wil->sta[evt->cid].status, evt->cid);
mutex_unlock(&wil->mutex);
return;
}
@@ -553,13 +553,12 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
rc = wil_tx_init(wil, evt->cid);
if (rc) {
- wil_err(wil, "%s: config tx vring failed for CID %d, rc (%d)\n",
- __func__, evt->cid, rc);
+ wil_err(wil, "config tx vring failed for CID %d, rc (%d)\n",
+ evt->cid, rc);
wmi_disconnect_sta(wil, wil->sta[evt->cid].addr,
WLAN_REASON_UNSPECIFIED, false, false);
} else {
- wil_info(wil, "%s: successful connection to CID %d\n",
- __func__, evt->cid);
+ wil_info(wil, "successful connection to CID %d\n", evt->cid);
}
if ((wdev->iftype == NL80211_IFTYPE_STATION) ||
@@ -567,9 +566,7 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
if (rc) {
netif_carrier_off(ndev);
wil6210_bus_request(wil, WIL_DEFAULT_BUS_REQUEST_KBPS);
- wil_err(wil,
- "%s: cfg80211_connect_result with failure\n",
- __func__);
+ wil_err(wil, "cfg80211_connect_result with failure\n");
cfg80211_connect_result(ndev, evt->bssid, NULL, 0,
NULL, 0,
WLAN_STATUS_UNSPECIFIED_FAILURE,
@@ -602,12 +599,13 @@ static void wmi_evt_connect(struct wil6210_priv *wil, int id, void *d, int len)
cfg80211_new_sta(ndev, evt->bssid, &sinfo, GFP_KERNEL);
} else {
- wil_err(wil, "%s: unhandled iftype %d for CID %d\n",
- __func__, wdev->iftype, evt->cid);
+ wil_err(wil, "unhandled iftype %d for CID %d\n", wdev->iftype,
+ evt->cid);
goto out;
}
wil->sta[evt->cid].status = wil_sta_connected;
+ wil->sta[evt->cid].aid = evt->aid;
set_bit(wil_status_fwconnected, wil->status);
wil_update_net_queues_bh(wil, NULL, false);
@@ -961,8 +959,8 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
offsetof(struct wil6210_mbox_ctl, rx.tail), r->tail);
if (immed_reply) {
- wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
- __func__, wil->reply_id);
+ wil_dbg_wmi(wil, "recv_cmd: Complete WMI 0x%04x\n",
+ wil->reply_id);
kfree(evt);
num_immed_reply++;
complete(&wil->wmi_call);
@@ -976,7 +974,7 @@ void wmi_recv_cmd(struct wil6210_priv *wil)
}
}
/* normally, 1 event per IRQ should be processed */
- wil_dbg_wmi(wil, "%s -> %d events queued, %d completed\n", __func__,
+ wil_dbg_wmi(wil, "recv_cmd: -> %d events queued, %d completed\n",
n - num_immed_reply, num_immed_reply);
}
@@ -1113,6 +1111,7 @@ int wmi_pcp_start(struct wil6210_priv *wil, int bi, u8 wmi_nettype,
.hidden_ssid = hidden_ssid,
.is_go = is_go,
.disable_ap_sme = disable_ap_sme,
+ .abft_len = wil->abft_len,
};
struct {
struct wmi_cmd_hdr wmi;
@@ -1403,7 +1402,7 @@ int wmi_rxon(struct wil6210_priv *wil, bool on)
struct wmi_listen_started_event evt;
} __packed reply;
- wil_info(wil, "%s(%s)\n", __func__, on ? "on" : "off");
+ wil_info(wil, "(%s)\n", on ? "on" : "off");
if (on) {
rc = wmi_call(wil, WMI_START_LISTEN_CMDID, NULL, 0,
@@ -1523,7 +1522,7 @@ int wmi_disconnect_sta(struct wil6210_priv *wil, const u8 *mac,
struct wmi_disconnect_event evt;
} __packed reply;
- wil_dbg_wmi(wil, "%s(%pM, reason %d)\n", __func__, mac, reason);
+ wil_dbg_wmi(wil, "disconnect_sta: (%pM, reason %d)\n", mac, reason);
if (del_sta) {
ether_addr_copy(del_sta_cmd.dst_mac, mac);
@@ -1568,8 +1567,8 @@ int wmi_addba(struct wil6210_priv *wil, u8 ringid, u8 size, u16 timeout)
.amsdu = 0,
};
- wil_dbg_wmi(wil, "%s(ring %d size %d timeout %d)\n", __func__,
- ringid, size, timeout);
+ wil_dbg_wmi(wil, "addba: (ring %d size %d timeout %d)\n", ringid, size,
+ timeout);
return wmi_send(wil, WMI_VRING_BA_EN_CMDID, &cmd, sizeof(cmd));
}
@@ -1581,8 +1580,7 @@ int wmi_delba_tx(struct wil6210_priv *wil, u8 ringid, u16 reason)
.reason = cpu_to_le16(reason),
};
- wil_dbg_wmi(wil, "%s(ring %d reason %d)\n", __func__,
- ringid, reason);
+ wil_dbg_wmi(wil, "delba_tx: (ring %d reason %d)\n", ringid, reason);
return wmi_send(wil, WMI_VRING_BA_DIS_CMDID, &cmd, sizeof(cmd));
}
@@ -1594,8 +1592,8 @@ int wmi_delba_rx(struct wil6210_priv *wil, u8 cidxtid, u16 reason)
.reason = cpu_to_le16(reason),
};
- wil_dbg_wmi(wil, "%s(CID %d TID %d reason %d)\n", __func__,
- cidxtid & 0xf, (cidxtid >> 4) & 0xf, reason);
+ wil_dbg_wmi(wil, "delba_rx: (CID %d TID %d reason %d)\n", cidxtid & 0xf,
+ (cidxtid >> 4) & 0xf, reason);
return wmi_send(wil, WMI_RCP_DELBA_CMDID, &cmd, sizeof(cmd));
}
@@ -1770,7 +1768,7 @@ void wmi_event_flush(struct wil6210_priv *wil)
ulong flags;
struct pending_wmi_event *evt, *t;
- wil_dbg_wmi(wil, "%s()\n", __func__);
+ wil_dbg_wmi(wil, "event_flush\n");
spin_lock_irqsave(&wil->wmi_ev_lock, flags);
@@ -1815,8 +1813,8 @@ static void wmi_event_handle(struct wil6210_priv *wil,
WARN_ON(wil->reply_buf);
wmi_evt_call_handler(wil, id, evt_data,
len - sizeof(*wmi));
- wil_dbg_wmi(wil, "%s: Complete WMI 0x%04x\n",
- __func__, id);
+ wil_dbg_wmi(wil, "event_handle: Complete WMI 0x%04x\n",
+ id);
complete(&wil->wmi_call);
return;
}
@@ -1863,11 +1861,11 @@ void wmi_event_worker(struct work_struct *work)
struct pending_wmi_event *evt;
struct list_head *lh;
- wil_dbg_wmi(wil, "Start %s\n", __func__);
+ wil_dbg_wmi(wil, "event_worker: Start\n");
while ((lh = next_wmi_ev(wil)) != NULL) {
evt = list_entry(lh, struct pending_wmi_event, list);
wmi_event_handle(wil, &evt->event.hdr);
kfree(evt);
}
- wil_dbg_wmi(wil, "Finished %s\n", __func__);
+ wil_dbg_wmi(wil, "event_worker: Finished\n");
}
diff --git a/drivers/net/wireless/ath/wil6210/wmi.h b/drivers/net/wireless/ath/wil6210/wmi.h
index 9c4a0bdc51f3..7c9fee57aa91 100644
--- a/drivers/net/wireless/ath/wil6210/wmi.h
+++ b/drivers/net/wireless/ath/wil6210/wmi.h
@@ -57,6 +57,7 @@ enum wmi_fw_capability {
WMI_FW_CAPABILITY_RF_SECTORS = 2,
WMI_FW_CAPABILITY_MGMT_RETRY_LIMIT = 3,
WMI_FW_CAPABILITY_DISABLE_AP_SME = 4,
+ WMI_FW_CAPABILITY_WMI_ONLY = 5,
WMI_FW_CAPABILITY_MAX,
};
@@ -186,6 +187,7 @@ enum wmi_command_id {
WMI_RS_CFG_CMDID = 0x921,
WMI_GET_DETAILED_RS_RES_CMDID = 0x922,
WMI_AOA_MEAS_CMDID = 0x923,
+ WMI_BRP_SET_ANT_LIMIT_CMDID = 0x924,
WMI_SET_MGMT_RETRY_LIMIT_CMDID = 0x930,
WMI_GET_MGMT_RETRY_LIMIT_CMDID = 0x931,
WMI_NEW_STA_CMDID = 0x935,
@@ -546,7 +548,9 @@ struct wmi_pcp_start_cmd {
u8 pcp_max_assoc_sta;
u8 hidden_ssid;
u8 is_go;
- u8 reserved0[6];
+ u8 reserved0[5];
+ /* abft_len override if non-0 */
+ u8 abft_len;
u8 disable_ap_sme;
u8 network_type;
u8 channel;
@@ -1083,6 +1087,7 @@ enum wmi_event_id {
WMI_RS_CFG_DONE_EVENTID = 0x1921,
WMI_GET_DETAILED_RS_RES_EVENTID = 0x1922,
WMI_AOA_MEAS_EVENTID = 0x1923,
+ WMI_BRP_SET_ANT_LIMIT_EVENTID = 0x1924,
WMI_SET_MGMT_RETRY_LIMIT_EVENTID = 0x1930,
WMI_GET_MGMT_RETRY_LIMIT_EVENTID = 0x1931,
WMI_TOF_SESSION_END_EVENTID = 0x1991,
@@ -1303,7 +1308,8 @@ struct wmi_connect_event {
u8 assoc_req_len;
u8 assoc_resp_len;
u8 cid;
- u8 reserved2[3];
+ u8 aid;
+ u8 reserved2[2];
/* not in use */
u8 assoc_info[0];
} __packed;
@@ -1776,6 +1782,42 @@ struct wmi_get_detailed_rs_res_event {
u8 reserved[3];
} __packed;
+/* BRP antenna limit mode */
+enum wmi_brp_ant_limit_mode {
+ /* Disable BRP force antenna limit */
+ WMI_BRP_ANT_LIMIT_MODE_DISABLE = 0x00,
+ /* Define maximal antennas limit. Only effective antennas will be
+ * actually used
+ */
+ WMI_BRP_ANT_LIMIT_MODE_EFFECTIVE = 0x01,
+ /* Force a specific number of antennas */
+ WMI_BRP_ANT_LIMIT_MODE_FORCE = 0x02,
+ /* number of BRP antenna limit modes */
+ WMI_BRP_ANT_LIMIT_MODES_NUM = 0x03,
+};
+
+/* WMI_BRP_SET_ANT_LIMIT_CMDID */
+struct wmi_brp_set_ant_limit_cmd {
+ /* connection id */
+ u8 cid;
+ /* enum wmi_brp_ant_limit_mode */
+ u8 limit_mode;
+ /* antenna limit count, 1-27
+ * disable_mode - ignored
+ * effective_mode - upper limit to number of antennas to be used
+ * force_mode - exact number of antennas to be used
+ */
+ u8 ant_limit;
+ u8 reserved;
+} __packed;
+
+/* WMI_BRP_SET_ANT_LIMIT_EVENTID */
+struct wmi_brp_set_ant_limit_event {
+ /* wmi_fw_status */
+ u8 status;
+ u8 reserved[3];
+} __packed;
+
/* broadcast connection ID */
#define WMI_LINK_MAINTAIN_CFG_CID_BROADCAST (0xFFFFFFFF)
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 7c8b5e3e57a1..cd105a0bf5d1 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2414,8 +2414,16 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
dev->res[base_sel - 1].base,
wr_offset, wr_mask, wr_value);
- msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
- wr_offset, wr_mask, wr_value);
+ base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+ if (wr_offset > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_offset))
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+ dev->rc_idx, wr_offset, base_sel_size - 4);
+ else
+ msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+ wr_offset, wr_mask, wr_value);
break;
case 13: /* dump all registers of base_sel */
diff --git a/drivers/phy/phy-qcom-ufs.c b/drivers/phy/phy-qcom-ufs.c
index 6e06fef81849..c3cb57ed6083 100644
--- a/drivers/phy/phy-qcom-ufs.c
+++ b/drivers/phy/phy-qcom-ufs.c
@@ -267,6 +267,14 @@ static int __ufs_qcom_phy_init_vreg(struct phy *phy,
char prop_name[MAX_PROP_NAME];
+ if (dev->of_node) {
+ snprintf(prop_name, MAX_PROP_NAME, "%s-supply", name);
+ if (!of_parse_phandle(dev->of_node, prop_name, 0)) {
+ dev_dbg(dev, "No vreg data found for %s\n", prop_name);
+ return optional ? err : -ENODATA;
+ }
+ }
+
vreg->name = kstrdup(name, GFP_KERNEL);
if (!vreg->name) {
err = -ENOMEM;
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.c b/drivers/pinctrl/qcom/pinctrl-msm.c
index 8e585aefebf2..c1448955d3ed 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2013, Sony Mobile Communications AB.
- * Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -27,6 +27,7 @@
#include <linux/gpio.h>
#include <linux/interrupt.h>
#include <linux/spinlock.h>
+#include <linux/syscore_ops.h>
#include <linux/reboot.h>
#include <linux/irqchip/msm-mpm-irq.h>
#include "../core.h"
@@ -70,6 +71,8 @@ struct msm_pinctrl {
void __iomem *regs;
};
+static struct msm_pinctrl *msm_pinctrl_data;
+
static inline struct msm_pinctrl *to_msm_pinctrl(struct gpio_chip *gc)
{
return container_of(gc, struct msm_pinctrl, chip);
@@ -896,6 +899,52 @@ static void msm_pinctrl_setup_pm_reset(struct msm_pinctrl *pctrl)
}
}
+#ifdef CONFIG_PM
+static int msm_pinctrl_suspend(void)
+{
+ return 0;
+}
+
+static void msm_pinctrl_resume(void)
+{
+ int i, irq;
+ u32 val;
+ unsigned long flags;
+ struct irq_desc *desc;
+ const struct msm_pingroup *g;
+ const char *name = "null";
+ struct msm_pinctrl *pctrl = msm_pinctrl_data;
+
+ if (!msm_show_resume_irq_mask)
+ return;
+
+ spin_lock_irqsave(&pctrl->lock, flags);
+ for_each_set_bit(i, pctrl->enabled_irqs, pctrl->chip.ngpio) {
+ g = &pctrl->soc->groups[i];
+ val = readl_relaxed(pctrl->regs + g->intr_status_reg);
+ if (val & BIT(g->intr_status_bit)) {
+ irq = irq_find_mapping(pctrl->chip.irqdomain, i);
+ desc = irq_to_desc(irq);
+ if (desc == NULL)
+ name = "stray irq";
+ else if (desc->action && desc->action->name)
+ name = desc->action->name;
+
+ pr_warn("%s: %d triggered %s\n", __func__, irq, name);
+ }
+ }
+ spin_unlock_irqrestore(&pctrl->lock, flags);
+}
+#else
+#define msm_pinctrl_suspend NULL
+#define msm_pinctrl_resume NULL
+#endif
+
+static struct syscore_ops msm_pinctrl_pm_ops = {
+ .suspend = msm_pinctrl_suspend,
+ .resume = msm_pinctrl_resume,
+};
+
int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data)
{
@@ -903,7 +952,8 @@ int msm_pinctrl_probe(struct platform_device *pdev,
struct resource *res;
int ret;
- pctrl = devm_kzalloc(&pdev->dev, sizeof(*pctrl), GFP_KERNEL);
+ msm_pinctrl_data = pctrl = devm_kzalloc(&pdev->dev,
+ sizeof(*pctrl), GFP_KERNEL);
if (!pctrl) {
dev_err(&pdev->dev, "Can't allocate msm_pinctrl\n");
return -ENOMEM;
@@ -944,6 +994,7 @@ int msm_pinctrl_probe(struct platform_device *pdev,
pctrl->irq_chip_extn = &mpm_pinctrl_extn;
platform_set_drvdata(pdev, pctrl);
+ register_syscore_ops(&msm_pinctrl_pm_ops);
dev_dbg(&pdev->dev, "Probed Qualcomm pinctrl driver\n");
return 0;
@@ -958,6 +1009,7 @@ int msm_pinctrl_remove(struct platform_device *pdev)
pinctrl_unregister(pctrl->pctrl);
unregister_restart_handler(&pctrl->restart_nb);
+ unregister_syscore_ops(&msm_pinctrl_pm_ops);
return 0;
}
diff --git a/drivers/pinctrl/qcom/pinctrl-msm.h b/drivers/pinctrl/qcom/pinctrl-msm.h
index 54fdd04ce9d5..e986fdacc0bf 100644
--- a/drivers/pinctrl/qcom/pinctrl-msm.h
+++ b/drivers/pinctrl/qcom/pinctrl-msm.h
@@ -121,4 +121,5 @@ int msm_pinctrl_probe(struct platform_device *pdev,
const struct msm_pinctrl_soc_data *soc_data);
int msm_pinctrl_remove(struct platform_device *pdev);
+extern int msm_show_resume_irq_mask;
#endif
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 024c66ac8e57..66bdc593f811 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -217,4 +217,11 @@ config USB_BAM
Enabling this option adds USB BAM Driver.
USB BAM driver was added to supports SPS Peripheral-to-Peripheral
transfers between the USB and other peripheral.
+
+config MSM_EXT_DISPLAY
+ bool "MSM External Display Driver"
+ help
+ Enabling this option adds MSM External Display Driver.
+ External Display driver was added to support the communication
+ between external display driver and its counterparts.
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index d5e87c209c21..d985aa81a3bb 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_SEEMP_CORE) += seemp_core/
obj-$(CONFIG_SSM) += ssm.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
+obj-$(CONFIG_MSM_EXT_DISPLAY) += msm_ext_display.o
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 188388bc97a0..f48182cc04df 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -22,7 +22,7 @@
#include "gsi_reg.h"
#define GSI_CMD_TIMEOUT (5*HZ)
-#define GSI_STOP_CMD_TIMEOUT_MS 1
+#define GSI_STOP_CMD_TIMEOUT_MS 10
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_MHI_ER_START 10
#define GSI_MHI_ER_END 16
@@ -264,6 +264,11 @@ static void gsi_handle_glob_err(uint32_t err)
}
}
+static void gsi_handle_gp_int1(void)
+{
+ complete(&gsi_ctx->gen_ee_cmd_compl);
+}
+
static void gsi_handle_glob_ee(int ee)
{
uint32_t val;
@@ -288,8 +293,7 @@ static void gsi_handle_glob_ee(int ee)
}
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) {
- notify.evt_id = GSI_PER_EVT_GLOB_GP1;
- gsi_ctx->per.notify_cb(&notify);
+ gsi_handle_gp_int1();
}
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
@@ -2745,6 +2749,67 @@ void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
+{
+ enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
+ uint32_t val;
+ int res;
+
+ if (!gsi_ctx) {
+ pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+ return -GSI_STATUS_NODEV;
+ }
+
+ if (chan_idx >= gsi_ctx->max_ch || !code) {
+ GSIERR("bad params chan_idx=%d\n", chan_idx);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
+ mutex_lock(&gsi_ctx->mlock);
+ reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
+
+ /* invalidate the response */
+ gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+ gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
+ gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+
+ gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
+ val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
+ ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
+ ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
+ gsi_writel(val, gsi_ctx->base +
+ GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
+
+ res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
+ msecs_to_jiffies(GSI_CMD_TIMEOUT));
+ if (res == 0) {
+ GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
+ res = -GSI_STATUS_TIMED_OUT;
+ goto free_lock;
+ }
+
+ gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+ if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
+ GSIERR("No response received\n");
+ res = -GSI_STATUS_ERROR;
+ goto free_lock;
+ }
+
+ res = GSI_STATUS_SUCCESS;
+ *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
+free_lock:
+ mutex_unlock(&gsi_ctx->mlock);
+
+ return res;
+}
+EXPORT_SYMBOL(gsi_halt_channel_ee);
+
static int msm_gsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2757,6 +2822,7 @@ static int msm_gsi_probe(struct platform_device *pdev)
}
gsi_ctx->dev = dev;
+ init_completion(&gsi_ctx->gen_ee_cmd_compl);
gsi_debugfs_init();
return 0;
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 750ae2b329d3..d0eb162c49cf 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -115,9 +115,12 @@ struct gsi_evt_ctx {
struct gsi_ee_scratch {
union __packed {
struct {
- uint32_t resvd1:15;
+ uint32_t inter_ee_cmd_return_code:3;
+ uint32_t resvd1:2;
+ uint32_t generic_ee_cmd_return_code:3;
+ uint32_t resvd2:7;
uint32_t max_usb_pkt_size:1;
- uint32_t resvd2:8;
+ uint32_t resvd3:8;
uint32_t mhi_base_chan_idx:8;
} s;
uint32_t val;
@@ -135,6 +138,10 @@ struct ch_debug_stats {
unsigned long cmd_completed;
};
+struct gsi_generic_ee_cmd_debug_stats {
+ unsigned long halt_channel;
+};
+
struct gsi_ctx {
void __iomem *base;
struct device *dev;
@@ -143,6 +150,7 @@ struct gsi_ctx {
struct gsi_chan_ctx chan[GSI_CHAN_MAX];
struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
+ struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
@@ -154,6 +162,7 @@ struct gsi_ctx {
struct workqueue_struct *dp_stat_wq;
u32 max_ch;
u32 max_ev;
+ struct completion gen_ee_cmd_compl;
};
enum gsi_re_type {
@@ -227,6 +236,18 @@ enum gsi_evt_ch_cmd_opcode {
GSI_EVT_DE_ALLOC = 0xa,
};
+enum gsi_generic_ee_cmd_opcode {
+ GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
+};
+
+enum gsi_generic_ee_cmd_return_code {
+ GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
+ GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
+};
+
extern struct gsi_ctx *gsi_ctx;
void gsi_debugfs_init(void);
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 1acaf74a0968..d0462aad72d2 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1365,8 +1365,12 @@
(GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n))
#define GSI_EE_n_GSI_EE_GENERIC_CMD_RMSK 0xffffffff
#define GSI_EE_n_GSI_EE_GENERIC_CMD_MAXn 3
-#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa
/* v1.0 */
#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
index 66e329a03df7..74e7394a80b1 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_client.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -95,6 +95,46 @@ int ipa_disable_data_path(u32 clnt_hdl)
return res;
}
+int ipa2_enable_force_clear(u32 request_id, bool throttle_source,
+ u32 source_pipe_bitmask)
+{
+ struct ipa_enable_force_clear_datapath_req_msg_v01 req;
+ int result;
+
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ req.source_pipe_bitmask = source_pipe_bitmask;
+ if (throttle_source) {
+ req.throttle_source_valid = 1;
+ req.throttle_source = 1;
+ }
+ result = qmi_enable_force_clear_datapath_send(&req);
+ if (result) {
+ IPAERR("qmi_enable_force_clear_datapath_send failed %d\n",
+ result);
+ return result;
+ }
+
+ return 0;
+}
+
+int ipa2_disable_force_clear(u32 request_id)
+{
+ struct ipa_disable_force_clear_datapath_req_msg_v01 req;
+ int result;
+
+ memset(&req, 0, sizeof(req));
+ req.request_id = request_id;
+ result = qmi_disable_force_clear_datapath_send(&req);
+ if (result) {
+ IPAERR("qmi_disable_force_clear_datapath_send failed %d\n",
+ result);
+ return result;
+ }
+
+ return 0;
+}
+
static int ipa2_smmu_map_peer_bam(unsigned long dev)
{
phys_addr_t base;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
index b45e748b66a6..94d76db6f993 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_i.h
@@ -1803,6 +1803,9 @@ void ipa_delete_dflt_flt_rules(u32 ipa_ep_idx);
int ipa_enable_data_path(u32 clnt_hdl);
int ipa_disable_data_path(u32 clnt_hdl);
+int ipa2_enable_force_clear(u32 request_id, bool throttle_source,
+ u32 source_pipe_bitmask);
+int ipa2_disable_force_clear(u32 request_id);
int ipa_id_alloc(void *ptr);
void *ipa_id_find(u32 id);
void ipa_id_remove(u32 id);
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
index d88e5a6d3d73..e2ac9bfceed7 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_qmi_service.c
@@ -39,6 +39,8 @@
#define QMI_SEND_STATS_REQ_TIMEOUT_MS 5000
#define QMI_SEND_REQ_TIMEOUT_MS 60000
+#define QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS 1000
+
static struct qmi_handle *ipa_svc_handle;
static void ipa_a5_svc_recv_msg(struct work_struct *work);
static DECLARE_DELAYED_WORK(work_recv_msg, ipa_a5_svc_recv_msg);
@@ -583,7 +585,8 @@ int qmi_enable_force_clear_datapath_send(
&req_desc,
req,
sizeof(*req),
- &resp_desc, &resp, sizeof(resp), 0);
+ &resp_desc, &resp, sizeof(resp),
+ QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
if (rc < 0) {
IPAWANERR("send req failed %d\n", rc);
return rc;
@@ -628,7 +631,8 @@ int qmi_disable_force_clear_datapath_send(
&req_desc,
req,
sizeof(*req),
- &resp_desc, &resp, sizeof(resp), 0);
+ &resp_desc, &resp, sizeof(resp),
+ QMI_IPA_FORCE_CLEAR_DATAPATH_TIMEOUT_MS);
if (rc < 0) {
IPAWANERR("send req failed %d\n", rc);
return rc;
diff --git a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
index 5bda4cb9ed2b..f60669132865 100644
--- a/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
+++ b/drivers/platform/msm/ipa/ipa_v2/ipa_uc_wdi.c
@@ -1404,7 +1404,6 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
union IpaHwWdiCommonChCmdData_t disable;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
u32 prod_hdl;
- int i;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -1421,28 +1420,6 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
if (result)
return result;
- /* checking rdy_ring_rp_pa matches the rdy_comp_ring_wp_pa on WDI2.0 */
- if (ipa_ctx->ipa_wdi2) {
- for (i = 0; i < IPA_UC_FINISH_MAX; i++) {
- IPADBG("(%d) rp_value(%u), comp_wp_value(%u)\n",
- i,
- *ipa_ctx->uc_ctx.rdy_ring_rp_va,
- *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va);
- if (*ipa_ctx->uc_ctx.rdy_ring_rp_va !=
- *ipa_ctx->uc_ctx.rdy_comp_ring_wp_va) {
- usleep_range(IPA_UC_WAIT_MIN_SLEEP,
- IPA_UC_WAII_MAX_SLEEP);
- } else {
- break;
- }
- }
- /* In case ipa_uc still haven't processed all
- * pending descriptors, we have to assert
- */
- if (i == IPA_UC_FINISH_MAX)
- BUG();
- }
-
IPADBG("ep=%d\n", clnt_hdl);
ep = &ipa_ctx->ep[clnt_hdl];
@@ -1468,6 +1445,11 @@ int ipa2_disable_wdi_pipe(u32 clnt_hdl)
* holb on IPA Producer pipe
*/
if (IPA_CLIENT_IS_PROD(ep->client)) {
+
+ IPADBG("Stopping PROD channel - hdl=%d clnt=%d\n",
+ clnt_hdl, ep->client);
+
+ /* remove delay on wlan-prod pipe*/
memset(&ep_cfg_ctrl, 0 , sizeof(struct ipa_ep_cfg_ctrl));
ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
@@ -1594,6 +1576,8 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
struct ipa_ep_context *ep;
union IpaHwWdiCommonChCmdData_t suspend;
struct ipa_ep_cfg_ctrl ep_cfg_ctrl;
+ u32 source_pipe_bitmask = 0;
+ bool disable_force_clear = false;
if (unlikely(!ipa_ctx)) {
IPAERR("IPA driver was not initialized\n");
@@ -1623,6 +1607,31 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
suspend.params.ipa_pipe_number = clnt_hdl;
if (IPA_CLIENT_IS_PROD(ep->client)) {
+ /*
+ * For WDI 2.0 need to ensure pipe will be empty before suspend
+ * as IPA uC will fail to suspend the pipe otherwise.
+ */
+ if (ipa_ctx->ipa_wdi2) {
+ source_pipe_bitmask = 1 <<
+ ipa_get_ep_mapping(ep->client);
+ result = ipa2_enable_force_clear(clnt_hdl,
+ false, source_pipe_bitmask);
+ if (result) {
+ /*
+ * assuming here modem SSR, AP can remove
+ * the delay in this case
+ */
+ IPAERR("failed to force clear %d\n", result);
+ IPAERR("remove delay from SCND reg\n");
+ memset(&ep_cfg_ctrl, 0,
+ sizeof(struct ipa_ep_cfg_ctrl));
+ ep_cfg_ctrl.ipa_ep_delay = false;
+ ep_cfg_ctrl.ipa_ep_suspend = false;
+ ipa2_cfg_ep_ctrl(clnt_hdl, &ep_cfg_ctrl);
+ } else {
+ disable_force_clear = true;
+ }
+ }
IPADBG("Post suspend event first for IPA Producer\n");
IPADBG("Client: %d clnt_hdl: %d\n", ep->client, clnt_hdl);
result = ipa_uc_send_cmd(suspend.raw32b,
@@ -1667,6 +1676,9 @@ int ipa2_suspend_wdi_pipe(u32 clnt_hdl)
}
}
+ if (disable_force_clear)
+ ipa2_disable_force_clear(clnt_hdl);
+
ipa_ctx->tag_process_before_gating = true;
IPA_ACTIVE_CLIENTS_DEC_EP(ipa2_get_client_mapping(clnt_hdl));
ep->uc_offload_state &= ~IPA_WDI_RESUMED;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index e90f69b466e1..aa681d3eacaa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1878,6 +1878,45 @@ static void ipa3_q6_avoid_holb(void)
}
}
+static void ipa3_halt_q6_cons_gsi_channels(void)
+{
+ int ep_idx;
+ int client_idx;
+ struct ipa_gsi_ep_config *gsi_ep_cfg;
+ int ret;
+ int code = 0;
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_idx);
+ if (!gsi_ep_cfg) {
+ IPAERR("failed to get GSI config\n");
+ ipa_assert();
+ return;
+ }
+
+ ret = gsi_halt_channel_ee(
+ gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
+ &code);
+ if (ret == GSI_STATUS_SUCCESS)
+ IPADBG("halted gsi ch %d ee %d with code %d\n",
+ gsi_ep_cfg->ipa_gsi_chan_num,
+ gsi_ep_cfg->ee,
+ code);
+ else
+ IPAERR("failed to halt ch %d ee %d code %d\n",
+ gsi_ep_cfg->ipa_gsi_chan_num,
+ gsi_ep_cfg->ee,
+ code);
+ }
+ }
+}
+
+
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
enum ipa_rule_type rlt)
{
@@ -2312,6 +2351,7 @@ void ipa3_q6_post_shutdown_cleanup(void)
/* Handle the issue where SUSPEND was removed for some reason */
ipa3_q6_avoid_holb();
+ ipa3_halt_q6_cons_gsi_channels();
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
@@ -4116,7 +4156,7 @@ static ssize_t ipa3_write(struct file *file, const char __user *buf,
if (result) {
IPAERR("FW loading process has failed\n");
- BUG();
+ return result;
} else
ipa3_post_init(&ipa3_res, ipa3_ctx->dev);
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index e3dfe8927682..81eae05d7ed9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1777,7 +1777,8 @@ dealloc_chan_fail:
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
bool should_force_clear, u32 qmi_req_id, bool is_dpl)
{
- struct ipa3_ep_context *ul_ep, *dl_ep;
+ struct ipa3_ep_context *ul_ep = NULL;
+ struct ipa3_ep_context *dl_ep;
int result = -EFAULT;
u32 source_pipe_bitmask = 0;
bool dl_data_pending = true;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
index 483b2ca118fa..06f65906841d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -853,6 +853,10 @@ void ipa3_dma_async_memcpy_notify_cb(void *priv
mem_info = (struct ipa_mem_buffer *)data;
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ if (ep_idx < 0) {
+ IPADMA_ERR("IPA Client mapping failed\n");
+ return;
+ }
sys = ipa3_ctx->ep[ep_idx].sys;
spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 5c678f1cfc28..8ec0974711a4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -337,7 +337,7 @@ int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
int result;
u16 sps_flags = SPS_IOVEC_FLAG_EOT;
dma_addr_t dma_address;
- u16 len;
+ u16 len = 0;
u32 mem_flag = GFP_ATOMIC;
if (unlikely(!in_atomic))
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index c3a12dd0b17c..362294b0f695 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -14,7 +14,6 @@
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
-#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
@@ -1001,7 +1000,7 @@ error:
static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
{
*ipa_ep_idx = ipa3_get_ep_mapping(ep);
- if (*ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+ if (*ipa_ep_idx < 0) {
IPAERR("ep not valid ep=%d\n", ep);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 4ef1a96c8450..9e2ffe70170c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -153,10 +153,16 @@ int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
{
int res;
+ int ipa_ep_idx;
IPA_MHI_FUNC_ENTRY();
- res = ipa3_enable_data_path(ipa3_get_ep_mapping(client));
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
+ res = ipa3_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
return res;
@@ -521,6 +527,10 @@ int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (brstmode_enabled && !LPTransitionRejected) {
@@ -557,11 +567,14 @@ int ipa3_mhi_query_ch_info(enum ipa_client_type client,
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
-
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
if (res) {
- IPAERR("gsi_query_channel_info failed\n");
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
return res;
}
@@ -596,7 +609,10 @@ int ipa3_mhi_destroy_channel(enum ipa_client_type client)
struct ipa3_ep_context *ep;
ipa_ep_idx = ipa3_get_ep_mapping(client);
-
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 21ce28204069..c1d1d9659850 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -498,7 +498,7 @@ static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
{
int index;
union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
- unsigned long flags;
+ unsigned long flags = 0;
int retries = 0;
send_cmd_lock:
@@ -775,7 +775,7 @@ int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
struct ipa3_uc_hdlrs *hdlrs)
{
- unsigned long flags;
+ unsigned long flags = 0;
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Feature %u is invalid, not registering hdlrs\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 9f38af1b520b..2f28ba673d5a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -929,7 +929,7 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
if (client >= IPA_CLIENT_MAX || client < 0) {
IPAERR("Bad client number! client =%d\n", client);
- return -EINVAL;
+ return IPA_EP_NOT_ALLOCATED;
}
ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
@@ -3446,6 +3446,11 @@ void ipa3_suspend_apps_pipes(bool suspend)
cfg.ipa_ep_suspend = suspend;
ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ if (ipa_ep_idx < 0) {
+ IPAERR("IPA client mapping failed\n");
+ ipa_assert();
+ return;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (ep->valid) {
IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index c6fa39d6e0e2..3d40d114437a 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/msm_pcie.h>
@@ -29,6 +30,7 @@
#include <linux/dma-mapping.h>
extern struct mhi_pcie_devices mhi_devices;
+struct mhi_device_ctxt;
enum MHI_DEBUG_LEVEL {
MHI_MSG_RAW = 0x1,
@@ -125,6 +127,31 @@ enum MHI_STATE {
MHI_STATE_reserved = 0x80000000
};
+enum MHI_BRSTMODE {
+ /* BRST Mode Enable for HW Channels, SW Channel Disabled */
+ MHI_BRSTMODE_DEFAULT = 0x0,
+ MHI_BRSTMODE_RESERVED = 0x1,
+ MHI_BRSTMODE_DISABLE = 0x2,
+ MHI_BRSTMODE_ENABLE = 0x3
+};
+
+enum MHI_PM_STATE {
+ MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
+ MHI_PM_POR = 0x1, /* Power On Reset State */
+ MHI_PM_M0 = 0x2,
+ MHI_PM_M1 = 0x4,
+ MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
+ MHI_PM_M2 = 0x10,
+ MHI_PM_M3_ENTER = 0x20,
+ MHI_PM_M3 = 0x40,
+ MHI_PM_M3_EXIT = 0x80,
+};
+
+#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
+#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M1 | MHI_PM_M2))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
+ (pm_state < MHI_PM_M3_EXIT))
struct __packed mhi_event_ctxt {
u32 mhi_intmodt;
u32 mhi_event_er_type;
@@ -136,8 +163,11 @@ struct __packed mhi_event_ctxt {
};
struct __packed mhi_chan_ctxt {
- enum MHI_CHAN_STATE mhi_chan_state;
- enum MHI_CHAN_DIR mhi_chan_type;
+ u32 chstate : 8;
+ u32 brstmode : 2;
+ u32 pollcfg : 6;
+ u32 reserved : 16;
+ u32 chtype;
u32 mhi_event_ring_index;
u64 mhi_trb_ring_base_addr;
u64 mhi_trb_ring_len;
@@ -172,11 +202,11 @@ enum MHI_PKT_TYPE {
MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
- MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
MHI_PKT_TYPE_TX_EVENT = 0x22,
MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_STALE_EVENT, /* Internal event */
MHI_PKT_TYPE_SYS_ERR_EVENT = 0xFF,
};
@@ -261,6 +291,17 @@ enum MHI_EVENT_CCS {
MHI_EVENT_CC_BAD_TRE = 0x11,
};
+struct db_mode {
+ /* if set do not reset DB_Mode during M0 resume */
+ u32 preserve_db_state : 1;
+ u32 db_mode : 1;
+ enum MHI_BRSTMODE brstmode;
+ void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
+};
+
struct mhi_ring {
void *base;
void *wp;
@@ -270,6 +311,11 @@ struct mhi_ring {
uintptr_t el_size;
u32 overwrite_en;
enum MHI_CHAN_DIR dir;
+ enum MHI_CHAN_STATE ch_state;
+ struct db_mode db_mode;
+ u32 msi_disable_cntr;
+ u32 msi_enable_cntr;
+ spinlock_t ring_lock;
};
enum MHI_CMD_STATUS {
@@ -327,12 +373,19 @@ struct mhi_chan_info {
u32 flags;
};
+struct mhi_chan_cfg {
+ enum MHI_COMMAND current_cmd;
+ struct mutex chan_lock;
+ spinlock_t event_lock; /* completion event lock */
+ struct completion cmd_complete;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+};
+
struct mhi_client_handle {
struct mhi_chan_info chan_info;
struct mhi_device_ctxt *mhi_dev_ctxt;
struct mhi_client_info_t client_info;
- struct completion chan_reset_complete;
- struct completion chan_open_complete;
void *user_data;
struct mhi_result result;
u32 device_index;
@@ -344,12 +397,6 @@ struct mhi_client_handle {
int event_ring_index;
};
-enum MHI_EVENT_POLLING {
- MHI_EVENT_POLLING_DISABLED = 0x0,
- MHI_EVENT_POLLING_ENABLED = 0x1,
- MHI_EVENT_POLLING_reserved = 0x80000000
-};
-
enum MHI_TYPE_EVENT_RING {
MHI_ER_DATA_TYPE = 0x1,
MHI_ER_CTRL_TYPE = 0x2,
@@ -375,46 +422,27 @@ struct mhi_buf_info {
struct mhi_counters {
u32 m0_m1;
- u32 m1_m0;
u32 m1_m2;
u32 m2_m0;
u32 m0_m3;
- u32 m3_m0;
u32 m1_m3;
- u32 mhi_reset_cntr;
- u32 mhi_ready_cntr;
- u32 m3_event_timeouts;
- u32 m0_event_timeouts;
- u32 m2_event_timeouts;
- u32 msi_disable_cntr;
- u32 msi_enable_cntr;
- u32 nr_irq_migrations;
- u32 *msi_counter;
- u32 *ev_counter;
- atomic_t outbound_acks;
+ u32 m3_m0;
u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
u32 bb_used[MHI_MAX_CHANNELS];
+ atomic_t device_wake;
+ atomic_t outbound_acks;
+ atomic_t events_pending;
+ u32 *msi_counter;
+ u32 mhi_reset_cntr;
};
struct mhi_flags {
u32 mhi_initialized;
- u32 pending_M3;
- u32 pending_M0;
u32 link_up;
- u32 kill_threads;
- atomic_t data_pending;
- atomic_t events_pending;
- atomic_t pending_resume;
- atomic_t pending_ssr;
- atomic_t pending_powerup;
- atomic_t m2_transition;
int stop_threads;
- atomic_t device_wake;
- u32 ssr;
+ u32 kill_threads;
u32 ev_thread_stopped;
u32 st_thread_stopped;
- u32 uldl_enabled;
- u32 db_mode[MHI_MAX_CHANNELS];
};
struct mhi_wait_queues {
@@ -458,44 +486,35 @@ struct mhi_dev_space {
};
struct mhi_device_ctxt {
- enum MHI_STATE mhi_state;
+ enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
+ enum MHI_STATE mhi_state; /* protocol state */
enum MHI_EXEC_ENV dev_exec_env;
struct mhi_dev_space dev_space;
struct mhi_pcie_dev_info *dev_info;
struct pcie_core_info *dev_props;
struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
-
struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
struct mhi_ring *mhi_local_event_ctxt;
struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
+ struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
+
- struct mutex *mhi_chan_mutex;
- struct mutex mhi_link_state;
- spinlock_t *mhi_ev_spinlock_list;
- struct mutex *mhi_cmd_mutex_list;
struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
struct mhi_event_ring_cfg *ev_ring_props;
struct task_struct *event_thread_handle;
struct task_struct *st_thread_handle;
+ struct tasklet_struct ev_task; /* Process control Events */
+ struct work_struct process_m1_worker;
struct mhi_wait_queues mhi_ev_wq;
struct dev_mmio_info mmio_info;
- u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
- u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
- spinlock_t *db_write_lock;
-
struct mhi_state_work_queue state_change_work_item_list;
- enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];
- u32 cmd_ring_order;
struct mhi_counters counters;
struct mhi_flags flags;
- u32 device_wake_asserted;
-
- rwlock_t xfer_lock;
struct hrtimer m1_timer;
ktime_t m1_timeout;
@@ -508,11 +527,12 @@ struct mhi_device_ctxt {
unsigned long esoc_notif;
enum STATE_TRANSITION base_state;
- atomic_t outbound_acks;
+
+ rwlock_t pm_xfer_lock; /* lock to control PM State */
+ spinlock_t dev_wake_lock; /* lock to set wake bit */
struct mutex pm_lock;
struct wakeup_source w_lock;
- int enable_lpm;
char *chan_info;
struct dentry *mhi_parent_folder;
};
@@ -577,7 +597,9 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
enum MHI_CHAN_DIR chan_type,
u32 event_ring,
struct mhi_ring *ring,
- enum MHI_CHAN_STATE chan_state);
+ enum MHI_CHAN_STATE chan_state,
+ bool preserve_db_state,
+ enum MHI_BRSTMODE brstmode);
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan);
@@ -622,8 +644,9 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_CB_REASON reason);
void mhi_notify_client(struct mhi_client_handle *client_handle,
enum MHI_CB_REASON reason);
-int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set);
int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
void *hcpu);
@@ -635,6 +658,14 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
int start_chan_sync(struct mhi_client_handle *client_handle);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt, void __iomem *io_addr,
uintptr_t io_offset, u32 val);
void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -651,12 +682,19 @@ int mhi_runtime_suspend(struct device *dev);
int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
struct mhi_chan_info *chan_info);
int mhi_runtime_resume(struct device *dev);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_runtime_idle(struct device *dev);
int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_TYPE_EVENT_RING type);
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
+void process_m1_transition(struct work_struct *work);
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum MHI_STATE new_state);
+const char *state_transition_str(enum STATE_TRANSITION state);
+void mhi_ctrl_ev_task(unsigned long data);
#endif
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 4b63e880a44f..113791a62c38 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -41,6 +41,9 @@ static ssize_t bhi_write(struct file *file,
size_t amount_copied = 0;
uintptr_t align_len = 0x1000;
u32 tx_db_val = 0;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ const long bhi_timeout_ms = 1000;
+ long timeout;
if (buf == NULL || 0 == count)
return -EIO;
@@ -48,8 +51,12 @@ static ssize_t bhi_write(struct file *file,
if (count > BHI_MAX_IMAGE_SIZE)
return -ENOMEM;
- wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+ timeout = wait_event_interruptible_timeout(
+ *mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+ msecs_to_jiffies(bhi_timeout_ms));
+ if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
+ return -EIO;
mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
@@ -95,6 +102,11 @@ static ssize_t bhi_write(struct file *file,
bhi_ctxt->image_size = count;
/* Write the image size */
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ goto bhi_copy_error;
+ }
pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
BHI_IMGADDR_HIGH,
@@ -119,10 +131,15 @@ static ssize_t bhi_write(struct file *file,
BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
-
+ read_unlock_bh(pm_xfer_lock);
for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ goto bhi_copy_error;
+ }
err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
@@ -131,6 +148,7 @@ static ssize_t bhi_write(struct file *file,
BHI_STATUS,
BHI_STATUS_MASK,
BHI_STATUS_SHIFT);
+ read_unlock_bh(pm_xfer_lock);
mhi_log(MHI_MSG_CRITICAL,
"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
tx_db_val, err, errdbg1, errdbg2, errdbg3);
@@ -176,9 +194,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
|| 0 == mhi_pcie_device->core.bar0_end)
return -EIO;
- mhi_log(MHI_MSG_INFO,
- "Successfully registered char dev. bhi base is: 0x%p.\n",
- bhi_ctxt->bhi_base);
ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
if (IS_ERR_VALUE(ret_val)) {
mhi_log(MHI_MSG_CRITICAL,
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index aa8500d277cd..fe163f3895a5 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -89,32 +89,31 @@ dt_error:
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
+ int i;
mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
-
if (!mhi_dev_ctxt->mhi_local_event_ctxt)
return -ENOMEM;
- mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
- mhi_dev_ctxt->mmio_info.nr_event_rings,
- GFP_KERNEL);
- if (!mhi_dev_ctxt->counters.ev_counter) {
- r = -ENOMEM;
- goto free_local_ec_list;
- }
mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
if (!mhi_dev_ctxt->counters.msi_counter) {
r = -ENOMEM;
- goto free_ev_counter;
+ goto free_local_ec_list;
}
+
+ for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[i];
+
+ spin_lock_init(&mhi_ring->ring_lock);
+ }
+
return r;
-free_ev_counter:
- kfree(mhi_dev_ctxt->counters.ev_counter);
free_local_ec_list:
kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
return r;
@@ -123,19 +122,27 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
struct mhi_ring *event_ctxt = NULL;
u64 db_value = 0;
+ unsigned long flags;
event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
+ spin_lock_irqsave(&event_ctxt->ring_lock, flags);
db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
event_ring_index,
(uintptr_t) event_ctxt->wp);
- mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
- event_ring_index, db_value);
+ event_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ event_ring_index,
+ db_value);
+ spin_unlock_irqrestore(&event_ctxt->ring_lock, flags);
}
static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
- struct mhi_ring *ring, u32 el_per_ring,
- u32 intmodt_val, u32 msi_vec)
+ struct mhi_ring *ring,
+ u32 el_per_ring,
+ u32 intmodt_val,
+ u32 msi_vec,
+ enum MHI_BRSTMODE brstmode)
{
ev_list->mhi_event_er_type = MHI_EVENT_RING_TYPE_VALID;
ev_list->mhi_msi_vector = msi_vec;
@@ -144,6 +151,20 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
ring->el_size = sizeof(union mhi_event_pkt);
ring->overwrite_en = 0;
+
+ ring->db_mode.db_mode = 1;
+ ring->db_mode.brstmode = brstmode;
+ switch (ring->db_mode.brstmode) {
+ case MHI_BRSTMODE_ENABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode;
+ break;
+ case MHI_BRSTMODE_DISABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+ break;
+ default:
+ ring->db_mode.process_db = mhi_process_db;
+ }
+
/* Flush writes to MMIO */
wmb();
return 0;
@@ -159,9 +180,12 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
- mhi_dev_ctxt->ev_ring_props[i].nr_desc,
- mhi_dev_ctxt->ev_ring_props[i].intmod,
- mhi_dev_ctxt->ev_ring_props[i].msi_vec);
+ mhi_dev_ctxt->ev_ring_props[i].nr_desc,
+ mhi_dev_ctxt->ev_ring_props[i].intmod,
+ mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+ GET_EV_PROPS(EV_BRSTMODE,
+ mhi_dev_ctxt->
+ ev_ring_props[i].flags));
}
}
@@ -219,10 +243,9 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 i = 0;
unsigned long flags = 0;
int ret_val = 0;
- spinlock_t *lock =
- &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
struct mhi_ring *event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
+ spinlock_t *lock = &event_ctxt->ring_lock;
if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
diff --git a/drivers/platform/msm/mhi/mhi_hwio.h b/drivers/platform/msm/mhi/mhi_hwio.h
index 370ee0ebab7e..88a6a74566e6 100644
--- a/drivers/platform/msm/mhi/mhi_hwio.h
+++ b/drivers/platform/msm/mhi/mhi_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,14 +23,14 @@
#define MHICFG (0x10)
-#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
-#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
-#define MHICFG_NER_MASK 0xff0000
-#define MHICFG_NER_SHIFT 0x10
-#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
-#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
-#define MHICFG_NCH_MASK 0xff
-#define MHICFG_NCH_SHIFT 0x0
+#define MHICFG_NHWER_MASK (0xff000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xff0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xff00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xff)
+#define MHICFG_NCH_SHIFT (0)
#define CHDBOFF (0x18)
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index b3fff19d8fa4..395e19c91f35 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -96,22 +96,6 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
"Failed to register with esoc ret %d.\n",
ret_val);
}
- mhi_pcie_dev->mhi_ctxt.bus_scale_table =
- msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev);
- mhi_pcie_dev->mhi_ctxt.bus_client =
- msm_bus_scale_register_client(
- mhi_pcie_dev->mhi_ctxt.bus_scale_table);
- if (!mhi_pcie_dev->mhi_ctxt.bus_client) {
- mhi_log(MHI_MSG_CRITICAL,
- "Could not register for bus control ret: %d.\n",
- mhi_pcie_dev->mhi_ctxt.bus_client);
- } else {
- ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1);
- if (ret_val)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- ret_val);
- }
device_disable_async_suspend(&pcie_device->dev);
ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
@@ -188,9 +172,7 @@ mhi_state_transition_error:
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
+
kfree(mhi_dev_ctxt->ev_ring_props);
mhi_rem_pm_sysfs(&pcie_device->dev);
sysfs_config_err:
@@ -203,7 +185,9 @@ msi_config_err:
}
static const struct dev_pm_ops pm_ops = {
- SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+ mhi_runtime_resume,
+ mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
};
@@ -217,14 +201,15 @@ static struct pci_driver mhi_pcie_driver = {
};
static int mhi_pci_probe(struct pci_dev *pcie_device,
- const struct pci_device_id *mhi_device_id)
+ const struct pci_device_id *mhi_device_id)
{
int ret_val = 0;
struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
struct platform_device *plat_dev;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
u32 nr_dev = mhi_devices.nr_of_devices;
- mhi_log(MHI_MSG_INFO, "Entering.\n");
+ mhi_log(MHI_MSG_INFO, "Entering\n");
mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
@@ -234,29 +219,120 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
mhi_devices.nr_of_devices++;
plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
pcie_device->dev.of_node = plat_dev->dev.of_node;
- pm_runtime_put_noidle(&pcie_device->dev);
+ mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
+ INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+ mutex_init(&mhi_dev_ctxt->pm_lock);
+ rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
+ spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
+ tasklet_init(&mhi_dev_ctxt->ev_task,
+ mhi_ctrl_ev_task,
+ (unsigned long)mhi_dev_ctxt);
+
+ mhi_dev_ctxt->flags.link_up = 1;
+ ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
mhi_pcie_dev->pcie_device = pcie_device;
mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
mhi_pcie_dev->mhi_pci_link_event.events =
- (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP |
- MSM_PCIE_EVENT_WAKEUP);
+ (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
- if (ret_val)
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to register for link notifications %d.\n",
ret_val);
+ return ret_val;
+ }
+
+ /* Initialize MHI CNTXT */
+ ret_val = mhi_ctxt_init(mhi_pcie_dev);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "MHI Initialization failed, ret %d\n",
+ ret_val);
+ goto deregister_pcie;
+ }
+ pci_set_master(mhi_pcie_dev->pcie_device);
+
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
+ ret_val = set_mhi_base_state(mhi_pcie_dev);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error Setting MHI Base State %d\n", ret_val);
+ goto unlock_pm_lock;
+ }
+
+ if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
+ ret_val = bhi_probe(mhi_pcie_dev);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error with bhi_probe ret:%d", ret_val);
+ goto unlock_pm_lock;
+ }
+ }
+
+ init_mhi_base_state(mhi_dev_ctxt);
+
+ pm_runtime_set_autosuspend_delay(&pcie_device->dev,
+ MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
+ pm_runtime_use_autosuspend(&pcie_device->dev);
+ pm_suspend_ignore_children(&pcie_device->dev, true);
+
+ /*
+ * pci framework will increment usage count (twice) before
+ * calling local device driver probe function.
+ * 1st pci.c pci_pm_init() calls pm_runtime_forbid
+ * 2nd pci-driver.c local_pci_probe calls pm_runtime_get_sync
+ * Framework expects pci device driver to call pm_runtime_put_noidle
+ * to decrement usage count after successful probe and
+ * call pm_runtime_allow to enable runtime suspend.
+ * MHI will allow runtime after entering AMSS state.
+ */
+ pm_runtime_mark_last_busy(&pcie_device->dev);
+ pm_runtime_put_noidle(&pcie_device->dev);
+
+ /*
+ * Keep the MHI state in Active (M0) state until AMSS because EP
+ * would error fatal if we try to enter M1 before entering
+ * AMSS state.
+ */
+ read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+
+ return 0;
+
+unlock_pm_lock:
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+deregister_pcie:
+ msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
return ret_val;
}
static int mhi_plat_probe(struct platform_device *pdev)
{
u32 nr_dev = mhi_devices.nr_of_devices;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
int r = 0;
mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+
+ mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!mhi_dev_ctxt->bus_scale_table)
+ return -ENODATA;
+ mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+ (mhi_dev_ctxt->bus_scale_table);
+ if (!mhi_dev_ctxt->bus_client)
+ return -EINVAL;
+
mhi_devices.device_list[nr_dev].plat_dev = pdev;
r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
if (r)
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index 93c3de35473b..a496c81239bf 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -27,46 +27,21 @@ static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int i;
- mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
- mhi_dev_ctxt->mmio_info.nr_event_rings,
- GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
- goto ev_mutex_free;
- mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
- MHI_MAX_CHANNELS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
- goto chan_mutex_free;
- mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
- NR_OF_CMD_RINGS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
- goto cmd_mutex_free;
-
- mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
- MHI_MAX_CHANNELS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->db_write_lock)
- goto db_write_lock_free;
- for (i = 0; i < MHI_MAX_CHANNELS; ++i)
- mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
- spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
- for (i = 0; i < NR_OF_CMD_RINGS; ++i)
- mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
- for (i = 0; i < MHI_MAX_CHANNELS; ++i)
- spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
- rwlock_init(&mhi_dev_ctxt->xfer_lock);
- mutex_init(&mhi_dev_ctxt->mhi_link_state);
- mutex_init(&mhi_dev_ctxt->pm_lock);
- atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
- return 0;
+ for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+ struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
-db_write_lock_free:
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-cmd_mutex_free:
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
-chan_mutex_free:
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
-ev_mutex_free:
- return -ENOMEM;
+ mutex_init(&mhi_dev_ctxt->mhi_chan_cfg[i].chan_lock);
+ spin_lock_init(&mhi_dev_ctxt->mhi_chan_cfg[i].event_lock);
+ spin_lock_init(&ring->ring_lock);
+ }
+
+ for (i = 0; i < NR_OF_CMD_RINGS; i++) {
+ struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[i];
+
+ spin_lock_init(&ring->ring_lock);
+ }
+
+ return 0;
}
size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -115,7 +90,7 @@ void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt,
chan_ctxt->mhi_trb_write_ptr = p_base_addr;
chan_ctxt->mhi_trb_ring_len = len;
/* Prepulate the channel ctxt */
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
chan_ctxt->mhi_event_ring_index = ev_index;
}
@@ -173,6 +148,8 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
ring[PRIMARY_CMD_RING].len = ring_size;
ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].overwrite_en = 0;
+ ring[PRIMARY_CMD_RING].db_mode.process_db =
+ mhi_process_db_brstmode_disable;
return 0;
}
@@ -547,7 +524,6 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
}
init_event_ctxt_array(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- mhi_dev_ctxt->enable_lpm = 1;
r = mhi_spawn_threads(mhi_dev_ctxt);
if (r) {
@@ -573,9 +549,6 @@ error_wq_init:
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
error_during_dev_mem_init:
error_during_local_ev_ctxt:
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
error_during_sync:
kfree(mhi_dev_ctxt->ev_ring_props);
error_during_props:
@@ -585,24 +558,28 @@ error_during_props:
/**
* @brief Initialize the channel context and shadow context
*
- * @cc_list: Context to initialize
- * @trb_list_phy: Physical base address for the TRE ring
- * @trb_list_virt: Virtual base address for the TRE ring
- * @el_per_ring: Number of TREs this ring will contain
- * @chan_type: Type of channel IN/OUT
- * @event_ring: Event ring to be mapped to this channel context
- * @ring: Shadow context to be initialized alongside
- *
+ * @cc_list: Context to initialize
+ * @trb_list_phy: Physical base address for the TRE ring
+ * @trb_list_virt: Virtual base address for the TRE ring
+ * @el_per_ring: Number of TREs this ring will contain
+ * @chan_type: Type of channel IN/OUT
+ * @event_ring: Event ring to be mapped to this channel context
+ * @ring: Shadow context to be initialized alongside
+ * @chan_state: Channel state
+ * @preserve_db_state: Do not reset DB state during resume
* @Return errno
*/
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
- uintptr_t trb_list_phy, uintptr_t trb_list_virt,
- u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
- u32 event_ring, struct mhi_ring *ring,
- enum MHI_CHAN_STATE chan_state)
+ uintptr_t trb_list_phy, uintptr_t trb_list_virt,
+ u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
+ u32 event_ring, struct mhi_ring *ring,
+ enum MHI_CHAN_STATE chan_state,
+ bool preserve_db_state,
+ enum MHI_BRSTMODE brstmode)
{
- cc_list->mhi_chan_state = chan_state;
- cc_list->mhi_chan_type = chan_type;
+ cc_list->brstmode = brstmode;
+ cc_list->chstate = chan_state;
+ cc_list->chtype = chan_type;
cc_list->mhi_event_ring_index = event_ring;
cc_list->mhi_trb_ring_base_addr = trb_list_phy;
cc_list->mhi_trb_ring_len =
@@ -617,6 +594,22 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
ring->el_size = sizeof(struct mhi_tx_pkt);
ring->overwrite_en = 0;
ring->dir = chan_type;
+ ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ ring->db_mode.db_mode = 1;
+ ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
+ ring->db_mode.brstmode = brstmode;
+
+ switch (ring->db_mode.brstmode) {
+ case MHI_BRSTMODE_ENABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode;
+ break;
+ case MHI_BRSTMODE_DISABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+ break;
+ default:
+ ring->db_mode.process_db = mhi_process_db;
+ }
+
/* Flush writes to MMIO */
wmb();
return 0;
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index 5336b9ff0c11..d7c604419593 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -15,57 +15,6 @@
#include "mhi_sys.h"
#include "mhi_trace.h"
-irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
-{
- struct device *mhi_device = dev_id;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
-
- if (!mhi_dev_ctxt) {
- mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
- return IRQ_HANDLED;
- }
- mhi_dev_ctxt->counters.msi_counter[
- IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
- mhi_log(MHI_MSG_VERBOSE,
- "Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
- trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
- atomic_inc(&mhi_dev_ctxt->flags.events_pending);
- wake_up_interruptible(
- mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- return IRQ_HANDLED;
-}
-
-irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
-{
- struct device *mhi_device = dev_id;
- u32 client_index;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
- struct mhi_client_handle *client_handle;
- struct mhi_client_info_t *client_info;
- struct mhi_cb_info cb_info;
- int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-
- mhi_dev_ctxt->counters.msi_counter[msi_num]++;
- mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
- trace_mhi_msi(msi_num);
- client_index = MHI_MAX_CHANNELS -
- (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
- client_handle = mhi_dev_ctxt->client_handle_list[client_index];
- client_info = &client_handle->client_info;
- if (likely(NULL != client_handle)) {
- client_handle->result.user_data =
- client_handle->user_data;
- if (likely(NULL != &client_info->mhi_client_cb)) {
- cb_info.result = &client_handle->result;
- cb_info.cb_reason = MHI_CB_XFER;
- cb_info.chan = client_handle->chan_info.chan_nr;
- cb_info.result->transaction_status = 0;
- client_info->mhi_client_cb(&cb_info);
- }
- }
- return IRQ_HANDLED;
-}
-
static int mhi_process_event_ring(
struct mhi_device_ctxt *mhi_dev_ctxt,
u32 ev_index,
@@ -76,14 +25,21 @@ static int mhi_process_event_ring(
union mhi_event_pkt event_to_process;
int ret_val = 0;
struct mhi_event_ctxt *ev_ctxt = NULL;
- union mhi_cmd_pkt *cmd_pkt = NULL;
- union mhi_event_pkt *ev_ptr = NULL;
+ unsigned long flags;
struct mhi_ring *local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
- u32 event_code;
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+ mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
@@ -91,64 +47,105 @@ static int mhi_process_event_ring(
ev_ctxt->mhi_event_read_ptr);
local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp;
-
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
BUG_ON(validate_ev_el_addr(local_ev_ctxt, (uintptr_t)device_rp));
while ((local_rp != device_rp) && (event_quota > 0) &&
(device_rp != NULL) && (local_rp != NULL)) {
+
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
event_to_process = *local_rp;
- ev_ptr = &event_to_process;
- event_code = get_cmd_pkt(mhi_dev_ctxt,
- ev_ptr, &cmd_pkt, ev_index);
- if (((MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process)) ==
- MHI_PKT_TYPE_CMD_COMPLETION_EVENT)) &&
- (event_code == MHI_EVENT_CC_SUCCESS)) {
- mhi_log(MHI_MSG_INFO, "Command Completion event\n");
- if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt) ==
- MHI_PKT_TYPE_RESET_CHAN_CMD)) {
- mhi_log(MHI_MSG_INFO, "First Reset CC event\n");
- MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
- MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD);
- ret_val = -EINPROGRESS;
- break;
- } else if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)
- == MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD)) {
- MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
- MHI_PKT_TYPE_RESET_CHAN_CMD);
- mhi_log(MHI_MSG_INFO,
- "Processing Reset CC event\n");
- }
- }
- if (unlikely(0 != recycle_trb_and_ring(mhi_dev_ctxt,
- local_ev_ctxt,
- MHI_RING_TYPE_EVENT_RING,
- ev_index)))
- mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
- switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
+ recycle_trb_and_ring(mhi_dev_ctxt,
+ local_ev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ ev_index);
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
+
+ switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, &event_to_process)) {
case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
- mhi_log(MHI_MSG_INFO,
- "MHI CCE received ring 0x%x\n",
- ev_index);
+ {
+ union mhi_cmd_pkt *cmd_pkt;
+ u32 chan;
+ struct mhi_chan_cfg *cfg;
+ unsigned long flags;
+ struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- ret_val = parse_cmd_event(mhi_dev_ctxt,
- &event_to_process, ev_index);
+ get_cmd_pkt(mhi_dev_ctxt,
+ &event_to_process,
+ &cmd_pkt, ev_index);
+ MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+ mhi_log(MHI_MSG_INFO,
+ "MHI CCE received ring 0x%x chan:%u\n",
+ ev_index,
+ chan);
+ spin_lock_irqsave(&cfg->event_lock, flags);
+ cfg->cmd_pkt = *cmd_pkt;
+ cfg->cmd_event_pkt =
+ event_to_process.cmd_complete_event_pkt;
+ complete(&cfg->cmd_complete);
+ spin_unlock_irqrestore(&cfg->event_lock, flags);
+ spin_lock_irqsave(&cmd_ring->ring_lock,
+ flags);
+ ctxt_del_element(cmd_ring, NULL);
+ spin_unlock_irqrestore(&cmd_ring->ring_lock,
+ flags);
break;
+ }
case MHI_PKT_TYPE_TX_EVENT:
+ {
+ u32 chan;
+ struct mhi_ring *ring;
+
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
- parse_xfer_event(mhi_dev_ctxt,
- &event_to_process, ev_index);
+ chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
+ if (unlikely(!VALID_CHAN_NR(chan))) {
+ mhi_log(MHI_MSG_ERROR,
+ "Invalid chan:%d\n",
+ chan);
+ break;
+ }
+ ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ spin_lock_bh(&ring->ring_lock);
+ if (ring->ch_state == MHI_CHAN_STATE_ENABLED)
+ parse_xfer_event(mhi_dev_ctxt,
+ &event_to_process,
+ ev_index);
+ spin_unlock_bh(&ring->ring_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
break;
+ }
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
{
enum STATE_TRANSITION new_state;
-
+ unsigned long flags;
new_state = MHI_READ_STATE(&event_to_process);
mhi_log(MHI_MSG_INFO,
- "MHI STE received ring 0x%x\n",
- ev_index);
- mhi_init_state_transition(mhi_dev_ctxt, new_state);
+ "MHI STE received ring 0x%x State:%s\n",
+ ev_index,
+ state_transition_str(new_state));
+
+ /* If transitioning to M1 schedule worker thread */
+ if (new_state == STATE_TRANSITION_M1) {
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ mhi_dev_ctxt->mhi_state =
+ mhi_get_m_state(mhi_dev_ctxt);
+ if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
+ mhi_dev_ctxt->counters.m0_m1++;
+ schedule_work(&mhi_dev_ctxt->
+ process_m1_worker);
+ }
+ write_unlock_irqrestore(&mhi_dev_ctxt->
+ pm_xfer_lock,
+ flags);
+ } else {
+ mhi_init_state_transition(mhi_dev_ctxt,
+ new_state);
+ }
break;
}
case MHI_PKT_TYPE_EE_EVENT:
@@ -174,14 +171,16 @@ static int mhi_process_event_ring(
}
break;
}
+ case MHI_PKT_TYPE_STALE_EVENT:
+ mhi_log(MHI_MSG_INFO,
+ "Stale Event received for chan:%u\n",
+ MHI_EV_READ_CHID(EV_CHID, local_rp));
+ break;
case MHI_PKT_TYPE_SYS_ERR_EVENT:
mhi_log(MHI_MSG_INFO,
"MHI System Error Detected. Triggering Reset\n");
BUG();
- if (!mhi_trigger_reset(mhi_dev_ctxt))
- mhi_log(MHI_MSG_ERROR,
- "Failed to reset for SYSERR recovery\n");
- break;
+ break;
default:
mhi_log(MHI_MSG_ERROR,
"Unsupported packet type code 0x%x\n",
@@ -189,15 +188,20 @@ static int mhi_process_event_ring(
&event_to_process));
break;
}
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp;
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
ev_index,
ev_ctxt->mhi_event_read_ptr);
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
ret_val = 0;
--event_quota;
}
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return ret_val;
}
@@ -207,7 +211,7 @@ int parse_event_thread(void *ctxt)
u32 i = 0;
int ret_val = 0;
int ret_val_process_event = 0;
- atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
+ atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;
/* Go through all event rings */
for (;;) {
@@ -215,7 +219,7 @@ int parse_event_thread(void *ctxt)
wait_event_interruptible(
*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
((atomic_read(
- &mhi_dev_ctxt->flags.events_pending) > 0) &&
+ &mhi_dev_ctxt->counters.events_pending) > 0) &&
!mhi_dev_ctxt->flags.stop_threads) ||
mhi_dev_ctxt->flags.kill_threads ||
(mhi_dev_ctxt->flags.stop_threads &&
@@ -237,27 +241,45 @@ int parse_event_thread(void *ctxt)
break;
}
mhi_dev_ctxt->flags.ev_thread_stopped = 0;
- atomic_dec(&mhi_dev_ctxt->flags.events_pending);
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
+ atomic_dec(&mhi_dev_ctxt->counters.events_pending);
+ for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
mhi_log(MHI_MSG_INFO,
- "SYS_ERR detected, not processing events\n");
- atomic_set(&mhi_dev_ctxt->flags.events_pending,
+ "SYS_ERR detected, not processing events\n");
+ atomic_set(&mhi_dev_ctxt->
+ counters.events_pending,
0);
break;
}
if (GET_EV_PROPS(EV_MANAGED,
- mhi_dev_ctxt->ev_ring_props[i].flags)){
+ mhi_dev_ctxt->ev_ring_props[i].flags)) {
ret_val_process_event =
- mhi_process_event_ring(mhi_dev_ctxt, i,
- mhi_dev_ctxt->ev_ring_props[i].nr_desc);
- if (ret_val_process_event ==
- -EINPROGRESS)
+ mhi_process_event_ring(mhi_dev_ctxt,
+ i,
+ mhi_dev_ctxt->
+ ev_ring_props[i].nr_desc);
+ if (ret_val_process_event == -EINPROGRESS)
atomic_inc(ev_pen_ptr);
}
}
}
- return ret_val;
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ (struct mhi_device_ctxt *)data;
+ const unsigned CTRL_EV_RING = 0;
+ struct mhi_event_ring_cfg *ring_props =
+ &mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
+
+ mhi_log(MHI_MSG_VERBOSE, "Enter\n");
+ /* Process control event ring */
+ mhi_process_event_ring(mhi_dev_ctxt,
+ CTRL_EV_RING,
+ ring_props->nr_desc);
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
+ mhi_log(MHI_MSG_VERBOSE, "Exit\n");
}
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
@@ -268,8 +290,8 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
client_handle->result.bytes_xferd = 0;
client_handle->result.transaction_status = 0;
ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
- client_handle->event_ring_index,
- 1);
+ client_handle->event_ring_index,
+ 1);
if (ret_val)
mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
return &(client_handle->result);
@@ -277,20 +299,79 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
void mhi_mask_irq(struct mhi_client_handle *client_handle)
{
- disable_irq_nosync(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
- client_handle->msi_vec));
- client_handle->mhi_dev_ctxt->counters.msi_disable_cntr++;
- if (client_handle->mhi_dev_ctxt->counters.msi_disable_cntr >
- (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr + 1))
- mhi_log(MHI_MSG_INFO, "No nested IRQ disable Allowed\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ client_handle->mhi_dev_ctxt;
+ struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[client_handle->event_ring_index];
+
+ disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+ ev_ring->msi_disable_cntr++;
}
void mhi_unmask_irq(struct mhi_client_handle *client_handle)
{
- client_handle->mhi_dev_ctxt->counters.msi_enable_cntr++;
- enable_irq(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
- client_handle->msi_vec));
- if (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr >
- client_handle->mhi_dev_ctxt->counters.msi_disable_cntr)
- mhi_log(MHI_MSG_INFO, "No nested IRQ enable Allowed\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ client_handle->mhi_dev_ctxt;
+ struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[client_handle->event_ring_index];
+
+ ev_ring->msi_enable_cntr++;
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+}
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
+{
+ struct device *mhi_device = dev_id;
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ int msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
+
+ if (!mhi_dev_ctxt) {
+ mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
+ return IRQ_HANDLED;
+ }
+ mhi_dev_ctxt->counters.msi_counter[
+ IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
+ mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+ trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+ if (msi) {
+ atomic_inc(&mhi_dev_ctxt->counters.events_pending);
+ wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
+ } else {
+ disable_irq_nosync(irq_number);
+ tasklet_schedule(&mhi_dev_ctxt->ev_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
+{
+ struct device *mhi_device = dev_id;
+ u32 client_index;
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ struct mhi_client_handle *client_handle;
+ struct mhi_client_info_t *client_info;
+ struct mhi_cb_info cb_info;
+ int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+ mhi_dev_ctxt->counters.msi_counter[msi_num]++;
+ mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+ trace_mhi_msi(msi_num);
+ client_index = MHI_MAX_CHANNELS -
+ (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
+ client_handle = mhi_dev_ctxt->client_handle_list[client_index];
+ client_info = &client_handle->client_info;
+ if (likely(client_handle)) {
+ client_handle->result.user_data =
+ client_handle->user_data;
+ if (likely(client_info->mhi_client_cb)) {
+ cb_info.result = &client_handle->result;
+ cb_info.cb_reason = MHI_CB_XFER;
+ cb_info.chan = client_handle->chan_info.chan_nr;
+ cb_info.result->transaction_status = 0;
+ client_info->mhi_client_cb(&cb_info);
+ }
+ }
+ return IRQ_HANDLED;
}
diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h
index bb47f53e244d..133c0eeb034e 100644
--- a/drivers/platform/msm/mhi/mhi_macros.h
+++ b/drivers/platform/msm/mhi/mhi_macros.h
@@ -96,7 +96,6 @@
((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
((_mhi_dev_ctxt)->mmio_info.nr_hw_event_rings)))
-
/* MHI Transfer Ring Elements 7.4.1*/
#define TX_TRB_LEN
#define MHI_TX_TRB_LEN__SHIFT (0)
@@ -244,9 +243,21 @@
#define MHI_CHAN_TYPE__MASK (3)
#define MHI_CHAN_TYPE__SHIFT (6)
+#define PRESERVE_DB_STATE
+#define MHI_PRESERVE_DB_STATE__MASK (1)
+#define MHI_PRESERVE_DB_STATE__SHIFT (8)
+
+#define BRSTMODE
+#define MHI_BRSTMODE__MASK (3)
+#define MHI_BRSTMODE__SHIFT (9)
+
#define GET_CHAN_PROPS(_FIELD, _VAL) \
(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
+#define EV_BRSTMODE
+#define MHI_EV_BRSTMODE__MASK (3)
+#define MHI_EV_BRSTMODE__SHIFT (5)
+
#define EV_TYPE
#define MHI_EV_TYPE__MASK (3)
#define MHI_EV_TYPE__SHIFT (3)
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index c949745f5af7..430dc918af7e 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@
#include "mhi_macros.h"
#include "mhi_trace.h"
+static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
+ union mhi_cmd_pkt *cmd_pkt);
+
static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
{
bb_ctxt->el_size = sizeof(struct mhi_buf_info);
@@ -212,7 +215,9 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
ring->len, ring->base,
cc_list->mhi_trb_ring_base_addr);
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
- MHI_CHAN_STATE_DISABLED);
+ MHI_CHAN_STATE_DISABLED,
+ false,
+ MHI_BRSTMODE_DEFAULT);
return 0;
}
@@ -259,7 +264,11 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
client_handle->chan_info.flags),
client_handle->chan_info.ev_ring,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
- MHI_CHAN_STATE_ENABLED);
+ MHI_CHAN_STATE_ENABLED,
+ GET_CHAN_PROPS(PRESERVE_DB_STATE,
+ client_handle->chan_info.flags),
+ GET_CHAN_PROPS(BRSTMODE,
+ client_handle->chan_info.flags));
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -268,48 +277,60 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
{
int ret_val = 0;
struct mhi_device_ctxt *mhi_dev_ctxt;
- int r = 0;
+ struct mhi_ring *chan_ring;
int chan;
+ struct mhi_chan_cfg *cfg;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+ enum MHI_EVENT_CCS ev_code;
- if (NULL == client_handle ||
- client_handle->magic != MHI_HANDLE_MAGIC)
+ if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
return -EINVAL;
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- r = get_chan_props(mhi_dev_ctxt,
- client_handle->chan_info.chan_nr,
- &client_handle->chan_info);
- if (r)
- return r;
+ ret_val = get_chan_props(mhi_dev_ctxt,
+ client_handle->chan_info.chan_nr,
+ &client_handle->chan_info);
+ if (ret_val)
+ return ret_val;
chan = client_handle->chan_info.chan_nr;
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+ chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ mutex_lock(&cfg->chan_lock);
mhi_log(MHI_MSG_INFO,
"Entered: Client opening chan 0x%x\n", chan);
if (mhi_dev_ctxt->dev_exec_env <
GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
- client_handle->chan_info.flags)) {
+ client_handle->chan_info.flags)) {
mhi_log(MHI_MSG_INFO,
"Chan %d, MHI exec_env %d, not ready!\n",
- chan, mhi_dev_ctxt->dev_exec_env);
+ chan,
+ mhi_dev_ctxt->dev_exec_env);
+ mutex_unlock(&cfg->chan_lock);
return -ENOTCONN;
}
- r = populate_tre_ring(client_handle);
- if (r) {
+ ret_val = populate_tre_ring(client_handle);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize tre ring chan %d ret %d\n",
- chan, r);
- return r;
+ chan,
+ ret_val);
+ mutex_unlock(&cfg->chan_lock);
+ return ret_val;
}
client_handle->event_ring_index =
- mhi_dev_ctxt->dev_space.ring_ctxt.
- cc_list[chan].mhi_event_ring_index;
- r = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
- client_handle->chan_info.max_desc);
- if (r) {
+ mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].
+ mhi_event_ring_index;
+ ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
+ client_handle->chan_info.max_desc);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize bb ctxt chan %d ret %d\n",
- chan, r);
- return r;
+ chan,
+ ret_val);
+ mutex_unlock(&cfg->chan_lock);
+ return ret_val;
}
client_handle->msi_vec =
@@ -319,16 +340,70 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
client_handle->event_ring_index].mhi_intmodt;
- init_completion(&client_handle->chan_open_complete);
- ret_val = start_chan_sync(client_handle);
+ init_completion(&cfg->cmd_complete);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+ mhi_log(MHI_MSG_ERROR,
+ "MHI State is disabled\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&cfg->chan_lock);
+ return -EIO;
+ }
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (0 != ret_val)
+ spin_lock_irq(&chan_ring->ring_lock);
+ chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
+ spin_unlock_irq(&chan_ring->ring_lock);
+ ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ MHI_COMMAND_START_CHAN,
+ chan);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
- "Failed to start chan 0x%x, ret %d\n", chan, ret_val);
- BUG_ON(ret_val);
+ "Failed to send start cmd for chan %d ret %d\n",
+ chan, ret_val);
+ goto error_completion;
+ }
+ ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
+ msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
+ if (!ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n",
+ chan);
+ goto error_completion;
+ } else {
+ ret_val = 0;
+ }
+
+ spin_lock(&cfg->event_lock);
+ cmd_event_pkt = cfg->cmd_event_pkt;
+ cmd_pkt = cfg->cmd_pkt;
+ spin_unlock(&cfg->event_lock);
+
+ ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+ ((union mhi_event_pkt *)&cmd_event_pkt));
+ if (ev_code != MHI_EVENT_CC_SUCCESS) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error to receive event completion ev_code:0x%x\n",
+ ev_code);
+ ret_val = -EIO;
+ goto error_completion;
+ }
+
client_handle->chan_status = 1;
+
+error_completion:
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mutex_unlock(&cfg->chan_lock);
+
mhi_log(MHI_MSG_INFO,
- "Exited chan 0x%x\n", chan);
+ "Exited chan 0x%x ret:%d\n", chan, ret_val);
return ret_val;
}
EXPORT_SYMBOL(mhi_open_channel);
@@ -387,46 +462,86 @@ EXPORT_SYMBOL(mhi_register_channel);
void mhi_close_channel(struct mhi_client_handle *client_handle)
{
u32 chan;
- int r = 0;
int ret_val = 0;
+ struct mhi_chan_cfg *cfg;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+ struct mhi_ring *chan_ring;
+ enum MHI_EVENT_CCS ev_code;
if (!client_handle ||
client_handle->magic != MHI_HANDLE_MAGIC ||
!client_handle->chan_status)
return;
+ mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
- init_completion(&client_handle->chan_reset_complete);
- if (!atomic_read(&client_handle->mhi_dev_ctxt->flags.pending_ssr)) {
- ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_RESET_CHAN, chan);
- if (ret_val != 0) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to send reset cmd for chan %d ret %d\n",
- chan, ret_val);
- }
- r = wait_for_completion_timeout(
- &client_handle->chan_reset_complete,
+ chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ mutex_lock(&cfg->chan_lock);
+
+ /* No more processing events for this channel */
+ spin_lock_irq(&chan_ring->ring_lock);
+ chan_ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ spin_unlock_irq(&chan_ring->ring_lock);
+ init_completion(&cfg->cmd_complete);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ MHI_COMMAND_RESET_CHAN,
+ chan);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to send reset cmd for chan %d ret %d\n",
+ chan,
+ ret_val);
+ goto error_completion;
+ }
+ ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
- if (!r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to reset chan %d ret %d\n",
- chan, r);
- } else {
- /*
- * Assumption: Device is not playing with our
- * buffers after BEFORE_SHUTDOWN
- */
- mhi_log(MHI_MSG_INFO,
- "Pending SSR local free only chan %d.\n", chan);
+ if (!ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n",
+ chan);
+ goto error_completion;
}
+ spin_lock_irq(&cfg->event_lock);
+ cmd_event_pkt = cfg->cmd_event_pkt;
+ cmd_pkt = cfg->cmd_pkt;
+ spin_unlock_irq(&cfg->event_lock);
+ ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+ ((union mhi_event_pkt *)&cmd_event_pkt));
+ if (ev_code != MHI_EVENT_CC_SUCCESS) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error to receive event completion ev_cod:0x%x\n",
+ ev_code);
+ goto error_completion;
+ }
+
+ ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
+ if (ret_val)
+ mhi_log(MHI_MSG_ERROR,
+ "Error resetting cmd ret:%d\n",
+ ret_val);
+
+error_completion:
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
free_tre_ring(client_handle);
mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
client_handle->chan_status = 0;
+ mutex_unlock(&cfg->chan_lock);
}
EXPORT_SYMBOL(mhi_close_channel);
@@ -439,93 +554,47 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING, chan,
(uintptr_t) chan_ctxt->wp);
- mhi_dev_ctxt->mhi_chan_db_order[chan]++;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- chan, db_value);
-}
-
-int mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int ret_val = 0;
+ chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ chan,
+ db_value);
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
- mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
- (atomic_read(&mhi_dev_ctxt->flags.m2_transition)));
- if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
- if (mhi_dev_ctxt->flags.link_up) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- ret_val = -ENOTCONN;
- }
- } else{
- mhi_log(MHI_MSG_INFO, "M2 transition flag is set\n");
- ret_val = -ENOTCONN;
- }
- } else {
- ret_val = 0;
- }
-
- return ret_val;
}
static inline int mhi_queue_tre(struct mhi_device_ctxt
- *mhi_dev_ctxt,
- u32 chan,
- enum MHI_RING_TYPE type)
+ *mhi_dev_ctxt,
+ u32 chan,
+ enum MHI_RING_TYPE type)
{
struct mhi_chan_ctxt *chan_ctxt;
unsigned long flags = 0;
- int ret_val = 0;
u64 db_value = 0;
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
- mhi_dev_ctxt->counters.m1_m0++;
- if (type == MHI_RING_TYPE_CMD_RING)
- atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
+ if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ return -EACCES;
- ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
- if (likely(((ret_val == 0) &&
- (((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
- (mhi_dev_ctxt->mhi_state == MHI_STATE_M1))) &&
- (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR)) &&
- (!mhi_dev_ctxt->flags.pending_M3))) {
- if (likely(type == MHI_RING_TYPE_XFER_RING)) {
- spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
- flags);
- db_value =
- mhi_v2p_addr(
- mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING,
- chan,
- (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
- mhi_dev_ctxt->mhi_chan_db_order[chan]++;
- mhi_update_chan_db(mhi_dev_ctxt, chan);
- spin_unlock_irqrestore(
- &mhi_dev_ctxt->db_write_lock[chan], flags);
- } else if (type == MHI_RING_TYPE_CMD_RING) {
- db_value = mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_CMD_RING,
- PRIMARY_CMD_RING,
- (uintptr_t)
- mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING].wp);
- mhi_dev_ctxt->cmd_ring_order++;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- 0, db_value);
- } else {
- mhi_log(MHI_MSG_VERBOSE,
- "Wrong type of packet = %d\n", type);
- ret_val = -EPROTO;
- }
+ if (likely(type == MHI_RING_TYPE_XFER_RING)) {
+ struct mhi_ring *mhi_ring =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ mhi_update_chan_db(mhi_dev_ctxt, chan);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
} else {
- mhi_log(MHI_MSG_VERBOSE,
- "Wakeup, pending data state %d chan state %d\n",
- mhi_dev_ctxt->mhi_state,
- chan_ctxt->mhi_chan_state);
- ret_val = 0;
+ struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_CMD_RING,
+ PRIMARY_CMD_RING,
+ (uintptr_t)cmd_ring->wp);
+ cmd_ring->db_mode.process_db
+ (mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.cmd_db_addr,
+ 0,
+ db_value);
}
- return ret_val;
+ return 0;
}
static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int chan, void *buf, size_t buf_len,
@@ -645,17 +714,11 @@ static int mhi_queue_dma_xfer(
int ret_val;
enum MHI_CLIENT_CHANNEL chan;
struct mhi_device_ctxt *mhi_dev_ctxt;
- unsigned long flags;
-
- if (!client_handle || !buf || !buf_len)
- return -EINVAL;
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
"Client buffer is of invalid length\n");
chan = client_handle->chan_info.chan_nr;
- mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
pkt_loc->data_tx_pkt.buffer_ptr = buf;
@@ -682,24 +745,14 @@ static int mhi_queue_dma_xfer(
(void *)&pkt_loc);
if (unlikely(0 != ret_val)) {
mhi_log(MHI_MSG_VERBOSE,
- "Failed to insert trb in xfer ring\n");
- goto error;
+ "Failed to insert trb in xfer ring\n");
+ return ret_val;
}
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
+
if (MHI_OUT ==
GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
- ret_val = mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
- if (unlikely(ret_val))
- mhi_log(MHI_MSG_VERBOSE, "Failed queue TRE.\n");
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-error:
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return ret_val;
}
@@ -709,10 +762,28 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
int r;
enum dma_data_direction dma_dir;
struct mhi_buf_info *bb;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ u32 chan;
+ unsigned long flags;
if (!client_handle || !buf || !buf_len)
return -EINVAL;
+ mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ chan = client_handle->chan_info.chan_nr;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_log(MHI_MSG_ERROR,
+ "MHI is not in active state\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
if (MHI_OUT == GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
dma_dir = DMA_TO_DEVICE;
else
@@ -723,8 +794,16 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
buf, buf_len, dma_dir, &bb);
if (r) {
mhi_log(MHI_MSG_VERBOSE,
- "Failed to create BB, chan %d ret %d\n",
- client_handle->chan_info.chan_nr, r);
+ "Failed to create BB, chan %d ret %d\n",
+ chan,
+ r);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->
+ pcie_device->dev);
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
return r;
}
@@ -733,9 +812,9 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
buf, buf_len, (u64)bb->bb_p_addr,
client_handle->chan_info.chan_nr);
r = mhi_queue_dma_xfer(client_handle,
- bb->bb_p_addr,
- bb->buf_len,
- mhi_flags);
+ bb->bb_p_addr,
+ bb->buf_len,
+ mhi_flags);
/*
* Assumption: If create_bounce_buffer did not fail, we do not
@@ -743,103 +822,76 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
* out of sync with the descriptor list which is problematic.
*/
BUG_ON(r);
- return r;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
+ if (dma_dir == DMA_FROM_DEVICE) {
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ }
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ return 0;
}
EXPORT_SYMBOL(mhi_queue_xfer);
int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_COMMAND cmd, u32 chan)
{
- unsigned long flags = 0;
union mhi_cmd_pkt *cmd_pkt = NULL;
- enum MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED;
- enum MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED;
enum MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
- struct mutex *chan_mutex = NULL;
int ret_val = 0;
+ unsigned long flags, flags2;
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
- if (chan >= MHI_MAX_CHANNELS ||
- cmd >= MHI_COMMAND_MAX_NR || mhi_dev_ctxt == NULL) {
+ if (chan >= MHI_MAX_CHANNELS || cmd >= MHI_COMMAND_MAX_NR) {
mhi_log(MHI_MSG_ERROR,
- "Invalid channel id, received id: 0x%x", chan);
+ "Invalid channel id, received id: 0x%x",
+ chan);
return -EINVAL;
}
mhi_log(MHI_MSG_INFO,
- "Entered, MHI state %d dev_exec_env %d chan %d cmd %d\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->dev_exec_env,
- chan, cmd);
- mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- /*
- * If there is a cmd pending a device confirmation,
- * do not send anymore for this channel
- */
- if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) {
- mhi_log(MHI_MSG_ERROR, "Cmd Pending on chan %d", chan);
- ret_val = -EALREADY;
- goto error_invalid;
- }
-
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- from_state =
- mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].mhi_chan_state;
+ "Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ mhi_dev_ctxt->dev_exec_env, chan, cmd);
switch (cmd) {
break;
case MHI_COMMAND_RESET_CHAN:
- to_state = MHI_CHAN_STATE_DISABLED;
ring_el_type = MHI_PKT_TYPE_RESET_CHAN_CMD;
break;
case MHI_COMMAND_START_CHAN:
- switch (from_state) {
- case MHI_CHAN_STATE_DISABLED:
- case MHI_CHAN_STATE_ENABLED:
- case MHI_CHAN_STATE_STOP:
- to_state = MHI_CHAN_STATE_RUNNING;
- break;
- default:
- mhi_log(MHI_MSG_ERROR,
- "Invalid stt cmd 0x%x, from_state 0x%x\n",
- cmd, from_state);
- ret_val = -EPERM;
- goto error_invalid;
- }
ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
break;
default:
mhi_log(MHI_MSG_ERROR, "Bad command received\n");
+ return -EINVAL;
}
- mutex_lock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
- ret_val = ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt,
- (void *)&cmd_pkt);
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ ret_val = ctxt_add_element(mhi_ring, (void *)&cmd_pkt);
if (ret_val) {
mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
- goto error_general;
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
+ return ret_val;
}
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
- mutex_lock(chan_mutex);
+
MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type);
MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- mutex_unlock(chan_mutex);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING;
-
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags2);
mhi_queue_tre(mhi_dev_ctxt, 0, MHI_RING_TYPE_CMD_RING);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags2);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
- mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan %d\n",
- cmd, chan);
-error_general:
- mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
-error_invalid:
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_log(MHI_MSG_VERBOSE,
+ "Sent command 0x%x for chan %d\n",
+ cmd,
+ chan);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
mhi_log(MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
return ret_val;
}
@@ -870,7 +922,6 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
result->buf_addr = bb->client_buf;
result->bytes_xferd = bb->filled_size;
- result->transaction_status = 0;
/* At this point the bounce buffer is no longer necessary
* Whatever was received from the device was copied back to the
@@ -922,6 +973,9 @@ static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(MHI_MSG_RAW, "Removing BB from head, chan %d\n", chan);
atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
NULL);
BUG_ON(ret_val);
@@ -1018,16 +1072,8 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
ctxt_index_rp, ctxt_index_wp, chan);
BUG_ON(bb_index != ctxt_index_rp);
} else {
- /* Hardware Channel, no client registerered,
- drop data */
- recycle_trb_and_ring(mhi_dev_ctxt,
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
- MHI_RING_TYPE_XFER_RING,
- chan);
BUG();
- /* No bounce buffer to recycle as no user request
- * can be present.
- */
+
}
}
return 0;
@@ -1068,13 +1114,11 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
u32 nr_trb_to_parse;
u32 i = 0;
u32 ev_code;
+ struct mhi_ring *local_chan_ctxt;
trace_mhi_ev(event);
chan = MHI_EV_READ_CHID(EV_CHID, event);
- if (unlikely(!VALID_CHAN_NR(chan))) {
- mhi_log(MHI_MSG_ERROR, "Bad ring id.\n");
- return -EINVAL;
- }
+ local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE, event);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
client_handle->pkt_count++;
@@ -1095,10 +1139,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
dma_addr_t trb_data_loc;
u32 ieot_flag;
int ret_val;
- struct mhi_ring *local_chan_ctxt;
- local_chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
@@ -1120,6 +1161,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
mhi_log(MHI_MSG_CRITICAL,
"Failed to get nr available trbs ret: %d.\n",
ret_val);
+ panic("critical error");
return ret_val;
}
do {
@@ -1164,26 +1206,22 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
case MHI_EVENT_CC_OOB:
case MHI_EVENT_CC_DB_MODE:
{
- struct mhi_ring *chan_ctxt = NULL;
u64 db_value = 0;
- mhi_dev_ctxt->flags.uldl_enabled = 1;
- chan = MHI_EV_READ_CHID(EV_CHID, event);
- mhi_dev_ctxt->flags.db_mode[chan] = 1;
- chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
- if (chan_ctxt->wp != chan_ctxt->rp) {
+
+ local_chan_ctxt->db_mode.db_mode = 1;
+ if (local_chan_ctxt->wp != local_chan_ctxt->rp) {
db_value = mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING, chan,
- (uintptr_t) chan_ctxt->wp);
- mhi_process_db(mhi_dev_ctxt,
+ MHI_RING_TYPE_XFER_RING, chan,
+ (uintptr_t) local_chan_ctxt->wp);
+ local_chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr, chan,
db_value);
}
client_handle = mhi_dev_ctxt->client_handle_list[chan];
- if (NULL != client_handle)
- result->transaction_status = -ENOTCONN;
+ if (client_handle)
+ result->transaction_status = -ENOTCONN;
break;
}
case MHI_EVENT_CC_BAD_TRE:
@@ -1216,6 +1254,9 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u64 db_value = 0;
void *removed_element = NULL;
void *added_element = NULL;
+ unsigned long flags;
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[ring_index];
ret_val = ctxt_del_element(ring, &removed_element);
@@ -1224,102 +1265,26 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
return ret_val;
}
ret_val = ctxt_add_element(ring, &added_element);
- if (0 != ret_val)
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
- (uintptr_t) ring->wp);
- if (0 != ret_val)
return ret_val;
- if (MHI_RING_TYPE_XFER_RING == ring_type) {
- union mhi_xfer_pkt *removed_xfer_pkt =
- (union mhi_xfer_pkt *)removed_element;
- union mhi_xfer_pkt *added_xfer_pkt =
- (union mhi_xfer_pkt *)added_element;
- added_xfer_pkt->data_tx_pkt =
- *(struct mhi_tx_pkt *)removed_xfer_pkt;
- } else if (MHI_RING_TYPE_EVENT_RING == ring_type) {
-
- spinlock_t *lock;
- unsigned long flags;
-
- if (ring_index >= mhi_dev_ctxt->mmio_info.nr_event_rings)
- return -ERANGE;
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
- spin_lock_irqsave(lock, flags);
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
- (uintptr_t) ring->wp);
- mhi_log(MHI_MSG_INFO,
- "Updating ctxt, ring index %d\n", ring_index);
- mhi_update_ctxt(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- ring_index, db_value);
- mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
- mhi_dev_ctxt->counters.ev_counter[ring_index]++;
- spin_unlock_irqrestore(lock, flags);
}
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- /* Asserting Device Wake here, will imediately wake mdm */
- if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
- MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
- mhi_dev_ctxt->flags.link_up) {
- switch (ring_type) {
- case MHI_RING_TYPE_CMD_RING:
- {
- struct mutex *cmd_mutex = NULL;
-
- cmd_mutex =
- &mhi_dev_ctxt->
- mhi_cmd_mutex_list[PRIMARY_CMD_RING];
- mutex_lock(cmd_mutex);
- mhi_dev_ctxt->cmd_ring_order = 1;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- ring_index, db_value);
- mutex_unlock(cmd_mutex);
- break;
- }
- case MHI_RING_TYPE_EVENT_RING:
- {
- spinlock_t *lock = NULL;
- unsigned long flags = 0;
-
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
- spin_lock_irqsave(lock, flags);
- mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
- if ((mhi_dev_ctxt->counters.ev_counter[ring_index] %
- MHI_EV_DB_INTERVAL) == 0) {
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type,
- ring_index,
- (uintptr_t) ring->wp);
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- ring_index, db_value);
- }
- spin_unlock_irqrestore(lock, flags);
- break;
- }
- case MHI_RING_TYPE_XFER_RING:
- {
- unsigned long flags = 0;
-
- spin_lock_irqsave(
- &mhi_dev_ctxt->db_write_lock[ring_index],
- flags);
- mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- ring_index, db_value);
- spin_unlock_irqrestore(
- &mhi_dev_ctxt->db_write_lock[ring_index],
- flags);
- break;
- }
- default:
- mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
- }
- }
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- return ret_val;
+
+ if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ return -EACCES;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ ring_type,
+ ring_index,
+ (uintptr_t) ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ ring_index, db_value);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+ return 0;
+
}
static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1328,15 +1293,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan = 0;
int ret_val = 0;
struct mhi_ring *local_chan_ctxt;
+ struct mhi_ring *ev_ring;
struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *ev_ctxt = NULL;
struct mhi_client_handle *client_handle = NULL;
- struct mutex *chan_mutex;
- int pending_el = 0;
+ int pending_el = 0, i;
struct mhi_ring *bb_ctxt;
+ unsigned long flags;
+ union mhi_event_pkt *local_rp = NULL;
+ union mhi_event_pkt *device_rp = NULL;
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-
if (!VALID_CHAN_NR(chan)) {
mhi_log(MHI_MSG_ERROR,
"Bad channel number for CCE\n");
@@ -1344,13 +1312,41 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
}
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
- mutex_lock(chan_mutex);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
+ ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[chan_ctxt->mhi_event_ring_index];
+ ev_ctxt = &mhi_dev_ctxt->
+ dev_space.ring_ctxt.ec_list[chan_ctxt->mhi_event_ring_index];
mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n");
+ /* Clear all stale events related to Channel */
+ spin_lock_irqsave(&ev_ring->ring_lock, flags);
+ device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
+ mhi_dev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ chan_ctxt->mhi_event_ring_index,
+ ev_ctxt->mhi_event_read_ptr);
+ local_rp = (union mhi_event_pkt *)ev_ring->rp;
+ while (device_rp != local_rp) {
+ if (MHI_TRB_READ_INFO(EV_TRB_TYPE, local_rp) ==
+ MHI_PKT_TYPE_TX_EVENT) {
+ u32 ev_chan = MHI_EV_READ_CHID(EV_CHID, local_rp);
+
+ /* Mark as stale event */
+ if (ev_chan == chan)
+ MHI_TRB_SET_INFO(EV_TRB_TYPE,
+ local_rp,
+ MHI_PKT_TYPE_STALE_EVENT);
+ }
+
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+ spin_unlock_irqrestore(&ev_ring->ring_lock, flags);
+
/*
* If outbound elements are pending, they must be cleared since
* they will never be acked after a channel reset.
@@ -1365,6 +1361,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan, pending_el);
atomic_sub(pending_el, &mhi_dev_ctxt->counters.outbound_acks);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ for (i = 0; i < pending_el; i++)
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ for (i = 0; i < pending_el; i++) {
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ }
/* Reset the local channel context */
local_chan_ctxt->rp = local_chan_ctxt->base;
@@ -1372,37 +1380,17 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
local_chan_ctxt->ack_rp = local_chan_ctxt->base;
/* Reset the mhi channel context */
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n");
reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
- mutex_unlock(chan_mutex);
mhi_log(MHI_MSG_INFO, "Reset complete.\n");
- if (NULL != client_handle)
- complete(&client_handle->chan_reset_complete);
return ret_val;
}
-static int start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_cmd_pkt *cmd_pkt)
-{
- u32 chan;
-
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- if (!VALID_CHAN_NR(chan))
- mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
- MHI_CMD_NOT_PENDING;
- mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
- if (NULL != mhi_dev_ctxt->client_handle_list[chan])
- complete(
- &mhi_dev_ctxt->client_handle_list[chan]->chan_open_complete);
- return 0;
-}
enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_event_pkt *ev_pkt,
union mhi_cmd_pkt **cmd_pkt,
@@ -1421,68 +1409,13 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
return MHI_EV_READ_CODE(EV_TRB_CODE, ev_pkt);
}
-int parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_event_pkt *ev_pkt, u32 event_index)
-{
- int ret_val = 0;
- union mhi_cmd_pkt *cmd_pkt = NULL;
- u32 event_code = 0;
-
- event_code = get_cmd_pkt(mhi_dev_ctxt, ev_pkt, &cmd_pkt, event_index);
- switch (event_code) {
- case MHI_EVENT_CC_SUCCESS:
- {
- u32 chan = 0;
-
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- switch (MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)) {
- mhi_log(MHI_MSG_INFO, "CCE chan %d cmd %d\n", chan,
- MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
- case MHI_PKT_TYPE_RESET_CHAN_CMD:
- ret_val = reset_chan_cmd(mhi_dev_ctxt, cmd_pkt);
- if (ret_val)
- mhi_log(MHI_MSG_INFO,
- "Failed to process reset cmd ret %d\n",
- ret_val);
- break;
- case MHI_PKT_TYPE_STOP_CHAN_CMD:
- if (ret_val) {
- mhi_log(MHI_MSG_INFO,
- "Failed to set chan state\n");
- return ret_val;
- }
- break;
- case MHI_PKT_TYPE_START_CHAN_CMD:
- ret_val = start_chan_cmd(mhi_dev_ctxt, cmd_pkt);
- if (ret_val)
- mhi_log(MHI_MSG_INFO,
- "Failed to process reset cmd\n");
- break;
- default:
- mhi_log(MHI_MSG_INFO,
- "Bad cmd type 0x%x\n",
- MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
- break;
- }
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
- atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
- break;
- }
- default:
- mhi_log(MHI_MSG_INFO, "Unhandled mhi completion code\n");
- break;
- }
- ctxt_del_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, NULL);
- return 0;
-}
-
int mhi_poll_inbound(struct mhi_client_handle *client_handle,
struct mhi_result *result)
{
struct mhi_tx_pkt *pending_trb = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
struct mhi_ring *local_chan_ctxt = NULL;
- struct mutex *chan_mutex = NULL;
+ struct mhi_chan_cfg *cfg;
struct mhi_ring *bb_ctxt = NULL;
struct mhi_buf_info *bb = NULL;
int chan = 0, r = 0;
@@ -1495,10 +1428,10 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- mutex_lock(chan_mutex);
+ mutex_lock(&cfg->chan_lock);
if (bb_ctxt->rp != bb_ctxt->ack_rp) {
pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
result->flags = pending_trb->info;
@@ -1524,7 +1457,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
result->bytes_xferd = 0;
r = -ENODATA;
}
- mutex_unlock(chan_mutex);
+ mutex_unlock(&cfg->chan_lock);
mhi_log(MHI_MSG_VERBOSE,
"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
result->buf_addr, result->bytes_xferd, chan);
@@ -1581,49 +1514,80 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
return MHI_EPID;
}
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+/*
+ * mhi_assert_device_wake - Set WAKE_DB register
+ * force_set - if true, will set bit regardless of counts
+ */
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set)
{
- if ((mhi_dev_ctxt->mmio_info.chan_db_addr) &&
- (mhi_dev_ctxt->flags.link_up)) {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
- mhi_dev_ctxt->enable_lpm);
- atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
+ unsigned long flags;
+
+ if (unlikely(force_set)) {
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ atomic_inc(&mhi_dev_ctxt->counters.device_wake);
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB, 1);
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
+ } else {
+ if (likely(atomic_add_unless(&mhi_dev_ctxt->
+ counters.device_wake,
+ 1,
+ 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ if ((atomic_inc_return(&mhi_dev_ctxt->counters.device_wake)
+ == 1) &&
+ MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
mhi_write_db(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 1);
- mhi_dev_ctxt->device_wake_asserted = 1;
- } else {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
+ MHI_DEV_WAKE_DB,
+ 1);
+ }
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
}
- return 0;
}
-inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- if ((mhi_dev_ctxt->enable_lpm) &&
- (atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
- (mhi_dev_ctxt->mmio_info.chan_db_addr != NULL) &&
- (mhi_dev_ctxt->flags.link_up)) {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
- atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
- mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 0);
- mhi_dev_ctxt->device_wake_asserted = 0;
- } else {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d DEV_WAKE %d link %d\n",
- mhi_dev_ctxt->enable_lpm,
- atomic_read(&mhi_dev_ctxt->flags.device_wake),
- mhi_dev_ctxt->flags.link_up);
- }
- return 0;
+ unsigned long flags;
+
+ WARN_ON(atomic_read(&mhi_dev_ctxt->counters.device_wake) == 0);
+
+ if (likely(atomic_add_unless
+ (&mhi_dev_ctxt->counters.device_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ if ((atomic_dec_return(&mhi_dev_ctxt->counters.device_wake) == 0) &&
+ MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB,
+ 0);
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
}
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm)
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm)
{
- mhi_log(MHI_MSG_VERBOSE, "LPM Set %d\n", enable_lpm);
- client_handle->mhi_dev_ctxt->enable_lpm = enable_lpm ? 1 : 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ unsigned long flags;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+ /* Disable low power mode by asserting Wake */
+ if (enable_lpm == false)
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ else
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
return 0;
}
+EXPORT_SYMBOL(mhi_set_lpm);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index)
@@ -1648,10 +1612,57 @@ int mhi_deregister_channel(struct mhi_client_handle
}
EXPORT_SYMBOL(mhi_deregister_channel);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val)
+{
+ struct mhi_ring *ring_ctxt =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+ if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr)
+ ring_ctxt = &mhi_dev_ctxt->
+ mhi_local_chan_ctxt[chan];
+ else
+ ring_ctxt = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[chan];
+
+ mhi_log(MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
+
+ mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+
+ if (ring_ctxt->db_mode.db_mode) {
+ mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+ ring_ctxt->db_mode.db_mode = 0;
+ } else {
+ mhi_log(MHI_MSG_INFO,
+ "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ chan,
+ ring_ctxt->db_mode.brstmode,
+ ring_ctxt->db_mode.db_mode);
+ }
+}
+
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val)
+{
+ mhi_log(MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
+
+ mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+ mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+}
+
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t chan, u32 val)
{
+
mhi_log(MHI_MSG_VERBOSE,
"db.set addr: %p io_offset 0x%lx val:0x%x\n",
io_addr, chan, val);
@@ -1660,28 +1671,29 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
/* Channel Doorbell and Polling Mode Disabled or Software Channel*/
if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
+ struct mhi_ring *chan_ctxt =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
if (!(IS_HARDWARE_CHANNEL(chan) &&
- mhi_dev_ctxt->flags.uldl_enabled &&
- !mhi_dev_ctxt->flags.db_mode[chan])) {
+ !chan_ctxt->db_mode.db_mode)) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
+ chan_ctxt->db_mode.db_mode = 0;
} else {
mhi_log(MHI_MSG_INFO,
- "Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
- chan, mhi_dev_ctxt->flags.uldl_enabled,
- mhi_dev_ctxt->flags.db_mode[chan]);
+ "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ chan, chan_ctxt->db_mode.brstmode,
+ chan_ctxt->db_mode.db_mode);
}
/* Event Doorbell and Polling mode Disabled */
} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
- /* Only ring for software channel */
- if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
- !mhi_dev_ctxt->flags.uldl_enabled) {
+ struct mhi_ring *ev_ctxt =
+ &mhi_dev_ctxt->mhi_local_event_ctxt[chan];
+ /* Only ring for software channel or db mode*/
+ if (!(IS_HW_EV_RING(mhi_dev_ctxt, chan) &&
+ !ev_ctxt->db_mode.db_mode)) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
} else {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
}
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index ddf18e466cd3..b4447378683e 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -9,6 +9,19 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/cpu.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/completion.h>
+#include <linux/pm_runtime.h>
#include "mhi_sys.h"
#include "mhi_hwio.h"
#include "mhi.h"
@@ -17,25 +30,40 @@ int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
u32 expiry_counter;
+ unsigned long flags;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
- MHISTATUS);
+ MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHICTRL_RESET_MASK,
MHICTRL_RESET_SHIFT);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
if (pcie_word_val == 0xFFFFFFFF)
return -ENOTCONN;
+
while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
expiry_counter++;
mhi_log(MHI_MSG_ERROR,
"Device is not RESET, sleeping and retrying.\n");
msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHICTRL);
MHI_READ_FIELD(pcie_word_val,
MHICTRL_RESET_MASK,
MHICTRL_RESET_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
}
if (MHI_STATE_READY != pcie_word_val)
@@ -47,15 +75,23 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
u32 expiry_counter;
+ unsigned long flags;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
/* Read MMIO and poll for READY bit to be set */
pcie_word_val = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHISTATUS_READY_MASK,
MHISTATUS_READY_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
if (pcie_word_val == 0xFFFFFFFF)
return -ENOTCONN;
@@ -65,10 +101,16 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(MHI_MSG_ERROR,
"Device is not ready, sleeping and retrying.\n");
msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
}
if (pcie_word_val != MHI_STATE_READY)
@@ -102,21 +144,20 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
- mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
- mhi_dev_ctxt->dev_props->mhi_ver);
- if (mhi_dev_ctxt->dev_props->mhi_ver == 0xFFFFFFFF)
- ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
- if (ret_val)
+ mhi_log(MHI_MSG_CRITICAL,
+ "Bad MMIO version, 0x%x\n",
+ mhi_dev_ctxt->dev_props->mhi_ver);
return ret_val;
}
+
/* Enable the channels */
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
struct mhi_chan_ctxt *chan_ctxt =
&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i];
if (VALID_CHAN_NR(i))
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
else
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
}
mhi_log(MHI_MSG_INFO,
"Read back MMIO Ready bit successfully. Moving on..\n");
@@ -144,6 +185,11 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHICFG,
MHICFG_NER_MASK, MHICFG_NER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_event_rings);
+ mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHICFG,
+ MHICFG_NHWER_MASK,
+ MHICFG_NHWER_SHIFT,
+ mhi_dev_ctxt->mmio_info.nr_hw_event_rings);
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list;
pcie_word_val = HIGH_WORD(pcie_dword_val);
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index a928c05b805a..2f44601e225e 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -22,16 +22,12 @@
#include "mhi_hwio.h"
/* Write only sysfs attributes */
-static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
-static DEVICE_ATTR(MHI_RESET, S_IWUSR, NULL, sysfs_init_mhi_reset);
/* Read only sysfs attributes */
static struct attribute *mhi_attributes[] = {
- &dev_attr_MHI_M3.attr,
&dev_attr_MHI_M0.attr,
- &dev_attr_MHI_RESET.attr,
NULL,
};
@@ -42,21 +38,20 @@ static struct attribute_group mhi_attribute_group = {
int mhi_pci_suspend(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- if (NULL == mhi_dev_ctxt)
- return -EINVAL;
- mhi_log(MHI_MSG_INFO, "Entered, MHI state %d\n",
- mhi_dev_ctxt->mhi_state);
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
+ mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m3(mhi_dev_ctxt);
+ /* if rpm status still active then force suspend */
+ if (!pm_runtime_status_suspended(dev)) {
+ r = mhi_runtime_suspend(dev);
+ if (r)
+ return r;
+ }
- if (!r)
- return r;
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
- mhi_log(MHI_MSG_INFO, "Exited, ret %d\n", r);
+ mhi_log(MHI_MSG_INFO, "Exit\n");
return r;
}
@@ -65,61 +60,150 @@ int mhi_runtime_suspend(struct device *dev)
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m3(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Init M3 failed ret %d\n", r);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ /* Link is already disabled */
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
+ mhi_log(MHI_MSG_INFO, "Already in active state, exiting\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return 0;
+ }
+
+ if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+ mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return -EBUSY;
+ }
+
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to get M0||M1 event, timeout, current state:%s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ r = -EIO;
+ goto rpm_suspend_exit;
+ }
+
+ mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(MHI_MSG_INFO,
+ "Waiting for M3 completion.\n");
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
+ msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to get M3 event, timeout, current state:%s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ r = -EIO;
+ goto rpm_suspend_exit;
+ }
+
+ r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+ if (r) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to Turn off link ret:%d\n", r);
+ }
- pm_runtime_mark_last_busy(dev);
+rpm_suspend_exit:
mhi_log(MHI_MSG_INFO, "Exited\n");
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
return r;
}
+int mhi_runtime_idle(struct device *dev)
+{
+ mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+
+ /*
+ * RPM framework during runtime resume always calls
+ * rpm_idle to see if device ready to suspend.
+ * If dev.power usage_count count is 0, rpm fw will call
+ * rpm_idle cb to see if device is ready to suspend.
+ * if cb return 0, or cb not defined the framework will
+ * assume device driver is ready to suspend;
+ * therefore, fw will schedule runtime suspend.
+ * In MHI power management, MHI host shall go to
+ * runtime suspend only after entering MHI State M2, even if
+ * usage count is 0. Return -EBUSY to disable automatic suspend.
+ */
+ return -EBUSY;
+}
+
int mhi_runtime_resume(struct device *dev)
{
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m0(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Init M0 failed ret %d\n", r);
- pm_runtime_mark_last_busy(dev);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state != MHI_PM_M3);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ /* turn on link */
+ r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
+ if (r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to resume link\n");
+ goto rpm_resume_exit;
+ }
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ /* Set and wait for M0 Event */
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to get M0 event, timeout\n");
+ r = -EIO;
+ goto rpm_resume_exit;
+ }
+ r = 0; /* no errors */
+
+rpm_resume_exit:
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
return r;
}
int mhi_pci_resume(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- r = mhi_initiate_m0(mhi_dev_ctxt);
- if (r)
- goto exit;
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "Timeout: No M0 event after %d ms\n",
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m0_event_timeouts++;
- r = -ETIME;
- break;
- case -ERESTARTSYS:
+ r = mhi_runtime_resume(dev);
+ if (r) {
mhi_log(MHI_MSG_CRITICAL,
- "Going Down...\n");
- break;
- default:
- mhi_log(MHI_MSG_INFO,
- "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
- r = 0;
+ "Failed to resume link\n");
+ } else {
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
-exit:
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
+
return r;
}
@@ -133,57 +217,15 @@ void mhi_rem_pm_sysfs(struct device *dev)
return sysfs_remove_group(&dev->kobj, &mhi_attribute_group);
}
-ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- r = mhi_initiate_m3(mhi_dev_ctxt);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to suspend %d\n", r);
- return r;
- }
- r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to turn off link ret %d\n", r);
-
- return count;
-}
-ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- int r = 0;
-
- mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n");
- r = mhi_trigger_reset(mhi_dev_ctxt);
- if (r != 0)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to trigger MHI RESET ret %d\n",
- r);
- else
- mhi_log(MHI_MSG_INFO, "Triggered! MHI RESET\n");
- return count;
-}
ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mhi_device_ctxt *mhi_dev_ctxt =
&mhi_devices.device_list[0].mhi_ctxt;
- if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume link\n");
- return count;
- }
- mhi_initiate_m0(mhi_dev_ctxt);
- mhi_log(MHI_MSG_CRITICAL,
- "Current mhi_state = 0x%x\n",
- mhi_dev_ctxt->mhi_state);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return count;
}
@@ -194,35 +236,42 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(MHI_MSG_INFO, "Entered...\n");
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
- mutex_lock(&mhi_dev_ctxt->mhi_link_state);
+
if (0 == mhi_dev_ctxt->flags.link_up) {
mhi_log(MHI_MSG_CRITICAL,
"Link already marked as down, nothing to do\n");
goto exit;
}
- /* Disable shadow to avoid restoring D3 hot struct device */
- r = msm_pcie_shadow_control(mhi_dev_ctxt->dev_info->pcie_device, 0);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to stop shadow config space: %d\n", r);
- r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, PCI_D3hot);
+ r = pci_save_state(pcie_dev);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
- "Failed to set pcie power state to D3 hotret: %x\n", r);
- goto exit;
+ "Failed to save pcie state ret: %d\n",
+ r);
}
+ mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+ pci_disable_device(pcie_dev);
+ r = pci_set_power_state(pcie_dev, PCI_D3hot);
+ if (r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to set pcie power state to D3 hot ret: %d\n",
+ r);
+ }
+
r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
- mhi_dev_ctxt->dev_info->pcie_device->bus->number,
- mhi_dev_ctxt->dev_info->pcie_device,
+ pcie_dev->bus->number,
+ pcie_dev,
NULL,
0);
if (r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to suspend pcie bus ret 0x%x\n", r);
+ "Failed to suspend pcie bus ret 0x%x\n", r);
+
+ r = mhi_set_bus_request(mhi_dev_ctxt, 0);
+ if (r)
+ mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
mhi_dev_ctxt->flags.link_up = 0;
exit:
- mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Exited...\n");
return 0;
}
@@ -234,37 +283,40 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
- mutex_lock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Entered...\n");
if (mhi_dev_ctxt->flags.link_up)
goto exit;
+
+ r = mhi_set_bus_request(mhi_dev_ctxt, 1);
+ if (r)
+ mhi_log(MHI_MSG_CRITICAL,
+ "Could not set bus frequency ret: %d\n",
+ r);
+
r = msm_pcie_pm_control(MSM_PCIE_RESUME,
- mhi_dev_ctxt->dev_info->pcie_device->bus->number,
- mhi_dev_ctxt->dev_info->pcie_device,
- NULL, 0);
+ pcie_dev->bus->number,
+ pcie_dev,
+ NULL,
+ 0);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to resume pcie bus ret %d\n", r);
goto exit;
}
- r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device,
- PCI_D0);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to load stored state %d\n", r);
- goto exit;
- }
- r = msm_pcie_recover_config(mhi_dev_ctxt->dev_info->pcie_device);
- if (r) {
+ r = pci_enable_device(pcie_dev);
+ if (r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to Recover config space ret: %d\n", r);
- goto exit;
- }
+ "Failed to enable device ret:%d\n",
+ r);
+
+ pci_load_and_free_saved_state(pcie_dev,
+ &mhi_dev_ctxt->dev_props->pcie_state);
+ pci_restore_state(pcie_dev);
+ pci_set_master(pcie_dev);
+
mhi_dev_ctxt->flags.link_up = 1;
exit:
- mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Exited...\n");
return r;
}
-
diff --git a/drivers/platform/msm/mhi/mhi_ring_ops.c b/drivers/platform/msm/mhi/mhi_ring_ops.c
index e5a6dd51f51a..07d0098a1b61 100644
--- a/drivers/platform/msm/mhi/mhi_ring_ops.c
+++ b/drivers/platform/msm/mhi/mhi_ring_ops.c
@@ -48,6 +48,9 @@ static int add_element(struct mhi_ring *ring, void **rp,
*assigned_addr = (char *)ring->wp;
*wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
+
+ /* force update visible to other cores */
+ smp_wmb();
return 0;
}
@@ -101,6 +104,9 @@ int delete_element(struct mhi_ring *ring, void **rp,
*rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
+
+ /* force update visible to other cores */
+ smp_wmb();
return 0;
}
@@ -108,6 +114,7 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
{
u32 chan;
struct mhi_device_ctxt *ctxt;
+ int bb_ring, ch_ring;
if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
!client_handle->mhi_dev_ctxt)
@@ -115,7 +122,10 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
- return get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+ bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
+ ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+
+ return min(bb_ring, ch_ring);
}
EXPORT_SYMBOL(mhi_get_free_desc);
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index 8ee3deddbb95..defd6f4fd137 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/pm_runtime.h>
#include <mhi_sys.h>
#include <mhi.h>
#include <mhi_bhi.h>
@@ -23,24 +24,11 @@
static int mhi_ssr_notify_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret_val = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
- mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
- if (NULL != mhi_dev_ctxt)
- mhi_dev_ctxt->esoc_notif = action;
switch (action) {
case SUBSYS_BEFORE_POWERUP:
mhi_log(MHI_MSG_INFO,
"Received Subsystem event BEFORE_POWERUP\n");
- atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1);
- ret_val = init_mhi_base_state(mhi_dev_ctxt);
- if (0 != ret_val)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to transition to base state %d.\n",
- ret_val);
break;
case SUBSYS_AFTER_POWERUP:
mhi_log(MHI_MSG_INFO,
@@ -148,7 +136,7 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
}
}
-static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
{
u32 pcie_word_val = 0;
int r = 0;
@@ -159,13 +147,11 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
BHI_EXECENV);
+ mhi_dev_ctxt->dev_exec_env = pcie_word_val;
if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
} else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
- r = bhi_probe(mhi_pcie_dev);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Failed to initialize BHI.\n");
} else {
mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
pcie_word_val);
@@ -178,10 +164,9 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
void mhi_link_state_cb(struct msm_pcie_notify *notify)
{
- int ret_val = 0;
+
struct mhi_pcie_dev_info *mhi_pcie_dev;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
- int r = 0;
if (NULL == notify || NULL == notify->data) {
mhi_log(MHI_MSG_CRITICAL,
@@ -198,32 +183,6 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
case MSM_PCIE_EVENT_LINKUP:
mhi_log(MHI_MSG_INFO,
"Received MSM_PCIE_EVENT_LINKUP\n");
- if (0 == mhi_pcie_dev->link_up_cntr) {
- mhi_log(MHI_MSG_INFO,
- "Initializing MHI for the first time\n");
- r = mhi_ctxt_init(mhi_pcie_dev);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "MHI initialization failed, ret %d.\n",
- r);
- r = msm_pcie_register_event(
- &mhi_pcie_dev->mhi_pci_link_event);
- mhi_log(MHI_MSG_ERROR,
- "Deregistered from PCIe notif r %d.\n",
- r);
- return;
- }
- mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
- mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
- pci_set_master(mhi_pcie_dev->pcie_device);
- r = set_mhi_base_state(mhi_pcie_dev);
- if (r)
- return;
- init_mhi_base_state(mhi_dev_ctxt);
- } else {
- mhi_log(MHI_MSG_INFO,
- "Received Link Up Callback\n");
- }
mhi_pcie_dev->link_up_cntr++;
break;
case MSM_PCIE_EVENT_WAKEUP:
@@ -231,17 +190,14 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
"Received MSM_PCIE_EVENT_WAKE\n");
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
- mhi_log(MHI_MSG_INFO,
- "There is a pending resume, doing nothing.\n");
- return;
- }
- ret_val = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_WAKE);
- if (0 != ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to init state transition, to %d\n",
- STATE_TRANSITION_WAKE);
+
+ if (mhi_dev_ctxt->flags.mhi_initialized) {
+ pm_runtime_get(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
}
break;
default:
@@ -255,12 +211,6 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
- mhi_assert_device_wake(mhi_dev_ctxt);
- mhi_dev_ctxt->flags.link_up = 1;
- r = mhi_set_bus_request(mhi_dev_ctxt, 1);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to scale bus request to active set.\n");
r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index ca4520a3c57d..1021a56d1b3d 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -17,7 +17,40 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+const char *state_transition_str(enum STATE_TRANSITION state)
+{
+ static const char * const mhi_states_transition_str[] = {
+ "RESET",
+ "READY",
+ "M0",
+ "M1",
+ "M2",
+ "M3",
+ "BHI",
+ "SBL",
+ "AMSS",
+ "LINK_DOWN",
+ "WAKE"
+ };
+
+ if (state == STATE_TRANSITION_SYS_ERR)
+ return "SYS_ERR";
+
+ return (state <= STATE_TRANSITION_WAKE) ?
+ mhi_states_transition_str[state] : "Invalid";
+}
+
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT);
+
+ return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+}
+
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATE new_state)
{
if (MHI_STATE_RESET == new_state) {
@@ -41,23 +74,22 @@ static void conditional_chan_db_write(
{
u64 db_value;
unsigned long flags;
-
- mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
- spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
- if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING, chan,
- (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- chan, db_value);
- }
- mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
- spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_XFER_RING,
+ chan,
+ (uintptr_t)mhi_ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ chan,
+ db_value);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
}
-static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
+static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool reset_db_mode)
{
u32 i = 0;
struct mhi_ring *local_ctxt = NULL;
@@ -66,40 +98,38 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
- if (IS_HARDWARE_CHANNEL(i))
- mhi_dev_ctxt->flags.db_mode[i] = 1;
- if ((local_ctxt->wp != local_ctxt->rp) ||
- ((local_ctxt->wp != local_ctxt->rp) &&
- (local_ctxt->dir == MHI_IN)))
+
+ /* Reset the DB Mode state to DB Mode */
+ if (local_ctxt->db_mode.preserve_db_state == 0
+ && reset_db_mode)
+ local_ctxt->db_mode.db_mode = 1;
+
+ if (local_ctxt->wp != local_ctxt->rp)
conditional_chan_db_write(mhi_dev_ctxt, i);
}
}
static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- struct mutex *cmd_mutex = NULL;
u64 db_value;
u64 rp = 0;
struct mhi_ring *local_ctxt = NULL;
mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
- cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
- mhi_dev_ctxt->cmd_ring_order = 0;
- mutex_lock(cmd_mutex);
+
local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
PRIMARY_CMD_RING,
(uintptr_t)local_ctxt->rp);
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
- PRIMARY_CMD_RING,
- (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
- if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- 0, db_value);
- mhi_dev_ctxt->cmd_ring_order = 0;
- mutex_unlock(cmd_mutex);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_CMD_RING,
+ PRIMARY_CMD_RING,
+ (uintptr_t)local_ctxt->wp);
+ if (rp != db_value)
+ local_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.cmd_db_addr,
+ 0,
+ db_value);
}
static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -107,24 +137,23 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
u32 i;
u64 db_value = 0;
struct mhi_event_ctxt *event_ctxt = NULL;
+ struct mhi_ring *mhi_ring;
spinlock_t *lock = NULL;
unsigned long flags;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
- mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+ mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
+ lock = &mhi_ring->ring_lock;
spin_lock_irqsave(lock, flags);
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
- i,
- (uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[i].wp);
- if (0 == mhi_dev_ctxt->mhi_ev_db_order[i]) {
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- i, db_value);
- }
- mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ i,
+ (uintptr_t)mhi_ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ i,
+ db_value);
spin_unlock_irqrestore(lock, flags);
}
}
@@ -133,168 +162,121 @@ static int process_m0_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- unsigned long flags;
- int r = 0;
- mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_log(MHI_MSG_INFO, "Entered With State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
+ switch (mhi_dev_ctxt->mhi_state) {
+ case MHI_STATE_M2:
mhi_dev_ctxt->counters.m2_m0++;
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
- mhi_dev_ctxt->counters.m3_m0++;
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
- mhi_log(MHI_MSG_INFO,
- "Transitioning from READY.\n");
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_log(MHI_MSG_INFO,
- "Transitioning from M1.\n");
- } else {
- mhi_log(MHI_MSG_INFO,
- "MHI State %d link state %d. Quitting\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
+ break;
+ case MHI_STATE_M3:
+ mhi_dev_ctxt->counters.m3_m0++;
+ break;
+ default:
+ break;
}
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- mhi_assert_device_wake(mhi_dev_ctxt);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, true);
if (mhi_dev_ctxt->flags.mhi_initialized) {
ring_all_ev_dbs(mhi_dev_ctxt);
- ring_all_chan_dbs(mhi_dev_ctxt);
+ ring_all_chan_dbs(mhi_dev_ctxt, true);
ring_all_cmd_dbs(mhi_dev_ctxt);
}
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- r = mhi_set_bus_request(mhi_dev_ctxt, 1);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- r);
- mhi_dev_ctxt->flags.pending_M0 = 0;
- if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
- atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
- atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
- }
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (!mhi_dev_ctxt->flags.pending_M3 &&
- mhi_dev_ctxt->flags.link_up &&
- mhi_dev_ctxt->flags.mhi_initialized)
- mhi_deassert_device_wake(mhi_dev_ctxt);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
}
-static int process_m1_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
+void process_m1_transition(struct work_struct *work)
{
- unsigned long flags = 0;
- int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+
+ mhi_dev_ctxt = container_of(work,
+ struct mhi_device_ctxt,
+ process_m1_worker);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(MHI_MSG_INFO,
- "Processing M1 state transition from state %d\n",
- mhi_dev_ctxt->mhi_state);
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (!mhi_dev_ctxt->flags.pending_M3) {
- mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
- atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
- mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
- mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
- mhi_dev_ctxt->counters.m1_m2++;
+ "Processing M1 state transition from state %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ /* We either Entered M3 or we did M3->M0 Exit */
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return;
}
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
- mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
+ mhi_log(MHI_MSG_INFO, "Transitioning to M2 Transition\n");
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+ mhi_dev_ctxt->counters.m1_m2++;
+ mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
msleep(MHI_M2_DEBOUNCE_TMR_MS);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
- if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
- mhi_dev_ctxt->flags.pending_M3) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- } else {
- pm_runtime_mark_last_busy(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- r = pm_request_autosuspend(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (r && r != -EAGAIN) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to remove counter ret %d\n", r);
- BUG_ON(mhi_dev_ctxt->dev_info->
- pcie_device->dev.power.runtime_error);
- }
+ /* During DEBOUNCE Time We could be receiving M0 Event */
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
+ mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
}
- atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
- mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- BUG_ON(atomic_read(&mhi_dev_ctxt->outbound_acks) < 0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- return 0;
+ if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+ mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
+ atomic_read(&mhi_dev_ctxt->counters.device_wake));
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, true);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ } else {
+ mhi_log(MHI_MSG_INFO, "Schedule RPM suspend");
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_request_autosuspend(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ }
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
static int process_m3_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- unsigned long flags;
mhi_log(MHI_MSG_INFO,
- "Processing M3 state transition\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
- mhi_dev_ctxt->flags.pending_M3 = 0;
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->counters.m0_m3++;
- return 0;
-}
-
-static int mhi_process_link_down(
- struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- unsigned long flags;
- int r;
-
- mhi_log(MHI_MSG_INFO, "Entered.\n");
- if (NULL == mhi_dev_ctxt)
- return -EINVAL;
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->flags.mhi_initialized = 0;
- mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- mhi_deassert_device_wake(mhi_dev_ctxt);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
- mhi_dev_ctxt->flags.stop_threads = 1;
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- mhi_log(MHI_MSG_INFO,
- "Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
- mhi_dev_ctxt->flags.st_thread_stopped,
- mhi_dev_ctxt->flags.ev_thread_stopped);
- msleep(20);
+ switch (mhi_dev_ctxt->mhi_state) {
+ case MHI_STATE_M1:
+ mhi_dev_ctxt->counters.m1_m3++;
+ break;
+ case MHI_STATE_M0:
+ mhi_dev_ctxt->counters.m0_m3++;
+ break;
+ default:
+ break;
}
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to scale bus request to sleep set.\n");
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- mhi_dev_ctxt->dev_info->link_down_cntr++;
- atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
- mhi_log(MHI_MSG_INFO, "Exited.\n");
-
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
return 0;
}
@@ -302,51 +284,20 @@ static int process_link_down_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Entered\n");
- if (0 !=
- mhi_process_link_down(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to process link down\n");
- }
- mhi_log(MHI_MSG_INFO, "Exited.\n");
- return 0;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
static int process_wake_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
-
- mhi_log(MHI_MSG_INFO, "Entered\n");
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
-
- if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Pending SSR, Ignoring.\n");
- goto exit;
- }
- if (mhi_dev_ctxt->flags.mhi_initialized) {
- r = pm_request_resume(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_VERBOSE,
- "MHI is initialized, transitioning to M0, ret %d\n", r);
- }
-
- if (!mhi_dev_ctxt->flags.mhi_initialized) {
- mhi_log(MHI_MSG_INFO,
- "MHI is not initialized transitioning to base.\n");
- r = init_mhi_base_state(mhi_dev_ctxt);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to transition to base state %d.\n",
- r);
- }
-
-exit:
- __pm_relax(&mhi_dev_ctxt->w_lock);
- mhi_log(MHI_MSG_INFO, "Exited.\n");
- return r;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
@@ -354,9 +305,10 @@ static int process_bhi_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_turn_on_pcie_link(mhi_dev_ctxt);
mhi_log(MHI_MSG_INFO, "Entered\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
@@ -369,36 +321,42 @@ static int process_ready_transition(
int r = 0;
mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
- mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
-
- if (r)
+ if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to reset thread queues\n");
+ return r;
+ }
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
r = mhi_init_mmio(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
/* Initialize MMIO */
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failure during MMIO initialization\n");
return r;
}
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
-
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failure during event ring init\n");
return r;
}
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->flags.stop_threads = 0;
- mhi_assert_device_wake(mhi_dev_ctxt);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
MHICTRL_MHISTATE_MASK,
MHICTRL_MHISTATE_SHIFT,
MHI_STATE_M0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
return r;
}
@@ -421,37 +379,22 @@ static int process_reset_transition(
enum STATE_TRANSITION cur_work_item)
{
int r = 0, i = 0;
- unsigned long flags = 0;
-
mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
mhi_dev_ctxt->counters.mhi_reset_cntr++;
- mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
r = mhi_test_for_device_reset(mhi_dev_ctxt);
if (r)
mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
r = mhi_test_for_device_ready(mhi_dev_ctxt);
- switch (r) {
- case 0:
- break;
- case -ENOTCONN:
- mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
- break;
- case -ETIMEDOUT:
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans\n",
- STATE_TRANSITION_RESET);
- break;
- default:
- mhi_log(MHI_MSG_CRITICAL,
- "Unexpected ret code detected for\n");
- break;
+ if (r) {
+ mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
+ r);
+ return r;
}
+
for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
@@ -475,8 +418,8 @@ static int process_reset_transition(
STATE_TRANSITION_READY);
if (0 != r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans\n",
- STATE_TRANSITION_READY);
+ "Failed to initiate %s state trans\n",
+ state_transition_str(STATE_TRANSITION_READY));
return r;
}
@@ -484,45 +427,10 @@ static int process_syserr_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
-
- mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
- mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to init state transition to RESET ret %d\n", r);
- mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n");
- }
- return r;
-}
-
-int start_chan_sync(struct mhi_client_handle *client_handle)
-{
- int r = 0;
- int chan = client_handle->chan_info.chan_nr;
-
- init_completion(&client_handle->chan_open_complete);
- r = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_START_CHAN,
- chan);
- if (r != 0) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to send start command for chan %d ret %d\n",
- chan, r);
- return r;
- }
- r = wait_for_completion_timeout(
- &client_handle->chan_open_complete,
- msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
- if (!r) {
- mhi_log(MHI_MSG_ERROR,
- "Timed out waiting for chan %d start completion\n",
- chan);
- r = -ETIME;
- }
- return 0;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -546,8 +454,7 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_info.flags))
mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
}
- if (exec_env == MHI_EXEC_ENV_AMSS)
- mhi_deassert_device_wake(mhi_dev_ctxt);
+
mhi_log(MHI_MSG_INFO, "Done.\n");
}
@@ -555,36 +462,25 @@ static int process_sbl_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
- pm_runtime_set_autosuspend_delay(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
- MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
- pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to activate runtime pm ret %d\n", r);
- }
- pm_runtime_enable(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Enabled runtime pm autosuspend\n");
+ mhi_log(MHI_MSG_INFO, "Enabled\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return 0;
-
}
static int process_amss_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0, i = 0;
- struct mhi_client_handle *client_handle = NULL;
+ int r = 0;
mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- mhi_assert_device_wake(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
if (!mhi_dev_ctxt->flags.mhi_initialized) {
r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
@@ -592,54 +488,40 @@ static int process_amss_transition(
if (r) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to set local chan state ret %d\n", r);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
return r;
}
- ring_all_chan_dbs(mhi_dev_ctxt);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ ring_all_chan_dbs(mhi_dev_ctxt, true);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(MHI_MSG_INFO,
"Notifying clients that MHI is enabled\n");
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
} else {
mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
- for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
- client_handle = mhi_dev_ctxt->client_handle_list[i];
- if (client_handle && client_handle->chan_status)
- r = start_chan_sync(client_handle);
- WARN(r, "Failed to start chan %d ret %d\n",
- i, r);
- return r;
- }
- ring_all_chan_dbs(mhi_dev_ctxt);
}
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ring_all_ev_dbs(mhi_dev_ctxt);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- if (!mhi_dev_ctxt->flags.pending_M3 &&
- mhi_dev_ctxt->flags.link_up)
- mhi_deassert_device_wake(mhi_dev_ctxt);
- mhi_log(MHI_MSG_INFO, "Exited\n");
- return 0;
-}
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int r = 0;
- unsigned long flags = 0;
+ /*
+ * runtime_allow will decrement usage_count, counts were
+ * incremented by pci fw pci_pm_init() or by
+ * mhi shutdown/ssr apis.
+ */
+ mhi_log(MHI_MSG_INFO, "Allow runtime suspend\n");
- mhi_log(MHI_MSG_INFO, "Entered\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->mhi_state = MHI_STATE_SYS_ERR;
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_allow(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Setting RESET to MDM.\n");
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
- mhi_log(MHI_MSG_INFO, "Transitioning state to RESET\n");
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans ret %d\n",
- STATE_TRANSITION_RESET, r);
- mhi_log(MHI_MSG_INFO, "Exiting\n");
- return r;
+ /* During probe we incremented, releasing that count */
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mhi_log(MHI_MSG_INFO, "Exited\n");
+ return 0;
}
static int process_stt_work_item(
@@ -648,8 +530,8 @@ static int process_stt_work_item(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Transitioning to %d\n",
- (int)cur_work_item);
+ mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
+ state_transition_str(cur_work_item));
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
case STATE_TRANSITION_BHI:
@@ -670,9 +552,6 @@ static int process_stt_work_item(
case STATE_TRANSITION_M0:
r = process_m0_transition(mhi_dev_ctxt, cur_work_item);
break;
- case STATE_TRANSITION_M1:
- r = process_m1_transition(mhi_dev_ctxt, cur_work_item);
- break;
case STATE_TRANSITION_M3:
r = process_m3_transition(mhi_dev_ctxt, cur_work_item);
break;
@@ -689,7 +568,8 @@ static int process_stt_work_item(
break;
default:
mhi_log(MHI_MSG_ERROR,
- "Unrecongized state: %d\n", cur_work_item);
+ "Unrecongized state: %s\n",
+ state_transition_str(cur_work_item));
break;
}
return r;
@@ -762,8 +642,8 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
BUG_ON(nr_avail_work_items <= 0);
mhi_log(MHI_MSG_VERBOSE,
- "Processing state transition %x\n",
- new_state);
+ "Processing state transition %s\n",
+ state_transition_str(new_state));
*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
BUG_ON(r);
@@ -771,216 +651,3 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
return r;
}
-
-int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int r = 0;
- unsigned long flags;
-
- mhi_log(MHI_MSG_INFO,
- "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
- mhi_dev_ctxt->flags.pending_M3);
- mutex_lock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO,
- "Waiting for M0 M1 or M3. Currently %d...\n",
- mhi_dev_ctxt->mhi_state);
-
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "Timeout: State %d after %d ms\n",
- mhi_dev_ctxt->mhi_state,
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m0_event_timeouts++;
- r = -ETIME;
- goto exit;
- case -ERESTARTSYS:
- mhi_log(MHI_MSG_CRITICAL,
- "Going Down...\n");
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
- r = 0;
- break;
- }
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, done\n",
- mhi_dev_ctxt->mhi_state);
- goto exit;
- } else {
- if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume link\n");
- r = -EIO;
- goto exit;
- }
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n");
- if (mhi_dev_ctxt->flags.pending_M3) {
- mhi_log(MHI_MSG_INFO,
- "Pending M3 detected, aborting M0 procedure\n");
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
- flags);
- r = -EPERM;
- goto exit;
- }
- if (mhi_dev_ctxt->flags.link_up) {
- mhi_dev_ctxt->flags.pending_M0 = 1;
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
- }
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = wait_event_interruptible_timeout(
- *mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- WARN_ON(!r || -ERESTARTSYS == r);
- if (!r || -ERESTARTSYS == r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to get M0 event ret %d\n", r);
- r = 0;
- }
-exit:
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO, "Exited...\n");
- return r;
-}
-
-int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-
- unsigned long flags;
- int r = 0, abort_m3 = 0;
-
- mhi_log(MHI_MSG_INFO,
- "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
- mhi_dev_ctxt->flags.pending_M3);
- mutex_lock(&mhi_dev_ctxt->pm_lock);
- switch (mhi_dev_ctxt->mhi_state) {
- case MHI_STATE_RESET:
- mhi_log(MHI_MSG_INFO,
- "MHI in RESET turning link off and quitting\n");
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to set bus freq ret %d\n", r);
- goto exit;
- case MHI_STATE_M0:
- case MHI_STATE_M1:
- case MHI_STATE_M2:
- mhi_log(MHI_MSG_INFO,
- "Triggering wake out of M2\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->flags.pending_M3 = 1;
- if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
- mhi_log(MHI_MSG_INFO,
- "M2 transition not set\n");
- mhi_assert_device_wake(mhi_dev_ctxt);
- }
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = wait_event_interruptible_timeout(
- *mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- if (0 == r || -ERESTARTSYS == r) {
- mhi_log(MHI_MSG_CRITICAL,
- "MDM failed to come out of M2.\n");
- mhi_dev_ctxt->counters.m2_event_timeouts++;
- r = -EAGAIN;
- goto exit;
- }
- break;
- case MHI_STATE_M3:
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, link state %d.\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->flags.link_up);
- if (mhi_dev_ctxt->flags.link_up)
- r = -EAGAIN;
- else
- r = 0;
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, link state %d.\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->flags.link_up);
- break;
- }
- while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
- mhi_log(MHI_MSG_INFO,
- "There are still %d acks pending from device\n",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
- __pm_relax(&mhi_dev_ctxt->w_lock);
- abort_m3 = 1;
- r = -EAGAIN;
- goto exit;
- }
-
- if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
- abort_m3 = 1;
- r = -EAGAIN;
- goto exit;
- }
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (mhi_dev_ctxt->flags.pending_M0) {
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = -EAGAIN;
- goto exit;
- }
- mhi_dev_ctxt->flags.pending_M3 = 1;
-
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
- mhi_log(MHI_MSG_INFO,
- "Waiting for M3 completion.\n");
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "MDM failed to suspend after %d ms\n",
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m3_event_timeouts++;
- mhi_dev_ctxt->flags.pending_M3 = 0;
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "M3 completion received\n");
- break;
- }
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
-exit:
- if (abort_m3) {
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- ring_all_chan_dbs(mhi_dev_ctxt);
- ring_all_cmd_dbs(mhi_dev_ctxt);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- mhi_deassert_device_wake(mhi_dev_ctxt);
- }
- mhi_dev_ctxt->flags.pending_M3 = 0;
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- return r;
-}
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index b8652770275e..c5c025b8585a 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -21,9 +21,9 @@
enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_ERROR;
#ifdef CONFIG_MSM_MHI_DEBUG
- enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
#else
- enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
#endif
unsigned int mhi_log_override;
@@ -34,6 +34,18 @@ MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl");
module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
+const char * const mhi_states_str[MHI_STATE_LIMIT] = {
+ "RESET",
+ "READY",
+ "M0",
+ "M1",
+ "M2",
+ "M3",
+ "Reserved: 0x06",
+ "BHI",
+ "SYS_ERR",
+};
+
static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
size_t count, loff_t *offp)
{
@@ -46,6 +58,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
int valid_chan = 0;
struct mhi_chan_ctxt *cc_list;
struct mhi_client_handle *client_handle;
+ int pkts_queued;
if (NULL == mhi_dev_ctxt)
return -EIO;
@@ -74,35 +87,37 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
&v_wp_index);
+ pkts_queued = client_handle->chan_info.max_desc -
+ get_nr_avail_ring_elements(&mhi_dev_ctxt->
+ mhi_local_chan_ctxt[*offp]) - 1;
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
- MHI_LOG_SIZE,
- "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
- "chan:",
- (unsigned int)*offp,
- "pkts from dev:",
- mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
- "state:",
- chan_ctxt->mhi_chan_state,
- "p_base:",
- chan_ctxt->mhi_trb_ring_base_addr,
- "v_base:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
- "v_wp:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
- "index:",
- v_wp_index,
- "v_rp:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
- "index:",
- v_rp_index,
- "pkts_queued",
- get_nr_avail_ring_elements(
- &mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]),
- "/",
- client_handle->chan_info.max_desc,
- "bb_used:",
- mhi_dev_ctxt->counters.bb_used[*offp]);
+ MHI_LOG_SIZE,
+ "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
+ "chan:",
+ (unsigned int)*offp,
+ "pkts from dev:",
+ mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
+ "state:",
+ chan_ctxt->chstate,
+ "p_base:",
+ chan_ctxt->mhi_trb_ring_base_addr,
+ "v_base:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
+ "v_wp:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
+ "index:",
+ v_wp_index,
+ "v_rp:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
+ "index:",
+ v_rp_index,
+ "pkts_queued",
+ pkts_queued,
+ "/",
+ client_handle->chan_info.max_desc,
+ "bb_used:",
+ mhi_dev_ctxt->counters.bb_used[*offp]);
*offp += 1;
@@ -224,39 +239,37 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
msleep(100);
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
- MHI_LOG_SIZE,
- "%s %u %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d, %s, %d, %s %d\n",
- "Our State:",
- mhi_dev_ctxt->mhi_state,
- "M0->M1:",
- mhi_dev_ctxt->counters.m0_m1,
- "M0<-M1:",
- mhi_dev_ctxt->counters.m1_m0,
- "M1->M2:",
- mhi_dev_ctxt->counters.m1_m2,
- "M0<-M2:",
- mhi_dev_ctxt->counters.m2_m0,
- "M0->M3:",
- mhi_dev_ctxt->counters.m0_m3,
- "M0<-M3:",
- mhi_dev_ctxt->counters.m3_m0,
- "M3_ev_TO:",
- mhi_dev_ctxt->counters.m3_event_timeouts,
- "M0_ev_TO:",
- mhi_dev_ctxt->counters.m0_event_timeouts,
- "MSI_d:",
- mhi_dev_ctxt->counters.msi_disable_cntr,
- "MSI_e:",
- mhi_dev_ctxt->counters.msi_enable_cntr,
- "outstanding_acks:",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks),
- "LPM:",
- mhi_dev_ctxt->enable_lpm);
+ MHI_LOG_SIZE,
+ "%s %s %s 0x%02x %s %u %s %u %s %u %s %u %s %u %s %u %s %d %s %d %s %d\n",
+ "MHI State:",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ "PM State:",
+ mhi_dev_ctxt->mhi_pm_state,
+ "M0->M1:",
+ mhi_dev_ctxt->counters.m0_m1,
+ "M1->M2:",
+ mhi_dev_ctxt->counters.m1_m2,
+ "M2->M0:",
+ mhi_dev_ctxt->counters.m2_m0,
+ "M0->M3:",
+ mhi_dev_ctxt->counters.m0_m3,
+ "M1->M3:",
+ mhi_dev_ctxt->counters.m1_m3,
+ "M3->M0:",
+ mhi_dev_ctxt->counters.m3_m0,
+ "device_wake:",
+ atomic_read(&mhi_dev_ctxt->counters.device_wake),
+ "usage_count:",
+ atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev.
+ power.usage_count),
+ "outbound_acks:",
+ atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
if (amnt_copied < count)
return amnt_copied - copy_to_user(buf,
mhi_dev_ctxt->chan_info, amnt_copied);
else
return -ENOMEM;
+ return 0;
}
static const struct file_operations mhi_dbgfs_state_fops = {
diff --git a/drivers/platform/msm/mhi/mhi_sys.h b/drivers/platform/msm/mhi/mhi_sys.h
index 765fd46ef2dd..a948a2354de7 100644
--- a/drivers/platform/msm/mhi/mhi_sys.h
+++ b/drivers/platform/msm/mhi/mhi_sys.h
@@ -46,6 +46,10 @@ extern void *mhi_ipc_log;
"[%s] " _msg, __func__, ##__VA_ARGS__); \
} while (0)
+extern const char * const mhi_states_str[MHI_STATE_LIMIT];
+#define TO_MHI_STATE_STR(state) (((state) >= MHI_STATE_LIMIT) ? \
+ "INVALID_STATE" : mhi_states_str[state])
+
irqreturn_t mhi_msi_handlr(int msi_number, void *dev_id);
struct mhi_meminfo {
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index d6eb96a3e89e..88a213b1b241 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -895,6 +895,8 @@ static int uci_init_client_attributes(struct mhi_uci_ctxt_t
case MHI_CLIENT_BL_IN:
case MHI_CLIENT_DUN_OUT:
case MHI_CLIENT_DUN_IN:
+ case MHI_CLIENT_TF_OUT:
+ case MHI_CLIENT_TF_IN:
chan_attrib->uci_ownership = 1;
break;
default:
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index d74b1432ea71..bb1259e3cfa1 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -23,9 +23,6 @@
#include <linux/of_platform.h>
#include <linux/msm_ext_display.h>
-#include "mdss_hdmi_util.h"
-#include "mdss_fb.h"
-
struct msm_ext_disp_list {
struct msm_ext_disp_init_data *data;
struct list_head list;
@@ -48,7 +45,6 @@ struct msm_ext_disp {
static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
enum msm_ext_disp_type type,
struct msm_ext_disp_init_data **data);
-static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack);
static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
enum msm_ext_disp_type type,
enum msm_ext_disp_cable_state state, u32 flags);
@@ -103,128 +99,6 @@ end:
return;
}
-static void msm_ext_disp_get_pdev_by_name(struct device *dev,
- const char *phandle, struct platform_device **pdev)
-{
- struct device_node *pd_np;
-
- if (!dev) {
- pr_err("Invalid device\n");
- return;
- }
-
- if (!dev->of_node) {
- pr_err("Invalid of_node\n");
- return;
- }
-
- pd_np = of_parse_phandle(dev->of_node, phandle, 0);
- if (!pd_np) {
- pr_err("Cannot find %s dev\n", phandle);
- return;
- }
-
- *pdev = of_find_device_by_node(pd_np);
-}
-
-static void msm_ext_disp_get_fb_pdev(struct device *device,
- struct platform_device **fb_pdev)
-{
- struct msm_fb_data_type *mfd = NULL;
- struct fb_info *fbi = dev_get_drvdata(device);
-
- if (!fbi) {
- pr_err("fb_info is null\n");
- return;
- }
-
- mfd = (struct msm_fb_data_type *)fbi->par;
-
- *fb_pdev = mfd->pdev;
-}
-static ssize_t msm_ext_disp_sysfs_wta_audio_cb(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- int ack, ret = 0;
- ssize_t size = strnlen(buf, PAGE_SIZE);
- const char *ext_phandle = "qcom,msm_ext_disp";
- struct platform_device *ext_pdev = NULL;
- const char *intf_phandle = "qcom,mdss-intf";
- struct platform_device *intf_pdev = NULL;
- struct platform_device *fb_pdev = NULL;
-
- ret = kstrtoint(buf, 10, &ack);
- if (ret) {
- pr_err("kstrtoint failed. ret=%d\n", ret);
- goto end;
- }
-
- msm_ext_disp_get_fb_pdev(dev, &fb_pdev);
- if (!fb_pdev) {
- pr_err("failed to get fb pdev\n");
- goto end;
- }
-
- msm_ext_disp_get_pdev_by_name(&fb_pdev->dev, intf_phandle, &intf_pdev);
- if (!intf_pdev) {
- pr_err("failed to get display intf pdev\n");
- goto end;
- }
-
- msm_ext_disp_get_pdev_by_name(&intf_pdev->dev, ext_phandle, &ext_pdev);
- if (!ext_pdev) {
- pr_err("failed to get ext_pdev\n");
- goto end;
- }
-
- ret = msm_ext_disp_audio_ack(ext_pdev, ack);
- if (ret)
- pr_err("Failed to process ack. ret=%d\n", ret);
-
-end:
- return size;
-}
-
-static DEVICE_ATTR(hdmi_audio_cb, S_IWUSR, NULL,
- msm_ext_disp_sysfs_wta_audio_cb);
-
-static struct attribute *msm_ext_disp_fs_attrs[] = {
- &dev_attr_hdmi_audio_cb.attr,
- NULL,
-};
-
-static struct attribute_group msm_ext_disp_fs_attrs_group = {
- .attrs = msm_ext_disp_fs_attrs,
-};
-
-static int msm_ext_disp_sysfs_create(struct msm_ext_disp_init_data *data)
-{
- int ret = 0;
-
- if (!data || !data->kobj) {
- pr_err("Invalid params\n");
- ret = -EINVAL;
- goto end;
- }
-
- ret = sysfs_create_group(data->kobj, &msm_ext_disp_fs_attrs_group);
- if (ret)
- pr_err("Failed, ret=%d\n", ret);
-
-end:
- return ret;
-}
-
-static void msm_ext_disp_sysfs_remove(struct msm_ext_disp_init_data *data)
-{
- if (!data || !data->kobj) {
- pr_err("Invalid params\n");
- return;
- }
-
- sysfs_remove_group(data->kobj, &msm_ext_disp_fs_attrs_group);
-}
-
static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
{
switch (type) {
@@ -293,31 +167,6 @@ end:
return ret;
}
-static void msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp,
- enum msm_ext_disp_type type)
-{
- struct msm_ext_disp_list *node;
- struct list_head *position = NULL;
- struct list_head *temp = NULL;
-
- if (!ext_disp) {
- pr_err("Invalid params\n");
- return;
- }
-
- list_for_each_safe(position, temp, &ext_disp->display_list) {
- node = list_entry(position, struct msm_ext_disp_list, list);
- if (node->data->type == type) {
- msm_ext_disp_sysfs_remove(node->data);
- list_del(&node->list);
- pr_debug("Removed display (%s)\n",
- msm_ext_disp_name(type));
- kfree(node);
- break;
- }
- }
-}
-
static int msm_ext_disp_send_cable_notification(struct msm_ext_disp *ext_disp,
enum msm_ext_disp_cable_state new_state)
{
@@ -661,6 +510,46 @@ static void msm_ext_disp_teardown_done(struct platform_device *pdev)
complete_all(&ext_disp->hpd_comp);
}
+static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
+{
+ u32 ack_hpd;
+ int ret = 0;
+ struct msm_ext_disp *ext_disp = NULL;
+
+ if (!pdev) {
+ pr_err("Invalid platform device\n");
+ return -EINVAL;
+ }
+
+ ext_disp = platform_get_drvdata(pdev);
+ if (!ext_disp) {
+ pr_err("Invalid drvdata\n");
+ return -EINVAL;
+ }
+
+ if (ack & AUDIO_ACK_SET_ENABLE) {
+ ext_disp->ack_enabled = ack & AUDIO_ACK_ENABLE ?
+ true : false;
+
+ pr_debug("audio ack feature %s\n",
+ ext_disp->ack_enabled ? "enabled" : "disabled");
+ goto end;
+ }
+
+ if (!ext_disp->ack_enabled)
+ goto end;
+
+ ack_hpd = ack & AUDIO_ACK_CONNECT;
+
+ pr_debug("%s acknowledging audio (%d)\n",
+ msm_ext_disp_name(ext_disp->current_disp), ack_hpd);
+
+ if (!ext_disp->audio_session_on)
+ complete_all(&ext_disp->hpd_comp);
+end:
+ return ret;
+}
+
static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
{
int ret = 0;
@@ -710,12 +599,14 @@ static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
ops->cable_status = msm_ext_disp_cable_status;
ops->get_intf_id = msm_ext_disp_get_intf_id;
ops->teardown_done = msm_ext_disp_teardown_done;
+ ops->acknowledge = msm_ext_disp_audio_ack;
} else {
ops->audio_info_setup = NULL;
ops->get_audio_edid_blk = NULL;
ops->cable_status = NULL;
ops->get_intf_id = NULL;
ops->teardown_done = NULL;
+ ops->acknowledge = NULL;
}
end:
return ret;
@@ -755,46 +646,6 @@ end:
return ret;
}
-static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
-{
- u32 ack_hpd;
- int ret = 0;
- struct msm_ext_disp *ext_disp = NULL;
-
- if (!pdev) {
- pr_err("Invalid platform device\n");
- return -EINVAL;
- }
-
- ext_disp = platform_get_drvdata(pdev);
- if (!ext_disp) {
- pr_err("Invalid drvdata\n");
- return -EINVAL;
- }
-
- if (ack & AUDIO_ACK_SET_ENABLE) {
- ext_disp->ack_enabled = ack & AUDIO_ACK_ENABLE ?
- true : false;
-
- pr_debug("audio ack feature %s\n",
- ext_disp->ack_enabled ? "enabled" : "disabled");
- goto end;
- }
-
- if (!ext_disp->ack_enabled)
- goto end;
-
- ack_hpd = ack & AUDIO_ACK_CONNECT;
-
- pr_debug("%s acknowledging audio (%d)\n",
- msm_ext_disp_name(ext_disp->current_disp), ack_hpd);
-
- if (!ext_disp->audio_session_on)
- complete_all(&ext_disp->hpd_comp);
-end:
- return ret;
-}
-
int msm_hdmi_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
@@ -849,11 +700,6 @@ static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
return -EINVAL;
}
- if (!init_data->kobj) {
- pr_err("Invalid display intf kobj\n");
- return -EINVAL;
- }
-
if (!init_data->codec_ops.get_audio_edid_blk ||
!init_data->codec_ops.cable_status ||
!init_data->codec_ops.audio_info_setup) {
@@ -899,10 +745,6 @@ int msm_ext_disp_register_intf(struct platform_device *pdev,
if (ret)
goto end;
- ret = msm_ext_disp_sysfs_create(init_data);
- if (ret)
- goto sysfs_failure;
-
init_data->intf_ops.hpd = msm_ext_disp_hpd;
init_data->intf_ops.notify = msm_ext_disp_notify;
@@ -913,8 +755,6 @@ int msm_ext_disp_register_intf(struct platform_device *pdev,
return ret;
-sysfs_failure:
- msm_ext_disp_remove_intf_data(ext_disp, init_data->type);
end:
mutex_unlock(&ext_disp->lock);
@@ -1035,7 +875,7 @@ static void __exit msm_ext_disp_exit(void)
platform_driver_unregister(&this_driver);
}
-module_init(msm_ext_disp_init);
+subsys_initcall(msm_ext_disp_init);
module_exit(msm_ext_disp_exit);
MODULE_LICENSE("GPL v2");
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 35dc3842017b..4973f91c8af1 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -45,6 +45,7 @@ struct pl_data {
struct votable *fv_votable;
struct votable *pl_disable_votable;
struct votable *pl_awake_votable;
+ struct votable *hvdcp_hw_inov_dis_votable;
struct work_struct status_change_work;
struct work_struct pl_disable_forever_work;
struct delayed_work pl_taper_work;
@@ -96,7 +97,8 @@ static void split_settled(struct pl_data *chip)
* for desired split
*/
- if (chip->pl_mode != POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+ if ((chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN)
+ && (chip->pl_mode != POWER_SUPPLY_PL_USBIN_USBIN_EXT))
return;
if (!chip->main_psy)
@@ -262,7 +264,7 @@ static void split_fcc(struct pl_data *chip, int total_ua,
hw_cc_delta_ua = pval.intval;
bcl_ua = INT_MAX;
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_MID_MID) {
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBMID_USBMID) {
rc = power_supply_get_property(chip->main_psy,
POWER_SUPPLY_PROP_INPUT_CURRENT_SETTLED, &pval);
if (rc < 0) {
@@ -287,7 +289,15 @@ static void split_fcc(struct pl_data *chip, int total_ua,
slave_limited_ua = min(effective_total_ua, bcl_ua);
*slave_ua = (slave_limited_ua * chip->slave_pct) / 100;
*slave_ua = (*slave_ua * chip->taper_pct) / 100;
- *master_ua = max(0, total_ua - *slave_ua);
+ /*
+ * In USBIN_USBIN configuration with internal rsense, parallel
+ * charger's current goes through main charger's BATFET, keep
+ * the main charger's FCC to the votable result.
+ */
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ *master_ua = max(0, total_ua);
+ else
+ *master_ua = max(0, total_ua - *slave_ua);
}
static int pl_fcc_vote_callback(struct votable *votable, void *data,
@@ -316,7 +326,7 @@ static int pl_fcc_vote_callback(struct votable *votable, void *data,
total_fcc_ua = pval.intval;
}
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_NONE
+ if (chip->pl_mode == POWER_SUPPLY_PL_NONE
|| get_effective_result_locked(chip->pl_disable_votable)) {
pval.intval = total_fcc_ua;
rc = power_supply_set_property(chip->main_psy,
@@ -391,7 +401,7 @@ static int pl_fv_vote_callback(struct votable *votable, void *data,
return rc;
}
- if (chip->pl_mode != POWER_SUPPLY_PARALLEL_NONE) {
+ if (chip->pl_mode != POWER_SUPPLY_PL_NONE) {
pval.intval += PARALLEL_FLOAT_VOLTAGE_DELTA_UV;
rc = power_supply_set_property(chip->pl_psy,
POWER_SUPPLY_PROP_VOLTAGE_MAX, &pval);
@@ -411,6 +421,10 @@ static void pl_disable_forever_work(struct work_struct *work)
/* Disable Parallel charger forever */
vote(chip->pl_disable_votable, PL_HW_ABSENT_VOTER, true, 0);
+
+ /* Re-enable autonomous mode */
+ if (chip->hvdcp_hw_inov_dis_votable)
+ vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER, false, 0);
}
static int pl_disable_vote_callback(struct votable *votable,
@@ -451,7 +465,8 @@ static int pl_disable_vote_callback(struct votable *votable,
pr_err("Couldn't change slave suspend state rc=%d\n",
rc);
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
split_settled(chip);
/*
* we could have been enabled while in taper mode,
@@ -469,7 +484,8 @@ static int pl_disable_vote_callback(struct votable *votable,
}
}
} else {
- if (chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN)
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT))
split_settled(chip);
/* pl_psy may be NULL while in the disable branch */
@@ -552,6 +568,21 @@ static bool is_parallel_available(struct pl_data *chip)
* pl_psy is present and valid.
*/
chip->pl_mode = pval.intval;
+
+ /* Disable autonomous voltage increments for USBIN-USBIN */
+ if ((chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN)
+ || (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+ if (!chip->hvdcp_hw_inov_dis_votable)
+ chip->hvdcp_hw_inov_dis_votable =
+ find_votable("HVDCP_HW_INOV_DIS");
+ if (chip->hvdcp_hw_inov_dis_votable)
+ /* Read current pulse count */
+ vote(chip->hvdcp_hw_inov_dis_votable, PL_VOTER,
+ true, 0);
+ else
+ return false;
+ }
+
vote(chip->pl_disable_votable, PARALLEL_PSY_VOTER, false, 0);
return true;
@@ -604,13 +635,16 @@ static void handle_main_charge_type(struct pl_data *chip)
}
#define MIN_ICL_CHANGE_DELTA_UA 300000
-static void handle_settled_aicl_split(struct pl_data *chip)
+static void handle_settled_icl_change(struct pl_data *chip)
{
union power_supply_propval pval = {0, };
int rc;
- if (!get_effective_result(chip->pl_disable_votable)
- && chip->pl_mode == POWER_SUPPLY_PARALLEL_USBIN_USBIN) {
+ if (get_effective_result(chip->pl_disable_votable))
+ return;
+
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
+ || chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
/*
* call aicl split only when USBIN_USBIN and enabled
* and if aicl changed
@@ -627,6 +661,8 @@ static void handle_settled_aicl_split(struct pl_data *chip)
if (abs((chip->main_settled_ua - chip->pl_settled_ua)
- pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
split_settled(chip);
+ } else {
+ rerun_election(chip->fcc_votable);
}
}
@@ -673,7 +709,7 @@ static void status_change_work(struct work_struct *work)
is_parallel_available(chip);
handle_main_charge_type(chip);
- handle_settled_aicl_split(chip);
+ handle_settled_icl_change(chip);
handle_parallel_in_taper(chip);
}
@@ -687,7 +723,8 @@ static int pl_notifier_call(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "parallel") == 0)
- || (strcmp(psy->desc->name, "battery") == 0))
+ || (strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "main") == 0))
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
diff --git a/drivers/power/supply/qcom/qpnp-fg.c b/drivers/power/supply/qcom/qpnp-fg.c
index e4a8ade80d4f..cfd2f64a9bb8 100644
--- a/drivers/power/supply/qcom/qpnp-fg.c
+++ b/drivers/power/supply/qcom/qpnp-fg.c
@@ -549,6 +549,7 @@ struct fg_trans {
struct fg_chip *chip;
struct fg_log_buffer *log; /* log buffer */
u8 *data; /* fg data that is read */
+ struct mutex memif_dfs_lock; /* Prevent thread concurrency */
};
struct fg_dbgfs {
@@ -5730,6 +5731,7 @@ static int fg_memif_data_open(struct inode *inode, struct file *file)
trans->addr = dbgfs_data.addr;
trans->chip = dbgfs_data.chip;
trans->offset = trans->addr;
+ mutex_init(&trans->memif_dfs_lock);
file->private_data = trans;
return 0;
@@ -5741,6 +5743,7 @@ static int fg_memif_dfs_close(struct inode *inode, struct file *file)
if (trans && trans->log && trans->data) {
file->private_data = NULL;
+ mutex_destroy(&trans->memif_dfs_lock);
kfree(trans->log);
kfree(trans->data);
kfree(trans);
@@ -5898,10 +5901,13 @@ static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
size_t ret;
size_t len;
+ mutex_lock(&trans->memif_dfs_lock);
/* Is the the log buffer empty */
if (log->rpos >= log->wpos) {
- if (get_log_data(trans) <= 0)
- return 0;
+ if (get_log_data(trans) <= 0) {
+ len = 0;
+ goto unlock_mutex;
+ }
}
len = min(count, log->wpos - log->rpos);
@@ -5909,7 +5915,8 @@ static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
ret = copy_to_user(buf, &log->data[log->rpos], len);
if (ret == len) {
pr_err("error copy sram register values to user\n");
- return -EFAULT;
+ len = -EFAULT;
+ goto unlock_mutex;
}
/* 'ret' is the number of bytes not copied */
@@ -5917,6 +5924,9 @@ static ssize_t fg_memif_dfs_reg_read(struct file *file, char __user *buf,
*ppos += len;
log->rpos += len;
+
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
return len;
}
@@ -5937,14 +5947,20 @@ static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
int cnt = 0;
u8 *values;
size_t ret = 0;
+ char *kbuf;
+ u32 offset;
struct fg_trans *trans = file->private_data;
- u32 offset = trans->offset;
+
+ mutex_lock(&trans->memif_dfs_lock);
+ offset = trans->offset;
/* Make a copy of the user data */
- char *kbuf = kmalloc(count + 1, GFP_KERNEL);
- if (!kbuf)
- return -ENOMEM;
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf) {
+ ret = -ENOMEM;
+ goto unlock_mutex;
+ }
ret = copy_from_user(kbuf, buf, count);
if (ret == count) {
@@ -5995,6 +6011,8 @@ static ssize_t fg_memif_dfs_reg_write(struct file *file, const char __user *buf,
free_buf:
kfree(kbuf);
+unlock_mutex:
+ mutex_unlock(&trans->memif_dfs_lock);
return ret;
}
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index b51ce758a16b..64f4d46df9a0 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -848,6 +848,8 @@ static enum power_supply_property smb2_batt_props[] = {
POWER_SUPPLY_PROP_PARALLEL_DISABLE,
POWER_SUPPLY_PROP_SET_SHIP_MODE,
POWER_SUPPLY_PROP_DIE_HEALTH,
+ POWER_SUPPLY_PROP_RERUN_AICL,
+ POWER_SUPPLY_PROP_DP_DM,
};
static int smb2_batt_get_prop(struct power_supply *psy,
@@ -933,6 +935,12 @@ static int smb2_batt_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_DIE_HEALTH:
rc = smblib_get_prop_die_health(chg, val);
break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ val->intval = chg->pulse_cnt;
+ break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ val->intval = 0;
+ break;
default:
pr_err("batt power supply prop %d not supported\n", psp);
return -EINVAL;
@@ -984,8 +992,17 @@ static int smb2_batt_set_prop(struct power_supply *psy,
/* Not in ship mode as long as the device is active */
if (!val->intval)
break;
+ if (chg->pl.psy)
+ power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
rc = smblib_set_prop_ship_mode(chg, val);
break;
+ case POWER_SUPPLY_PROP_RERUN_AICL:
+ rc = smblib_rerun_aicl(chg);
+ break;
+ case POWER_SUPPLY_PROP_DP_DM:
+ rc = smblib_dp_dm(chg, val->intval);
+ break;
default:
rc = -EINVAL;
}
@@ -1001,6 +1018,8 @@ static int smb2_batt_prop_is_writeable(struct power_supply *psy,
case POWER_SUPPLY_PROP_SYSTEM_TEMP_LEVEL:
case POWER_SUPPLY_PROP_CAPACITY:
case POWER_SUPPLY_PROP_PARALLEL_DISABLE:
+ case POWER_SUPPLY_PROP_DP_DM:
+ case POWER_SUPPLY_PROP_RERUN_AICL:
return 1;
default:
break;
@@ -1615,7 +1634,7 @@ static int smb2_chg_config_init(struct smb2 *chip)
switch (pmic_rev_id->pmic_subtype) {
case PMI8998_SUBTYPE:
chip->chg.smb_version = PMI8998_SUBTYPE;
- chip->chg.wa_flags |= BOOST_BACK_WA;
+ chip->chg.wa_flags |= BOOST_BACK_WA | QC_AUTH_INTERRUPT_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V1P1_REV4) /* PMI rev 1.1 */
chg->wa_flags |= QC_CHARGER_DETECTION_WA_BIT;
if (pmic_rev_id->rev4 == PMI8998_V2P0_REV4) /* PMI rev 2.0 */
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index 33339588599e..5d4b46469e9c 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -58,6 +58,12 @@ int smblib_read(struct smb_charger *chg, u16 addr, u8 *val)
return rc;
}
+int smblib_multibyte_read(struct smb_charger *chg, u16 addr, u8 *val,
+ int count)
+{
+ return regmap_bulk_read(chg->regmap, addr, val, count);
+}
+
int smblib_masked_write(struct smb_charger *chg, u16 addr, u8 mask, u8 val)
{
int rc = 0;
@@ -375,13 +381,13 @@ static int smblib_set_opt_freq_buck(struct smb_charger *chg, int fsw_khz)
if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
pval.intval = fsw_khz;
- rc = power_supply_set_property(chg->pl.psy,
+ /*
+ * Some parallel charging implementations may not have
+ * PROP_BUCK_FREQ property - they could be running
+ * with a fixed frequency
+ */
+ power_supply_set_property(chg->pl.psy,
POWER_SUPPLY_PROP_BUCK_FREQ, &pval);
- if (rc < 0) {
- dev_err(chg->dev,
- "Could not set parallel buck_freq rc=%d\n", rc);
- return rc;
- }
}
return rc;
@@ -633,11 +639,15 @@ static void smblib_uusb_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
- /* reset AUTH_IRQ_EN_CFG_BIT */
- rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /* re-enable AUTH_IRQ_EN_CFG_BIT */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable QC auth setting rc=%d\n", rc);
+ }
/* reconfigure allowed voltage for HVDCP */
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
@@ -648,6 +658,8 @@ static void smblib_uusb_removal(struct smb_charger *chg)
chg->voltage_min_uv = MICRO_5V;
chg->voltage_max_uv = MICRO_5V;
+ chg->usb_icl_delta_ua = 0;
+ chg->pulse_cnt = 0;
/* clear USB ICL vote for USB_PSY_VOTER */
rc = vote(chg->usb_icl_votable, USB_PSY_VOTER, false, 0);
@@ -737,6 +749,40 @@ int smblib_rerun_apsd_if_required(struct smb_charger *chg)
return 0;
}
+static int smblib_get_pulse_cnt(struct smb_charger *chg, int *count)
+{
+ int rc;
+ u8 val[2];
+
+ switch (chg->smb_version) {
+ case PMI8998_SUBTYPE:
+ rc = smblib_read(chg, QC_PULSE_COUNT_STATUS_REG, val);
+ if (rc) {
+ pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ *count = val[0] & QC_PULSE_COUNT_MASK;
+ break;
+ case PM660_SUBTYPE:
+ rc = smblib_multibyte_read(chg,
+ QC_PULSE_COUNT_STATUS_1_REG, val, 2);
+ if (rc) {
+ pr_err("failed to read QC_PULSE_COUNT_STATUS_1_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ *count = (val[1] << 8) | val[0];
+ break;
+ default:
+ smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+ chg->smb_version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
/*********************
* VOTABLE CALLBACKS *
*********************/
@@ -875,7 +921,6 @@ override_suspend_config:
enable_icl_changed_interrupt:
enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
-
return rc;
}
@@ -971,8 +1016,10 @@ static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
{
struct smb_charger *chg = data;
int rc;
- u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT
- | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT | HVDCP_EN_BIT;
+ u8 val = HVDCP_AUTH_ALG_EN_CFG_BIT | HVDCP_EN_BIT;
+
+ /* vote to enable/disable HW autonomous INOV */
+ vote(chg->hvdcp_hw_inov_dis_votable, client, !hvdcp_enable, 0);
/*
* Disable the autonomous bit and auth bit for disabling hvdcp.
@@ -983,9 +1030,7 @@ static int smblib_hvdcp_enable_vote_callback(struct votable *votable,
val = HVDCP_EN_BIT;
rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
- HVDCP_EN_BIT
- | HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT
- | HVDCP_AUTH_ALG_EN_CFG_BIT,
+ HVDCP_EN_BIT | HVDCP_AUTH_ALG_EN_CFG_BIT,
val);
if (rc < 0) {
smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
@@ -1054,6 +1099,37 @@ static int smblib_apsd_disable_vote_callback(struct votable *votable,
return 0;
}
+static int smblib_hvdcp_hw_inov_dis_vote_callback(struct votable *votable,
+ void *data, int disable, const char *client)
+{
+ struct smb_charger *chg = data;
+ int rc;
+
+ if (disable) {
+ /*
+ * the pulse count register gets zeroed when autonomous mode is
+ * disabled. Track that in variables before disabling
+ */
+ rc = smblib_get_pulse_cnt(chg, &chg->pulse_cnt);
+ if (rc < 0) {
+ pr_err("failed to read QC_PULSE_COUNT_STATUS_REG rc=%d\n",
+ rc);
+ return rc;
+ }
+ }
+
+ rc = smblib_masked_write(chg, USBIN_OPTIONS_1_CFG_REG,
+ HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT,
+ disable ? 0 : HVDCP_AUTONOMOUS_MODE_EN_CFG_BIT);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't %s hvdcp rc=%d\n",
+ disable ? "disable" : "enable", rc);
+ return rc;
+ }
+
+ return rc;
+}
+
/*******************
* VCONN REGULATOR *
* *****************/
@@ -1641,6 +1717,119 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg,
return 0;
}
+int smblib_rerun_aicl(struct smb_charger *chg)
+{
+ int rc, settled_icl_ua;
+ u8 stat;
+
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* USB is suspended so skip re-running AICL */
+ if (stat & USBIN_SUSPEND_STS_BIT)
+ return rc;
+
+ smblib_dbg(chg, PR_MISC, "re-running AICL\n");
+ switch (chg->smb_version) {
+ case PMI8998_SUBTYPE:
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+ return rc;
+ }
+
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+ max(settled_icl_ua - chg->param.usb_icl.step_u,
+ chg->param.usb_icl.step_u));
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+ break;
+ case PM660_SUBTYPE:
+ /*
+ * Use restart_AICL instead of trigger_AICL as it runs the
+ * complete AICL instead of starting from the last settled
+ * value.
+ */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+ RESTART_AICL_BIT, RESTART_AICL_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+ break;
+ default:
+ smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
+ chg->smb_version);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int smblib_dp_pulse(struct smb_charger *chg)
+{
+ int rc;
+
+ /* QC 3.0 increment */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_INCREMENT_BIT,
+ SINGLE_INCREMENT_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+static int smblib_dm_pulse(struct smb_charger *chg)
+{
+ int rc;
+
+ /* QC 3.0 decrement */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, SINGLE_DECREMENT_BIT,
+ SINGLE_DECREMENT_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
+
+ return rc;
+}
+
+int smblib_dp_dm(struct smb_charger *chg, int val)
+{
+ int target_icl_ua, rc = 0;
+
+ switch (val) {
+ case POWER_SUPPLY_DP_DM_DP_PULSE:
+ rc = smblib_dp_pulse(chg);
+ if (!rc)
+ chg->pulse_cnt++;
+ smblib_dbg(chg, PR_PARALLEL, "DP_DM_DP_PULSE rc=%d cnt=%d\n",
+ rc, chg->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_DM_PULSE:
+ rc = smblib_dm_pulse(chg);
+ if (!rc && chg->pulse_cnt)
+ chg->pulse_cnt--;
+ smblib_dbg(chg, PR_PARALLEL, "DP_DM_DM_PULSE rc=%d cnt=%d\n",
+ rc, chg->pulse_cnt);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_DOWN:
+ chg->usb_icl_delta_ua -= 100000;
+ target_icl_ua = get_effective_result(chg->usb_icl_votable);
+ vote(chg->usb_icl_votable, SW_QC3_VOTER, true,
+ target_icl_ua + chg->usb_icl_delta_ua);
+ break;
+ case POWER_SUPPLY_DP_DM_ICL_UP:
+ default:
+ break;
+ }
+
+ return rc;
+}
+
/*******************
* DC PSY GETTERS *
*******************/
@@ -2234,6 +2423,10 @@ int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
}
chg->voltage_max_uv = max_uv;
+ rc = smblib_rerun_aicl(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
+
return rc;
}
@@ -2660,7 +2853,6 @@ int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
int current_ua, rc;
if (reduction_ua == 0) {
- chg->icl_reduction_ua = 0;
vote(chg->usb_icl_votable, PL_USBIN_USBIN_VOTER, false, 0);
} else {
/*
@@ -2678,6 +2870,8 @@ int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua)
}
}
+ chg->icl_reduction_ua = reduction_ua;
+
return rerun_election(chg->usb_icl_votable);
}
@@ -2910,26 +3104,38 @@ irqreturn_t smblib_handle_usb_plugin(int irq, void *data)
}
#define USB_WEAK_INPUT_UA 1400000
+#define ICL_CHANGE_DELAY_MS 1000
irqreturn_t smblib_handle_icl_change(int irq, void *data)
{
+ u8 stat;
+ int rc, settled_ua, delay = ICL_CHANGE_DELAY_MS;
struct smb_irq_data *irq_data = data;
struct smb_charger *chg = irq_data->parent_data;
- int rc, settled_ua;
-
- rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
- if (rc < 0) {
- smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
- return IRQ_HANDLED;
- }
if (chg->mode == PARALLEL_MASTER) {
- power_supply_changed(chg->usb_main_psy);
- vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
- settled_ua >= USB_WEAK_INPUT_UA, 0);
+ rc = smblib_read(chg, AICL_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read AICL_STATUS rc=%d\n",
+ rc);
+ return IRQ_HANDLED;
+ }
+
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+ return IRQ_HANDLED;
+ }
+
+ /* If AICL settled then schedule work now */
+ if ((settled_ua == get_effective_result(chg->usb_icl_votable))
+ || (stat & AICL_DONE_BIT))
+ delay = 0;
+
+ schedule_delayed_work(&chg->icl_change_work,
+ msecs_to_jiffies(delay));
}
- smblib_dbg(chg, PR_INTERRUPT, "IRQ: %s icl_settled=%d\n",
- irq_data->name, settled_ua);
return IRQ_HANDLED;
}
@@ -3019,14 +3225,18 @@ static void smblib_handle_hvdcp_3p0_auth_done(struct smb_charger *chg,
if (!rising)
return;
- /*
- * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
- * change interrupt.
- */
- rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, 0);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /*
+ * Disable AUTH_IRQ_EN_CFG_BIT to receive adapter voltage
+ * change interrupt.
+ */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, 0);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable QC auth setting rc=%d\n", rc);
+ }
if (chg->mode == PARALLEL_MASTER)
vote(chg->pl_enable_votable_indirect, USBIN_V_VOTER, true, 0);
@@ -3184,11 +3394,15 @@ static void typec_source_removal(struct smb_charger *chg)
cancel_delayed_work_sync(&chg->hvdcp_detect_work);
- /* reset AUTH_IRQ_EN_CFG_BIT */
- rc = smblib_masked_write(chg, USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
- AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
- if (rc < 0)
- smblib_err(chg, "Couldn't enable QC auth setting rc=%d\n", rc);
+ if (chg->wa_flags & QC_AUTH_INTERRUPT_WA_BIT) {
+ /* re-enable AUTH_IRQ_EN_CFG_BIT */
+ rc = smblib_masked_write(chg,
+ USBIN_SOURCE_CHANGE_INTRPT_ENB_REG,
+ AUTH_IRQ_EN_CFG_BIT, AUTH_IRQ_EN_CFG_BIT);
+ if (rc < 0)
+ smblib_err(chg,
+ "Couldn't enable QC auth setting rc=%d\n", rc);
+ }
/* reconfigure allowed voltage for HVDCP */
rc = smblib_write(chg, USBIN_ADAPTER_ALLOW_CFG_REG,
@@ -3266,6 +3480,8 @@ static void smblib_handle_typec_removal(struct smb_charger *chg)
chg->vconn_attempts = 0;
chg->otg_attempts = 0;
+ chg->pulse_cnt = 0;
+ chg->usb_icl_delta_ua = 0;
chg->usb_ever_removed = true;
@@ -3788,6 +4004,25 @@ static void smblib_otg_ss_done_work(struct work_struct *work)
mutex_unlock(&chg->otg_oc_lock);
}
+static void smblib_icl_change_work(struct work_struct *work)
+{
+ struct smb_charger *chg = container_of(work, struct smb_charger,
+ icl_change_work.work);
+ int rc, settled_ua;
+
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat, &settled_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get ICL status rc=%d\n", rc);
+ return;
+ }
+
+ power_supply_changed(chg->usb_main_psy);
+ vote(chg->pl_enable_votable_indirect, USBIN_I_VOTER,
+ settled_ua >= USB_WEAK_INPUT_UA, 0);
+
+ smblib_dbg(chg, PR_INTERRUPT, "icl_settled=%d\n", settled_ua);
+}
+
static int smblib_create_votables(struct smb_charger *chg)
{
int rc = 0;
@@ -3904,6 +4139,15 @@ static int smblib_create_votables(struct smb_charger *chg)
return rc;
}
+ chg->hvdcp_hw_inov_dis_votable = create_votable("HVDCP_HW_INOV_DIS",
+ VOTE_SET_ANY,
+ smblib_hvdcp_hw_inov_dis_vote_callback,
+ chg);
+ if (IS_ERR(chg->hvdcp_hw_inov_dis_votable)) {
+ rc = PTR_ERR(chg->hvdcp_hw_inov_dis_votable);
+ return rc;
+ }
+
return rc;
}
@@ -3927,6 +4171,8 @@ static void smblib_destroy_votables(struct smb_charger *chg)
destroy_votable(chg->pl_enable_votable_indirect);
if (chg->apsd_disable_votable)
destroy_votable(chg->apsd_disable_votable);
+ if (chg->hvdcp_hw_inov_dis_votable)
+ destroy_votable(chg->hvdcp_hw_inov_dis_votable);
}
static void smblib_iio_deinit(struct smb_charger *chg)
@@ -3957,6 +4203,7 @@ int smblib_init(struct smb_charger *chg)
INIT_WORK(&chg->otg_oc_work, smblib_otg_oc_work);
INIT_WORK(&chg->vconn_oc_work, smblib_vconn_oc_work);
INIT_DELAYED_WORK(&chg->otg_ss_done_work, smblib_otg_ss_done_work);
+ INIT_DELAYED_WORK(&chg->icl_change_work, smblib_icl_change_work);
chg->fake_capacity = -EINVAL;
switch (chg->mode) {
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index b2cfeeda5792..21ccd3ce57c7 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -56,6 +56,8 @@ enum print_reason {
#define PD_SUSPEND_SUPPORTED_VOTER "PD_SUSPEND_SUPPORTED_VOTER"
#define PL_DELAY_HVDCP_VOTER "PL_DELAY_HVDCP_VOTER"
#define CTM_VOTER "CTM_VOTER"
+#define SW_QC3_VOTER "SW_QC3_VOTER"
+#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
@@ -77,6 +79,7 @@ enum {
QC_CHARGER_DETECTION_WA_BIT = BIT(0),
BOOST_BACK_WA = BIT(1),
TYPEC_CC2_REMOVAL_WA_BIT = BIT(2),
+ QC_AUTH_INTERRUPT_WA_BIT = BIT(3),
};
enum smb_irq_index {
@@ -201,6 +204,7 @@ struct smb_iio {
struct iio_channel *usbin_i_chan;
struct iio_channel *usbin_v_chan;
struct iio_channel *batt_i_chan;
+ struct iio_channel *connector_temp_chan;
struct iio_channel *connector_temp_thr1_chan;
struct iio_channel *connector_temp_thr2_chan;
struct iio_channel *connector_temp_thr3_chan;
@@ -266,6 +270,7 @@ struct smb_charger {
struct votable *hvdcp_disable_votable_indirect;
struct votable *hvdcp_enable_votable;
struct votable *apsd_disable_votable;
+ struct votable *hvdcp_hw_inov_dis_votable;
/* work */
struct work_struct bms_update_work;
@@ -277,6 +282,7 @@ struct smb_charger {
struct work_struct otg_oc_work;
struct work_struct vconn_oc_work;
struct delayed_work otg_ss_done_work;
+ struct delayed_work icl_change_work;
/* cached status */
int voltage_min_uv;
@@ -314,6 +320,8 @@ struct smb_charger {
/* qnovo */
int qnovo_fcc_ua;
int qnovo_fv_uv;
+ int usb_icl_delta_ua;
+ int pulse_cnt;
};
int smblib_read(struct smb_charger *chg, u16 addr, u8 *val);
@@ -471,6 +479,8 @@ int smblib_get_prop_fcc_delta(struct smb_charger *chg,
union power_supply_propval *val);
int smblib_icl_override(struct smb_charger *chg, bool override);
int smblib_set_icl_reduction(struct smb_charger *chg, int reduction_ua);
+int smblib_dp_dm(struct smb_charger *chg, int val);
+int smblib_rerun_aicl(struct smb_charger *chg);
int smblib_init(struct smb_charger *chg);
int smblib_deinit(struct smb_charger *chg);
diff --git a/drivers/power/supply/qcom/smb-reg.h b/drivers/power/supply/qcom/smb-reg.h
index e005a27e8dd9..54b6b38d134b 100644
--- a/drivers/power/supply/qcom/smb-reg.h
+++ b/drivers/power/supply/qcom/smb-reg.h
@@ -535,6 +535,8 @@ enum {
#define USBIN_LT_3P6V_RT_STS_BIT BIT(1)
#define USBIN_COLLAPSE_RT_STS_BIT BIT(0)
+#define QC_PULSE_COUNT_STATUS_1_REG (USBIN_BASE + 0x30)
+
#define USBIN_CMD_IL_REG (USBIN_BASE + 0x40)
#define BAT_2_SYS_FET_DIS_BIT BIT(1)
#define USBIN_SUSPEND_BIT BIT(0)
@@ -544,6 +546,7 @@ enum {
#define APSD_RERUN_BIT BIT(0)
#define CMD_HVDCP_2_REG (USBIN_BASE + 0x43)
+#define RESTART_AICL_BIT BIT(7)
#define TRIGGER_AICL_BIT BIT(6)
#define FORCE_12V_BIT BIT(5)
#define FORCE_9V_BIT BIT(4)
diff --git a/drivers/power/supply/qcom/smb1351-charger.c b/drivers/power/supply/qcom/smb1351-charger.c
index d0cf37b0613a..8467d167512f 100644
--- a/drivers/power/supply/qcom/smb1351-charger.c
+++ b/drivers/power/supply/qcom/smb1351-charger.c
@@ -460,6 +460,7 @@ struct smb1351_charger {
int workaround_flags;
int parallel_pin_polarity_setting;
+ int parallel_mode;
bool parallel_charger;
bool parallel_charger_suspended;
bool bms_controlled_charging;
@@ -1699,7 +1700,7 @@ static int smb1351_parallel_get_property(struct power_supply *psy,
val->intval = 0;
break;
case POWER_SUPPLY_PROP_PARALLEL_MODE:
- val->intval = POWER_SUPPLY_PARALLEL_USBIN_USBIN;
+ val->intval = chip->parallel_mode;
break;
default:
return -EINVAL;
@@ -3191,6 +3192,12 @@ static int smb1351_parallel_charger_probe(struct i2c_client *client,
chip->parallel_pin_polarity_setting ?
EN_BY_PIN_HIGH_ENABLE : EN_BY_PIN_LOW_ENABLE;
+ if (of_property_read_bool(node,
+ "qcom,parallel-external-current-sense"))
+ chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN_EXT;
+ else
+ chip->parallel_mode = POWER_SUPPLY_PL_USBIN_USBIN;
+
i2c_set_clientdata(client, chip);
chip->parallel_psy_d.name = "parallel";
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index 4180edc89a4c..1c7c1e78699f 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -44,6 +44,8 @@
#define SMB2CHG_DC_TM_SREFGEN (DCIN_BASE + 0xE2)
#define STACKED_DIODE_EN_BIT BIT(2)
+#define TDIE_AVG_COUNT 10
+
enum {
OOB_COMP_WA_BIT = BIT(0),
};
@@ -118,6 +120,27 @@ irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data)
return IRQ_HANDLED;
}
+static int smb138x_get_prop_charger_temp(struct smb138x *chip,
+ union power_supply_propval *val)
+{
+ union power_supply_propval pval;
+ int rc = 0, avg = 0, i;
+ struct smb_charger *chg = &chip->chg;
+
+ for (i = 0; i < TDIE_AVG_COUNT; i++) {
+ pval.intval = 0;
+ rc = smblib_get_prop_charger_temp(chg, &pval);
+ if (rc < 0) {
+ pr_err("Couldnt read chg temp at %dth iteration rc = %d\n",
+ i + 1, rc);
+ return rc;
+ }
+ avg += pval.intval;
+ }
+ val->intval = avg / TDIE_AVG_COUNT;
+ return rc;
+}
+
static int smb138x_parse_dt(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -312,6 +335,7 @@ static enum power_supply_property smb138x_batt_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
};
static int smb138x_batt_get_prop(struct power_supply *psy,
@@ -342,11 +366,15 @@ static int smb138x_batt_get_prop(struct power_supply *psy,
rc = smblib_get_prop_batt_capacity(chg, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
- rc = smblib_get_prop_charger_temp(chg, val);
+ rc = smb138x_get_prop_charger_temp(chip, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
rc = smblib_get_prop_charger_temp_max(chg, val);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as device is active */
+ val->intval = 0;
+ break;
default:
pr_err("batt power supply get prop %d not supported\n", prop);
return -EINVAL;
@@ -375,6 +403,12 @@ static int smb138x_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
rc = smblib_set_prop_batt_capacity(chg, val);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as the device is active */
+ if (!val->intval)
+ break;
+ rc = smblib_set_prop_ship_mode(chg, val);
+ break;
default:
pr_err("batt power supply set prop %d not supported\n", prop);
return -EINVAL;
@@ -430,6 +464,57 @@ static int smb138x_init_batt_psy(struct smb138x *chip)
* PARALLEL PSY REGISTRATION *
*****************************/
+static int smb138x_get_prop_connector_health(struct smb138x *chip)
+{
+ struct smb_charger *chg = &chip->chg;
+ int rc, lb_mdegc, ub_mdegc, rst_mdegc, connector_mdegc;
+
+ if (!chg->iio.connector_temp_chan ||
+ PTR_ERR(chg->iio.connector_temp_chan) == -EPROBE_DEFER)
+ chg->iio.connector_temp_chan = iio_channel_get(chg->dev,
+ "connector_temp");
+
+ if (IS_ERR(chg->iio.connector_temp_chan))
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_thr1_chan,
+ &lb_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector lower bound rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_thr2_chan,
+ &ub_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector upper bound rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_thr3_chan,
+ &rst_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector reset bound rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ rc = iio_read_channel_processed(chg->iio.connector_temp_chan,
+ &connector_mdegc);
+ if (rc < 0) {
+ pr_err("Couldn't read connector temperature rc=%d\n", rc);
+ return POWER_SUPPLY_HEALTH_UNKNOWN;
+ }
+
+ if (connector_mdegc < lb_mdegc)
+ return POWER_SUPPLY_HEALTH_COOL;
+ else if (connector_mdegc < ub_mdegc)
+ return POWER_SUPPLY_HEALTH_WARM;
+ else if (connector_mdegc < rst_mdegc)
+ return POWER_SUPPLY_HEALTH_HOT;
+
+ return POWER_SUPPLY_HEALTH_OVERHEAT;
+}
+
static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_CHARGE_TYPE,
POWER_SUPPLY_PROP_CHARGING_ENABLED,
@@ -443,6 +528,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PARALLEL_MODE,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -484,7 +570,7 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_prop_slave_current_now(chg, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
- rc = smblib_get_prop_charger_temp(chg, val);
+ rc = smb138x_get_prop_charger_temp(chip, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
rc = smblib_get_prop_charger_temp_max(chg, val);
@@ -493,10 +579,14 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
val->strval = "smb138x";
break;
case POWER_SUPPLY_PROP_PARALLEL_MODE:
- val->intval = POWER_SUPPLY_PARALLEL_MID_MID;
+ val->intval = POWER_SUPPLY_PL_USBMID_USBMID;
break;
case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
- rc = smblib_get_prop_die_health(chg, val);
+ val->intval = smb138x_get_prop_connector_health(chip);
+ break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as device is active */
+ val->intval = 0;
break;
default:
pr_err("parallel power supply get prop %d not supported\n",
@@ -554,12 +644,14 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
break;
- case POWER_SUPPLY_PROP_BUCK_FREQ:
- rc = smblib_set_charge_param(chg, &chg->param.freq_buck,
- val->intval);
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as the device is active */
+ if (!val->intval)
+ break;
+ rc = smblib_set_prop_ship_mode(chg, val);
break;
default:
- pr_err("parallel power supply set prop %d not supported\n",
+ pr_debug("parallel power supply set prop %d not supported\n",
prop);
return -EINVAL;
}
@@ -838,6 +930,13 @@ static int smb138x_init_hw(struct smb138x *chip)
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+ /* configure to a fixed 700khz freq to avoid tdie errors */
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+ if (rc < 0) {
+ pr_err("Couldn't configure 700Khz switch freq rc=%d\n", rc);
+ return rc;
+ }
+
/* configure charge enable for software control; active high */
rc = smblib_masked_write(chg, CHGR_CFG2_REG,
CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
diff --git a/drivers/regulator/cpr4-mmss-ldo-regulator.c b/drivers/regulator/cpr4-mmss-ldo-regulator.c
index 525f9d6fcf75..6c6d112d2a6a 100644
--- a/drivers/regulator/cpr4-mmss-ldo-regulator.c
+++ b/drivers/regulator/cpr4-mmss-ldo-regulator.c
@@ -135,9 +135,15 @@ static const int sdm660_mmss_fuse_ref_volt[SDM660_MMSS_FUSE_CORNERS] = {
#define SDM660_MMSS_VOLTAGE_FUSE_SIZE 5
#define SDM660_MMSS_CPR_SENSOR_COUNT 11
+#define SDM630_MMSS_CPR_SENSOR_COUNT 7
#define SDM660_MMSS_CPR_CLOCK_RATE 19200000
+enum {
+ SDM660_SOC_ID,
+ SDM630_SOC_ID,
+};
+
/**
* cpr4_sdm660_mmss_read_fuse_data() - load MMSS specific fuse parameter
* values
@@ -594,7 +600,10 @@ static int cpr4_mmss_init_controller(struct cpr3_controller *ctrl)
return rc;
}
- ctrl->sensor_count = SDM660_MMSS_CPR_SENSOR_COUNT;
+ if (ctrl->soc_revision == SDM660_SOC_ID)
+ ctrl->sensor_count = SDM660_MMSS_CPR_SENSOR_COUNT;
+ else if (ctrl->soc_revision == SDM630_SOC_ID)
+ ctrl->sensor_count = SDM630_MMSS_CPR_SENSOR_COUNT;
/*
* MMSS only has one thread (0) so the zeroed array does not need
@@ -632,9 +641,23 @@ static int cpr4_mmss_init_controller(struct cpr3_controller *ctrl)
return 0;
}
+/* Data corresponds to the SoC revision */
+static const struct of_device_id cpr4_mmss_regulator_match_table[] = {
+ {
+ .compatible = "qcom,cpr4-sdm660-mmss-ldo-regulator",
+ .data = (void *)(uintptr_t)SDM660_SOC_ID,
+ },
+ {
+ .compatible = "qcom,cpr4-sdm630-mmss-ldo-regulator",
+ .data = (void *)(uintptr_t)SDM630_SOC_ID,
+ },
+ { },
+};
+
static int cpr4_mmss_regulator_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ const struct of_device_id *match;
struct cpr3_controller *ctrl;
int rc;
@@ -659,6 +682,12 @@ static int cpr4_mmss_regulator_probe(struct platform_device *pdev)
return rc;
}
+ match = of_match_node(cpr4_mmss_regulator_match_table, dev->of_node);
+ if (match)
+ ctrl->soc_revision = (uintptr_t)match->data;
+ else
+ cpr3_err(ctrl, "could not find compatible string match\n");
+
rc = cpr3_map_fuse_base(ctrl, pdev);
if (rc) {
cpr3_err(ctrl, "could not map fuse base address\n");
@@ -731,19 +760,6 @@ static int cpr4_mmss_regulator_resume(struct platform_device *pdev)
return cpr3_regulator_resume(ctrl);
}
-/* Data corresponds to the SoC revision */
-static const struct of_device_id cpr4_mmss_regulator_match_table[] = {
- {
- .compatible = "qcom,cpr4-sdm660-mmss-ldo-regulator",
- .data = (void *)NULL,
- },
- {
- .compatible = "qcom,cpr4-sdm630-mmss-ldo-regulator",
- .data = (void *)NULL,
- },
- { },
-};
-
static struct platform_driver cpr4_mmss_regulator_driver = {
.driver = {
.name = "qcom,cpr4-mmss-ldo-regulator",
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 9be8bb94b257..2307da9497dc 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -37,7 +37,8 @@
#define MSM8998_KBSS_FUSE_CORNERS 4
#define SDM660_KBSS_FUSE_CORNERS 5
-#define SDM630_KBSS_FUSE_CORNERS 4
+#define SDM630_POWER_KBSS_FUSE_CORNERS 3
+#define SDM630_PERF_KBSS_FUSE_CORNERS 5
/**
* struct cprh_kbss_fuses - KBSS specific fuse data
@@ -131,18 +132,32 @@ static const char * const cprh_sdm660_perf_kbss_fuse_corner_name[] = {
[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2] = "TURBO_L2",
};
-enum cprh_sdm630_kbss_fuse_corner {
- CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS = 0,
- CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS = 1,
- CPRH_SDM630_KBSS_FUSE_CORNER_NOM = 2,
- CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1 = 3,
+enum cprh_sdm630_power_kbss_fuse_corner {
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS = 0,
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS = 1,
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1 = 2,
};
-static const char * const cprh_sdm630_kbss_fuse_corner_name[] = {
- [CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
- [CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
- [CPRH_SDM630_KBSS_FUSE_CORNER_NOM] = "NOM",
- [CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+static const char * const cprh_sdm630_power_kbss_fuse_corner_name[] = {
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+};
+
+enum cprh_sdm630_perf_kbss_fuse_corner {
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS = 0,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS = 1,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM = 2,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO = 3,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2 = 4,
+};
+
+static const char * const cprh_sdm630_perf_kbss_fuse_corner_name[] = {
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM] = "NOM",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO] = "TURBO",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2] = "TURBO_L2",
};
/* KBSS cluster IDs */
@@ -202,17 +217,17 @@ sdm660_kbss_ro_sel_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_ro_sel_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_ro_sel_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{67, 12, 15}, {} },
{{65, 56, 59}, {} },
- {{67, 4, 7}, {} },
{{67, 0, 3}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{68, 61, 63}, {69, 0, 0} },
{{69, 1, 4}, {} },
{{68, 57, 60}, {} },
+ {{68, 53, 56}, {} },
{{66, 14, 17}, {} },
},
};
@@ -252,17 +267,17 @@ sdm660_kbss_init_voltage_param[2][SDM660_KBSS_FUSE_CORNERS][2] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_init_voltage_param[2][SDM630_KBSS_FUSE_CORNERS][2] = {
+sdm630_kbss_init_voltage_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][2] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{67, 34, 39}, {} },
{{71, 3, 8}, {} },
- {{67, 22, 27}, {} },
{{67, 16, 21}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{69, 17, 22}, {} },
{{69, 23, 28}, {} },
{{69, 11, 16}, {} },
+ {{69, 5, 10}, {} },
{{70, 42, 47}, {} },
},
};
@@ -302,17 +317,17 @@ sdm660_kbss_target_quot_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_target_quot_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_target_quot_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{68, 12, 23}, {} },
{{71, 9, 20}, {} },
- {{67, 52, 63}, {} },
{{67, 40, 51}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{69, 53, 63}, {70, 0, 0}, {} },
{{70, 1, 12}, {} },
{{69, 41, 52}, {} },
+ {{69, 29, 40}, {} },
{{70, 48, 59}, {} },
},
};
@@ -352,17 +367,17 @@ sdm660_kbss_quot_offset_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_quot_offset_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_quot_offset_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{} },
{{71, 21, 27}, {} },
- {{68, 31, 37}, {} },
{{68, 24, 30}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{} },
{{70, 27, 33}, {} },
{{70, 20, 26}, {} },
+ {{70, 13, 19}, {} },
{{70, 60, 63}, {71, 0, 2}, {} },
},
};
@@ -484,18 +499,27 @@ sdm660_kbss_fuse_ref_volt[2][SDM660_KBSS_FUSE_CORNERS] = {
* Open loop voltage fuse reference voltages in microvolts for SDM630
*/
static const int
-sdm630_kbss_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
- 644000,
- 788000,
- 868000,
- 1068000,
+sdm630_kbss_fuse_ref_volt[2][SDM630_PERF_KBSS_FUSE_CORNERS] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ 644000,
+ 788000,
+ 1068000,
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ 644000,
+ 788000,
+ 868000,
+ 988000,
+ 1068000,
+ },
};
static const int
-sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
+sdm630_perf_kbss_speed_bin_2_fuse_ref_volt[SDM630_PERF_KBSS_FUSE_CORNERS] = {
644000,
788000,
868000,
+ 988000,
1140000,
};
@@ -552,7 +576,7 @@ sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
* sdm630 configuration
*/
#define SDM630_KBSS_POWER_CPR_SENSOR_COUNT 6
-#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 9
+#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 6
/*
* SOC IDs
@@ -760,7 +784,7 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
struct cprh_kbss_fuses *fuse)
{
void __iomem *base = vreg->thread->ctrl->fuse_base;
- int i, id, rc;
+ int i, id, rc, fuse_corners;
rc = cpr3_read_fuse_param(base, sdm630_cpr_fusing_rev_param,
&fuse->cpr_fusing_rev);
@@ -772,7 +796,12 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
id = vreg->thread->ctrl->ctrl_id;
- for (i = 0; i < SDM630_KBSS_FUSE_CORNERS; i++) {
+ if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+ fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+ else
+ fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
+
+ for (i = 0; i < fuse_corners; i++) {
rc = cpr3_read_fuse_param(base,
sdm630_kbss_init_voltage_param[id][i],
&fuse->init_voltage[i]);
@@ -856,7 +885,10 @@ static int cprh_kbss_read_fuse_data(struct cpr3_regulator *vreg)
fuse_corners = SDM660_KBSS_FUSE_CORNERS;
break;
case SDM630_SOC_ID:
- fuse_corners = SDM630_KBSS_FUSE_CORNERS;
+ if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+ fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+ else
+ fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
break;
case MSM8998_V1_SOC_ID:
case MSM8998_V2_SOC_ID:
@@ -1008,10 +1040,15 @@ static int cprh_kbss_calculate_open_loop_voltages(struct cpr3_regulator *vreg)
corner_name = cprh_sdm660_perf_kbss_fuse_corner_name;
break;
case SDM630_SOC_ID:
- ref_volt = sdm630_kbss_fuse_ref_volt;
- if (vreg->speed_bin_fuse == 2)
- ref_volt = sdm630_kbss_speed_bin_2_fuse_ref_volt;
- corner_name = cprh_sdm630_kbss_fuse_corner_name;
+ ref_volt = sdm630_kbss_fuse_ref_volt[id];
+ if (id == CPRH_KBSS_PERFORMANCE_CLUSTER_ID
+ && vreg->speed_bin_fuse == 2)
+ ref_volt = sdm630_perf_kbss_speed_bin_2_fuse_ref_volt;
+
+ if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+ corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+ else
+ corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
break;
case MSM8998_V1_SOC_ID:
ref_volt = msm8998_v1_kbss_fuse_ref_volt;
@@ -1581,11 +1618,19 @@ static int cprh_kbss_calculate_target_quotients(struct cpr3_regulator *vreg)
}
break;
case SDM630_SOC_ID:
- corner_name = cprh_sdm630_kbss_fuse_corner_name;
- lowest_fuse_corner =
- CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS;
- highest_fuse_corner =
- CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1;
+ if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+ corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+ lowest_fuse_corner =
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS;
+ highest_fuse_corner =
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1;
+ } else {
+ corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
+ lowest_fuse_corner =
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS;
+ highest_fuse_corner =
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2;
+ }
break;
case MSM8998_V1_SOC_ID:
case MSM8998_V2_SOC_ID:
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index cf8f00085a0c..dbe2a08f1776 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -19,16 +19,19 @@
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/spmi.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
#define QPNP_LABIBB_REGULATOR_DRIVER_NAME "qcom,qpnp-labibb-regulator"
@@ -594,6 +597,7 @@ struct qpnp_labibb {
const struct lab_ver_ops *lab_ver_ops;
struct mutex bus_mutex;
enum qpnp_labibb_mode mode;
+ struct work_struct lab_vreg_ok_work;
bool standalone;
bool ttw_en;
bool in_ttw_mode;
@@ -603,10 +607,13 @@ struct qpnp_labibb {
bool ttw_force_lab_on;
bool skip_2nd_swire_cmd;
bool pfm_enable;
+ bool notify_lab_vreg_ok_sts;
u32 swire_2nd_cmd_delay;
u32 swire_ibb_ps_enable_delay;
};
+static RAW_NOTIFIER_HEAD(labibb_notifier);
+
struct ibb_ver_ops {
int (*set_default_voltage)(struct qpnp_labibb *labibb,
bool use_default);
@@ -2124,6 +2131,36 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
return rc;
}
+static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
+{
+ int rc = 0;
+ u16 retries = 1000, dly = 5000;
+ u8 val;
+ struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
+ lab_vreg_ok_work);
+
+ while (retries--) {
+ rc = qpnp_labibb_read(labibb, labibb->lab_base +
+ REG_LAB_STATUS1, &val, 1);
+ if (rc < 0) {
+ pr_err("read register %x failed rc = %d\n",
+ REG_LAB_STATUS1, rc);
+ return;
+ }
+
+ if (val & LAB_STATUS1_VREG_OK) {
+ raw_notifier_call_chain(&labibb_notifier,
+ LAB_VREG_OK, NULL);
+ break;
+ }
+
+ usleep_range(dly, dly + 100);
+ }
+
+ if (!(val &amp; LAB_STATUS1_VREG_OK))
+ pr_err("LAB_VREG_OK not set, failed to notify\n");
+}
+
static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
{
int rc;
@@ -2326,6 +2363,9 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
labibb->lab_vreg.vreg_enabled = 1;
}
+ if (labibb->notify_lab_vreg_ok_sts)
+ schedule_work(&labibb->lab_vreg_ok_work);
+
return 0;
}
@@ -2578,6 +2618,9 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
return rc;
}
+ labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
+ "qcom,notify-lab-vreg-ok-sts");
+
rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
&(labibb->lab_vreg.soft_start));
if (!rc) {
@@ -3817,6 +3860,8 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
goto fail_registration;
}
}
+
+ INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
dev_set_drvdata(&pdev->dev, labibb);
pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
labibb->lab_vreg.vreg_enabled,
@@ -3834,6 +3879,18 @@ fail_registration:
return rc;
}
+int qpnp_labibb_notifier_register(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_register);
+
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb)
+{
+ return raw_notifier_chain_unregister(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_unregister);
+
static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
{
struct qpnp_labibb *labibb = dev_get_drvdata(&pdev->dev);
@@ -3843,6 +3900,8 @@ static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
regulator_unregister(labibb->lab_vreg.rdev);
if (labibb->ibb_vreg.rdev)
regulator_unregister(labibb->ibb_vreg.rdev);
+
+ cancel_work_sync(&labibb->lab_vreg_ok_work);
}
return 0;
}
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index a08ade61e057..19b1d319ef45 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -190,6 +190,9 @@ struct qpnp_lcdb {
bool ttw_enable;
bool ttw_mode_sw;
+ /* top level DT params */
+ bool force_module_reenable;
+
/* status parameters */
bool lcdb_enabled;
bool settings_saved;
@@ -588,6 +591,23 @@ static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb)
goto fail_enable;
}
+ if (lcdb->force_module_reenable) {
+ val = 0;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to enable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ val = MODULE_EN_BIT;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to disable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ }
+
/* poll for vreg_ok */
timeout = 10;
delay = lcdb->bst.soft_start_us + lcdb->ldo.soft_start_us +
@@ -1590,6 +1610,9 @@ static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
}
}
+ lcdb->force_module_reenable = of_property_read_bool(node,
+ "qcom,force-module-reenable");
+
if (of_property_read_bool(node, "qcom,ttw-enable")) {
rc = qpnp_lcdb_parse_ttw(lcdb);
if (rc < 0) {
diff --git a/drivers/scsi/ufs/ufs-qcom.c b/drivers/scsi/ufs/ufs-qcom.c
index 7369478a8c5d..bc00a7e9572f 100644
--- a/drivers/scsi/ufs/ufs-qcom.c
+++ b/drivers/scsi/ufs/ufs-qcom.c
@@ -35,6 +35,9 @@
#include "ufs-qcom-debugfs.h"
#include <linux/clk/msm-clk.h>
+#define MAX_PROP_SIZE 32
+#define VDDP_REF_CLK_MIN_UV 1200000
+#define VDDP_REF_CLK_MAX_UV 1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
@@ -709,40 +712,105 @@ static int ufs_qcom_link_startup_notify(struct ufs_hba *hba,
return err;
}
-static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+
+static int ufs_qcom_config_vreg(struct device *dev,
+ struct ufs_vreg *vreg, bool on)
{
- struct ufs_qcom_host *host = ufshcd_get_variant(hba);
- struct phy *phy = host->generic_phy;
int ret = 0;
+ struct regulator *reg;
+ int min_uV, uA_load;
- if (ufs_qcom_is_link_off(hba)) {
- /*
- * Disable the tx/rx lane symbol clocks before PHY is
- * powered down as the PLL source should be disabled
- * after downstream clocks are disabled.
- */
- ufs_qcom_disable_lane_clks(host);
- phy_power_off(phy);
- ret = ufs_qcom_ice_suspend(host);
+ if (!vreg) {
+ WARN_ON(1);
+ ret = -EINVAL;
+ goto out;
+ }
+
+ reg = vreg->reg;
+ if (regulator_count_voltages(reg) > 0) {
+ min_uV = on ? vreg->min_uV : 0;
+ ret = regulator_set_voltage(reg, min_uV, vreg->max_uV);
+ if (ret) {
+ dev_err(dev, "%s: %s set voltage failed, err=%d\n",
+ __func__, vreg->name, ret);
+ goto out;
+ }
+
+ uA_load = on ? vreg->max_uA : 0;
+ ret = regulator_set_load(vreg->reg, uA_load);
if (ret)
- dev_err(hba->dev, "%s: failed ufs_qcom_ice_suspend %d\n",
- __func__, ret);
+ goto out;
+ }
+out:
+ return ret;
+}
+
+static int ufs_qcom_enable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (vreg->enabled)
+ return ret;
- /* Assert PHY soft reset */
- ufs_qcom_assert_reset(hba);
+ ret = ufs_qcom_config_vreg(dev, vreg, true);
+ if (ret)
+ goto out;
+
+ ret = regulator_enable(vreg->reg);
+ if (ret)
+ goto out;
+
+ vreg->enabled = true;
+out:
+ return ret;
+}
+
+static int ufs_qcom_disable_vreg(struct device *dev, struct ufs_vreg *vreg)
+{
+ int ret = 0;
+
+ if (!vreg->enabled)
+ return ret;
+
+ ret = regulator_disable(vreg->reg);
+ if (ret)
+ goto out;
+
+ ret = ufs_qcom_config_vreg(dev, vreg, false);
+ if (ret)
goto out;
- }
+
+ vreg->enabled = false;
+out:
+ return ret;
+}
+
+static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
+{
+ struct ufs_qcom_host *host = ufshcd_get_variant(hba);
+ struct phy *phy = host->generic_phy;
+ int ret = 0;
/*
- * If UniPro link is not active, PHY ref_clk, main PHY analog power
- * rail and low noise analog power rail for PLL can be switched off.
+ * If UniPro link is not active or OFF, PHY ref_clk, main PHY analog
+ * power rail and low noise analog power rail for PLL can be
+ * switched off.
*/
if (!ufs_qcom_is_link_active(hba)) {
ufs_qcom_disable_lane_clks(host);
phy_power_off(phy);
+
+ if (host->vddp_ref_clk && ufs_qcom_is_link_off(hba))
+ ret = ufs_qcom_disable_vreg(hba->dev,
+ host->vddp_ref_clk);
ufs_qcom_ice_suspend(host);
- }
+ if (ufs_qcom_is_link_off(hba)) {
+ /* Assert PHY soft reset */
+ ufs_qcom_assert_reset(hba);
+ goto out;
+ }
+ }
/* Unvote PM QoS */
ufs_qcom_pm_qos_suspend(host);
@@ -763,6 +831,11 @@ static int ufs_qcom_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
}
+ if (host->vddp_ref_clk && (hba->rpm_lvl > UFS_PM_LVL_3 ||
+ hba->spm_lvl > UFS_PM_LVL_3))
+ ufs_qcom_enable_vreg(hba->dev,
+ host->vddp_ref_clk);
+
err = ufs_qcom_enable_lane_clks(host);
if (err)
goto out;
@@ -1951,6 +2024,57 @@ static void ufs_qcom_parse_lpm(struct ufs_qcom_host *host)
pr_info("%s: will disable all LPM modes\n", __func__);
}
+static int ufs_qcom_parse_reg_info(struct ufs_qcom_host *host, char *name,
+ struct ufs_vreg **out_vreg)
+{
+ int ret = 0;
+ char prop_name[MAX_PROP_SIZE];
+ struct ufs_vreg *vreg = NULL;
+ struct device *dev = host->hba->dev;
+ struct device_node *np = dev->of_node;
+
+ if (!np) {
+ dev_err(dev, "%s: non DT initialization\n", __func__);
+ goto out;
+ }
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-supply", name);
+ if (!of_parse_phandle(np, prop_name, 0)) {
+ dev_info(dev, "%s: Unable to find %s regulator, assuming enabled\n",
+ __func__, prop_name);
+ ret = -ENODEV;
+ goto out;
+ }
+
+ vreg = devm_kzalloc(dev, sizeof(*vreg), GFP_KERNEL);
+ if (!vreg)
+ return -ENOMEM;
+
+ vreg->name = name;
+
+ snprintf(prop_name, MAX_PROP_SIZE, "%s-max-microamp", name);
+ ret = of_property_read_u32(np, prop_name, &vreg->max_uA);
+ if (ret) {
+ dev_err(dev, "%s: unable to find %s err %d\n",
+ __func__, prop_name, ret);
+ goto out;
+ }
+
+ vreg->reg = devm_regulator_get(dev, vreg->name);
+ if (IS_ERR(vreg->reg)) {
+ ret = PTR_ERR(vreg->reg);
+ dev_err(dev, "%s: %s get failed, err=%d\n",
+ __func__, vreg->name, ret);
+ }
+ vreg->min_uV = VDDP_REF_CLK_MIN_UV;
+ vreg->max_uV = VDDP_REF_CLK_MAX_UV;
+
+out:
+ if (!ret)
+ *out_vreg = vreg;
+ return ret;
+}
+
/**
* ufs_qcom_init - bind phy with controller
* @hba: host controller instance
@@ -2068,14 +2192,24 @@ static int ufs_qcom_init(struct ufs_hba *hba)
ufs_qcom_phy_save_controller_version(host->generic_phy,
host->hw_ver.major, host->hw_ver.minor, host->hw_ver.step);
+ err = ufs_qcom_parse_reg_info(host, "qcom,vddp-ref-clk",
+ &host->vddp_ref_clk);
phy_init(host->generic_phy);
err = phy_power_on(host->generic_phy);
if (err)
goto out_unregister_bus;
+ if (host->vddp_ref_clk) {
+ err = ufs_qcom_enable_vreg(dev, host->vddp_ref_clk);
+ if (err) {
+ dev_err(dev, "%s: failed enabling ref clk supply: %d\n",
+ __func__, err);
+ goto out_disable_phy;
+ }
+ }
err = ufs_qcom_init_lane_clks(host);
if (err)
- goto out_disable_phy;
+ goto out_disable_vddp;
ufs_qcom_parse_lpm(host);
if (host->disable_lpm)
@@ -2100,6 +2234,9 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out;
+out_disable_vddp:
+ if (host->vddp_ref_clk)
+ ufs_qcom_disable_vreg(dev, host->vddp_ref_clk);
out_disable_phy:
phy_power_off(host->generic_phy);
out_unregister_bus:
diff --git a/drivers/scsi/ufs/ufs-qcom.h b/drivers/scsi/ufs/ufs-qcom.h
index 74d8a7a30ad6..12154b268132 100644
--- a/drivers/scsi/ufs/ufs-qcom.h
+++ b/drivers/scsi/ufs/ufs-qcom.h
@@ -373,6 +373,7 @@ struct ufs_qcom_host {
spinlock_t ice_work_lock;
struct work_struct ice_cfg_work;
struct request *req_pending;
+ struct ufs_vreg *vddp_ref_clk;
};
static inline u32
diff --git a/drivers/scsi/ufs/ufshcd.c b/drivers/scsi/ufs/ufshcd.c
index f5d7cd0f4701..c891fc8d34a3 100644
--- a/drivers/scsi/ufs/ufshcd.c
+++ b/drivers/scsi/ufs/ufshcd.c
@@ -8603,9 +8603,13 @@ static int ufshcd_resume(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto vendor_suspend;
}
} else if (ufshcd_is_link_off(hba)) {
- ret = ufshcd_host_reset_and_restore(hba);
/*
- * ufshcd_host_reset_and_restore() should have already
+ * A full initialization of the host and the device is required
+ * since the link was put to off during suspend.
+ */
+ ret = ufshcd_reset_and_restore(hba);
+ /*
+ * ufshcd_reset_and_restore() should have already
* set the link state as active
*/
if (ret || !ufshcd_is_link_active(hba))
diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c
index 0910ef34e777..0e299efece94 100644
--- a/drivers/sensors/sensors_ssc.c
+++ b/drivers/sensors/sensors_ssc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,8 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
#include <soc/qcom/subsystem_restart.h>
#define IMAGE_LOAD_CMD 1
@@ -64,10 +66,11 @@ static struct attribute *attrs[] = {
};
static struct platform_device *slpi_private;
+static struct work_struct slpi_ldr_work;
-static void slpi_loader_do(struct platform_device *pdev)
+static void slpi_load_fw(struct work_struct *slpi_ldr_work)
{
-
+ struct platform_device *pdev = slpi_private;
struct slpi_loader_private *priv = NULL;
int ret;
const char *firmware_name = NULL;
@@ -111,6 +114,12 @@ fail:
dev_err(&pdev->dev, "%s: SLPI image loading failed\n", __func__);
}
/*
 * Defer SLPI firmware loading to process context: the actual PIL boot
 * happens in slpi_load_fw() via the shared slpi_ldr_work item.
 */
static void slpi_loader_do(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "%s: scheduling work to load SLPI fw\n", __func__);
	schedule_work(&slpi_ldr_work);
}
+
static void slpi_loader_unload(struct platform_device *pdev)
{
struct slpi_loader_private *priv = NULL;
@@ -336,6 +345,8 @@ static int sensors_ssc_probe(struct platform_device *pdev)
goto cdev_add_err;
}
+ INIT_WORK(&slpi_ldr_work, slpi_load_fw);
+
return 0;
cdev_add_err:
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index d3f1050499f3..75bebf66376d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -46,6 +46,15 @@ config QPNP_HAPTIC
module provides haptic feedback for user actions such as a long press
on the touch screen. It uses the Android timed-output framework.
+config QPNP_PBS
+ tristate "PBS trigger support for QPNP PMIC"
+ depends on SPMI
+ help
+ This driver supports configuring software PBS trigger event through PBS
+ RAM on Qualcomm Technologies, Inc. QPNP PMICs. This module provides
+ the APIs to the client drivers that wants to send the PBS trigger
+ event to the PBS RAM.
+
config MSM_SMD
depends on MSM_SMEM
bool "MSM Shared Memory Driver (SMD)"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 2605107e2dbd..fa350d122384 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -52,6 +52,7 @@ endif
obj-$(CONFIG_QPNP_HAPTIC) += qpnp-haptic.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
+obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 33cf48454414..09c50a81a943 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -2048,7 +2048,7 @@ static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
uint16_t *best_id)
{
struct glink_core_xprt_ctx *xprt;
- struct glink_core_xprt_ctx *best_xprt;
+ struct glink_core_xprt_ctx *best_xprt = NULL;
struct glink_core_xprt_ctx *ret;
bool first = true;
@@ -5502,7 +5502,7 @@ static void tx_func(struct kthread_work *work)
{
struct channel_ctx *ch_ptr;
uint32_t prio;
- uint32_t tx_ready_head_prio;
+ uint32_t tx_ready_head_prio = 0;
int ret;
struct channel_ctx *tx_ready_head = NULL;
bool transmitted_successfully = true;
diff --git a/drivers/soc/qcom/glink_spi_xprt.c b/drivers/soc/qcom/glink_spi_xprt.c
index 5de6e7eac7ea..6794c30605d7 100644
--- a/drivers/soc/qcom/glink_spi_xprt.c
+++ b/drivers/soc/qcom/glink_spi_xprt.c
@@ -891,7 +891,7 @@ static void __rx_worker(struct edge_info *einfo)
}
glink_spi_xprt_set_poll_mode(einfo);
- while (inactive_cycles < MAX_INACTIVE_CYCLES) {
+ do {
if (einfo->tx_resume_needed &&
glink_spi_xprt_write_avail(einfo)) {
einfo->tx_resume_needed = false;
@@ -926,7 +926,7 @@ static void __rx_worker(struct edge_info *einfo)
}
process_rx_cmd(einfo, rx_data, rx_avail);
kfree(rx_data);
- }
+ } while (inactive_cycles < MAX_INACTIVE_CYCLES && !einfo->in_ssr);
glink_spi_xprt_set_irq_mode(einfo);
srcu_read_unlock(&einfo->use_ref, rcu_id);
}
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index d3f44ab75f67..a94f741c2056 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -1157,7 +1157,7 @@ static void smem_module_init_notify(uint32_t state, void *data)
static void smem_init_security_partition(struct smem_toc_entry *entry,
uint32_t num)
{
- uint16_t remote_host;
+ uint16_t remote_host = 0;
struct smem_partition_header *hdr;
bool is_comm_partition = false;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 4d9767b6f8a3..21e9f17e6a7e 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -595,6 +595,22 @@ static void pil_release_mmap(struct pil_desc *desc)
struct pil_priv *priv = desc->priv;
struct pil_seg *p, *tmp;
u64 zero = 0ULL;
+
+ if (priv->info) {
+ __iowrite32_copy(&priv->info->start, &zero,
+ sizeof(zero) / 4);
+ writel_relaxed(0, &priv->info->size);
+ }
+
+ list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+static void pil_clear_segment(struct pil_desc *desc)
+{
+ struct pil_priv *priv = desc->priv;
u8 __iomem *buf;
struct pil_map_fw_info map_fw_info = {
@@ -613,16 +629,6 @@ static void pil_release_mmap(struct pil_desc *desc)
desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
map_data);
- if (priv->info) {
- __iowrite32_copy(&priv->info->start, &zero,
- sizeof(zero) / 4);
- writel_relaxed(0, &priv->info->size);
- }
-
- list_for_each_entry_safe(p, tmp, &priv->segs, list) {
- list_del(&p->list);
- kfree(p);
- }
}
#define IOMAP_SIZE SZ_1M
@@ -914,6 +920,7 @@ out:
&desc->attrs);
priv->region = NULL;
}
+ pil_clear_segment(desc);
pil_release_mmap(desc);
}
return ret;
diff --git a/drivers/soc/qcom/pil-msa.c b/drivers/soc/qcom/pil-msa.c
index bbd77d8e0650..53bddc5987df 100644
--- a/drivers/soc/qcom/pil-msa.c
+++ b/drivers/soc/qcom/pil-msa.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -332,6 +332,7 @@ int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
struct modem_data *drv = dev_get_drvdata(pil->dev);
struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
int ret = 0;
+ struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
s32 status;
u64 val = is_timeout_disabled() ? 0 : pbl_mba_boot_timeout_ms * 1000;
@@ -360,7 +361,7 @@ int __pil_mss_deinit_image(struct pil_desc *pil, bool err_path)
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
- dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+ dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
&drv->attrs_dma);
drv->q6->mba_dp_virt = NULL;
@@ -552,6 +553,7 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
dma_addr_t mba_dp_phys, mba_dp_phys_end;
int ret, count;
const u8 *data;
+ struct device *dma_dev = md->mba_mem_dev_fixed ?: &md->mba_mem_dev;
fw_name_p = drv->non_elf_image ? fw_name_legacy : fw_name;
ret = request_firmware(&fw, fw_name_p, pil->dev);
@@ -570,11 +572,12 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
drv->mba_dp_size = SZ_1M;
- arch_setup_dma_ops(&md->mba_mem_dev, 0, 0, NULL, 0);
+ arch_setup_dma_ops(dma_dev, 0, 0, NULL, 0);
+
+ dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
- md->mba_mem_dev.coherent_dma_mask =
- DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
init_dma_attrs(&md->attrs_dma);
+ dma_set_attr(DMA_ATTR_SKIP_ZEROING, &md->attrs_dma);
dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &md->attrs_dma);
ret = request_firmware(&dp_fw, dp_name, pil->dev);
@@ -591,10 +594,10 @@ int pil_mss_reset_load_mba(struct pil_desc *pil)
drv->mba_dp_size += drv->dp_size;
}
- mba_dp_virt = dma_alloc_attrs(&md->mba_mem_dev, drv->mba_dp_size,
- &mba_dp_phys, GFP_KERNEL, &md->attrs_dma);
+ mba_dp_virt = dma_alloc_attrs(dma_dev, drv->mba_dp_size, &mba_dp_phys,
+ GFP_KERNEL, &md->attrs_dma);
if (!mba_dp_virt) {
- dev_err(pil->dev, "%s MBA metadata buffer allocation %zx bytes failed\n",
+ dev_err(pil->dev, "%s MBA/DP buffer allocation %zx bytes failed\n",
__func__, drv->mba_dp_size);
ret = -ENOMEM;
goto err_invalid_fw;
@@ -651,7 +654,7 @@ err_mss_reset:
pil_assign_mem_to_linux(pil, drv->mba_dp_phys,
drv->mba_dp_size);
err_mba_data:
- dma_free_attrs(&md->mba_mem_dev, drv->mba_dp_size, drv->mba_dp_virt,
+ dma_free_attrs(dma_dev, drv->mba_dp_size, drv->mba_dp_virt,
drv->mba_dp_phys, &md->attrs_dma);
err_invalid_fw:
if (dp_fw)
@@ -670,14 +673,16 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
s32 status;
int ret;
u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
+ struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
DEFINE_DMA_ATTRS(attrs);
- drv->mba_mem_dev.coherent_dma_mask =
- DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+
+ dma_dev->coherent_dma_mask = DMA_BIT_MASK(sizeof(dma_addr_t) * 8);
+ dma_set_attr(DMA_ATTR_SKIP_ZEROING, &attrs);
dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
/* Make metadata physically contiguous and 4K aligned. */
- mdata_virt = dma_alloc_attrs(&drv->mba_mem_dev, size, &mdata_phys,
- GFP_KERNEL, &attrs);
+ mdata_virt = dma_alloc_attrs(dma_dev, size, &mdata_phys, GFP_KERNEL,
+ &attrs);
if (!mdata_virt) {
dev_err(pil->dev, "%s MBA metadata buffer allocation %zx bytes failed\n",
__func__, size);
@@ -694,8 +699,8 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
if (ret) {
pr_err("scm_call to unprotect modem metadata mem failed(rc:%d)\n",
ret);
- dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt,
- mdata_phys, &attrs);
+ dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys,
+ &attrs);
goto fail;
}
}
@@ -721,7 +726,7 @@ static int pil_msa_auth_modem_mdt(struct pil_desc *pil, const u8 *metadata,
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, mdata_phys, ALIGN(size, SZ_4K));
- dma_free_attrs(&drv->mba_mem_dev, size, mdata_virt, mdata_phys, &attrs);
+ dma_free_attrs(dma_dev, size, mdata_virt, mdata_phys, &attrs);
if (!ret)
return ret;
@@ -733,7 +738,7 @@ fail:
if (pil->subsys_vmid > 0)
pil_assign_mem_to_linux(pil, drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
- dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
+ dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
&drv->attrs_dma);
drv->q6->mba_dp_virt = NULL;
@@ -785,6 +790,7 @@ static int pil_msa_mba_auth(struct pil_desc *pil)
struct modem_data *drv = dev_get_drvdata(pil->dev);
struct q6v5_data *q6_drv = container_of(pil, struct q6v5_data, desc);
int ret;
+ struct device *dma_dev = drv->mba_mem_dev_fixed ?: &drv->mba_mem_dev;
s32 status;
u64 val = is_timeout_disabled() ? 0 : modem_auth_timeout_ms * 1000;
@@ -806,9 +812,9 @@ static int pil_msa_mba_auth(struct pil_desc *pil)
pil_assign_mem_to_linux(pil,
drv->q6->mba_dp_phys,
drv->q6->mba_dp_size);
- dma_free_attrs(&drv->mba_mem_dev, drv->q6->mba_dp_size,
- drv->q6->mba_dp_virt,
- drv->q6->mba_dp_phys, &drv->attrs_dma);
+ dma_free_attrs(dma_dev, drv->q6->mba_dp_size,
+ drv->q6->mba_dp_virt, drv->q6->mba_dp_phys,
+ &drv->attrs_dma);
drv->q6->mba_dp_virt = NULL;
}
diff --git a/drivers/soc/qcom/pil-msa.h b/drivers/soc/qcom/pil-msa.h
index fea8e1f9db37..896f0c7c232b 100644
--- a/drivers/soc/qcom/pil-msa.h
+++ b/drivers/soc/qcom/pil-msa.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -32,6 +32,7 @@ struct modem_data {
struct clk *xo;
struct pil_desc desc;
struct device mba_mem_dev;
+ struct device *mba_mem_dev_fixed;
struct dma_attrs attrs_dma;
};
diff --git a/drivers/soc/qcom/pil-q6v5-mss.c b/drivers/soc/qcom/pil-q6v5-mss.c
index 5f01d30de8d9..0e023a019280 100644
--- a/drivers/soc/qcom/pil-q6v5-mss.c
+++ b/drivers/soc/qcom/pil-q6v5-mss.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include <linux/init.h>
#include <linux/module.h>
#include <linux/platform_device.h>
+#include <linux/of_platform.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/ioport.h>
@@ -394,6 +395,11 @@ static int pil_mss_driver_probe(struct platform_device *pdev)
}
init_completion(&drv->stop_ack);
+ /* Probe the MBA mem device if present */
+ ret = of_platform_populate(pdev->dev.of_node, NULL, NULL, &pdev->dev);
+ if (ret)
+ return ret;
+
return pil_subsys_init(drv, pdev);
}
@@ -407,6 +413,33 @@ static int pil_mss_driver_exit(struct platform_device *pdev)
return 0;
}
+static int pil_mba_mem_driver_probe(struct platform_device *pdev)
+{
+ struct modem_data *drv;
+
+ if (!pdev->dev.parent) {
+ pr_err("No parent found.\n");
+ return -EINVAL;
+ }
+ drv = dev_get_drvdata(pdev->dev.parent);
+ drv->mba_mem_dev_fixed = &pdev->dev;
+ return 0;
+}
+
/* Matches the optional child node describing a fixed MBA DMA device. */
static const struct of_device_id mba_mem_match_table[] = {
	{ .compatible = "qcom,pil-mba-mem" },
	{}
};

/*
 * Child platform driver whose probe records the MBA memory device in the
 * parent MSS driver's modem_data (pil_mba_mem_driver_probe).
 */
static struct platform_driver pil_mba_mem_driver = {
	.probe = pil_mba_mem_driver_probe,
	.driver = {
		.name = "pil-mba-mem",
		.of_match_table = mba_mem_match_table,
		.owner = THIS_MODULE,
	},
};
+
static struct of_device_id mss_match_table[] = {
{ .compatible = "qcom,pil-q6v5-mss" },
{ .compatible = "qcom,pil-q6v55-mss" },
@@ -426,7 +459,12 @@ static struct platform_driver pil_mss_driver = {
/*
 * Register the MBA-mem child driver before the MSS driver: the MSS probe
 * populates the child nodes and expects the child driver to be available.
 */
static int __init pil_mss_init(void)
{
	int ret;

	ret = platform_driver_register(&pil_mba_mem_driver);
	if (ret)
		return ret;

	ret = platform_driver_register(&pil_mss_driver);
	if (ret)
		/* Don't leave the helper driver registered on failure */
		platform_driver_unregister(&pil_mba_mem_driver);

	return ret;
}
module_init(pil_mss_init);
diff --git a/drivers/soc/qcom/qdsp6v2/adsp-loader.c b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
index 51539a36a74f..b45eac88ed12 100644
--- a/drivers/soc/qcom/qdsp6v2/adsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
#include <linux/qdsp6v2/apr.h>
#include <linux/of_device.h>
#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
#include <soc/qcom/subsystem_restart.h>
#define Q6_PIL_GET_DELAY_MS 100
@@ -44,12 +46,13 @@ static struct attribute *attrs[] = {
NULL,
};
+static struct work_struct adsp_ldr_work;
static struct platform_device *adsp_private;
static void adsp_loader_unload(struct platform_device *pdev);
-static void adsp_loader_do(struct platform_device *pdev)
+static void adsp_load_fw(struct work_struct *adsp_ldr_work)
{
-
+ struct platform_device *pdev = adsp_private;
struct adsp_loader_private *priv = NULL;
const char *adsp_dt = "qcom,adsp-state";
@@ -146,6 +149,11 @@ fail:
return;
}
/*
 * Defer ADSP firmware loading to process context: the actual PIL boot
 * happens in adsp_load_fw() via the shared adsp_ldr_work item.
 */
static void adsp_loader_do(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "%s: scheduling work to load ADSP fw\n", __func__);
	schedule_work(&adsp_ldr_work);
}
static ssize_t adsp_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -270,6 +278,8 @@ static int adsp_loader_probe(struct platform_device *pdev)
return ret;
}
+ INIT_WORK(&adsp_ldr_work, adsp_load_fw);
+
return 0;
}
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index 19974b61ec1c..d11ffdde23be 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -218,6 +218,7 @@ static void apr_tal_notify_remote_rx_intent(void *handle, const void *priv,
*/
pr_debug("%s: remote queued an intent\n", __func__);
apr_ch->if_remote_intent_ready = true;
+ wake_up(&apr_ch->wait);
}
void apr_tal_notify_state(void *handle, const void *priv, unsigned event)
diff --git a/drivers/soc/qcom/qdsp6v2/audio_notifier.c b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
index 47adc3bb3f40..b120883afbb0 100644
--- a/drivers/soc/qcom/qdsp6v2/audio_notifier.c
+++ b/drivers/soc/qcom/qdsp6v2/audio_notifier.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -394,8 +394,8 @@ static void audio_notifer_reg_all_clients(void)
int ret;
list_for_each_safe(ptr, next, &client_list) {
- client_data = list_entry(ptr,
- struct client_data, list);
+ client_data = list_entry(ptr, struct client_data, list);
+
ret = audio_notifer_reg_client(client_data);
if (IS_ERR_VALUE(ret))
pr_err("%s: audio_notifer_reg_client failed for client %s, ret %d\n",
@@ -518,9 +518,8 @@ int audio_notifier_deregister(char *client_name)
goto done;
}
mutex_lock(&notifier_mutex);
- list_for_each_safe(ptr, next, &client_data->list) {
- client_data = list_entry(ptr, struct client_data,
- list);
+ list_for_each_safe(ptr, next, &client_list) {
+ client_data = list_entry(ptr, struct client_data, list);
if (!strcmp(client_name, client_data->client_name)) {
ret2 = audio_notifer_dereg_client(client_data);
if (ret2 < 0) {
diff --git a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
index 0b801c5cd7dd..cae26e3913d6 100644
--- a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014,2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,8 @@
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
#include <soc/qcom/subsystem_restart.h>
#define BOOT_CMD 1
@@ -47,11 +49,12 @@ static struct attribute *attrs[] = {
static u32 cdsp_state = CDSP_SUBSYS_DOWN;
static struct platform_device *cdsp_private;
+static struct work_struct cdsp_ldr_work;
static void cdsp_loader_unload(struct platform_device *pdev);
-static int cdsp_loader_do(struct platform_device *pdev)
+static void cdsp_load_fw(struct work_struct *cdsp_ldr_work)
{
-
+ struct platform_device *pdev = cdsp_private;
struct cdsp_loader_private *priv = NULL;
int rc = 0;
@@ -101,14 +104,19 @@ static int cdsp_loader_do(struct platform_device *pdev)
}
dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__);
- return rc;
+ return;
}
fail:
dev_err(&pdev->dev, "%s: CDSP image loading failed\n", __func__);
- return rc;
+ return;
}
/*
 * Defer CDSP firmware loading to process context: the actual PIL boot
 * happens in cdsp_load_fw() via the shared cdsp_ldr_work item.
 */
static void cdsp_loader_do(struct platform_device *pdev)
{
	dev_info(&pdev->dev, "%s: scheduling work to load CDSP fw\n", __func__);
	schedule_work(&cdsp_ldr_work);
}
static ssize_t cdsp_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -126,7 +134,7 @@ static ssize_t cdsp_boot_store(struct kobject *kobj,
pr_debug("%s: going to call cdsp_loader_do\n", __func__);
cdsp_loader_do(cdsp_private);
} else if (boot == IMAGE_UNLOAD_CMD) {
- pr_debug("%s: going to call adsp_unloader\n", __func__);
+ pr_debug("%s: going to call cdsp_unloader\n", __func__);
cdsp_loader_unload(cdsp_private);
}
return count;
@@ -238,6 +246,8 @@ static int cdsp_loader_probe(struct platform_device *pdev)
return ret;
}
+ INIT_WORK(&cdsp_ldr_work, cdsp_load_fw);
+
return 0;
}
diff --git a/drivers/soc/qcom/qpnp-pbs.c b/drivers/soc/qcom/qpnp-pbs.c
new file mode 100644
index 000000000000..287c8a25b391
--- /dev/null
+++ b/drivers/soc/qcom/qpnp-pbs.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "PBS: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/qpnp/qpnp-pbs.h>
+
+#define QPNP_PBS_DEV_NAME "qcom,qpnp-pbs"
+
+#define PBS_CLIENT_TRIG_CTL 0x42
+#define PBS_CLIENT_SW_TRIG_BIT BIT(7)
+#define PBS_CLIENT_SCRATCH1 0x50
+#define PBS_CLIENT_SCRATCH2 0x51
+
+static LIST_HEAD(pbs_dev_list);
+static DEFINE_MUTEX(pbs_list_lock);
+
/* Per-client PBS device state; one instance per qcom,qpnp-pbs DT node. */
struct qpnp_pbs {
	struct platform_device *pdev;
	struct device *dev;
	/* DT node clients pass to qpnp_pbs_trigger_event() to find us */
	struct device_node *dev_node;
	/* Parent SPMI bus regmap used for all register accesses */
	struct regmap *regmap;
	/* Serializes trigger sequences on this PBS client */
	struct mutex pbs_lock;
	/* Entry in the global pbs_dev_list */
	struct list_head link;

	/* SPMI base address of this PBS client peripheral */
	u32 base;
};
+
+static int qpnp_pbs_read(struct qpnp_pbs *pbs, u32 address,
+ u8 *val, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = pbs->pdev;
+
+ rc = regmap_bulk_read(pbs->regmap, address, val, count);
+ if (rc)
+ pr_err("Failed to read address=0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
+
+ return rc;
+}
+
+static int qpnp_pbs_write(struct qpnp_pbs *pbs, u16 address,
+ u8 *val, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = pbs->pdev;
+
+ rc = regmap_bulk_write(pbs->regmap, address, val, count);
+ if (rc < 0)
+ pr_err("Failed to write address =0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
+ else
+ pr_debug("Wrote 0x%02X to addr 0x%04x\n", *val, address);
+
+ return rc;
+}
+
+static int qpnp_pbs_masked_write(struct qpnp_pbs *pbs, u16 address,
+ u8 mask, u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(pbs->regmap, address, mask, val);
+ if (rc < 0)
+ pr_err("Failed to write address 0x%04X, rc = %d\n",
+ address, rc);
+ else
+ pr_debug("Wrote 0x%02X to addr 0x%04X\n",
+ val, address);
+
+ return rc;
+}
+
+static struct qpnp_pbs *get_pbs_client_node(struct device_node *dev_node)
+{
+ struct qpnp_pbs *pbs;
+
+ mutex_lock(&pbs_list_lock);
+ list_for_each_entry(pbs, &pbs_dev_list, link) {
+ if (dev_node == pbs->dev_node) {
+ mutex_unlock(&pbs_list_lock);
+ return pbs;
+ }
+ }
+
+ mutex_unlock(&pbs_list_lock);
+ return ERR_PTR(-EINVAL);
+}
+
+static int qpnp_pbs_wait_for_ack(struct qpnp_pbs *pbs, u8 bit_pos)
+{
+ int rc = 0;
+ u16 retries = 2000, dly = 1000;
+ u8 val;
+
+ while (retries--) {
+ rc = qpnp_pbs_read(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to read register %x rc = %d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ return rc;
+ }
+
+ if (val == 0xFF) {
+ /* PBS error - clear SCRATCH2 register */
+ rc = qpnp_pbs_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, 0, 1);
+ if (rc < 0) {
+ pr_err("Failed to clear register %x rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ return rc;
+ }
+
+ pr_err("NACK from PBS for bit %d\n", bit_pos);
+ return -EINVAL;
+ }
+
+ if (val & BIT(bit_pos)) {
+ pr_debug("PBS sequence for bit %d executed!\n",
+ bit_pos);
+ break;
+ }
+
+ usleep_range(dly, dly + 100);
+ }
+
+ if (!retries) {
+ pr_err("Timeout for PBS ACK/NACK for bit %d\n", bit_pos);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * qpnp_pbs_trigger_event - Trigger the PBS RAM sequence
+ *
+ * Returns = 0 If the PBS RAM sequence executed successfully.
+ *
+ * Returns < 0 for errors.
+ *
+ * This function is used to trigger the PBS RAM sequence to be
+ * executed by the client driver.
+ *
+ * The PBS trigger sequence involves
+ * 1. setting the PBS sequence bit in PBS_CLIENT_SCRATCH1
+ * 2. Initiating the SW PBS trigger
+ * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for the
+ * completion of the sequence.
+ * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute
+ */
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap)
+{
+ struct qpnp_pbs *pbs;
+ int rc = 0;
+ u16 bit_pos = 0;
+ u8 val, mask = 0;
+
+ if (!dev_node)
+ return -EINVAL;
+
+ if (!bitmap) {
+ pr_err("Invalid bitmap passed by client\n");
+ return -EINVAL;
+ }
+
+ pbs = get_pbs_client_node(dev_node);
+ if (IS_ERR_OR_NULL(pbs)) {
+ pr_err("Unable to find the PBS dev_node\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&pbs->pbs_lock);
+ rc = qpnp_pbs_read(pbs, pbs->base + PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("read register %x failed rc = %d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto out;
+ }
+
+ if (val == 0xFF) {
+ /* PBS error - clear SCRATCH2 register */
+ rc = qpnp_pbs_write(pbs, pbs->base + PBS_CLIENT_SCRATCH2, 0, 1);
+ if (rc < 0) {
+ pr_err("Failed to clear register %x rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto out;
+ }
+ }
+
+ for (bit_pos = 0; bit_pos < 8; bit_pos++) {
+ if (bitmap & BIT(bit_pos)) {
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH2 mask register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto error;
+ }
+
+ /*
+ * Set the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH1 register.
+ */
+ val = mask = BIT(bit_pos);
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH1, mask, val);
+ if (rc < 0) {
+ pr_err("Failed to set %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+ goto error;
+ }
+
+ /* Initiate the SW trigger */
+ val = mask = PBS_CLIENT_SW_TRIG_BIT;
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_TRIG_CTL, mask, val);
+ if (rc < 0) {
+ pr_err("Failed to write register %x rc=%d\n",
+ PBS_CLIENT_TRIG_CTL, rc);
+ goto error;
+ }
+
+ rc = qpnp_pbs_wait_for_ack(pbs, bit_pos);
+ if (rc < 0) {
+ pr_err("Error during wait_for_ack\n");
+ goto error;
+ }
+
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH1 register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+ goto error;
+ }
+
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH2 mask register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto error;
+ }
+
+ }
+ }
+
+error:
+ /* Clear all the requested bitmap */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base + PBS_CLIENT_SCRATCH1,
+ bitmap, 0);
+ if (rc < 0)
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+out:
+ mutex_unlock(&pbs->pbs_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pbs_trigger_event);
+
+static int qpnp_pbs_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 val = 0;
+ struct qpnp_pbs *pbs;
+
+ pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL);
+ if (!pbs)
+ return -ENOMEM;
+
+ pbs->pdev = pdev;
+ pbs->dev = &pdev->dev;
+ pbs->dev_node = pdev->dev.of_node;
+ pbs->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pbs->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &val);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ pdev->dev.of_node->full_name, rc);
+ return rc;
+ }
+
+ pbs->base = val;
+ mutex_init(&pbs->pbs_lock);
+
+ dev_set_drvdata(&pdev->dev, pbs);
+
+ mutex_lock(&pbs_list_lock);
+ list_add(&pbs->link, &pbs_dev_list);
+ mutex_unlock(&pbs_list_lock);
+
+ return 0;
+}
+
+static const struct of_device_id qpnp_pbs_match_table[] = {
+ { .compatible = QPNP_PBS_DEV_NAME },
+ {}
+};
+
+static struct platform_driver qpnp_pbs_driver = {
+ .driver = {
+ .name = QPNP_PBS_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qpnp_pbs_match_table,
+ },
+ .probe = qpnp_pbs_probe,
+};
+
+static int __init qpnp_pbs_init(void)
+{
+ return platform_driver_register(&qpnp_pbs_driver);
+}
+arch_initcall(qpnp_pbs_init);
+
+static void __exit qpnp_pbs_exit(void)
+{
+ return platform_driver_unregister(&qpnp_pbs_driver);
+}
+module_exit(qpnp_pbs_exit);
+
+MODULE_DESCRIPTION("QPNP PBS DRIVER");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_PBS_DEV_NAME);
diff --git a/drivers/soc/qcom/ramdump.c b/drivers/soc/qcom/ramdump.c
index 08499c549b47..c712ed392b0b 100644
--- a/drivers/soc/qcom/ramdump.c
+++ b/drivers/soc/qcom/ramdump.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -326,13 +326,7 @@ static int _do_ramdump(void *handle, struct ramdump_segment *segments,
if (rd_dev->complete_ramdump) {
for (i = 0; i < nsegments-1; i++)
segments[i].size =
- PAGE_ALIGN(segments[i+1].address - segments[i].address);
-
- segments[nsegments-1].size =
- PAGE_ALIGN(segments[nsegments-1].size);
- } else {
- for (i = 0; i < nsegments; i++)
- segments[i].size = PAGE_ALIGN(segments[i].size);
+ segments[i + 1].address - segments[i].address;
}
rd_dev->segments = segments;
diff --git a/drivers/soc/qcom/rpm_rail_stats.c b/drivers/soc/qcom/rpm_rail_stats.c
index d80a9fc5146a..9ef96dc54eb6 100644
--- a/drivers/soc/qcom/rpm_rail_stats.c
+++ b/drivers/soc/qcom/rpm_rail_stats.c
@@ -27,7 +27,7 @@
#include "rpm_stats.h"
-#define RPM_RAIL_BUF_LEN 600
+#define RPM_RAIL_BUF_LEN 1300
#define SNPRINTF(buf, size, format, ...) \
{ \
@@ -118,7 +118,7 @@ static int msm_rpm_rail_type_copy(void __iomem **base, char **buf, int count)
rail[NAMELEN - 1] = '\0';
memcpy(rail, &rt.rail, NAMELEN - 1);
SNPRINTF(*buf, count,
- "\trail:%-2s num_corners:%-2u current_corner:%-2u last_entered:%-8u\n",
+ "\trail:%-2s \tnum_corners:%-2u current_corner:%-2u last_entered:%-8u\n",
rail, rt.num_corners, rt.current_corner, rt.last_entered);
*base += sizeof(rt);
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 2b708732760f..8581ed587ead 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -111,6 +111,7 @@ static void service_locator_svc_arrive(struct work_struct *work)
qmi_handle_create(service_locator_clnt_notify, NULL);
if (!service_locator.clnt_handle) {
service_locator.clnt_handle = NULL;
+ complete_all(&service_locator.service_available);
mutex_unlock(&service_locator.service_mutex);
pr_err("Service locator QMI client handle alloc failed!\n");
return;
@@ -123,6 +124,7 @@ static void service_locator_svc_arrive(struct work_struct *work)
if (rc) {
qmi_handle_destroy(service_locator.clnt_handle);
service_locator.clnt_handle = NULL;
+ complete_all(&service_locator.service_available);
mutex_unlock(&service_locator.service_mutex);
pr_err("Unable to connnect to service rc:%d\n", rc);
return;
@@ -138,6 +140,7 @@ static void service_locator_svc_exit(struct work_struct *work)
mutex_lock(&service_locator.service_mutex);
qmi_handle_destroy(service_locator.clnt_handle);
service_locator.clnt_handle = NULL;
+ complete_all(&service_locator.service_available);
mutex_unlock(&service_locator.service_mutex);
pr_info("Connection with service locator lost\n");
}
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index d0fef9d31755..99ae24735f05 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -207,12 +207,11 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
ret = qseecom_process_listener_from_smcinvoke(&desc);
*smcinvoke_result = (int32_t)desc.ret[1];
- if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0]) {
+ if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0])
pr_err("SCM call failed with ret val = %d %d %d %d\n",
ret, (int)desc.ret[0],
(int)desc.ret[1], (int)desc.ret[2]);
- ret = ret | desc.ret[0] | desc.ret[1] | desc.ret[2];
- }
+
dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
return ret;
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 35ccc57a2dea..07610877f140 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -359,6 +359,11 @@ static void spcom_link_state_notif_cb(struct glink_link_state_cb_info *cb_info,
struct spcom_channel *ch = NULL;
const char *ch_name = "sp_kernel";
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return;
+ }
+
spcom_dev->link_state = cb_info->link_state;
pr_debug("spcom_link_state_notif_cb called. transport = %s edge = %s\n",
@@ -881,10 +886,11 @@ static int spcom_rx(struct spcom_channel *ch,
if (timeleft == 0) {
pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
- goto exit_err;
+ mutex_unlock(&ch->lock);
+ return -ETIMEDOUT;
} else if (ch->rx_abort) {
- pr_err("rx aborted.\n");
- goto exit_err;
+ mutex_unlock(&ch->lock);
+ return -ERESTART; /* probably SSR */
} else if (ch->actual_rx_size) {
pr_debug("actual_rx_size is [%d].\n", ch->actual_rx_size);
} else {
@@ -1017,7 +1023,10 @@ static void spcom_rx_abort_pending_server(void)
*/
bool spcom_is_sp_subsystem_link_up(void)
{
- return (spcom_dev->link_state == GLINK_LINK_STATE_UP);
+ if (spcom_dev == NULL)
+ return false;
+
+ return (spcom_dev->link_state == GLINK_LINK_STATE_UP);
}
EXPORT_SYMBOL(spcom_is_sp_subsystem_link_up);
@@ -1039,6 +1048,11 @@ struct spcom_client *spcom_register_client(struct spcom_client_info *info)
struct spcom_channel *ch;
struct spcom_client *client;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return NULL;
+ }
+
if (!info) {
pr_err("Invalid parameter.\n");
return NULL;
@@ -1080,6 +1094,11 @@ int spcom_unregister_client(struct spcom_client *client)
{
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!client) {
pr_err("Invalid parameter.\n");
return -EINVAL;
@@ -1118,6 +1137,11 @@ int spcom_client_send_message_sync(struct spcom_client *client,
int ret;
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!client || !req_ptr || !resp_ptr) {
pr_err("Invalid parameter.\n");
return -EINVAL;
@@ -1159,9 +1183,14 @@ bool spcom_client_is_server_connected(struct spcom_client *client)
{
bool connected;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return false;
+ }
+
if (!client) {
pr_err("Invalid parameter.\n");
- return -EINVAL;
+ return false;
}
connected = spcom_is_channel_connected(client->ch);
@@ -1188,6 +1217,11 @@ struct spcom_server *spcom_register_service(struct spcom_service_info *info)
struct spcom_channel *ch;
struct spcom_server *server;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return NULL;
+ }
+
if (!info) {
pr_err("Invalid parameter.\n");
return NULL;
@@ -1226,6 +1260,11 @@ int spcom_unregister_service(struct spcom_server *server)
{
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!server) {
pr_err("Invalid parameter.\n");
return -EINVAL;
@@ -1290,6 +1329,11 @@ int spcom_server_wait_for_request(struct spcom_server *server,
int ret;
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!server || !req_ptr) {
pr_err("Invalid parameter.\n");
return -EINVAL;
@@ -1323,6 +1367,11 @@ int spcom_server_send_response(struct spcom_server *server,
int ret;
struct spcom_channel *ch;
+ if (!spcom_is_ready()) {
+ pr_err("spcom is not ready.\n");
+ return -ENODEV;
+ }
+
if (!server || !resp_ptr) {
pr_err("Invalid parameter.\n");
return -EINVAL;
@@ -1928,7 +1977,8 @@ static int spcom_handle_read_req_resp(struct spcom_channel *ch,
ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
if (ret < 0) {
pr_err("rx error %d.\n", ret);
- goto exit_err;
+ kfree(rx_buf);
+ return ret;
} else {
size = ret; /* actual_rx_size */
}
@@ -2221,8 +2271,14 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
if (buf == NULL)
return -ENOMEM;
- actual_size = spcom_handle_read(ch, buf, size);
- if ((actual_size <= 0) || (actual_size > size)) {
+ ret = spcom_handle_read(ch, buf, size);
+ if (ret < 0) {
+ pr_err("read error [%d].\n", ret);
+ kfree(buf);
+ return ret;
+ }
+ actual_size = ret;
+ if ((actual_size == 0) || (actual_size > size)) {
pr_err("invalid actual_size [%d].\n", actual_size);
kfree(buf);
return -EFAULT;
@@ -2373,7 +2429,7 @@ static int spcom_create_channel_chardev(const char *name)
devt = spcom_dev->device_no + spcom_dev->channel_count;
priv = ch;
dev = device_create(cls, parent, devt, priv, name);
- if (!dev) {
+ if (IS_ERR(dev)) {
pr_err("device_create failed.\n");
kfree(cdev);
return -ENODEV;
@@ -2426,7 +2482,7 @@ static int __init spcom_register_chardev(void)
spcom_dev->device_no, priv,
DEVICE_NAME);
- if (!spcom_dev->class_dev) {
+ if (IS_ERR(spcom_dev->class_dev)) {
pr_err("class_device_create failed %d\n", ret);
ret = -ENOMEM;
goto exit_destroy_class;
@@ -2479,6 +2535,11 @@ static int spcom_parse_dt(struct device_node *np)
pr_debug("num of predefined channels [%d].\n", num_ch);
+ if (num_ch > ARRAY_SIZE(spcom_dev->predefined_ch_name)) {
+ pr_err("too many predefined channels [%d].\n", num_ch);
+ return -EINVAL;
+ }
+
for (i = 0; i < num_ch; i++) {
ret = of_property_read_string_index(np, propname, i, &name);
if (ret) {
@@ -2544,21 +2605,23 @@ static int spcom_probe(struct platform_device *pdev)
pr_debug("register_link_state_cb(), transport [%s] edge [%s]\n",
link_info.transport, link_info.edge);
notif_handle = glink_register_link_state_cb(&link_info, spcom_dev);
- if (!notif_handle) {
+ if (IS_ERR(notif_handle)) {
pr_err("glink_register_link_state_cb(), err [%d]\n", ret);
goto fail_reg_chardev;
}
spcom_dev->ion_client = msm_ion_client_create(DEVICE_NAME);
- if (spcom_dev->ion_client == NULL) {
+ if (IS_ERR(spcom_dev->ion_client)) {
pr_err("fail to create ion client.\n");
- goto fail_reg_chardev;
+ goto fail_ion_client;
}
pr_info("Driver Initialization ok.\n");
return 0;
+fail_ion_client:
+ glink_unregister_link_state_cb(notif_handle);
fail_reg_chardev:
pr_err("Failed to init driver.\n");
spcom_unregister_chrdev();
@@ -2596,7 +2659,7 @@ static int __init spcom_init(void)
if (ret)
pr_err("spcom_driver register failed %d\n", ret);
- return 0;
+ return ret;
}
module_init(spcom_init);
diff --git a/drivers/soc/qcom/spss_utils.c b/drivers/soc/qcom/spss_utils.c
index e17a1370d1f0..cbf44be7fbab 100644
--- a/drivers/soc/qcom/spss_utils.c
+++ b/drivers/soc/qcom/spss_utils.c
@@ -140,7 +140,7 @@ static ssize_t spss_debug_reg_show(struct device *dev,
{
int ret;
void __iomem *spss_debug_reg = NULL;
- int val1, val2;
+ u32 val1, val2;
if (!dev || !attr || !buf) {
pr_err("invalid param.\n");
@@ -149,7 +149,7 @@ static ssize_t spss_debug_reg_show(struct device *dev,
pr_debug("spss_debug_reg_addr [0x%x].\n", spss_debug_reg_addr);
- spss_debug_reg = ioremap_nocache(spss_debug_reg_addr, 0x16);
+ spss_debug_reg = ioremap_nocache(spss_debug_reg_addr, sizeof(u32)*2);
if (!spss_debug_reg) {
pr_err("can't map debug reg addr.\n");
@@ -157,7 +157,7 @@ static ssize_t spss_debug_reg_show(struct device *dev,
}
val1 = readl_relaxed(spss_debug_reg);
- val2 = readl_relaxed(((char *) spss_debug_reg) + 0x04);
+ val2 = readl_relaxed(((char *) spss_debug_reg) + sizeof(u32));
ret = snprintf(buf, PAGE_SIZE, "val1 [0x%x] val2 [0x%x]", val1, val2);
@@ -192,12 +192,15 @@ static int spss_create_sysfs(struct device *dev)
ret = device_create_file(dev, &dev_attr_test_fuse_state);
if (ret < 0) {
pr_err("failed to create sysfs file for test_fuse_state.\n");
+ device_remove_file(dev, &dev_attr_firmware_name);
return ret;
}
ret = device_create_file(dev, &dev_attr_spss_debug_reg);
if (ret < 0) {
pr_err("failed to create sysfs file for spss_debug_reg.\n");
+ device_remove_file(dev, &dev_attr_firmware_name);
+ device_remove_file(dev, &dev_attr_test_fuse_state);
return ret;
}
@@ -284,13 +287,16 @@ static int spss_parse_dt(struct device_node *node)
(int) spss_fuse2_addr, (int) spss_fuse2_bit);
spss_fuse1_reg = ioremap_nocache(spss_fuse1_addr, sizeof(u32));
- spss_fuse2_reg = ioremap_nocache(spss_fuse2_addr, sizeof(u32));
if (!spss_fuse1_reg) {
pr_err("can't map fuse1 addr.\n");
return -EFAULT;
}
+
+ spss_fuse2_reg = ioremap_nocache(spss_fuse2_addr, sizeof(u32));
+
if (!spss_fuse2_reg) {
+ iounmap(spss_fuse1_reg);
pr_err("can't map fuse2 addr.\n");
return -EFAULT;
}
@@ -380,7 +386,11 @@ static int spss_probe(struct platform_device *pdev)
return -EFAULT;
}
- spss_create_sysfs(dev);
+ ret = spss_create_sysfs(dev);
+ if (ret < 0) {
+ pr_err("fail to create sysfs.\n");
+ return -EFAULT;
+ }
pr_info("Initialization completed ok, firmware_name [%s].\n",
firmware_name);
@@ -415,7 +425,7 @@ static int __init spss_init(void)
if (ret)
pr_err("register platform driver failed, ret [%d]\n", ret);
- return 0;
+ return ret;
}
late_initcall(spss_init); /* start after PIL driver */
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 2d52fcaf954a..6f75eee8f921 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1442,9 +1442,9 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
u32 spi_ioc_orig;
int rc;
- rc = pm_runtime_get_sync(dd->dev);
- if (rc < 0) {
- dev_err(dd->dev, "Failure during runtime get");
+ if (dd->suspended) {
+ dev_err(dd->dev, "%s: SPI operational state not valid %d\n",
+ __func__, dd->suspended);
return;
}
@@ -1470,8 +1470,6 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
if (dd->pdata->is_shared)
put_local_resources(dd);
- pm_runtime_mark_last_busy(dd->dev);
- pm_runtime_put_autosuspend(dd->dev);
}
static void reset_core(struct msm_spi *dd)
@@ -1655,23 +1653,16 @@ static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
struct msm_spi *dd = spi_master_get_devdata(master);
int resume_state = 0;
- resume_state = pm_runtime_get_sync(dd->dev);
- if (resume_state < 0)
+ if (!pm_runtime_enabled(dd->dev)) {
+ dev_err(dd->dev, "Runtime PM not available\n");
+ resume_state = -EBUSY;
goto spi_finalize;
+ }
- /*
- * Counter-part of system-suspend when runtime-pm is not enabled.
- * This way, resume can be left empty and device will be put in
- * active mode only if client requests anything on the bus
- */
- if (!pm_runtime_enabled(dd->dev))
- resume_state = msm_spi_pm_resume_runtime(dd->dev);
+ resume_state = pm_runtime_get_sync(dd->dev);
if (resume_state < 0)
goto spi_finalize;
- if (dd->suspended) {
- resume_state = -EBUSY;
- goto spi_finalize;
- }
+
return 0;
spi_finalize:
@@ -1683,6 +1674,11 @@ static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
{
struct msm_spi *dd = spi_master_get_devdata(master);
+ if (!pm_runtime_enabled(dd->dev)) {
+ dev_err(dd->dev, "Runtime PM not available\n");
+ return -EBUSY;
+ }
+
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
return 0;
@@ -2621,27 +2617,10 @@ resume_exit:
#ifdef CONFIG_PM_SLEEP
static int msm_spi_suspend(struct device *device)
{
- if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
- struct platform_device *pdev = to_platform_device(device);
- struct spi_master *master = platform_get_drvdata(pdev);
- struct msm_spi *dd;
-
- dev_dbg(device, "system suspend");
- if (!master)
- goto suspend_exit;
- dd = spi_master_get_devdata(master);
- if (!dd)
- goto suspend_exit;
- msm_spi_pm_suspend_runtime(device);
-
- /*
- * set the device's runtime PM status to 'suspended'
- */
- pm_runtime_disable(device);
- pm_runtime_set_suspended(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_status_suspended(device)) {
+ dev_err(device, "Runtime not suspended, deny sys suspend");
+ return -EBUSY;
}
-suspend_exit:
return 0;
}
diff --git a/drivers/thermal/lmh_lite.c b/drivers/thermal/lmh_lite.c
index 44ceb723d34c..8dd7f654e1d3 100644
--- a/drivers/thermal/lmh_lite.c
+++ b/drivers/thermal/lmh_lite.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -83,6 +83,8 @@
} \
/* Have barrier before reading from TZ data */ \
mb(); \
+ dmac_inv_range(payload, (void *)payload + \
+ sizeof(uint32_t) * LMH_SCM_PAYLOAD_SIZE);\
trace_lmh_event_call("GET_TYPE exit"); \
if (ret) { \
pr_err("Error in SCM v%d get type. cmd:%x err:%d\n", \
@@ -332,6 +334,8 @@ static void lmh_read_and_update(struct lmh_driver_data *lmh_dat)
/* Have memory barrier before we access the TZ data */
mb();
trace_lmh_event_call("GET_INTENSITY exit");
+ dmac_inv_range(&payload, (void *)&payload +
+ sizeof(struct lmh_sensor_packet));
if (ret) {
pr_err("Error in SCM v%d read call. err:%d\n",
(is_scm_armv8()) ? 8 : 7, ret);
@@ -677,6 +681,7 @@ static int lmh_get_sensor_list(void)
/* Have memory barrier before we access the TZ data */
mb();
trace_lmh_event_call("GET_SENSORS exit");
+ dmac_inv_range(payload, (void *)payload + buf_size);
if (ret < 0) {
pr_err("Error in SCM v%d call. err:%d\n",
(is_scm_armv8()) ? 8 : 7, ret);
@@ -911,6 +916,7 @@ static int lmh_debug_read(struct lmh_debug_ops *ops, uint32_t **buf)
}
/* Have memory barrier before we access the TZ data */
mb();
+ dmac_inv_range(payload, (void *)payload + curr_size);
trace_lmh_event_call("GET_DEBUG_READ exit");
if (ret) {
pr_err("Error in SCM v%d get debug read. err:%d\n",
@@ -977,6 +983,7 @@ static int lmh_debug_config_write(uint32_t cmd_id, uint32_t *buf, int size)
ret = scm_call2(SCM_SIP_FNID(SCM_SVC_LMH, cmd_id), &desc_arg);
/* Have memory barrier before we access the TZ data */
mb();
+ dmac_inv_range(payload, (void *)payload + size_bytes);
trace_lmh_event_call("CONFIG_DEBUG_WRITE exit");
if (ret) {
pr_err("Error in SCM v%d config debug read. err:%d\n",
diff --git a/drivers/thermal/msm_thermal.c b/drivers/thermal/msm_thermal.c
index cc232ff9be9a..c8cbd078bb07 100644
--- a/drivers/thermal/msm_thermal.c
+++ b/drivers/thermal/msm_thermal.c
@@ -2857,6 +2857,9 @@ static void msm_thermal_bite(int zone_id, int temp)
pr_err("Tsens:%d reached temperature:%d. System reset\n",
tsens_id, temp);
}
+ /* If it is a secure device ignore triggering the thermal bite. */
+ if (scm_is_secure_device())
+ return;
if (!is_scm_armv8()) {
scm_call_atomic1(SCM_SVC_BOOT, THERM_SECURE_BITE_CMD, 0);
} else {
diff --git a/drivers/thermal/thermal_core.c b/drivers/thermal/thermal_core.c
index 2b49608756a0..04cf31c119ef 100644
--- a/drivers/thermal/thermal_core.c
+++ b/drivers/thermal/thermal_core.c
@@ -4,7 +4,7 @@
* Copyright (C) 2008 Intel Corp
* Copyright (C) 2008 Zhang Rui <rui.zhang@intel.com>
* Copyright (C) 2008 Sujith Thomas <sujith.thomas@intel.com>
- * Copyright (c) 2013-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
*
@@ -382,9 +382,11 @@ static __ref int sensor_sysfs_notify(void *data)
struct sensor_info *sensor = (struct sensor_info *)data;
while (!kthread_should_stop()) {
- while (wait_for_completion_interruptible(
- &sensor->sysfs_notify_complete) != 0)
- ;
+ if (wait_for_completion_interruptible(
+ &sensor->sysfs_notify_complete) != 0)
+ continue;
+ if (sensor->deregister_active)
+ return ret;
reinit_completion(&sensor->sysfs_notify_complete);
sysfs_notify(&sensor->tz->device.kobj, NULL,
THERMAL_UEVENT_DATA);
@@ -580,6 +582,7 @@ int sensor_init(struct thermal_zone_device *tz)
sensor->threshold_max = INT_MAX;
sensor->max_idx = -1;
sensor->min_idx = -1;
+ sensor->deregister_active = false;
mutex_init(&sensor->lock);
INIT_LIST_HEAD_RCU(&sensor->sensor_list);
INIT_LIST_HEAD_RCU(&sensor->threshold_list);
@@ -2471,6 +2474,8 @@ void thermal_zone_device_unregister(struct thermal_zone_device *tz)
thermal_remove_hwmon_sysfs(tz);
flush_work(&tz->sensor.work);
+ tz->sensor.deregister_active = true;
+ complete(&tz->sensor.sysfs_notify_complete);
kthread_stop(tz->sensor.sysfs_notify_thread);
mutex_lock(&thermal_list_lock);
list_del_rcu(&tz->sensor.sensor_list);
diff --git a/drivers/uio/msm_sharedmem/sharedmem_qmi.c b/drivers/uio/msm_sharedmem/sharedmem_qmi.c
index 48fb17ecdd95..fd95deeb6133 100644
--- a/drivers/uio/msm_sharedmem/sharedmem_qmi.c
+++ b/drivers/uio/msm_sharedmem/sharedmem_qmi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -223,6 +223,7 @@ static int sharedmem_qmi_req_cb(struct qmi_handle *handle, void *conn_h,
#define DEBUG_BUF_SIZE (2048)
static char *debug_buffer;
static u32 debug_data_size;
+static struct mutex dbg_buf_lock; /* mutex for debug_buffer */
static ssize_t debug_read(struct file *file, char __user *buf,
size_t count, loff_t *file_pos)
@@ -279,21 +280,29 @@ static int debug_open(struct inode *inode, struct file *file)
{
u32 buffer_size;
- if (debug_buffer != NULL)
+ mutex_lock(&dbg_buf_lock);
+ if (debug_buffer != NULL) {
+ mutex_unlock(&dbg_buf_lock);
return -EBUSY;
+ }
buffer_size = DEBUG_BUF_SIZE;
debug_buffer = kzalloc(buffer_size, GFP_KERNEL);
- if (debug_buffer == NULL)
+ if (debug_buffer == NULL) {
+ mutex_unlock(&dbg_buf_lock);
return -ENOMEM;
+ }
debug_data_size = fill_debug_info(debug_buffer, buffer_size);
+ mutex_unlock(&dbg_buf_lock);
return 0;
}
static int debug_close(struct inode *inode, struct file *file)
{
+ mutex_lock(&dbg_buf_lock);
kfree(debug_buffer);
debug_buffer = NULL;
debug_data_size = 0;
+ mutex_unlock(&dbg_buf_lock);
return 0;
}
@@ -324,6 +333,7 @@ static void debugfs_init(void)
{
struct dentry *f_ent;
+ mutex_init(&dbg_buf_lock);
dir_ent = debugfs_create_dir("rmt_storage", NULL);
if (IS_ERR(dir_ent)) {
pr_err("Failed to create debug_fs directory\n");
@@ -352,6 +362,7 @@ static void debugfs_init(void)
static void debugfs_exit(void)
{
debugfs_remove_recursive(dir_ent);
+ mutex_destroy(&dbg_buf_lock);
}
static void sharedmem_qmi_svc_recv_msg(struct work_struct *work)
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 8c80a8d80ac9..9473e7e31eaa 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -268,6 +268,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
struct ipa_req_chan_out_params ipa_in_channel_out_params;
struct ipa_req_chan_out_params ipa_out_channel_out_params;
+ log_event_dbg("%s: USB GSI IN OPS", __func__);
usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
GSI_EP_OP_PREPARE_TRBS);
usb_gsi_ep_op(d_port->in_ep, &d_port->in_request,
@@ -279,6 +280,8 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
gsi_channel_info.ch_req = &d_port->in_request;
usb_gsi_ep_op(d_port->in_ep, (void *)&gsi_channel_info,
GSI_EP_OP_GET_CH_INFO);
+
+ log_event_dbg("%s: USB GSI IN OPS Completed", __func__);
in_params->client =
(gsi->prot_id != IPA_USB_DIAG) ? IPA_CLIENT_USB_CONS :
IPA_CLIENT_USB_DPL_CONS;
@@ -307,6 +310,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
gsi_channel_info.depcmd_hi_addr;
if (d_port->out_ep) {
+ log_event_dbg("%s: USB GSI OUT OPS", __func__);
usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
GSI_EP_OP_PREPARE_TRBS);
usb_gsi_ep_op(d_port->out_ep, &d_port->out_request,
@@ -318,7 +322,7 @@ static int ipa_connect_channels(struct gsi_data_port *d_port)
gsi_channel_info.ch_req = &d_port->out_request;
usb_gsi_ep_op(d_port->out_ep, (void *)&gsi_channel_info,
GSI_EP_OP_GET_CH_INFO);
-
+ log_event_dbg("%s: USB GSI OUT OPS Completed", __func__);
out_params->client = IPA_CLIENT_USB_PROD;
out_params->ipa_ep_cfg.mode.mode = IPA_BASIC;
out_params->teth_prot = gsi->prot_id;
@@ -1082,9 +1086,9 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
spin_unlock_irqrestore(&c_port->lock, flags);
- ret = gsi_ctrl_send_notification(gsi);
+ if (!gsi_ctrl_send_notification(gsi))
+ c_port->modem_to_host++;
- c_port->modem_to_host++;
log_event_dbg("Exit %zu", count);
return ret ? ret : count;
@@ -1388,33 +1392,18 @@ static int queue_notification_request(struct f_gsi *gsi)
{
int ret;
unsigned long flags;
- struct usb_cdc_notification *event;
- struct gsi_ctrl_pkt *cpkt;
ret = usb_func_ep_queue(&gsi->function, gsi->c_port.notify,
gsi->c_port.notify_req, GFP_ATOMIC);
- if (ret == -ENOTSUPP || (ret < 0 && ret != -EAGAIN)) {
+ if (ret < 0) {
spin_lock_irqsave(&gsi->c_port.lock, flags);
gsi->c_port.notify_req_queued = false;
- /* check if device disconnected while we dropped lock */
- if (atomic_read(&gsi->connected) &&
- !list_empty(&gsi->c_port.cpkt_resp_q)) {
- cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
- struct gsi_ctrl_pkt, list);
- list_del(&cpkt->list);
- log_event_err("%s: drop ctrl pkt of len %d error %d",
- __func__, cpkt->len, ret);
- gsi_ctrl_pkt_free(cpkt);
- }
- gsi->c_port.cpkt_drop_cnt++;
spin_unlock_irqrestore(&gsi->c_port.lock, flags);
- } else {
- ret = 0;
- event = gsi->c_port.notify_req->buf;
- log_event_dbg("%s: Queued Notify type %02x", __func__,
- event->bNotificationType);
}
+ log_event_dbg("%s: ret:%d req_queued:%d",
+ __func__, ret, gsi->c_port.notify_req_queued);
+
return ret;
}
@@ -2537,7 +2526,7 @@ static int gsi_bind(struct usb_configuration *c, struct usb_function *f)
/* export host's Ethernet address in CDC format */
random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
- log_event_dbg("setting host_ethaddr=%pKM, device_ethaddr = %pKM",
+ log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
gsi->d_port.ipa_init_params.host_ethaddr,
gsi->d_port.ipa_init_params.device_ethaddr);
memcpy(gsi->ethaddr, &gsi->d_port.ipa_init_params.host_ethaddr,
@@ -2669,7 +2658,7 @@ static int gsi_bind(struct usb_configuration *c, struct usb_function *f)
/* export host's Ethernet address in CDC format */
random_ether_addr(gsi->d_port.ipa_init_params.device_ethaddr);
random_ether_addr(gsi->d_port.ipa_init_params.host_ethaddr);
- log_event_dbg("setting host_ethaddr=%pKM, device_ethaddr = %pKM",
+ log_event_dbg("setting host_ethaddr=%pM, device_ethaddr = %pM",
gsi->d_port.ipa_init_params.host_ethaddr,
gsi->d_port.ipa_init_params.device_ethaddr);
@@ -3128,7 +3117,7 @@ MODULE_DESCRIPTION("GSI function driver");
static int fgsi_init(void)
{
ipa_usb_wq = alloc_workqueue("k_ipa_usb",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 1);
if (!ipa_usb_wq) {
log_event_err("Failed to create workqueue for IPA");
return -ENOMEM;
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 97d86b6ac69b..e309dec68a75 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2289,8 +2289,11 @@ reset:
}
common->running = 0;
- if (!new_fsg || rc)
+ if (!new_fsg || rc) {
+ /* allow usb LPM after eps are disabled */
+ usb_gadget_autopm_put_async(common->gadget);
return rc;
+ }
common->fsg = new_fsg;
fsg = common->fsg;
@@ -2333,6 +2336,9 @@ reset:
bh->outreq->complete = bulk_out_complete;
}
+ /* prevents usb LPM until thread runs to completion */
+ usb_gadget_autopm_get_noresume(common->gadget);
+
common->running = 1;
for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
if (common->luns[i])
diff --git a/drivers/usb/gadget/function/f_qc_ecm.c b/drivers/usb/gadget/function/f_qc_ecm.c
index 005a8d1f8747..d96f727b2da4 100644
--- a/drivers/usb/gadget/function/f_qc_ecm.c
+++ b/drivers/usb/gadget/function/f_qc_ecm.c
@@ -1134,7 +1134,7 @@ ecm_qc_bind_config(struct usb_configuration *c, u8 ethaddr[ETH_ALEN],
if (ecm->xport != USB_GADGET_XPORT_BAM2BAM_IPA)
return status;
- pr_debug("setting ecm_ipa, host_ethaddr=%pKM, device_ethaddr=%pKM",
+ pr_debug("setting ecm_ipa, host_ethaddr=%pM, device_ethaddr=%pM",
ipa_params.host_ethaddr, ipa_params.device_ethaddr);
status = ecm_ipa_init(&ipa_params);
if (status) {
diff --git a/drivers/usb/gadget/function/f_qc_rndis.c b/drivers/usb/gadget/function/f_qc_rndis.c
index baffff7e52e9..ac17d0aa46ae 100644
--- a/drivers/usb/gadget/function/f_qc_rndis.c
+++ b/drivers/usb/gadget/function/f_qc_rndis.c
@@ -1198,7 +1198,7 @@ usb_function *rndis_qc_bind_config_vendor(struct usb_function_instance *fi,
/* export host's Ethernet address in CDC format */
random_ether_addr(rndis_ipa_params.host_ethaddr);
random_ether_addr(rndis_ipa_params.device_ethaddr);
- pr_debug("setting host_ethaddr=%pKM, device_ethaddr=%pKM\n",
+ pr_debug("setting host_ethaddr=%pM, device_ethaddr=%pM\n",
rndis_ipa_params.host_ethaddr,
rndis_ipa_params.device_ethaddr);
rndis_ipa_supported = true;
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 70c95d0df581..81ce22e91883 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -889,7 +889,7 @@ static int get_ether_addr_str(u8 dev_addr[ETH_ALEN], char *str, int len)
if (len < 18)
return -EINVAL;
- snprintf(str, len, "%pKM", dev_addr);
+ snprintf(str, len, "%pM", dev_addr);
return 18;
}
@@ -971,8 +971,8 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
free_netdev(net);
dev = ERR_PTR(status);
} else {
- INFO(dev, "MAC %pKM\n", net->dev_addr);
- INFO(dev, "HOST MAC %pKM\n", dev->host_mac);
+ INFO(dev, "MAC %pM\n", net->dev_addr);
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
/*
* two kinds of host-initiated state changes:
@@ -1040,7 +1040,7 @@ int gether_register_netdev(struct net_device *net)
dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
return status;
} else {
- INFO(dev, "HOST MAC %pKM\n", dev->host_mac);
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
/* two kinds of host-initiated state changes:
* - iff DATA transfer is active, carrier is "on"
@@ -1056,7 +1056,7 @@ int gether_register_netdev(struct net_device *net)
if (status)
pr_warn("cannot set self ethernet address: %d\n", status);
else
- INFO(dev, "MAC %pKM\n", dev->dev_mac);
+ INFO(dev, "MAC %pM\n", dev->dev_mac);
return status;
}
@@ -1124,7 +1124,7 @@ int gether_get_host_addr_cdc(struct net_device *net, char *host_addr, int len)
return -EINVAL;
dev = netdev_priv(net);
- snprintf(host_addr, len, "%pKm", dev->host_mac);
+ snprintf(host_addr, len, "%pM", dev->host_mac);
return strlen(host_addr);
}
diff --git a/drivers/usb/gadget/function/u_qc_ether.c b/drivers/usb/gadget/function/u_qc_ether.c
index 2fde4850f8cd..bacaf52f42d9 100644
--- a/drivers/usb/gadget/function/u_qc_ether.c
+++ b/drivers/usb/gadget/function/u_qc_ether.c
@@ -317,8 +317,8 @@ int gether_qc_setup_name(struct usb_gadget *g, u8 ethaddr[ETH_ALEN],
dev_dbg(&g->dev, "register_netdev failed, %d\n", status);
free_netdev(net);
} else {
- INFO(dev, "MAC %pKM\n", net->dev_addr);
- INFO(dev, "HOST MAC %pKM\n", dev->host_mac);
+ INFO(dev, "MAC %pM\n", net->dev_addr);
+ INFO(dev, "HOST MAC %pM\n", dev->host_mac);
}
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index 087cd9fecfe9..935bd0778bfb 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -665,7 +665,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
rx_msg->type = PD_MSG_HDR_TYPE(header);
rx_msg->len = PD_MSG_HDR_COUNT(header);
- memcpy(&rx_msg->payload, buf, len);
+ memcpy(&rx_msg->payload, buf, min(len, sizeof(rx_msg->payload)));
spin_lock_irqsave(&pd->rx_lock, flags);
list_add_tail(&rx_msg->entry, &pd->rx_q);
@@ -1931,6 +1931,11 @@ static void usbpd_sm(struct work_struct *w)
case PE_SNK_SELECT_CAPABILITY:
if (IS_CTRL(rx_msg, MSG_ACCEPT)) {
+ u32 pdo = pd->received_pdos[pd->requested_pdo - 1];
+ bool same_pps = (pd->selected_pdo == pd->requested_pdo)
+ && (PD_SRC_PDO_TYPE(pdo) ==
+ PD_SRC_PDO_TYPE_AUGMENTED);
+
usbpd_set_state(pd, PE_SNK_TRANSITION_SINK);
/* prepare for voltage increase/decrease */
@@ -1942,11 +1947,12 @@ static void usbpd_sm(struct work_struct *w)
&val);
/*
- * if we are changing voltages, we must lower input
- * current to pSnkStdby (2.5W). Calculate it and set
- * PD_CURRENT_MAX accordingly.
+ * if changing voltages (not within the same PPS PDO),
+ * we must lower input current to pSnkStdby (2.5W).
+ * Calculate it and set PD_CURRENT_MAX accordingly.
*/
- if (pd->requested_voltage != pd->current_voltage) {
+ if (!same_pps &&
+ pd->requested_voltage != pd->current_voltage) {
int mv = max(pd->requested_voltage,
pd->current_voltage) / 1000;
val.intval = (2500000 / mv) * 1000;
diff --git a/drivers/usb/phy/phy-msm-qusb.c b/drivers/usb/phy/phy-msm-qusb.c
index 6430e419fc9c..5867c6c204c9 100644
--- a/drivers/usb/phy/phy-msm-qusb.c
+++ b/drivers/usb/phy/phy-msm-qusb.c
@@ -72,6 +72,7 @@
#define QUSB2PHY_PORT_TUNE2 0x84
#define QUSB2PHY_PORT_TUNE3 0x88
#define QUSB2PHY_PORT_TUNE4 0x8C
+#define QUSB2PHY_PORT_TUNE5 0x90
/* In case Efuse register shows zero, use this value */
#define TUNE2_DEFAULT_HIGH_NIBBLE 0xB
@@ -102,10 +103,27 @@
#define QUSB2PHY_REFCLK_ENABLE BIT(0)
+unsigned int tune1;
+module_param(tune1, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune1, "QUSB PHY TUNE1");
+
unsigned int tune2;
module_param(tune2, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(tune2, "QUSB PHY TUNE2");
+unsigned int tune3;
+module_param(tune3, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune3, "QUSB PHY TUNE3");
+
+unsigned int tune4;
+module_param(tune4, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune4, "QUSB PHY TUNE4");
+
+unsigned int tune5;
+module_param(tune5, uint, S_IRUGO | S_IWUSR);
+MODULE_PARM_DESC(tune5, "QUSB PHY TUNE5");
+
+
struct qusb_phy {
struct usb_phy phy;
void __iomem *base;
@@ -562,13 +580,29 @@ static int qusb_phy_init(struct usb_phy *phy)
qphy->base + QUSB2PHY_PORT_TUNE2);
}
- /* If tune2 modparam set, override tune2 value */
- if (tune2) {
- pr_debug("%s(): (modparam) TUNE2 val:0x%02x\n",
- __func__, tune2);
+ /* If tune modparam set, override tune value */
+
+ pr_debug("%s():userspecified modparams TUNEX val:0x%x %x %x %x %x\n",
+ __func__, tune1, tune2, tune3, tune4, tune5);
+ if (tune1)
+ writel_relaxed(tune1,
+ qphy->base + QUSB2PHY_PORT_TUNE1);
+
+ if (tune2)
writel_relaxed(tune2,
qphy->base + QUSB2PHY_PORT_TUNE2);
- }
+
+ if (tune3)
+ writel_relaxed(tune3,
+ qphy->base + QUSB2PHY_PORT_TUNE3);
+
+ if (tune4)
+ writel_relaxed(tune4,
+ qphy->base + QUSB2PHY_PORT_TUNE4);
+
+ if (tune5)
+ writel_relaxed(tune5,
+ qphy->base + QUSB2PHY_PORT_TUNE5);
/* ensure above writes are completed before re-enabling PHY */
wmb();
diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig
index ef5c96214c19..03ee89ad0d99 100644
--- a/drivers/video/fbdev/msm/Kconfig
+++ b/drivers/video/fbdev/msm/Kconfig
@@ -63,6 +63,7 @@ config FB_MSM_MDSS_WRITEBACK
config FB_MSM_MDSS_HDMI_PANEL
depends on FB_MSM_MDSS
+ select MSM_EXT_DISPLAY
bool "MDSS HDMI Tx Panel"
default n
---help---
@@ -98,6 +99,7 @@ config FB_MSM_MDSS_DSI_CTRL_STATUS
config FB_MSM_MDSS_DP_PANEL
depends on FB_MSM_MDSS
+ select MSM_EXT_DISPLAY
bool "MDSS DP Panel"
---help---
The MDSS DP Panel provides support for DP host controller driver
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index b905c0e855dd..e101b873f361 100644
--- a/drivers/video/fbdev/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
@@ -49,7 +49,6 @@ obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_aux.o
obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_hdcp2p2.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_io_util.o
-obj-$(CONFIG_FB_MSM_MDSS) += msm_ext_display.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_panel.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp2p2.o
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index 6182aa2c626e..d9a4bd91f3eb 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -27,6 +27,7 @@
#include <linux/msm-bus.h>
#include <linux/file.h>
#include <linux/dma-direction.h>
+#include <soc/qcom/cx_ipeak.h>
#include "mdss_panel.h"
@@ -222,6 +223,7 @@ struct mdss_smmu_client {
struct dss_module_power mp;
struct reg_bus_client *reg_bus_clt;
bool domain_attached;
+ bool domain_reattach;
bool handoff_pending;
void __iomem *mmu_base;
struct list_head _client;
@@ -534,6 +536,7 @@ struct mdss_data_type {
u32 sec_cam_en;
u32 sec_session_cnt;
wait_queue_head_t secure_waitq;
+ struct cx_ipeak_client *mdss_cx_ipeak;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
index 3bcacf945761..5a677dfe7484 100644
--- a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
@@ -449,7 +449,7 @@ static struct attribute *dp_hdcp2p2_fs_attrs[] = {
};
static struct attribute_group dp_hdcp2p2_fs_attr_group = {
- .name = "dp_hdcp2p2",
+ .name = "hdcp2p2",
.attrs = dp_hdcp2p2_fs_attrs,
};
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index a183fd7cd247..db27842eaccc 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -892,6 +892,12 @@ static ssize_t mdss_fb_get_persist_mode(struct device *dev,
return ret;
}
+static ssize_t mdss_fb_idle_pc_notify(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "idle power collapsed\n");
+}
+
static DEVICE_ATTR(msm_fb_type, S_IRUGO, mdss_fb_get_type, NULL);
static DEVICE_ATTR(msm_fb_split, S_IRUGO | S_IWUSR, mdss_fb_show_split,
mdss_fb_store_split);
@@ -912,6 +918,8 @@ static DEVICE_ATTR(measured_fps, S_IRUGO | S_IWUSR | S_IWGRP,
mdss_fb_get_fps_info, NULL);
static DEVICE_ATTR(msm_fb_persist_mode, S_IRUGO | S_IWUSR,
mdss_fb_get_persist_mode, mdss_fb_change_persist_mode);
+static DEVICE_ATTR(idle_power_collapse, S_IRUGO, mdss_fb_idle_pc_notify, NULL);
+
static struct attribute *mdss_fb_attrs[] = {
&dev_attr_msm_fb_type.attr,
&dev_attr_msm_fb_split.attr,
@@ -925,6 +933,7 @@ static struct attribute *mdss_fb_attrs[] = {
&dev_attr_msm_fb_dfps_mode.attr,
&dev_attr_measured_fps.attr,
&dev_attr_msm_fb_persist_mode.attr,
+ &dev_attr_idle_power_collapse.attr,
NULL,
};
@@ -4276,8 +4285,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_2;
}
- sync_fence_install(rel_fence, rel_fen_fd);
-
ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
if (ret) {
pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
@@ -4314,8 +4321,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_3;
}
- sync_fence_install(retire_fence, retire_fen_fd);
-
ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
sizeof(int));
if (ret) {
@@ -4326,6 +4331,9 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_3;
}
+ sync_fence_install(rel_fence, rel_fen_fd);
+ sync_fence_install(retire_fence, retire_fen_fd);
+
skip_retire_fence:
mutex_unlock(&sync_pt_data->sync_mutex);
@@ -4471,7 +4479,7 @@ err:
static int __mdss_fb_copy_destscaler_data(struct fb_info *info,
struct mdp_layer_commit *commit)
{
- int i;
+ int i = 0;
int ret = 0;
u32 data_size;
struct mdp_destination_scaler_data __user *ds_data_user;
@@ -4544,6 +4552,7 @@ static int __mdss_fb_copy_destscaler_data(struct fb_info *info,
data_size);
if (ret) {
pr_err("scale data copy from user failed\n");
+ kfree(scale_data);
goto err;
}
}
@@ -4553,7 +4562,7 @@ static int __mdss_fb_copy_destscaler_data(struct fb_info *info,
err:
if (ds_data) {
- for (i = 0; i < commit->commit_v1.dest_scaler_cnt; i++) {
+ for (i--; i >= 0; i--) {
scale_data = to_user_ptr(ds_data[i].scale);
kfree(scale_data);
}
@@ -5177,3 +5186,16 @@ void mdss_fb_calc_fps(struct msm_fb_data_type *mfd)
mfd->fps_info.frame_count = 0;
}
}
+
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd)
+{
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+ if (mdss_fb_is_power_off(mfd))
+ return;
+
+ if ((mfd->panel_info->type == MIPI_CMD_PANEL) && mdp5_data) {
+ pr_debug("Notify fb%d idle power collapsed\n", mfd->index);
+ sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_power_collapse");
+ }
+}
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
index 111d7cfc7c9a..d64580a35775 100644
--- a/drivers/video/fbdev/msm/mdss_fb.h
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -468,4 +468,5 @@ void mdss_fb_report_panel_dead(struct msm_fb_data_type *mfd);
void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
struct fb_var_screeninfo *var);
void mdss_fb_calc_fps(struct msm_fb_data_type *mfd);
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd);
#endif /* MDSS_FB_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index d8d11f21f3b2..3cadfa4ecef7 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1173,6 +1173,31 @@ irqreturn_t mdss_mdp_isr(int irq, void *ptr)
return IRQ_HANDLED;
}
+static void mdss_mdp_cxipeak_vote(bool set_vote, unsigned long new_rate,
+ unsigned long prev_rate)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int ret = 0;
+
+ if (!mdata->mdss_cx_ipeak)
+ return;
+
+ /* fmax threshold for mdp in sdm660 is max MDP clk */
+ if (set_vote) {
+ if ((new_rate >= mdata->max_mdp_clk_rate) &&
+ (prev_rate < mdata->max_mdp_clk_rate))
+ ret = cx_ipeak_update(mdata->mdss_cx_ipeak, true);
+ } else {
+ if ((new_rate < mdata->max_mdp_clk_rate) &&
+ (prev_rate >= mdata->max_mdp_clk_rate))
+ ret = cx_ipeak_update(mdata->mdss_cx_ipeak, false);
+ }
+ if (ret) {
+ pr_err("cxipeak api fail ret:%d set_vote :%d new_rate:%lu prev_rate:%lu\n",
+ ret, (int)set_vote, new_rate, prev_rate);
+ }
+}
+
static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
{
int ret = -ENODEV;
@@ -1234,7 +1259,7 @@ void mdss_mdp_set_clk_rate(unsigned long rate, bool locked)
struct mdss_data_type *mdata = mdss_res;
unsigned long clk_rate;
struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
- unsigned long min_clk_rate;
+ unsigned long min_clk_rate, curr_clk_rate;
min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
@@ -1246,15 +1271,20 @@ void mdss_mdp_set_clk_rate(unsigned long rate, bool locked)
clk_rate = clk_round_rate(clk, min_clk_rate);
else
clk_rate = mdata->max_mdp_clk_rate;
+
+ curr_clk_rate = clk_get_rate(clk);
if (IS_ERR_VALUE(clk_rate)) {
pr_err("unable to round rate err=%ld\n", clk_rate);
- } else if (clk_rate != clk_get_rate(clk)) {
-
+ } else if (clk_rate != curr_clk_rate) {
+ mdss_mdp_cxipeak_vote(true, clk_rate, curr_clk_rate);
mdata->mdp_clk_rate = clk_rate;
- if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate)))
+ if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate))) {
pr_err("clk_set_rate failed\n");
- else
+ } else {
+ mdss_mdp_cxipeak_vote(false, clk_rate,
+ curr_clk_rate);
pr_debug("mdp clk rate=%lu\n", clk_rate);
+ }
}
if (!locked)
mutex_unlock(&mdp_clk_lock);
@@ -1355,6 +1385,68 @@ static inline void __mdss_mdp_reg_access_clk_enable(
}
}
+/*
+ * __mdss_mdp_clk_control - Overall MDSS clock control for power on/off
+ */
+static void __mdss_mdp_clk_control(struct mdss_data_type *mdata, bool enable)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (enable) {
+ pm_runtime_get_sync(&mdata->pdev->dev);
+
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_LOW);
+
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc))
+ pr_err("IOMMU attach failed\n");
+
+ /* Active+Sleep */
+ msm_bus_scale_client_update_context(mdata->bus_hdl,
+ false, mdata->curr_bw_uc_idx);
+
+ spin_lock_irqsave(&mdp_lock, flags);
+ mdata->clk_ena = enable;
+ spin_unlock_irqrestore(&mdp_lock, flags);
+
+ mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 1);
+ mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 1);
+ if (mdata->vsync_ena)
+ mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 1);
+ } else {
+ spin_lock_irqsave(&mdp_lock, flags);
+ mdata->clk_ena = enable;
+ spin_unlock_irqrestore(&mdp_lock, flags);
+
+ if (mdata->vsync_ena)
+ mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 0);
+
+ mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 0);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
+ mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
+ mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 0);
+
+ /* release iommu control */
+ mdss_iommu_ctrl(0);
+
+ /* Active-Only */
+ msm_bus_scale_client_update_context(mdata->bus_hdl,
+ true, mdata->ao_bw_uc_idx);
+
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_DISABLE);
+
+ pm_runtime_mark_last_busy(&mdata->pdev->dev);
+ pm_runtime_put_autosuspend(&mdata->pdev->dev);
+ }
+}
+
int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
{
int rc = 0;
@@ -1646,9 +1738,7 @@ void mdss_mdp_clk_ctrl(int enable)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
static int mdp_clk_cnt;
- unsigned long flags;
int changed = 0;
- int rc = 0;
mutex_lock(&mdp_clk_lock);
if (enable) {
@@ -1672,49 +1762,8 @@ void mdss_mdp_clk_ctrl(int enable)
__builtin_return_address(0), current->group_leader->comm,
mdata->bus_ref_cnt, changed, enable);
- if (changed) {
- if (enable) {
- pm_runtime_get_sync(&mdata->pdev->dev);
-
- mdss_update_reg_bus_vote(mdata->reg_bus_clt,
- VOTE_INDEX_LOW);
-
- rc = mdss_iommu_ctrl(1);
- if (IS_ERR_VALUE(rc))
- pr_err("IOMMU attach failed\n");
-
- /* Active+Sleep */
- msm_bus_scale_client_update_context(mdata->bus_hdl,
- false, mdata->curr_bw_uc_idx);
- }
-
- spin_lock_irqsave(&mdp_lock, flags);
- mdata->clk_ena = enable;
- spin_unlock_irqrestore(&mdp_lock, flags);
-
- mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, enable);
- mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
- mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
- mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
- mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
- if (mdata->vsync_ena)
- mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
-
- if (!enable) {
- /* release iommu control */
- mdss_iommu_ctrl(0);
-
- /* Active-Only */
- msm_bus_scale_client_update_context(mdata->bus_hdl,
- true, mdata->ao_bw_uc_idx);
-
- mdss_update_reg_bus_vote(mdata->reg_bus_clt,
- VOTE_INDEX_DISABLE);
-
- pm_runtime_mark_last_busy(&mdata->pdev->dev);
- pm_runtime_put_autosuspend(&mdata->pdev->dev);
- }
- }
+ if (changed)
+ __mdss_mdp_clk_control(mdata, enable);
if (enable && changed)
mdss_mdp_idle_pc_restore();
@@ -4519,6 +4568,10 @@ static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
pr_debug("max pipe width not specified. Using default value\n");
mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH;
}
+
+ if (of_find_property(pdev->dev.of_node, "qcom,mdss-cx-ipeak", NULL))
+ mdata->mdss_cx_ipeak = cx_ipeak_register(pdev->dev.of_node,
+ "qcom,mdss-cx-ipeak");
return 0;
}
@@ -5092,6 +5145,22 @@ vreg_set_voltage_fail:
}
/**
+ * mdss_mdp_notify_idle_pc() - Notify fb driver of idle power collapse
+ * @mdata: MDP private data
+ *
+ * This function is called if there are active overlays.
+ */
+static void mdss_mdp_notify_idle_pc(struct mdss_data_type *mdata)
+{
+ int i;
+
+ for (i = 0; i < mdata->nctl; i++)
+ if ((mdata->ctl_off[i].ref_cnt) &&
+ !mdss_mdp_ctl_is_power_off(&mdata->ctl_off[i]))
+ mdss_fb_idle_pc(mdata->ctl_off[i].mfd);
+}
+
+/**
* mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
* @mdata: MDP private data
* @on: 1 to turn on footswitch, 0 to turn off footswitch
@@ -5155,6 +5224,7 @@ static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
PERIPH_RETAIN_OFF);
mdata->idle_pc = true;
+ mdss_mdp_notify_idle_pc(mdata);
pr_debug("idle pc. active overlays=%d\n",
active_cnt);
} else {
@@ -5487,6 +5557,8 @@ static int mdss_mdp_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
mdss_mdp_pp_term(&pdev->dev);
mdss_mdp_bus_scale_unregister(mdata);
+ if (mdata->mdss_cx_ipeak)
+ cx_ipeak_unregister(mdata->mdss_cx_ipeak);
mdss_debugfs_remove(mdata);
if (mdata->regulator_notif_register)
regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb));
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 5e98de043e55..36a866685f21 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -1842,7 +1842,7 @@ int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
int mdss_mdp_pipe_handoff(struct mdss_mdp_pipe *pipe);
int mdss_mdp_smp_handoff(struct mdss_data_type *mdata);
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
- u32 type, struct mdss_mdp_pipe *left_blend_pipe);
+ u32 off, u32 type, struct mdss_mdp_pipe *left_blend_pipe);
struct mdss_mdp_pipe *mdss_mdp_pipe_get(u32 ndx,
enum mdss_mdp_pipe_rect rect_num);
struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index ec37cd1d5bb0..7b0207de101a 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -742,7 +742,8 @@ int mdss_mdp_get_pipe_overlap_bw(struct mdss_mdp_pipe *pipe,
*quota = fps * src.w * src_h;
- if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420)
+ if (pipe->src_fmt->chroma_sample == MDSS_MDP_CHROMA_420 &&
+ pipe->src_fmt->format != MDP_Y_CBCR_H2V2_TP10_UBWC)
/*
* with decimation, chroma is not downsampled, this means we
* need to allocate bw for extra lines that will be fetched
@@ -891,7 +892,7 @@ static u32 __calc_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
{
- u32 vbp_min = 0;
+ u32 vbp_min = UINT_MAX;
int i;
struct mdss_data_type *mdata;
@@ -913,6 +914,9 @@ static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
}
}
+ if (vbp_min == UINT_MAX)
+ vbp_min = 0;
+
return vbp_min;
}
@@ -4769,6 +4773,8 @@ static void __mdss_mdp_mixer_get_offsets(u32 mixer_num,
static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+
/*
* mapping to hardware expectation of actual mixer programming to
* happen on following registers:
@@ -4776,6 +4782,11 @@ static inline int __mdss_mdp_mixer_get_hw_num(struct mdss_mdp_mixer *mixer)
* WB: 3, 4
* With some exceptions on certain revisions
*/
+
+ if (mdata->mdp_rev == MDSS_MDP_HW_REV_330
+ && mixer->num == MDSS_MDP_INTF_LAYERMIXER1)
+ return MDSS_MDP_INTF_LAYERMIXER2;
+
if (mixer->type == MDSS_MDP_MIXER_TYPE_WRITEBACK) {
u32 wb_offset;
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
index c97a58c86c29..dfd6226ce602 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_cmd.c
@@ -2659,10 +2659,11 @@ static void mdss_mdp_cmd_wait4_autorefresh_done(struct mdss_mdp_ctl *ctl)
struct mdss_mdp_cmd_ctx *ctx = ctl->intf_ctx[MASTER_CTX];
unsigned long flags;
unsigned long autorefresh_timeout;
+ u32 timeout_us = 20000;
line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
- MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h);
+ MDSS_XLOG(ctl->num, line_out, ctl->mixer_left->roi.h, 0x111);
reinit_completion(&ctx->autorefresh_done);
@@ -2721,6 +2722,26 @@ static void mdss_mdp_cmd_wait4_autorefresh_done(struct mdss_mdp_ctl *ctl)
"dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
"dbg_bus", "vbif_dbg_bus", "panic");
}
+
+ /* once autorefresh is done, wait for write pointer */
+ line_out = mdss_mdp_pingpong_read(pp_base, MDSS_MDP_REG_PP_LINE_COUNT);
+ MDSS_XLOG(ctl->num, line_out, 0x222);
+
+ /* wait until the first line is out to make sure transfer is on-going */
+ rc = readl_poll_timeout(pp_base +
+ MDSS_MDP_REG_PP_LINE_COUNT, val,
+ (val & 0xffff) >= 1, 10, timeout_us);
+
+ if (rc) {
+ pr_err("timed out waiting for line out ctl:%d val:0x%x\n",
+ ctl->num, val);
+ MDSS_XLOG(0xbad5, val);
+ MDSS_XLOG_TOUT_HANDLER("mdp", "dsi0_ctrl", "dsi0_phy",
+ "dsi1_ctrl", "dsi1_phy", "vbif", "vbif_nrt",
+ "dbg_bus", "vbif_dbg_bus", "panic");
+ }
+
+ MDSS_XLOG(val, 0x333);
}
/* caller needs to hold autorefresh_lock before calling this function */
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index 663d63092ebf..5173567a3420 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -1896,7 +1896,6 @@ static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_format_params *fmt;
struct mdss_data_type *mdata = ctl->mdata;
struct dsc_desc *dsc = NULL;
- u32 hdmi_dp_core;
ctx->ctl = ctl;
ctx->intf_type = ctl->intf_type;
@@ -2033,10 +2032,19 @@ static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl,
mdp_video_write(ctx, MDSS_MDP_REG_INTF_PANEL_FORMAT, ctl->dst_format);
- hdmi_dp_core = (ctx->intf_type == MDSS_INTF_EDP) ? 1 : 0;
-
- writel_relaxed(hdmi_dp_core, mdata->mdp_base +
+ /* select HDMI or DP core usage */
+ switch (ctx->intf_type) {
+ case MDSS_INTF_EDP:
+ writel_relaxed(0x1, mdata->mdp_base +
+ MDSS_MDP_HDMI_DP_CORE_SELECT);
+ break;
+ case MDSS_INTF_HDMI:
+ writel_relaxed(0x0, mdata->mdp_base +
MDSS_MDP_HDMI_DP_CORE_SELECT);
+ break;
+ default:
+ break;
+ }
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index 1a9b09ca6988..5d80c80ebcef 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -609,6 +609,7 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
bool is_vig_needed = false;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
u32 flags = 0;
+ u32 off = 0;
if (mdp5_data->ctl == NULL)
return -ENODEV;
@@ -692,18 +693,29 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
break;
case PIPE_TYPE_AUTO:
default:
- if (req->flags & MDP_OV_PIPE_FORCE_DMA)
+ if (req->flags & MDP_OV_PIPE_FORCE_DMA) {
pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
- else if (fmt->is_yuv ||
+ /*
+ * For paths using legacy API's for pipe
+ * allocation, use offset of 2 for allocating
+ * right pipe for pipe type DMA. This is
+ * because from SDM 3.x.x. onwards one DMA
+ * pipe has two instances for multirect.
+ */
+ off = (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+ ? 2 : 0;
+ } else if (fmt->is_yuv ||
(req->flags & MDP_OV_PIPE_SHARE) ||
- is_vig_needed)
+ is_vig_needed) {
pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
- else
+ } else {
pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+ }
break;
}
- pipe = mdss_mdp_pipe_alloc(mixer, pipe_type, left_blend_pipe);
+ pipe = mdss_mdp_pipe_alloc(mixer, off,
+ pipe_type, left_blend_pipe);
/* RGB pipes can be used instead of DMA */
if (IS_ERR_OR_NULL(pipe) &&
@@ -712,7 +724,7 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
pr_debug("giving RGB pipe for fb%d. flags:0x%x\n",
mfd->index, req->flags);
pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
- pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+ pipe = mdss_mdp_pipe_alloc(mixer, off, pipe_type,
left_blend_pipe);
}
@@ -723,7 +735,7 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
pr_debug("giving ViG pipe for fb%d. flags:0x%x\n",
mfd->index, req->flags);
pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
- pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+ pipe = mdss_mdp_pipe_alloc(mixer, off, pipe_type,
left_blend_pipe);
}
@@ -2765,6 +2777,7 @@ static int mdss_mdp_overlay_get_fb_pipe(struct msm_fb_data_type *mfd,
bpp = fbi->var.bits_per_pixel / 8;
req->id = MSMFB_NEW_REQUEST;
+ req->flags |= MDP_OV_PIPE_FORCE_DMA;
req->src.format = mfd->fb_imgType;
req->src.height = fbi->var.yres;
req->src.width = fbi->fix.line_length / bpp;
@@ -2878,7 +2891,7 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
MDSS_MDP_MIXER_MUX_LEFT, &l_pipe_allocated);
if (ret) {
pr_err("unable to allocate base pipe\n");
- goto iommu_disable;
+ goto pipe_release;
}
if (mdss_mdp_pipe_map(l_pipe)) {
@@ -2889,20 +2902,20 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
ret = mdss_mdp_overlay_start(mfd);
if (ret) {
pr_err("unable to start overlay %d (%d)\n", mfd->index, ret);
- goto clk_disable;
+ goto pipe_release;
}
ret = mdss_iommu_ctrl(1);
if (IS_ERR_VALUE(ret)) {
pr_err("IOMMU attach failed\n");
- goto clk_disable;
+ goto iommu_disable;
}
buf_l = __mdp_overlay_buf_alloc(mfd, l_pipe);
if (!buf_l) {
pr_err("unable to allocate memory for fb buffer\n");
mdss_mdp_pipe_unmap(l_pipe);
- goto pipe_release;
+ goto iommu_disable;
}
buf_l->p[0].srcp_table = mfd->fb_table;
@@ -2925,19 +2938,19 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
MDSS_MDP_MIXER_MUX_RIGHT, &r_pipe_allocated);
if (ret) {
pr_err("unable to allocate right base pipe\n");
- goto pipe_release;
+ goto iommu_disable;
}
if (mdss_mdp_pipe_map(r_pipe)) {
pr_err("unable to map right base pipe\n");
- goto pipe_release;
+ goto iommu_disable;
}
buf_r = __mdp_overlay_buf_alloc(mfd, r_pipe);
if (!buf_r) {
pr_err("unable to allocate memory for fb buffer\n");
mdss_mdp_pipe_unmap(r_pipe);
- goto pipe_release;
+ goto iommu_disable;
}
buf_r->p[0] = buf_l->p[0];
@@ -2955,6 +2968,9 @@ static void mdss_mdp_overlay_pan_display(struct msm_fb_data_type *mfd)
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
return;
+iommu_disable:
+ mdss_iommu_ctrl(0);
+
pipe_release:
if (r_pipe_allocated)
mdss_mdp_overlay_release(mfd, r_pipe->ndx);
@@ -2962,8 +2978,6 @@ pipe_release:
__mdp_overlay_buf_free(mfd, buf_l);
if (l_pipe_allocated)
mdss_mdp_overlay_release(mfd, l_pipe->ndx);
-iommu_disable:
- mdss_iommu_ctrl(0);
clk_disable:
mdss_mdp_clk_ctrl(MDP_BLOCK_POWER_OFF);
mutex_unlock(&mdp5_data->ov_lock);
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 965a6533dfcb..8d7bd60318ad 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -1250,11 +1250,12 @@ cursor_done:
}
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
- u32 type, struct mdss_mdp_pipe *left_blend_pipe)
+ u32 off, u32 type, struct mdss_mdp_pipe *left_blend_pipe)
{
struct mdss_mdp_pipe *pipe;
+
mutex_lock(&mdss_mdp_sspp_lock);
- pipe = mdss_mdp_pipe_init(mixer, type, 0, left_blend_pipe);
+ pipe = mdss_mdp_pipe_init(mixer, type, off, left_blend_pipe);
mutex_unlock(&mdss_mdp_sspp_lock);
return pipe;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pp.c b/drivers/video/fbdev/msm/mdss_mdp_pp.c
index 38bff6b8edee..1fe8fe6f7be8 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pp.c
@@ -5319,8 +5319,8 @@ static int pp_hist_collect(struct mdp_histogram_data *hist,
pr_err("failed to get the hist data, sum = %d\n", sum);
ret = sum;
} else if (expect_sum && sum != expect_sum) {
- pr_err("hist error: bin sum incorrect! (%d/%d)\n",
- sum, expect_sum);
+ pr_err_ratelimited("hist error: bin sum incorrect! (%d/%d)\n",
+ sum, expect_sum);
ret = -EINVAL;
}
hist_collect_exit:
@@ -5390,8 +5390,8 @@ int mdss_mdp_hist_collect(struct mdp_histogram_data *hist)
ret = pp_hist_collect(hist, hists[i], ctl_base,
exp_sum, DSPP);
if (ret)
- pr_err("hist error: dspp[%d] collect %d\n",
- dspp_num, ret);
+ pr_err_ratelimited("hist error: dspp[%d] collect %d\n",
+ dspp_num, ret);
}
/* state of dspp histogram blocks attached to logical display
* should be changed atomically to idle. This will ensure that
diff --git a/drivers/video/fbdev/msm/mdss_smmu.c b/drivers/video/fbdev/msm/mdss_smmu.c
index 6930444118b6..62e25500060e 100644
--- a/drivers/video/fbdev/msm/mdss_smmu.c
+++ b/drivers/video/fbdev/msm/mdss_smmu.c
@@ -299,7 +299,6 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
for (i = 0; i < MDSS_IOMMU_MAX_DOMAIN; i++) {
if (!mdss_smmu_is_valid_domain_type(mdata, i))
continue;
-
mdss_smmu = mdss_smmu_get_cb(i);
if (mdss_smmu && mdss_smmu->base.dev) {
if (!mdss_smmu->handoff_pending) {
@@ -326,6 +325,14 @@ static int mdss_smmu_attach_v2(struct mdss_data_type *mdata)
goto err;
}
mdss_smmu->domain_attached = true;
+ if (mdss_smmu->domain_reattach) {
+ pr_debug("iommu v2 domain[%i] remove extra vote\n",
+ i);
+ /* remove extra power vote */
+ mdss_smmu_enable_power(mdss_smmu,
+ false);
+ mdss_smmu->domain_reattach = false;
+ }
pr_debug("iommu v2 domain[%i] attached\n", i);
}
} else {
@@ -379,6 +386,11 @@ static int mdss_smmu_detach_v2(struct mdss_data_type *mdata)
*/
arm_iommu_detach_device(mdss_smmu->base.dev);
mdss_smmu->domain_attached = false;
+ /*
+ * since we are leaving clocks on, on
+ * re-attach do not vote for clocks
+ */
+ mdss_smmu->domain_reattach = true;
pr_debug("iommu v2 domain[%i] detached\n", i);
} else {
mdss_smmu_enable_power(mdss_smmu, false);
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 56f7e2521202..8959b320d472 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -30,6 +30,7 @@
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
+#include <linux/mount.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -45,6 +46,10 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
module_param(oom_pages, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
+#ifdef CONFIG_BALLOON_COMPACTION
+static struct vfsmount *balloon_mnt;
+#endif
+
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
@@ -486,6 +491,24 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
return MIGRATEPAGE_SUCCESS;
}
+
+static struct dentry *balloon_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+
+ return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops,
+ BALLOON_KVM_MAGIC);
+}
+
+static struct file_system_type balloon_fs = {
+ .name = "balloon-kvm",
+ .mount = balloon_mount,
+ .kill_sb = kill_anon_super,
+};
+
#endif /* CONFIG_BALLOON_COMPACTION */
static int virtballoon_probe(struct virtio_device *vdev)
@@ -513,9 +536,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->need_stats_update = 0;
balloon_devinfo_init(&vb->vb_dev_info);
-#ifdef CONFIG_BALLOON_COMPACTION
- vb->vb_dev_info.migratepage = virtballoon_migratepage;
-#endif
err = init_vqs(vb);
if (err)
@@ -525,7 +545,27 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
err = register_oom_notifier(&vb->nb);
if (err < 0)
- goto out_oom_notify;
+ goto out_del_vqs;
+
+#ifdef CONFIG_BALLOON_COMPACTION
+ balloon_mnt = kern_mount(&balloon_fs);
+ if (IS_ERR(balloon_mnt)) {
+ err = PTR_ERR(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ goto out_del_vqs;
+ }
+
+ vb->vb_dev_info.migratepage = virtballoon_migratepage;
+ vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
+ if (IS_ERR(vb->vb_dev_info.inode)) {
+ err = PTR_ERR(vb->vb_dev_info.inode);
+ kern_unmount(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ vb->vb_dev_info.inode = NULL;
+ goto out_del_vqs;
+ }
+ vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
+#endif
virtio_device_ready(vdev);
@@ -539,7 +579,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
out_del_vqs:
unregister_oom_notifier(&vb->nb);
-out_oom_notify:
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
@@ -567,6 +606,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
unregister_oom_notifier(&vb->nb);
kthread_stop(vb->thread);
remove_common(vb);
+ if (vb->vb_dev_info.inode)
+ iput(vb->vb_dev_info.inode);
kfree(vb);
}
diff --git a/fs/aio.c b/fs/aio.c
index 81761a4290d5..24493f07fb40 100644
--- a/fs/aio.c
+++ b/fs/aio.c
@@ -256,6 +256,7 @@ static int __init aio_setup(void)
aio_mnt = kern_mount(&aio_fs);
if (IS_ERR(aio_mnt))
panic("Failed to create aio fs mount.");
+ aio_mnt->mnt_flags |= MNT_NOEXEC;
kiocb_cachep = KMEM_CACHE(aio_kiocb, SLAB_HWCACHE_ALIGN|SLAB_PANIC);
kioctx_cachep = KMEM_CACHE(kioctx,SLAB_HWCACHE_ALIGN|SLAB_PANIC);
diff --git a/fs/block_dev.c b/fs/block_dev.c
index 44d4a1e9244e..ac9b7553c02a 100644
--- a/fs/block_dev.c
+++ b/fs/block_dev.c
@@ -1806,6 +1806,7 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
spin_lock(&blockdev_superblock->s_inode_list_lock);
list_for_each_entry(inode, &blockdev_superblock->s_inodes, i_sb_list) {
struct address_space *mapping = inode->i_mapping;
+ struct block_device *bdev;
spin_lock(&inode->i_lock);
if (inode->i_state & (I_FREEING|I_WILL_FREE|I_NEW) ||
@@ -1826,8 +1827,12 @@ void iterate_bdevs(void (*func)(struct block_device *, void *), void *arg)
*/
iput(old_inode);
old_inode = inode;
+ bdev = I_BDEV(inode);
- func(I_BDEV(inode), arg);
+ mutex_lock(&bdev->bd_mutex);
+ if (bdev->bd_openers)
+ func(bdev, arg);
+ mutex_unlock(&bdev->bd_mutex);
spin_lock(&blockdev_superblock->s_inode_list_lock);
}
diff --git a/fs/proc/base.c b/fs/proc/base.c
index fed79c45b54c..67c6dd28d59e 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -1018,15 +1018,20 @@ static ssize_t oom_adj_read(struct file *file, char __user *buf, size_t count,
int oom_adj = OOM_ADJUST_MIN;
size_t len;
unsigned long flags;
+ int mult = 1;
if (!task)
return -ESRCH;
if (lock_task_sighand(task, &flags)) {
- if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX)
+ if (task->signal->oom_score_adj == OOM_SCORE_ADJ_MAX) {
oom_adj = OOM_ADJUST_MAX;
- else
- oom_adj = (task->signal->oom_score_adj * -OOM_DISABLE) /
- OOM_SCORE_ADJ_MAX;
+ } else {
+ if (task->signal->oom_score_adj < 0)
+ mult = -1;
+ oom_adj = roundup(mult * task->signal->oom_score_adj *
+ -OOM_DISABLE, OOM_SCORE_ADJ_MAX) /
+ OOM_SCORE_ADJ_MAX * mult;
+ }
unlock_task_sighand(task, &flags);
}
put_task_struct(task);
diff --git a/include/dt-bindings/clock/msm-clocks-8998.h b/include/dt-bindings/clock/msm-clocks-8998.h
index 42617016188d..cd36374a04a7 100644
--- a/include/dt-bindings/clock/msm-clocks-8998.h
+++ b/include/dt-bindings/clock/msm-clocks-8998.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -105,7 +105,6 @@
#define clk_gcc_gpu_gpll0_div_clk 0x07d16c6a
#define clk_gpll4 0xb3b5d85b
#define clk_gpll4_out_main 0xa9a0ab9d
-#define clk_hmss_ahb_clk_src 0xaec8450f
#define clk_usb30_master_clk_src 0xc6262f89
#define clk_pcie_aux_clk_src 0xebc50566
#define clk_ufs_axi_clk_src 0x297ca380
@@ -201,7 +200,6 @@
#define clk_gcc_gpu_bimc_gfx_clk 0x3909459b
#define clk_gcc_gpu_cfg_ahb_clk 0x72f20a57
#define clk_gcc_gpu_iref_clk 0xfd82abad
-#define clk_gcc_hmss_ahb_clk 0x62818713
#define clk_gcc_hmss_dvm_bus_clk 0x17cc8b53
#define clk_gcc_hmss_rbcpr_clk 0x699183be
#define clk_hmss_gpll0_clk_src 0x17eb05d0
diff --git a/include/dt-bindings/clock/msm-clocks-hwio-8998.h b/include/dt-bindings/clock/msm-clocks-hwio-8998.h
index 8dfa36362e8c..fc42ac3d49a0 100644
--- a/include/dt-bindings/clock/msm-clocks-hwio-8998.h
+++ b/include/dt-bindings/clock/msm-clocks-hwio-8998.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,7 +66,6 @@
#define CLOCK_FRQ_MEASURE_STATUS 0x62008
#define GCC_GPLL0_MODE 0x00000
#define GCC_GPLL4_MODE 0x77000
-#define GCC_HMSS_AHB_CMD_RCGR 0x48014
#define GCC_USB30_MASTER_CMD_RCGR 0x0F014
#define GCC_PCIE_AUX_CMD_RCGR 0x6C000
#define GCC_UFS_AXI_CMD_RCGR 0x75018
@@ -169,7 +168,6 @@
#define GCC_GPU_BIMC_GFX_CBCR 0x71010
#define GCC_GPU_CFG_AHB_CBCR 0x71004
#define GCC_GPU_IREF_EN 0x88010
-#define GCC_HMSS_AHB_CBCR 0x48000
#define GCC_HMSS_DVM_BUS_CBCR 0x4808C
#define GCC_HMSS_RBCPR_CBCR 0x48008
#define GCC_MMSS_SYS_NOC_AXI_CBCR 0x09000
diff --git a/include/dt-bindings/clock/qcom,gcc-sdm660.h b/include/dt-bindings/clock/qcom,gcc-sdm660.h
index cd5b78e59c5b..b622a662daa8 100644
--- a/include/dt-bindings/clock/qcom,gcc-sdm660.h
+++ b/include/dt-bindings/clock/qcom,gcc-sdm660.h
@@ -80,7 +80,6 @@
#define GCC_GPU_CFG_AHB_CLK 66
#define GCC_GPU_GPLL0_CLK 67
#define GCC_GPU_GPLL0_DIV_CLK 68
-#define GCC_HMSS_AHB_CLK 70
#define GCC_HMSS_DVM_BUS_CLK 71
#define GCC_HMSS_RBCPR_CLK 72
#define GCC_MMSS_GPLL0_CLK 73
@@ -169,7 +168,6 @@
#define GPLL6_OUT_MAIN 157
#define GPLL6_OUT_TEST 158
#define HLOS1_VOTE_LPASS_ADSP_SMMU_CLK 159
-#define HMSS_AHB_CLK_SRC 160
#define HMSS_GPLL0_CLK_SRC 161
#define HMSS_GPLL4_CLK_SRC 162
#define HMSS_RBCPR_CLK_SRC 163
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 9b0a15d06a4f..79542b2698ec 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -48,6 +48,7 @@
#include <linux/migrate.h>
#include <linux/gfp.h>
#include <linux/err.h>
+#include <linux/fs.h>
/*
* Balloon device information descriptor.
@@ -62,6 +63,7 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
+ struct inode *inode;
};
extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
@@ -73,45 +75,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
+ balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern bool balloon_page_isolate(struct page *page);
+extern const struct address_space_operations balloon_aops;
+extern bool balloon_page_isolate(struct page *page,
+ isolate_mode_t mode);
extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct page *newpage,
+extern int balloon_page_migrate(struct address_space *mapping,
+ struct page *newpage,
struct page *page, enum migrate_mode mode);
/*
- * __is_movable_balloon_page - helper to perform @page PageBalloon tests
- */
-static inline bool __is_movable_balloon_page(struct page *page)
-{
- return PageBalloon(page);
-}
-
-/*
- * balloon_page_movable - test PageBalloon to identify balloon pages
- * and PagePrivate to check that the page is not
- * isolated and can be moved by compaction/migration.
- *
- * As we might return false positives in the case of a balloon page being just
- * released under us, this need to be re-tested later, under the page lock.
- */
-static inline bool balloon_page_movable(struct page *page)
-{
- return PageBalloon(page) && PagePrivate(page);
-}
-
-/*
- * isolated_balloon_page - identify an isolated balloon page on private
- * compaction/migration page lists.
- */
-static inline bool isolated_balloon_page(struct page *page)
-{
- return PageBalloon(page);
-}
-
-/*
* balloon_page_insert - insert a page into the balloon's page list and make
* the page->private assignment accordingly.
* @balloon : pointer to balloon device
@@ -124,7 +100,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageBalloon(page);
- SetPagePrivate(page);
+ __SetPageMovable(page, balloon->inode->i_mapping);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
}
@@ -140,11 +116,14 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
static inline void balloon_page_delete(struct page *page)
{
__ClearPageBalloon(page);
+ __ClearPageMovable(page);
set_page_private(page, 0);
- if (PagePrivate(page)) {
- ClearPagePrivate(page);
+ /*
+ * No touch page.lru field once @page has been isolated
+ * because VM is using the field.
+ */
+ if (!PageIsolated(page))
list_del(&page->lru);
- }
}
/*
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4cd4ddf64cc7..e864751d870a 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -52,6 +52,10 @@ extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
+extern int kcompactd_run(int nid);
+extern void kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+
#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, int alloc_flags,
@@ -84,9 +88,22 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return true;
}
+static inline int kcompactd_run(int nid)
+{
+ return 0;
+}
+static inline void kcompactd_stop(int nid)
+{
+}
+
+static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+}
+
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+struct node;
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 94edbb64f1c6..db16b3fd48c0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -404,6 +404,8 @@ struct address_space_operations {
*/
int (*migratepage) (struct address_space *,
struct page *, struct page *, enum migrate_mode);
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ void (*putback_page)(struct page *);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 1b3f20e8fb74..c4c25651ff21 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -136,6 +136,7 @@ enum iommu_attr {
DOMAIN_ATTR_EARLY_MAP,
DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ DOMAIN_ATTR_ENABLE_TTBR1,
DOMAIN_ATTR_MAX,
};
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 7ae216a39c9e..481c8c4627ca 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -43,8 +43,7 @@ static inline struct stable_node *page_stable_node(struct page *page)
static inline void set_page_stable_node(struct page *page,
struct stable_node *stable_node)
{
- page->mapping = (void *)stable_node +
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
/*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index cac1c0904d5f..5219df44cfec 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -33,6 +33,8 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern void putback_movable_page(struct page *page);
extern int migrate_prep(void);
extern int migrate_prep_local(void);
@@ -65,6 +67,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
+#ifdef CONFIG_COMPACTION
+extern int PageMovable(struct page *page);
+extern void __SetPageMovable(struct page *page, struct address_space *mapping);
+extern void __ClearPageMovable(struct page *page);
+#else
+static inline int PageMovable(struct page *page) { return 0; };
+static inline void __SetPageMovable(struct page *page,
+ struct address_space *mapping)
+{
+}
+static inline void __ClearPageMovable(struct page *page)
+{
+}
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97b11c9fd48a..87a4bb8ed512 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -995,6 +995,7 @@ static inline int page_mapped(struct page *page)
{
return atomic_read(&(page)->_mapcount) >= 0;
}
+struct address_space *page_mapping(struct page *page);
/*
* Return true only if the page has been allocated with
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ddb3b927de39..45037a3a8e8e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -689,6 +689,12 @@ typedef struct pglist_data {
mem_hotplug_begin/end() */
int kswapd_max_order;
enum zone_type classzone_idx;
+#ifdef CONFIG_COMPACTION
+ int kcompactd_max_order;
+ enum zone_type kcompactd_classzone_idx;
+ wait_queue_head_t kcompactd_wait;
+ struct task_struct *kcompactd;
+#endif
#ifdef CONFIG_NUMA_BALANCING
/* Lock serializing the migrate rate limiting window */
spinlock_t numabalancing_migrate_lock;
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 4378080da0d9..d9831d7cbb4e 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -109,6 +109,7 @@ struct msm_ext_disp_intf_ops {
* @get_audio_edid_blk: retrieve audio edid block
* @cable_status: cable connected/disconnected
* @get_intf_id: id of connected interface
+ * @acknowledge: acknowledge audio status
*/
struct msm_ext_disp_audio_codec_ops {
int (*audio_info_setup)(struct platform_device *pdev,
@@ -118,6 +119,7 @@ struct msm_ext_disp_audio_codec_ops {
int (*cable_status)(struct platform_device *pdev, u32 vote);
int (*get_intf_id)(struct platform_device *pdev);
void (*teardown_done)(struct platform_device *pdev);
+ int (*acknowledge)(struct platform_device *pdev, u32 ack);
};
/*
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 6037fbf00a23..d4b4cc7f8737 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -1053,6 +1053,18 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size);
void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
unsigned long *size);
+/**
+ * gsi_halt_channel_ee - Peripheral should call this function
+ * to stop other EE's channel. This is usually used in SSR clean
+ *
+ * @chan_idx: Virtual channel index
+ * @ee: EE
+ * @code: [out] response code for operation
+
+ * @Return gsi_status
+ */
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
+
/*
* Here is a typical sequence of calls
*
@@ -1250,5 +1262,11 @@ static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
unsigned long *size)
{
}
+
+static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
+ int *code)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
#endif
#endif
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 68cfe76e8652..5991485cdea4 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -3,11 +3,32 @@
#include <uapi/linux/msm_kgsl.h>
+#ifdef CONFIG_QCOM_KGSL
/* Limits mitigations APIs */
void *kgsl_pwr_limits_add(enum kgsl_deviceid id);
void kgsl_pwr_limits_del(void *limit);
int kgsl_pwr_limits_set_freq(void *limit, unsigned int freq);
void kgsl_pwr_limits_set_default(void *limit);
unsigned int kgsl_pwr_limits_get_freq(enum kgsl_deviceid id);
+#else
+static inline void *kgsl_pwr_limits_add(enum kgsl_deviceid id)
+{
+ return NULL;
+}
+
+static inline void kgsl_pwr_limits_del(void *limit) { }
+
+static inline int kgsl_pwr_limits_set_freq(void *limit, unsigned int freq)
+{
+ return -EINVAL;
+}
+
+static inline void kgsl_pwr_limits_set_default(void *limit) { }
+
+static inline unsigned int kgsl_pwr_limits_get_freq(enum kgsl_deviceid id)
+{
+ return 0;
+}
+#endif
#endif /* _MSM_KGSL_H */
diff --git a/include/linux/msm_mhi.h b/include/linux/msm_mhi.h
index b8b2226940a4..f8ba31ea7573 100644
--- a/include/linux/msm_mhi.h
+++ b/include/linux/msm_mhi.h
@@ -63,9 +63,10 @@ enum MHI_CLIENT_CHANNEL {
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
- MHI_CLIENT_RESERVED_2_LOWER = 102,
+ MHI_CLIENT_IP_HW_ADPL_IN = 102,
+ MHI_CLIENT_RESERVED_2_LOWER = 103,
MHI_CLIENT_RESERVED_2_UPPER = 127,
- MHI_MAX_CHANNELS = 102
+ MHI_MAX_CHANNELS = 103
};
enum MHI_CB_REASON {
@@ -214,7 +215,7 @@ int mhi_get_max_desc(struct mhi_client_handle *client_handle);
/* RmNET Reserved APIs, This APIs are reserved for use by the linux network
* stack only. Use by other clients will introduce system wide issues
*/
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm);
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm);
int mhi_get_epid(struct mhi_client_handle *mhi_handle);
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle);
void mhi_mask_irq(struct mhi_client_handle *client_handle);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4b115168607f..86c233be1cfc 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -132,6 +132,9 @@ enum pageflags {
/* SLOB */
PG_slob_free = PG_private,
+
+ /* non-lru isolated movable page */
+ PG_isolated = PG_reclaim,
};
#ifndef __GENERATING_BOUNDS_H
@@ -309,25 +312,38 @@ PAGEFLAG(Idle, idle)
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
* On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
+ * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
+ * bit; and then page->mapping points, not to an anon_vma, but to a private
* structure which KSM associates with that merged page. See ksm.h.
*
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
+ * page and then page->mapping points a struct address_space.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
-#define PAGE_MAPPING_ANON 1
-#define PAGE_MAPPING_KSM 2
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+#define PAGE_MAPPING_ANON 0x1
+#define PAGE_MAPPING_MOVABLE 0x2
+#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+
+static __always_inline int PageMappingFlags(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
+}
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}
+static __always_inline int __PageMovable(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_MOVABLE;
+}
+
#ifdef CONFIG_KSM
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
@@ -338,7 +354,7 @@ static inline int PageAnon(struct page *page)
static inline int PageKsm(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
@@ -557,6 +573,8 @@ static inline void __ClearPageBalloon(struct page *page)
atomic_set(&page->_mapcount, -1);
}
+__PAGEFLAG(Isolated, isolated);
+
/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index 457d862cb9a8..1effc355d7d0 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -106,9 +106,10 @@ enum {
};
enum {
- POWER_SUPPLY_PARALLEL_NONE,
- POWER_SUPPLY_PARALLEL_USBIN_USBIN,
- POWER_SUPPLY_PARALLEL_MID_MID,
+ POWER_SUPPLY_PL_NONE,
+ POWER_SUPPLY_PL_USBIN_USBIN,
+ POWER_SUPPLY_PL_USBIN_USBIN_EXT,
+ POWER_SUPPLY_PL_USBMID_USBMID,
};
enum power_supply_property {
diff --git a/include/linux/qpnp/qpnp-pbs.h b/include/linux/qpnp/qpnp-pbs.h
new file mode 100644
index 000000000000..39497ac4b552
--- /dev/null
+++ b/include/linux/qpnp/qpnp-pbs.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_PBS_H
+#define _QPNP_PBS_H
+
+#ifdef CONFIG_QPNP_PBS
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap);
+#else
+static inline int qpnp_pbs_trigger_event(struct device_node *dev_node,
+ u8 bitmap) {
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/include/linux/regulator/qpnp-labibb-regulator.h b/include/linux/regulator/qpnp-labibb-regulator.h
new file mode 100644
index 000000000000..247069507fd9
--- /dev/null
+++ b/include/linux/regulator/qpnp-labibb-regulator.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_LABIBB_REGULATOR_H
+#define _QPNP_LABIBB_REGULATOR_H
+
+enum labibb_notify_event {
+ LAB_VREG_OK = 1,
+};
+
+int qpnp_labibb_notifier_register(struct notifier_block *nb);
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb);
+
+#endif
diff --git a/include/linux/sde_io_util.h b/include/linux/sde_io_util.h
new file mode 100644
index 000000000000..6bd5c168ecd8
--- /dev/null
+++ b/include/linux/sde_io_util.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IO_UTIL_H__
+#define __SDE_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct dss_io_data {
+ u32 len;
+ void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val) dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val) dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset) dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset) dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+ DSS_REG_LDO,
+ DSS_REG_VS,
+};
+
+struct dss_vreg {
+ struct regulator *vreg; /* vreg handle */
+ char vreg_name[32];
+ int min_voltage;
+ int max_voltage;
+ int enable_load;
+ int disable_load;
+ int pre_on_sleep;
+ int post_on_sleep;
+ int pre_off_sleep;
+ int post_off_sleep;
+};
+
+struct dss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum dss_clk_type {
+ DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+ DSS_CLK_PCLK,
+ DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum dss_clk_type type;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+struct dss_module_power {
+ unsigned int num_vreg;
+ struct dss_vreg *vreg_config;
+ unsigned int num_gpio;
+ struct dss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+ struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+ int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf);
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value);
+
+#endif /* __SDE_IO_UTIL_H__ */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index b90f8f5c663d..4d2cf47aba27 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -178,6 +178,7 @@ struct sensor_info {
struct work_struct work;
struct task_struct *sysfs_notify_thread;
struct completion sysfs_notify_complete;
+ bool deregister_active;
};
/**
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 2a03a062a395..1534086e16d0 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -52,6 +52,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
COMPACTISOLATED,
COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+ KCOMPACTD_WAKE,
#endif
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 34eb16098a33..57a8e98f2708 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -41,10 +41,10 @@ struct zs_pool_stats {
struct zs_pool;
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
-unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c92d1e1cbad9..223450aeb49e 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -350,6 +350,61 @@ DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
);
#endif
+TRACE_EVENT(mm_compaction_kcompactd_sleep,
+
+ TP_PROTO(int nid),
+
+ TP_ARGS(nid),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ ),
+
+ TP_printk("nid=%d", __entry->nid)
+);
+
+DECLARE_EVENT_CLASS(kcompactd_wake_template,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, order)
+ __field(enum zone_type, classzone_idx)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->order = order;
+ __entry->classzone_idx = classzone_idx;
+ ),
+
+ TP_printk("nid=%d order=%d classzone_idx=%-8s",
+ __entry->nid,
+ __entry->order,
+ __print_symbolic(__entry->classzone_idx, ZONE_TYPE))
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx)
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx)
+);
+
#endif /* _TRACE_COMPACTION_H */
/* This part must be outside protection */
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index fd1be42188cd..d2f19ac6f536 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -40,6 +40,15 @@
#define MSM_PIPE_2D1 0x02
#define MSM_PIPE_3D0 0x10
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK 0xffff
+#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
+
/* timeouts are specified in clock-monotonic absolute times (to simplify
* restarting interrupted ioctls). The following struct is logically the
* same as 'struct timespec' but 32/64b ABI safe.
@@ -52,6 +61,9 @@ struct drm_msm_timespec {
#define MSM_PARAM_GPU_ID 0x01
#define MSM_PARAM_GMEM_SIZE 0x02
#define MSM_PARAM_CHIP_ID 0x03
+#define MSM_PARAM_MAX_FREQ 0x04
+#define MSM_PARAM_TIMESTAMP 0x05
+#define MSM_PARAM_GMEM_BASE 0x06
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
@@ -65,6 +77,7 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
+#define MSM_BO_PRIVILEGED 0x00000004
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
@@ -83,10 +96,14 @@ struct drm_msm_gem_new {
__u32 handle; /* out */
};
+#define MSM_INFO_IOVA 0x01
+
+#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+
struct drm_msm_gem_info {
__u32 handle; /* in */
- __u32 pad;
- __u64 offset; /* out, offset to pass to mmap() */
+ __u32 flags; /* in - combination of MSM_INFO_* flags */
+ __u64 offset; /* out, mmap() offset or iova */
};
#define MSM_PREP_READ 0x01
@@ -171,12 +188,18 @@ struct drm_msm_gem_submit_bo {
__u64 presumed; /* in/out, presumed buffer address */
};
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_RING_MASK 0x000F0000
+#define MSM_SUBMIT_RING_SHIFT 16
+
+#define MSM_SUBMIT_FLAGS (MSM_SUBMIT_RING_MASK)
+
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
struct drm_msm_gem_submit {
- __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index cfb5c406f344..0bdfe727d6e0 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -78,5 +78,7 @@
#define BTRFS_TEST_MAGIC 0x73727279
#define NSFS_MAGIC 0x6e736673
#define BPF_FS_MAGIC 0xcafe4a11
+#define BALLOON_KVM_MAGIC 0x13661366
+#define ZSMALLOC_MAGIC 0x58295829
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index e04ccf0b6e8b..e050bc758b3b 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -70,7 +70,7 @@ struct snd_compr_tstamp {
__u32 pcm_frames;
__u32 pcm_io_frames;
__u32 sampling_rate;
- uint64_t timestamp;
+ __u64 timestamp;
} __attribute__((packed, aligned(4)));
/**
@@ -128,8 +128,8 @@ struct snd_compr_codec_caps {
* @reserved: reserved for furture use
*/
struct snd_compr_audio_info {
- uint32_t frame_size;
- uint32_t reserved[15];
+ __u32 frame_size;
+ __u32 reserved[15];
} __attribute__((packed, aligned(4)));
/**
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 133b412eabc7..1e3accddd103 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -277,9 +277,6 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
for_each_possible_cpu(cpu) {
c = &per_cpu(cpu_state, cpu);
- if (!c->cluster)
- continue;
-
cluster = c->cluster;
if (!cluster || !cluster->inited)
continue;
@@ -301,6 +298,9 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
count += snprintf(buf + count, PAGE_SIZE - count,
"\tIs busy: %u\n", c->is_busy);
count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNot preferred: %u\n",
+ c->not_preferred);
+ count += snprintf(buf + count, PAGE_SIZE - count,
"\tNr running: %u\n", cluster->nrrun);
count += snprintf(buf + count, PAGE_SIZE - count,
"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
@@ -323,13 +323,14 @@ static ssize_t store_not_preferred(struct cluster_data *state,
int ret;
ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
- if (ret != 1 && ret != state->num_cpus)
+ if (ret != state->num_cpus)
return -EINVAL;
- i = 0;
spin_lock_irqsave(&state_lock, flags);
- list_for_each_entry(c, &state->lru, sib)
- c->not_preferred = val[i++];
+ for (i = 0; i < state->num_cpus; i++) {
+ c = &per_cpu(cpu_state, i + state->first_cpu);
+ c->not_preferred = val[i];
+ }
spin_unlock_irqrestore(&state_lock, flags);
return count;
@@ -340,11 +341,14 @@ static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
struct cpu_data *c;
ssize_t count = 0;
unsigned long flags;
+ int i;
spin_lock_irqsave(&state_lock, flags);
- list_for_each_entry(c, &state->lru, sib)
- count += snprintf(buf + count, PAGE_SIZE - count,
- "\tCPU:%d %u\n", c->cpu, c->not_preferred);
+ for (i = 0; i < state->num_cpus; i++) {
+ c = &per_cpu(cpu_state, i + state->first_cpu);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "CPU#%d: %u\n", c->cpu, c->not_preferred);
+ }
spin_unlock_irqrestore(&state_lock, flags);
return count;
diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c
index 1d0f1a1ac44c..069aa1aa82b6 100644
--- a/kernel/trace/trace.c
+++ b/kernel/trace/trace.c
@@ -1357,11 +1357,11 @@ void tracing_reset_all_online_cpus(void)
#define SAVED_CMDLINES_DEFAULT 128
#define NO_CMDLINE_MAP UINT_MAX
-static unsigned saved_tgids[SAVED_CMDLINES_DEFAULT];
static arch_spinlock_t trace_cmdline_lock = __ARCH_SPIN_LOCK_UNLOCKED;
struct saved_cmdlines_buffer {
unsigned map_pid_to_cmdline[PID_MAX_DEFAULT+1];
unsigned *map_cmdline_to_pid;
+ unsigned *saved_tgids;
unsigned cmdline_num;
int cmdline_idx;
char *saved_cmdlines;
@@ -1395,12 +1395,22 @@ static int allocate_cmdlines_buffer(unsigned int val,
return -ENOMEM;
}
+ s->saved_tgids = kmalloc_array(val, sizeof(*s->saved_tgids),
+ GFP_KERNEL);
+ if (!s->saved_tgids) {
+ kfree(s->map_cmdline_to_pid);
+ kfree(s->saved_cmdlines);
+ return -ENOMEM;
+ }
+
s->cmdline_idx = 0;
s->cmdline_num = val;
memset(&s->map_pid_to_cmdline, NO_CMDLINE_MAP,
sizeof(s->map_pid_to_cmdline));
memset(s->map_cmdline_to_pid, NO_CMDLINE_MAP,
val * sizeof(*s->map_cmdline_to_pid));
+ memset(s->saved_tgids, 0,
+ val * sizeof(*s->saved_tgids));
return 0;
}
@@ -1596,7 +1606,7 @@ static int trace_save_cmdline(struct task_struct *tsk)
}
set_cmdline(idx, tsk->comm);
- saved_tgids[idx] = tsk->tgid;
+ savedcmd->saved_tgids[idx] = tsk->tgid;
arch_spin_unlock(&trace_cmdline_lock);
return 1;
@@ -1648,7 +1658,7 @@ int trace_find_tgid(int pid)
arch_spin_lock(&trace_cmdline_lock);
map = savedcmd->map_pid_to_cmdline[pid];
if (map != NO_CMDLINE_MAP)
- tgid = saved_tgids[map];
+ tgid = savedcmd->saved_tgids[map];
else
tgid = -1;
@@ -4221,13 +4231,13 @@ tracing_saved_tgids_read(struct file *file, char __user *ubuf,
int pid;
int i;
- file_buf = kmalloc(SAVED_CMDLINES_DEFAULT*(16+1+16), GFP_KERNEL);
+ file_buf = kmalloc(savedcmd->cmdline_num*(16+1+16), GFP_KERNEL);
if (!file_buf)
return -ENOMEM;
buf = file_buf;
- for (i = 0; i < SAVED_CMDLINES_DEFAULT; i++) {
+ for (i = 0; i < savedcmd->cmdline_num; i++) {
int tgid;
int r;
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 300117f1a08f..6c563a4846c4 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -70,7 +70,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
*/
if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
- if (!PagePrivate(page)) {
+ if (PageIsolated(page)) {
/* raced with isolation */
unlock_page(page);
continue;
@@ -106,110 +106,50 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);
#ifdef CONFIG_BALLOON_COMPACTION
-static inline void __isolate_balloon_page(struct page *page)
+bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
+
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
- ClearPagePrivate(page);
list_del(&page->lru);
b_dev_info->isolated_pages++;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+ return true;
}
-static inline void __putback_balloon_page(struct page *page)
+void balloon_page_putback(struct page *page)
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
- SetPagePrivate(page);
list_add(&page->lru, &b_dev_info->pages);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
-/* __isolate_lru_page() counterpart for a ballooned page */
-bool balloon_page_isolate(struct page *page)
-{
- /*
- * Avoid burning cycles with pages that are yet under __free_pages(),
- * or just got freed under us.
- *
- * In case we 'win' a race for a balloon page being freed under us and
- * raise its refcount preventing __free_pages() from doing its job
- * the put_page() at the end of this block will take care of
- * release this page, thus avoiding a nasty leakage.
- */
- if (likely(get_page_unless_zero(page))) {
- /*
- * As balloon pages are not isolated from LRU lists, concurrent
- * compaction threads can race against page migration functions
- * as well as race against the balloon driver releasing a page.
- *
- * In order to avoid having an already isolated balloon page
- * being (wrongly) re-isolated while it is under migration,
- * or to avoid attempting to isolate pages being released by
- * the balloon driver, lets be sure we have the page lock
- * before proceeding with the balloon page isolation steps.
- */
- if (likely(trylock_page(page))) {
- /*
- * A ballooned page, by default, has PagePrivate set.
- * Prevent concurrent compaction threads from isolating
- * an already isolated balloon page by clearing it.
- */
- if (balloon_page_movable(page)) {
- __isolate_balloon_page(page);
- unlock_page(page);
- return true;
- }
- unlock_page(page);
- }
- put_page(page);
- }
- return false;
-}
-
-/* putback_lru_page() counterpart for a ballooned page */
-void balloon_page_putback(struct page *page)
-{
- /*
- * 'lock_page()' stabilizes the page and prevents races against
- * concurrent isolation threads attempting to re-isolate it.
- */
- lock_page(page);
-
- if (__is_movable_balloon_page(page)) {
- __putback_balloon_page(page);
- /* drop the extra ref count taken for page isolation */
- put_page(page);
- } else {
- WARN_ON(1);
- dump_page(page, "not movable balloon page");
- }
- unlock_page(page);
-}
/* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct page *newpage,
- struct page *page, enum migrate_mode mode)
+int balloon_page_migrate(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
{
struct balloon_dev_info *balloon = balloon_page_device(page);
- int rc = -EAGAIN;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
- if (WARN_ON(!__is_movable_balloon_page(page))) {
- dump_page(page, "not movable balloon page");
- return rc;
- }
+ return balloon->migratepage(balloon, newpage, page, mode);
+}
- if (balloon && balloon->migratepage)
- rc = balloon->migratepage(balloon, newpage, page, mode);
+const struct address_space_operations balloon_aops = {
+ .migratepage = balloon_page_migrate,
+ .isolate_page = balloon_page_isolate,
+ .putback_page = balloon_page_putback,
+};
+EXPORT_SYMBOL_GPL(balloon_aops);
- return rc;
-}
#endif /* CONFIG_BALLOON_COMPACTION */
diff --git a/mm/compaction.c b/mm/compaction.c
index b4c33357f96e..87ed50835881 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -7,6 +7,7 @@
*
* Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
*/
+#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
@@ -14,9 +15,10 @@
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
-#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -116,6 +118,44 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
#ifdef CONFIG_COMPACTION
+int PageMovable(struct page *page)
+{
+ struct address_space *mapping;
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ if (!__PageMovable(page))
+ return 0;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(PageMovable);
+
+void __SetPageMovable(struct page *page, struct address_space *mapping)
+{
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
+ page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__SetPageMovable);
+
+void __ClearPageMovable(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ /*
+ * Clear registered address_space val with keeping PAGE_MAPPING_MOVABLE
+ * flag so that VM can catch up released page by driver after isolation.
+ * With it, VM migration doesn't try to put it back.
+ */
+ page->mapping = (void *)((unsigned long)page->mapping &
+ PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__ClearPageMovable);
+
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
@@ -717,7 +757,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* Time to isolate some pages for migration */
for (; low_pfn < end_pfn; low_pfn++) {
- bool is_lru;
/*
* Periodically drop the lock (if held) regardless of its
@@ -758,21 +797,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
/*
- * Check may be lockless but that's ok as we recheck later.
- * It's possible to migrate LRU pages and balloon pages
- * Skip any other type of page
- */
- is_lru = PageLRU(page);
- if (!is_lru) {
- if (unlikely(balloon_page_movable(page))) {
- if (balloon_page_isolate(page)) {
- /* Successfully isolated */
- goto isolate_success;
- }
- }
- }
-
- /*
* Regardless of being on LRU, compound pages such as THP and
* hugetlbfs are not to be compacted. We can potentially save
* a lot of iterations if we skip them at once. The check is
@@ -788,8 +812,30 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
continue;
}
- if (!is_lru)
+ /*
+ * Check may be lockless but that's ok as we recheck later.
+ * It's possible to migrate LRU and non-lru movable pages.
+ * Skip any other type of page
+ */
+ if (!PageLRU(page)) {
+ /*
+ * __PageMovable can return false positive so we need
+ * to verify it under page_lock.
+ */
+ if (unlikely(__PageMovable(page)) &&
+ !PageIsolated(page)) {
+ if (locked) {
+ spin_unlock_irqrestore(&zone->lru_lock,
+ flags);
+ locked = false;
+ }
+
+ if (isolate_movable_page(page, isolate_mode))
+ goto isolate_success;
+ }
+
continue;
+ }
/*
* Migration will fail if an anonymous page is pinned in memory,
@@ -1223,11 +1269,11 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
/*
* Mark that the PG_migrate_skip information should be cleared
- * by kswapd when it goes to sleep. kswapd does not set the
+ * by kswapd when it goes to sleep. kcompactd does not set the
* flag itself as the decision to be clear should be directly
* based on an allocation request.
*/
- if (!current_is_kswapd())
+ if (cc->direct_compaction)
zone->compact_blockskip_flush = true;
return COMPACT_COMPLETE;
@@ -1370,10 +1416,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
/*
* Clear pageblock skip if there were failures recently and compaction
- * is about to be retried after being deferred. kswapd does not do
- * this reset as it'll reset the cached information when going to sleep.
+ * is about to be retried after being deferred.
*/
- if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+ if (compaction_restarting(zone, cc->order))
__reset_isolation_suitable(zone);
/*
@@ -1509,6 +1554,7 @@ static unsigned long compact_zone_order(struct zone *zone, int order,
.mode = mode,
.alloc_flags = alloc_flags,
.classzone_idx = classzone_idx,
+ .direct_compaction = true,
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
@@ -1767,4 +1813,225 @@ void compaction_unregister_node(struct node *node)
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
+static inline bool kcompactd_work_requested(pg_data_t *pgdat)
+{
+ return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
+}
+
+static bool kcompactd_node_suitable(pg_data_t *pgdat)
+{
+ int zoneid;
+ struct zone *zone;
+ enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
+
+ for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
+ zone = &pgdat->node_zones[zoneid];
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
+ classzone_idx) == COMPACT_CONTINUE)
+ return true;
+ }
+
+ return false;
+}
+
+static void kcompactd_do_work(pg_data_t *pgdat)
+{
+ /*
+ * With no special task, compact all zones so that a page of requested
+ * order is allocatable.
+ */
+ int zoneid;
+ struct zone *zone;
+ struct compact_control cc = {
+ .order = pgdat->kcompactd_max_order,
+ .classzone_idx = pgdat->kcompactd_classzone_idx,
+ .mode = MIGRATE_SYNC_LIGHT,
+ .ignore_skip_hint = true,
+
+ };
+ bool success = false;
+
+ trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
+ cc.classzone_idx);
+ count_vm_event(KCOMPACTD_WAKE);
+
+ for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
+ int status;
+
+ zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
+ continue;
+
+ if (compaction_deferred(zone, cc.order))
+ continue;
+
+ if (compaction_suitable(zone, cc.order, 0, zoneid) !=
+ COMPACT_CONTINUE)
+ continue;
+
+ cc.nr_freepages = 0;
+ cc.nr_migratepages = 0;
+ cc.zone = zone;
+ INIT_LIST_HEAD(&cc.freepages);
+ INIT_LIST_HEAD(&cc.migratepages);
+
+ if (kthread_should_stop())
+ return;
+ status = compact_zone(zone, &cc);
+
+ if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
+ cc.classzone_idx, 0)) {
+ success = true;
+ compaction_defer_reset(zone, cc.order, false);
+ } else if (status == COMPACT_COMPLETE) {
+ /*
+ * We use sync migration mode here, so we defer like
+ * sync direct compaction does.
+ */
+ defer_compaction(zone, cc.order);
+ }
+
+ VM_BUG_ON(!list_empty(&cc.freepages));
+ VM_BUG_ON(!list_empty(&cc.migratepages));
+ }
+
+ /*
+ * Regardless of success, we are done until woken up next. But remember
+ * the requested order/classzone_idx in case it was higher/tighter than
+ * our current ones
+ */
+ if (pgdat->kcompactd_max_order <= cc.order)
+ pgdat->kcompactd_max_order = 0;
+ if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
+ pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
+}
+
+void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+ if (!order)
+ return;
+
+ if (pgdat->kcompactd_max_order < order)
+ pgdat->kcompactd_max_order = order;
+
+ if (pgdat->kcompactd_classzone_idx > classzone_idx)
+ pgdat->kcompactd_classzone_idx = classzone_idx;
+
+ if (!waitqueue_active(&pgdat->kcompactd_wait))
+ return;
+
+ if (!kcompactd_node_suitable(pgdat))
+ return;
+
+ trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
+ classzone_idx);
+ wake_up_interruptible(&pgdat->kcompactd_wait);
+}
+
+/*
+ * The background compaction daemon, started as a kernel thread
+ * from the init process.
+ */
+static int kcompactd(void *p)
+{
+ pg_data_t *pgdat = (pg_data_t*)p;
+ struct task_struct *tsk = current;
+
+ const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+ if (!cpumask_empty(cpumask))
+ set_cpus_allowed_ptr(tsk, cpumask);
+
+ set_freezable();
+
+ pgdat->kcompactd_max_order = 0;
+ pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
+
+ while (!kthread_should_stop()) {
+ trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
+ wait_event_freezable(pgdat->kcompactd_wait,
+ kcompactd_work_requested(pgdat));
+
+ kcompactd_do_work(pgdat);
+ }
+
+ return 0;
+}
+
+/*
+ * This kcompactd start function will be called by init and node-hot-add.
+ * On node-hot-add, kcompactd will be moved to proper cpus if cpus are hot-added.
+ */
+int kcompactd_run(int nid)
+{
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int ret = 0;
+
+ if (pgdat->kcompactd)
+ return 0;
+
+ pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
+ if (IS_ERR(pgdat->kcompactd)) {
+ pr_err("Failed to start kcompactd on node %d\n", nid);
+ ret = PTR_ERR(pgdat->kcompactd);
+ pgdat->kcompactd = NULL;
+ }
+ return ret;
+}
+
+/*
+ * Called by memory hotplug when all memory in a node is offlined. Caller must
+ * hold mem_hotplug_begin/end().
+ */
+void kcompactd_stop(int nid)
+{
+ struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
+
+ if (kcompactd) {
+ kthread_stop(kcompactd);
+ NODE_DATA(nid)->kcompactd = NULL;
+ }
+}
+
+/*
+ * It's optimal to keep kcompactd on the same CPUs as their memory, but
+ * not required for correctness. So if the last cpu in a node goes
+ * away, we get changed to run anywhere: as the first one comes back,
+ * restore their cpu bindings.
+ */
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ int nid;
+
+ if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
+ for_each_node_state(nid, N_MEMORY) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+ const struct cpumask *mask;
+
+ mask = cpumask_of_node(pgdat->node_id);
+
+ if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+ /* One of our CPUs online: restore mask */
+ set_cpus_allowed_ptr(pgdat->kcompactd, mask);
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static int __init kcompactd_init(void)
+{
+ int nid;
+
+ for_each_node_state(nid, N_MEMORY)
+ kcompactd_run(nid);
+ hotcpu_notifier(cpu_callback, 0);
+ return 0;
+}
+subsys_initcall(kcompactd_init)
+
#endif /* CONFIG_COMPACTION */
diff --git a/mm/internal.h b/mm/internal.h
index 55d4fa99b486..7b9e313d9dea 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -206,6 +206,7 @@ struct compact_control {
unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
enum migrate_mode mode; /* Async or sync migration mode */
bool ignore_skip_hint; /* Scan blocks even if marked skip */
+ bool direct_compaction; /* False from kcompactd or /proc/... */
int order; /* order a direct compactor needs */
const gfp_t gfp_mask; /* gfp mask of a direct compactor */
const int alloc_flags; /* alloc flags of a direct compactor */
diff --git a/mm/ksm.c b/mm/ksm.c
index 2bdad62ff809..7b59d9723adb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -558,8 +558,8 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
void *expected_mapping;
unsigned long kpfn;
- expected_mapping = (void *)stable_node +
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ expected_mapping = (void *)((unsigned long)stable_node |
+ PAGE_MAPPING_KSM);
again:
kpfn = READ_ONCE(stable_node->kpfn);
page = pfn_to_page(kpfn);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a042a9d537bb..91f7c074e385 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -32,6 +32,7 @@
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
+#include <linux/compaction.h>
#include <asm/tlbflush.h>
@@ -1012,7 +1013,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
arg.nr_pages = nr_pages;
node_states_check_changes_online(nr_pages, zone, &arg);
- nid = pfn_to_nid(pfn);
+ nid = zone_to_nid(zone);
ret = memory_notify(MEM_GOING_ONLINE, &arg);
ret = notifier_to_errno(ret);
@@ -1052,7 +1053,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
pgdat_resize_unlock(zone->zone_pgdat, &flags);
if (onlined_pages) {
- node_states_set_node(zone_to_nid(zone), &arg);
+ node_states_set_node(nid, &arg);
if (need_zonelists_rebuild)
build_all_zonelists(NULL, NULL);
else
@@ -1063,8 +1064,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
init_per_zone_wmark_min();
- if (onlined_pages)
- kswapd_run(zone_to_nid(zone));
+ if (onlined_pages) {
+ kswapd_run(nid);
+ kcompactd_run(nid);
+ }
vm_total_pages = nr_free_pagecache_pages();
@@ -1828,8 +1831,10 @@ repeat:
zone_pcp_update(zone);
node_states_clear_node(node, &arg);
- if (arg.status_change_nid >= 0)
+ if (arg.status_change_nid >= 0) {
kswapd_stop(node);
+ kcompactd_stop(node);
+ }
vm_total_pages = nr_free_pagecache_pages();
writeback_set_ratelimit();
diff --git a/mm/migrate.c b/mm/migrate.c
index a9ce3783361a..9a5ccfc71afc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
+#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
@@ -72,6 +73,81 @@ int migrate_prep_local(void)
return 0;
}
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+{
+ struct address_space *mapping;
+
+ /*
+ * Avoid burning cycles with pages that are yet under __free_pages(),
+ * or just got freed under us.
+ *
+ * In case we 'win' a race for a movable page being freed under us and
+ * raise its refcount preventing __free_pages() from doing its job
+ * the put_page() at the end of this block will take care of
+ * release this page, thus avoiding a nasty leakage.
+ */
+ if (unlikely(!get_page_unless_zero(page)))
+ goto out;
+
+ /*
+ * Check PageMovable before holding a PG_lock because page's owner
+ * assumes anybody doesn't touch PG_lock of newly allocated page
+ * so unconditionally grabbing the lock ruins page's owner side.
+ */
+ if (unlikely(!__PageMovable(page)))
+ goto out_putpage;
+ /*
+ * As movable pages are not isolated from LRU lists, concurrent
+ * compaction threads can race against page migration functions
+ * as well as race against the release of a page.
+ *
+ * In order to avoid having an already isolated movable page
+ * being (wrongly) re-isolated while it is under migration,
+ * or to avoid attempting to isolate pages being released,
+ * lets be sure we have the page lock
+ * before proceeding with the movable page isolation steps.
+ */
+ if (unlikely(!trylock_page(page)))
+ goto out_putpage;
+
+ if (!PageMovable(page) || PageIsolated(page))
+ goto out_no_isolated;
+
+ mapping = page_mapping(page);
+ VM_BUG_ON_PAGE(!mapping, page);
+
+ if (!mapping->a_ops->isolate_page(page, mode))
+ goto out_no_isolated;
+
+ /* Driver shouldn't use PG_isolated bit of page->flags */
+ WARN_ON_ONCE(PageIsolated(page));
+ __SetPageIsolated(page);
+ unlock_page(page);
+
+ return true;
+
+out_no_isolated:
+ unlock_page(page);
+out_putpage:
+ put_page(page);
+out:
+ return false;
+}
+
+/* It should be called on page which is PG_movable */
+void putback_movable_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ mapping = page_mapping(page);
+ mapping->a_ops->putback_page(page);
+ __ClearPageIsolated(page);
+}
+
/*
* Put previously isolated pages back onto the appropriate lists
* from where they were once taken off for compaction/migration.
@@ -93,10 +169,23 @@ void putback_movable_pages(struct list_head *l)
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- if (unlikely(isolated_balloon_page(page)))
- balloon_page_putback(page);
- else
+ /*
+ * We isolated non-lru movable page so here we can use
+ * __PageMovable because LRU page's mapping cannot have
+ * PAGE_MAPPING_MOVABLE.
+ */
+ if (unlikely(__PageMovable(page))) {
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ lock_page(page);
+ if (PageMovable(page))
+ putback_movable_page(page);
+ else
+ __ClearPageIsolated(page);
+ unlock_page(page);
+ put_page(page);
+ } else {
putback_lru_page(page);
+ }
}
}
@@ -587,7 +676,7 @@ EXPORT_SYMBOL(migrate_page_copy);
***********************************************************/
/*
- * Common logic to directly migrate a single page suitable for
+ * Common logic to directly migrate a single LRU page suitable for
* pages that do not use PagePrivate/PagePrivate2.
*
* Pages are locked upon entry and exit.
@@ -750,24 +839,47 @@ static int move_to_new_page(struct page *newpage, struct page *page,
enum migrate_mode mode)
{
struct address_space *mapping;
- int rc;
+ int rc = -EAGAIN;
+ bool is_lru = !__PageMovable(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
mapping = page_mapping(page);
- if (!mapping)
- rc = migrate_page(mapping, newpage, page, mode);
- else if (mapping->a_ops->migratepage)
+
+ if (likely(is_lru)) {
+ if (!mapping)
+ rc = migrate_page(mapping, newpage, page, mode);
+ else if (mapping->a_ops->migratepage)
+ /*
+ * Most pages have a mapping and most filesystems
+ * provide a migratepage callback. Anonymous pages
+ * are part of swap space which also has its own
+ * migratepage callback. This is the most common path
+ * for page migration.
+ */
+ rc = mapping->a_ops->migratepage(mapping, newpage,
+ page, mode);
+ else
+ rc = fallback_migrate_page(mapping, newpage,
+ page, mode);
+ } else {
/*
- * Most pages have a mapping and most filesystems provide a
- * migratepage callback. Anonymous pages are part of swap
- * space which also has its own migratepage callback. This
- * is the most common path for page migration.
+ * In case of non-lru page, it could be released after
+ * isolation step. In that case, we shouldn't try migration.
*/
- rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
- else
- rc = fallback_migrate_page(mapping, newpage, page, mode);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ if (!PageMovable(page)) {
+ rc = MIGRATEPAGE_SUCCESS;
+ __ClearPageIsolated(page);
+ goto out;
+ }
+
+ rc = mapping->a_ops->migratepage(mapping, newpage,
+ page, mode);
+ WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
+ !PageIsolated(page));
+ }
/*
* When successful, old pagecache page->mapping must be cleared before
@@ -775,9 +887,25 @@ static int move_to_new_page(struct page *newpage, struct page *page,
*/
if (rc == MIGRATEPAGE_SUCCESS) {
set_page_memcg(page, NULL);
- if (!PageAnon(page))
+ if (__PageMovable(page)) {
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ /*
+ * We clear PG_movable under page_lock so any compactor
+ * cannot try to migrate this page.
+ */
+ __ClearPageIsolated(page);
+ }
+
+ /*
+ * Anonymous and movable page->mapping will be cleared by
+ * free_pages_prepare so don't reset it here for keeping
+ * the type to work PageAnon, for example.
+ */
+ if (!PageMappingFlags(page))
page->mapping = NULL;
}
+out:
return rc;
}
@@ -787,6 +915,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
int rc = -EAGAIN;
int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
+ bool is_lru = !__PageMovable(page);
if (!trylock_page(page)) {
if (!force || mode == MIGRATE_ASYNC)
@@ -855,15 +984,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
if (unlikely(!trylock_page(newpage)))
goto out_unlock;
- if (unlikely(isolated_balloon_page(page))) {
- /*
- * A ballooned page does not need any special attention from
- * physical to virtual reverse mapping procedures.
- * Skip any attempt to unmap PTEs or to remap swap cache,
- * in order to avoid burning cycles at rmap level, and perform
- * the page migration right away (proteced by page lock).
- */
- rc = balloon_page_migrate(newpage, page, mode);
+ if (unlikely(!is_lru)) {
+ rc = move_to_new_page(newpage, page, mode);
goto out_unlock_both;
}
@@ -909,6 +1031,19 @@ out_unlock:
put_anon_vma(anon_vma);
unlock_page(page);
out:
+ /*
+ * If migration is successful, decrease refcount of the newpage
+ * which will not free the page because new page owner increased
+ * refcounter. As well, if it is LRU page, add the page to LRU
+ * list in here.
+ */
+ if (rc == MIGRATEPAGE_SUCCESS) {
+ if (unlikely(__PageMovable(newpage)))
+ put_page(newpage);
+ else
+ putback_lru_page(newpage);
+ }
+
return rc;
}
@@ -942,6 +1077,18 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
+ ClearPageActive(page);
+ ClearPageUnevictable(page);
+ if (unlikely(__PageMovable(page))) {
+ lock_page(page);
+ if (!PageMovable(page))
+ __ClearPageIsolated(page);
+ unlock_page(page);
+ }
+ if (put_new_page)
+ put_new_page(newpage, private);
+ else
+ put_page(newpage);
goto out;
}
@@ -950,8 +1097,6 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
goto out;
rc = __unmap_and_move(page, newpage, force, mode);
- if (rc == MIGRATEPAGE_SUCCESS)
- put_new_page = NULL;
out:
if (rc != -EAGAIN) {
@@ -964,33 +1109,45 @@ out:
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- /* Soft-offlined page shouldn't go through lru cache list */
- if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+ }
+
+ /*
+ * If migration is successful, releases reference grabbed during
+ * isolation. Otherwise, restore the page to right list unless
+ * we want to retry.
+ */
+ if (rc == MIGRATEPAGE_SUCCESS) {
+ put_page(page);
+ if (reason == MR_MEMORY_FAILURE) {
/*
- * With this release, we free successfully migrated
- * page and set PG_HWPoison on just freed page
- * intentionally. Although it's rather weird, it's how
- * HWPoison flag works at the moment.
+ * Set PG_HWPoison on just freed page
+ * intentionally. Although it's rather weird,
+ * it's how HWPoison flag works at the moment.
*/
- put_page(page);
if (!test_set_page_hwpoison(page))
num_poisoned_pages_inc();
- } else
- putback_lru_page(page);
- }
+ }
+ } else {
+ if (rc != -EAGAIN) {
+ if (likely(!__PageMovable(page))) {
+ putback_lru_page(page);
+ goto put_new;
+ }
- /*
- * If migration was not successful and there's a freeing callback, use
- * it. Otherwise, putback_lru_page() will drop the reference grabbed
- * during isolation.
- */
- if (put_new_page)
- put_new_page(newpage, private);
- else if (unlikely(__is_movable_balloon_page(newpage))) {
- /* drop our reference, page already in the balloon */
- put_page(newpage);
- } else
- putback_lru_page(newpage);
+ lock_page(page);
+ if (PageMovable(page))
+ putback_movable_page(page);
+ else
+ __ClearPageIsolated(page);
+ unlock_page(page);
+ put_page(page);
+ }
+put_new:
+ if (put_new_page)
+ put_new_page(newpage, private);
+ else
+ put_page(newpage);
+ }
if (result) {
if (rc)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c42587d02190..af5901c65ba8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -999,7 +999,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
- if (PageAnon(page))
+ if (PageMappingFlags(page))
page->mapping = NULL;
bad += free_pages_check(page);
for (i = 1; i < (1 << order); i++) {
@@ -5319,6 +5319,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
#endif
init_waitqueue_head(&pgdat->kswapd_wait);
init_waitqueue_head(&pgdat->pfmemalloc_wait);
+#ifdef CONFIG_COMPACTION
+ init_waitqueue_head(&pgdat->kcompactd_wait);
+#endif
pgdat_page_ext_init(pgdat);
for (j = 0; j < MAX_NR_ZONES; j++) {
diff --git a/mm/util.c b/mm/util.c
index d5259b62f8d7..d41da54e8d83 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -344,10 +344,12 @@ struct address_space *page_mapping(struct page *page)
}
mapping = (unsigned long)page->mapping;
- if (mapping & PAGE_MAPPING_FLAGS)
+ if ((unsigned long)mapping & PAGE_MAPPING_ANON)
return NULL;
- return page->mapping;
+
+ return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
+EXPORT_SYMBOL(page_mapping);
int overcommit_ratio_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
diff --git a/mm/vmpressure.c b/mm/vmpressure.c
index f514dc40dab1..f5383e43597a 100644
--- a/mm/vmpressure.c
+++ b/mm/vmpressure.c
@@ -149,9 +149,16 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned,
unsigned long reclaimed)
{
unsigned long scale = scanned + reclaimed;
- unsigned long pressure;
+ unsigned long pressure = 0;
/*
+ * reclaimed can be greater than scanned in cases
+ * like THP, where the scanned count is 1 and the
+ * reclaimed count could be 512
+ */
+ if (reclaimed >= scanned)
+ goto out;
+ /*
* We calculate the ratio (in percents) of how many pages were
* scanned vs. reclaimed in a given time frame (window). Note that
* time is in VM reclaimer's "ticks", i.e. number of pages
@@ -161,6 +168,7 @@ static unsigned long vmpressure_calc_pressure(unsigned long scanned,
pressure = scale - (reclaimed * scale / scanned);
pressure = pressure * 100 / scale;
+out:
pr_debug("%s: %3lu (s: %lu r: %lu)\n", __func__, pressure,
scanned, reclaimed);
diff --git a/mm/vmscan.c b/mm/vmscan.c
index ff408638fd95..6801adacadb0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1274,7 +1274,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
list_for_each_entry_safe(page, next, page_list, lru) {
if (page_is_file_cache(page) && !PageDirty(page) &&
- !isolated_balloon_page(page)) {
+ !__PageMovable(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
}
@@ -2547,15 +2547,23 @@ static bool shrink_zone(struct zone *zone, struct scan_control *sc,
sc->nr_scanned - nr_scanned,
zone_lru_pages);
+ /*
+ * Record the subtree's reclaim efficiency. The reclaimed
+ * pages from slab is excluded here because the corresponding
+ * scanned pages is not accounted. Moreover, freeing a page
+ * by slab shrinking depends on each slab's object population,
+ * making the cost model (i.e. scan:free) different from that
+ * of LRU.
+ */
+ vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
+ sc->nr_scanned - nr_scanned,
+ sc->nr_reclaimed - nr_reclaimed);
+
if (reclaim_state) {
sc->nr_reclaimed += reclaim_state->reclaimed_slab;
reclaim_state->reclaimed_slab = 0;
}
- vmpressure(sc->gfp_mask, sc->target_mem_cgroup,
- sc->nr_scanned - nr_scanned,
- sc->nr_reclaimed - nr_reclaimed);
-
if (sc->nr_reclaimed - nr_reclaimed)
reclaimable = true;
@@ -3062,18 +3070,23 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
} while (memcg);
}
-static bool zone_balanced(struct zone *zone, int order,
- unsigned long balance_gap, int classzone_idx)
+static bool zone_balanced(struct zone *zone, int order, bool highorder,
+ unsigned long balance_gap, int classzone_idx)
{
- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
- balance_gap, classzone_idx))
- return false;
+ unsigned long mark = high_wmark_pages(zone) + balance_gap;
- if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone,
- order, 0, classzone_idx) == COMPACT_SKIPPED)
- return false;
+ /*
+ * When checking from pgdat_balanced(), kswapd should stop and sleep
+ * when it reaches the high order-0 watermark and let kcompactd take
+ * over. Other callers such as wakeup_kswapd() want to determine the
+ * true high-order watermark.
+ */
+ if (IS_ENABLED(CONFIG_COMPACTION) && !highorder) {
+ mark += (1UL << order);
+ order = 0;
+ }
- return true;
+ return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
}
/*
@@ -3123,7 +3136,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
continue;
}
- if (zone_balanced(zone, order, 0, i))
+ if (zone_balanced(zone, order, false, 0, i))
balanced_pages += zone->managed_pages;
else if (!order)
return false;
@@ -3177,10 +3190,8 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
*/
static bool kswapd_shrink_zone(struct zone *zone,
int classzone_idx,
- struct scan_control *sc,
- unsigned long *nr_attempted)
+ struct scan_control *sc)
{
- int testorder = sc->order;
unsigned long balance_gap;
bool lowmem_pressure;
@@ -3188,17 +3199,6 @@ static bool kswapd_shrink_zone(struct zone *zone,
sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
/*
- * Kswapd reclaims only single pages with compaction enabled. Trying
- * too hard to reclaim until contiguous free pages have become
- * available can hurt performance by evicting too much useful data
- * from memory. Do not reclaim more than needed for compaction.
- */
- if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
- compaction_suitable(zone, sc->order, 0, classzone_idx)
- != COMPACT_SKIPPED)
- testorder = 0;
-
- /*
* We put equal pressure on every zone, unless one zone has way too
* many pages free already. The "too many pages" is defined as the
* high wmark plus a "gap" where the gap is either the low
@@ -3212,15 +3212,12 @@ static bool kswapd_shrink_zone(struct zone *zone,
* reclaim is necessary
*/
lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
- if (!lowmem_pressure && zone_balanced(zone, testorder,
+ if (!lowmem_pressure && zone_balanced(zone, sc->order, false,
balance_gap, classzone_idx))
return true;
shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
- /* Account for the number of pages attempted to reclaim */
- *nr_attempted += sc->nr_to_reclaim;
-
clear_bit(ZONE_WRITEBACK, &zone->flags);
/*
@@ -3230,7 +3227,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
* waits.
*/
if (zone_reclaimable(zone) &&
- zone_balanced(zone, testorder, 0, classzone_idx)) {
+ zone_balanced(zone, sc->order, false, 0, classzone_idx)) {
clear_bit(ZONE_CONGESTED, &zone->flags);
clear_bit(ZONE_DIRTY, &zone->flags);
}
@@ -3242,7 +3239,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at high_wmark_pages(zone).
*
- * Returns the final order kswapd was reclaiming at
+ * Returns the highest zone idx kswapd was reclaiming at
*
* There is special handling here for zones which are full of pinned pages.
* This can happen if the pages are all mlocked, or if they are all used by
@@ -3259,8 +3256,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
* interoperates with the page allocator fallback scheme to ensure that aging
* of pages is balanced across the zones.
*/
-static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
- int *classzone_idx)
+static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
{
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
@@ -3277,9 +3273,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
count_vm_event(PAGEOUTRUN);
do {
- unsigned long nr_attempted = 0;
bool raise_priority = true;
- bool pgdat_needs_compaction = (order > 0);
sc.nr_reclaimed = 0;
@@ -3314,7 +3308,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
break;
}
- if (!zone_balanced(zone, order, 0, 0)) {
+ if (!zone_balanced(zone, order, false, 0, 0)) {
end_zone = i;
break;
} else {
@@ -3330,24 +3324,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
if (i < 0)
goto out;
- for (i = 0; i <= end_zone; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- if (!populated_zone(zone))
- continue;
-
- /*
- * If any zone is currently balanced then kswapd will
- * not call compaction as it is expected that the
- * necessary pages are already available.
- */
- if (pgdat_needs_compaction &&
- zone_watermark_ok(zone, order,
- low_wmark_pages(zone),
- *classzone_idx, 0))
- pgdat_needs_compaction = false;
- }
-
/*
* If we're getting trouble reclaiming, start doing writepage
* even in laptop mode.
@@ -3391,8 +3367,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
* that that high watermark would be met at 100%
* efficiency.
*/
- if (kswapd_shrink_zone(zone, end_zone,
- &sc, &nr_attempted))
+ if (kswapd_shrink_zone(zone, end_zone, &sc))
raise_priority = false;
}
@@ -3405,49 +3380,29 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
pfmemalloc_watermark_ok(pgdat))
wake_up_all(&pgdat->pfmemalloc_wait);
- /*
- * Fragmentation may mean that the system cannot be rebalanced
- * for high-order allocations in all zones. If twice the
- * allocation size has been reclaimed and the zones are still
- * not balanced then recheck the watermarks at order-0 to
- * prevent kswapd reclaiming excessively. Assume that a
- * process requested a high-order can direct reclaim/compact.
- */
- if (order && sc.nr_reclaimed >= 2UL << order)
- order = sc.order = 0;
-
/* Check if kswapd should be suspending */
if (try_to_freeze() || kthread_should_stop())
break;
/*
- * Compact if necessary and kswapd is reclaiming at least the
- * high watermark number of pages as requsted
- */
- if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
- compact_pgdat(pgdat, order);
-
- /*
* Raise priority if scanning rate is too low or there was no
* progress in reclaiming pages
*/
if (raise_priority || !sc.nr_reclaimed)
sc.priority--;
} while (sc.priority >= 1 &&
- !pgdat_balanced(pgdat, order, *classzone_idx));
+ !pgdat_balanced(pgdat, order, classzone_idx));
out:
/*
- * Return the order we were reclaiming at so prepare_kswapd_sleep()
- * makes a decision on the order we were last reclaiming at. However,
- * if another caller entered the allocator slow path while kswapd
- * was awake, order will remain at the higher level
+ * Return the highest zone idx we were reclaiming at so
+ * prepare_kswapd_sleep() makes the same decisions as here.
*/
- *classzone_idx = end_zone;
- return order;
+ return end_zone;
}
-static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
+ int classzone_idx, int balanced_classzone_idx)
{
long remaining = 0;
DEFINE_WAIT(wait);
@@ -3458,7 +3413,22 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
/* Try to sleep for a short interval */
- if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
+ if (prepare_kswapd_sleep(pgdat, order, remaining,
+ balanced_classzone_idx)) {
+ /*
+ * Compaction records what page blocks it recently failed to
+ * isolate pages from and skips them in the future scanning.
+ * When kswapd is going to sleep, it is reasonable to assume
+ * that pages and compaction may succeed so reset the cache.
+ */
+ reset_isolation_suitable(pgdat);
+
+ /*
+ * We have freed the memory, now we should compact it to make
+ * allocation of the requested order possible.
+ */
+ wakeup_kcompactd(pgdat, order, classzone_idx);
+
remaining = schedule_timeout(HZ/10);
finish_wait(&pgdat->kswapd_wait, &wait);
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3468,7 +3438,8 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
* After a short sleep, check if it was a premature sleep. If not, then
* go fully to sleep until explicitly woken up.
*/
- if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
+ if (prepare_kswapd_sleep(pgdat, order, remaining,
+ balanced_classzone_idx)) {
trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
/*
@@ -3481,14 +3452,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- /*
- * Compaction records what page blocks it recently failed to
- * isolate pages from and skips them in the future scanning.
- * When kswapd is going to sleep, it is reasonable to assume
- * that pages and compaction may succeed so reset the cache.
- */
- reset_isolation_suitable(pgdat);
-
if (!kthread_should_stop())
schedule();
@@ -3518,7 +3481,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
static int kswapd(void *p)
{
unsigned long order, new_order;
- unsigned balanced_order;
int classzone_idx, new_classzone_idx;
int balanced_classzone_idx;
pg_data_t *pgdat = (pg_data_t*)p;
@@ -3551,24 +3513,19 @@ static int kswapd(void *p)
set_freezable();
order = new_order = 0;
- balanced_order = 0;
classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
balanced_classzone_idx = classzone_idx;
for ( ; ; ) {
bool ret;
/*
- * If the last balance_pgdat was unsuccessful it's unlikely a
- * new request of a similar or harder type will succeed soon
- * so consider going to sleep on the basis we reclaimed at
+ * While we were reclaiming, there might have been another
+ * wakeup, so check the values.
*/
- if (balanced_classzone_idx >= new_classzone_idx &&
- balanced_order == new_order) {
- new_order = pgdat->kswapd_max_order;
- new_classzone_idx = pgdat->classzone_idx;
- pgdat->kswapd_max_order = 0;
- pgdat->classzone_idx = pgdat->nr_zones - 1;
- }
+ new_order = pgdat->kswapd_max_order;
+ new_classzone_idx = pgdat->classzone_idx;
+ pgdat->kswapd_max_order = 0;
+ pgdat->classzone_idx = pgdat->nr_zones - 1;
if (order < new_order || classzone_idx > new_classzone_idx) {
/*
@@ -3578,7 +3535,7 @@ static int kswapd(void *p)
order = new_order;
classzone_idx = new_classzone_idx;
} else {
- kswapd_try_to_sleep(pgdat, balanced_order,
+ kswapd_try_to_sleep(pgdat, order, classzone_idx,
balanced_classzone_idx);
order = pgdat->kswapd_max_order;
classzone_idx = pgdat->classzone_idx;
@@ -3598,9 +3555,8 @@ static int kswapd(void *p)
*/
if (!ret) {
trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
- balanced_classzone_idx = classzone_idx;
- balanced_order = balance_pgdat(pgdat, order,
- &balanced_classzone_idx);
+ balanced_classzone_idx = balance_pgdat(pgdat, order,
+ classzone_idx);
}
}
@@ -3630,7 +3586,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
}
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
- if (zone_balanced(zone, order, 0, 0))
+ if (zone_balanced(zone, order, true, 0, 0))
return;
trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 77b8eabd5446..b8f2eda3381d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -827,6 +827,7 @@ const char * const vmstat_text[] = {
"compact_stall",
"compact_fail",
"compact_success",
+ "compact_daemon_wake",
#endif
#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c1ea19478119..1eb00e343523 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -16,32 +16,15 @@
* struct page(s) to form a zspage.
*
* Usage of struct page fields:
- * page->private: points to the first component (0-order) page
- * page->index (union with page->freelist): offset of the first object
- * starting in this page. For the first page, this is
- * always 0, so we use this field (aka freelist) to point
- * to the first free object in zspage.
- * page->lru: links together all component pages (except the first page)
- * of a zspage
- *
- * For _first_ page only:
- *
- * page->private: refers to the component page after the first page
- * If the page is first_page for huge object, it stores handle.
- * Look at size_class->huge.
- * page->freelist: points to the first free object in zspage.
- * Free objects are linked together using in-place
- * metadata.
- * page->objects: maximum number of objects we can store in this
- * zspage (class->zspage_order * PAGE_SIZE / class->size)
- * page->lru: links together first pages of various zspages.
- * Basically forming list of zspages in a fullness group.
- * page->mapping: class index and fullness group of the zspage
- * page->inuse: the number of objects that are used in this zspage
+ * page->private: points to zspage
+ * page->freelist(index): links together all component pages of a zspage
+ * For the huge page, this is always 0, so we use this field
+ * to store handle.
*
* Usage of struct page flags:
* PG_private: identifies the first component page
* PG_private2: identifies the last component page
+ * PG_owner_priv_1: identifies the huge component page
*
*/
@@ -64,6 +47,11 @@
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
+#include <linux/mount.h>
+#include <linux/migrate.h>
+#include <linux/pagemap.h>
+
+#define ZSPAGE_MAGIC 0x58
/*
* This must be power of 2 and greater than of equal to sizeof(link_free).
@@ -86,9 +74,7 @@
* Object location (<PFN>, <obj_idx>) is encoded as
* as single (unsigned long) handle value.
*
- * Note that object index <obj_idx> is relative to system
- * page <PFN> it is stored in, so for each sub-page belonging
- * to a zspage, obj_idx starts with 0.
+ * Note that object index <obj_idx> starts from 0.
*
* This is made more complicated by various memory models and PAE.
*/
@@ -147,33 +133,29 @@
* ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
* (reason above)
*/
-#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
+#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS)
/*
* We do not maintain any list for completely empty or full pages
*/
enum fullness_group {
- ZS_ALMOST_FULL,
- ZS_ALMOST_EMPTY,
- _ZS_NR_FULLNESS_GROUPS,
-
ZS_EMPTY,
- ZS_FULL
+ ZS_ALMOST_EMPTY,
+ ZS_ALMOST_FULL,
+ ZS_FULL,
+ NR_ZS_FULLNESS,
};
enum zs_stat_type {
+ CLASS_EMPTY,
+ CLASS_ALMOST_EMPTY,
+ CLASS_ALMOST_FULL,
+ CLASS_FULL,
OBJ_ALLOCATED,
OBJ_USED,
- CLASS_ALMOST_FULL,
- CLASS_ALMOST_EMPTY,
+ NR_ZS_STAT_TYPE,
};
-#ifdef CONFIG_ZSMALLOC_STAT
-#define NR_ZS_STAT_TYPE (CLASS_ALMOST_EMPTY + 1)
-#else
-#define NR_ZS_STAT_TYPE (OBJ_USED + 1)
-#endif
-
struct zs_size_stat {
unsigned long objs[NR_ZS_STAT_TYPE];
};
@@ -182,6 +164,10 @@ struct zs_size_stat {
static struct dentry *zs_stat_root;
#endif
+#ifdef CONFIG_COMPACTION
+static struct vfsmount *zsmalloc_mnt;
+#endif
+
/*
* number of size_classes
*/
@@ -205,35 +191,49 @@ static const int fullness_threshold_frac = 4;
struct size_class {
spinlock_t lock;
- struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+ struct list_head fullness_list[NR_ZS_FULLNESS];
/*
* Size of objects stored in this class. Must be multiple
* of ZS_ALIGN.
*/
int size;
- unsigned int index;
-
+ int objs_per_zspage;
/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
int pages_per_zspage;
- struct zs_size_stat stats;
- /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
- bool huge;
+ unsigned int index;
+ struct zs_size_stat stats;
};
+/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+static void SetPageHugeObject(struct page *page)
+{
+ SetPageOwnerPriv1(page);
+}
+
+static void ClearPageHugeObject(struct page *page)
+{
+ ClearPageOwnerPriv1(page);
+}
+
+static int PageHugeObject(struct page *page)
+{
+ return PageOwnerPriv1(page);
+}
+
/*
* Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
+ * For every zspage, zspage->freeobj gives head of this list.
*
* This must be power of 2 and less than or equal to ZS_ALIGN
*/
struct link_free {
union {
/*
- * Position of next free chunk (encodes <PFN, obj_idx>)
+ * Free object index;
* It's valid for non-allocated object
*/
- void *next;
+ unsigned long next;
/*
* Handle of allocated object.
*/
@@ -246,8 +246,8 @@ struct zs_pool {
struct size_class **size_class;
struct kmem_cache *handle_cachep;
+ struct kmem_cache *zspage_cachep;
- gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated;
struct zs_pool_stats stats;
@@ -262,16 +262,36 @@ struct zs_pool {
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
#endif
+#ifdef CONFIG_COMPACTION
+ struct inode *inode;
+ struct work_struct free_work;
+#endif
};
/*
* A zspage's class index and fullness group
* are encoded in its (first)page->mapping
*/
-#define CLASS_IDX_BITS 28
-#define FULLNESS_BITS 4
-#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
-#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
+#define FULLNESS_BITS 2
+#define CLASS_BITS 8
+#define ISOLATED_BITS 3
+#define MAGIC_VAL_BITS 8
+
+struct zspage {
+ struct {
+ unsigned int fullness:FULLNESS_BITS;
+ unsigned int class:CLASS_BITS;
+ unsigned int isolated:ISOLATED_BITS;
+ unsigned int magic:MAGIC_VAL_BITS;
+ };
+ unsigned int inuse;
+ unsigned int freeobj;
+ struct page *first_page;
+ struct list_head list; /* fullness list */
+#ifdef CONFIG_COMPACTION
+ rwlock_t lock;
+#endif
+};
struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
@@ -281,32 +301,76 @@ struct mapping_area {
#endif
char *vm_addr; /* address of kmap_atomic()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
- bool huge;
};
-static int create_handle_cache(struct zs_pool *pool)
+#ifdef CONFIG_COMPACTION
+static int zs_register_migration(struct zs_pool *pool);
+static void zs_unregister_migration(struct zs_pool *pool);
+static void migrate_lock_init(struct zspage *zspage);
+static void migrate_read_lock(struct zspage *zspage);
+static void migrate_read_unlock(struct zspage *zspage);
+static void kick_deferred_free(struct zs_pool *pool);
+static void init_deferred_free(struct zs_pool *pool);
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
+#else
+static int zsmalloc_mount(void) { return 0; }
+static void zsmalloc_unmount(void) {}
+static int zs_register_migration(struct zs_pool *pool) { return 0; }
+static void zs_unregister_migration(struct zs_pool *pool) {}
+static void migrate_lock_init(struct zspage *zspage) {}
+static void migrate_read_lock(struct zspage *zspage) {}
+static void migrate_read_unlock(struct zspage *zspage) {}
+static void kick_deferred_free(struct zs_pool *pool) {}
+static void init_deferred_free(struct zs_pool *pool) {}
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
+#endif
+
+static int create_cache(struct zs_pool *pool)
{
pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
0, 0, NULL);
- return pool->handle_cachep ? 0 : 1;
+ if (!pool->handle_cachep)
+ return 1;
+
+ pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
+ 0, 0, NULL);
+ if (!pool->zspage_cachep) {
+ kmem_cache_destroy(pool->handle_cachep);
+ pool->handle_cachep = NULL;
+ return 1;
+ }
+
+ return 0;
}
-static void destroy_handle_cache(struct zs_pool *pool)
+static void destroy_cache(struct zs_pool *pool)
{
kmem_cache_destroy(pool->handle_cachep);
+ kmem_cache_destroy(pool->zspage_cachep);
}
-static unsigned long alloc_handle(struct zs_pool *pool)
+static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
- pool->flags & ~__GFP_HIGHMEM);
+ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}
-static void free_handle(struct zs_pool *pool, unsigned long handle)
+static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
kmem_cache_free(pool->handle_cachep, (void *)handle);
}
+static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
+{
+ return kmem_cache_alloc(pool->zspage_cachep,
+ flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+};
+
+static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+{
+ kmem_cache_free(pool->zspage_cachep, zspage);
+}
+
static void record_obj(unsigned long handle, unsigned long obj)
{
/*
@@ -325,7 +389,12 @@ static void *zs_zpool_create(const char *name, gfp_t gfp,
const struct zpool_ops *zpool_ops,
struct zpool *zpool)
{
- return zs_create_pool(name, gfp);
+ /*
+ * Ignore global gfp flags: zs_malloc() may be invoked from
+ * different contexts and its caller must provide a valid
+ * gfp mask.
+ */
+ return zs_create_pool(name);
}
static void zs_zpool_destroy(void *pool)
@@ -336,7 +405,7 @@ static void zs_zpool_destroy(void *pool)
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
- *handle = zs_malloc(pool, size);
+ *handle = zs_malloc(pool, size, gfp);
return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
@@ -404,36 +473,76 @@ static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+static bool is_zspage_isolated(struct zspage *zspage)
+{
+ return zspage->isolated;
+}
+
static int is_first_page(struct page *page)
{
return PagePrivate(page);
}
-static int is_last_page(struct page *page)
+/* Protected by class->lock */
+static inline int get_zspage_inuse(struct zspage *zspage)
+{
+ return zspage->inuse;
+}
+
+static inline void set_zspage_inuse(struct zspage *zspage, int val)
+{
+ zspage->inuse = val;
+}
+
+static inline void mod_zspage_inuse(struct zspage *zspage, int val)
+{
+ zspage->inuse += val;
+}
+
+static inline struct page *get_first_page(struct zspage *zspage)
{
- return PagePrivate2(page);
+ struct page *first_page = zspage->first_page;
+
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+ return first_page;
}
-static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+static inline int get_first_obj_offset(struct page *page)
+{
+ return page->units;
+}
+
+static inline void set_first_obj_offset(struct page *page, int offset)
+{
+ page->units = offset;
+}
+
+static inline unsigned int get_freeobj(struct zspage *zspage)
+{
+ return zspage->freeobj;
+}
+
+static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
+{
+ zspage->freeobj = obj;
+}
+
+static void get_zspage_mapping(struct zspage *zspage,
+ unsigned int *class_idx,
enum fullness_group *fullness)
{
- unsigned long m;
- BUG_ON(!is_first_page(page));
+ BUG_ON(zspage->magic != ZSPAGE_MAGIC);
- m = (unsigned long)page->mapping;
- *fullness = m & FULLNESS_MASK;
- *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
+ *fullness = zspage->fullness;
+ *class_idx = zspage->class;
}
-static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+static void set_zspage_mapping(struct zspage *zspage,
+ unsigned int class_idx,
enum fullness_group fullness)
{
- unsigned long m;
- BUG_ON(!is_first_page(page));
-
- m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
- (fullness & FULLNESS_MASK);
- page->mapping = (struct address_space *)m;
+ zspage->class = class_idx;
+ zspage->fullness = fullness;
}
/*
@@ -457,23 +566,19 @@ static int get_size_class_index(int size)
static inline void zs_stat_inc(struct size_class *class,
enum zs_stat_type type, unsigned long cnt)
{
- if (type < NR_ZS_STAT_TYPE)
- class->stats.objs[type] += cnt;
+ class->stats.objs[type] += cnt;
}
static inline void zs_stat_dec(struct size_class *class,
enum zs_stat_type type, unsigned long cnt)
{
- if (type < NR_ZS_STAT_TYPE)
- class->stats.objs[type] -= cnt;
+ class->stats.objs[type] -= cnt;
}
static inline unsigned long zs_stat_get(struct size_class *class,
enum zs_stat_type type)
{
- if (type < NR_ZS_STAT_TYPE)
- return class->stats.objs[type];
- return 0;
+ return class->stats.objs[type];
}
#ifdef CONFIG_ZSMALLOC_STAT
@@ -495,6 +600,8 @@ static void __exit zs_stat_exit(void)
debugfs_remove_recursive(zs_stat_root);
}
+static unsigned long zs_can_compact(struct size_class *class);
+
static int zs_stats_size_show(struct seq_file *s, void *v)
{
int i;
@@ -502,14 +609,15 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
struct size_class *class;
int objs_per_zspage;
unsigned long class_almost_full, class_almost_empty;
- unsigned long obj_allocated, obj_used, pages_used;
+ unsigned long obj_allocated, obj_used, pages_used, freeable;
unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+ unsigned long total_freeable = 0;
- seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
+ seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
"class", "size", "almost_full", "almost_empty",
"obj_allocated", "obj_used", "pages_used",
- "pages_per_zspage");
+ "pages_per_zspage", "freeable");
for (i = 0; i < zs_size_classes; i++) {
class = pool->size_class[i];
@@ -522,6 +630,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
obj_used = zs_stat_get(class, OBJ_USED);
+ freeable = zs_can_compact(class);
spin_unlock(&class->lock);
objs_per_zspage = get_maxobj_per_zspage(class->size,
@@ -529,23 +638,25 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
pages_used = obj_allocated / objs_per_zspage *
class->pages_per_zspage;
- seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
+ seq_printf(s, " %5u %5u %11lu %12lu %13lu"
+ " %10lu %10lu %16d %8lu\n",
i, class->size, class_almost_full, class_almost_empty,
obj_allocated, obj_used, pages_used,
- class->pages_per_zspage);
+ class->pages_per_zspage, freeable);
total_class_almost_full += class_almost_full;
total_class_almost_empty += class_almost_empty;
total_objs += obj_allocated;
total_used_objs += obj_used;
total_pages += pages_used;
+ total_freeable += freeable;
}
seq_puts(s, "\n");
- seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
+ seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
"Total", "", total_class_almost_full,
total_class_almost_empty, total_objs,
- total_used_objs, total_pages);
+ total_used_objs, total_pages, "", total_freeable);
return 0;
}
@@ -562,7 +673,7 @@ static const struct file_operations zs_stat_size_ops = {
.release = single_release,
};
-static int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static int zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
struct dentry *entry;
@@ -602,7 +713,7 @@ static void __exit zs_stat_exit(void)
{
}
-static inline int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static inline int zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
return 0;
}
@@ -620,20 +731,20 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
* the pool (not yet implemented). This function returns fullness
* status of the given page.
*/
-static enum fullness_group get_fullness_group(struct page *page)
+static enum fullness_group get_fullness_group(struct size_class *class,
+ struct zspage *zspage)
{
- int inuse, max_objects;
+ int inuse, objs_per_zspage;
enum fullness_group fg;
- BUG_ON(!is_first_page(page));
- inuse = page->inuse;
- max_objects = page->objects;
+ inuse = get_zspage_inuse(zspage);
+ objs_per_zspage = class->objs_per_zspage;
if (inuse == 0)
fg = ZS_EMPTY;
- else if (inuse == max_objects)
+ else if (inuse == objs_per_zspage)
fg = ZS_FULL;
- else if (inuse <= 3 * max_objects / fullness_threshold_frac)
+ else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
fg = ZS_ALMOST_EMPTY;
else
fg = ZS_ALMOST_FULL;
@@ -647,59 +758,41 @@ static enum fullness_group get_fullness_group(struct page *page)
* have. This functions inserts the given zspage into the freelist
* identified by <class, fullness_group>.
*/
-static void insert_zspage(struct page *page, struct size_class *class,
+static void insert_zspage(struct size_class *class,
+ struct zspage *zspage,
enum fullness_group fullness)
{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
- CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
-
- head = &class->fullness_list[fullness];
- if (!*head) {
- *head = page;
- return;
- }
+ struct zspage *head;
+ zs_stat_inc(class, fullness, 1);
+ head = list_first_entry_or_null(&class->fullness_list[fullness],
+ struct zspage, list);
/*
- * We want to see more ZS_FULL pages and less almost
- * empty/full. Put pages with higher ->inuse first.
+ * We want to see more ZS_FULL pages and fewer almost empty/full.
+ * Put pages with higher ->inuse first.
*/
- list_add_tail(&page->lru, &(*head)->lru);
- if (page->inuse >= (*head)->inuse)
- *head = page;
+ if (head) {
+ if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
+ list_add(&zspage->list, &head->list);
+ return;
+ }
+ }
+ list_add(&zspage->list, &class->fullness_list[fullness]);
}
/*
* This function removes the given zspage from the freelist identified
* by <class, fullness_group>.
*/
-static void remove_zspage(struct page *page, struct size_class *class,
+static void remove_zspage(struct size_class *class,
+ struct zspage *zspage,
enum fullness_group fullness)
{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- head = &class->fullness_list[fullness];
- BUG_ON(!*head);
- if (list_empty(&(*head)->lru))
- *head = NULL;
- else if (*head == page)
- *head = (struct page *)list_entry((*head)->lru.next,
- struct page, lru);
+ VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
+ VM_BUG_ON(is_zspage_isolated(zspage));
- list_del_init(&page->lru);
- zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
- CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
+ list_del_init(&zspage->list);
+ zs_stat_dec(class, fullness, 1);
}
/*
@@ -712,21 +805,22 @@ static void remove_zspage(struct page *page, struct size_class *class,
* fullness group.
*/
static enum fullness_group fix_fullness_group(struct size_class *class,
- struct page *page)
+ struct zspage *zspage)
{
int class_idx;
enum fullness_group currfg, newfg;
- BUG_ON(!is_first_page(page));
-
- get_zspage_mapping(page, &class_idx, &currfg);
- newfg = get_fullness_group(page);
+ get_zspage_mapping(zspage, &class_idx, &currfg);
+ newfg = get_fullness_group(class, zspage);
if (newfg == currfg)
goto out;
- remove_zspage(page, class, currfg);
- insert_zspage(page, class, newfg);
- set_zspage_mapping(page, class_idx, newfg);
+ if (!is_zspage_isolated(zspage)) {
+ remove_zspage(class, zspage, currfg);
+ insert_zspage(class, zspage, newfg);
+ }
+
+ set_zspage_mapping(zspage, class_idx, newfg);
out:
return newfg;
@@ -768,64 +862,49 @@ static int get_pages_per_zspage(int class_size)
return max_usedpc_order;
}
-/*
- * A single 'zspage' is composed of many system pages which are
- * linked together using fields in struct page. This function finds
- * the first/head page, given any component page of a zspage.
- */
-static struct page *get_first_page(struct page *page)
+static struct zspage *get_zspage(struct page *page)
{
- if (is_first_page(page))
- return page;
- else
- return (struct page *)page_private(page);
+ struct zspage *zspage = (struct zspage *)page->private;
+
+ BUG_ON(zspage->magic != ZSPAGE_MAGIC);
+ return zspage;
}
static struct page *get_next_page(struct page *page)
{
- struct page *next;
+ if (unlikely(PageHugeObject(page)))
+ return NULL;
- if (is_last_page(page))
- next = NULL;
- else if (is_first_page(page))
- next = (struct page *)page_private(page);
- else
- next = list_entry(page->lru.next, struct page, lru);
+ return page->freelist;
+}
- return next;
+/**
+ * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * @page: page object resides in zspage
+ * @obj_idx: object index
+ */
+static void obj_to_location(unsigned long obj, struct page **page,
+ unsigned int *obj_idx)
+{
+ obj >>= OBJ_TAG_BITS;
+ *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *obj_idx = (obj & OBJ_INDEX_MASK);
}
-/*
- * Encode <page, obj_idx> as a single handle value.
- * We use the least bit of handle for tagging.
+/**
+ * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
+ * @page: page object resides in zspage
+ * @obj_idx: object index
*/
-static void *location_to_obj(struct page *page, unsigned long obj_idx)
+static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
unsigned long obj;
- if (!page) {
- BUG_ON(obj_idx);
- return NULL;
- }
-
obj = page_to_pfn(page) << OBJ_INDEX_BITS;
- obj |= ((obj_idx) & OBJ_INDEX_MASK);
+ obj |= obj_idx & OBJ_INDEX_MASK;
obj <<= OBJ_TAG_BITS;
- return (void *)obj;
-}
-
-/*
- * Decode <page, obj_idx> pair from the given object handle. We adjust the
- * decoded obj_idx back to its original value since it was adjusted in
- * location_to_obj().
- */
-static void obj_to_location(unsigned long obj, struct page **page,
- unsigned long *obj_idx)
-{
- obj >>= OBJ_TAG_BITS;
- *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
- *obj_idx = (obj & OBJ_INDEX_MASK);
+ return obj;
}
static unsigned long handle_to_obj(unsigned long handle)
@@ -833,108 +912,146 @@ static unsigned long handle_to_obj(unsigned long handle)
return *(unsigned long *)handle;
}
-static unsigned long obj_to_head(struct size_class *class, struct page *page,
- void *obj)
+static unsigned long obj_to_head(struct page *page, void *obj)
{
- if (class->huge) {
- VM_BUG_ON(!is_first_page(page));
- return page_private(page);
+ if (unlikely(PageHugeObject(page))) {
+ VM_BUG_ON_PAGE(!is_first_page(page), page);
+ return page->index;
} else
return *(unsigned long *)obj;
}
-static unsigned long obj_idx_to_offset(struct page *page,
- unsigned long obj_idx, int class_size)
+static inline int testpin_tag(unsigned long handle)
{
- unsigned long off = 0;
-
- if (!is_first_page(page))
- off = page->index;
-
- return off + obj_idx * class_size;
+ return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static inline int trypin_tag(unsigned long handle)
{
- unsigned long *ptr = (unsigned long *)handle;
-
- return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
+ return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static void pin_tag(unsigned long handle)
{
- while (!trypin_tag(handle));
+ bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static void unpin_tag(unsigned long handle)
{
- unsigned long *ptr = (unsigned long *)handle;
-
- clear_bit_unlock(HANDLE_PIN_BIT, ptr);
+ bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static void reset_page(struct page *page)
{
+ __ClearPageMovable(page);
clear_bit(PG_private, &page->flags);
clear_bit(PG_private_2, &page->flags);
set_page_private(page, 0);
- page->mapping = NULL;
- page->freelist = NULL;
page_mapcount_reset(page);
+ ClearPageHugeObject(page);
+ page->freelist = NULL;
}
-static void free_zspage(struct page *first_page)
+/*
+ * To prevent zspage destroy during migration, zspage freeing should
+ * hold locks of all pages in the zspage.
+ */
+void lock_zspage(struct zspage *zspage)
{
- struct page *nextp, *tmp, *head_extra;
+ struct page *page = get_first_page(zspage);
- BUG_ON(!is_first_page(first_page));
- BUG_ON(first_page->inuse);
+ do {
+ lock_page(page);
+ } while ((page = get_next_page(page)) != NULL);
+}
- head_extra = (struct page *)page_private(first_page);
+int trylock_zspage(struct zspage *zspage)
+{
+ struct page *cursor, *fail;
- reset_page(first_page);
- __free_page(first_page);
+ for (cursor = get_first_page(zspage); cursor != NULL; cursor =
+ get_next_page(cursor)) {
+ if (!trylock_page(cursor)) {
+ fail = cursor;
+ goto unlock;
+ }
+ }
- /* zspage with only 1 system page */
- if (!head_extra)
- return;
+ return 1;
+unlock:
+ for (cursor = get_first_page(zspage); cursor != fail; cursor =
+ get_next_page(cursor))
+ unlock_page(cursor);
+
+ return 0;
+}
+
+static void __free_zspage(struct zs_pool *pool, struct size_class *class,
+ struct zspage *zspage)
+{
+ struct page *page, *next;
+ enum fullness_group fg;
+ unsigned int class_idx;
+
+ get_zspage_mapping(zspage, &class_idx, &fg);
+
+ assert_spin_locked(&class->lock);
+
+ VM_BUG_ON(get_zspage_inuse(zspage));
+ VM_BUG_ON(fg != ZS_EMPTY);
+
+ next = page = get_first_page(zspage);
+ do {
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ next = get_next_page(page);
+ reset_page(page);
+ unlock_page(page);
+ put_page(page);
+ page = next;
+ } while (page != NULL);
- list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
- list_del(&nextp->lru);
- reset_page(nextp);
- __free_page(nextp);
+ cache_free_zspage(pool, zspage);
+
+ zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
+ atomic_long_sub(class->pages_per_zspage,
+ &pool->pages_allocated);
+}
+
+static void free_zspage(struct zs_pool *pool, struct size_class *class,
+ struct zspage *zspage)
+{
+ VM_BUG_ON(get_zspage_inuse(zspage));
+ VM_BUG_ON(list_empty(&zspage->list));
+
+ if (!trylock_zspage(zspage)) {
+ kick_deferred_free(pool);
+ return;
}
- reset_page(head_extra);
- __free_page(head_extra);
+
+ remove_zspage(class, zspage, ZS_EMPTY);
+ __free_zspage(pool, class, zspage);
}
/* Initialize a newly allocated zspage */
-static void init_zspage(struct page *first_page, struct size_class *class)
+static void init_zspage(struct size_class *class, struct zspage *zspage)
{
+ unsigned int freeobj = 1;
unsigned long off = 0;
- struct page *page = first_page;
+ struct page *page = get_first_page(zspage);
- BUG_ON(!is_first_page(first_page));
while (page) {
struct page *next_page;
struct link_free *link;
- unsigned int i = 1;
void *vaddr;
- /*
- * page->index stores offset of first object starting
- * in the page. For the first page, this is always 0,
- * so we use first_page->index (aka ->freelist) to store
- * head of corresponding zspage's freelist.
- */
- if (page != first_page)
- page->index = off;
+ set_first_obj_offset(page, off);
vaddr = kmap_atomic(page);
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
- link->next = location_to_obj(page, i++);
+ link->next = freeobj++ << OBJ_ALLOCATED_TAG;
link += class->size / sizeof(*link);
}
@@ -944,87 +1061,108 @@ static void init_zspage(struct page *first_page, struct size_class *class)
* page (if present)
*/
next_page = get_next_page(page);
- link->next = location_to_obj(next_page, 0);
+ if (next_page) {
+ link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+ } else {
+ /*
+ * Reset OBJ_ALLOCATED_TAG bit to last link to tell
+ * whether it's allocated object or not.
+ */
+ link->next = -1 << OBJ_ALLOCATED_TAG;
+ }
kunmap_atomic(vaddr);
page = next_page;
off %= PAGE_SIZE;
}
+
+ set_freeobj(zspage, 0);
}
-/*
- * Allocate a zspage for the given size class
- */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+static void create_page_chain(struct size_class *class, struct zspage *zspage,
+ struct page *pages[])
{
- int i, error;
- struct page *first_page = NULL, *uninitialized_var(prev_page);
+ int i;
+ struct page *page;
+ struct page *prev_page = NULL;
+ int nr_pages = class->pages_per_zspage;
/*
* Allocate individual pages and link them together as:
- * 1. first page->private = first sub-page
- * 2. all sub-pages are linked together using page->lru
- * 3. each sub-page is linked to the first page using page->private
+ * 1. all pages are linked together using page->freelist
+ * 2. each sub-page point to zspage using page->private
*
- * For each size class, First/Head pages are linked together using
- * page->lru. Also, we set PG_private to identify the first page
- * (i.e. no other sub-page has this flag set) and PG_private_2 to
- * identify the last page.
+ * we set PG_private to identify the first page (i.e. no other sub-page
+ * has this flag set) and PG_private_2 to identify the last page.
*/
- error = -ENOMEM;
- for (i = 0; i < class->pages_per_zspage; i++) {
- struct page *page;
-
- page = alloc_page(flags);
- if (!page)
- goto cleanup;
-
- INIT_LIST_HEAD(&page->lru);
- if (i == 0) { /* first page */
+ for (i = 0; i < nr_pages; i++) {
+ page = pages[i];
+ set_page_private(page, (unsigned long)zspage);
+ page->freelist = NULL;
+ if (i == 0) {
+ zspage->first_page = page;
SetPagePrivate(page);
- set_page_private(page, 0);
- first_page = page;
- first_page->inuse = 0;
+ if (unlikely(class->objs_per_zspage == 1 &&
+ class->pages_per_zspage == 1))
+ SetPageHugeObject(page);
+ } else {
+ prev_page->freelist = page;
}
- if (i == 1)
- set_page_private(first_page, (unsigned long)page);
- if (i >= 1)
- set_page_private(page, (unsigned long)first_page);
- if (i >= 2)
- list_add(&page->lru, &prev_page->lru);
- if (i == class->pages_per_zspage - 1) /* last page */
+ if (i == nr_pages - 1)
SetPagePrivate2(page);
prev_page = page;
}
+}
- init_zspage(first_page, class);
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct zspage *alloc_zspage(struct zs_pool *pool,
+ struct size_class *class,
+ gfp_t gfp)
+{
+ int i;
+ struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
+ struct zspage *zspage = cache_alloc_zspage(pool, gfp);
- first_page->freelist = location_to_obj(first_page, 0);
- /* Maximum number of objects we can store in this zspage */
- first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
+ if (!zspage)
+ return NULL;
- error = 0; /* Success */
+ memset(zspage, 0, sizeof(struct zspage));
+ zspage->magic = ZSPAGE_MAGIC;
+ migrate_lock_init(zspage);
-cleanup:
- if (unlikely(error) && first_page) {
- free_zspage(first_page);
- first_page = NULL;
+ for (i = 0; i < class->pages_per_zspage; i++) {
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (!page) {
+ while (--i >= 0)
+ __free_page(pages[i]);
+ cache_free_zspage(pool, zspage);
+ return NULL;
+ }
+ pages[i] = page;
}
- return first_page;
+ create_page_chain(class, zspage, pages);
+ init_zspage(class, zspage);
+
+ return zspage;
}
-static struct page *find_get_zspage(struct size_class *class)
+static struct zspage *find_get_zspage(struct size_class *class)
{
int i;
- struct page *page;
+ struct zspage *zspage;
- for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
- page = class->fullness_list[i];
- if (page)
+ for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
+ zspage = list_first_entry_or_null(&class->fullness_list[i],
+ struct zspage, list);
+ if (zspage)
break;
}
- return page;
+ return zspage;
}
#ifdef CONFIG_PGTABLE_MAPPING
@@ -1127,11 +1265,9 @@ static void __zs_unmap_object(struct mapping_area *area,
goto out;
buf = area->vm_buf;
- if (!area->huge) {
- buf = buf + ZS_HANDLE_SIZE;
- size -= ZS_HANDLE_SIZE;
- off += ZS_HANDLE_SIZE;
- }
+ buf = buf + ZS_HANDLE_SIZE;
+ size -= ZS_HANDLE_SIZE;
+ off += ZS_HANDLE_SIZE;
sizes[0] = PAGE_SIZE - off;
sizes[1] = size - sizes[0];
@@ -1231,11 +1367,9 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
-static bool zspage_full(struct page *page)
+static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
- BUG_ON(!is_first_page(page));
-
- return page->inuse == page->objects;
+ return get_zspage_inuse(zspage) == class->objs_per_zspage;
}
unsigned long zs_get_total_pages(struct zs_pool *pool)
@@ -1261,8 +1395,10 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages);
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm)
{
+ struct zspage *zspage;
struct page *page;
- unsigned long obj, obj_idx, off;
+ unsigned long obj, off;
+ unsigned int obj_idx;
unsigned int class_idx;
enum fullness_group fg;
@@ -1271,23 +1407,26 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
struct page *pages[2];
void *ret;
- BUG_ON(!handle);
-
/*
* Because we use per-cpu mapping areas shared among the
* pools/users, we can't allow mapping in interrupt context
* because it can corrupt another users mappings.
*/
- BUG_ON(in_interrupt());
+ WARN_ON_ONCE(in_interrupt());
/* From now on, migration cannot move the object */
pin_tag(handle);
obj = handle_to_obj(handle);
obj_to_location(obj, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ zspage = get_zspage(page);
+
+ /* migration cannot move any subpage in this zspage */
+ migrate_read_lock(zspage);
+
+ get_zspage_mapping(zspage, &class_idx, &fg);
class = pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ off = (class->size * obj_idx) & ~PAGE_MASK;
area = &get_cpu_var(zs_map_area);
area->vm_mm = mm;
@@ -1305,7 +1444,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
ret = __zs_map_object(area, pages, off, class->size);
out:
- if (!class->huge)
+ if (likely(!PageHugeObject(page)))
ret += ZS_HANDLE_SIZE;
return ret;
@@ -1314,21 +1453,22 @@ EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
+ struct zspage *zspage;
struct page *page;
- unsigned long obj, obj_idx, off;
+ unsigned long obj, off;
+ unsigned int obj_idx;
unsigned int class_idx;
enum fullness_group fg;
struct size_class *class;
struct mapping_area *area;
- BUG_ON(!handle);
-
obj = handle_to_obj(handle);
obj_to_location(obj, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ zspage = get_zspage(page);
+ get_zspage_mapping(zspage, &class_idx, &fg);
class = pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ off = (class->size * obj_idx) & ~PAGE_MASK;
area = this_cpu_ptr(&zs_map_area);
if (off + class->size <= PAGE_SIZE)
@@ -1343,38 +1483,50 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
__zs_unmap_object(area, pages, off, class->size);
}
put_cpu_var(zs_map_area);
+
+ migrate_read_unlock(zspage);
unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
-static unsigned long obj_malloc(struct page *first_page,
- struct size_class *class, unsigned long handle)
+static unsigned long obj_malloc(struct size_class *class,
+ struct zspage *zspage, unsigned long handle)
{
+ int i, nr_page, offset;
unsigned long obj;
struct link_free *link;
struct page *m_page;
- unsigned long m_objidx, m_offset;
+ unsigned long m_offset;
void *vaddr;
handle |= OBJ_ALLOCATED_TAG;
- obj = (unsigned long)first_page->freelist;
- obj_to_location(obj, &m_page, &m_objidx);
- m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+ obj = get_freeobj(zspage);
+
+ offset = obj * class->size;
+ nr_page = offset >> PAGE_SHIFT;
+ m_offset = offset & ~PAGE_MASK;
+ m_page = get_first_page(zspage);
+
+ for (i = 0; i < nr_page; i++)
+ m_page = get_next_page(m_page);
vaddr = kmap_atomic(m_page);
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
- first_page->freelist = link->next;
- if (!class->huge)
+ set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+ if (likely(!PageHugeObject(m_page)))
/* record handle in the header of allocated chunk */
link->handle = handle;
else
- /* record handle in first_page->private */
- set_page_private(first_page, handle);
+ /* record handle to page->index */
+ zspage->first_page->index = handle;
+
kunmap_atomic(vaddr);
- first_page->inuse++;
+ mod_zspage_inuse(zspage, 1);
zs_stat_inc(class, OBJ_USED, 1);
+ obj = location_to_obj(m_page, obj);
+
return obj;
}
@@ -1388,16 +1540,17 @@ static unsigned long obj_malloc(struct page *first_page,
* otherwise 0.
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
-unsigned long zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
unsigned long handle, obj;
struct size_class *class;
- struct page *first_page;
+ enum fullness_group newfg;
+ struct zspage *zspage;
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
- handle = alloc_handle(pool);
+ handle = cache_alloc_handle(pool, gfp);
if (!handle)
return 0;
@@ -1406,71 +1559,79 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
class = pool->size_class[get_size_class_index(size)];
spin_lock(&class->lock);
- first_page = find_get_zspage(class);
-
- if (!first_page) {
+ zspage = find_get_zspage(class);
+ if (likely(zspage)) {
+ obj = obj_malloc(class, zspage, handle);
+ /* Now move the zspage to another fullness group, if required */
+ fix_fullness_group(class, zspage);
+ record_obj(handle, obj);
spin_unlock(&class->lock);
- first_page = alloc_zspage(class, pool->flags);
- if (unlikely(!first_page)) {
- free_handle(pool, handle);
- return 0;
- }
- set_zspage_mapping(first_page, class->index, ZS_EMPTY);
- atomic_long_add(class->pages_per_zspage,
- &pool->pages_allocated);
+ return handle;
+ }
- spin_lock(&class->lock);
- zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
- class->size, class->pages_per_zspage));
+ spin_unlock(&class->lock);
+
+ zspage = alloc_zspage(pool, class, gfp);
+ if (!zspage) {
+ cache_free_handle(pool, handle);
+ return 0;
}
- obj = obj_malloc(first_page, class, handle);
- /* Now move the zspage to another fullness group, if required */
- fix_fullness_group(class, first_page);
+ spin_lock(&class->lock);
+ obj = obj_malloc(class, zspage, handle);
+ newfg = get_fullness_group(class, zspage);
+ insert_zspage(class, zspage, newfg);
+ set_zspage_mapping(zspage, class->index, newfg);
record_obj(handle, obj);
+ atomic_long_add(class->pages_per_zspage,
+ &pool->pages_allocated);
+ zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
+
+ /* We completely set up zspage so mark them as movable */
+ SetZsPageMovable(pool, zspage);
spin_unlock(&class->lock);
return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
-static void obj_free(struct zs_pool *pool, struct size_class *class,
- unsigned long obj)
+static void obj_free(struct size_class *class, unsigned long obj)
{
struct link_free *link;
- struct page *first_page, *f_page;
- unsigned long f_objidx, f_offset;
+ struct zspage *zspage;
+ struct page *f_page;
+ unsigned long f_offset;
+ unsigned int f_objidx;
void *vaddr;
- BUG_ON(!obj);
-
obj &= ~OBJ_ALLOCATED_TAG;
obj_to_location(obj, &f_page, &f_objidx);
- first_page = get_first_page(f_page);
-
- f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
+ f_offset = (class->size * f_objidx) & ~PAGE_MASK;
+ zspage = get_zspage(f_page);
vaddr = kmap_atomic(f_page);
/* Insert this object in containing zspage's freelist */
link = (struct link_free *)(vaddr + f_offset);
- link->next = first_page->freelist;
- if (class->huge)
- set_page_private(first_page, 0);
+ link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
kunmap_atomic(vaddr);
- first_page->freelist = (void *)obj;
- first_page->inuse--;
+ set_freeobj(zspage, f_objidx);
+ mod_zspage_inuse(zspage, -1);
zs_stat_dec(class, OBJ_USED, 1);
}
void zs_free(struct zs_pool *pool, unsigned long handle)
{
- struct page *first_page, *f_page;
- unsigned long obj, f_objidx;
+ struct zspage *zspage;
+ struct page *f_page;
+ unsigned long obj;
+ unsigned int f_objidx;
int class_idx;
struct size_class *class;
enum fullness_group fullness;
+ bool isolated;
if (unlikely(!handle))
return;
@@ -1478,33 +1639,39 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
pin_tag(handle);
obj = handle_to_obj(handle);
obj_to_location(obj, &f_page, &f_objidx);
- first_page = get_first_page(f_page);
+ zspage = get_zspage(f_page);
+
+ migrate_read_lock(zspage);
- get_zspage_mapping(first_page, &class_idx, &fullness);
+ get_zspage_mapping(zspage, &class_idx, &fullness);
class = pool->size_class[class_idx];
spin_lock(&class->lock);
- obj_free(pool, class, obj);
- fullness = fix_fullness_group(class, first_page);
- if (fullness == ZS_EMPTY) {
- zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
- class->size, class->pages_per_zspage));
- atomic_long_sub(class->pages_per_zspage,
- &pool->pages_allocated);
- free_zspage(first_page);
+ obj_free(class, obj);
+ fullness = fix_fullness_group(class, zspage);
+ if (fullness != ZS_EMPTY) {
+ migrate_read_unlock(zspage);
+ goto out;
}
+
+ isolated = is_zspage_isolated(zspage);
+ migrate_read_unlock(zspage);
+ /* If zspage is isolated, zs_page_putback will free the zspage */
+ if (likely(!isolated))
+ free_zspage(pool, class, zspage);
+out:
+
spin_unlock(&class->lock);
unpin_tag(handle);
-
- free_handle(pool, handle);
+ cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
-static void zs_object_copy(unsigned long dst, unsigned long src,
- struct size_class *class)
+static void zs_object_copy(struct size_class *class, unsigned long dst,
+ unsigned long src)
{
struct page *s_page, *d_page;
- unsigned long s_objidx, d_objidx;
+ unsigned int s_objidx, d_objidx;
unsigned long s_off, d_off;
void *s_addr, *d_addr;
int s_size, d_size, size;
@@ -1515,8 +1682,8 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
obj_to_location(src, &s_page, &s_objidx);
obj_to_location(dst, &d_page, &d_objidx);
- s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
- d_off = obj_idx_to_offset(d_page, d_objidx, class->size);
+ s_off = (class->size * s_objidx) & ~PAGE_MASK;
+ d_off = (class->size * d_objidx) & ~PAGE_MASK;
if (s_off + class->size > PAGE_SIZE)
s_size = PAGE_SIZE - s_off;
@@ -1544,7 +1711,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
s_page = get_next_page(s_page);
- BUG_ON(!s_page);
s_addr = kmap_atomic(s_page);
d_addr = kmap_atomic(d_page);
s_size = class->size - written;
@@ -1554,7 +1720,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
if (d_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
d_page = get_next_page(d_page);
- BUG_ON(!d_page);
d_addr = kmap_atomic(d_page);
d_size = class->size - written;
d_off = 0;
@@ -1569,20 +1734,19 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
* Find alloced object in zspage from index object and
* return handle.
*/
-static unsigned long find_alloced_obj(struct page *page, int index,
- struct size_class *class)
+static unsigned long find_alloced_obj(struct size_class *class,
+ struct page *page, int index)
{
unsigned long head;
int offset = 0;
unsigned long handle = 0;
void *addr = kmap_atomic(page);
- if (!is_first_page(page))
- offset = page->index;
+ offset = get_first_obj_offset(page);
offset += class->size * index;
while (offset < PAGE_SIZE) {
- head = obj_to_head(class, page, addr + offset);
+ head = obj_to_head(page, addr + offset);
if (head & OBJ_ALLOCATED_TAG) {
handle = head & ~OBJ_ALLOCATED_TAG;
if (trypin_tag(handle))
@@ -1599,7 +1763,7 @@ static unsigned long find_alloced_obj(struct page *page, int index,
}
struct zs_compact_control {
- /* Source page for migration which could be a subpage of zspage. */
+ /* Source spage for migration which could be a subpage of zspage */
struct page *s_page;
/* Destination page for migration which should be a first page
* of zspage. */
@@ -1620,7 +1784,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
int ret = 0;
while (1) {
- handle = find_alloced_obj(s_page, index, class);
+ handle = find_alloced_obj(class, s_page, index);
if (!handle) {
s_page = get_next_page(s_page);
if (!s_page)
@@ -1630,15 +1794,15 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
}
/* Stop if there is no more space */
- if (zspage_full(d_page)) {
+ if (zspage_full(class, get_zspage(d_page))) {
unpin_tag(handle);
ret = -ENOMEM;
break;
}
used_obj = handle_to_obj(handle);
- free_obj = obj_malloc(d_page, class, handle);
- zs_object_copy(free_obj, used_obj, class);
+ free_obj = obj_malloc(class, get_zspage(d_page), handle);
+ zs_object_copy(class, free_obj, used_obj);
index++;
/*
* record_obj updates handle's value to free_obj and it will
@@ -1649,7 +1813,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
free_obj |= BIT(HANDLE_PIN_BIT);
record_obj(handle, free_obj);
unpin_tag(handle);
- obj_free(pool, class, used_obj);
+ obj_free(class, used_obj);
}
/* Remember last position in this iteration */
@@ -1659,71 +1823,423 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
return ret;
}
-static struct page *isolate_target_page(struct size_class *class)
+static struct zspage *isolate_zspage(struct size_class *class, bool source)
{
int i;
- struct page *page;
+ struct zspage *zspage;
+ enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};
- for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
- page = class->fullness_list[i];
- if (page) {
- remove_zspage(page, class, i);
- break;
+ if (!source) {
+ fg[0] = ZS_ALMOST_FULL;
+ fg[1] = ZS_ALMOST_EMPTY;
+ }
+
+ for (i = 0; i < 2; i++) {
+ zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
+ struct zspage, list);
+ if (zspage) {
+ VM_BUG_ON(is_zspage_isolated(zspage));
+ remove_zspage(class, zspage, fg[i]);
+ return zspage;
}
}
- return page;
+ return zspage;
}
/*
- * putback_zspage - add @first_page into right class's fullness list
- * @pool: target pool
+ * putback_zspage - add @zspage into right class's fullness list
* @class: destination class
- * @first_page: target page
+ * @zspage: target page
*
- * Return @fist_page's fullness_group
+ * Return @zspage's fullness_group
*/
-static enum fullness_group putback_zspage(struct zs_pool *pool,
- struct size_class *class,
- struct page *first_page)
+static enum fullness_group putback_zspage(struct size_class *class,
+ struct zspage *zspage)
{
enum fullness_group fullness;
- BUG_ON(!is_first_page(first_page));
+ VM_BUG_ON(is_zspage_isolated(zspage));
- fullness = get_fullness_group(first_page);
- insert_zspage(first_page, class, fullness);
- set_zspage_mapping(first_page, class->index, fullness);
+ fullness = get_fullness_group(class, zspage);
+ insert_zspage(class, zspage, fullness);
+ set_zspage_mapping(zspage, class->index, fullness);
- if (fullness == ZS_EMPTY) {
- zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
- class->size, class->pages_per_zspage));
- atomic_long_sub(class->pages_per_zspage,
- &pool->pages_allocated);
+ return fullness;
+}
+
+#ifdef CONFIG_COMPACTION
+static struct dentry *zs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+
+ return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC);
+}
+
+static struct file_system_type zsmalloc_fs = {
+ .name = "zsmalloc",
+ .mount = zs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static int zsmalloc_mount(void)
+{
+ int ret = 0;
+
+ zsmalloc_mnt = kern_mount(&zsmalloc_fs);
+ if (IS_ERR(zsmalloc_mnt))
+ ret = PTR_ERR(zsmalloc_mnt);
+
+ return ret;
+}
+
+static void zsmalloc_unmount(void)
+{
+ kern_unmount(zsmalloc_mnt);
+}
+
+static void migrate_lock_init(struct zspage *zspage)
+{
+ rwlock_init(&zspage->lock);
+}
+
+static void migrate_read_lock(struct zspage *zspage)
+{
+ read_lock(&zspage->lock);
+}
- free_zspage(first_page);
+static void migrate_read_unlock(struct zspage *zspage)
+{
+ read_unlock(&zspage->lock);
+}
+
+static void migrate_write_lock(struct zspage *zspage)
+{
+ write_lock(&zspage->lock);
+}
+
+static void migrate_write_unlock(struct zspage *zspage)
+{
+ write_unlock(&zspage->lock);
+}
+
+/* Number of isolated subpage for *page migration* in this zspage */
+static void inc_zspage_isolation(struct zspage *zspage)
+{
+ zspage->isolated++;
+}
+
+static void dec_zspage_isolation(struct zspage *zspage)
+{
+ zspage->isolated--;
+}
+
+static void replace_sub_page(struct size_class *class, struct zspage *zspage,
+ struct page *newpage, struct page *oldpage)
+{
+ struct page *page;
+ struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+ int idx = 0;
+
+ page = get_first_page(zspage);
+ do {
+ if (page == oldpage)
+ pages[idx] = newpage;
+ else
+ pages[idx] = page;
+ idx++;
+ } while ((page = get_next_page(page)) != NULL);
+
+ create_page_chain(class, zspage, pages);
+ set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
+ if (unlikely(PageHugeObject(oldpage)))
+ newpage->index = oldpage->index;
+ __SetPageMovable(newpage, page_mapping(oldpage));
+}
+
+bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+{
+ struct zs_pool *pool;
+ struct size_class *class;
+ int class_idx;
+ enum fullness_group fullness;
+ struct zspage *zspage;
+ struct address_space *mapping;
+
+ /*
+ * Page is locked so zspage couldn't be destroyed. For detail, look at
+ * lock_zspage in free_zspage.
+ */
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+
+ /*
+ * Without class lock, fullness could be stale while class_idx is okay
+ * because class_idx is constant unless page is freed so we should get
+ * fullness again under class lock.
+ */
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ mapping = page_mapping(page);
+ pool = mapping->private_data;
+ class = pool->size_class[class_idx];
+
+ spin_lock(&class->lock);
+ if (get_zspage_inuse(zspage) == 0) {
+ spin_unlock(&class->lock);
+ return false;
}
- return fullness;
+ /* zspage is isolated for object migration */
+ if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+ spin_unlock(&class->lock);
+ return false;
+ }
+
+ /*
+ * If this is first time isolation for the zspage, isolate zspage from
+ * size_class to prevent further object allocation from the zspage.
+ */
+ if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ remove_zspage(class, zspage, fullness);
+ }
+
+ inc_zspage_isolation(zspage);
+ spin_unlock(&class->lock);
+
+ return true;
}
-static struct page *isolate_source_page(struct size_class *class)
+int zs_page_migrate(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{
+ struct zs_pool *pool;
+ struct size_class *class;
+ int class_idx;
+ enum fullness_group fullness;
+ struct zspage *zspage;
+ struct page *dummy;
+ void *s_addr, *d_addr, *addr;
+ int offset, pos;
+ unsigned long handle, head;
+ unsigned long old_obj, new_obj;
+ unsigned int obj_idx;
+ int ret = -EAGAIN;
+
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+
+ /* Concurrent compactor cannot migrate any subpage in zspage */
+ migrate_write_lock(zspage);
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ pool = mapping->private_data;
+ class = pool->size_class[class_idx];
+ offset = get_first_obj_offset(page);
+
+ spin_lock(&class->lock);
+ if (!get_zspage_inuse(zspage)) {
+ ret = -EBUSY;
+ goto unlock_class;
+ }
+
+ pos = offset;
+ s_addr = kmap_atomic(page);
+ while (pos < PAGE_SIZE) {
+ head = obj_to_head(page, s_addr + pos);
+ if (head & OBJ_ALLOCATED_TAG) {
+ handle = head & ~OBJ_ALLOCATED_TAG;
+ if (!trypin_tag(handle))
+ goto unpin_objects;
+ }
+ pos += class->size;
+ }
+
+ /*
+ * Here, any user cannot access all objects in the zspage so let's move.
+ */
+ d_addr = kmap_atomic(newpage);
+ memcpy(d_addr, s_addr, PAGE_SIZE);
+ kunmap_atomic(d_addr);
+
+ for (addr = s_addr + offset; addr < s_addr + pos;
+ addr += class->size) {
+ head = obj_to_head(page, addr);
+ if (head & OBJ_ALLOCATED_TAG) {
+ handle = head & ~OBJ_ALLOCATED_TAG;
+ if (!testpin_tag(handle))
+ BUG();
+
+ old_obj = handle_to_obj(handle);
+ obj_to_location(old_obj, &dummy, &obj_idx);
+ new_obj = (unsigned long)location_to_obj(newpage,
+ obj_idx);
+ new_obj |= BIT(HANDLE_PIN_BIT);
+ record_obj(handle, new_obj);
+ }
+ }
+
+ replace_sub_page(class, zspage, newpage, page);
+ get_page(newpage);
+
+ dec_zspage_isolation(zspage);
+
+ /*
+ * Page migration is done so let's putback isolated zspage to
+ * the list if @page is final isolated subpage in the zspage.
+ */
+ if (!is_zspage_isolated(zspage))
+ putback_zspage(class, zspage);
+
+ reset_page(page);
+ put_page(page);
+ page = newpage;
+
+ ret = MIGRATEPAGE_SUCCESS;
+unpin_objects:
+ for (addr = s_addr + offset; addr < s_addr + pos;
+ addr += class->size) {
+ head = obj_to_head(page, addr);
+ if (head & OBJ_ALLOCATED_TAG) {
+ handle = head & ~OBJ_ALLOCATED_TAG;
+ if (!testpin_tag(handle))
+ BUG();
+ unpin_tag(handle);
+ }
+ }
+ kunmap_atomic(s_addr);
+unlock_class:
+ spin_unlock(&class->lock);
+ migrate_write_unlock(zspage);
+
+ return ret;
+}
+
+void zs_page_putback(struct page *page)
+{
+ struct zs_pool *pool;
+ struct size_class *class;
+ int class_idx;
+ enum fullness_group fg;
+ struct address_space *mapping;
+ struct zspage *zspage;
+
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+ get_zspage_mapping(zspage, &class_idx, &fg);
+ mapping = page_mapping(page);
+ pool = mapping->private_data;
+ class = pool->size_class[class_idx];
+
+ spin_lock(&class->lock);
+ dec_zspage_isolation(zspage);
+ if (!is_zspage_isolated(zspage)) {
+ fg = putback_zspage(class, zspage);
+ /*
+ * Due to page_lock, we cannot free zspage immediately
+ * so let's defer.
+ */
+ if (fg == ZS_EMPTY)
+ schedule_work(&pool->free_work);
+ }
+ spin_unlock(&class->lock);
+}
+
+const struct address_space_operations zsmalloc_aops = {
+ .isolate_page = zs_page_isolate,
+ .migratepage = zs_page_migrate,
+ .putback_page = zs_page_putback,
+};
+
+static int zs_register_migration(struct zs_pool *pool)
+{
+ pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
+ if (IS_ERR(pool->inode)) {
+ pool->inode = NULL;
+ return 1;
+ }
+
+ pool->inode->i_mapping->private_data = pool;
+ pool->inode->i_mapping->a_ops = &zsmalloc_aops;
+ return 0;
+}
+
+static void zs_unregister_migration(struct zs_pool *pool)
+{
+ flush_work(&pool->free_work);
+ if (pool->inode)
+ iput(pool->inode);
+}
+
+/*
+ * Caller should hold page_lock of all pages in the zspage
+ * In here, we cannot use zspage meta data.
+ */
+static void async_free_zspage(struct work_struct *work)
{
int i;
- struct page *page = NULL;
+ struct size_class *class;
+ unsigned int class_idx;
+ enum fullness_group fullness;
+ struct zspage *zspage, *tmp;
+ LIST_HEAD(free_pages);
+ struct zs_pool *pool = container_of(work, struct zs_pool,
+ free_work);
- for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) {
- page = class->fullness_list[i];
- if (!page)
+ for (i = 0; i < zs_size_classes; i++) {
+ class = pool->size_class[i];
+ if (class->index != i)
continue;
- remove_zspage(page, class, i);
- break;
+ spin_lock(&class->lock);
+ list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
+ spin_unlock(&class->lock);
}
- return page;
+
+ list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
+ list_del(&zspage->list);
+ lock_zspage(zspage);
+
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ VM_BUG_ON(fullness != ZS_EMPTY);
+ class = pool->size_class[class_idx];
+ spin_lock(&class->lock);
+ __free_zspage(pool, pool->size_class[class_idx], zspage);
+ spin_unlock(&class->lock);
+ }
+};
+
+static void kick_deferred_free(struct zs_pool *pool)
+{
+ schedule_work(&pool->free_work);
+}
+
+static void init_deferred_free(struct zs_pool *pool)
+{
+ INIT_WORK(&pool->free_work, async_free_zspage);
}
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
+{
+ struct page *page = get_first_page(zspage);
+
+ do {
+ WARN_ON(!trylock_page(page));
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
+ } while ((page = get_next_page(page)) != NULL);
+}
+#endif
+
/*
*
* Based on the number of unused allocated objects calculate
@@ -1748,22 +2264,20 @@ static unsigned long zs_can_compact(struct size_class *class)
static void __zs_compact(struct zs_pool *pool, struct size_class *class)
{
struct zs_compact_control cc;
- struct page *src_page;
- struct page *dst_page = NULL;
+ struct zspage *src_zspage;
+ struct zspage *dst_zspage = NULL;
spin_lock(&class->lock);
- while ((src_page = isolate_source_page(class))) {
-
- BUG_ON(!is_first_page(src_page));
+ while ((src_zspage = isolate_zspage(class, true))) {
if (!zs_can_compact(class))
break;
cc.index = 0;
- cc.s_page = src_page;
+ cc.s_page = get_first_page(src_zspage);
- while ((dst_page = isolate_target_page(class))) {
- cc.d_page = dst_page;
+ while ((dst_zspage = isolate_zspage(class, false))) {
+ cc.d_page = get_first_page(dst_zspage);
/*
* If there is no more space in dst_page, resched
* and see if anyone had allocated another zspage.
@@ -1771,23 +2285,25 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
if (!migrate_zspage(pool, class, &cc))
break;
- putback_zspage(pool, class, dst_page);
+ putback_zspage(class, dst_zspage);
}
/* Stop if we couldn't find slot */
- if (dst_page == NULL)
+ if (dst_zspage == NULL)
break;
- putback_zspage(pool, class, dst_page);
- if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
+ putback_zspage(class, dst_zspage);
+ if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+ free_zspage(pool, class, src_zspage);
pool->stats.pages_compacted += class->pages_per_zspage;
+ }
spin_unlock(&class->lock);
cond_resched();
spin_lock(&class->lock);
}
- if (src_page)
- putback_zspage(pool, class, src_page);
+ if (src_zspage)
+ putback_zspage(class, src_zspage);
spin_unlock(&class->lock);
}
@@ -1884,7 +2400,7 @@ static int zs_register_shrinker(struct zs_pool *pool)
* On success, a pointer to the newly created pool is returned,
* otherwise NULL.
*/
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+struct zs_pool *zs_create_pool(const char *name)
{
int i;
struct zs_pool *pool;
@@ -1894,6 +2410,7 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
if (!pool)
return NULL;
+ init_deferred_free(pool);
pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
GFP_KERNEL);
if (!pool->size_class) {
@@ -1905,7 +2422,7 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
if (!pool->name)
goto err;
- if (create_handle_cache(pool))
+ if (create_cache(pool))
goto err;
/*
@@ -1916,6 +2433,7 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
int size;
int pages_per_zspage;
struct size_class *class;
+ int fullness = 0;
size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
if (size > ZS_MAX_ALLOC_SIZE)
@@ -1945,18 +2463,21 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
class->size = size;
class->index = i;
class->pages_per_zspage = pages_per_zspage;
- if (pages_per_zspage == 1 &&
- get_maxobj_per_zspage(size, pages_per_zspage) == 1)
- class->huge = true;
+ class->objs_per_zspage = class->pages_per_zspage *
+ PAGE_SIZE / class->size;
spin_lock_init(&class->lock);
pool->size_class[i] = class;
+ for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
+ fullness++)
+ INIT_LIST_HEAD(&class->fullness_list[fullness]);
prev_class = class;
}
- pool->flags = flags;
+ if (zs_pool_stat_create(pool, name))
+ goto err;
- if (zs_pool_stat_create(name, pool))
+ if (zs_register_migration(pool))
goto err;
/*
@@ -1978,6 +2499,7 @@ void zs_destroy_pool(struct zs_pool *pool)
int i;
zs_unregister_shrinker(pool);
+ zs_unregister_migration(pool);
zs_pool_stat_destroy(pool);
for (i = 0; i < zs_size_classes; i++) {
@@ -1990,8 +2512,8 @@ void zs_destroy_pool(struct zs_pool *pool)
if (class->index != i)
continue;
- for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
- if (class->fullness_list[fg]) {
+ for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
+ if (!list_empty(&class->fullness_list[fg])) {
pr_info("Freeing non-empty class with size %db, fullness group %d\n",
class->size, fg);
}
@@ -1999,7 +2521,7 @@ void zs_destroy_pool(struct zs_pool *pool)
kfree(class);
}
- destroy_handle_cache(pool);
+ destroy_cache(pool);
kfree(pool->size_class);
kfree(pool->name);
kfree(pool);
@@ -2008,7 +2530,13 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool);
static int __init zs_init(void)
{
- int ret = zs_register_cpu_notifier();
+ int ret;
+
+ ret = zsmalloc_mount();
+ if (ret)
+ goto out;
+
+ ret = zs_register_cpu_notifier();
if (ret)
goto notifier_fail;
@@ -2032,7 +2560,8 @@ stat_fail:
#endif
notifier_fail:
zs_unregister_cpu_notifier();
-
+ zsmalloc_unmount();
+out:
return ret;
}
@@ -2041,6 +2570,7 @@ static void __exit zs_exit(void)
#ifdef CONFIG_ZPOOL
zpool_unregister_driver(&zs_zpool_driver);
#endif
+ zsmalloc_unmount();
zs_unregister_cpu_notifier();
zs_stat_exit();
diff --git a/net/socket.c b/net/socket.c
index 5211c40daecc..1cdfe02104a6 100644
--- a/net/socket.c
+++ b/net/socket.c
@@ -1664,6 +1664,8 @@ SYSCALL_DEFINE6(sendto, int, fd, void __user *, buff, size_t, len,
if (len > INT_MAX)
len = INT_MAX;
+ if (unlikely(!access_ok(VERIFY_READ, buff, len)))
+ return -EFAULT;
err = import_single_range(WRITE, buff, len, &iov, &msg.msg_iter);
if (unlikely(err))
@@ -1723,6 +1725,8 @@ SYSCALL_DEFINE6(recvfrom, int, fd, void __user *, ubuf, size_t, size,
if (size > INT_MAX)
size = INT_MAX;
+ if (unlikely(!access_ok(VERIFY_WRITE, ubuf, size)))
+ return -EFAULT;
err = import_single_range(READ, ubuf, size, &iov, &msg.msg_iter);
if (unlikely(err))
return err;
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 17a3a6d1785c..1dcd4ed37103 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -60,6 +60,8 @@ my $spelling_file = "$D/spelling.txt";
my $codespell = 0;
my $codespellfile = "/usr/share/codespell/dictionary.txt";
my $color = 1;
+my $qca_sign_off = 0;
+my $codeaurora_sign_off = 0;
sub help {
my ($exitcode) = @_;
@@ -2429,10 +2431,16 @@ sub process {
"email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
}
}
- if ($chk_author && $line =~ /^\s*signed-off-by:.*(quicinc|qualcomm)\.com/i) {
- WARN("BAD_SIGN_OFF",
- "invalid Signed-off-by identity\n" . $line );
- }
+ if ($chk_author) {
+ if ($line =~ /^\s*signed-off-by:.*qca\.qualcomm\.com/i) {
+ $qca_sign_off = 1;
+ } elsif ($line =~ /^\s*signed-off-by:.*codeaurora\.org/i) {
+ $codeaurora_sign_off = 1;
+ } elsif ($line =~ /^\s*signed-off-by:.*(quicinc|qualcomm)\.com/i) {
+ WARN("BAD_SIGN_OFF",
+ "invalid Signed-off-by identity\n" . $line );
+ }
+ }
# Check for duplicate signatures
my $sig_nospace = $line;
@@ -2558,7 +2566,8 @@ sub process {
}
#check the patch for invalid author credentials
- if ($chk_author && $line =~ /^From:.*(quicinc|qualcomm)\.com/) {
+ if ($chk_author && !($line =~ /^From:.*qca\.qualcomm\.com/) &&
+ $line =~ /^From:.*(quicinc|qualcomm)\.com/) {
WARN("BAD_AUTHOR", "invalid author identity\n" . $line );
}
@@ -6042,6 +6051,11 @@ sub process {
}
}
+ if ($chk_author && $qca_sign_off && !$codeaurora_sign_off) {
+ WARN("BAD_SIGN_OFF",
+ "QCA Signed-off-by requires CODEAURORA Signed-off-by\n" . $line );
+ }
+
# If we have no input at all, then there is nothing to report on
# so just keep quiet.
if ($#rawlines == -1) {
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 8d623d01425b..566bcb04ea51 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -989,7 +989,7 @@ config SND_SOC_MSM_STUB
config SND_SOC_MSM_HDMI_CODEC_RX
bool "HDMI Audio Playback"
- depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998)
+ depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
help
HDMI audio drivers should be built only if the platform
supports hdmi panel.
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index 850238764d87..6b267122dc08 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -48,10 +48,14 @@
#define MSM_SDW_VERSION_1_0 0x0001
#define MSM_SDW_VERSION_ENTRY_SIZE 32
+/*
+ * 200 Milliseconds sufficient for DSP bring up in the modem
+ * after Sub System Restart
+ */
+#define ADSP_STATE_READY_TIMEOUT_MS 200
+
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static struct snd_soc_dai_driver msm_sdw_dai[];
-static bool initial_boot = true;
-static bool is_ssr_en;
static bool skip_irq = true;
static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
@@ -467,10 +471,9 @@ static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
0x20);
snd_soc_update_bits(codec,
- MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x00);
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
snd_soc_update_bits(codec,
- MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x10);
snd_soc_update_bits(codec,
@@ -493,10 +496,10 @@ static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
0x20);
snd_soc_update_bits(codec,
MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x10,
0x10);
@@ -1036,6 +1039,13 @@ static int msm_sdw_swrm_read(void *handle, int reg)
__func__, reg);
sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
+
+ /*
+ * Add sleep as SWR slave access read takes time.
+ * Allow for RD_DONE to complete for previous register if any.
+ */
+ usleep_range(50, 55);
+
/* read_lock */
mutex_lock(&msm_sdw->sdw_read_lock);
ret = regmap_bulk_write(msm_sdw->regmap, sdw_rd_addr_base,
@@ -1225,7 +1235,7 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- u8 rx_clk_fs_rate, rx_fs_rate;
+ u8 clk_fs_rate, fs_rate;
dev_dbg(dai->codec->dev,
"%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
@@ -1234,28 +1244,28 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
switch (params_rate(params)) {
case 8000:
- rx_clk_fs_rate = 0x00;
- rx_fs_rate = 0x00;
+ clk_fs_rate = 0x00;
+ fs_rate = 0x00;
break;
case 16000:
- rx_clk_fs_rate = 0x01;
- rx_fs_rate = 0x01;
+ clk_fs_rate = 0x01;
+ fs_rate = 0x01;
break;
case 32000:
- rx_clk_fs_rate = 0x02;
- rx_fs_rate = 0x03;
+ clk_fs_rate = 0x02;
+ fs_rate = 0x03;
break;
case 48000:
- rx_clk_fs_rate = 0x03;
- rx_fs_rate = 0x04;
+ clk_fs_rate = 0x03;
+ fs_rate = 0x04;
break;
case 96000:
- rx_clk_fs_rate = 0x04;
- rx_fs_rate = 0x05;
+ clk_fs_rate = 0x04;
+ fs_rate = 0x05;
break;
case 192000:
- rx_clk_fs_rate = 0x05;
- rx_fs_rate = 0x06;
+ clk_fs_rate = 0x05;
+ fs_rate = 0x06;
break;
default:
dev_err(dai->codec->dev,
@@ -1264,30 +1274,45 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x1C, (rx_clk_fs_rate << 2));
- snd_soc_update_bits(dai->codec,
- MSM_SDW_RX7_RX_PATH_CTL, 0x0F, rx_fs_rate);
- snd_soc_update_bits(dai->codec,
- MSM_SDW_RX8_RX_PATH_CTL, 0x0F, rx_fs_rate);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x1C,
+ (clk_fs_rate << 2));
+ } else {
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x1C,
+ (clk_fs_rate << 2));
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_RX7_RX_PATH_CTL, 0x0F,
+ fs_rate);
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_RX8_RX_PATH_CTL, 0x0F,
+ fs_rate);
+ }
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
+ else
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
break;
case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x00);
+ else
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
break;
default:
dev_err(dai->codec->dev, "%s: wrong format selected\n",
__func__);
return -EINVAL;
}
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
return 0;
}
@@ -1403,7 +1428,7 @@ static struct snd_soc_dai_driver msm_sdw_dai[] = {
.rate_max = 192000,
.rate_min = 8000,
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 4,
},
.ops = &msm_sdw_dai_ops,
},
@@ -1412,9 +1437,9 @@ static struct snd_soc_dai_driver msm_sdw_dai[] = {
.id = AIF1_SDW_VIFEED,
.capture = {
.stream_name = "VIfeed_SDW",
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = MSM_SDW_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rate_max = 8000,
+ .rate_max = 48000,
.rate_min = 8000,
.channels_min = 2,
.channels_max = 4,
@@ -1629,6 +1654,8 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
struct msm_sdw_priv *msm_sdw = container_of(nb,
struct msm_sdw_priv,
service_nb);
+ bool adsp_ready = false;
+ unsigned long timeout;
pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
@@ -1641,15 +1668,34 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
SWR_DEVICE_DOWN, NULL);
break;
case AUDIO_NOTIFIER_SERVICE_UP:
- if (initial_boot) {
- initial_boot = false;
- break;
+ if (!q6core_is_adsp_ready()) {
+ dev_dbg(msm_sdw->dev, "ADSP isn't ready\n");
+ timeout = jiffies +
+ msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+ while (!time_after(jiffies, timeout)) {
+ if (!q6core_is_adsp_ready()) {
+ dev_dbg(msm_sdw->dev,
+ "ADSP isn't ready\n");
+ } else {
+ dev_dbg(msm_sdw->dev,
+ "ADSP is ready\n");
+ adsp_ready = true;
+ goto powerup;
+ }
+ }
+ } else {
+ adsp_ready = true;
+ dev_dbg(msm_sdw->dev, "%s: DSP is ready\n", __func__);
+ }
+powerup:
+ if (adsp_ready) {
+ msm_sdw->dev_up = true;
+ msm_sdw_init_reg(msm_sdw->codec);
+ regcache_mark_dirty(msm_sdw->regmap);
+ regcache_sync(msm_sdw->regmap);
+ msm_sdw_set_spkr_mode(msm_sdw->codec,
+ msm_sdw->spkr_mode);
}
- msm_sdw->dev_up = true;
- msm_sdw_init_reg(msm_sdw->codec);
- regcache_mark_dirty(msm_sdw->regmap);
- regcache_sync(msm_sdw->regmap);
- msm_sdw_set_spkr_mode(msm_sdw->codec, msm_sdw->spkr_mode);
break;
default:
break;
@@ -1676,17 +1722,14 @@ static int msm_sdw_codec_probe(struct snd_soc_codec *codec)
msm_sdw_init_reg(codec);
msm_sdw->version = MSM_SDW_VERSION_1_0;
- if (is_ssr_en) {
- msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
- ret = audio_notifier_register("msm_sdw",
- AUDIO_NOTIFIER_ADSP_DOMAIN,
- &msm_sdw->service_nb);
- if (ret < 0)
- dev_err(msm_sdw->dev,
- "%s: Audio notifier register failed ret = %d\n",
- __func__, ret);
- }
-
+ msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
+ ret = audio_notifier_register("msm_sdw",
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &msm_sdw->service_nb);
+ if (ret < 0)
+ dev_err(msm_sdw->dev,
+ "%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
return 0;
}
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 8f7db4d13378..5ecd7870bb3b 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -93,8 +93,6 @@ static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[];
/* By default enable the internal speaker boost */
static bool spkr_boost_en = true;
-static bool initial_boot = true;
-static bool is_ssr_en;
static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
"cdc-vdd-mic-bias",
@@ -1453,7 +1451,6 @@ static int msm_anlg_cdc_codec_enable_clock_block(struct snd_soc_codec *codec,
} else {
snd_soc_update_bits(codec,
MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
- msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_OFF);
}
return 0;
}
@@ -3500,18 +3497,24 @@ static const struct snd_soc_dapm_widget msm_anlg_cdc_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_INPUT("AMIC2"),
SND_SOC_DAPM_INPUT("AMIC3"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX1"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX2"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX3"),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX1", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX2", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX3", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUTPUT("EAR"),
SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
SND_SOC_DAPM_OUTPUT("HEADPHONE"),
SND_SOC_DAPM_OUTPUT("SPK_OUT"),
SND_SOC_DAPM_OUTPUT("LINEOUT"),
- SND_SOC_DAPM_OUTPUT("ADC1_OUT"),
- SND_SOC_DAPM_OUTPUT("ADC2_OUT"),
- SND_SOC_DAPM_OUTPUT("ADC3_OUT"),
+ SND_SOC_DAPM_AIF_OUT("ADC1_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ADC2_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ADC3_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
};
static const struct sdm660_cdc_reg_mask_val msm_anlg_cdc_reg_defaults[] = {
@@ -3772,7 +3775,6 @@ static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
snd_soc_write(codec,
MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
- msm_anlg_cdc_bringup(codec);
atomic_set(&pdata->int_mclk0_enabled, false);
msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_DOWN);
set_bit(BUS_DOWN, &sdm660_cdc_priv->status_mask);
@@ -3794,14 +3796,6 @@ static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
/* delay is required to make sure sound card state updated */
usleep_range(5000, 5100);
- msm_anlg_cdc_codec_init_reg(codec);
- msm_anlg_cdc_update_reg_defaults(codec);
-
- regcache_mark_dirty(codec->component.regmap);
- regcache_sync_region(codec->component.regmap,
- MSM89XX_PMIC_DIGITAL_REVISION1,
- MSM89XX_PMIC_CDC_MAX_REGISTER);
-
snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
@@ -3850,10 +3844,6 @@ static int sdm660_cdc_notifier_service_cb(struct notifier_block *nb,
msm_anlg_cdc_device_down(codec);
break;
case AUDIO_NOTIFIER_SERVICE_UP:
- if (initial_boot) {
- initial_boot = false;
- break;
- }
dev_dbg(codec->dev,
"ADSP is about to power up. bring up codec\n");
@@ -4053,17 +4043,16 @@ int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
return -ENOMEM;
}
sdm660_cdc_priv->version_entry = version_entry;
- if (is_ssr_en) {
- sdm660_cdc_priv->audio_ssr_nb.notifier_call =
- sdm660_cdc_notifier_service_cb;
- ret = audio_notifier_register("pmic_analog_cdc",
- AUDIO_NOTIFIER_ADSP_DOMAIN,
- &sdm660_cdc_priv->audio_ssr_nb);
- if (ret < 0) {
- pr_err("%s: Audio notifier register failed ret = %d\n",
- __func__, ret);
- return ret;
- }
+
+ sdm660_cdc_priv->audio_ssr_nb.notifier_call =
+ sdm660_cdc_notifier_service_cb;
+ ret = audio_notifier_register("pmic_analog_cdc",
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &sdm660_cdc_priv->audio_ssr_nb);
+ if (ret < 0) {
+ pr_err("%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
+ return ret;
}
return 0;
}
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 5e078dba0448..f1c3b4050323 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -74,6 +74,7 @@ static int msm_digcdc_clock_control(bool flag)
pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+ mutex_lock(&pdata->cdc_int_mclk0_mutex);
if (flag) {
if (atomic_read(&pdata->int_mclk0_enabled) == false) {
pdata->digital_cdc_core_clk.enable = 1;
@@ -83,6 +84,7 @@ static int msm_digcdc_clock_control(bool flag)
if (ret < 0) {
pr_err("%s:failed to enable the MCLK\n",
__func__);
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
return ret;
}
pr_debug("enabled digital codec core clk\n");
@@ -94,6 +96,7 @@ static int msm_digcdc_clock_control(bool flag)
dev_dbg(registered_digcodec->dev,
"disable MCLK, workq to disable set already\n");
}
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
return 0;
}
diff --git a/sound/soc/codecs/wsa881x-regmap.c b/sound/soc/codecs/wsa881x-regmap.c
index faa44301286c..20dc3508a5af 100644
--- a/sound/soc/codecs/wsa881x-regmap.c
+++ b/sound/soc/codecs/wsa881x-regmap.c
@@ -216,9 +216,6 @@ static bool wsa881x_readable_register(struct device *dev, unsigned int reg)
static bool wsa881x_volatile_register(struct device *dev, unsigned int reg)
{
- if (cache_always)
- return false;
-
switch (reg) {
case WSA881X_CHIP_ID0:
case WSA881X_CHIP_ID1:
diff --git a/sound/soc/codecs/wsa881x-temp-sensor.c b/sound/soc/codecs/wsa881x-temp-sensor.c
index 0079d0f6cd52..5ab0ecfdc022 100644
--- a/sound/soc/codecs/wsa881x-temp-sensor.c
+++ b/sound/soc/codecs/wsa881x-temp-sensor.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
#define LOW_TEMP_THRESHOLD 5
#define HIGH_TEMP_THRESHOLD 45
#define TEMP_INVALID 0xFFFF
-
+#define WSA881X_TEMP_RETRY 3
/*
* wsa881x_get_temp - get wsa temperature
* @thermal: thermal zone device
@@ -44,6 +44,7 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
int temp_val;
int t1 = T1_TEMP;
int t2 = T2_TEMP;
+ u8 retry = WSA881X_TEMP_RETRY;
if (!thermal)
return -EINVAL;
@@ -60,6 +61,7 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
pr_err("%s: pdata is NULL\n", __func__);
return -EINVAL;
}
+temp_retry:
if (pdata->wsa_temp_reg_read) {
ret = pdata->wsa_temp_reg_read(codec, &reg);
if (ret) {
@@ -101,6 +103,10 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
printk_ratelimited("%s: T0: %d is out of range[%d, %d]\n",
__func__, temp_val, LOW_TEMP_THRESHOLD,
HIGH_TEMP_THRESHOLD);
+ if (retry--) {
+ msleep(20);
+ goto temp_retry;
+ }
}
if (temp)
*temp = temp_val;
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index ba74175dbe10..6addbde34545 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -78,7 +78,6 @@ enum {
WSA881X_DEV_UP,
};
-bool cache_always;
/*
* Private data Structure for wsa881x. All parameters related to
* WSA881X codec needs to be defined here.
@@ -987,6 +986,7 @@ static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
{
struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
struct swr_device *dev;
+ u8 retry = WSA881X_NUM_RETRY;
u8 devnum = 0;
if (!wsa881x) {
@@ -995,7 +995,12 @@ static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
}
dev = wsa881x->swr_slave;
if (dev && (wsa881x->state == WSA881X_DEV_DOWN)) {
- if (swr_get_logical_dev_num(dev, dev->addr, &devnum)) {
+ while (swr_get_logical_dev_num(dev, dev->addr, &devnum) &&
+ retry--) {
+ /* Retry after 1 msec delay */
+ usleep_range(1000, 1100);
+ }
+ if (retry == 0) {
dev_err(codec->dev,
"%s get devnum %d for dev addr %lx failed\n",
__func__, devnum, dev->addr);
@@ -1089,6 +1094,7 @@ static int wsa881x_swr_startup(struct swr_device *swr_dev)
{
int ret = 0;
u8 devnum = 0;
+ u8 retry = WSA881X_NUM_RETRY;
struct wsa881x_priv *wsa881x;
wsa881x = swr_get_dev_data(swr_dev);
@@ -1103,11 +1109,16 @@ static int wsa881x_swr_startup(struct swr_device *swr_dev)
* as per HW requirement.
*/
usleep_range(5000, 5010);
- ret = swr_get_logical_dev_num(swr_dev, swr_dev->addr, &devnum);
- if (ret) {
- dev_dbg(&swr_dev->dev, "%s failed to get devnum, err:%d\n",
- __func__, ret);
- goto err;
+ while (swr_get_logical_dev_num(swr_dev, swr_dev->addr, &devnum) &&
+ retry--) {
+ /* Retry after 1 msec delay */
+ usleep_range(1000, 1100);
+ }
+ if (retry == 0) {
+ dev_err(&swr_dev->dev,
+ "%s get devnum %d for dev addr %lx failed\n",
+ __func__, devnum, swr_dev->addr);
+ return -EINVAL;
}
swr_dev->dev_num = devnum;
@@ -1226,9 +1237,6 @@ static int wsa881x_swr_probe(struct swr_device *pdev)
if (ret)
goto err;
}
-
- cache_always = of_property_read_bool(pdev->dev.of_node,
- "qcom,cache-always");
wsa881x_gpio_ctrl(wsa881x, true);
wsa881x->state = WSA881X_DEV_UP;
diff --git a/sound/soc/codecs/wsa881x.h b/sound/soc/codecs/wsa881x.h
index 178237555c54..9bd9f9587a2e 100644
--- a/sound/soc/codecs/wsa881x.h
+++ b/sound/soc/codecs/wsa881x.h
@@ -20,7 +20,6 @@
#define WSA881X_MAX_SWR_PORTS 4
-extern bool cache_always;
extern int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port,
u8 num_port, unsigned int *ch_mask,
unsigned int *ch_rate);
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 6ecd300a18ac..eb650d19a97c 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -30,16 +30,6 @@ config SND_SOC_QDSP_DEBUG
is inducing kernel panic upon encountering critical
errors from DSP audio modules
-config DOLBY_DAP
- bool "Enable Dolby DAP"
- depends on SND_SOC_MSM_QDSP6V2_INTF
- help
- To add support for dolby DAP post processing.
- This support is to configure the post processing parameters
- to DSP. The configuration includes sending the end point
- device, end point dependent post processing parameters and
- the various posrt processing parameters
-
config DOLBY_DS2
bool "Enable Dolby DS2"
depends on SND_SOC_MSM_QDSP6V2_INTF
@@ -122,9 +112,10 @@ config SND_SOC_INT_CODEC
select MSM_CDC_PINCTRL
select SND_SOC_MSM_SDW
select SND_SOC_SDM660_CDC
+ select SND_SOC_MSM_HDMI_CODEC_RX
select QTI_PP
select DTS_SRS_TM
- select DOLBY_DS2
+ select DOLBY_LICENSE
select SND_HWDEP
select MSM_ULTRASOUND
select DTS_EAGLE
@@ -153,10 +144,11 @@ config SND_SOC_EXT_CODEC
select SND_SOC_WCD9335
select SND_SOC_WCD934X
select SND_SOC_WSA881X
+ select SND_SOC_MSM_HDMI_CODEC_RX
select MFD_CORE
select QTI_PP
select DTS_SRS_TM
- select DOLBY_DS2
+ select DOLBY_LICENSE
select SND_SOC_CPE
select SND_SOC_WCD_CPE
select SND_HWDEP
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index b2008d6da2a1..48f8e22e7faa 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -2152,7 +2152,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "LSM_REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_model, (void *)arg,
@@ -2285,7 +2286,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SNDRV_LSM_SET_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&det_params, (void *) arg,
@@ -2312,14 +2314,16 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (!arg) {
dev_err(rtd->dev,
"%s: %s: No Param data to set\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data, arg,
@@ -2327,7 +2331,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "p_data", sizeof(p_data));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -2335,7 +2340,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = p_data.num_params *
@@ -2346,12 +2352,15 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %zd\n",
__func__, "SET_MODULE_PARAMS", p_size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
params = kzalloc(p_size, GFP_KERNEL);
- if (!params)
- return -ENOMEM;
+ if (!params) {
+ err = -ENOMEM;
+ goto done;
+ }
if (copy_from_user(params, p_data.params,
p_data.data_size)) {
@@ -2359,7 +2368,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %d\n",
__func__, "params", p_data.data_size);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
err = msm_cpe_lsm_process_params(substream, &p_data, params);
@@ -2470,7 +2480,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "LSM_REG_SND_MODEL_V2_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
dev_dbg(rtd->dev,
@@ -2690,7 +2701,9 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SNDRV_LSM_SET_PARAMS32");
- return -EINVAL;
+
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&det_params32, arg,
@@ -2734,7 +2747,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data_32, arg,
@@ -2743,7 +2757,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "SET_MODULE_PARAMS_32",
sizeof(p_data_32));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_data.params = compat_ptr(p_data_32.params);
@@ -2755,7 +2770,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (p_data.data_size !=
@@ -2764,21 +2780,25 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.data_size);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = sizeof(struct lsm_params_info_32) *
p_data.num_params;
params32 = kzalloc(p_size, GFP_KERNEL);
- if (!params32)
- return -ENOMEM;
+ if (!params32) {
+ err = -ENOMEM;
+ goto done;
+ }
p_size = sizeof(struct lsm_params_info) * p_data.num_params;
params = kzalloc(p_size, GFP_KERNEL);
if (!params) {
kfree(params32);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params32, p_data.params,
@@ -2788,7 +2808,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
__func__, "params32", p_data.data_size);
kfree(params32);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_info_32 = (struct lsm_params_info_32 *) params32;
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 44a6a245c7a2..a7a25ea070da 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -964,7 +964,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 4,
.rate_min = 8000,
.rate_max = 192000,
},
diff --git a/sound/soc/msm/msm8998.c b/sound/soc/msm/msm8998.c
index e6fa1143af02..e7b51c5c2c00 100644
--- a/sound/soc/msm/msm8998.c
+++ b/sound/soc/msm/msm8998.c
@@ -421,7 +421,8 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
"KHZ_88P2", "KHZ_96", "KHZ_176P4",
"KHZ_192", "KHZ_352P8", "KHZ_384"};
static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
- "KHZ_192"};
+ "KHZ_192", "KHZ_32", "KHZ_44P1",
+ "KHZ_88P2", "KHZ_176P4"};
static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -1500,6 +1501,22 @@ static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
return idx;
switch (ext_disp_rx_cfg[idx].sample_rate) {
+ case SAMPLING_RATE_176P4KHZ:
+ sample_rate_val = 6;
+ break;
+
+ case SAMPLING_RATE_88P2KHZ:
+ sample_rate_val = 5;
+ break;
+
+ case SAMPLING_RATE_44P1KHZ:
+ sample_rate_val = 4;
+ break;
+
+ case SAMPLING_RATE_32KHZ:
+ sample_rate_val = 3;
+ break;
+
case SAMPLING_RATE_192KHZ:
sample_rate_val = 2;
break;
@@ -1530,6 +1547,18 @@ static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
return idx;
switch (ucontrol->value.integer.value[0]) {
+ case 6:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_176P4KHZ;
+ break;
+ case 5:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_88P2KHZ;
+ break;
+ case 4:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_44P1KHZ;
+ break;
+ case 3:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_32KHZ;
+ break;
case 2:
ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
break;
diff --git a/sound/soc/msm/qdsp6v2/Makefile b/sound/soc/msm/qdsp6v2/Makefile
index 469ab1a19c5b..d4db55fe06a2 100644
--- a/sound/soc/msm/qdsp6v2/Makefile
+++ b/sound/soc/msm/qdsp6v2/Makefile
@@ -10,7 +10,6 @@ obj-$(CONFIG_SND_SOC_QDSP6V2) += snd-soc-qdsp6v2.o msm-pcm-dtmf-v2.o \
msm-dai-stub-v2.o
obj-$(CONFIG_SND_HWDEP) += msm-pcm-routing-devdep.o
obj-$(CONFIG_DTS_EAGLE) += msm-dts-eagle.o
-obj-$(CONFIG_DOLBY_DAP) += msm-dolby-dap-config.o
obj-$(CONFIG_DOLBY_DS2) += msm-ds2-dap-config.o
obj-$(CONFIG_DOLBY_LICENSE) += msm-ds2-dap-config.o
obj-$(CONFIG_DTS_SRS_TM) += msm-dts-srs-tm-config.o
diff --git a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c b/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
deleted file mode 100644
index 4664d39e87e0..000000000000
--- a/sound/soc/msm/qdsp6v2/msm-dolby-dap-config.c
+++ /dev/null
@@ -1,1092 +0,0 @@
-/* Copyright (c) 2013-2014,2016-2017, The Linux Foundation. All rights reserved.
-* This program is free software; you can redistribute it and/or modify
-* it under the terms of the GNU General Public License version 2 and
-* only version 2 as published by the Free Software Foundation.
-*
-* This program is distributed in the hope that it will be useful,
-* but WITHOUT ANY WARRANTY; without even the implied warranty of
-* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
-* GNU General Public License for more details.
-*/
-
-#include <linux/err.h>
-#include <linux/module.h>
-#include <linux/bitops.h>
-#include <sound/control.h>
-#include <sound/q6adm-v2.h>
-#include <sound/q6core.h>
-
-#include "msm-dolby-dap-config.h"
-
-#ifndef DOLBY_PARAM_VCNB_MAX_LENGTH
-#define DOLBY_PARAM_VCNB_MAX_LENGTH 40
-#endif
-
-/* dolby endp based parameters */
-struct dolby_dap_endp_params_s {
- int device;
- int device_ch_caps;
- int dap_device;
- int params_id[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
- int params_len[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
- int params_offset[DOLBY_NUM_ENDP_DEPENDENT_PARAMS];
- int params_val[DOLBY_ENDDEP_PARAM_LENGTH];
-};
-
-const struct dolby_dap_endp_params_s
- dolby_dap_endp_params[NUM_DOLBY_ENDP_DEVICE] = {
- {EARPIECE, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {SPEAKER, 2, DOLBY_ENDP_INT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {WIRED_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {WIRED_HEADPHONE, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_SCO, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_SCO_HEADSET, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_SCO_CARKIT, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_A2DP, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_A2DP_HEADPHONES, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {BLUETOOTH_A2DP_SPEAKER, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {AUX_DIGITAL, 2, DOLBY_ENDP_HDMI,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-496, -496, 0}
- },
- {AUX_DIGITAL, 6, DOLBY_ENDP_HDMI,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-496, -496, 0}
- },
- {AUX_DIGITAL, 8, DOLBY_ENDP_HDMI,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-496, -496, 0}
- },
- {ANLG_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {DGTL_DOCK_HEADSET, 2, DOLBY_ENDP_HEADPHONES,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {USB_ACCESSORY, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {USB_DEVICE, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {REMOTE_SUBMIX, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {PROXY, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {PROXY, 6, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {FM, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
- {FM_TX, 2, DOLBY_ENDP_EXT_SPEAKERS,
- {DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_VMB},
- {DOLBY_ENDDEP_PARAM_DVLO_LENGTH, DOLBY_ENDDEP_PARAM_DVLI_LENGTH,
- DOLBY_ENDDEP_PARAM_VMB_LENGTH},
- {DOLBY_ENDDEP_PARAM_DVLO_OFFSET, DOLBY_ENDDEP_PARAM_DVLI_OFFSET,
- DOLBY_ENDDEP_PARAM_VMB_OFFSET},
- {-320, -320, 144}
- },
-};
-
-/* dolby param ids to/from dsp */
-static uint32_t dolby_dap_params_id[ALL_DOLBY_PARAMS] = {
- DOLBY_PARAM_ID_VDHE, DOLBY_PARAM_ID_VSPE, DOLBY_PARAM_ID_DSSF,
- DOLBY_PARAM_ID_DVLI, DOLBY_PARAM_ID_DVLO, DOLBY_PARAM_ID_DVLE,
- DOLBY_PARAM_ID_DVMC, DOLBY_PARAM_ID_DVME, DOLBY_PARAM_ID_IENB,
- DOLBY_PARAM_ID_IEBF, DOLBY_PARAM_ID_IEON, DOLBY_PARAM_ID_DEON,
- DOLBY_PARAM_ID_NGON, DOLBY_PARAM_ID_GEON, DOLBY_PARAM_ID_GENB,
- DOLBY_PARAM_ID_GEBF, DOLBY_PARAM_ID_AONB, DOLBY_PARAM_ID_AOBF,
- DOLBY_PARAM_ID_AOBG, DOLBY_PARAM_ID_AOON, DOLBY_PARAM_ID_ARNB,
- DOLBY_PARAM_ID_ARBF, DOLBY_PARAM_ID_PLB, DOLBY_PARAM_ID_PLMD,
- DOLBY_PARAM_ID_DHSB, DOLBY_PARAM_ID_DHRG, DOLBY_PARAM_ID_DSSB,
- DOLBY_PARAM_ID_DSSA, DOLBY_PARAM_ID_DVLA, DOLBY_PARAM_ID_IEBT,
- DOLBY_PARAM_ID_IEA, DOLBY_PARAM_ID_DEA, DOLBY_PARAM_ID_DED,
- DOLBY_PARAM_ID_GEBG, DOLBY_PARAM_ID_AOCC, DOLBY_PARAM_ID_ARBI,
- DOLBY_PARAM_ID_ARBL, DOLBY_PARAM_ID_ARBH, DOLBY_PARAM_ID_AROD,
- DOLBY_PARAM_ID_ARTP, DOLBY_PARAM_ID_VMON, DOLBY_PARAM_ID_VMB,
- DOLBY_PARAM_ID_VCNB, DOLBY_PARAM_ID_VCBF, DOLBY_PARAM_ID_PREG,
- DOLBY_PARAM_ID_VEN, DOLBY_PARAM_ID_PSTG, DOLBY_COMMIT_ALL_TO_DSP,
- DOLBY_COMMIT_TO_DSP, DOLBY_USE_CACHE, DOLBY_AUTO_ENDP,
- DOLBY_AUTO_ENDDEP_PARAMS
-};
-
-/* modifed state: 0x00000000 - Not updated
-* > 0x00000000 && < 0x00010000
-* Updated and not commited to DSP
-* 0x00010001 - Updated and commited to DSP
-* > 0x00010001 - Modified the commited value
-*/
-static int dolby_dap_params_modified[MAX_DOLBY_PARAMS] = { 0 };
-/* param offset */
-static uint32_t dolby_dap_params_offset[MAX_DOLBY_PARAMS] = {
- DOLBY_PARAM_VDHE_OFFSET, DOLBY_PARAM_VSPE_OFFSET,
- DOLBY_PARAM_DSSF_OFFSET, DOLBY_PARAM_DVLI_OFFSET,
- DOLBY_PARAM_DVLO_OFFSET, DOLBY_PARAM_DVLE_OFFSET,
- DOLBY_PARAM_DVMC_OFFSET, DOLBY_PARAM_DVME_OFFSET,
- DOLBY_PARAM_IENB_OFFSET, DOLBY_PARAM_IEBF_OFFSET,
- DOLBY_PARAM_IEON_OFFSET, DOLBY_PARAM_DEON_OFFSET,
- DOLBY_PARAM_NGON_OFFSET, DOLBY_PARAM_GEON_OFFSET,
- DOLBY_PARAM_GENB_OFFSET, DOLBY_PARAM_GEBF_OFFSET,
- DOLBY_PARAM_AONB_OFFSET, DOLBY_PARAM_AOBF_OFFSET,
- DOLBY_PARAM_AOBG_OFFSET, DOLBY_PARAM_AOON_OFFSET,
- DOLBY_PARAM_ARNB_OFFSET, DOLBY_PARAM_ARBF_OFFSET,
- DOLBY_PARAM_PLB_OFFSET, DOLBY_PARAM_PLMD_OFFSET,
- DOLBY_PARAM_DHSB_OFFSET, DOLBY_PARAM_DHRG_OFFSET,
- DOLBY_PARAM_DSSB_OFFSET, DOLBY_PARAM_DSSA_OFFSET,
- DOLBY_PARAM_DVLA_OFFSET, DOLBY_PARAM_IEBT_OFFSET,
- DOLBY_PARAM_IEA_OFFSET, DOLBY_PARAM_DEA_OFFSET,
- DOLBY_PARAM_DED_OFFSET, DOLBY_PARAM_GEBG_OFFSET,
- DOLBY_PARAM_AOCC_OFFSET, DOLBY_PARAM_ARBI_OFFSET,
- DOLBY_PARAM_ARBL_OFFSET, DOLBY_PARAM_ARBH_OFFSET,
- DOLBY_PARAM_AROD_OFFSET, DOLBY_PARAM_ARTP_OFFSET,
- DOLBY_PARAM_VMON_OFFSET, DOLBY_PARAM_VMB_OFFSET,
- DOLBY_PARAM_VCNB_OFFSET, DOLBY_PARAM_VCBF_OFFSET,
- DOLBY_PARAM_PREG_OFFSET, DOLBY_PARAM_VEN_OFFSET,
- DOLBY_PARAM_PSTG_OFFSET
-};
-/* param_length */
-static uint32_t dolby_dap_params_length[MAX_DOLBY_PARAMS] = {
- DOLBY_PARAM_VDHE_LENGTH, DOLBY_PARAM_VSPE_LENGTH,
- DOLBY_PARAM_DSSF_LENGTH, DOLBY_PARAM_DVLI_LENGTH,
- DOLBY_PARAM_DVLO_LENGTH, DOLBY_PARAM_DVLE_LENGTH,
- DOLBY_PARAM_DVMC_LENGTH, DOLBY_PARAM_DVME_LENGTH,
- DOLBY_PARAM_IENB_LENGTH, DOLBY_PARAM_IEBF_LENGTH,
- DOLBY_PARAM_IEON_LENGTH, DOLBY_PARAM_DEON_LENGTH,
- DOLBY_PARAM_NGON_LENGTH, DOLBY_PARAM_GEON_LENGTH,
- DOLBY_PARAM_GENB_LENGTH, DOLBY_PARAM_GEBF_LENGTH,
- DOLBY_PARAM_AONB_LENGTH, DOLBY_PARAM_AOBF_LENGTH,
- DOLBY_PARAM_AOBG_LENGTH, DOLBY_PARAM_AOON_LENGTH,
- DOLBY_PARAM_ARNB_LENGTH, DOLBY_PARAM_ARBF_LENGTH,
- DOLBY_PARAM_PLB_LENGTH, DOLBY_PARAM_PLMD_LENGTH,
- DOLBY_PARAM_DHSB_LENGTH, DOLBY_PARAM_DHRG_LENGTH,
- DOLBY_PARAM_DSSB_LENGTH, DOLBY_PARAM_DSSA_LENGTH,
- DOLBY_PARAM_DVLA_LENGTH, DOLBY_PARAM_IEBT_LENGTH,
- DOLBY_PARAM_IEA_LENGTH, DOLBY_PARAM_DEA_LENGTH,
- DOLBY_PARAM_DED_LENGTH, DOLBY_PARAM_GEBG_LENGTH,
- DOLBY_PARAM_AOCC_LENGTH, DOLBY_PARAM_ARBI_LENGTH,
- DOLBY_PARAM_ARBL_LENGTH, DOLBY_PARAM_ARBH_LENGTH,
- DOLBY_PARAM_AROD_LENGTH, DOLBY_PARAM_ARTP_LENGTH,
- DOLBY_PARAM_VMON_LENGTH, DOLBY_PARAM_VMB_LENGTH,
- DOLBY_PARAM_VCNB_LENGTH, DOLBY_PARAM_VCBF_LENGTH,
- DOLBY_PARAM_PREG_LENGTH, DOLBY_PARAM_VEN_LENGTH,
- DOLBY_PARAM_PSTG_LENGTH
-};
-
-/* param_value */
-static uint32_t dolby_dap_params_value[TOTAL_LENGTH_DOLBY_PARAM] = {0};
-
-struct dolby_dap_params_get_s {
- int32_t port_id;
- uint32_t device_id;
- uint32_t param_id;
- uint32_t offset;
- uint32_t length;
-};
-
-struct dolby_dap_params_states_s {
- bool use_cache;
- bool auto_endp;
- bool enddep_params;
- int port_id[AFE_MAX_PORTS];
- int copp_idx[AFE_MAX_PORTS];
- int port_open_count;
- int port_ids_dolby_can_be_enabled;
- int device;
-};
-
-static struct dolby_dap_params_get_s dolby_dap_params_get = {-1, DEVICE_OUT_ALL,
- 0, 0, 0};
-static struct dolby_dap_params_states_s dolby_dap_params_states = { true, true,
- true, {DOLBY_INVALID_PORT_ID},
- {-1}, 0, DEVICE_OUT_ALL, 0 };
-/*
-port_ids_dolby_can_be_enabled is set to 0x7FFFFFFF.
-this needs to be removed after interface validation
-*/
-
-static int msm_dolby_dap_map_device_to_dolby_endpoint(int device)
-{
- int i, dolby_dap_device = DOLBY_ENDP_EXT_SPEAKERS;
- for (i = 0; i < NUM_DOLBY_ENDP_DEVICE; i++) {
- if (dolby_dap_endp_params[i].device == device) {
- dolby_dap_device = dolby_dap_endp_params[i].dap_device;
- break;
- }
- }
- /* default the endpoint to speaker if corresponding device entry */
- /* not found */
- if (i >= NUM_DOLBY_ENDP_DEVICE)
- dolby_dap_params_states.device = SPEAKER;
- return dolby_dap_device;
-}
-
-static int msm_dolby_dap_send_end_point(int port_id, int copp_idx)
-{
- int rc = 0;
- char *params_value;
- int *update_params_value;
- uint32_t params_length = (DOLBY_PARAM_INT_ENDP_LENGTH +
- DOLBY_PARAM_PAYLOAD_SIZE) * sizeof(uint32_t);
-
- pr_debug("%s\n", __func__);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = DOLBY_PARAM_ID_INIT_ENDP;
- *update_params_value++ = DOLBY_PARAM_INT_ENDP_LENGTH * sizeof(uint32_t);
- *update_params_value++ =
- msm_dolby_dap_map_device_to_dolby_endpoint(
- dolby_dap_params_states.device);
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send dolby params failed\n", __func__);
- rc = -EINVAL;
- }
- kfree(params_value);
- return rc;
-}
-
-static int msm_dolby_dap_send_enddep_params(int port_id, int copp_idx,
- int device_channels)
-{
- int i, j, rc = 0, idx, offset;
- char *params_value;
- int *update_params_value;
- uint32_t params_length = (DOLBY_ENDDEP_PARAM_LENGTH +
- DOLBY_NUM_ENDP_DEPENDENT_PARAMS *
- DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
-
- pr_debug("%s\n", __func__);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- for (idx = 0; idx < NUM_DOLBY_ENDP_DEVICE; idx++) {
- if (dolby_dap_endp_params[idx].device ==
- dolby_dap_params_states.device) {
- if (dolby_dap_params_states.device == AUX_DIGITAL ||
- dolby_dap_params_states.device == PROXY) {
- if (dolby_dap_endp_params[idx].device_ch_caps ==
- device_channels)
- break;
- } else {
- break;
- }
- }
- }
- if (idx >= NUM_DOLBY_ENDP_DEVICE) {
- pr_err("%s: device is not set accordingly\n", __func__);
- kfree(params_value);
- return -EINVAL;
- }
- for (i = 0; i < DOLBY_ENDDEP_PARAM_LENGTH; i++) {
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ =
- dolby_dap_endp_params[idx].params_id[i];
- *update_params_value++ =
- dolby_dap_endp_params[idx].params_len[i] *
- sizeof(uint32_t);
- offset = dolby_dap_endp_params[idx].params_offset[i];
- for (j = 0; j < dolby_dap_endp_params[idx].params_len[i]; j++)
- *update_params_value++ =
- dolby_dap_endp_params[idx].params_val[offset+j];
- }
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send dolby params failed\n", __func__);
- rc = -EINVAL;
- }
- kfree(params_value);
- return rc;
-}
-
-static int msm_dolby_dap_send_cached_params(int port_id, int copp_idx,
- int commit)
-{
- char *params_value;
- int *update_params_value, rc = 0;
- uint32_t index_offset, i, j;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- MAX_DOLBY_PARAMS * DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
-
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed\n", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- params_length = 0;
- for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
- if ((dolby_dap_params_modified[i] == 0) ||
- ((commit) &&
- ((dolby_dap_params_modified[i] & 0x00010000) &&
- ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))))
- continue;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = dolby_dap_params_id[i];
- *update_params_value++ = dolby_dap_params_length[i] *
- sizeof(uint32_t);
- index_offset = dolby_dap_params_offset[i];
- for (j = 0; j < dolby_dap_params_length[i]; j++) {
- *update_params_value++ =
- dolby_dap_params_value[index_offset+j];
- }
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
- dolby_dap_params_length[i]) * sizeof(uint32_t);
- }
- pr_debug("%s, valid param length: %d", __func__, params_length);
- if (params_length) {
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send dolby params failed\n", __func__);
- kfree(params_value);
- return -EINVAL;
- }
- for (i = 0; i < MAX_DOLBY_PARAMS; i++) {
- if ((dolby_dap_params_modified[i] == 0) ||
- ((commit) &&
- ((dolby_dap_params_modified[i] & 0x00010000) &&
- ((dolby_dap_params_modified[i] & 0x0000FFFF) <= 1))
- ))
- continue;
- dolby_dap_params_modified[i] = 0x00010001;
- }
- }
- kfree(params_value);
- return 0;
-}
-
-int msm_dolby_dap_init(int port_id, int copp_idx, int channels,
- bool is_custom_stereo_on)
-{
- int ret = 0;
- int index = adm_validate_and_get_port_index(port_id);
- if (index < 0) {
- pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
- port_id);
- return -EINVAL;
- }
- if ((port_id != DOLBY_INVALID_PORT_ID) &&
- (port_id & dolby_dap_params_states.port_ids_dolby_can_be_enabled)) {
- dolby_dap_params_states.port_id[index] = port_id;
- dolby_dap_params_states.copp_idx[index] = copp_idx;
- dolby_dap_params_states.port_open_count++;
- if (dolby_dap_params_states.auto_endp) {
- ret = msm_dolby_dap_send_end_point(port_id, copp_idx);
- if (ret) {
- pr_err("%s: err sending endppoint\n", __func__);
- return ret;
- }
- }
- if (dolby_dap_params_states.use_cache) {
- ret = msm_dolby_dap_send_cached_params(port_id,
- copp_idx, 0);
- if (ret) {
- pr_err("%s: err sending cached params\n",
- __func__);
- return ret;
- }
- }
- if (dolby_dap_params_states.enddep_params) {
- msm_dolby_dap_send_enddep_params(port_id, copp_idx,
- channels);
- if (ret) {
- pr_err("%s: err sending endp dependent params\n",
- __func__);
- return ret;
- }
- }
- if (is_custom_stereo_on)
- dolby_dap_set_custom_stereo_onoff(port_id, copp_idx,
- is_custom_stereo_on);
- }
- return ret;
-}
-
-void msm_dolby_dap_deinit(int port_id)
-{
- int index = adm_validate_and_get_port_index(port_id);
- if (index < 0) {
- pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
- port_id);
- return;
- }
- dolby_dap_params_states.port_open_count--;
- if ((dolby_dap_params_states.port_id[index] == port_id) &&
- (!dolby_dap_params_states.port_open_count)) {
- dolby_dap_params_states.port_id[index] = DOLBY_INVALID_PORT_ID;
- dolby_dap_params_states.copp_idx[index] = -1;
- }
-}
-
-static int msm_dolby_dap_set_vspe_vdhe(int port_id, int copp_idx,
- bool is_custom_stereo_enabled)
-{
- char *params_value;
- int *update_params_value, rc = 0;
- uint32_t index_offset, i, j;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- 2 * DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
- if (port_id == DOLBY_INVALID_PORT_ID) {
- pr_err("%s: Not a Dolby topology. Do not set custom stereo mixing\n",
- __func__);
- return -EINVAL;
- }
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed\n", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- params_length = 0;
- /* for VDHE and VSPE DAP params at index 0 and 1 in table */
- for (i = 0; i < 2; i++) {
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = dolby_dap_params_id[i];
- *update_params_value++ = dolby_dap_params_length[i] *
- sizeof(uint32_t);
- index_offset = dolby_dap_params_offset[i];
- for (j = 0; j < dolby_dap_params_length[i]; j++) {
- if (is_custom_stereo_enabled)
- *update_params_value++ = 0;
- else
- *update_params_value++ =
- dolby_dap_params_value[index_offset+j];
- }
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE +
- dolby_dap_params_length[i]) * sizeof(uint32_t);
- }
- pr_debug("%s, valid param length: %d", __func__, params_length);
- if (params_length) {
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: send vdhe/vspe params failed with rc=%d\n",
- __func__, rc);
- kfree(params_value);
- return -EINVAL;
- }
- }
- kfree(params_value);
- return 0;
-}
-
-int dolby_dap_set_custom_stereo_onoff(int port_id, int copp_idx,
- bool is_custom_stereo_enabled)
-{
- char *params_value;
- int *update_params_value, rc = 0;
- uint32_t params_length = (TOTAL_LENGTH_DOLBY_PARAM +
- DOLBY_PARAM_PAYLOAD_SIZE) *
- sizeof(uint32_t);
- if (port_id == DOLBY_INVALID_PORT_ID)
- return -EINVAL;
-
- msm_dolby_dap_set_vspe_vdhe(port_id, copp_idx,
- is_custom_stereo_enabled);
- params_value = kzalloc(params_length, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed\n", __func__);
- return -ENOMEM;
- }
- update_params_value = (int *)params_value;
- params_length = 0;
- *update_params_value++ = DOLBY_BUNDLE_MODULE_ID;
- *update_params_value++ = DOLBY_ENABLE_CUSTOM_STEREO;
- *update_params_value++ = sizeof(uint32_t);
- if (is_custom_stereo_enabled)
- *update_params_value++ = 1;
- else
- *update_params_value++ = 0;
- params_length += (DOLBY_PARAM_PAYLOAD_SIZE + 1) * sizeof(uint32_t);
- pr_debug("%s, valid param length: %d", __func__, params_length);
- if (params_length) {
- rc = adm_dolby_dap_send_params(port_id, copp_idx, params_value,
- params_length);
- if (rc) {
- pr_err("%s: setting ds1 custom stereo param failed with rc=%d\n",
- __func__, rc);
- kfree(params_value);
- return -EINVAL;
- }
- }
- kfree(params_value);
- return 0;
-}
-
-static int msm_dolby_dap_map_device_to_port_id(int device)
-{
- int port_id = SLIMBUS_0_RX;
- device = DEVICE_OUT_ALL;
- /*update the device when single stream to multiple device is handled*/
- if (device == DEVICE_OUT_ALL) {
- port_id = PRIMARY_I2S_RX | SLIMBUS_0_RX | HDMI_RX |
- INT_BT_SCO_RX | INT_FM_RX |
- RT_PROXY_PORT_001_RX |
- AFE_PORT_ID_PRIMARY_PCM_RX |
- MI2S_RX | SECONDARY_I2S_RX |
- SLIMBUS_1_RX | SLIMBUS_4_RX | SLIMBUS_3_RX |
- AFE_PORT_ID_SECONDARY_MI2S_RX;
- } else {
- /* update port_id based on the device */
- }
- return port_id;
-}
-
-int msm_dolby_dap_param_to_set_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* not used while setting the parameters */
- return 0;
-}
-
-int msm_dolby_dap_param_to_set_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int rc = 0, port_id, copp_idx;
- uint32_t idx, j, current_offset;
- uint32_t device = ucontrol->value.integer.value[0];
- uint32_t param_id = ucontrol->value.integer.value[1];
- uint32_t offset = ucontrol->value.integer.value[2];
- uint32_t length = ucontrol->value.integer.value[3];
-
- dolby_dap_params_states.port_ids_dolby_can_be_enabled =
- msm_dolby_dap_map_device_to_port_id(device);
- for (idx = 0; idx < ALL_DOLBY_PARAMS; idx++) {
- /*paramid from user space*/
- if (param_id == dolby_dap_params_id[idx])
- break;
- }
- if (idx > ALL_DOLBY_PARAMS-1) {
- pr_err("%s: invalid param id 0x%x to set\n", __func__,
- param_id);
- return -EINVAL;
- }
- switch (idx) {
- case DOLBY_COMMIT_ALL_IDX: {
- /* COMIIT ALL: Send all parameters to DSP */
- pr_debug("%s: COMMIT_ALL recvd\n", __func__);
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx =
- dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx > 0) &&
- (copp_idx < MAX_COPPS_PER_PORT) &&
- (port_id != DOLBY_INVALID_PORT_ID))
- rc |= msm_dolby_dap_send_cached_params(
- port_id,
- copp_idx,
- 0);
- }
- }
- break;
- case DOLBY_COMMIT_IDX: {
- pr_debug("%s: COMMIT recvd\n", __func__);
- /* COMMIT: Send only modified paramters to DSP */
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx =
- dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx > 0) &&
- (copp_idx < MAX_COPPS_PER_PORT) &&
- (port_id == DOLBY_INVALID_PORT_ID))
- rc |= msm_dolby_dap_send_cached_params(
- port_id,
- copp_idx,
- 1);
- }
- }
- break;
- case DOLBY_USE_CACHE_IDX: {
- pr_debug("%s: USE CACHE recvd val: %ld\n", __func__,
- ucontrol->value.integer.value[4]);
- dolby_dap_params_states.use_cache =
- ucontrol->value.integer.value[4];
- }
- break;
- case DOLBY_AUTO_ENDP_IDX: {
- pr_debug("%s: AUTO_ENDP recvd val: %ld\n", __func__,
- ucontrol->value.integer.value[4]);
- dolby_dap_params_states.auto_endp =
- ucontrol->value.integer.value[4];
- }
- break;
- case DOLBY_AUTO_ENDDEP_IDX: {
- pr_debug("%s: USE_ENDDEP_PARAMS recvd val: %ld\n",
- __func__, ucontrol->value.integer.value[4]);
- dolby_dap_params_states.enddep_params =
- ucontrol->value.integer.value[4];
- }
- break;
- default: {
- /* cache the parameters */
- dolby_dap_params_modified[idx] += 1;
- current_offset = dolby_dap_params_offset[idx] + offset;
- if (current_offset >= TOTAL_LENGTH_DOLBY_PARAM) {
- pr_err("%s: invalid offset %d at idx %d\n",
- __func__, offset, idx);
- return -EINVAL;
- }
- if ((length == 0) || (current_offset + length - 1
- < current_offset) || (current_offset + length
- > TOTAL_LENGTH_DOLBY_PARAM)) {
- pr_err("%s: invalid length %d at idx %d\n",
- __func__, length, idx);
- return -EINVAL;
- }
- dolby_dap_params_length[idx] = length;
- pr_debug("%s: param recvd deviceId=0x%x paramId=0x%x offset=%d length=%d\n",
- __func__, device, param_id, offset, length);
- for (j = 0; j < length; j++) {
- dolby_dap_params_value[
- dolby_dap_params_offset[idx] +
- offset + j]
- = ucontrol->value.integer.value[4+j];
- pr_debug("value[%d]: %ld\n", j,
- ucontrol->value.integer.value[4+j]);
- }
- }
- }
-
- return rc;
-}
-
-int msm_dolby_dap_param_to_get_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int rc = 0, i, index;
- char *params_value;
- int *update_params_value;
- uint32_t params_length = DOLBY_MAX_LENGTH_INDIVIDUAL_PARAM *
- sizeof(uint32_t);
- uint32_t param_payload_len =
- DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
- int port_id = dolby_dap_params_get.port_id, copp_idx;
-
- if (port_id == DOLBY_INVALID_PORT_ID) {
- pr_err("%s, port_id not set, do not query ADM\n", __func__);
- return -EINVAL;
- }
- index = adm_validate_and_get_port_index(port_id);
- if (index < 0) {
- pr_err("%s: Invalid port idx %d port_id %#x\n", __func__, index,
- port_id);
- return -EINVAL;
- }
- copp_idx = dolby_dap_params_states.copp_idx[index];
- if ((copp_idx < 0) || (copp_idx >= MAX_COPPS_PER_PORT)) {
- pr_debug("%s: get params called before copp open.copp_idx:%d\n",
- __func__, copp_idx);
- return -EINVAL;
- }
- if (dolby_dap_params_get.length > 128 - DOLBY_PARAM_PAYLOAD_SIZE) {
- pr_err("%s: Incorrect parameter length", __func__);
- return -EINVAL;
- }
- params_value = kzalloc(params_length + param_payload_len, GFP_KERNEL);
- if (!params_value) {
- pr_err("%s, params memory alloc failed\n", __func__);
- return -ENOMEM;
- }
- if (DOLBY_PARAM_ID_VER == dolby_dap_params_get.param_id) {
- rc = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID, DOLBY_PARAM_ID_VER,
- params_length + param_payload_len,
- params_value);
- } else {
- for (i = 0; i < MAX_DOLBY_PARAMS; i++)
- if (dolby_dap_params_id[i] ==
- dolby_dap_params_get.param_id)
- break;
- if (i > MAX_DOLBY_PARAMS-1) {
- pr_err("%s: invalid param id to set", __func__);
- rc = -EINVAL;
- } else {
- params_length = dolby_dap_params_length[i] *
- sizeof(uint32_t);
- rc = adm_get_params(port_id, copp_idx,
- DOLBY_BUNDLE_MODULE_ID,
- dolby_dap_params_id[i],
- params_length + param_payload_len,
- params_value);
- }
- }
- if (rc) {
- pr_err("%s: get parameters failed rc:%d\n", __func__, rc);
- kfree(params_value);
- return -EINVAL;
- }
- update_params_value = (int *)params_value;
- ucontrol->value.integer.value[0] = dolby_dap_params_get.device_id;
- ucontrol->value.integer.value[1] = dolby_dap_params_get.param_id;
- ucontrol->value.integer.value[2] = dolby_dap_params_get.offset;
- ucontrol->value.integer.value[3] = dolby_dap_params_get.length;
-
- pr_debug("%s: FROM DSP value[0] 0x%x value[1] %d value[2] 0x%x\n",
- __func__, update_params_value[0],
- update_params_value[1], update_params_value[2]);
- for (i = 0; i < dolby_dap_params_get.length; i++) {
- ucontrol->value.integer.value[DOLBY_PARAM_PAYLOAD_SIZE+i] =
- update_params_value[i];
- pr_debug("value[%d]:%d\n", i, update_params_value[i]);
- }
- pr_debug("%s: Returning param_id=0x%x offset=%d length=%d\n",
- __func__, dolby_dap_params_get.param_id,
- dolby_dap_params_get.offset,
- dolby_dap_params_get.length);
- kfree(params_value);
- return 0;
-}
-
-int msm_dolby_dap_param_to_get_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int port_id, idx, copp_idx;
- dolby_dap_params_get.device_id = ucontrol->value.integer.value[0];
- port_id = msm_dolby_dap_map_device_to_port_id(
- dolby_dap_params_get.device_id);
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx = dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx < 0) ||
- (copp_idx >= MAX_COPPS_PER_PORT) ||
- (port_id == DOLBY_INVALID_PORT_ID))
- continue;
- else
- break;
- }
- if (idx == AFE_MAX_PORTS)
- port_id = SLIMBUS_0_RX;
- dolby_dap_params_get.port_id = port_id;
- dolby_dap_params_get.param_id = ucontrol->value.integer.value[1];
- dolby_dap_params_get.offset = ucontrol->value.integer.value[2];
- dolby_dap_params_get.length = ucontrol->value.integer.value[3];
- pr_debug("%s: param_id=0x%x offset=%d length=%d\n", __func__,
- dolby_dap_params_get.param_id, dolby_dap_params_get.offset,
- dolby_dap_params_get.length);
- return 0;
-}
-
-int msm_dolby_dap_param_visualizer_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- uint32_t length = dolby_dap_params_value[DOLBY_PARAM_VCNB_OFFSET];
- char *visualizer_data;
- int i, rc;
- int *update_visualizer_data;
- uint32_t offset, params_length =
- (2*length + DOLBY_VIS_PARAM_HEADER_SIZE)*sizeof(uint32_t);
- uint32_t param_payload_len =
- DOLBY_PARAM_PAYLOAD_SIZE * sizeof(uint32_t);
- int port_id, copp_idx, idx;
- if (length > DOLBY_PARAM_VCNB_MAX_LENGTH || length <= 0) {
- pr_err("%s Incorrect VCNB length", __func__);
- ucontrol->value.integer.value[0] = 0;
- return -EINVAL;
- }
- for (idx = 0; idx < AFE_MAX_PORTS; idx++) {
- port_id = dolby_dap_params_states.port_id[idx];
- copp_idx = dolby_dap_params_states.copp_idx[idx];
- if ((copp_idx < 0) ||
- (copp_idx >= MAX_COPPS_PER_PORT) ||
- (port_id == DOLBY_INVALID_PORT_ID))
- continue;
- else
- break;
- }
- if (idx == AFE_MAX_PORTS) {
- pr_debug("%s, port_id not set, returning error", __func__);
- ucontrol->value.integer.value[0] = 0;
- return -EINVAL;
- }
- visualizer_data = kzalloc(params_length, GFP_KERNEL);
- if (!visualizer_data) {
- pr_err("%s, params memory alloc failed\n", __func__);
- return -ENOMEM;
- }
- offset = 0;
- params_length = length * sizeof(uint32_t);
- rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
- DOLBY_PARAM_ID_VCBG,
- params_length + param_payload_len,
- visualizer_data + offset);
- if (rc) {
- pr_err("%s: get parameters failed\n", __func__);
- kfree(visualizer_data);
- return -EINVAL;
- }
-
- offset = length * sizeof(uint32_t);
- rc = adm_get_params(port_id, copp_idx, DOLBY_BUNDLE_MODULE_ID,
- DOLBY_PARAM_ID_VCBE,
- params_length + param_payload_len,
- visualizer_data + offset);
- if (rc) {
- pr_err("%s: get parameters failed\n", __func__);
- kfree(visualizer_data);
- return -EINVAL;
- }
-
- ucontrol->value.integer.value[0] = 2*length;
- pr_debug("%s: visualizer data length %ld\n", __func__,
- ucontrol->value.integer.value[0]);
- update_visualizer_data = (int *)visualizer_data;
- for (i = 0; i < 2*length; i++) {
- ucontrol->value.integer.value[1+i] = update_visualizer_data[i];
- pr_debug("value[%d] %d\n", i, update_visualizer_data[i]);
- }
- kfree(visualizer_data);
- return 0;
-}
-
-int msm_dolby_dap_param_visualizer_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* not used while getting the visualizer data */
- return 0;
-}
-
-int msm_dolby_dap_endpoint_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* not used while setting the endpoint */
- return 0;
-}
-
-int msm_dolby_dap_endpoint_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int device = ucontrol->value.integer.value[0];
- dolby_dap_params_states.device = device;
- return 0;
-}
-
-int msm_dolby_dap_security_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- /* not used while setting the manfr id*/
- return 0;
-}
-
-int msm_dolby_dap_security_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- int manufacturer_id = ucontrol->value.integer.value[0];
- core_set_dolby_manufacturer_id(manufacturer_id);
- return 0;
-}
-
-int msm_dolby_dap_license_control_get(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- ucontrol->value.integer.value[0] =
- core_get_license_status(DOLBY_DS1_LICENSE_ID);
- return 0;
-}
-
-int msm_dolby_dap_license_control_put(struct snd_kcontrol *kcontrol,
- struct snd_ctl_elem_value *ucontrol)
-{
- return core_set_license(ucontrol->value.integer.value[0],
- DOLBY_DS1_LICENSE_ID);
-}
-
-static const struct snd_kcontrol_new dolby_license_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 License", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 1, msm_dolby_dap_license_control_get,
- msm_dolby_dap_license_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_security_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 Security", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 1, msm_dolby_dap_security_control_get,
- msm_dolby_dap_security_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_set_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Set Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
- 0, 128, msm_dolby_dap_param_to_set_control_get,
- msm_dolby_dap_param_to_set_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_to_get_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Get Param", SND_SOC_NOPM, 0, 0xFFFFFFFF,
- 0, 128, msm_dolby_dap_param_to_get_control_get,
- msm_dolby_dap_param_to_get_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_visualizer_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Get Visualizer", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 41, msm_dolby_dap_param_visualizer_control_get,
- msm_dolby_dap_param_visualizer_control_put),
-};
-
-static const struct snd_kcontrol_new dolby_dap_param_end_point_controls[] = {
- SOC_SINGLE_MULTI_EXT("DS1 DAP Endpoint", SND_SOC_NOPM, 0,
- 0xFFFFFFFF, 0, 1, msm_dolby_dap_endpoint_control_get,
- msm_dolby_dap_endpoint_control_put),
-};
-
-void msm_dolby_dap_add_controls(struct snd_soc_platform *platform)
-{
- snd_soc_add_platform_controls(platform,
- dolby_license_controls,
- ARRAY_SIZE(dolby_license_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_security_controls,
- ARRAY_SIZE(dolby_security_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_to_set_controls,
- ARRAY_SIZE(dolby_dap_param_to_set_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_to_get_controls,
- ARRAY_SIZE(dolby_dap_param_to_get_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_visualizer_controls,
- ARRAY_SIZE(dolby_dap_param_visualizer_controls));
-
- snd_soc_add_platform_controls(platform,
- dolby_dap_param_end_point_controls,
- ARRAY_SIZE(dolby_dap_param_end_point_controls));
-}
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 3fa14d0113ef..ec4380036047 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -86,6 +86,7 @@ struct lsm_priv {
atomic_t buf_count;
atomic_t read_abort;
wait_queue_head_t period_wait;
+ struct mutex lsm_api_lock;
int appl_cnt;
int dma_write;
};
@@ -954,10 +955,18 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
dev_dbg(rtd->dev, "%s: Get event status\n", __func__);
atomic_set(&prtd->event_wait_stop, 0);
+
+ /*
+ * Release the api lock before wait to allow
+ * other IOCTLs to be invoked while waiting
+ * for event
+ */
+ mutex_unlock(&prtd->lsm_api_lock);
rc = wait_event_freezable(prtd->event_wait,
(cmpxchg(&prtd->event_avail, 1, 0) ||
(xchg = atomic_cmpxchg(&prtd->event_wait_stop,
1, 0))));
+ mutex_lock(&prtd->lsm_api_lock);
dev_dbg(rtd->dev, "%s: wait_event_freezable %d event_wait_stop %d\n",
__func__, rc, xchg);
if (!rc && !xchg) {
@@ -1281,6 +1290,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
rtd = substream->private_data;
prtd = runtime->private_data;
+ mutex_lock(&prtd->lsm_api_lock);
+
switch (cmd) {
case SNDRV_LSM_EVENT_STATUS: {
struct snd_lsm_event_status *user = NULL, userarg32;
@@ -1288,7 +1299,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
__func__, "SNDRV_LSM_EVENT_STATUS");
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg32.payload_size >
@@ -1296,7 +1308,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg32.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(*user) + userarg32.payload_size;
@@ -1305,7 +1318,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
} else {
cmd = SNDRV_LSM_EVENT_STATUS;
user->payload_size = userarg32.payload_size;
@@ -1421,7 +1435,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_modelv232, arg,
@@ -1462,7 +1477,7 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SET_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
}
if (copy_from_user(&det_params32, arg,
@@ -1505,7 +1520,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data_32, arg,
@@ -1514,7 +1530,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "SET_MODULE_PARAMS_32",
sizeof(p_data_32));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_data.params = compat_ptr(p_data_32.params);
@@ -1526,7 +1543,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (p_data.data_size !=
@@ -1535,7 +1553,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.data_size);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = sizeof(struct lsm_params_info_32) *
@@ -1546,7 +1565,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: no memory for params32, size = %zd\n",
__func__, p_size);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
p_size = sizeof(struct lsm_params_info) * p_data.num_params;
@@ -1556,7 +1576,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: no memory for params, size = %zd\n",
__func__, p_size);
kfree(params32);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params32, p_data.params,
@@ -1566,7 +1587,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
__func__, "params32", p_data.data_size);
kfree(params32);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_info_32 = (struct lsm_params_info_32 *) params32;
@@ -1609,6 +1631,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
err = msm_lsm_ioctl_shared(substream, cmd, arg);
break;
}
+done:
+ mutex_unlock(&prtd->lsm_api_lock);
return err;
}
#else
@@ -1633,6 +1657,7 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
prtd = runtime->private_data;
rtd = substream->private_data;
+ mutex_lock(&prtd->lsm_api_lock);
switch (cmd) {
case SNDRV_LSM_REG_SND_MODEL_V2: {
struct snd_lsm_sound_model_v2 snd_model_v2;
@@ -1641,7 +1666,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_model_v2, arg, sizeof(snd_model_v2))) {
@@ -1668,7 +1694,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SET_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
pr_debug("%s: SNDRV_LSM_SET_PARAMS\n", __func__);
@@ -1689,7 +1716,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: LSM_SET_PARAMS failed, err %d\n",
__func__, err);
- return err;
+
+ goto done;
}
case SNDRV_LSM_SET_MODULE_PARAMS: {
@@ -1701,7 +1729,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data, arg,
@@ -1709,7 +1738,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "p_data", sizeof(p_data));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -1717,7 +1747,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = p_data.num_params *
@@ -1728,7 +1759,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %zd\n",
__func__, "SET_MODULE_PARAMS", p_size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
params = kzalloc(p_size, GFP_KERNEL);
@@ -1736,7 +1768,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: no memory for params\n",
__func__);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params, p_data.params,
@@ -1745,7 +1778,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %d\n",
__func__, "params", p_data.data_size);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
err = msm_lsm_process_params(substream, &p_data, params);
@@ -1765,7 +1799,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: err copyuser event_status\n",
__func__);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg.payload_size >
@@ -1773,7 +1808,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(struct snd_lsm_event_status) +
@@ -1783,7 +1819,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
} else {
user->payload_size = userarg.payload_size;
err = msm_lsm_ioctl_shared(substream, cmd, user);
@@ -1806,7 +1843,7 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
if (err)
dev_err(rtd->dev,
"%s: lsmevent failed %d", __func__, err);
- return err;
+ goto done;
}
case SNDRV_LSM_EVENT_STATUS_V3: {
@@ -1873,6 +1910,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
err = msm_lsm_ioctl_shared(substream, cmd, arg);
break;
}
+done:
+ mutex_unlock(&prtd->lsm_api_lock);
return err;
}
@@ -1889,6 +1928,7 @@ static int msm_lsm_open(struct snd_pcm_substream *substream)
__func__);
return -ENOMEM;
}
+ mutex_init(&prtd->lsm_api_lock);
spin_lock_init(&prtd->event_lock);
init_waitqueue_head(&prtd->event_wait);
init_waitqueue_head(&prtd->period_wait);
@@ -2048,6 +2088,7 @@ static int msm_lsm_close(struct snd_pcm_substream *substream)
kfree(prtd->event_status);
prtd->event_status = NULL;
spin_unlock_irqrestore(&prtd->event_lock, flags);
+ mutex_destroy(&prtd->lsm_api_lock);
kfree(prtd);
runtime->private_data = NULL;
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index cfade420c509..3677a06c65ae 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -10016,6 +10016,9 @@ static int msm_audio_sound_focus_derive_port_id(struct snd_kcontrol *kcontrol,
} else if (!strcmp(kcontrol->id.name + strlen(prefix),
"TERT_MI2S")) {
*port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+ } else if (!strcmp(kcontrol->id.name + strlen(prefix),
+ "INT3_MI2S")) {
+ *port_id = AFE_PORT_ID_INT3_MI2S_TX;
} else {
pr_err("%s: mixer ctl name=%s, could not derive valid port id\n",
__func__, kcontrol->id.name);
@@ -10220,6 +10223,36 @@ static const struct snd_kcontrol_new msm_source_tracking_controls[] = {
.info = msm_source_tracking_info,
.get = msm_audio_source_tracking_get,
},
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sound Focus Voice Tx INT3_MI2S",
+ .info = msm_sound_focus_info,
+ .get = msm_voice_sound_focus_get,
+ .put = msm_voice_sound_focus_put,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Source Tracking Voice Tx INT3_MI2S",
+ .info = msm_source_tracking_info,
+ .get = msm_voice_source_tracking_get,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sound Focus Audio Tx INT3_MI2S",
+ .info = msm_sound_focus_info,
+ .get = msm_audio_sound_focus_get,
+ .put = msm_audio_sound_focus_put,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Source Tracking Audio Tx INT3_MI2S",
+ .info = msm_source_tracking_info,
+ .get = msm_audio_source_tracking_get,
+ },
};
static int spkr_prot_put_vi_lch_port(struct snd_kcontrol *kcontrol,
@@ -14668,6 +14701,7 @@ static void __exit msm_soc_routing_platform_exit(void)
{
msm_routing_delete_cal_data();
memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+ mutex_destroy(&routing_lock);
platform_driver_unregister(&msm_routing_pcm_driver);
}
module_exit(msm_soc_routing_platform_exit);
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index c444a27c06e6..b2387a746f61 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -814,20 +814,25 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
if (prtd->mode == MODE_PCM) {
ret = copy_from_user(&buf_node->frame.voc_pkt,
buf, count);
+ if (ret) {
+ pr_err("%s: copy from user failed %d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
buf_node->frame.pktlen = count;
} else {
ret = copy_from_user(&buf_node->frame,
buf, count);
+ if (ret) {
+ pr_err("%s: copy from user failed %d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
if (buf_node->frame.pktlen >= count)
buf_node->frame.pktlen = count -
(sizeof(buf_node->frame.frm_hdr) +
sizeof(buf_node->frame.pktlen));
}
- if (ret) {
- pr_err("%s: copy from user failed %d\n",
- __func__, ret);
- return -EFAULT;
- }
spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
list_add_tail(&buf_node->list, &prtd->in_queue);
spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
diff --git a/sound/soc/msm/qdsp6v2/q6afe.c b/sound/soc/msm/qdsp6v2/q6afe.c
index bc1d21a826b5..fcff383b9c3b 100644
--- a/sound/soc/msm/qdsp6v2/q6afe.c
+++ b/sound/soc/msm/qdsp6v2/q6afe.c
@@ -1303,13 +1303,22 @@ static struct cal_block_data *afe_find_cal_topo_id_by_port(
MSM_AFE_PORT_TYPE_TX)?(TX_DEVICE):(RX_DEVICE));
afe_top =
(struct audio_cal_info_afe_top *)cal_block->cal_info;
- if ((afe_top->path == path) &&
- (afe_top->acdb_id ==
- this_afe.dev_acdb_id[afe_port_index])) {
- pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
+ if (afe_top->path == path) {
+ if (this_afe.dev_acdb_id[afe_port_index] > 0) {
+ if (afe_top->acdb_id ==
+ this_afe.dev_acdb_id[afe_port_index]) {
+ pr_debug("%s: top_id:%x acdb_id:%d afe_port_id:%d\n",
+ __func__, afe_top->topology,
+ afe_top->acdb_id,
+ q6audio_get_port_id(port_id));
+ return cal_block;
+ }
+ } else {
+ pr_debug("%s: top_id:%x acdb_id:%d afe_port:%d\n",
__func__, afe_top->topology, afe_top->acdb_id,
q6audio_get_port_id(port_id));
- return cal_block;
+ return cal_block;
+ }
}
}
@@ -1497,6 +1506,8 @@ static void send_afe_cal_type(int cal_index, int port_id)
{
struct cal_block_data *cal_block = NULL;
int ret;
+ int afe_port_index = q6audio_get_port_index(port_id);
+
pr_debug("%s:\n", __func__);
if (this_afe.cal_data[cal_index] == NULL) {
@@ -1505,10 +1516,17 @@ static void send_afe_cal_type(int cal_index, int port_id)
goto done;
}
+ if (afe_port_index < 0) {
+ pr_err("%s: Error getting AFE port index %d\n",
+ __func__, afe_port_index);
+ goto done;
+ }
+
mutex_lock(&this_afe.cal_data[cal_index]->lock);
- if ((cal_index == AFE_COMMON_RX_CAL) ||
- (cal_index == AFE_COMMON_TX_CAL))
+ if (((cal_index == AFE_COMMON_RX_CAL) ||
+ (cal_index == AFE_COMMON_TX_CAL)) &&
+ (this_afe.dev_acdb_id[afe_port_index] > 0))
cal_block = afe_find_cal(cal_index, port_id);
else
cal_block = cal_utils_get_only_cal_block(
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index 3a8fdf8f1256..edf5719fbf3e 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -36,6 +36,11 @@ struct dev_config {
u32 channels;
};
+enum {
+ DP_RX_IDX,
+ EXT_DISP_RX_IDX_MAX,
+};
+
/* TDM default config */
static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
{ /* PRI TDM */
@@ -124,6 +129,10 @@ static struct dev_config tdm_tx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
}
};
+/* Default configuration of external display BE */
+static struct dev_config ext_disp_rx_cfg[] = {
+ [DP_RX_IDX] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
static struct dev_config usb_rx_cfg = {
.sample_rate = SAMPLING_RATE_48KHZ,
.bit_format = SNDRV_PCM_FORMAT_S16_LE,
@@ -251,6 +260,8 @@ static const char *const mi2s_ch_text[] = {"One", "Two", "Three", "Four",
"Eight"};
static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
"S32_LE"};
+static char const *mi2s_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
+ "S32_LE"};
static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -264,7 +275,11 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
"KHZ_16", "KHZ_22P05",
"KHZ_32", "KHZ_44P1", "KHZ_48",
"KHZ_96", "KHZ_192", "KHZ_384"};
+static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"};
+static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+ "KHZ_192"};
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_chs, ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(proxy_rx_chs, ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text);
@@ -282,6 +297,14 @@ static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_format, mi2s_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_chs, mi2s_ch_text);
@@ -294,8 +317,11 @@ static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_chs, usb_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_chs, usb_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_format, ext_disp_bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate,
+ ext_disp_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text);
@@ -667,6 +693,54 @@ static int tdm_get_format_val(int format)
return value;
}
+static int mi2s_get_format(int value)
+{
+ int format = 0;
+
+ switch (value) {
+ case 0:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 1:
+ format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 2:
+ format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 3:
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+ int value = 0;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ value = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ value = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = 3;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ return value;
+}
+
static int tdm_rx_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1132,6 +1206,78 @@ static int mi2s_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_tx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d] _tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_rx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d] _rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static int msm_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1514,6 +1660,162 @@ static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol,
return rc;
}
+static int ext_disp_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+ int idx;
+
+ if (strnstr(kcontrol->id.name, "Display Port RX",
+ sizeof("Display Port RX")))
+ idx = DP_RX_IDX;
+ else {
+ pr_err("%s: unsupported BE: %s",
+ __func__, kcontrol->id.name);
+ idx = -EINVAL;
+ }
+
+ return idx;
+}
+
+static int ext_disp_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ext_disp_rx_cfg[idx].bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+
+ pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+ __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int ext_disp_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+ __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+ ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int ext_disp_rx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.integer.value[0] =
+ ext_disp_rx_cfg[idx].channels - 2;
+
+ pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].channels);
+
+ return 0;
+}
+
+static int ext_disp_rx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ext_disp_rx_cfg[idx].channels =
+ ucontrol->value.integer.value[0] + 2;
+
+ pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].channels);
+ return 1;
+}
+
+static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int sample_rate_val;
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ext_disp_rx_cfg[idx].sample_rate) {
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 2;
+ break;
+
+ case SAMPLING_RATE_96KHZ:
+ sample_rate_val = 1;
+ break;
+
+ case SAMPLING_RATE_48KHZ:
+ default:
+ sample_rate_val = 0;
+ break;
+ }
+
+ ucontrol->value.integer.value[0] = sample_rate_val;
+ pr_debug("%s: ext_disp_rx[%d].sample_rate = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].sample_rate);
+
+ return 0;
+}
+
+static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
+ break;
+ case 1:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 0:
+ default:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+
+ pr_debug("%s: control value = %ld, ext_disp_rx[%d].sample_rate = %d\n",
+ __func__, ucontrol->value.integer.value[0], idx,
+ ext_disp_rx_cfg[idx].sample_rate);
+ return 0;
+}
+
const struct snd_kcontrol_new msm_common_snd_controls[] = {
SOC_ENUM_EXT("PROXY_RX Channels", proxy_rx_chs,
proxy_rx_ch_get, proxy_rx_ch_put),
@@ -1565,6 +1867,30 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
SOC_ENUM_EXT("QUAT_MI2S_TX SampleRate", quat_mi2s_tx_sample_rate,
mi2s_tx_sample_rate_get,
mi2s_tx_sample_rate_put),
+ SOC_ENUM_EXT("PRIM_MI2S_RX Format", prim_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Format", sec_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_RX Format", tert_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_RX Format", quat_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("PRIM_MI2S_TX Format", prim_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_TX Format", sec_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_TX Format", tert_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_TX Format", quat_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
SOC_ENUM_EXT("PRIM_MI2S_RX Channels", prim_mi2s_rx_chs,
msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
SOC_ENUM_EXT("PRIM_MI2S_TX Channels", prim_mi2s_tx_chs,
@@ -1585,16 +1911,23 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
usb_audio_rx_ch_get, usb_audio_rx_ch_put),
SOC_ENUM_EXT("USB_AUDIO_TX Channels", usb_tx_chs,
usb_audio_tx_ch_get, usb_audio_tx_ch_put),
+ SOC_ENUM_EXT("Display Port RX Channels", ext_disp_rx_chs,
+ ext_disp_rx_ch_get, ext_disp_rx_ch_put),
SOC_ENUM_EXT("USB_AUDIO_RX Format", usb_rx_format,
usb_audio_rx_format_get, usb_audio_rx_format_put),
SOC_ENUM_EXT("USB_AUDIO_TX Format", usb_tx_format,
usb_audio_tx_format_get, usb_audio_tx_format_put),
+ SOC_ENUM_EXT("Display Port RX Bit Format", ext_disp_rx_format,
+ ext_disp_rx_format_get, ext_disp_rx_format_put),
SOC_ENUM_EXT("USB_AUDIO_RX SampleRate", usb_rx_sample_rate,
usb_audio_rx_sample_rate_get,
usb_audio_rx_sample_rate_put),
SOC_ENUM_EXT("USB_AUDIO_TX SampleRate", usb_tx_sample_rate,
usb_audio_tx_sample_rate_get,
usb_audio_tx_sample_rate_put),
+ SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
+ ext_disp_rx_sample_rate_get,
+ ext_disp_rx_sample_rate_put),
SOC_ENUM_EXT("PRI_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
tdm_rx_sample_rate_get,
tdm_rx_sample_rate_put),
@@ -1705,6 +2038,23 @@ static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned bit)
}
}
+static int msm_ext_disp_get_idx_from_beid(int32_t be_id)
+{
+ int idx;
+
+ switch (be_id) {
+ case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+ idx = DP_RX_IDX;
+ break;
+ default:
+ pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id);
+ idx = -EINVAL;
+ break;
+ }
+
+ return idx;
+}
+
/**
* msm_common_be_hw_params_fixup - updates settings of ALSA BE hw params.
*
@@ -1722,6 +2072,7 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
int rc = 0;
+ int idx;
pr_debug("%s: format = %d, rate = %d\n",
__func__, params_format(params), params_rate(params));
@@ -1741,6 +2092,21 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = usb_tx_cfg.channels;
break;
+ case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+ idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id);
+ if (IS_ERR_VALUE(idx)) {
+ pr_err("%s: Incorrect ext disp idx %d\n",
+ __func__, idx);
+ rc = idx;
+ break;
+ }
+
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ ext_disp_rx_cfg[idx].bit_format);
+ rate->min = rate->max = ext_disp_rx_cfg[idx].sample_rate;
+ channels->min = channels->max = ext_disp_rx_cfg[idx].channels;
+ break;
+
case MSM_BACKEND_DAI_AFE_PCM_RX:
channels->min = channels->max = proxy_rx_cfg.channels;
rate->min = rate->max = SAMPLING_RATE_48KHZ;
@@ -1870,48 +2236,64 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[PRIM_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[PRIM_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_PRI_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[PRIM_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[PRIM_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[SEC_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[SEC_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[SEC_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[SEC_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[TERT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[TERT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[TERT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[TERT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[QUAT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[QUAT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[QUAT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[QUAT_MI2S].bit_format);
break;
default:
@@ -2001,6 +2383,7 @@ static u32 get_mi2s_bits_per_sample(u32 bit_format)
u32 bit_per_sample;
switch (bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_LE:
bit_per_sample = 32;
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index 4cf5aefb0e0f..f64074d442dc 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -1861,6 +1861,24 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
},
};
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+ /* DISP PORT BACK END DAI Link */
+ {
+ .name = LPASS_BE_DISPLAY_PORT,
+ .stream_name = "Display Port Playback",
+ .cpu_dai_name = "msm-dai-q6-dp.24608",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-ext-disp-audio-codec-rx",
+ .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+};
+
static struct snd_soc_dai_link msm_ext_tasha_dai_links[
ARRAY_SIZE(msm_ext_common_fe_dai) +
ARRAY_SIZE(msm_ext_tasha_fe_dai) +
@@ -1868,7 +1886,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
ARRAY_SIZE(msm_ext_tasha_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
static struct snd_soc_dai_link msm_ext_tavil_dai_links[
ARRAY_SIZE(msm_ext_common_fe_dai) +
@@ -1877,7 +1896,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
ARRAY_SIZE(msm_ext_tavil_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
/**
* populate_snd_card_dailinks - prepares dailink array and initializes card.
@@ -1951,6 +1971,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
sizeof(msm_wcn_be_dai_links));
len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(msm_ext_tasha_dai_links + len4,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
msm_ext_dai_links = msm_ext_tasha_dai_links;
} else if (strnstr(card->name, "tavil", strlen(card->name))) {
len1 = ARRAY_SIZE(msm_ext_common_fe_dai);
@@ -1987,6 +2016,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
sizeof(msm_wcn_be_dai_links));
len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(msm_ext_tavil_dai_links + len4,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
msm_ext_dai_links = msm_ext_tavil_dai_links;
} else {
dev_err(dev, "%s: failing as no matching card name\n",
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 3fe327ad0442..191db6c2fa9d 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -676,7 +676,7 @@ static int msm_ext_get_spk(struct snd_kcontrol *kcontrol,
static int msm_ext_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
pr_debug("%s()\n", __func__);
if (msm_ext_spk_control == ucontrol->value.integer.value[0])
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 2546380a5a4e..37b34b231b72 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -136,7 +136,7 @@ static struct dev_config int_mi2s_cfg[] = {
[INT2_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
[INT3_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
[INT4_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
- [INT5_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+ [INT5_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
[INT6_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
};
@@ -2896,6 +2896,24 @@ static struct snd_soc_dai_link msm_wsa_be_dai_links[] = {
},
};
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+ /* DISP PORT BACK END DAI Link */
+ {
+ .name = LPASS_BE_DISPLAY_PORT,
+ .stream_name = "Display Port Playback",
+ .cpu_dai_name = "msm-dai-q6-dp.24608",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-ext-disp-audio-codec-rx",
+ .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+};
+
static struct snd_soc_dai_link msm_int_dai_links[
ARRAY_SIZE(msm_int_dai) +
ARRAY_SIZE(msm_int_wsa_dai) +
@@ -2903,7 +2921,8 @@ ARRAY_SIZE(msm_int_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links)+
ARRAY_SIZE(msm_wcn_be_dai_links) +
-ARRAY_SIZE(msm_wsa_be_dai_links)];
+ARRAY_SIZE(msm_wsa_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
static struct snd_soc_card sdm660_card = {
/* snd_soc_card_sdm660 */
@@ -3004,6 +3023,14 @@ static struct snd_soc_card *msm_int_populate_sndcard_dailinks(
sizeof(msm_wsa_be_dai_links));
len1 += ARRAY_SIZE(msm_wsa_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node, "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(dailink + len1,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len1 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
card->dai_link = dailink;
card->num_links = len1;
return card;