-rw-r--r--  Documentation/devicetree/bindings/display/msm/hdmi.txt  2
-rw-r--r--  Documentation/devicetree/bindings/drm/msm/hdmi-display.txt  59
-rw-r--r--  Documentation/devicetree/bindings/media/video/laser-sensor.txt  28
-rw-r--r--  Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt  2
-rw-r--r--  Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt  8
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt  35
-rw-r--r--  Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt  30
-rw-r--r--  Documentation/devicetree/bindings/sound/wcd_codec.txt  5
-rw-r--r--  Documentation/filesystems/Locking  4
-rw-r--r--  Documentation/filesystems/vfs.txt  11
-rw-r--r--  Documentation/vm/page_migration  108
-rw-r--r--  arch/arm/boot/dts/qcom/Makefile  2
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts  4
-rw-r--r--  arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts  5
-rw-r--r--  arch/arm/boot/dts/qcom/msm-audio.dtsi  52
-rw-r--r--  arch/arm/boot/dts/qcom/msm-pm660l.dtsi  1
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi  22
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi  21
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi  46
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi  7
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi  26
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi  19
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi  41
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-sde.dtsi  215
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts  28
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998-v2.dtsi  2
-rw-r--r--  arch/arm/boot/dts/qcom/msm8998.dtsi  13
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-cdp.dtsi  3
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-mdss.dtsi  13
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-mtp.dtsi  3
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-qrd.dtsi  2
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-regulator.dtsi  30
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts  31
-rw-r--r--  arch/arm/boot/dts/qcom/sdm630.dtsi  50
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-audio.dtsi  18
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-common.dtsi  1
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-gpu.dtsi  4
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-mdss.dtsi  13
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi  60
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-pm.dtsi  130
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-qrd.dtsi  20
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts  31
-rw-r--r--  arch/arm/boot/dts/qcom/sdm660.dtsi  20
-rw-r--r--  arch/arm/configs/sdm660-perf_defconfig  4
-rw-r--r--  arch/arm/configs/sdm660_defconfig  2
-rw-r--r--  arch/arm64/Kconfig.debug  3
-rw-r--r--  arch/arm64/configs/msmcortex-perf_defconfig  1
-rw-r--r--  arch/arm64/configs/msmcortex_defconfig  2
-rw-r--r--  arch/arm64/configs/msmcortex_mediabox_defconfig  14
-rw-r--r--  arch/arm64/configs/sdm660-perf_defconfig  4
-rw-r--r--  arch/arm64/configs/sdm660_defconfig  3
-rw-r--r--  arch/arm64/include/asm/pgtable.h  29
-rw-r--r--  arch/arm64/kernel/stacktrace.c  4
-rw-r--r--  drivers/base/dma-removed.c  3
-rw-r--r--  drivers/base/firmware_class.c  3
-rw-r--r--  drivers/block/zram/zram_drv.c  5
-rw-r--r--  drivers/bluetooth/btfm_slim_codec.c  54
-rw-r--r--  drivers/char/diag/diagchar_core.c  3
-rw-r--r--  drivers/char/diag/diagfwd.c  94
-rw-r--r--  drivers/char/diag/diagfwd.h  5
-rw-r--r--  drivers/char/diag/diagfwd_glink.c  44
-rw-r--r--  drivers/char/diag/diagfwd_glink.h  4
-rw-r--r--  drivers/clk/msm/clock-gcc-8998.c  49
-rw-r--r--  drivers/clk/msm/clock-generic.c  14
-rw-r--r--  drivers/clk/msm/clock-mmss-8998.c  16
-rw-r--r--  drivers/clk/msm/clock-osm.c  62
-rw-r--r--  drivers/clk/qcom/clk-cpu-osm.c  27
-rw-r--r--  drivers/clk/qcom/clk-rcg2.c  22
-rw-r--r--  drivers/clk/qcom/mmcc-sdm660.c  2
-rw-r--r--  drivers/crypto/msm/qcedev.c  10
-rw-r--r--  drivers/gpu/drm/msm/Kconfig  8
-rw-r--r--  drivers/gpu/drm/msm/Makefile  20
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h  21
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h  42
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c  96
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h  114
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c  104
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h  3497
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c  1345
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.h  187
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_power.c  509
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_preempt.c  383
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_snapshot.c  796
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h  27
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c  57
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c  453
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h  215
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h  120
-rw-r--r--  drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c  4
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h  6
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c  11
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c  4
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c  1352
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h  417
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c  393
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c  417
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c  227
-rw-r--r--  drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h  300
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.c  38
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.h  6
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h  3
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c  19
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c  13
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h  4
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c  12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c  2
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c  7
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c  12
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h  1
-rw-r--r--  drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c  12
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c  173
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h  76
-rw-r--r--  drivers/gpu/drm/msm/msm_fb.c  26
-rw-r--r--  drivers/gpu/drm/msm/msm_fbdev.c  7
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c  142
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.h  15
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_submit.c  38
-rw-r--r--  drivers/gpu/drm/msm/msm_gem_vma.c  69
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c  330
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h  120
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.c  239
-rw-r--r--  drivers/gpu/drm/msm/msm_iommu.h  37
-rw-r--r--  drivers/gpu/drm/msm/msm_mmu.h  7
-rw-r--r--  drivers/gpu/drm/msm/msm_rd.c  6
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.c  13
-rw-r--r--  drivers/gpu/drm/msm/msm_ringbuffer.h  19
-rw-r--r--  drivers/gpu/drm/msm/msm_smmu.c  14
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot.c  105
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot.h  85
-rw-r--r--  drivers/gpu/drm/msm/msm_snapshot_api.h  121
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.c  38
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_connector.h  17
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_core_irq.c  95
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_core_irq.h  16
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys.h  4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c  29
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_formats.c  99
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_formats.h  11
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_irq.c  80
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.c  145
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_kms.h  4
-rw-r--r--  drivers/gpu/drm/msm/sde/sde_plane.c  12
-rw-r--r--  drivers/gpu/drm/msm/sde_io_util.c  502
-rw-r--r--  drivers/gpu/drm/msm/sde_power_handle.c  2
-rw-r--r--  drivers/gpu/msm/adreno_a5xx.c  1
-rw-r--r--  drivers/gpu/msm/adreno_debugfs.c  3
-rw-r--r--  drivers/gpu/msm/adreno_drawctxt.c  10
-rw-r--r--  drivers/gpu/msm/adreno_ringbuffer.c  3
-rw-r--r--  drivers/gpu/msm/kgsl.c  2
-rw-r--r--  drivers/gpu/msm/kgsl_debugfs.c  7
-rw-r--r--  drivers/gpu/msm/kgsl_debugfs.h  4
-rw-r--r--  drivers/gpu/msm/kgsl_device.h  6
-rw-r--r--  drivers/gpu/msm/kgsl_snapshot.c  8
-rw-r--r--  drivers/hwtracing/coresight/Kconfig  11
-rw-r--r--  drivers/i2c/busses/i2c-msm-v2.c  70
-rw-r--r--  drivers/iommu/arm-smmu.c  29
-rw-r--r--  drivers/iommu/io-pgtable-arm.c  158
-rw-r--r--  drivers/iommu/io-pgtable.h  1
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.c  8
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp.h  3
-rw-r--r--  drivers/media/platform/msm/camera_v2/isp/msm_isp47.c  23
-rw-r--r--  drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c  53
-rw-r--r--  drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c  11
-rw-r--r--  drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c  27
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h  3
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h  3
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c  24
-rw-r--r--  drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h  3
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c  39
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h  3
-rw-r--r--  drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c  20
-rw-r--r--  drivers/misc/hdcp.c  3
-rw-r--r--  drivers/misc/qseecom.c  4
-rw-r--r--  drivers/mmc/card/block.c  12
-rw-r--r--  drivers/mmc/host/cmdq_hci.c  57
-rw-r--r--  drivers/mmc/host/cmdq_hci.h  8
-rw-r--r--  drivers/mmc/host/sdhci-msm.c  2
-rw-r--r--  drivers/net/ethernet/msm/msm_rmnet_mhi.c  4
-rw-r--r--  drivers/net/wireless/ath/ath10k/ce.c  2
-rw-r--r--  drivers/net/wireless/ath/wil6210/Makefile  1
-rw-r--r--  drivers/net/wireless/ath/wil6210/pcie_bus.c  3
-rw-r--r--  drivers/net/wireless/ath/wil6210/sysfs.c  126
-rw-r--r--  drivers/net/wireless/ath/wil6210/wil6210.h  2
-rw-r--r--  drivers/pci/host/pci-msm.c  14
-rw-r--r--  drivers/platform/msm/Kconfig  7
-rw-r--r--  drivers/platform/msm/Makefile  1
-rw-r--r--  drivers/platform/msm/gsi/gsi.c  72
-rw-r--r--  drivers/platform/msm/gsi/gsi.h  27
-rw-r--r--  drivers/platform/msm/gsi/gsi_reg.h  6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa.c  40
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_client.c  3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dma.c  6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_dp.c  2
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_flt.c  3
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c  26
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_uc.c  6
-rw-r--r--  drivers/platform/msm/ipa/ipa_v3/ipa_utils.c  7
-rw-r--r--  drivers/platform/msm/mhi/mhi.h  154
-rw-r--r--  drivers/platform/msm/mhi/mhi_bhi.c  27
-rw-r--r--  drivers/platform/msm/mhi/mhi_event.c  63
-rw-r--r--  drivers/platform/msm/mhi/mhi_hwio.h  18
-rw-r--r--  drivers/platform/msm/mhi/mhi_iface.c  128
-rw-r--r--  drivers/platform/msm/mhi/mhi_init.c  109
-rw-r--r--  drivers/platform/msm/mhi/mhi_isr.c  329
-rw-r--r--  drivers/platform/msm/mhi/mhi_macros.h  13
-rw-r--r--  drivers/platform/msm/mhi/mhi_main.c  932
-rw-r--r--  drivers/platform/msm/mhi/mhi_mmio_ops.c  62
-rw-r--r--  drivers/platform/msm/mhi/mhi_pm.c  306
-rw-r--r--  drivers/platform/msm/mhi/mhi_ring_ops.c  12
-rw-r--r--  drivers/platform/msm/mhi/mhi_ssr.c  74
-rw-r--r--  drivers/platform/msm/mhi/mhi_states.c  785
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.c  127
-rw-r--r--  drivers/platform/msm/mhi/mhi_sys.h  4
-rw-r--r--  drivers/platform/msm/mhi_uci/mhi_uci.c  2
-rw-r--r--  drivers/platform/msm/msm_ext_display.c (renamed from drivers/video/fbdev/msm/msm_ext_display.c)  246
-rw-r--r--  drivers/power/supply/qcom/battery.c  17
-rw-r--r--  drivers/power/supply/qcom/qpnp-smb2.c  3
-rw-r--r--  drivers/power/supply/qcom/smb-lib.c  70
-rw-r--r--  drivers/power/supply/qcom/smb-lib.h  1
-rw-r--r--  drivers/power/supply/qcom/smb138x-charger.c  60
-rw-r--r--  drivers/regulator/cprh-kbss-regulator.c  121
-rw-r--r--  drivers/regulator/qpnp-labibb-regulator.c  59
-rw-r--r--  drivers/regulator/qpnp-lcdb-regulator.c  23
-rw-r--r--  drivers/sensors/sensors_ssc.c  17
-rw-r--r--  drivers/soc/qcom/Kconfig  9
-rw-r--r--  drivers/soc/qcom/Makefile  1
-rw-r--r--  drivers/soc/qcom/glink.c  4
-rw-r--r--  drivers/soc/qcom/msm_smem.c  2
-rw-r--r--  drivers/soc/qcom/peripheral-loader.c  27
-rw-r--r--  drivers/soc/qcom/qdsp6v2/adsp-loader.c  16
-rw-r--r--  drivers/soc/qcom/qdsp6v2/apr_tal_glink.c  1
-rw-r--r--  drivers/soc/qcom/qdsp6v2/cdsp-loader.c  22
-rw-r--r--  drivers/soc/qcom/qpnp-haptic.c  354
-rw-r--r--  drivers/soc/qcom/qpnp-pbs.c  361
-rw-r--r--  drivers/soc/qcom/service-locator.c  5
-rw-r--r--  drivers/soc/qcom/smcinvoke.c  7
-rw-r--r--  drivers/soc/qcom/spcom.c  20
-rw-r--r--  drivers/spi/spi_qsd.c  55
-rw-r--r--  drivers/usb/gadget/function/f_gsi.c  29
-rw-r--r--  drivers/usb/gadget/function/f_mass_storage.c  8
-rw-r--r--  drivers/usb/pd/policy_engine.c  2
-rw-r--r--  drivers/video/fbdev/msm/Kconfig  2
-rw-r--r--  drivers/video/fbdev/msm/Makefile  1
-rw-r--r--  drivers/video/fbdev/msm/mdss.h  2
-rw-r--r--  drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c  2
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.c  34
-rw-r--r--  drivers/video/fbdev/msm/mdss_fb.h  1
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.c  172
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp.h  2
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_ctl.c  5
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_intf_video.c  16
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_overlay.c  26
-rw-r--r--  drivers/video/fbdev/msm/mdss_mdp_pipe.c  5
-rw-r--r--  drivers/virtio/virtio_balloon.c  51
-rw-r--r--  include/dt-bindings/clock/msm-clocks-8998.h  4
-rw-r--r--  include/dt-bindings/clock/msm-clocks-hwio-8998.h  4
-rw-r--r--  include/linux/balloon_compaction.h  51
-rw-r--r--  include/linux/compaction.h  17
-rw-r--r--  include/linux/fs.h  2
-rw-r--r--  include/linux/iommu.h  1
-rw-r--r--  include/linux/ksm.h  3
-rw-r--r--  include/linux/migrate.h  17
-rw-r--r--  include/linux/mm.h  1
-rw-r--r--  include/linux/mmzone.h  6
-rw-r--r--  include/linux/msm_ext_display.h  2
-rw-r--r--  include/linux/msm_gsi.h  18
-rw-r--r--  include/linux/msm_kgsl.h  21
-rw-r--r--  include/linux/msm_mhi.h  7
-rw-r--r--  include/linux/page-flags.h  32
-rw-r--r--  include/linux/qpnp/qpnp-pbs.h  25
-rw-r--r--  include/linux/regulator/qpnp-labibb-regulator.h  23
-rw-r--r--  include/linux/sde_io_util.h  113
-rw-r--r--  include/linux/vm_event_item.h  1
-rw-r--r--  include/linux/zsmalloc.h  4
-rw-r--r--  include/trace/events/compaction.h  55
-rw-r--r--  include/uapi/drm/msm_drm.h  27
-rw-r--r--  include/uapi/linux/magic.h  2
-rw-r--r--  include/uapi/sound/compress_offload.h  6
-rw-r--r--  kernel/sched/core_ctl.c  24
-rw-r--r--  mm/balloon_compaction.c  94
-rw-r--r--  mm/compaction.c  313
-rw-r--r--  mm/internal.h  1
-rw-r--r--  mm/ksm.c  4
-rw-r--r--  mm/memory_hotplug.c  15
-rw-r--r--  mm/migrate.c  255
-rw-r--r--  mm/page_alloc.c  5
-rw-r--r--  mm/util.c  6
-rw-r--r--  mm/vmscan.c  165
-rw-r--r--  mm/vmstat.c  1
-rw-r--r--  mm/zsmalloc.c  1426
-rwxr-xr-x  scripts/checkpatch.pl  24
-rw-r--r--  sound/soc/codecs/Kconfig  2
-rw-r--r--  sound/soc/codecs/msm_sdw/msm_sdw_cdc.c  151
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c  55
-rw-r--r--  sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c  3
-rw-r--r--  sound/soc/codecs/wsa881x-regmap.c  3
-rw-r--r--  sound/soc/codecs/wsa881x-temp-sensor.c  10
-rw-r--r--  sound/soc/codecs/wsa881x.c  28
-rw-r--r--  sound/soc/codecs/wsa881x.h  1
-rw-r--r--  sound/soc/msm/Kconfig  2
-rw-r--r--  sound/soc/msm/msm-cpe-lsm.c  61
-rw-r--r--  sound/soc/msm/msm-dai-fe.c  2
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-lsm-client.c  91
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c  34
-rw-r--r--  sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c  17
-rw-r--r--  sound/soc/msm/sdm660-common.c  383
-rw-r--r--  sound/soc/msm/sdm660-ext-dai-links.c  42
-rw-r--r--  sound/soc/msm/sdm660-external.c  2
-rw-r--r--  sound/soc/msm/sdm660-internal.c  31
316 files changed, 20756 insertions, 4604 deletions
diff --git a/Documentation/devicetree/bindings/display/msm/hdmi.txt b/Documentation/devicetree/bindings/display/msm/hdmi.txt
index 379ee2ea9a3d..a0615ac9d73e 100644
--- a/Documentation/devicetree/bindings/display/msm/hdmi.txt
+++ b/Documentation/devicetree/bindings/display/msm/hdmi.txt
@@ -3,6 +3,7 @@ Qualcomm adreno/snapdragon hdmi output
Required properties:
- compatible: one of the following
* "qcom,hdmi-tx-8996"
+ * "qcom,hdmi-tx-8998"
* "qcom,hdmi-tx-8994"
* "qcom,hdmi-tx-8084"
* "qcom,hdmi-tx-8974"
@@ -21,6 +22,7 @@ Required properties:
Optional properties:
- qcom,hdmi-tx-mux-en-gpio: hdmi mux enable pin
+- qcom,hdmi-tx-hpd5v-gpio: hdmi 5v boost pin
- qcom,hdmi-tx-mux-sel-gpio: hdmi mux select pin
- power-domains: reference to the power domain(s), if available.
- pinctrl-names: the pin control state names; should contain "default"
diff --git a/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt b/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt
new file mode 100644
index 000000000000..aaa3722659ab
--- /dev/null
+++ b/Documentation/devicetree/bindings/drm/msm/hdmi-display.txt
@@ -0,0 +1,59 @@
+Qualcomm Technologies, Inc. Adreno/Snapdragon hdmi display manager
+
+Required properties:
+- compatible: "qcom,hdmi-display"
+- label: label of this display manager
+
+Optional properties:
+- qcom,display-type: display type of this manager. It could be "primary",
+ "secondary", "tertiary", etc.
+- qcom,non-pluggable: Boolean to indicate that the display is non-pluggable.
+- qcom,customize-modes: Customized modes used when the display is non-pluggable.
+- qcom,customize-mode-id: Customized mode node.
+- qcom,mode-name: String which indicates the mode name to be used by the
+  connector in non-pluggable mode. Refer to the example below for details.
+  In pluggable mode, the modes are filled in after EDID parsing.
+- qcom,mode-h-active: Horizontal active pixels for this mode.
+- qcom,mode-h-front-porch: Horizontal front porch in pixels for this mode.
+- qcom,mode-h-pulse-width: Horizontal sync width in pixels for this mode.
+- qcom,mode-h-back-porch: Horizontal back porch in pixels for this mode.
+- qcom,mode-h-active-high: Boolean to indicate if mode horizontal polarity is active high.
+- qcom,mode-v-active: Vertical active lines for this mode.
+- qcom,mode-v-front-porch: Vertical front porch in lines for this mode.
+- qcom,mode-v-pulse-width: Vertical sync width in lines for this mode.
+- qcom,mode-v-back-porch: Vertical back porch in lines for this mode.
+- qcom,mode-v-active-high: Boolean to indicate if mode vertical polarity is active high.
+- qcom,mode-refersh-rate: Mode refresh rate in hertz.
+- qcom,mode-clock-in-khz: Mode pixel clock in KHz.
+
+Example:
+
+/ {
+ ...
+
+ hdmi_display: qcom,hdmi-display {
+ compatible = "qcom,hdmi-display";
+ label = "hdmi_display";
+ qcom,display-type = "secondary";
+ qcom,non-pluggable;
+ qcom,customize-modes {
+ qcom,customize-mode-id@0 {
+ qcom,mode-name = "3840x2160@30Hz";
+ qcom,mode-h-active = <3840>;
+ qcom,mode-h-front-porch = <176>;
+ qcom,mode-h-pulse-width = <88>;
+ qcom,mode-h-back-porch = <296>;
+ qcom,mode-h-active-high;
+ qcom,mode-v-active = <2160>;
+ qcom,mode-v-front-porch = <8>;
+ qcom,mode-v-pulse-width = <10>;
+ qcom,mode-v-back-porch = <72>;
+ qcom,mode-v-active-high;
+ qcom,mode-refersh-rate = <30>;
+ qcom,mode-clock-in-khz = <297000>;
+ };
+ };
+ };
+
+};
diff --git a/Documentation/devicetree/bindings/media/video/laser-sensor.txt b/Documentation/devicetree/bindings/media/video/laser-sensor.txt
new file mode 100644
index 000000000000..1bcb0b93cb10
--- /dev/null
+++ b/Documentation/devicetree/bindings/media/video/laser-sensor.txt
@@ -0,0 +1,28 @@
+Laser Sensor Device Tree Bindings.
+========================================
+
+Boards with the Laser Sensor connected to CCI shall have the following
+properties:
+
+Required node properties:
+ - cell-index: cci hardware core index
+ - compatible:
+ - "st,stmvl53l0" : STMiecroelectronics VL53L0 Laser sensor.
+ - reg : offset and length of the register set for the device
+ - qcom, cci-master: cci master the sensor connected to
+ - cam_cci-supply : cci voltage regulator used
+ - cam_laser-supply: laser sensor voltage regulator
+ - qcom,cam-vreg-name: voltage regulators name
+ - qcom, cam-vreg-min-voltage: specify minimum voltage level for
+ regulators used
+ - qcom, cam-vreg-max-voltage: specify maximum voltage level for
+ regulators used
+ - pinctrl-names : should specify the pin control groups followed by
+ the definition of each group
+ - gpios : should contain phandle to gpio controller node and array of
+ #gpio-cells specifying specific gpio (controller specific)
+ - qcom,gpio-req-tbl-num : contains index to gpios specific to the sensor
+ - qcom,gpio-req-tbl-flags : should contain direction of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
+ - qcom,gpio-req-tbl-label : should contain name of gpios present in
+ qcom,gpio-req-tbl-num property (in the same order)
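
For reference, a node using this binding might look like the following
sketch; the property values mirror the tof0 node that this same patch adds
to msm8998-camera-sensor-cdp.dtsi, and the regulator, pinctrl, and GPIO
phandles are board-specific assumptions:

	tof0: qcom,tof@0 {
		cell-index = <0>;
		reg = <0x29>;
		compatible = "st,stmvl53l0";
		qcom,cci-master = <0>;
		cam_cci-supply = <&pm8998_lvs1>;
		cam_laser-supply = <&pmi8998_bob>;
		qcom,cam-vreg-name = "cam_cci", "cam_laser";
		qcom,cam-vreg-min-voltage = <0 0>;
		qcom,cam-vreg-max-voltage = <0 3600000>;
		pinctrl-names = "cam_default", "cam_suspend";
		pinctrl-0 = <&cam_tof_active>;
		pinctrl-1 = <&cam_tof_suspend>;
		gpios = <&tlmm 27 0>, <&tlmm 126 0>;
		qcom,gpio-req-tbl-num = <0 1>;
		qcom,gpio-req-tbl-flags = <0 0>;
		qcom,gpio-req-tbl-label = "CAM_TOF", "CAM_CE";
	};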
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
index d08ca957c954..c9cfc889faba 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-labibb-regulator.txt
@@ -149,6 +149,8 @@ LAB subnode optional properties:
already. If it is not specified, then
output voltage can be configured to
any value in the allowed limit.
+- qcom,notify-lab-vreg-ok-sts: A boolean property which, when set, causes
+				the driver to poll and notify the
+				lab_vreg_ok status.
Following properties are available only for PM660A:
diff --git a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
index 8b3a38da0834..63da8ecbfa4c 100644
--- a/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
+++ b/Documentation/devicetree/bindings/regulator/qpnp-lcdb-regulator.txt
@@ -26,6 +26,13 @@ First Level Node - LCDB module
Value type: <prop-encoded-array>
Definition: Base address of the LCDB SPMI peripheral.
+- qcom,force-module-reenable
+ Usage: required if using SW mode for module enable
+ Value type: <bool>
+ Definition: This enables the workaround to force enable
+ the vph_pwr_2p5_ok signal required for
+ turning on the LCDB module.
+
Touch-to-wake (TTW) properties:
TTW supports 2 modes of operation - HW and SW. In the HW mode the enable/disable
@@ -59,7 +66,6 @@ main node.
Definition: ON time (in ms) for the VDISP/VDISN signals.
Possible values are 4, 8, 16, 32.
-
========================================
Second Level Nodes - LDO/NCP/BOOST block
========================================
diff --git a/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt b/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt
index fe94e40a27cd..a7848153f83c 100644
--- a/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt
+++ b/Documentation/devicetree/bindings/soc/qcom/qpnp-haptic.txt
@@ -66,6 +66,40 @@ Optional properties when qcom,actuator-type is "lra"
"none", "opt1", "opt2" and "opt3" (default)
- qcom,lra-res-cal-period : Auto resonance calibration period. The values range from
4 to 32(default)
+ - qcom,perform-lra-auto-resonance-search : boolean, define this property if:
+        a) the underlying PMI chip does not have a register in the MISC block
+           to read the error percentage in RC clock, and
+        b) the actuator type is LRA.
+        Defining this causes the auto resonance search algorithm to be
+        performed for such devices. This property is not defined by default.
+
+- qcom,drive-period-code-max-limit-percent-variation: RATE_CFG1 and RATE_CFG2 registers will
+	be updated with the values from the AUTO_RES_LO and AUTO_RES_HI registers
+	only if the variation from the resonant frequency is within the value
+	given by this property on the higher side.
+	The default value is 25, which means that if the drive period code computed
+	from the AUTO_RES registers is more than 25 percent above the existing drive
+	period code, the driver does not update the RATE_CFG registers.
+- qcom,drive-period-code-min-limit-percent-variation: RATE_CFG1 and RATE_CFG2 registers will
+	be updated with the values from the AUTO_RES_LO and AUTO_RES_HI registers
+	only if the variation from the resonant frequency is within the value
+	given by this property on the lower side.
+	The default value is 25, which means that if the drive period code computed
+	from the AUTO_RES registers is more than 25 percent below the existing drive
+	period code, the driver does not update the RATE_CFG registers.
+
+Optional properties when qcom,lra-auto-res-mode is "qwd"
+ - qcom,time-required-to-generate-back-emf-us: Time period required to generate sufficient
+ back-emf (in case of QWD mode only) in us. For auto resonance
+		detection to work properly, sufficient back-emf has to be
+ generated. In general, back-emf takes some time to build up.
+ When the auto resonance mode is chosen as QWD, high-z will
+ be applied for every LRA cycle and hence there won't be
+ enough back-emf at the start-up. So we need to drive the
+ motor for a few LRA cycles. Hence, auto resonance detection
+ is enabled after this delay period after the PLAY bit is
+ asserted. The default value is 20000us.
Example:
qcom,haptic@c000 {
@@ -94,4 +128,5 @@ Example:
qcom,lra-high-z = "opt1";
qcom,lra-auto-res-mode = "qwd";
qcom,lra-res-cal-period = <4>;
+ qcom,time-required-to-generate-back-emf-us = <20000>;
};
diff --git a/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
new file mode 100644
index 000000000000..d7aefbf9c19c
--- /dev/null
+++ b/Documentation/devicetree/bindings/soc/qcom/qpnp-pbs.txt
@@ -0,0 +1,30 @@
+QPNP PBS
+
+QPNP (Qualcomm Technologies, Inc. Plug N Play) PBS is a programmable boot
+sequence, and this driver helps client drivers trigger such sequences as
+configured in the PMIC.
+
+This document describes the bindings for QPNP PBS driver.
+
+=======================
+Required Node Structure
+=======================
+
+- compatible
+ Usage: required
+ Value type: <string>
+ Definition: should be "qcom,qpnp-pbs".
+
+- reg
+ Usage: required
+ Value type: <prop-encoded-array>
+ Definition: Base address of the PBS registers.
+
+
+=======
+Example
+=======
+ pm660l_pbs: qcom,pbs@7300 {
+ compatible = "qcom,qpnp-pbs";
+ reg = <0x7300 0x100>;
+ };
diff --git a/Documentation/devicetree/bindings/sound/wcd_codec.txt b/Documentation/devicetree/bindings/sound/wcd_codec.txt
index b7a81efb768c..5b5d43a6323c 100644
--- a/Documentation/devicetree/bindings/sound/wcd_codec.txt
+++ b/Documentation/devicetree/bindings/sound/wcd_codec.txt
@@ -514,10 +514,6 @@ Required properties:
which is also existing driver WSA881x that represents
soundwire slave devices.
-Optional Properties:
- - qcom,cache-always : Boolean. This property is used in WSA slave
- device to use cacheable for all registers.
-
Example:
msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
@@ -535,7 +531,6 @@ msm_sdw_codec: qcom,msm-sdw-codec@152c1000 {
compatible = "qcom,wsa881x";
reg = <0x00 0x20170212>;
qcom,spkr-sd-n-gpio = <&tlmm 80 0>;
- qcom,cache-always;
};
};
};
diff --git a/Documentation/filesystems/Locking b/Documentation/filesystems/Locking
index 06d443450f21..539ac9c78ccf 100644
--- a/Documentation/filesystems/Locking
+++ b/Documentation/filesystems/Locking
@@ -197,7 +197,9 @@ prototypes:
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
int (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+ bool (*isolate_page) (struct page *, isolate_mode_t);
int (*migratepage)(struct address_space *, struct page *, struct page *);
+ void (*putback_page) (struct page *);
int (*launder_page)(struct page *);
int (*is_partially_uptodate)(struct page *, unsigned long, unsigned long);
int (*error_remove_page)(struct address_space *, struct page *);
@@ -221,7 +223,9 @@ invalidatepage: yes
releasepage: yes
freepage: yes
direct_IO:
+isolate_page: yes
migratepage: yes (both)
+putback_page: yes
launder_page: yes
is_partially_uptodate: yes
error_remove_page: yes
diff --git a/Documentation/filesystems/vfs.txt b/Documentation/filesystems/vfs.txt
index 8c6f07ad373a..66ddf1e8fe28 100644
--- a/Documentation/filesystems/vfs.txt
+++ b/Documentation/filesystems/vfs.txt
@@ -593,9 +593,14 @@ struct address_space_operations {
int (*releasepage) (struct page *, int);
void (*freepage)(struct page *);
ssize_t (*direct_IO)(struct kiocb *, struct iov_iter *iter, loff_t offset);
+ /* isolate a page for migration */
+ bool (*isolate_page) (struct page *, isolate_mode_t);
/* migrate the contents of a page to the specified target */
int (*migratepage) (struct page *, struct page *);
+ /* put migration-failed page back to right list */
+ void (*putback_page) (struct page *);
int (*launder_page) (struct page *);
+
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
void (*is_dirty_writeback) (struct page *, bool *, bool *);
@@ -748,6 +753,10 @@ struct address_space_operations {
and transfer data directly between the storage and the
application's address space.
+ isolate_page: Called by the VM when isolating a movable non-lru page.
+	If the page is successfully isolated, the VM marks it as PG_isolated
+ via __SetPageIsolated.
+
migrate_page: This is used to compact the physical memory usage.
If the VM wants to relocate a page (maybe off a memory card
that is signalling imminent failure) it will pass a new page
@@ -755,6 +764,8 @@ struct address_space_operations {
transfer any private data across and update any references
that it has to the page.
+  putback_page: Called by the VM when an isolated page's migration fails.
+
launder_page: Called before freeing a page - it writes back the dirty page. To
prevent redirtying the page, it is kept locked during the whole
operation.
diff --git a/Documentation/vm/page_migration b/Documentation/vm/page_migration
index fea5c0864170..94bd9c11c4e0 100644
--- a/Documentation/vm/page_migration
+++ b/Documentation/vm/page_migration
@@ -142,5 +142,111 @@ Steps:
20. The new page is moved to the LRU and can be scanned by the swapper
etc again.
-Christoph Lameter, May 8, 2006.
+C. Non-LRU page migration
+-------------------------
+
+Although migration was originally aimed at reducing memory access latency
+on NUMA systems, compaction, which wants to create high-order pages, is
+also a main customer.
+
+The current problem with the implementation is that it is designed to migrate
+only *LRU* pages. However, there are potential non-LRU pages which could be
+migrated by drivers, for example, zsmalloc and virtio-balloon pages.
+
+For virtio-balloon pages, parts of the migration code path have been hooked
+up with virtio-balloon-specific functions to intercept the migration logic.
+This is too specific to one driver, so other drivers that want to make their
+pages movable would have to add their own hooks to the migration path.
+
+To overcome the problem, the VM supports non-LRU page migration, which
+provides generic functions for non-LRU movable pages without driver-specific
+hooks in the migration path.
+
+If a driver wants to make its own pages movable, it should define three
+functions, which are function pointers in struct address_space_operations.
+
+1. bool (*isolate_page) (struct page *page, isolate_mode_t mode);
+
+What the VM expects of a driver's isolate_page function is to return *true*
+if the driver isolates the page successfully. On returning true, the VM marks
+the page as PG_isolated so that concurrent isolation on several CPUs skips
+the page. If a driver cannot isolate the page, it should return *false*.
+
+Once a page is successfully isolated, the VM uses the page.lru fields, so the
+driver shouldn't expect to preserve the values in those fields.
+
+2. int (*migratepage) (struct address_space *mapping,
+ struct page *newpage, struct page *oldpage, enum migrate_mode);
+
+After isolation, the VM calls the driver's migratepage with the isolated
+page. The role of migratepage is to move the contents of the old page to the
+new page and to set up the struct page fields of newpage. Keep in mind that
+you should indicate to the VM that the oldpage is no longer movable via
+__ClearPageMovable() under the page lock if you migrated the oldpage
+successfully and return MIGRATEPAGE_SUCCESS. If the driver cannot migrate the
+page at the moment, it can return -EAGAIN. On -EAGAIN, the VM will retry page
+migration in a short time because it interprets -EAGAIN as "temporary
+migration failure". On returning any error other than -EAGAIN, the VM will
+give up on migrating the page without retrying this time.
+
+The driver shouldn't touch the page.lru field, which the VM is using, in
+these functions.
+
+3. void (*putback_page)(struct page *);
+
+If migration fails on an isolated page, the VM should return the isolated
+page to the driver, so the VM calls the driver's putback_page with the
+migration-failed page. In this function, the driver should put the isolated
+page back into its own data structure.
+
+4. Non-LRU movable page flags
+
+There are two page flags for supporting non-LRU movable pages.
+
+* PG_movable
+
+The driver should use the function below to make a page movable, under page_lock.
+
+ void __SetPageMovable(struct page *page, struct address_space *mapping)
+
+It takes an address_space argument for registering the migration family of
+functions, which will be called by the VM. Strictly speaking, PG_movable is
+not a real flag of struct page. Rather, the VM reuses the lower bits of
+page->mapping to represent it:
+
+ #define PAGE_MAPPING_MOVABLE 0x2
+ page->mapping = page->mapping | PAGE_MAPPING_MOVABLE;
+
+so the driver shouldn't access page->mapping directly. Instead, the driver
+should use page_mapping, which masks off the low two bits of page->mapping
+under the page lock, so it can get the right struct address_space.
+
+For testing whether a page is non-LRU movable, the VM provides the
+__PageMovable function. However, it doesn't guarantee identification of
+non-LRU movable pages because the page->mapping field is unified with other
+variables in struct page. Moreover, if the driver releases the page after
+isolation by the VM, page->mapping doesn't have a stable value although it
+has PAGE_MAPPING_MOVABLE set (look at __ClearPageMovable). But __PageMovable
+is a cheap way to tell whether a page is LRU or non-LRU movable once the page
+has been isolated, because LRU pages can never have PAGE_MAPPING_MOVABLE in
+page->mapping. It is also good for just peeking at non-LRU movable pages
+before the more expensive check with lock_page during pfn scanning to select
+a victim.
+
+For guaranteed identification of non-LRU movable pages, the VM provides the
+PageMovable function. Unlike __PageMovable, PageMovable validates
+page->mapping and mapping->a_ops->isolate_page under lock_page. The lock_page
+prevents sudden destruction of page->mapping.
+
+Drivers using __SetPageMovable should clear the flag via __ClearPageMovable
+under the page lock before releasing the page.
+
+* PG_isolated
+
+To prevent concurrent isolation among several CPUs, the VM marks an isolated
+page as PG_isolated under lock_page. So if a CPU encounters a PG_isolated
+non-LRU movable page, it can skip it. The driver doesn't need to manipulate
+the flag because the VM will set/clear it automatically. Keep in mind that if
+the driver sees a PG_isolated page, it means the page has been isolated by
+the VM, so it shouldn't touch the page.lru field.
+
+PG_isolated is an alias of the PG_reclaim flag, so the driver shouldn't use
+this flag for its own purposes.
+
+Christoph Lameter, May 8, 2006.
+Minchan Kim, Mar 28, 2016.
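
Taken together, the contract above can be illustrated with a sketch of a
hypothetical driver's callbacks. This is not code from this patch; every
my_* name is invented for illustration, and only the rules documented above
are assumed:

	/*
	 * Illustrative sketch only: a hypothetical driver wiring up the
	 * three callbacks documented above. Pages are assumed to have been
	 * marked movable with __SetPageMovable(page, mapping) when the
	 * driver allocated them.
	 */
	static bool my_isolate_page(struct page *page, isolate_mode_t mode)
	{
		struct my_dev *dev = my_dev_from_page(page);	/* hypothetical lookup */

		spin_lock(&dev->lock);
		if (my_page_in_use(page)) {
			/* Cannot isolate now; the VM will skip this page. */
			spin_unlock(&dev->lock);
			return false;
		}
		/* After isolation the VM owns page->lru, so stop using it. */
		list_del_init(&page->lru);
		spin_unlock(&dev->lock);
		return true;	/* The VM now marks the page PG_isolated. */
	}

	static int my_migratepage(struct address_space *mapping,
				  struct page *newpage, struct page *oldpage,
				  enum migrate_mode mode)
	{
		struct my_dev *dev = my_dev_from_page(oldpage);

		if (!my_ready_to_migrate(dev))
			return -EAGAIN;	/* Temporary failure; the VM retries. */

		my_copy_contents(newpage, oldpage);	/* move data, fix metadata */

		/* Caller holds the page lock; oldpage is no longer movable. */
		__ClearPageMovable(oldpage);
		return MIGRATEPAGE_SUCCESS;
	}

	static void my_putback_page(struct page *page)
	{
		struct my_dev *dev = my_dev_from_page(page);

		/* Migration failed: take the isolated page back into our lists. */
		spin_lock(&dev->lock);
		list_add_tail(&page->lru, &dev->page_list);
		spin_unlock(&dev->lock);
	}

	static const struct address_space_operations my_aops = {
		.isolate_page	= my_isolate_page,
		.migratepage	= my_migratepage,
		.putback_page	= my_putback_page,
	};

Note that the isolate_page and putback_page entries match the prototypes
this patch adds to Documentation/filesystems/Locking and vfs.txt.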
diff --git a/arch/arm/boot/dts/qcom/Makefile b/arch/arm/boot/dts/qcom/Makefile
index c6cff2e95bb9..db37dc6f31bb 100644
--- a/arch/arm/boot/dts/qcom/Makefile
+++ b/arch/arm/boot/dts/qcom/Makefile
@@ -164,6 +164,7 @@ dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
sdm660-headset-jacktype-no-rcm.dtb \
sdm660-pm660a-headset-jacktype-no-cdp.dtb \
sdm660-pm660a-headset-jacktype-no-rcm.dtb \
+ sdm660-usbc-audio-mtp.dtb \
sdm658-mtp.dtb \
sdm658-cdp.dtb \
sdm658-rcm.dtb \
@@ -188,6 +189,7 @@ dtb-$(CONFIG_ARCH_SDM660) += sdm660-sim.dtb \
dtb-$(CONFIG_ARCH_SDM630) += sdm630-rumi.dtb \
sdm630-pm660a-rumi.dtb \
sdm630-mtp.dtb \
+ sdm630-usbc-audio-mtp.dtb \
sdm630-cdp.dtb \
sdm630-rcm.dtb \
sdm630-internal-codec-mtp.dtb \
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts
index b434140dc88d..5e2886f7e615 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-cdp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts
index d017949248a5..2ff517a2521b 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-i2s-mtp.dts
@@ -78,8 +78,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts
index 2fbb98c26e2c..0b74b1b2ca6c 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-mdm9x55-slimbus-mtp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts
index eae754dbd3c5..1c9d4ea56d47 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pm8004-mdm9x55-i2s-cdp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts
index 17d7717587f2..7c58c05f5c61 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8994-pmk8001-mdm9x55-i2s-cdp.dts
@@ -79,8 +79,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts
index ab51e673bc7d..71ef7617b47f 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-cdp.dts
@@ -80,8 +80,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts
index 3e4d11122e01..4d136091806a 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-i2s-mtp.dts
@@ -80,8 +80,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts
index c5c1fe0115f7..8fda04fbfe92 100644
--- a/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts
+++ b/arch/arm/boot/dts/qcom/apq8096-v3-pmi8996-mdm9x55-slimbus-mtp.dts
@@ -80,8 +80,8 @@
mhi-chan-cfg-19 = <0x13 0x40 0x1 0xa2>;
mhi-chan-cfg-20 = <0x14 0xa 0x0 0x92>;
mhi-chan-cfg-21 = <0x15 0xa 0x0 0xa2>;
- mhi-chan-cfg-22 = <0x16 0xa 0x0 0x92>;
- mhi-chan-cfg-23 = <0x17 0xa 0x0 0xa2>;
+ mhi-chan-cfg-22 = <0x16 0x40 0x2 0x92>;
+ mhi-chan-cfg-23 = <0x17 0x40 0x2 0xa2>;
mhi-chan-cfg-24 = <0x18 0xa 0x0 0x91>;
mhi-chan-cfg-25 = <0x19 0xa 0x0 0xa1>;
mhi-chan-cfg-32 = <0x20 0x80 0x2 0x92>;
diff --git a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
index daf43d49e079..00e3d0e42427 100644
--- a/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
+++ b/arch/arm/boot/dts/qcom/apq8998-v2.1-mediabox.dts
@@ -26,9 +26,14 @@
};
&mdss_mdp {
+ status = "disabled";
qcom,mdss-pref-prim-intf = "hdmi";
};
+&sde_hdmi {
+ qcom,display-type = "primary";
+};
+
&slim_aud {
tasha_codec {
wsa_spkr_sd1: msm_cdc_pinctrll {
diff --git a/arch/arm/boot/dts/qcom/msm-audio.dtsi b/arch/arm/boot/dts/qcom/msm-audio.dtsi
index a8a174300c7e..42cf30c789d9 100644
--- a/arch/arm/boot/dts/qcom/msm-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-audio.dtsi
@@ -102,6 +102,11 @@
compatible = "qcom,msm-pcm-afe";
};
+ dai_dp: qcom,msm-dai-q6-dp {
+ compatible = "qcom,msm-dai-q6-hdmi";
+ qcom,msm-dai-q6-dev-id = <24608>;
+ };
+
loopback: qcom,msm-pcm-loopback {
compatible = "qcom,msm-pcm-loopback";
};
@@ -592,6 +597,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
qcom,audio-routing =
"AIF4 VI", "MCLK",
@@ -640,7 +646,8 @@
"msm-pcm-routing", "msm-cpe-lsm",
"msm-compr-dsp", "msm-pcm-dsp-noirq",
"msm-cpe-lsm.3";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+ <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
@@ -657,7 +664,8 @@
<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+ "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
@@ -679,8 +687,9 @@
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912",
"msm-dai-q6-tdm.36913";
- asoc-codec = <&stub_codec>;
- asoc-codec-names = "msm-stub-codec.1";
+ asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+ asoc-codec-names = "msm-stub-codec.1",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_211>, <&wsa881x_212>,
<&wsa881x_213>, <&wsa881x_214>;
@@ -694,6 +703,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
qcom,audio-routing =
"AIF4 VI", "MCLK",
@@ -728,6 +738,8 @@
qcom,hph-en0-gpio = <&tavil_hph_en0>;
qcom,hph-en1-gpio = <&tavil_hph_en1>;
qcom,msm-mclk-freq = <9600000>;
+ qcom,usbc-analog-en1_gpio = <&wcd_usbc_analog_en1_gpio>;
+ qcom,usbc-analog-en2_n_gpio = <&wcd_usbc_analog_en2n_gpio>;
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
<&afe>, <&lsm>, <&routing>, <&cpe>, <&compr>,
@@ -739,7 +751,8 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-cpe-lsm",
"msm-compr-dsp", "msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+ <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
<&dai_pri_auxpcm>, <&dai_sec_auxpcm>,
<&dai_tert_auxpcm>, <&dai_quat_auxpcm>,
@@ -756,7 +769,8 @@
<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+ "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-auxpcm.1", "msm-dai-q6-auxpcm.2",
"msm-dai-q6-auxpcm.3", "msm-dai-q6-auxpcm.4",
@@ -778,8 +792,9 @@
"msm-dai-q6-tdm.36881", "msm-dai-q6-tdm.36896",
"msm-dai-q6-tdm.36897", "msm-dai-q6-tdm.36912",
"msm-dai-q6-tdm.36913";
- asoc-codec = <&stub_codec>;
- asoc-codec-names = "msm-stub-codec.1";
+ asoc-codec = <&stub_codec>, <&ext_disp_audio_codec>;
+ asoc-codec-names = "msm-stub-codec.1",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_0211>, <&wsa881x_0212>,
<&wsa881x_0213>, <&wsa881x_0214>;
@@ -794,6 +809,7 @@
qcom,wcn-btfm;
qcom,mi2s-audio-intf;
qcom,auxpcm-audio-intf;
+ qcom,ext-disp-audio-rx;
qcom,msm-mi2s-master = <1>, <1>, <1>, <1>;
qcom,msm-mclk-freq = <9600000>;
qcom,msm-mbhc-hphl-swh = <1>;
@@ -823,7 +839,13 @@
"DMIC4", "MIC BIAS External",
"MIC BIAS External", "Digital Mic4",
"SpkrLeft IN", "SPK1 OUT",
- "SpkrRight IN", "SPK2 OUT";
+ "SpkrRight IN", "SPK2 OUT",
+ "PDM_IN_RX1", "PDM_OUT_RX1",
+ "PDM_IN_RX2", "PDM_OUT_RX2",
+ "PDM_IN_RX3", "PDM_OUT_RX3",
+ "ADC1_IN", "ADC1_OUT",
+ "ADC2_IN", "ADC2_OUT",
+ "ADC3_IN", "ADC3_OUT";
asoc-platform = <&pcm0>, <&pcm1>, <&pcm2>, <&voip>, <&voice>,
<&loopback>, <&compress>, <&hostless>,
@@ -836,7 +858,8 @@
"msm-pcm-afe", "msm-lsm-client",
"msm-pcm-routing", "msm-compr-dsp",
"msm-pcm-dsp-noirq";
- asoc-cpu = <&dai_mi2s0>, <&dai_mi2s1>,
+ asoc-cpu = <&dai_dp>, <&dai_mi2s0>,
+ <&dai_mi2s1>,
<&dai_mi2s2>, <&dai_mi2s3>,
<&dai_int_mi2s0>, <&dai_int_mi2s1>,
<&dai_int_mi2s2>, <&dai_int_mi2s3>,
@@ -853,7 +876,8 @@
<&dai_sec_tdm_rx_0>, <&dai_sec_tdm_tx_0>,
<&dai_tert_tdm_rx_0>, <&dai_tert_tdm_tx_0>,
<&dai_quat_tdm_rx_0>, <&dai_quat_tdm_tx_0>;
- asoc-cpu-names = "msm-dai-q6-mi2s.0", "msm-dai-q6-mi2s.1",
+ asoc-cpu-names = "msm-dai-q6-dp.24608", "msm-dai-q6-mi2s.0",
+ "msm-dai-q6-mi2s.1",
"msm-dai-q6-mi2s.2", "msm-dai-q6-mi2s.3",
"msm-dai-q6-mi2s.7", "msm-dai-q6-mi2s.8",
"msm-dai-q6-mi2s.9", "msm-dai-q6-mi2s.10",
@@ -872,9 +896,11 @@
"msm-dai-q6-tdm.36896", "msm-dai-q6-tdm.36897",
"msm-dai-q6-tdm.36912", "msm-dai-q6-tdm.36913";
asoc-codec = <&stub_codec>, <&msm_digital_codec>,
- <&pmic_analog_codec>, <&msm_sdw_codec>;
+ <&pmic_analog_codec>, <&msm_sdw_codec>,
+ <&ext_disp_audio_codec>;
asoc-codec-names = "msm-stub-codec.1", "msm-dig-codec",
- "analog-codec", "msm_sdw_codec";
+ "analog-codec", "msm_sdw_codec",
+ "msm-ext-disp-audio-codec-rx";
qcom,wsa-max-devs = <2>;
qcom,wsa-devs = <&wsa881x_211_en>, <&wsa881x_212_en>,
diff --git a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
index cdb3662a1a56..20016c7ac6ec 100644
--- a/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
+++ b/arch/arm/boot/dts/qcom/msm-pm660l.dtsi
@@ -389,6 +389,7 @@
#address-cells = <1>;
#size-cells = <1>;
reg = <0xec00 0x100>;
+ qcom,force-module-reenable;
lcdb_ldo_vreg: ldo {
label = "ldo";
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi
index 2cb08e1709a5..e7a61f42dff1 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-cdp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -78,6 +78,26 @@
status = "disabled";
};
+	tof0: qcom,tof@0 {
+ cell-index = <0>;
+ reg = <0x29>;
+ compatible = "st,stmvl53l0";
+ qcom,cci-master = <0>;
+ cam_cci-supply = <&pm8998_lvs1>;
+ cam_laser-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_cci", "cam_laser";
+ qcom,cam-vreg-min-voltage = <0 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_tof_active>;
+ pinctrl-1 = <&cam_tof_suspend>;
+ gpios = <&tlmm 27 0>,
+ <&tlmm 126 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <0 0>;
+ qcom,gpio-req-tbl-label = "CAM_TOF", "CAM_CE";
+ };
+
eeprom0: qcom,eeprom@0 {
cell-index = <0>;
reg = <0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
index 0a41383ba874..c5384eaf17a1 100644
--- a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-mtp.dtsi
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -61,7 +61,24 @@
pinctrl-0 = <&cam_actuator_vaf_active>;
pinctrl-1 = <&cam_actuator_vaf_suspend>;
};
-
+	tof0: qcom,tof@0 {
+ cell-index = <0>;
+ reg = <0x29>;
+ compatible = "st,stmvl53l0";
+ qcom,cci-master = <0>;
+ cam_cci-supply = <&pm8998_lvs1>;
+ cam_laser-supply = <&pmi8998_bob>;
+ qcom,cam-vreg-name = "cam_cci", "cam_laser";
+ qcom,cam-vreg-min-voltage = <0 0>;
+ qcom,cam-vreg-max-voltage = <0 3600000>;
+ pinctrl-names = "cam_default", "cam_suspend";
+ pinctrl-0 = <&cam_tof_active>;
+ pinctrl-1 = <&cam_tof_suspend>;
+ gpios = <&tlmm 27 0>, <&tlmm 126 0>;
+ qcom,gpio-req-tbl-num = <0 1>;
+ qcom,gpio-req-tbl-flags = <0 0>;
+ qcom,gpio-req-tbl-label = "CAM_TOF", "CAM_CE";
+ };
ois0: qcom,ois@0 {
cell-index = <0>;
reg = <0x0>;
diff --git a/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
new file mode 100644
index 000000000000..4b3748e5a40a
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-camera-sensor-skuk-hdk.dtsi
@@ -0,0 +1,46 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm8998-camera-sensor-skuk-evt3.dtsi"
+
+&tlmm {
+ cam_sensor_mclk1_active: cam_sensor_mclk1_active {
+ /* MCLK1 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio14";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio14";
+ bias-disable; /* No PULL */
+			drive-strength = <8>; /* 8 MA */
+ };
+ };
+
+ cam_sensor_mclk2_active: cam_sensor_mclk2_active {
+		/* MCLK2 */
+ mux {
+ /* CLK, DATA */
+ pins = "gpio15";
+ function = "cam_mclk";
+ };
+
+ config {
+ pins = "gpio15";
+ bias-disable; /* No PULL */
+			drive-strength = <8>; /* 8 MA */
+ };
+ };
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi b/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi
index c9522154ae7d..f4ff7c401a98 100644
--- a/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-interposer-pm660.dtsi
@@ -55,6 +55,13 @@
/delete-property/gpios;
};
+&tof0 {
+ /delete-property/cam_cci-supply;
+ /delete-property/cam_laser-supply;
+ /delete-property/gpios;
+};
+
+
&cci {
/delete-node/qcom,camera@1;
/delete-node/qcom,camera@2;
diff --git a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
index 19b227f1b60f..4914363b414a 100644
--- a/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-pinctrl.dtsi
@@ -954,6 +954,32 @@
};
};
+ cam_tof_active: cam_tof_active {
+ mux {
+ pins = "gpio27", "gpio126";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27", "gpio126";
+ bias-disable;
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
+ cam_tof_suspend: cam_tof_suspend {
+ mux {
+ pins = "gpio27", "gpio126";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio27", "gpio126";
+ bias-pull-down; /* PULL DOWN */
+ drive-strength = <2>; /* 2 MA */
+ };
+ };
+
cam_sensor_mclk0_active: cam_sensor_mclk0_active {
/* MCLK0 */
mux {
diff --git a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
index d1c9bce00b47..aa95872da385 100644
--- a/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-qrd-vr1.dtsi
@@ -156,6 +156,25 @@
};
};
+&i2c_5 {
+ status = "okay";
+ st_fts@49 {
+ compatible = "st,fts";
+ reg = <0x49>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <125 0x2008>;
+ vdd-supply = <&pm8998_l6>;
+ avdd-supply = <&pm8998_l28>;
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ st,irq-gpio = <&tlmm 125 0x2008>;
+ st,reset-gpio = <&tlmm 89 0x00>;
+ st,regulator_dvdd = "vdd";
+ st,regulator_avdd = "avdd";
+ };
+};
+
&soc {
gpio_keys {
compatible = "gpio-keys";
diff --git a/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi b/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi
new file mode 100644
index 000000000000..6098a96db206
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-sde-display.dtsi
@@ -0,0 +1,41 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+
+ sde_wb: qcom,wb-display@0 {
+ compatible = "qcom,wb-display";
+ cell-index = <0>;
+ label = "wb_display";
+ };
+
+ msm_ext_disp: qcom,msm_ext_disp {
+ compatible = "qcom,msm-ext-disp";
+
+ ext_disp_audio_codec: qcom,msm-ext-disp-audio-codec-rx {
+ compatible = "qcom,msm-ext-disp-audio-codec-rx";
+ qcom,msm_ext_disp = <&msm_ext_disp>;
+ };
+ };
+
+ sde_hdmi: qcom,hdmi-display {
+ compatible = "qcom,hdmi-display";
+ label = "sde_hdmi";
+ qcom,display-type = "secondary";
+ qcom,msm_ext_disp = <&msm_ext_disp>;
+ };
+
+};
+
+&sde_kms {
+ connectors = <&sde_hdmi_tx &sde_hdmi &sde_wb>;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-sde.dtsi b/arch/arm/boot/dts/qcom/msm8998-sde.dtsi
new file mode 100644
index 000000000000..ec70d0367160
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/msm8998-sde.dtsi
@@ -0,0 +1,215 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+&soc {
+ sde_kms: qcom,sde_kms@c900000 {
+ compatible = "qcom,sde-kms";
+ reg = <0x0c900000 0x90000>,
+ <0x0c9b0000 0x1040>;
+ reg-names = "mdp_phys", "vbif_phys";
+
+ /* clock and supply entries */
+ clocks = <&clock_gcc clk_mmssnoc_axi_clk>,
+ <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_bimc_smmu_ahb_clk>,
+ <&clock_mmss clk_mmss_bimc_smmu_axi_clk>,
+ <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_axi_clk>,
+ <&clock_mmss clk_mdp_clk_src>,
+ <&clock_mmss clk_mmss_mdss_mdp_clk>,
+ <&clock_mmss clk_mmss_mdss_vsync_clk>,
+ <&clock_mmss clk_mmss_mdss_mdp_lut_clk>;
+ clock-names = "mmss_noc_axi_clk",
+ "mmss_noc_ahb_clk",
+ "mmss_smmu_ahb_clk",
+ "mmss_smmu_axi_clk",
+ "mnoc_clk", "iface_clk", "bus_clk",
+ "core_clk_src", "core_clk", "vsync_clk",
+ "lut_clk";
+ clock-rate = <0 0 0 0 0 0 0 330000000 0 0 0 0>;
+ clock-max-rate = <0 0 0 0 0 0 412500000 412500000 0 0 0 0>;
+ qcom,sde-max-bw-low-kbps = <6700000>;
+ qcom,sde-max-bw-high-kbps = <6700000>;
+
+ /* interrupt config */
+ interrupt-parent = <&intc>;
+ interrupts = <0 83 0>;
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ iommus = <&mmss_smmu 0>;
+
+ /* hw blocks */
+ qcom,sde-off = <0x1000>;
+ qcom,sde-ctl-off = <0x2000 0x2200 0x2400
+ 0x2600 0x2800>;
+ qcom,sde-mixer-off = <0x45000 0x46000 0x47000
+ 0x48000 0x49000 0x4a000>;
+ qcom,sde-dspp-off = <0x55000 0x57000>;
+ qcom,sde-wb-off = <0x66000>;
+ qcom,sde-wb-id = <2>;
+ qcom,sde-wb-xin-id = <6>;
+ qcom,sde-wb-clk-ctrl = <0x2bc 0x10>;
+ qcom,sde-intf-off = <0x6b000 0x6b800
+ 0x6c000 0x6c800>;
+ qcom,sde-intf-type = "dp", "dsi", "dsi", "hdmi";
+ qcom,sde-pp-off = <0x71000 0x71800
+ 0x72000 0x72800>;
+ qcom,sde-te2-off = <0x2000 0x2000 0x0 0x0>;
+ qcom,sde-cdm-off = <0x7a200>;
+ qcom,sde-dsc-off = <0x10000 0x10000 0x0 0x0>;
+ qcom,sde-intf-max-prefetch-lines = <0x15 0x15 0x15 0x15>;
+
+ qcom,sde-sspp-type = "vig", "vig", "vig", "vig",
+ "dma", "dma", "dma", "dma",
+ "cursor", "cursor";
+
+ qcom,sde-sspp-off = <0x5000 0x7000 0x9000 0xb000
+ 0x25000 0x27000 0x29000 0x2b000
+ 0x35000 0x37000>;
+
+ qcom,sde-sspp-xin-id = <0 4 8 12 1 5 9 13 2 10>;
+
+		/* offsets are relative to "mdp_phys" + "qcom,sde-off" */
+ qcom,sde-sspp-clk-ctrl = <0x2ac 0x8>, <0x2b4 0x8>,
+ <0x2c4 0x8>, <0x2c4 0xc>, <0x3a8 0x10>,
+ <0x3b0 0x10>;
+
+ qcom,sde-qseed-type = "qseedv3";
+ qcom,sde-mixer-linewidth = <2560>;
+ qcom,sde-sspp-linewidth = <2560>;
+ qcom,sde-mixer-blendstages = <0x7>;
+ qcom,sde-highest-bank-bit = <0x2>;
+ qcom,sde-panic-per-pipe;
+ qcom,sde-has-cdp;
+ qcom,sde-has-src-split;
+ qcom,sde-sspp-danger-lut = <0x000f 0xffff 0x0000>;
+ qcom,sde-sspp-safe-lut = <0xfffc 0xff00 0xffff>;
+
+ qcom,sde-vbif-off = <0>;
+ qcom,sde-vbif-id = <0>;
+ qcom,sde-vbif-default-ot-rd-limit = <32>;
+ qcom,sde-vbif-default-ot-wr-limit = <32>;
+ qcom,sde-vbif-dynamic-ot-rd-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+ qcom,sde-vbif-dynamic-ot-wr-limit = <62208000 2>,
+ <124416000 4>, <248832000 16>;
+
+ vdd-supply = <&gdsc_mdss>;
+ gdsc-mmagic-mdss-supply = <&gdsc_bimc_smmu>;
+ qcom,sde-csc-type = "csc-10bit";
+
+ qcom,sde-sspp-vig-blocks {
+ qcom,sde-vig-csc-off = <0x1a00>;
+ qcom,sde-vig-qseed-off = <0xa00>;
+ };
+
+ qcom,platform-supply-entries {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ qcom,platform-supply-entry@0 {
+ reg = <0>;
+ qcom,supply-name = "gdsc-mmagic-mdss";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+
+ qcom,platform-supply-entry@1 {
+ reg = <1>;
+ qcom,supply-name = "vdd";
+ qcom,supply-min-voltage = <0>;
+ qcom,supply-max-voltage = <0>;
+ qcom,supply-enable-load = <0>;
+ qcom,supply-disable-load = <0>;
+ };
+ };
+
+ smmu_kms_unsec: qcom,smmu_kms_unsec_cb {
+ compatible = "qcom,smmu_mdp_unsec";
+ iommus = <&mmss_smmu 0>;
+ };
+
+ /* data and reg bus scale settings */
+ qcom,sde-data-bus {
+ qcom,msm-bus,name = "mdss_sde";
+ qcom,msm-bus,num-cases = <3>;
+ qcom,msm-bus,num-paths = <2>;
+ qcom,msm-bus,vectors-KBps =
+ <22 512 0 0>, <23 512 0 0>,
+ <22 512 0 6400000>, <23 512 0 6400000>,
+ <22 512 0 6400000>, <23 512 0 6400000>;
+ };
+ qcom,sde-reg-bus {
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+ };
+
+ sde_hdmi_tx: qcom,hdmi_tx_8998@c9a0000 {
+ cell-index = <0>;
+ compatible = "qcom,hdmi-tx-8998";
+ reg = <0xc9a0000 0x50c>,
+ <0x780000 0x621c>,
+ <0xc9e0000 0x28>;
+ reg-names = "core_physical", "qfprom_physical", "hdcp_physical";
+ interrupt-parent = <&sde_kms>;
+ interrupts = <8 0>;
+ qcom,hdmi-tx-ddc-clk-gpio = <&tlmm 32 0>;
+ qcom,hdmi-tx-ddc-data-gpio = <&tlmm 33 0>;
+ qcom,hdmi-tx-hpd-gpio = <&tlmm 34 0>;
+ qcom,hdmi-tx-hpd5v-gpio = <&tlmm 133 0>;
+ pinctrl-names = "default", "sleep";
+ pinctrl-0 = <&mdss_hdmi_hpd_active
+ &mdss_hdmi_ddc_active
+ &mdss_hdmi_cec_active
+ &mdss_hdmi_5v_active>;
+ pinctrl-1 = <&mdss_hdmi_hpd_suspend
+ &mdss_hdmi_ddc_suspend
+ &mdss_hdmi_cec_suspend
+ &mdss_hdmi_5v_suspend>;
+ hpd-gdsc-supply = <&gdsc_mdss>;
+ qcom,supply-names = "hpd-gdsc";
+ qcom,min-voltage-level = <0>;
+ qcom,max-voltage-level = <0>;
+ qcom,enable-load = <0>;
+ qcom,disable-load = <0>;
+
+ clocks = <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_hdmi_clk>,
+ <&clock_mmss clk_mmss_mdss_mdp_clk>,
+ <&clock_mmss clk_mmss_mdss_hdmi_dp_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_extpclk_clk>,
+ <&clock_mmss clk_mmss_mnoc_ahb_clk>,
+ <&clock_mmss clk_mmss_misc_ahb_clk>,
+ <&clock_mmss clk_mmss_mdss_axi_clk>;
+ clock-names = "hpd_mnoc_clk", "hpd_iface_clk",
+ "hpd_core_clk", "hpd_mdp_core_clk",
+ "hpd_alt_iface_clk", "core_extp_clk",
+ "mnoc_clk","hpd_misc_ahb_clk",
+ "hpd_bus_clk";
+
+ /*qcom,mdss-fb-map = <&mdss_fb2>;*/
+ qcom,pluggable;
+ };
+};
+#include "msm8998-sde-display.dtsi"
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts b/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts
index 0e17ce31d52f..73debc374fda 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts
+++ b/arch/arm/boot/dts/qcom/msm8998-v2-qrd-skuk-hdk.dts
@@ -15,7 +15,7 @@
#include "msm8998-v2.dtsi"
#include "msm8998-qrd-skuk.dtsi"
-#include "msm8998-camera-sensor-skuk.dtsi"
+#include "msm8998-camera-sensor-skuk-hdk.dtsi"
/ {
model = "Qualcomm Technologies, Inc. MSM 8998 SKUK HDK";
@@ -120,3 +120,29 @@
qcom,mdss-dsi-bl-max-level = <255>;
qcom,panel-supply-entries = <&dsi_panel_pwr_supply>;
};
+
+&i2c_5 {
+ status = "okay";
+ st_fts@49 {
+ compatible = "st,fts";
+ reg = <0x49>;
+ interrupt-parent = <&tlmm>;
+ interrupts = <125 0x2008>;
+ vdd-supply = <&pm8998_l6>;
+ avdd-supply = <&pm8998_l28>;
+ pinctrl-names = "pmx_ts_active", "pmx_ts_suspend";
+ pinctrl-0 = <&ts_active>;
+ pinctrl-1 = <&ts_int_suspend &ts_reset_suspend>;
+ st,irq-gpio = <&tlmm 125 0x2008>;
+ st,reset-gpio = <&tlmm 89 0x00>;
+ st,regulator_dvdd = "vdd";
+ st,regulator_avdd = "avdd";
+ };
+};
+
+&soc {
+	/* HDK835 does not use improveTouch. If this node is not
+	 * removed, legacy touch cannot work.
+	 */
+ /delete-node/hbtp;
+};
diff --git a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
index b6ddd549efe5..348faf9b69c3 100644
--- a/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998-v2.dtsi
@@ -366,7 +366,7 @@
&pm8998_s13 {
regulator-min-microvolt = <568000>;
- regulator-max-microvolt = <1056000>;
+ regulator-max-microvolt = <1136000>;
};
&pcie0 {
diff --git a/arch/arm/boot/dts/qcom/msm8998.dtsi b/arch/arm/boot/dts/qcom/msm8998.dtsi
index 81bf1777c7e2..f33b8bc2a8a8 100644
--- a/arch/arm/boot/dts/qcom/msm8998.dtsi
+++ b/arch/arm/boot/dts/qcom/msm8998.dtsi
@@ -1342,6 +1342,18 @@
qcom,irq-mask = <0x200>;
interrupts = <0 157 1>;
label = "lpass";
+ qcom,qos-config = <&glink_qos_adsp>;
+ qcom,ramp-time = <0xaf>;
+ };
+
+ glink_qos_adsp: qcom,glink-qos-config-adsp {
+ compatible = "qcom,glink-qos-config";
+ qcom,flow-info = <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>,
+ <0x3c 0x0>;
+ qcom,mtu-size = <0x800>;
+ qcom,tput-stats-cycle = <0xa>;
};
qcom,glink-smem-native-xprt-dsps@86000000 {
@@ -3286,3 +3298,4 @@
#include "msm8998-blsp.dtsi"
#include "msm8998-audio.dtsi"
#include "msm-smb138x.dtsi"
+#include "msm8998-sde.dtsi"
diff --git a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
index 060923a5f227..37fce82adb17 100644
--- a/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-cdp.dtsi
@@ -47,6 +47,9 @@
};
&soc {
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
};
&pm660_charger {
diff --git a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
index 18e6445b68e1..3fb6993d71d6 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mdss.dtsi
@@ -180,6 +180,19 @@
qcom,mdss-prefill-post-scaler-buffer-pixels = <2560>;
qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
+ qcom,mdss-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+
qcom,mdss-pp-offsets {
qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
qcom,mdss-sspp-vig-pcc-off = <0x1b00>;
diff --git a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
index 69dcb26c42a2..d13e1dbfe824 100644
--- a/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-mtp.dtsi
@@ -56,6 +56,9 @@
};
&soc {
+ qcom,msm-ssc-sensors {
+ compatible = "qcom,msm-ssc-sensors";
+ };
};
&pm660_fg {
diff --git a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
index f80562e22989..52df459cedfb 100644
--- a/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-qrd.dtsi
@@ -102,7 +102,7 @@
1 &intc 0 0 221 0
2 &tlmm 54 0>;
interrupt-names = "hc_irq", "pwr_irq", "status_irq";
- cd-gpios = <&tlmm 54 0x1>;
+ cd-gpios = <&tlmm 54 0x0>;
qcom,clk-rates = <400000 20000000 25000000 50000000 100000000
200000000>;
diff --git a/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi b/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi
index 13b561944e13..c16c83050f31 100644
--- a/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630-regulator.dtsi
@@ -681,20 +681,20 @@
regulator-min-microvolt = <1>;
regulator-max-microvolt = <8>;
- qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-corners = <3>;
qcom,cpr-fuse-combos = <24>;
qcom,cpr-speed-bins = <3>;
qcom,cpr-speed-bin-corners = <8 8 8>;
qcom,cpr-corners = <8>;
- qcom,cpr-corner-fmax-map = <2 4 5 8>;
+ qcom,cpr-corner-fmax-map = <2 4 8>;
qcom,cpr-voltage-ceiling =
- <724000 724000 724000 788000 868000
- 924000 988000 1068000>;
+ <724000 724000 788000 788000
+ 1068000 1068000 1068000 1068000>;
qcom,cpr-voltage-floor =
- <588000 588000 596000 652000 712000
- 744000 784000 844000>;
+ <588000 588000 596000 652000
+ 712000 744000 784000 844000>;
qcom,corner-frequencies =
<300000000 614400000 883200000
@@ -755,7 +755,7 @@
regulator-min-microvolt = <1>;
regulator-max-microvolt = <11>;
- qcom,cpr-fuse-corners = <4>;
+ qcom,cpr-fuse-corners = <5>;
qcom,cpr-fuse-combos = <24>;
qcom,cpr-speed-bins = <3>;
qcom,cpr-speed-bin-corners = <10 10 11>;
@@ -771,29 +771,29 @@
qcom,cpr-corner-fmax-map =
/* Speed bin 0 */
- <2 4 6 10>,
+ <2 4 6 9 10>,
/* Speed bin 1 */
- <2 4 6 10>,
+ <2 4 6 9 10>,
/* Speed bin 2 */
- <2 4 6 11>;
+ <2 4 6 9 11>;
qcom,cpr-voltage-ceiling =
/* Speed bin 0 */
<724000 724000 724000 788000
- 868000 868000 924000 988000
+ 868000 868000 988000 988000
988000 1068000>,
/* Speed bin 1 */
<724000 724000 724000 788000
- 868000 868000 924000 988000
+ 868000 868000 988000 988000
988000 1068000>,
/* Speed bin 2 */
<724000 724000 724000 788000
- 868000 868000 924000 988000
- 988000 1068000 1140000>;
+ 868000 868000 988000 988000
+ 988000 1140000 1140000>;
qcom,cpr-voltage-floor =
/* Speed bin 0 */
@@ -828,7 +828,7 @@
<300000000 787200000 1113600000
1344000000 1516800000 1670400000
1881600000 2016000000 2150400000
- 2208000000 2515200000>;
+ 2380800000 2515200000>;
qcom,allow-voltage-interpolation;
qcom,allow-quotient-interpolation;
diff --git a/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts
new file mode 100644
index 000000000000..eb089b524fef
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm630-usbc-audio-mtp.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm630.dtsi"
+#include "sdm630-mtp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 630 PM660 + PM660L, USBC Audio MTP";
+ compatible = "qcom,sdm630-mtp", "qcom,sdm630", "qcom,mtp";
+ qcom,board-id = <8 2>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+ qcom,msm-mbhc-moist-cfg = <0>, <0>, <3>;
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm630.dtsi b/arch/arm/boot/dts/qcom/sdm630.dtsi
index 3b262453c0ed..cd895a067f65 100644
--- a/arch/arm/boot/dts/qcom/sdm630.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm630.dtsi
@@ -52,6 +52,7 @@
reg = <0x0 0x100>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile0>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea0>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -80,6 +81,7 @@
reg = <0x0 0x101>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile1>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea1>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -102,6 +104,7 @@
reg = <0x0 0x102>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile2>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea2>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -124,6 +127,7 @@
reg = <0x0 0x103>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile3>;
+ qcom,lmh-dcvs = <&lmh_dcvs1>;
qcom,ea = <&ea3>;
efficiency = <1126>;
next-level-cache = <&L2_1>;
@@ -146,6 +150,7 @@
reg = <0x0 0x0>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea4>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -174,6 +179,7 @@
reg = <0x0 0x1>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea5>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -196,6 +202,7 @@
reg = <0x0 0x2>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea6>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -218,6 +225,7 @@
reg = <0x0 0x3>;
enable-method = "psci";
qcom,limits-info = <&mitigation_profile4>;
+ qcom,lmh-dcvs = <&lmh_dcvs0>;
qcom,ea = <&ea7>;
efficiency = <1024>;
next-level-cache = <&L2_0>;
@@ -712,6 +720,8 @@
qcom,synchronous-cluster-map = <0 4 &CPU4 &CPU5 &CPU6 &CPU7>,
<1 4 &CPU0 &CPU1 &CPU2 &CPU3>;
+ clock-names = "osm";
+ clocks = <&clock_cpu PERFCL_CLK>;
qcom,cxip-lm-enable = <0>;
qcom,vdd-restriction-temp = <5>;
qcom,vdd-restriction-temp-hysteresis = <10>;
@@ -800,7 +810,6 @@
};
wdog: qcom,wdt@17817000 {
- status = "disabled";
compatible = "qcom,msm-watchdog";
reg = <0x17817000 0x1000>;
reg-names = "wdt-base";
@@ -1109,7 +1118,7 @@
qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,target-dev = <&memlat_cpu0>;
qcom,core-dev-table =
- < 787200 762 >,
+ < 1113600 762 >,
< 1344000 2086 >,
< 1670400 2929 >,
< 2150400 3879 >,
@@ -1758,9 +1767,9 @@
reg = <0x10 8>;
};
- dload_type@18 {
+ dload_type@1c {
compatible = "qcom,msm-imem-dload-type";
- reg = <0x18 4>;
+ reg = <0x1c 4>;
};
restart_reason@65c {
@@ -2145,10 +2154,34 @@
status = "ok";
};
+&clock_cpu {
+ lmh_dcvs0: qcom,limits-dcvs@0 {
+ compatible = "qcom,msm-hw-limits";
+ interrupts = <GIC_SPI 37 IRQ_TYPE_LEVEL_HIGH>;
+ };
+
+ lmh_dcvs1: qcom,limits-dcvs@1 {
+ compatible = "qcom,msm-hw-limits";
+ interrupts = <GIC_SPI 38 IRQ_TYPE_LEVEL_HIGH>;
+ };
+};
+
&blsp2_uart1_hs {
status = "ok";
};
+&pm660l_gpios {
+ /* GPIO 7 for VOL_UP */
+ gpio@c600 {
+ status = "okay";
+ qcom,mode = <0>;
+ qcom,pull = <0>;
+ qcom,vin-sel = <0>;
+ qcom,src-sel = <0>;
+ qcom,out-strength = <1>;
+ };
+};
+
&soc {
gpio_keys {
status = "okay";
@@ -2173,5 +2206,14 @@
linux,code = <0x2fe>;
debounce-interval = <15>;
};
+
+ vol_up {
+ label = "volume_up";
+ gpios = <&pm660l_gpios 7 0x1>;
+ linux,input-type = <1>;
+ linux,code = <115>;
+ gpio-key,wakeup;
+ debounce-interval = <15>;
+ };
};
};
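
The vol_up child above is handled by the generic gpio-keys driver,
which turns each child node into an input event source. A hedged
sketch of the per-child parse (helper name illustrative; kernel
context assumed):

#include <linux/input.h>
#include <linux/of.h>

static int gpio_keys_parse_child(struct device_node *child,
				 struct input_dev *input)
{
	u32 code = 0, debounce = 0;

	of_property_read_u32(child, "linux,code", &code); /* 115 = KEY_VOLUMEUP */
	of_property_read_u32(child, "debounce-interval", &debounce);
	pr_debug("key %s: code %u, debounce %u ms\n",
		 child->name, code, debounce);
	/* EV_KEY per linux,input-type = <1> */
	input_set_capability(input, EV_KEY, code);
	return 0;
}
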
diff --git a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
index 42ab88fda778..0b216a16c7e8 100644
--- a/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-audio.dtsi
@@ -94,6 +94,20 @@
qcom,max-frequency = <24000000>;
qcom,mem-base-addr = <0x100000>;
};
+
+ wcd_usbc_analog_en1_gpio: msm_cdc_pinctrl_usbc_audio_en1 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en1_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en1_idle>;
+ };
+
+ wcd_usbc_analog_en2n_gpio: msm_cdc_pinctrl_usbc_audio_en2 {
+ compatible = "qcom,msm-cdc-pinctrl";
+ pinctrl-names = "aud_active", "aud_sleep";
+ pinctrl-0 = <&wcd_usbc_analog_en2n_active>;
+ pinctrl-1 = <&wcd_usbc_analog_en2n_idle>;
+ };
};
};
@@ -230,28 +244,24 @@
compatible = "qcom,wsa881x";
reg = <0x0 0x20170211>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
- qcom,cache-always;
};
wsa881x_212_en: wsa881x_en@20170212 {
compatible = "qcom,wsa881x";
reg = <0x0 0x20170212>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
- qcom,cache-always;
};
wsa881x_213_en: wsa881x_en@21170213 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170213>;
qcom,spkr-sd-n-node = <&wsa_spkr_en1>;
- qcom,cache-always;
};
wsa881x_214_en: wsa881x_en@21170214 {
compatible = "qcom,wsa881x";
reg = <0x0 0x21170214>;
qcom,spkr-sd-n-node = <&wsa_spkr_en2>;
- qcom,cache-always;
};
};
};
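
Each msm-cdc-pinctrl node above pairs an "aud_active" and an
"aud_sleep" pinctrl state. A hedged sketch of driving the active state
from the consumer side, using only the standard pinctrl consumer API:

#include <linux/pinctrl/consumer.h>

static int cdc_pin_set_active(struct device *dev)
{
	struct pinctrl *p;
	struct pinctrl_state *active;

	p = devm_pinctrl_get(dev);
	if (IS_ERR(p))
		return PTR_ERR(p);

	/* matches the "aud_active" entry in pinctrl-names */
	active = pinctrl_lookup_state(p, "aud_active");
	if (IS_ERR(active))
		return PTR_ERR(active);

	return pinctrl_select_state(p, active);
}
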
diff --git a/arch/arm/boot/dts/qcom/sdm660-common.dtsi b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
index 05b7973f2457..dc57ee62a867 100644
--- a/arch/arm/boot/dts/qcom/sdm660-common.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-common.dtsi
@@ -194,6 +194,7 @@
tx-fifo-resize;
snps,nominal-elastic-buffer;
snps,disable-clk-gating;
+ snps,has-lpm-erratum;
snps,is-utmi-l1-suspend;
snps,hird-threshold = /bits/ 8 <0x0>;
};
diff --git a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
index e5cf0b1534ec..a47a788874fa 100644
--- a/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-gpu.dtsi
@@ -119,8 +119,8 @@
vdd-supply = <&gdsc_gpu_gx>;
/* CPU latency parameter */
- qcom,pm-qos-active-latency = <349>;
- qcom,pm-qos-wakeup-latency = <349>;
+ qcom,pm-qos-active-latency = <518>;
+ qcom,pm-qos-wakeup-latency = <518>;
/* Quirks */
qcom,gpu-quirk-dp2clockgating-disable;
diff --git a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
index fd07a105a19a..6e32c1282d3e 100644
--- a/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-mdss.dtsi
@@ -195,6 +195,19 @@
qcom,mdss-prefill-post-scaler-buffer-pixels = <2560>;
qcom,mdss-prefill-pingpong-buffer-pixels = <5120>;
+ qcom,mdss-reg-bus {
+ /* Reg Bus Scale Settings */
+ qcom,msm-bus,name = "mdss_reg";
+ qcom,msm-bus,num-cases = <4>;
+ qcom,msm-bus,num-paths = <1>;
+ qcom,msm-bus,active-only;
+ qcom,msm-bus,vectors-KBps =
+ <1 590 0 0>,
+ <1 590 0 76800>,
+ <1 590 0 160000>,
+ <1 590 0 320000>;
+ };
+
qcom,mdss-pp-offsets {
qcom,mdss-sspp-mdss-igc-lut-off = <0x2000>;
qcom,mdss-sspp-vig-pcc-off = <0x1b00>;
diff --git a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
index 7ecab691dadf..c6e2f431a7c9 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-pinctrl.dtsi
@@ -744,6 +744,66 @@
};
};
};
+ /* USB C analog configuration */
+ wcd_usbc_analog_en1 {
+ wcd_usbc_analog_en1_idle: wcd_usbc_ana_en1_idle {
+ mux {
+ pins = "gpio80";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+
+ wcd_usbc_analog_en1_active: wcd_usbc_ana_en1_active {
+ mux {
+ pins = "gpio80";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio80";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+ };
+
+ wcd_usbc_analog_en2n {
+ wcd_usbc_analog_en2n_idle: wcd_usbc_ana_en2n_idle {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <2>;
+ bias-disable;
+ output-high;
+ };
+ };
+
+ wcd_usbc_analog_en2n_active: wcd_usbc_ana_en2n_active {
+ mux {
+ pins = "gpio77";
+ function = "gpio";
+ };
+
+ config {
+ pins = "gpio77";
+ drive-strength = <2>;
+ bias-pull-down;
+ output-low;
+ };
+ };
+ };
sdw_clk_pin {
sdw_clk_sleep: sdw_clk_sleep {
diff --git a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
index 6a9fc5bde361..1624975028c5 100644
--- a/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-pm.dtsi
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -62,20 +62,20 @@
reg = <0>;
label = "system-wfi";
qcom,psci-mode = <0x0>;
- qcom,latency-us = <100>;
- qcom,ss-power = <725>;
- qcom,energy-overhead = <85000>;
- qcom,time-overhead = <120>;
+ qcom,latency-us = <1654>;
+ qcom,ss-power = <219>;
+ qcom,energy-overhead = <98750>;
+ qcom,time-overhead = <2294>;
};
qcom,pm-cluster-level@1{ /* E3 */
reg = <1>;
label = "system-pc";
qcom,psci-mode = <0x3>;
- qcom,latency-us = <350>;
- qcom,ss-power = <530>;
- qcom,energy-overhead = <160000>;
- qcom,time-overhead = <550>;
+ qcom,latency-us = <4506>;
+ qcom,ss-power = <88>;
+ qcom,energy-overhead = <1228536>;
+ qcom,time-overhead = <15337>;
qcom,min-child-idx = <3>;
qcom,is-reset;
qcom,notify-rpm;
@@ -95,19 +95,19 @@
reg = <0>;
label = "pwr-l2-wfi";
qcom,psci-mode = <0x1>;
- qcom,latency-us = <40>;
- qcom,ss-power = <740>;
- qcom,energy-overhead = <65000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <51>;
+ qcom,ss-power = <250>;
+ qcom,energy-overhead = <83452>;
+ qcom,time-overhead = <89>;
};
qcom,pm-cluster-level@1{ /* D2D */
reg = <1>;
label = "pwr-l2-dynret";
qcom,psci-mode = <0x2>;
- qcom,latency-us = <60>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <85000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <421>;
+ qcom,ss-power = <235>;
+ qcom,energy-overhead = <219416>;
+ qcom,time-overhead = <781>;
qcom,min-child-idx = <1>;
};
@@ -115,10 +115,10 @@
reg = <2>;
label = "pwr-l2-ret";
qcom,psci-mode = <0x3>;
- qcom,latency-us = <100>;
- qcom,ss-power = <640>;
- qcom,energy-overhead = <135000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <517>;
+ qcom,ss-power = <226>;
+			qcom,energy-overhead = <299405>;
+ qcom,time-overhead = <922>;
qcom,min-child-idx = <2>;
};
@@ -126,10 +126,10 @@
reg = <3>;
label = "pwr-l2-pc";
qcom,psci-mode = <0x4>;
- qcom,latency-us = <700>;
- qcom,ss-power = <450>;
- qcom,energy-overhead = <210000>;
- qcom,time-overhead = <11500>;
+ qcom,latency-us = <2118>;
+ qcom,ss-power = <210>;
+ qcom,energy-overhead = <833056>;
+ qcom,time-overhead = <2918>;
qcom,min-child-idx = <2>;
qcom,is-reset;
};
@@ -144,30 +144,30 @@
reg = <0>;
qcom,spm-cpu-mode = "wfi";
qcom,psci-cpu-mode = <0x1>;
- qcom,latency-us = <20>;
- qcom,ss-power = <750>;
- qcom,energy-overhead = <32000>;
- qcom,time-overhead = <60>;
+ qcom,latency-us = <42>;
+ qcom,ss-power = <250>;
+ qcom,energy-overhead = <30562>;
+ qcom,time-overhead = <91>;
};
qcom,pm-cpu-level@1 { /* C2D */
reg = <1>;
qcom,psci-cpu-mode = <2>;
qcom,spm-cpu-mode = "ret";
- qcom,latency-us = <40>;
- qcom,ss-power = <730>;
- qcom,energy-overhead = <85500>;
- qcom,time-overhead = <110>;
+ qcom,latency-us = <63>;
+ qcom,ss-power = <245>;
+ qcom,energy-overhead = <49239>;
+ qcom,time-overhead = <172>;
};
qcom,pm-cpu-level@2 { /* C3 */
reg = <2>;
qcom,spm-cpu-mode = "pc";
qcom,psci-cpu-mode = <0x3>;
- qcom,latency-us = <80>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <126480>;
- qcom,time-overhead = <160>;
+ qcom,latency-us = <376>;
+ qcom,ss-power = <237>;
+ qcom,energy-overhead = <181018>;
+ qcom,time-overhead = <666>;
qcom,is-reset;
};
};
@@ -187,20 +187,20 @@
reg = <0>;
label = "perf-l2-wfi";
qcom,psci-mode = <0x1>;
- qcom,latency-us = <40>;
- qcom,ss-power = <740>;
- qcom,energy-overhead = <70000>;
- qcom,time-overhead = <80>;
+ qcom,latency-us = <51>;
+ qcom,ss-power = <283>;
+ qcom,energy-overhead = <83083>;
+ qcom,time-overhead = <89>;
};
qcom,pm-cluster-level@1{ /* D2D */
reg = <1>;
label = "perf-l2-dynret";
qcom,psci-mode = <2>;
- qcom,latency-us = <60>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <85000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <345>;
+ qcom,ss-power = <254>;
+ qcom,energy-overhead = <198349>;
+ qcom,time-overhead = <659>;
qcom,min-child-idx = <1>;
};
@@ -208,10 +208,10 @@
reg = <2>;
label = "perf-l2-ret";
qcom,psci-mode = <3>;
- qcom,latency-us = <100>;
- qcom,ss-power = <640>;
- qcom,energy-overhead = <135000>;
- qcom,time-overhead = <85>;
+ qcom,latency-us = <419>;
+ qcom,ss-power = <244>;
+ qcom,energy-overhead = <281921>;
+ qcom,time-overhead = <737>;
qcom,min-child-idx = <2>;
};
@@ -219,10 +219,10 @@
reg = <3>;
label = "perf-l2-pc";
qcom,psci-mode = <0x4>;
- qcom,latency-us = <800>;
- qcom,ss-power = <450>;
- qcom,energy-overhead = <240000>;
- qcom,time-overhead = <11500>;
+ qcom,latency-us = <1654>;
+ qcom,ss-power = <219>;
+ qcom,energy-overhead = <815573>;
+ qcom,time-overhead = <2294>;
qcom,min-child-idx = <2>;
qcom,is-reset;
};
@@ -237,30 +237,30 @@
reg = <0>;
qcom,spm-cpu-mode = "wfi";
qcom,psci-cpu-mode = <0x1>;
- qcom,latency-us = <25>;
- qcom,ss-power = <750>;
- qcom,energy-overhead = <37000>;
- qcom,time-overhead = <50>;
+ qcom,latency-us = <39>;
+ qcom,ss-power = <292>;
+ qcom,energy-overhead = <37558>;
+ qcom,time-overhead = <68>;
};
qcom,pm-cpu-level@1 { /* C2D */
reg = <1>;
qcom,psci-cpu-mode = <2>;
qcom,spm-cpu-mode = "ret";
- qcom,latency-us = <40>;
- qcom,ss-power = <730>;
- qcom,energy-overhead = <85500>;
- qcom,time-overhead = <110>;
+ qcom,latency-us = <60>;
+ qcom,ss-power = <275>;
+ qcom,energy-overhead = <70737>;
+ qcom,time-overhead = <181>;
};
qcom,pm-cpu-level@2 { /* C3 */
reg = <2>;
qcom,spm-cpu-mode = "pc";
qcom,psci-cpu-mode = <0x3>;
- qcom,latency-us = <80>;
- qcom,ss-power = <700>;
- qcom,energy-overhead = <136480>;
- qcom,time-overhead = <160>;
+ qcom,latency-us = <324>;
+ qcom,ss-power = <263>;
+ qcom,energy-overhead = <213213>;
+ qcom,time-overhead = <621>;
qcom,is-reset;
};
};
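
The re-tuned latency-us/ss-power/energy-overhead/time-overhead values
feed the low-power-mode governor's cost model. A hedged sketch of the
trade-off they encode — an illustration of the idea, not the exact
lpm-levels heuristic:

static bool level_worthwhile(u32 latency_us, u32 time_overhead_us,
			     u32 predicted_sleep_us, u32 qos_limit_us)
{
	if (latency_us > qos_limit_us)	/* wake-up latency budget exceeded */
		return false;
	/* sleep must outlast entry+exit cost for the level to pay off */
	return predicted_sleep_us > time_overhead_us;
}
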
diff --git a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
index 1db65c064f66..b2f6ac722dfc 100644
--- a/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660-qrd.dtsi
@@ -238,6 +238,18 @@
qcom,fg-cutoff-voltage = <3700>;
};
+&i2c_2 {
+ status = "ok";
+ smb1351-charger@1d {
+ compatible = "qcom,smb1351-charger";
+ reg = <0x1d>;
+ qcom,parallel-charger;
+ qcom,float-voltage-mv = <4400>;
+ qcom,recharge-mv = <100>;
+ qcom,parallel-en-pin-polarity = <0>;
+ };
+};
+
&tasha_snd {
qcom,model = "sdm660-tasha-skus-snd-card";
qcom,audio-routing =
@@ -248,10 +260,10 @@
"MIC BIAS2", "Headset Mic",
"DMIC0", "MIC BIAS1",
"MIC BIAS1", "Digital Mic0",
- "DMIC2", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic2",
- "DMIC4", "MIC BIAS3",
- "MIC BIAS3", "Digital Mic4",
+ "DMIC3", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic3",
+ "DMIC5", "MIC BIAS3",
+ "MIC BIAS3", "Digital Mic5",
"SpkrLeft IN", "SPK1 OUT";
qcom,msm-mbhc-hphl-swh = <1>;
/delete-property/ qcom,us-euro-gpios;
diff --git a/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts
new file mode 100644
index 000000000000..dff55d8e9cf8
--- /dev/null
+++ b/arch/arm/boot/dts/qcom/sdm660-usbc-audio-mtp.dts
@@ -0,0 +1,31 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+
+/dts-v1/;
+
+#include "sdm660.dtsi"
+#include "sdm660-mtp.dtsi"
+#include "sdm660-external-codec.dtsi"
+
+/ {
+ model = "Qualcomm Technologies, Inc. SDM 660 PM660 + PM660L, USBC Audio MTP";
+ compatible = "qcom,sdm660-mtp", "qcom,sdm660", "qcom,mtp";
+ qcom,board-id = <8 2>;
+ qcom,pmic-id = <0x0001001b 0x0101011a 0x0 0x0>,
+ <0x0001001b 0x0201011a 0x0 0x0>;
+};
+
+&tavil_snd {
+ qcom,msm-mbhc-moist-cfg = <0>, <0>, <3>;
+ qcom,msm-mbhc-usbc-audio-supported = <1>;
+};
diff --git a/arch/arm/boot/dts/qcom/sdm660.dtsi b/arch/arm/boot/dts/qcom/sdm660.dtsi
index 45244f2bbfc8..2d44c69ea827 100644
--- a/arch/arm/boot/dts/qcom/sdm660.dtsi
+++ b/arch/arm/boot/dts/qcom/sdm660.dtsi
@@ -320,10 +320,16 @@
reg = <0x0 0x92a00000 0x0 0x1e00000>;
};
- cdsp_fw_mem: cdsp_fw_region@94800000 {
+ pil_mba_mem: pil_mba_region@94800000 {
compatible = "removed-dma-pool";
no-map;
- reg = <0x0 0x94800000 0x0 0x600000>;
+ reg = <0x0 0x94800000 0x0 0x200000>;
+ };
+
+ cdsp_fw_mem: cdsp_fw_region@94a00000 {
+ compatible = "removed-dma-pool";
+ no-map;
+ reg = <0x0 0x94a00000 0x0 0x600000>;
};
venus_fw_mem: venus_fw_region {
@@ -1166,7 +1172,7 @@
qcom,cpulist = <&CPU0 &CPU1 &CPU2 &CPU3>;
qcom,target-dev = <&memlat_cpu0>;
qcom,core-dev-table =
- < 633600 762 >,
+ < 902400 762 >,
< 1401600 2086 >,
< 1881600 3879 >;
};
@@ -1966,6 +1972,10 @@
/* GPIO output to mss */
qcom,gpio-force-stop = <&smp2pgpio_ssr_smp2p_1_out 0 0>;
status = "ok";
+ qcom,mba-mem@0 {
+ compatible = "qcom,pil-mba-mem";
+ memory-region = <&pil_mba_mem>;
+ };
};
qcom,msm-rtb {
@@ -1991,9 +2001,9 @@
reg = <0x10 8>;
};
- dload_type@18 {
+ dload_type@1c {
compatible = "qcom,msm-imem-dload-type";
- reg = <0x18 4>;
+ reg = <0x1c 4>;
};
restart_reason@65c {
diff --git a/arch/arm/configs/sdm660-perf_defconfig b/arch/arm/configs/sdm660-perf_defconfig
index 1e965b29c9fd..515aab3ccefb 100644
--- a/arch/arm/configs/sdm660-perf_defconfig
+++ b/arch/arm/configs/sdm660-perf_defconfig
@@ -510,6 +510,7 @@ CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
@@ -620,6 +621,8 @@ CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_QPDI=y
@@ -638,4 +641,5 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_XZ_DEC=y
diff --git a/arch/arm/configs/sdm660_defconfig b/arch/arm/configs/sdm660_defconfig
index d8bdeb7ff4c6..574f3808704b 100644
--- a/arch/arm/configs/sdm660_defconfig
+++ b/arch/arm/configs/sdm660_defconfig
@@ -510,6 +510,7 @@ CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
@@ -680,4 +681,5 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_XZ_DEC=y
diff --git a/arch/arm64/Kconfig.debug b/arch/arm64/Kconfig.debug
index 6236b3e1297c..a96db6bdaf4b 100644
--- a/arch/arm64/Kconfig.debug
+++ b/arch/arm64/Kconfig.debug
@@ -122,6 +122,9 @@ config KERNEL_TEXT_RDONLY
If unsure, say N.
+config ARM64_STRICT_BREAK_BEFORE_MAKE
+ bool "Enforce strict break-before-make on page table updates "
+
source "drivers/hwtracing/coresight/Kconfig"
endmenu
diff --git a/arch/arm64/configs/msmcortex-perf_defconfig b/arch/arm64/configs/msmcortex-perf_defconfig
index 80030b40a89b..5c07a8b80354 100644
--- a/arch/arm64/configs/msmcortex-perf_defconfig
+++ b/arch/arm64/configs/msmcortex-perf_defconfig
@@ -630,6 +630,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/msmcortex_defconfig b/arch/arm64/configs/msmcortex_defconfig
index a118a7a9cf51..f5b163eaec0b 100644
--- a/arch/arm64/configs/msmcortex_defconfig
+++ b/arch/arm64/configs/msmcortex_defconfig
@@ -668,6 +668,7 @@ CONFIG_PID_IN_CONTEXTIDR=y
CONFIG_DEBUG_SET_MODULE_RONX=y
CONFIG_DEBUG_RODATA=y
CONFIG_FREE_PAGES_RDONLY=y
+CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE=y
CONFIG_CORESIGHT=y
CONFIG_CORESIGHT_EVENT=y
CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
@@ -696,6 +697,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/msmcortex_mediabox_defconfig b/arch/arm64/configs/msmcortex_mediabox_defconfig
index 967edf184f1b..27055fc4b9d5 100644
--- a/arch/arm64/configs/msmcortex_mediabox_defconfig
+++ b/arch/arm64/configs/msmcortex_mediabox_defconfig
@@ -349,7 +349,6 @@ CONFIG_MFD_SPMI_PMIC=y
CONFIG_MFD_I2C_PMIC=y
CONFIG_WCD9335_CODEC=y
CONFIG_WCD934X_CODEC=y
-CONFIG_REGULATOR=y
CONFIG_REGULATOR_FIXED_VOLTAGE=y
CONFIG_REGULATOR_RPM_SMD=y
CONFIG_REGULATOR_QPNP=y
@@ -408,14 +407,8 @@ CONFIG_DVB_MPQ_DEMUX=m
CONFIG_DVB_MPQ_MEDIA_BOX_DEMUX=y
CONFIG_TSPP=m
CONFIG_QCOM_KGSL=y
-CONFIG_FB=y
-CONFIG_FB_VIRTUAL=y
-CONFIG_FB_MSM=y
-CONFIG_FB_MSM_MDSS=y
-CONFIG_FB_MSM_MDSS_WRITEBACK=y
-CONFIG_FB_MSM_MDSS_HDMI_PANEL=y
-CONFIG_FB_MSM_MDSS_DP_PANEL=y
-CONFIG_FB_MSM_MDSS_XLOG_DEBUG=y
+CONFIG_DRM=y
+CONFIG_BACKLIGHT_LCD_SUPPORT=y
CONFIG_LOGO=y
# CONFIG_LOGO_LINUX_MONO is not set
# CONFIG_LOGO_LINUX_VGA16 is not set
@@ -497,6 +490,7 @@ CONFIG_STAGING=y
CONFIG_ASHMEM=y
CONFIG_ANDROID_TIMED_GPIO=y
CONFIG_ANDROID_LOW_MEMORY_KILLER=y
+CONFIG_SYNC=y
CONFIG_ION=y
CONFIG_ION_MSM=y
CONFIG_QPNP_REVID=y
@@ -545,7 +539,6 @@ CONFIG_MSM_SYSMON_GLINK_COMM=y
CONFIG_MSM_IPC_ROUTER_GLINK_XPRT=y
CONFIG_MSM_GLINK_PKT=y
CONFIG_MSM_SPM=y
-CONFIG_QCOM_SCM=y
CONFIG_QCOM_WATCHDOG_V2=y
CONFIG_QCOM_IRQ_HELPER=y
CONFIG_QCOM_MEMORY_DUMP_V2=y
@@ -608,7 +601,6 @@ CONFIG_EXT4_FS_ICE_ENCRYPTION=y
CONFIG_FUSE_FS=y
CONFIG_MSDOS_FS=y
CONFIG_VFAT_FS=y
-CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
CONFIG_EFIVAR_FS=y
CONFIG_ECRYPT_FS=y
diff --git a/arch/arm64/configs/sdm660-perf_defconfig b/arch/arm64/configs/sdm660-perf_defconfig
index d00772c12626..b0bdabf58d62 100644
--- a/arch/arm64/configs/sdm660-perf_defconfig
+++ b/arch/arm64/configs/sdm660-perf_defconfig
@@ -527,6 +527,7 @@ CONFIG_IOMMU_DEBUG_TRACKING=y
CONFIG_IOMMU_TESTS=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_GLINK=y
CONFIG_MSM_GLINK_LOOPBACK_SERVER=y
@@ -627,6 +628,8 @@ CONFIG_CORESIGHT_LINK_AND_SINK_TMC=y
CONFIG_CORESIGHT_QCOM_REPLICATOR=y
CONFIG_CORESIGHT_STM=y
CONFIG_CORESIGHT_HWEVENT=y
+CONFIG_CORESIGHT_CTI=y
+CONFIG_CORESIGHT_CTI_SAVE_DISABLE=y
CONFIG_CORESIGHT_TPDA=y
CONFIG_CORESIGHT_TPDM=y
CONFIG_CORESIGHT_QPDI=y
@@ -645,6 +648,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/configs/sdm660_defconfig b/arch/arm64/configs/sdm660_defconfig
index 207f3b217d8c..229de3920276 100644
--- a/arch/arm64/configs/sdm660_defconfig
+++ b/arch/arm64/configs/sdm660_defconfig
@@ -233,6 +233,7 @@ CONFIG_NFC_NQ=y
CONFIG_IPC_ROUTER=y
CONFIG_IPC_ROUTER_SECURITY=y
CONFIG_FW_LOADER_USER_HELPER_FALLBACK=y
+CONFIG_REGMAP_ALLOW_WRITE_DEBUGFS=y
CONFIG_DMA_CMA=y
CONFIG_ZRAM=y
CONFIG_BLK_DEV_LOOP=y
@@ -540,6 +541,7 @@ CONFIG_IOMMU_TESTS=y
CONFIG_QCOM_COMMON_LOG=y
CONFIG_MSM_SMEM=y
CONFIG_QPNP_HAPTIC=y
+CONFIG_QPNP_PBS=y
CONFIG_MSM_SMD=y
CONFIG_MSM_SMD_DEBUG=y
CONFIG_MSM_GLINK=y
@@ -714,6 +716,7 @@ CONFIG_CRYPTO_DEV_QCOM_MSM_QCE=y
CONFIG_CRYPTO_DEV_QCEDEV=y
CONFIG_CRYPTO_DEV_OTA_CRYPTO=y
CONFIG_CRYPTO_DEV_QCOM_ICE=y
+CONFIG_SYSTEM_TRUSTED_KEYS="verity.x509.pem"
CONFIG_ARM64_CRYPTO=y
CONFIG_CRYPTO_SHA1_ARM64_CE=y
CONFIG_CRYPTO_SHA2_ARM64_CE=y
diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 9a09ccf7122d..72b1c3fa1576 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -19,6 +19,7 @@
#include <asm/bug.h>
#include <asm/proc-fns.h>
+#include <asm/bug.h>
#include <asm/memory.h>
#include <asm/pgtable-hwdef.h>
@@ -229,6 +230,34 @@ static inline pmd_t pmd_mkcont(pmd_t pmd)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
+#ifdef CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE
+ pteval_t old = pte_val(*ptep);
+ pteval_t new = pte_val(pte);
+
+ /* Only problematic if valid -> valid */
+ if (!(old & new & PTE_VALID))
+ goto pte_ok;
+
+ /* Changing attributes should go via an invalid entry */
+ if (WARN_ON((old & PTE_ATTRINDX_MASK) != (new & PTE_ATTRINDX_MASK)))
+ goto pte_bad;
+
+ /* Change of OA is only an issue if one mapping is writable */
+ if (!(old & new & PTE_RDONLY) &&
+ WARN_ON(pte_pfn(*ptep) != pte_pfn(pte)))
+ goto pte_bad;
+
+ goto pte_ok;
+
+pte_bad:
+ *ptep = __pte(0);
+ dsb(ishst);
+ asm("tlbi vmalle1is");
+ dsb(ish);
+ isb();
+pte_ok:
+#endif
+
*ptep = pte;
/*
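
The new CONFIG_ARM64_STRICT_BREAK_BEFORE_MAKE check in set_pte() flags
valid-to-valid updates that change memory attributes or, for writable
mappings, the output address, then zaps the entry and the TLB so the
violation cannot linger. A hedged sketch of the sequence callers are
expected to follow instead (the helper and its call site are
illustrative; arm64 kernel context assumed):

static void remap_kernel_page_bbm(unsigned long addr, pte_t *ptep,
				  pte_t new)
{
	set_pte(ptep, __pte(0));			/* break: invalidate */
	flush_tlb_kernel_range(addr, addr + PAGE_SIZE);	/* flush stale TLB */
	set_pte(ptep, new);				/* make: install new */
}
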
diff --git a/arch/arm64/kernel/stacktrace.c b/arch/arm64/kernel/stacktrace.c
index 85aea381fbf6..cb3eec8e8e50 100644
--- a/arch/arm64/kernel/stacktrace.c
+++ b/arch/arm64/kernel/stacktrace.c
@@ -69,6 +69,8 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
frame->fp = *(unsigned long *)(fp);
frame->pc = *(unsigned long *)(fp + 8);
+ kasan_enable_current();
+
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
if (tsk && tsk->ret_stack &&
(frame->pc == (unsigned long)return_to_handler)) {
@@ -112,8 +114,6 @@ int notrace unwind_frame(struct task_struct *tsk, struct stackframe *frame)
}
}
- kasan_enable_current();
-
return 0;
}
diff --git a/drivers/base/dma-removed.c b/drivers/base/dma-removed.c
index 87c743995a50..5fa3c6bdeea0 100644
--- a/drivers/base/dma-removed.c
+++ b/drivers/base/dma-removed.c
@@ -1,6 +1,6 @@
/*
*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
* Copyright (C) 2000-2004 Russell King
*
* This program is free software; you can redistribute it and/or modify
@@ -296,6 +296,7 @@ void removed_free(struct device *dev, size_t size, void *cpu_addr,
attrs);
struct removed_region *dma_mem = dev->removed_mem;
+ size = PAGE_ALIGN(size);
if (!no_kernel_mapping)
iounmap(cpu_addr);
mutex_lock(&dma_mem->lock);
diff --git a/drivers/base/firmware_class.c b/drivers/base/firmware_class.c
index 87a48268b663..1c6e4da01e69 100644
--- a/drivers/base/firmware_class.c
+++ b/drivers/base/firmware_class.c
@@ -307,8 +307,7 @@ static const char * const fw_path[] = {
"/lib/firmware/updates/" UTS_RELEASE,
"/lib/firmware/updates",
"/lib/firmware/" UTS_RELEASE,
- "/lib/firmware",
- "/firmware/image"
+ "/lib/firmware"
};
/*
diff --git a/drivers/block/zram/zram_drv.c b/drivers/block/zram/zram_drv.c
index 72d088b1863c..3c59417b485a 100644
--- a/drivers/block/zram/zram_drv.c
+++ b/drivers/block/zram/zram_drv.c
@@ -521,7 +521,7 @@ static struct zram_meta *zram_meta_alloc(char *pool_name, u64 disksize)
goto out_error;
}
- meta->mem_pool = zs_create_pool(pool_name, GFP_NOIO | __GFP_HIGHMEM);
+ meta->mem_pool = zs_create_pool(pool_name);
if (!meta->mem_pool) {
pr_err("Error creating memory pool\n");
goto out_error;
@@ -725,7 +725,8 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
src = uncmem;
}
- handle = zs_malloc(meta->mem_pool, clen);
+ handle = zs_malloc(meta->mem_pool, clen, GFP_NOIO | __GFP_HIGHMEM |
+ __GFP_MOVABLE);
if (!handle) {
if (printk_timed_ratelimit(&zram_rs_time,
ALLOC_ERROR_LOG_RATE_MS))
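
This hunk tracks the zsmalloc API change where GFP flags moved from
zs_create_pool() to each zs_malloc() call, which also lets callers mark
objects __GFP_MOVABLE for compaction. A minimal usage sketch under that
API (helper name illustrative):

#include <linux/zsmalloc.h>

static unsigned long alloc_compressed(struct zs_pool *pool, size_t clen)
{
	/* flags now supplied per allocation, not at pool creation */
	return zs_malloc(pool, clen,
			 GFP_NOIO | __GFP_HIGHMEM | __GFP_MOVABLE);
}
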
diff --git a/drivers/bluetooth/btfm_slim_codec.c b/drivers/bluetooth/btfm_slim_codec.c
index 354b48bfa2a8..061177173ab8 100644
--- a/drivers/bluetooth/btfm_slim_codec.c
+++ b/drivers/bluetooth/btfm_slim_codec.c
@@ -54,7 +54,7 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
int ret;
struct btfmslim *btfmslim = dai->dev->platform_data;
- BTFMSLIM_DBG("substream = %s stream = %d dai name = %s",
+ BTFMSLIM_DBG("substream = %s stream = %d dai->name = %s",
substream->name, substream->stream, dai->name);
ret = btfm_slim_hw_init(btfmslim);
return ret;
@@ -63,33 +63,12 @@ static int btfm_slim_dai_startup(struct snd_pcm_substream *substream,
static void btfm_slim_dai_shutdown(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
- struct btfmslim *btfmslim = dai->dev->platform_data;
-
- BTFMSLIM_DBG("substream = %s stream = %d dai name = %s",
- substream->name, substream->stream, dai->name);
- btfm_slim_hw_deinit(btfmslim);
-}
-
-static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
- struct snd_pcm_hw_params *params,
- struct snd_soc_dai *dai)
-{
- BTFMSLIM_DBG("dai name = %s DAI-ID %x rate %d num_ch %d",
- dai->name, dai->id, params_rate(params),
- params_channels(params));
-
- return 0;
-}
-
-int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
- struct snd_soc_dai *dai)
-{
- int i, ret = -EINVAL;
+ int i;
struct btfmslim *btfmslim = dai->dev->platform_data;
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -110,7 +89,7 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
case BTFM_SLIM_NUM_CODEC_DAIS:
default:
BTFMSLIM_ERR("dai->id is invalid:%d", dai->id);
- return ret;
+ return;
}
/* Search for dai->id matched port handler */
@@ -122,14 +101,25 @@ int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
if ((ch->port == BTFM_SLIM_PGD_PORT_LAST) ||
(ch->id == BTFM_SLIM_NUM_CODEC_DAIS)) {
BTFMSLIM_ERR("ch is invalid!!");
- return ret;
+ return;
}
- ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
- return ret;
+ btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+ btfm_slim_hw_deinit(btfmslim);
}
-int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
+static int btfm_slim_dai_hw_params(struct snd_pcm_substream *substream,
+ struct snd_pcm_hw_params *params,
+ struct snd_soc_dai *dai)
+{
+ BTFMSLIM_DBG("dai->name = %s DAI-ID %x rate %d num_ch %d",
+ dai->name, dai->id, params_rate(params),
+ params_channels(params));
+
+ return 0;
+}
+
+int btfm_slim_dai_prepare(struct snd_pcm_substream *substream,
struct snd_soc_dai *dai)
{
int i, ret = -EINVAL;
@@ -137,7 +127,7 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
struct btfmslim_ch *ch;
uint8_t rxport, grp = false, nchan = 1;
- BTFMSLIM_DBG("dai name: %s, dai->id: %d, dai->rate: %d", dai->name,
+ BTFMSLIM_DBG("dai->name: %s, dai->id: %d, dai->rate: %d", dai->name,
dai->id, dai->rate);
switch (dai->id) {
@@ -172,7 +162,8 @@ int btfm_slim_dai_hw_free(struct snd_pcm_substream *substream,
BTFMSLIM_ERR("ch is invalid!!");
return ret;
}
- ret = btfm_slim_disable_ch(btfmslim, ch, rxport, grp, nchan);
+
+ ret = btfm_slim_enable_ch(btfmslim, ch, rxport, dai->rate, grp, nchan);
return ret;
}
@@ -315,7 +306,6 @@ static struct snd_soc_dai_ops btfmslim_dai_ops = {
.shutdown = btfm_slim_dai_shutdown,
.hw_params = btfm_slim_dai_hw_params,
.prepare = btfm_slim_dai_prepare,
- .hw_free = btfm_slim_dai_hw_free,
.set_channel_map = btfm_slim_dai_set_channel_map,
.get_channel_map = btfm_slim_dai_get_channel_map,
};
diff --git a/drivers/char/diag/diagchar_core.c b/drivers/char/diag/diagchar_core.c
index facdb0f40e44..335064352789 100644
--- a/drivers/char/diag/diagchar_core.c
+++ b/drivers/char/diag/diagchar_core.c
@@ -2606,7 +2606,8 @@ static int diag_user_process_raw_data(const char __user *buf, int len)
info = diag_md_session_get_pid(current->tgid);
ret = diag_process_apps_pkt(user_space_data, len, info);
if (ret == 1)
- diag_send_error_rsp((void *)(user_space_data), len);
+ diag_send_error_rsp((void *)(user_space_data), len,
+ info);
}
fail:
diagmem_free(driver, user_space_data, mempool);
diff --git a/drivers/char/diag/diagfwd.c b/drivers/char/diag/diagfwd.c
index a7069bc0edf3..99a16dd47cd4 100644
--- a/drivers/char/diag/diagfwd.c
+++ b/drivers/char/diag/diagfwd.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -239,10 +239,11 @@ void chk_logging_wakeup(void)
}
}
-static void pack_rsp_and_send(unsigned char *buf, int len)
+static void pack_rsp_and_send(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
int err;
- int retry_count = 0;
+ int retry_count = 0, i, rsp_ctxt;
uint32_t write_len = 0;
unsigned long flags;
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
@@ -257,6 +258,15 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
return;
}
+ if (info && info->peripheral_mask) {
+ for (i = 0; i <= NUM_PERIPHERALS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+
/*
* Keep trying till we get the buffer back. It should probably
* take one or two iterations. When this loops till UINT_MAX, it
@@ -298,8 +308,7 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
*(uint8_t *)(rsp_ptr + write_len) = CONTROL_CHAR;
write_len += sizeof(uint8_t);
- err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len,
- driver->rsp_buf_ctxt);
+ err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, write_len, rsp_ctxt);
if (err) {
pr_err("diag: In %s, unable to write to mux, err: %d\n",
__func__, err);
@@ -309,12 +318,13 @@ static void pack_rsp_and_send(unsigned char *buf, int len)
}
}
-static void encode_rsp_and_send(unsigned char *buf, int len)
+static void encode_rsp_and_send(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
struct diag_send_desc_type send = { NULL, NULL, DIAG_STATE_START, 0 };
struct diag_hdlc_dest_type enc = { NULL, NULL, 0 };
unsigned char *rsp_ptr = driver->encoded_rsp_buf;
- int err, retry_count = 0;
+ int err, i, rsp_ctxt, retry_count = 0;
unsigned long flags;
if (!rsp_ptr || !buf)
@@ -326,6 +336,15 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
return;
}
+ if (info && info->peripheral_mask) {
+ for (i = 0; i <= NUM_PERIPHERALS; i++) {
+ if (info->peripheral_mask & (1 << i))
+ break;
+ }
+ rsp_ctxt = SET_BUF_CTXT(i, TYPE_CMD, 1);
+ } else
+ rsp_ctxt = driver->rsp_buf_ctxt;
+
/*
* Keep trying till we get the buffer back. It should probably
* take one or two iterations. When this loops till UINT_MAX, it
@@ -369,7 +388,7 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
diag_hdlc_encode(&send, &enc);
driver->encoded_rsp_len = (int)(enc.dest - (void *)rsp_ptr);
err = diag_mux_write(DIAG_LOCAL_PROC, rsp_ptr, driver->encoded_rsp_len,
- driver->rsp_buf_ctxt);
+ rsp_ctxt);
if (err) {
pr_err("diag: In %s, Unable to write to device, err: %d\n",
__func__, err);
@@ -380,21 +399,22 @@ static void encode_rsp_and_send(unsigned char *buf, int len)
memset(buf, '\0', DIAG_MAX_RSP_SIZE);
}
-void diag_send_rsp(unsigned char *buf, int len)
+void diag_send_rsp(unsigned char *buf, int len, struct diag_md_session_t *info)
{
struct diag_md_session_t *session_info = NULL;
uint8_t hdlc_disabled;
- session_info = diag_md_session_get_peripheral(APPS_DATA);
+ session_info = (info) ? info :
+ diag_md_session_get_peripheral(APPS_DATA);
if (session_info)
hdlc_disabled = session_info->hdlc_disabled;
else
hdlc_disabled = driver->hdlc_disabled;
if (hdlc_disabled)
- pack_rsp_and_send(buf, len);
+ pack_rsp_and_send(buf, len, session_info);
else
- encode_rsp_and_send(buf, len);
+ encode_rsp_and_send(buf, len, session_info);
}
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type)
@@ -865,7 +885,8 @@ static int diag_cmd_disable_hdlc(unsigned char *src_buf, int src_len,
return write_len;
}
-void diag_send_error_rsp(unsigned char *buf, int len)
+void diag_send_error_rsp(unsigned char *buf, int len,
+ struct diag_md_session_t *info)
{
	/* -1 to accommodate the first byte 0x13 */
if (len > (DIAG_MAX_RSP_SIZE - 1)) {
@@ -875,7 +896,7 @@ void diag_send_error_rsp(unsigned char *buf, int len)
*(uint8_t *)driver->apps_rsp_buf = DIAG_CMD_ERROR;
memcpy((driver->apps_rsp_buf + sizeof(uint8_t)), buf, len);
- diag_send_rsp(driver->apps_rsp_buf, len + 1);
+ diag_send_rsp(driver->apps_rsp_buf, len + 1, info);
}
int diag_process_apps_pkt(unsigned char *buf, int len,
@@ -895,7 +916,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
/* Check if the command is a supported mask command */
mask_ret = diag_process_apps_masks(buf, len, info);
if (mask_ret > 0) {
- diag_send_rsp(driver->apps_rsp_buf, mask_ret);
+ diag_send_rsp(driver->apps_rsp_buf, mask_ret, info);
return 0;
}
@@ -917,7 +938,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
@@ -933,7 +954,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
} else {
if (MD_PERIPHERAL_MASK(reg_item->proc) &
driver->logging_mask)
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
else
write_len = diag_send_data(reg_item, buf, len);
}
@@ -949,13 +970,13 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 4; i++)
*(driver->apps_rsp_buf+i) = *(buf+i);
*(uint32_t *)(driver->apps_rsp_buf+4) = DIAG_MAX_REQ_SIZE;
- diag_send_rsp(driver->apps_rsp_buf, 8);
+ diag_send_rsp(driver->apps_rsp_buf, 8, info);
return 0;
} else if ((*buf == 0x4b) && (*(buf+1) == 0x12) &&
(*(uint16_t *)(buf+2) == DIAG_DIAG_STM)) {
len = diag_process_stm_cmd(buf, driver->apps_rsp_buf);
if (len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, len);
+ diag_send_rsp(driver->apps_rsp_buf, len, info);
return 0;
}
return len;
@@ -968,7 +989,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
/* Check for time sync switch command */
@@ -979,14 +1000,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0)
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
/* Check for download command */
else if ((chk_apps_master()) && (*buf == 0x3A)) {
/* send response back */
driver->apps_rsp_buf[0] = *buf;
- diag_send_rsp(driver->apps_rsp_buf, 1);
+ diag_send_rsp(driver->apps_rsp_buf, 1, info);
msleep(5000);
/* call download API */
msm_set_restart_mode(RESTART_DLOAD);
@@ -1006,7 +1027,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 13; i++)
driver->apps_rsp_buf[i+3] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 16);
+ diag_send_rsp(driver->apps_rsp_buf, 16, info);
return 0;
}
}
@@ -1015,7 +1036,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
(*(buf+2) == 0x04) && (*(buf+3) == 0x0)) {
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_enabled;
- diag_send_rsp(driver->apps_rsp_buf, 5);
+ diag_send_rsp(driver->apps_rsp_buf, 5, info);
return 0;
}
/* Wrap the Delayed Rsp ID */
@@ -1024,7 +1045,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
wrap_enabled = true;
memcpy(driver->apps_rsp_buf, buf, 4);
driver->apps_rsp_buf[4] = wrap_count;
- diag_send_rsp(driver->apps_rsp_buf, 6);
+ diag_send_rsp(driver->apps_rsp_buf, 6, info);
return 0;
}
/* Mobile ID Rsp */
@@ -1035,7 +1056,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
}
@@ -1055,7 +1076,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
for (i = 0; i < 55; i++)
driver->apps_rsp_buf[i] = 0;
- diag_send_rsp(driver->apps_rsp_buf, 55);
+ diag_send_rsp(driver->apps_rsp_buf, 55, info);
return 0;
}
/* respond to 0x7c command */
@@ -1068,14 +1089,14 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
chk_config_get_id();
*(unsigned char *)(driver->apps_rsp_buf + 12) = '\0';
*(unsigned char *)(driver->apps_rsp_buf + 13) = '\0';
- diag_send_rsp(driver->apps_rsp_buf, 14);
+ diag_send_rsp(driver->apps_rsp_buf, 14, info);
return 0;
}
}
write_len = diag_cmd_chk_stats(buf, len, driver->apps_rsp_buf,
DIAG_MAX_RSP_SIZE);
if (write_len > 0) {
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
return 0;
}
write_len = diag_cmd_disable_hdlc(buf, len, driver->apps_rsp_buf,
@@ -1087,7 +1108,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
* before disabling HDLC encoding on Apps processor.
*/
mutex_lock(&driver->hdlc_disable_mutex);
- diag_send_rsp(driver->apps_rsp_buf, write_len);
+ diag_send_rsp(driver->apps_rsp_buf, write_len, info);
/*
* Set the value of hdlc_disabled after sending the response to
* the tools. This is required since the tools is expecting a
@@ -1107,7 +1128,7 @@ int diag_process_apps_pkt(unsigned char *buf, int len,
/* We have now come to the end of the function. */
if (chk_apps_only())
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
return 0;
}
@@ -1190,7 +1211,7 @@ fail:
* recovery algorithm. Send an error response if the
* packet is not in expected format.
*/
- diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len);
+ diag_send_error_rsp(driver->hdlc_buf, driver->hdlc_buf_len, info);
driver->hdlc_buf_len = 0;
end:
mutex_unlock(&driver->diag_hdlc_mutex);
@@ -1446,7 +1467,7 @@ start:
if (actual_pkt->start != CONTROL_CHAR) {
diag_hdlc_start_recovery(buf, len, info);
- diag_send_error_rsp(buf, len);
+ diag_send_error_rsp(buf, len, info);
goto end;
}
@@ -1528,15 +1549,14 @@ static int diagfwd_mux_write_done(unsigned char *buf, int len, int buf_ctxt,
case TYPE_CMD:
if (peripheral >= 0 && peripheral < NUM_PERIPHERALS) {
diagfwd_write_done(peripheral, type, num);
- } else if (peripheral == APPS_DATA) {
+ }
+ if (peripheral == APPS_DATA ||
+ ctxt == DIAG_MEMORY_DEVICE_MODE) {
spin_lock_irqsave(&driver->rsp_buf_busy_lock, flags);
driver->rsp_buf_busy = 0;
driver->encoded_rsp_len = 0;
spin_unlock_irqrestore(&driver->rsp_buf_busy_lock,
flags);
- } else {
- pr_err_ratelimited("diag: Invalid peripheral %d in %s, type: %d\n",
- peripheral, __func__, type);
}
break;
default:
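
The rsp_ctxt selection added to pack_rsp_and_send() and
encode_rsp_and_send() scans the session's peripheral_mask for its
lowest set bit. ffs() performs the same scan as the open-coded loop; a
hedged equivalent:

#include <linux/bitops.h>

static int pick_rsp_peripheral(u32 peripheral_mask)
{
	/* lowest set bit, i.e. the first peripheral in the mask */
	return peripheral_mask ? ffs(peripheral_mask) - 1 : -1;
}
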
diff --git a/drivers/char/diag/diagfwd.h b/drivers/char/diag/diagfwd.h
index 0023e0638aa2..4c6d86fc36ae 100644
--- a/drivers/char/diag/diagfwd.h
+++ b/drivers/char/diag/diagfwd.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2008-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2008-2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -45,7 +45,8 @@ void diag_update_userspace_clients(unsigned int type);
void diag_update_sleeping_process(int process_id, int data_type);
int diag_process_apps_pkt(unsigned char *buf, int len,
struct diag_md_session_t *info);
-void diag_send_error_rsp(unsigned char *buf, int len);
+void diag_send_error_rsp(unsigned char *buf, int len,
+ struct diag_md_session_t *info);
void diag_update_pkt_buffer(unsigned char *buf, uint32_t len, int type);
int diag_process_stm_cmd(unsigned char *buf, unsigned char *dest_buf);
void diag_md_hdlc_reset_timer_func(unsigned long pid);
diff --git a/drivers/char/diag/diagfwd_glink.c b/drivers/char/diag/diagfwd_glink.c
index e761e6ec4e39..37f3bd2626c8 100644
--- a/drivers/char/diag/diagfwd_glink.c
+++ b/drivers/char/diag/diagfwd_glink.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -462,6 +462,31 @@ static int diag_glink_write(void *ctxt, unsigned char *buf, int len)
return err;
}
+
+static void diag_glink_connect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ connect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 1);
+ diagfwd_channel_open(glink_info->fwd_ctxt);
+ diagfwd_late_open(glink_info->fwd_ctxt);
+}
+
+static void diag_glink_remote_disconnect_work_fn(struct work_struct *work)
+{
+ struct diag_glink_info *glink_info = container_of(work,
+ struct diag_glink_info,
+ remote_disconnect_work);
+ if (!glink_info || !glink_info->hdl)
+ return;
+ atomic_set(&glink_info->opened, 0);
+ diagfwd_channel_close(glink_info->fwd_ctxt);
+ atomic_set(&glink_info->tx_intent_ready, 0);
+}
+
static void diag_glink_transport_notify_state(void *handle, const void *priv,
unsigned event)
{
@@ -475,9 +500,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel connect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 1);
- diagfwd_channel_open(glink_info->fwd_ctxt);
- diagfwd_late_open(glink_info->fwd_ctxt);
+ queue_work(glink_info->wq, &glink_info->connect_work);
break;
case GLINK_LOCAL_DISCONNECTED:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
@@ -489,9 +512,7 @@ static void diag_glink_transport_notify_state(void *handle, const void *priv,
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
"%s received channel remote disconnect for periph:%d\n",
glink_info->name, glink_info->peripheral);
- atomic_set(&glink_info->opened, 0);
- diagfwd_channel_close(glink_info->fwd_ctxt);
- atomic_set(&glink_info->tx_intent_ready, 0);
+ queue_work(glink_info->wq, &glink_info->remote_disconnect_work);
break;
default:
DIAG_LOG(DIAG_DEBUG_PERIPHERALS,
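
The two hunks above defer the open/close side effects out of the G-Link state callback, which may run in atomic context, onto the channel's workqueue where diagfwd_channel_open()/diagfwd_channel_close() can sleep. The pattern, as a minimal sketch with hypothetical names:

struct my_chan {
	struct workqueue_struct *wq;
	struct work_struct connect_work;
	atomic_t opened;
};

static void my_connect_work_fn(struct work_struct *work)
{
	struct my_chan *ch = container_of(work, struct my_chan, connect_work);

	atomic_set(&ch->opened, 1);
	/* channel-open actions that may sleep go here */
}

static void my_notify_state(struct my_chan *ch, unsigned int event)
{
	/* Called from the transport's notifier, possibly in atomic
	 * context: only queue work here, never sleep.
	 */
	if (event == GLINK_CONNECTED)
		queue_work(ch->wq, &ch->connect_work);
}
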
@@ -641,6 +662,9 @@ static void __diag_glink_init(struct diag_glink_info *glink_info)
INIT_WORK(&(glink_info->open_work), diag_glink_open_work_fn);
INIT_WORK(&(glink_info->close_work), diag_glink_close_work_fn);
INIT_WORK(&(glink_info->read_work), diag_glink_read_work_fn);
+ INIT_WORK(&(glink_info->connect_work), diag_glink_connect_work_fn);
+ INIT_WORK(&(glink_info->remote_disconnect_work),
+ diag_glink_remote_disconnect_work_fn);
link_info.glink_link_state_notif_cb = diag_glink_notify_cb;
link_info.transport = NULL;
link_info.edge = glink_info->edge;
@@ -681,6 +705,8 @@ int diag_glink_init(void)
struct diag_glink_info *glink_info = NULL;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
glink_info = &glink_cntl[peripheral];
__diag_glink_init(glink_info);
diagfwd_cntl_register(TRANSPORT_GLINK, glink_info->peripheral,
@@ -719,6 +745,8 @@ void diag_glink_early_exit(void)
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_cntl[peripheral]);
glink_unregister_link_state_cb(&glink_cntl[peripheral].hdl);
}
@@ -729,6 +757,8 @@ void diag_glink_exit(void)
int peripheral = 0;
for (peripheral = 0; peripheral < NUM_PERIPHERALS; peripheral++) {
+ if (peripheral != PERIPHERAL_WDSP)
+ continue;
__diag_glink_exit(&glink_data[peripheral]);
__diag_glink_exit(&glink_cmd[peripheral]);
__diag_glink_exit(&glink_dci[peripheral]);
diff --git a/drivers/char/diag/diagfwd_glink.h b/drivers/char/diag/diagfwd_glink.h
index bad4629b5ab8..5c1abeffd498 100644
--- a/drivers/char/diag/diagfwd_glink.h
+++ b/drivers/char/diag/diagfwd_glink.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -35,6 +35,8 @@ struct diag_glink_info {
struct work_struct open_work;
struct work_struct close_work;
struct work_struct read_work;
+ struct work_struct connect_work;
+ struct work_struct remote_disconnect_work;
struct diagfwd_info *fwd_ctxt;
};
diff --git a/drivers/clk/msm/clock-gcc-8998.c b/drivers/clk/msm/clock-gcc-8998.c
index 8b7efbd093b5..f9d713a22c76 100644
--- a/drivers/clk/msm/clock-gcc-8998.c
+++ b/drivers/clk/msm/clock-gcc-8998.c
@@ -239,28 +239,6 @@ static struct pll_vote_clk gpll4 = {
};
DEFINE_EXT_CLK(gpll4_out_main, &gpll4.c);
-static struct clk_freq_tbl ftbl_hmss_ahb_clk_src[] = {
- F( 19200000, cxo_clk_src_ao, 1, 0, 0),
- F( 50000000, gpll0_out_main, 12, 0, 0),
- F( 100000000, gpll0_out_main, 6, 0, 0),
- F_END
-};
-
-static struct rcg_clk hmss_ahb_clk_src = {
- .cmd_rcgr_reg = GCC_HMSS_AHB_CMD_RCGR,
- .set_rate = set_rate_hid,
- .freq_tbl = ftbl_hmss_ahb_clk_src,
- .current_freq = &rcg_dummy_freq,
- .base = &virt_base,
- .c = {
- .dbg_name = "hmss_ahb_clk_src",
- .ops = &clk_ops_rcg,
- VDD_DIG_FMAX_MAP3_AO(LOWER, 19200000, LOW, 50000000,
- NOMINAL, 100000000),
- CLK_INIT(hmss_ahb_clk_src.c),
- },
-};
-
static struct clk_freq_tbl ftbl_usb30_master_clk_src[] = {
F( 19200000, cxo_clk_src, 1, 0, 0),
F( 60000000, gpll0_out_main, 10, 0, 0),
@@ -1732,20 +1710,6 @@ static struct branch_clk gcc_gpu_iref_clk = {
},
};
-static struct local_vote_clk gcc_hmss_ahb_clk = {
- .cbcr_reg = GCC_HMSS_AHB_CBCR,
- .vote_reg = GCC_APCS_CLOCK_BRANCH_ENA_VOTE,
- .en_mask = BIT(21),
- .base = &virt_base,
- .c = {
- .dbg_name = "gcc_hmss_ahb_clk",
- .always_on = true,
- .parent = &hmss_ahb_clk_src.c,
- .ops = &clk_ops_vote,
- CLK_INIT(gcc_hmss_ahb_clk.c),
- },
-};
-
static struct branch_clk gcc_hmss_dvm_bus_clk = {
.cbcr_reg = GCC_HMSS_DVM_BUS_CBCR,
.has_sibling = 1,
@@ -2408,7 +2372,6 @@ static struct mux_clk gcc_debug_mux = {
{ &gcc_ce1_ahb_m_clk.c, 0x0099 },
{ &measure_only_bimc_hmss_axi_clk.c, 0x00bb },
{ &gcc_bimc_gfx_clk.c, 0x00ac },
- { &gcc_hmss_ahb_clk.c, 0x00ba },
{ &gcc_hmss_rbcpr_clk.c, 0x00bc },
{ &gcc_gp1_clk.c, 0x00df },
{ &gcc_gp2_clk.c, 0x00e0 },
@@ -2531,7 +2494,6 @@ static struct clk_lookup msm_clocks_gcc_8998[] = {
CLK_LIST(gcc_gpu_gpll0_div_clk),
CLK_LIST(gpll4),
CLK_LIST(gpll4_out_main),
- CLK_LIST(hmss_ahb_clk_src),
CLK_LIST(usb30_master_clk_src),
CLK_LIST(pcie_aux_clk_src),
CLK_LIST(ufs_axi_clk_src),
@@ -2629,7 +2591,6 @@ static struct clk_lookup msm_clocks_gcc_8998[] = {
CLK_LIST(gcc_gpu_bimc_gfx_clk),
CLK_LIST(gcc_gpu_cfg_ahb_clk),
CLK_LIST(gcc_gpu_iref_clk),
- CLK_LIST(gcc_hmss_ahb_clk),
CLK_LIST(gcc_hmss_dvm_bus_clk),
CLK_LIST(gcc_hmss_rbcpr_clk),
CLK_LIST(gcc_mmss_noc_cfg_ahb_clk),
@@ -2742,12 +2703,12 @@ static int msm_gcc_8998_probe(struct platform_device *pdev)
}
/*
- * Set the HMSS_AHB_CLK_SLEEP_ENA bit to allow the hmss_ahb_clk to be
- * turned off by hardware during certain apps low power modes.
+ * Clear the HMSS_AHB_CLK_ENA bit to allow the gcc_hmss_ahb_clk clock
+ * to be gated by RPM during VDD_MIN.
*/
- regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
- regval |= BIT(21);
- writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_SLEEP_ENA_VOTE);
+ regval = readl_relaxed(virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
+ regval &= ~BIT(21);
+ writel_relaxed(regval, virt_base + GCC_APCS_CLOCK_BRANCH_ENA_VOTE);
vdd_dig.regulator[0] = devm_regulator_get(&pdev->dev, "vdd_dig");
if (IS_ERR(vdd_dig.regulator[0])) {
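
The probe change above is a plain read-modify-write on the APCS branch-enable vote register using the relaxed MMIO accessors. As a small sketch (the helper name is hypothetical):

static void gcc_clear_vote_bit(void __iomem *base, u32 offset, u32 bit)
{
	u32 regval;

	regval = readl_relaxed(base + offset);
	regval &= ~BIT(bit);
	writel_relaxed(regval, base + offset);
	/* make the cleared vote visible before dependent clock calls */
	mb();
}
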
diff --git a/drivers/clk/msm/clock-generic.c b/drivers/clk/msm/clock-generic.c
index eeb940e434cf..69e47a103402 100644
--- a/drivers/clk/msm/clock-generic.c
+++ b/drivers/clk/msm/clock-generic.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -491,6 +491,9 @@ static long __slave_div_round_rate(struct clk *c, unsigned long rate,
if (best_div)
*best_div = div;
+ if (d->data.is_half_divider)
+ p_rate *= 2;
+
return p_rate / div;
}
@@ -530,9 +533,16 @@ static int slave_div_set_rate(struct clk *c, unsigned long rate)
static unsigned long slave_div_get_rate(struct clk *c)
{
struct div_clk *d = to_div_clk(c);
+ unsigned long rate;
+
if (!d->data.div)
return 0;
- return clk_get_rate(c->parent) / d->data.div;
+
+ rate = clk_get_rate(c->parent) / d->data.div;
+ if (d->data.is_half_divider)
+ rate *= 2;
+
+ return rate;
}
struct clk_ops clk_ops_slave_div = {
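
With is_half_divider set, the register value div encodes twice the real divider, so the effective rate is parent_rate * 2 / div; both the round_rate and get_rate paths above now apply the doubling. A self-contained model of the math:

#include <stdio.h>

/* Minimal model of the slave_div rate computation above. */
static unsigned long slave_div_rate(unsigned long parent_rate,
				    unsigned int div, int is_half_divider)
{
	unsigned long rate = parent_rate / div;

	if (is_half_divider)	/* div encodes 2x the real divider */
		rate *= 2;
	return rate;
}

int main(void)
{
	/* 300 MHz parent, register value 3 -> real divider 1.5 -> 200 MHz */
	printf("%lu\n", slave_div_rate(300000000UL, 3, 1));
	return 0;
}
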
diff --git a/drivers/clk/msm/clock-mmss-8998.c b/drivers/clk/msm/clock-mmss-8998.c
index 1661878fc650..6ebb3ed6ed91 100644
--- a/drivers/clk/msm/clock-mmss-8998.c
+++ b/drivers/clk/msm/clock-mmss-8998.c
@@ -664,8 +664,8 @@ static struct rcg_clk byte0_clk_src = {
.parent = &ext_byte0_clk_src.c,
.ops = &clk_ops_byte_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 240000000,
- NOMINAL, 357140000),
+ VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+ NOMINAL, 312500000),
CLK_INIT(byte0_clk_src.c),
},
};
@@ -681,8 +681,8 @@ static struct rcg_clk byte1_clk_src = {
.parent = &ext_byte1_clk_src.c,
.ops = &clk_ops_byte_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 150000000, LOW, 240000000,
- NOMINAL, 357140000),
+ VDD_DIG_FMAX_MAP3(LOWER, 131250000, LOW, 210000000,
+ NOMINAL, 312500000),
CLK_INIT(byte1_clk_src.c),
},
};
@@ -722,8 +722,8 @@ static struct rcg_clk pclk0_clk_src = {
.parent = &ext_pclk0_clk_src.c,
.ops = &clk_ops_pixel_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 184000000, LOW, 295000000,
- NOMINAL, 610000000),
+ VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+ NOMINAL, 416670000),
CLK_INIT(pclk0_clk_src.c),
},
};
@@ -739,8 +739,8 @@ static struct rcg_clk pclk1_clk_src = {
.parent = &ext_pclk1_clk_src.c,
.ops = &clk_ops_pixel_multiparent,
.flags = CLKFLAG_NO_RATE_CACHE,
- VDD_DIG_FMAX_MAP3(LOWER, 184000000, LOW, 295000000,
- NOMINAL, 610000000),
+ VDD_DIG_FMAX_MAP3(LOWER, 175000000, LOW, 280000000,
+ NOMINAL, 416670000),
CLK_INIT(pclk1_clk_src.c),
},
};
diff --git a/drivers/clk/msm/clock-osm.c b/drivers/clk/msm/clock-osm.c
index 1639b1b7f94b..9d9aa61c480a 100644
--- a/drivers/clk/msm/clock-osm.c
+++ b/drivers/clk/msm/clock-osm.c
@@ -89,6 +89,7 @@ enum clk_osm_trace_packet_id {
#define SINGLE_CORE 1
#define MAX_CORE_COUNT 4
#define LLM_SW_OVERRIDE_CNT 3
+#define OSM_SEQ_MINUS_ONE 0xff
#define ENABLE_REG 0x1004
#define INDEX_REG 0x1150
@@ -367,8 +368,10 @@ struct clk_osm {
u32 cluster_num;
u32 irq;
u32 apm_crossover_vc;
+ u32 apm_threshold_pre_vc;
u32 apm_threshold_vc;
u32 mem_acc_crossover_vc;
+ u32 mem_acc_threshold_pre_vc;
u32 mem_acc_threshold_vc;
u32 cycle_counter_reads;
u32 cycle_counter_delay;
@@ -1646,10 +1649,26 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
if (corner_volt >= apm_threshold) {
c->apm_threshold_vc = c->osm_table[i].virtual_corner;
+ /*
+ * Handle the case where VC 0 has an open-loop voltage
+ * greater than or equal to the APM threshold voltage.
+ */
+ c->apm_threshold_pre_vc = c->apm_threshold_vc ?
+ c->apm_threshold_vc - 1 : OSM_SEQ_MINUS_ONE;
break;
}
}
+ /*
+ * This assumes the OSM table uses corners
+ * 0 to MAX_VIRTUAL_CORNER - 1.
+ */
+ if (!c->apm_threshold_vc &&
+ c->apm_threshold_pre_vc != OSM_SEQ_MINUS_ONE) {
+ c->apm_threshold_vc = MAX_VIRTUAL_CORNER;
+ c->apm_threshold_pre_vc = c->apm_threshold_vc - 1;
+ }
+
/* Determine MEM ACC threshold virtual corner */
if (mem_acc_threshold) {
for (i = 0; i < OSM_TABLE_SIZE; i++) {
@@ -1660,6 +1679,15 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
if (corner_volt >= mem_acc_threshold) {
c->mem_acc_threshold_vc
= c->osm_table[i].virtual_corner;
+ /*
+ * Handle the case where VC 0 has an open-loop voltage
+ * greater than or equal to the MEM-ACC threshold
+ * voltage.
+ */
+ c->mem_acc_threshold_pre_vc =
+ c->mem_acc_threshold_vc ?
+ c->mem_acc_threshold_vc - 1 :
+ OSM_SEQ_MINUS_ONE;
break;
}
}
@@ -1668,9 +1696,13 @@ static int clk_osm_resolve_crossover_corners(struct clk_osm *c,
* This assumes the OSM table uses corners
* 0 to MAX_VIRTUAL_CORNER - 1.
*/
- if (!c->mem_acc_threshold_vc)
+ if (!c->mem_acc_threshold_vc && c->mem_acc_threshold_pre_vc
+ != OSM_SEQ_MINUS_ONE) {
c->mem_acc_threshold_vc =
MAX_VIRTUAL_CORNER;
+ c->mem_acc_threshold_pre_vc =
+ c->mem_acc_threshold_vc - 1;
+ }
}
return 0;
@@ -2021,13 +2053,20 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* highest MEM ACC threshold if it is specified instead of the
* fixed mapping in the LUT.
*/
- if (c->mem_acc_threshold_vc) {
- threshold_vc[2] = c->mem_acc_threshold_vc - 1;
+ if (c->mem_acc_threshold_vc || c->mem_acc_threshold_pre_vc
+ == OSM_SEQ_MINUS_ONE) {
+ threshold_vc[2] = c->mem_acc_threshold_pre_vc;
threshold_vc[3] = c->mem_acc_threshold_vc;
- if (threshold_vc[1] >= threshold_vc[2])
- threshold_vc[1] = threshold_vc[2] - 1;
- if (threshold_vc[0] >= threshold_vc[1])
- threshold_vc[0] = threshold_vc[1] - 1;
+
+ if (c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE) {
+ threshold_vc[1] = threshold_vc[0] =
+ c->mem_acc_threshold_pre_vc;
+ } else {
+ if (threshold_vc[1] >= threshold_vc[2])
+ threshold_vc[1] = threshold_vc[2] - 1;
+ if (threshold_vc[0] >= threshold_vc[1])
+ threshold_vc[0] = threshold_vc[1] - 1;
+ }
}
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(55),
@@ -2045,7 +2084,8 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* Program L_VAL corresponding to the first virtual
* corner with MEM ACC level 3.
*/
- if (c->mem_acc_threshold_vc)
+ if (c->mem_acc_threshold_vc ||
+ c->mem_acc_threshold_pre_vc == OSM_SEQ_MINUS_ONE)
for (i = 0; i < c->num_entries; i++)
if (c->mem_acc_threshold_vc == table[i].virtual_corner)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
@@ -2365,8 +2405,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
SEQ_REG(8));
clk_osm_write_reg(c, c->apm_threshold_vc,
SEQ_REG(15));
- clk_osm_write_reg(c, c->apm_threshold_vc != 0 ?
- c->apm_threshold_vc - 1 : 0xff,
+ clk_osm_write_reg(c, c->apm_threshold_pre_vc,
SEQ_REG(31));
clk_osm_write_reg(c, 0x3b | c->apm_threshold_vc << 6,
SEQ_REG(73));
@@ -2390,8 +2429,7 @@ static void clk_osm_apm_vc_setup(struct clk_osm *c)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(15),
c->apm_threshold_vc);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(31),
- c->apm_threshold_vc != 0 ?
- c->apm_threshold_vc - 1 : 0xff);
+ c->apm_threshold_pre_vc);
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(76),
0x39 | c->apm_threshold_vc << 6);
}
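
Both threshold paths above need the virtual corner just below the threshold corner; when VC 0 itself already meets the threshold there is no lower corner, so OSM_SEQ_MINUS_ONE (0xff) is written as the sequencer's minus-one sentinel. The derivation, distilled into a sketch:

/* Pre-corner derivation used for both the APM and MEM-ACC thresholds:
 * corner - 1, or the 0xff sentinel when the threshold corner is VC 0.
 */
static u32 threshold_pre_vc(u32 threshold_vc)
{
	return threshold_vc ? threshold_vc - 1 : OSM_SEQ_MINUS_ONE;
}
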
diff --git a/drivers/clk/qcom/clk-cpu-osm.c b/drivers/clk/qcom/clk-cpu-osm.c
index e88d70f07a1c..6b00bee337a1 100644
--- a/drivers/clk/qcom/clk-cpu-osm.c
+++ b/drivers/clk/qcom/clk-cpu-osm.c
@@ -1952,16 +1952,21 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
}
}
+ threshold_vc[0] = mem_acc_level_map[0];
+ threshold_vc[1] = mem_acc_level_map[0] + 1;
+ threshold_vc[2] = mem_acc_level_map[1];
+ threshold_vc[3] = mem_acc_level_map[1] + 1;
+
if (c->secure_init) {
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(1), SEQ_REG(51));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(2), SEQ_REG(52));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(3), SEQ_REG(53));
clk_osm_write_reg(c, MEM_ACC_SEQ_CONST(4), SEQ_REG(54));
clk_osm_write_reg(c, MEM_ACC_APM_READ_MASK, SEQ_REG(59));
- clk_osm_write_reg(c, mem_acc_level_map[0], SEQ_REG(55));
- clk_osm_write_reg(c, mem_acc_level_map[0] + 1, SEQ_REG(56));
- clk_osm_write_reg(c, mem_acc_level_map[1], SEQ_REG(57));
- clk_osm_write_reg(c, mem_acc_level_map[1] + 1, SEQ_REG(58));
+ clk_osm_write_reg(c, threshold_vc[0], SEQ_REG(55));
+ clk_osm_write_reg(c, threshold_vc[1], SEQ_REG(56));
+ clk_osm_write_reg(c, threshold_vc[2], SEQ_REG(57));
+ clk_osm_write_reg(c, threshold_vc[3], SEQ_REG(58));
clk_osm_write_reg(c, c->pbases[OSM_BASE] + SEQ_REG(28),
SEQ_REG(49));
@@ -1977,11 +1982,6 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(88),
c->mem_acc_crossover_vc);
- threshold_vc[0] = mem_acc_level_map[0];
- threshold_vc[1] = mem_acc_level_map[0] + 1;
- threshold_vc[2] = mem_acc_level_map[1];
- threshold_vc[3] = mem_acc_level_map[1] + 1;
-
/*
* Use dynamic MEM ACC threshold voltage based value for the
* highest MEM ACC threshold if it is specified instead of the
@@ -2011,11 +2011,10 @@ static void clk_osm_program_mem_acc_regs(struct clk_osm *c)
* Program L_VAL corresponding to the first virtual
* corner with MEM ACC level 3.
*/
- if (c->mem_acc_threshold_vc)
- for (i = 0; i < c->num_entries; i++)
- if (c->mem_acc_threshold_vc == table[i].virtual_corner)
- scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
- L_VAL(table[i].freq_data));
+ for (i = 0; i < c->num_entries; i++)
+ if (threshold_vc[3] == table[i].virtual_corner)
+ scm_io_write(c->pbases[OSM_BASE] + SEQ_REG(32),
+ L_VAL(table[i].freq_data));
}
void clk_osm_setup_sequencer(struct clk_osm *c)
diff --git a/drivers/clk/qcom/clk-rcg2.c b/drivers/clk/qcom/clk-rcg2.c
index a07deb902577..9e5c0b6f7a0e 100644
--- a/drivers/clk/qcom/clk-rcg2.c
+++ b/drivers/clk/qcom/clk-rcg2.c
@@ -307,7 +307,18 @@ static int clk_rcg2_configure(struct clk_rcg2 *rcg, const struct freq_tbl *f)
{
u32 cfg, mask, old_cfg;
struct clk_hw *hw = &rcg->clkr.hw;
- int ret, index = qcom_find_src_index(hw, rcg->parent_map, f->src);
+ int ret, index;
+
+ /*
+ * In case the frequency table of cxo_f is used, the src in parent_map
+ * and the source in cxo_f.src could be different. Update the index to
+ * '0' since it's assumed that CXO is always fed to port 0 of the
+ * RCGs that HLOS controls.
+ */
+ if (f == &cxo_f)
+ index = 0;
+ else
+ index = qcom_find_src_index(hw, rcg->parent_map, f->src);
if (index < 0)
return index;
@@ -507,6 +518,15 @@ static int clk_rcg2_enable(struct clk_hw *hw)
if (!f)
return -EINVAL;
+ /*
+ * If CXO is not listed as a supported frequency in the frequency
+ * table, the above API would return the lowest supported frequency
+ * instead. This will lead to incorrect configuration of the RCG.
+ * Check if the RCG rate is CXO and configure it accordingly.
+ */
+ if (rate == cxo_f.freq)
+ f = &cxo_f;
+
clk_rcg_set_force_enable(hw);
clk_rcg2_configure(rcg, f);
clk_rcg_clear_force_enable(hw);
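
Two CXO special cases are added above: clk_rcg2_enable() substitutes cxo_f when the requested rate is exactly the XO rate (the table lookup would otherwise return the lowest listed frequency), and clk_rcg2_configure() forces mux index 0 because cxo_f.src may not appear in this RCG's parent_map. The index selection, distilled into a sketch (the helper name is hypothetical; qcom_find_src_index is the existing lookup used in the hunk):

static int rcg2_src_index(struct clk_hw *hw, struct clk_rcg2 *rcg,
			  const struct freq_tbl *f)
{
	if (f == &cxo_f)
		return 0;	/* CXO assumed wired to mux port 0 */
	return qcom_find_src_index(hw, rcg->parent_map, f->src);
}
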
diff --git a/drivers/clk/qcom/mmcc-sdm660.c b/drivers/clk/qcom/mmcc-sdm660.c
index b908443006af..87e6f8c6be0a 100644
--- a/drivers/clk/qcom/mmcc-sdm660.c
+++ b/drivers/clk/qcom/mmcc-sdm660.c
@@ -529,7 +529,7 @@ static struct clk_rcg2 ahb_clk_src = {
.hid_width = 5,
.parent_map = mmcc_parent_map_10,
.freq_tbl = ftbl_ahb_clk_src,
- .flags = FORCE_ENABLE_RCGR,
+ .enable_safe_config = true,
.clkr.hw.init = &(struct clk_init_data){
.name = "ahb_clk_src",
.parent_names = mmcc_parent_names_10,
diff --git a/drivers/crypto/msm/qcedev.c b/drivers/crypto/msm/qcedev.c
index 5ce87a6edcc3..7459401979fe 100644
--- a/drivers/crypto/msm/qcedev.c
+++ b/drivers/crypto/msm/qcedev.c
@@ -1490,6 +1490,11 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all dst length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.dst[i].vaddr && req->vbuf.dst[i].len) {
+ pr_err("%s: NULL req dst vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.dst[i].len);
+ goto error;
+ }
if (req->vbuf.dst[i].len >= U32_MAX - total) {
pr_err("%s: Integer overflow on total req dst vbuf length\n",
__func__);
@@ -1504,6 +1509,11 @@ static int qcedev_check_cipher_params(struct qcedev_cipher_op_req *req,
}
/* Check for sum of all src length is equal to data_len */
for (i = 0, total = 0; i < req->entries; i++) {
+ if (!req->vbuf.src[i].vaddr && req->vbuf.src[i].len) {
+ pr_err("%s: NULL req src vbuf[%d] with length %d\n",
+ __func__, i, req->vbuf.src[i].len);
+ goto error;
+ }
if (req->vbuf.src[i].len > U32_MAX - total) {
pr_err("%s: Integer overflow on total req src vbuf length\n",
__func__);
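
The added checks reject any scatter entry that carries a length but no virtual address, alongside the existing guard against the running total overflowing 32 bits. A kernel-style sketch of the validation (struct buf_ent stands in for the driver's vbuf entries):

struct buf_ent { void *vaddr; u32 len; };

static int validate_buf_list(const struct buf_ent *ent, int n, u32 data_len)
{
	u32 total = 0;
	int i;

	for (i = 0; i < n; i++) {
		if (!ent[i].vaddr && ent[i].len)
			return -EINVAL;		/* length with no buffer */
		if (ent[i].len > U32_MAX - total)
			return -EINVAL;		/* total would overflow */
		total += ent[i].len;
	}
	return total == data_len ? 0 : -EINVAL;
}
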
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index afd94a1e85d3..5838545468f8 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -10,6 +10,8 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select BACKLIGHT_CLASS_DEVICE
+ select MSM_EXT_DISPLAY
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -88,3 +90,9 @@ config DRM_SDE_WB
help
Choose this option for writeback connector support.
+config DRM_SDE_HDMI
+ bool "Enable HDMI driver support in DRM SDE driver"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option if HDMI connector support is needed in SDE driver.
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 442c5d9711a9..79ea5a9f90ea 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,4 +1,6 @@
ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi-staging
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
@@ -45,14 +47,19 @@ msm_drm-y := \
sde/sde_backlight.o \
sde/sde_color_processing.o \
sde/sde_vbif.o \
- sde_dbg_evtlog.o
+ sde_dbg_evtlog.o \
+ sde_io_util.o \
# use drm gpu driver only if qcom_kgsl driver not available
ifneq ($(CONFIG_QCOM_KGSL),y)
msm_drm-y += adreno/adreno_device.o \
adreno/adreno_gpu.o \
adreno/a3xx_gpu.o \
- adreno/a4xx_gpu.o
+ adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
+ adreno/a5xx_preempt.o \
+ adreno/a5xx_snapshot.o
endif
msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
@@ -90,6 +97,12 @@ msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
dsi-staging/dsi_panel.o \
dsi-staging/dsi_display_test.o
+msm_drm-$(CONFIG_DRM_SDE_HDMI) += \
+ hdmi-staging/sde_hdmi.o \
+ hdmi-staging/sde_hdmi_bridge.o \
+ hdmi-staging/sde_hdmi_audio.o \
+ hdmi-staging/sde_hdmi_edid.o
+
msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
dsi/pll/dsi_pll_28nm.o
@@ -128,6 +141,7 @@ msm_drm-$(CONFIG_DRM_MSM) += \
msm_perf.o \
msm_rd.o \
msm_ringbuffer.o \
- msm_prop.o
+ msm_prop.o \
+ msm_snapshot.o
obj-$(CONFIG_DRM_MSM) += msm_drm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index fee24297fb92..8d16b21ef8a5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,16 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 27dabd5e57fb..d521b13bdf9f 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -129,10 +130,14 @@ enum a3xx_tex_fmt {
TFMT_Z16_UNORM = 9,
TFMT_X8Z24_UNORM = 10,
TFMT_Z32_FLOAT = 11,
- TFMT_NV12_UV_TILED = 17,
- TFMT_NV12_Y_TILED = 19,
- TFMT_NV12_UV = 21,
- TFMT_NV12_Y = 23,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
TFMT_I420_Y = 24,
TFMT_I420_U = 26,
TFMT_I420_V = 27,
@@ -525,14 +530,6 @@ enum a3xx_uche_perfcounter_select {
UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a3xx_intp_mode {
SMOOTH = 0,
FLAT = 1,
@@ -1393,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1472,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index f3a959eedbe0..c4f886fd6037 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -40,10 +40,11 @@
extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
static bool a3xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -64,8 +65,8 @@ static bool a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- return gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a3xx_idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -331,7 +332,7 @@ static void a3xx_destroy(struct msm_gpu *gpu)
static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
@@ -422,91 +423,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A3XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A3XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A3XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A3XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A3XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A3XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A3XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A3XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A3XX_SP_VS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A3XX_SP_FS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_A3XX_RBBM_PM_OVERRIDE2),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_A3XX_SQ_GPR_MANAGEMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_A3XX_SQ_INST_STORE_MANAGMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A3XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
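
The trimmed offset table above keeps only the ringbuffer registers and marks the 64-bit *_HI slots with REG_ADRENO_SKIP, since a3xx is a 32-bit part. A hedged sketch of how a skip entry would be tested before an access (the sentinel value here is an assumption):

#define REG_SKIP ~0U	/* assumed sentinel for an unimplemented slot */

static bool adreno_reg_present(const unsigned int *offsets, unsigned int reg)
{
	/* generic code checks the slot before issuing an MMIO access */
	return offsets[reg] != REG_SKIP;
}
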
@@ -517,9 +440,10 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
.last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a3xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -567,7 +491,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 3220b91f559a..1004d4ecf8fe 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -46,6 +47,9 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
RB4_R5G6B5_UNORM = 14,
@@ -89,17 +93,10 @@ enum a4xx_color_fmt {
enum a4xx_tile_mode {
TILE4_LINEAR = 0,
+ TILE4_2 = 2,
TILE4_3 = 3,
};
-enum a4xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a4xx_vtx_fmt {
VFMT4_32_FLOAT = 1,
VFMT4_32_32_FLOAT = 2,
@@ -940,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -1043,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -1061,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -1073,12 +1071,18 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
}
#define REG_A4XX_RB_BLEND_RED 0x000020f0
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -1095,12 +1099,18 @@ static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
}
#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -1117,12 +1127,18 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
}
#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -1139,12 +1155,18 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
}
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x0000ffff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -1348,7 +1370,7 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
@@ -2177,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
-
static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -2272,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -2420,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -3117,6 +3151,8 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
@@ -3670,6 +3706,8 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -3690,6 +3728,20 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BIN_BASE 0x000021c0
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
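
These generated headers express every field as a MASK/SHIFT pair plus an inline packer; fields are OR-ed together when a register is programmed. A usage sketch for the A4XX_PC_VSTREAM_CONTROL helpers added above (gpu_write() is the msm driver's MMIO write helper; the wrapper function is hypothetical):

static void set_vstream(struct msm_gpu *gpu, uint32_t size, uint32_t n)
{
	gpu_write(gpu, REG_A4XX_PC_VSTREAM_CONTROL,
		  A4XX_PC_VSTREAM_CONTROL_SIZE(size) |
		  A4XX_PC_VSTREAM_CONTROL_N(n));
}
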
@@ -3752,12 +3804,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
{
return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
}
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A4XX_VBIF_VERSION 0x00003000
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index f430b291916d..534a7c3fbdca 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -31,6 +31,7 @@
extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
/*
* a4xx_enable_hwcg() - Program the clock control registers
@@ -115,7 +116,7 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
static bool a4xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -136,8 +137,8 @@ static bool a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- return gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a4xx_idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -329,7 +330,7 @@ static void a4xx_destroy(struct msm_gpu *gpu)
static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- if (!adreno_idle(gpu))
+ if (!adreno_idle(gpu, gpu->rb[0]))
return false;
/* then wait for GPU to finish: */
@@ -455,87 +456,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A4XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A4XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A4XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A4XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A4XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A4XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A4XX_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A4XX_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A4XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A4XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_VS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_FS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A4XX_SP_VS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A4XX_SP_FS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A4XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A4XX_UCHE_INVALIDATE0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static void a4xx_dump(struct msm_gpu *gpu)
@@ -582,16 +509,8 @@ static int a4xx_pm_suspend(struct msm_gpu *gpu) {
static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
{
- uint32_t hi, lo, tmp;
-
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- do {
- hi = tmp;
- lo = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO);
- tmp = gpu_read(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_HI);
- } while (tmp != hi);
-
- *value = (((uint64_t)hi) << 32) | lo;
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A4XX_RBBM_PERFCTR_CP_0_HI);
return 0;
}
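
The open-coded loop removed above is the classic torn-read guard for a 64-bit counter exposed as two 32-bit registers; gpu_read64() presumably encapsulates the same latch loop. What it replaces, kept here as a sketch:

/* Re-read the high word until it is stable so the 64-bit counter is
 * not torn by a carry between the two 32-bit reads.
 */
static uint64_t read64_latched(struct msm_gpu *gpu, u32 lo_reg, u32 hi_reg)
{
	uint32_t hi, lo, tmp;

	tmp = gpu_read(gpu, hi_reg);
	do {
		hi = tmp;
		lo = gpu_read(gpu, lo_reg);
		tmp = gpu_read(gpu, hi_reg);
	} while (tmp != hi);

	return ((uint64_t)hi << 32) | lo;
}
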
@@ -604,9 +523,10 @@ static const struct adreno_gpu_funcs funcs = {
.pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
.last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a4xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -648,7 +568,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000000..56dad2217289
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,3497 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /local3/projects/drm/envytools/rnndb//adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- /local3/projects/drm/envytools/rnndb//adreno/a5xx.xml ( 81546 bytes, from 2016-10-31 16:38:41)
+- /local3/projects/drm/envytools/rnndb//adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_R8_UNORM = 3,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R16G16B16A16_FLOAT = 98,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+ TFETCH5_1_BYTE = 0,
+ TFETCH5_2_BYTE = 1,
+ TFETCH5_4_BYTE = 2,
+ TFETCH5_8_BYTE = 3,
+ TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_debugbus {
+ A5XX_RBBM_DBGBUS_CP = 1,
+ A5XX_RBBM_DBGBUS_RBBM = 2,
+ A5XX_RBBM_DBGBUS_VBIF = 3,
+ A5XX_RBBM_DBGBUS_HLSQ = 4,
+ A5XX_RBBM_DBGBUS_UCHE = 5,
+ A5XX_RBBM_DBGBUS_DPM = 6,
+ A5XX_RBBM_DBGBUS_TESS = 7,
+ A5XX_RBBM_DBGBUS_PC = 8,
+ A5XX_RBBM_DBGBUS_VFDP = 9,
+ A5XX_RBBM_DBGBUS_VPC = 10,
+ A5XX_RBBM_DBGBUS_TSE = 11,
+ A5XX_RBBM_DBGBUS_RAS = 12,
+ A5XX_RBBM_DBGBUS_VSC = 13,
+ A5XX_RBBM_DBGBUS_COM = 14,
+ A5XX_RBBM_DBGBUS_DCOM = 15,
+ A5XX_RBBM_DBGBUS_LRZ = 16,
+ A5XX_RBBM_DBGBUS_A2D_DSP = 17,
+ A5XX_RBBM_DBGBUS_CCUFCHE = 18,
+ A5XX_RBBM_DBGBUS_GPMU = 19,
+ A5XX_RBBM_DBGBUS_RBP = 20,
+ A5XX_RBBM_DBGBUS_HM = 21,
+ A5XX_RBBM_DBGBUS_RBBM_CFG = 22,
+ A5XX_RBBM_DBGBUS_VBIF_CX = 23,
+ A5XX_RBBM_DBGBUS_GPC = 29,
+ A5XX_RBBM_DBGBUS_LARC = 30,
+ A5XX_RBBM_DBGBUS_HLSQ_SPTP = 31,
+ A5XX_RBBM_DBGBUS_RB_0 = 32,
+ A5XX_RBBM_DBGBUS_RB_1 = 33,
+ A5XX_RBBM_DBGBUS_RB_2 = 34,
+ A5XX_RBBM_DBGBUS_RB_3 = 35,
+ A5XX_RBBM_DBGBUS_CCU_0 = 40,
+ A5XX_RBBM_DBGBUS_CCU_1 = 41,
+ A5XX_RBBM_DBGBUS_CCU_2 = 42,
+ A5XX_RBBM_DBGBUS_CCU_3 = 43,
+ A5XX_RBBM_DBGBUS_A2D_RAS_0 = 48,
+ A5XX_RBBM_DBGBUS_A2D_RAS_1 = 49,
+ A5XX_RBBM_DBGBUS_A2D_RAS_2 = 50,
+ A5XX_RBBM_DBGBUS_A2D_RAS_3 = 51,
+ A5XX_RBBM_DBGBUS_VFD_0 = 56,
+ A5XX_RBBM_DBGBUS_VFD_1 = 57,
+ A5XX_RBBM_DBGBUS_VFD_2 = 58,
+ A5XX_RBBM_DBGBUS_VFD_3 = 59,
+ A5XX_RBBM_DBGBUS_SP_0 = 64,
+ A5XX_RBBM_DBGBUS_SP_1 = 65,
+ A5XX_RBBM_DBGBUS_SP_2 = 66,
+ A5XX_RBBM_DBGBUS_SP_3 = 67,
+ A5XX_RBBM_DBGBUS_TPL1_0 = 72,
+ A5XX_RBBM_DBGBUS_TPL1_1 = 73,
+ A5XX_RBBM_DBGBUS_TPL1_2 = 74,
+ A5XX_RBBM_DBGBUS_TPL1_3 = 75,
+};
+
+enum a5xx_shader_blocks {
+ A5XX_TP_W_MEMOBJ = 1,
+ A5XX_TP_W_SAMPLER = 2,
+ A5XX_TP_W_MIPMAP_BASE = 3,
+ A5XX_TP_W_MEMOBJ_TAG = 4,
+ A5XX_TP_W_SAMPLER_TAG = 5,
+ A5XX_TP_S_3D_MEMOBJ = 6,
+ A5XX_TP_S_3D_SAMPLER = 7,
+ A5XX_TP_S_3D_MEMOBJ_TAG = 8,
+ A5XX_TP_S_3D_SAMPLER_TAG = 9,
+ A5XX_TP_S_CS_MEMOBJ = 10,
+ A5XX_TP_S_CS_SAMPLER = 11,
+ A5XX_TP_S_CS_MEMOBJ_TAG = 12,
+ A5XX_TP_S_CS_SAMPLER_TAG = 13,
+ A5XX_SP_W_INSTR = 14,
+ A5XX_SP_W_CONST = 15,
+ A5XX_SP_W_UAV_SIZE = 16,
+ A5XX_SP_W_CB_SIZE = 17,
+ A5XX_SP_W_UAV_BASE = 18,
+ A5XX_SP_W_CB_BASE = 19,
+ A5XX_SP_W_INST_TAG = 20,
+ A5XX_SP_W_STATE = 21,
+ A5XX_SP_S_3D_INSTR = 22,
+ A5XX_SP_S_3D_CONST = 23,
+ A5XX_SP_S_3D_CB_BASE = 24,
+ A5XX_SP_S_3D_CB_SIZE = 25,
+ A5XX_SP_S_3D_UAV_BASE = 26,
+ A5XX_SP_S_3D_UAV_SIZE = 27,
+ A5XX_SP_S_CS_INSTR = 28,
+ A5XX_SP_S_CS_CONST = 29,
+ A5XX_SP_S_CS_CB_BASE = 30,
+ A5XX_SP_S_CS_CB_SIZE = 31,
+ A5XX_SP_S_CS_UAV_BASE = 32,
+ A5XX_SP_S_CS_UAV_SIZE = 33,
+ A5XX_SP_S_3D_INSTR_DIRTY = 34,
+ A5XX_SP_S_3D_CONST_DIRTY = 35,
+ A5XX_SP_S_3D_CB_BASE_DIRTY = 36,
+ A5XX_SP_S_3D_CB_SIZE_DIRTY = 37,
+ A5XX_SP_S_3D_UAV_BASE_DIRTY = 38,
+ A5XX_SP_S_3D_UAV_SIZE_DIRTY = 39,
+ A5XX_SP_S_CS_INSTR_DIRTY = 40,
+ A5XX_SP_S_CS_CONST_DIRTY = 41,
+ A5XX_SP_S_CS_CB_BASE_DIRTY = 42,
+ A5XX_SP_S_CS_CB_SIZE_DIRTY = 43,
+ A5XX_SP_S_CS_UAV_BASE_DIRTY = 44,
+ A5XX_SP_S_CS_UAV_SIZE_DIRTY = 45,
+ A5XX_HLSQ_ICB = 46,
+ A5XX_HLSQ_ICB_DIRTY = 47,
+ A5XX_HLSQ_ICB_CB_BASE_DIRTY = 48,
+ A5XX_SP_POWER_RESTORE_RAM = 64,
+ A5XX_SP_POWER_RESTORE_RAM_TAG = 65,
+ A5XX_TP_POWER_RESTORE_RAM = 66,
+ A5XX_TP_POWER_RESTORE_RAM_TAG = 67,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+
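/*
 * Editorial sketch, not part of the patch: the __MASK/__SHIFT pairs and
 * their inline packers above are meant to be OR'ed into one CP protect
 * entry.  A hypothetical helper (the a5xx_protect_entry name and the
 * gpu_write() accessor are assumptions, not this tree's API):
 */
static inline uint32_t a5xx_protect_entry(uint32_t base, uint32_t log2_len)
{
	/* Trap reads and writes to the 2^log2_len dwords starting at 'base' */
	return A5XX_CP_PROTECT_REG_BASE_ADDR(base) |
		A5XX_CP_PROTECT_REG_MASK_LEN(log2_len) |
		A5XX_CP_PROTECT_REG_TRAP_WRITE |
		A5XX_CP_PROTECT_REG_TRAP_READ;
}
/* e.g.: gpu_write(gpu, REG_A5XX_CP_PROTECT(0), a5xx_protect_entry(0x800, 6)); */
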
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
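/*
 * Editorial sketch, not part of the patch: the A5XX_RBBM_INT_0_MASK_* bits
 * above are OR'ed together and written to REG_A5XX_RBBM_INT_0_MASK to pick
 * which sources raise the GPU interrupt.  A plausible (illustrative, not
 * this tree's actual) error-oriented mask:
 */
#define A5XX_INT_0_MASK_SKETCH \
	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
	 A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	 A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	 A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
	 A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
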
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
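/*
 * Editorial sketch, not part of the patch: REG_A5XX_RBBM_STATUS is the
 * usual idle-poll target; the GPU counts as idle when no busy bit other
 * than the always-on host interface (HI_BUSY) is set.  struct msm_gpu and
 * gpu_read() are assumed from the surrounding driver:
 */
static inline bool a5xx_is_idle_sketch(struct msm_gpu *gpu)
{
	/* mask off HI_BUSY, which stays set whenever the AHB path is alive */
	return !(gpu_read(gpu, REG_A5XX_RBBM_STATUS) &
		 ~A5XX_RBBM_STATUS_HI_BUSY);
}
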
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+
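/*
 * Editorial sketch, not part of the patch: bin width and height pack into
 * the single REG_A5XX_VSC_BIN_SIZE register through the helpers above,
 * e.g. for (illustrative) 32x32-pixel bins:
 *
 *	gpu_write(gpu, REG_A5XX_VSC_BIN_SIZE,
 *		  A5XX_VSC_BIN_SIZE_X(32) | A5XX_VSC_BIN_SIZE_Y(32));
 *
 * (gpu_write() is an assumed accessor, not defined in this header.)
 */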
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK 0x0000ff00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DBG_READ_SEL_STATETYPE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT) & A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK;
+}
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+#define A5XX_VBIF_CLKON_FORCE_ON 0x00000001
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN 0x00000001
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK 0x0000000f
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT 0
+static inline uint32_t A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK 0x0000001f
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT 0
+static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A5XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK 0x0000000f
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT 0
+static inline uint32_t A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT) & A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK;
+}
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_BCL_ENABLED 0x00000002
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_LLM_ENABLED 0x00000200
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_IDLE_FULL_LM 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK 0x00000030
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT 4
+static inline uint32_t A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT) & A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK;
+}
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_IDLE_FULL_ACK 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_WAKEUP_ACK 0x00000002
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CLEAR_CNTL 0x0000e005
+#define A5XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
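+/* The viewport offset/scale registers below take IEEE-754 floats; fui()
+ * is assumed to be the driver's float-to-uint32 bit-cast helper, so the
+ * raw float bit pattern lands in the register unchanged.  Sketch with
+ * hypothetical values (not part of this patch):
+ *
+ *   gpu_write(gpu, REG_A5XX_GRAS_CL_VPORT_XSCALE_0,
+ *             A5XX_GRAS_CL_VPORT_XSCALE_0(width / 2.0f));
+ */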
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+
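+/* Point min/max/size appear to be fixed point with 4 fractional bits
+ * (the packers scale by 16.0), i.e. a granularity of 1/16 pixel.
+ */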
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
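+/* Scissor rects are programmed as a TL/BR register pair: X in the low
+ * halfword, Y in bits 16..30, with bit 31 (WINDOW_OFFSET_DISABLE)
+ * presumably making the coordinates absolute rather than relative to
+ * RB_WINDOW_OFFSET.  Sketch, assuming the BR coordinate is inclusive
+ * (hence the -1), which is unconfirmed here:
+ *
+ *   gpu_write(gpu, REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0,
+ *             A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(x1) |
+ *             A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(y1));
+ *   gpu_write(gpu, REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0,
+ *             A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(x2 - 1) |
+ *             A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(y2 - 1));
+ */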
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
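+/* The WIDTH/HEIGHT packers below take pixel dimensions and store them
+ * right-shifted by 5, so the bin size appears to be encoded in units of
+ * 32 pixels.
+ */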
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
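+/* Per-render-target register block: REG_A5XX_RB_MRT(i) returns the base
+ * of the i'th MRT block (stride 7 dwords), and each sub-register helper
+ * below adds its fixed offset within that block, e.g.
+ * REG_A5XX_RB_MRT_CONTROL(2) == 0xe150 + 2*7.
+ */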
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
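+/* Usage sketch for the blend-control packers above, assuming the usual
+ * enum names from adreno_common.xml.h (FACTOR_SRC_ALPHA,
+ * FACTOR_ONE_MINUS_SRC_ALPHA, BLEND_DST_PLUS_SRC): classic
+ * src-alpha / one-minus-src-alpha blending on MRT i would look like:
+ *
+ *   gpu_write(gpu, REG_A5XX_RB_MRT_BLEND_CONTROL(i),
+ *             A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+ *             A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *             A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA) |
+ *             A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+ *             A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *             A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA));
+ */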
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
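+/* The pitch packers take byte values: COLOR_BUF_PITCH is stored >>4
+ * (16-byte units) and COLOR_BUF_SIZE >>6 (64-byte units), so callers
+ * presumably need to align their buffers accordingly.
+ */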
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK 0x0007ffff
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT) & A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK 0x01ffffff
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+
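+/* Each blend-color channel register packs three encodings of the same
+ * value: UINT8 in bits 0..7, SINT8 in bits 8..15, and a half-float in
+ * bits 16..31 (util_float_to_half() is assumed to be the driver's
+ * float-to-fp16 helper).  The *_F32 register following each one carries
+ * the full 32-bit float via fui().
+ */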
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
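+/* Depth pitch and array pitch are taken in bytes and stored >>5, i.e.
+ * in what appear to be 32-byte units.
+ */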
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
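+/* STENCIL_BASE takes a byte address and stores address >> 12 in bits
+ * 12..31, so a separate stencil buffer evidently must be 4 KiB aligned.
+ */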
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c2
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A5XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E1C3 0x0000e1c3
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+
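+/* For register array groups the generator emits both a group accessor
+ * and a per-element register accessor; where the group holds a single
+ * register (as below) the two return the same offset.
+ */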
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
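+/* Vertex fetch state is an array of 4-dword descriptors, one per vertex
+ * buffer: base lo/hi, size and stride, addressed via
+ * REG_A5XX_VFD_FETCH(i) == 0xe40a + 4*i.
+ */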
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+
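+/* Each SP_VS_OUT_REG dword describes two VS outputs: output A (register
+ * id plus component mask) in the low halfword and output B in the high
+ * halfword.
+ */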
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd
+
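+/* TEX_SAMP_* and TEX_CONST_* below restart at offset 0: these look like
+ * dword indices into the in-memory sampler/texture descriptors pointed
+ * at by the *_TEX_SAMP / *_TEX_CONST registers above, not MMIO offsets.
+ */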
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
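+/* LOD values are fixed point with 8 fractional bits (the packers scale
+ * by 256.0): LOD_BIAS is signed (int32_t cast, 13 bits), while MAX_LOD
+ * and MIN_LOD in TEX_SAMP_1 are unsigned 4.8 values.
+ */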
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
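+/*
+ * Usage sketch (illustrative, not part of the generated register database):
+ * a trilinear, repeat-wrapped sampler could be packed from these helpers
+ * roughly as follows, assuming the a5xx_tex_filter/a5xx_tex_clamp enum
+ * values defined earlier in this file:
+ *
+ *   uint32_t samp0 = A5XX_TEX_SAMP_0_XY_MAG(A5XX_TEX_LINEAR) |
+ *                    A5XX_TEX_SAMP_0_XY_MIN(A5XX_TEX_LINEAR) |
+ *                    A5XX_TEX_SAMP_0_WRAP_S(A5XX_TEX_REPEAT) |
+ *                    A5XX_TEX_SAMP_0_WRAP_T(A5XX_TEX_REPEAT) |
+ *                    A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR;
+ *   uint32_t samp1 = A5XX_TEX_SAMP_1_MAX_LOD(14.0f) |
+ *                    A5XX_TEX_SAMP_1_MIN_LOD(0.0f);
+ */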
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILED 0x00000001
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+ return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff00
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 8
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ__MASK;
+}
+#define A5XX_TEX_CONST_3_LAYERSZ2__MASK 0xff800000
+#define A5XX_TEX_CONST_3_LAYERSZ2__SHIFT 23
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ2__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE__SHIFT) & A5XX_TEX_CONST_4_BASE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000000..f5847bc60c49
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,1345 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ /* Update HW if this is the current ring and we are not in preempt */
+ if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ struct msm_gem_address_space *aspace)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_mmu *mmu = aspace->mmu;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ if (!iommu->ttbr0)
+ return;
+
+ /* Turn off protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn on APRIV mode to access critical regions */
+ OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+ OUT_RING(ring, 1);
+
+ /* Make sure the ME is synchronized before starting the update */
+ OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+ /* Execute the table update */
+ OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 3);
+ OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+ OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+ OUT_RING(ring, iommu->contextidr);
+
+ /*
+ * Write the new TTBR0 to the preemption records - this will be used to
+ * reload the pagetable if the current ring gets preempted out.
+ */
+ OUT_PKT7(ring, CP_MEM_WRITE, 4);
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, ttbr0)));
+ OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+ OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+
+ /* Also write the current contextidr (ASID) */
+ OUT_PKT7(ring, CP_MEM_WRITE, 3);
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id,
+ contextidr)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id,
+ contextidr)));
+ OUT_RING(ring, iommu->contextidr);
+
+ /* Invalidate the draw state so we start off fresh */
+ OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+ OUT_RING(ring, 0x40000);
+ OUT_RING(ring, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn off APRIV */
+ OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+ OUT_RING(ring, 0);
+
+ /* Turn protected mode back on */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+}
+
+static int a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+ unsigned int i, ibs = 0;
+
+ a5xx_set_pagetable(gpu, ring, submit->aspace);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Enable local preemption for finegrain preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ ibs++;
+ break;
+ }
+ }
+
+ /*
+ * Write the render mode to NULL (0) to indicate to the CP that the IBs
+ * are done rendering - otherwise a lucky preemption would start
+ * replaying from the last checkpoint
+ */
+ OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+ OUT_RING(ring, 0);
+
+ /* Turn off IB level preemption */
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->fence);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+ * timestamp is written to the memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(adreno_gpu, ring->id, fence)));
+ OUT_RING(ring, submit->fence);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ /*
+ * If dword[2:1] are non zero, they specify an address for the CP to
+ * write the value of dword[3] to on preemption complete. Write 0 to
+ * skip the write
+ */
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ /* Data value - not used if the address above is 0 */
+ OUT_RING(ring, 0x01);
+ /* Set bit 0 to trigger an interrupt on preempt complete */
+ OUT_RING(ring, 0x01);
+
+ a5xx_flush(gpu, ring);
+
+ /* Check to see if we need to start preemption */
+ a5xx_preempt_trigger(gpu);
+
+ return 0;
+}
+
+static const struct {
+ u32 offset;
+ u32 value;
+} a5xx_hwcg[] = {
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+ {REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+ {REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+ {REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+ {REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+ {REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+ {REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+ {REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+ {REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+ gpu_write(gpu, a5xx_hwcg[i].offset,
+ state ? a5xx_hwcg[i].value : 0);
+
+ /* There are a few additional registers just for A540 */
+ if (adreno_is_a540(adreno_gpu)) {
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU,
+ state ? 0x770 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU,
+ state ? 0x004 : 0);
+ }
+
+ gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+ gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+}
+
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002F);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* Specify workarounds for various microcode issues */
+ if (adreno_is_a530(adreno_gpu)) {
+ /*
+ * Workaround for token end syncs: force a WFI after every
+ * direct-render 3D mode draw and every 2D mode 3 draw
+ */
+ OUT_RING(ring, 0x0000000B);
+ } else {
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+ }
+
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu, ring);
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (gpu->nr_rings == 1)
+ return 0;
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
+
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield the floor on command completion */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ gpu->funcs->flush(gpu, ring);
+
+ return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+ const struct firmware *fw, u64 *iova)
+{
+ struct drm_device *drm = gpu->dev;
+ struct drm_gem_object *bo;
+ void *ptr;
+
+ mutex_lock(&drm->struct_mutex);
+ bo = msm_gem_new(drm, fw->size - 4,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(bo))
+ return bo;
+
+ ptr = msm_gem_vaddr(bo);
+ if (!ptr) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ if (iova) {
+ int ret = msm_gem_get_iova(bo, gpu->aspace, iova);
+
+ if (ret) {
+ drm_gem_object_unreference_unlocked(bo);
+ return ERR_PTR(ret);
+ }
+ }
+
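+ /*
+ * Skip the first dword of the firmware image - it holds the version,
+ * and only the payload after it is copied into the GPU buffer
+ */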
+ memcpy(ptr, &fw->data[4], fw->size - 4);
+ return bo;
+}
+
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret;
+
+ if (!a5xx_gpu->pm4_bo) {
+ a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+ &a5xx_gpu->pm4_iova);
+
+ if (IS_ERR(a5xx_gpu->pm4_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pm4_bo);
+ a5xx_gpu->pm4_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ if (!a5xx_gpu->pfp_bo) {
+ a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+ &a5xx_gpu->pfp_iova);
+
+ if (IS_ERR(a5xx_gpu->pfp_bo)) {
+ ret = PTR_ERR(a5xx_gpu->pfp_bo);
+ a5xx_gpu->pfp_bo = NULL;
+ dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+ ret);
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+ REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+ gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+ REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+ return 0;
+}
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ const char *name;
+ void *ptr;
+
+ /* If no zap shader was defined, we'll assume that none is needed */
+ if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,zap-shader", &name))
+ return;
+
+ /*
+ * If the zap shader has already been loaded then just ask the SCM to
+ * re-initialize the registers (not needed if CPZ retention is a thing)
+ */
+ if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+ int ret;
+ struct scm_desc desc = { 0 };
+
+ if (of_property_read_bool(GPU_OF_NODE(gpu),
+ "qcom,cpz-retention"))
+ return;
+
+ desc.args[0] = 0;
+ desc.args[1] = 13;
+ desc.arginfo = SCM_ARGS(2);
+
+ ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0x0A), &desc);
+ if (ret)
+ DRM_ERROR(
+ "%s: zap-shader resume failed with error %d\n",
+ gpu->name, ret);
+
+ return;
+ }
+
+ ptr = subsystem_get(name);
+
+ if (IS_ERR_OR_NULL(ptr)) {
+ DRM_ERROR("%s: Unable to load the zap shader: %ld\n", gpu->name,
+ IS_ERR(ptr) ? PTR_ERR(ptr) : -ENODEV);
+ } else {
+ set_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags);
+ }
+}
+#else
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+ if (!of_find_property(GPU_OF_NODE(gpu), "qcom,zap-shader", NULL))
+ return;
+
+ DRM_INFO_ONCE("%s: Zap shader is defined but loader isn't available\n",
+ gpu->name);
+}
+#endif
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ int ret, bit = 0;
+
+ pm_qos_update_request(&gpu->pm_qos_req_dma, 101);
+
+ gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+ /* Enable RBBM error reporting bits */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+ /*
+ * Mask out the activity signals from RB1-3 to avoid false
+ * positives
+ */
+
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+ 0xF0000000);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+ 0xFFFFFFFF);
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+ 0xFFFFFFFF);
+ }
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0xFFFF);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+ /* Increase VFD cache access so LRZ and other data gets evicted less */
+ gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+ gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+ gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A5XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+ gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+ if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+ gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+ gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+ /* Enable USE_RETENTION_FLOPS */
+ gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+ /* Enable ME/PFP split notification */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+ /* Enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+ /* Set the highest bank bit if specified in the device tree */
+ if (!of_property_read_u32(pdev->dev.of_node, "qcom,highest-bank-bit",
+ &bit)) {
+ if (bit >= 13 && bit <= 16) {
+ bit -= 13;
+
+ gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, bit << 7);
+ gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, bit << 1);
+
+ if (adreno_is_a540(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2,
+ bit);
+ }
+ }
+
+ /* Try to load and initialize the zap shader if applicable */
+ a5xx_zap_shader_init(gpu);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+ /* RBBM */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+ /* Content protect */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ 16));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+ ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+ /* CP */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+ /* RB */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+ /* VPC */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+ /* UCHE */
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+ ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+ /*
+ * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* Put the GPU into 64 bit by default */
+ gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+ /* Load the GPMU firmware before starting the HW init */
+ a5xx_gpmu_ucode_init(gpu);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ return ret;
+
+ a5xx_preempt_hw_init(gpu);
+
+ ret = a5xx_ucode_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Enable the RBBM interrupts we intend to handle */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+ /* Clear ME_HALT to start the micro engine */
+ gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+ ret = a5xx_me_init(gpu);
+ if (ret)
+ return ret;
+
+ /*
+ * Send a pipeline event stat to get misbehaving counters to start
+ * ticking correctly
+ */
+ if (adreno_is_a530(adreno_gpu)) {
+ OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+ OUT_RING(gpu->rb[0], 0x0F);
+
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ }
+
+ /*
+ * If a zap shader was specified in the device tree, assume that we are
+ * on a secure device that blocks access to the RBBM_SECVID registers
+ * so we need to use the CP to switch out of secure mode. If a zap
+ * shader was NOT specified then we assume we are on an unlocked device.
+ * If we guessed wrong then the access to the register will probably
+ * cause a XPU violation.
+ */
+ if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+ OUT_RING(ring, 0x00000000);
+
+ gpu->funcs->flush(gpu, gpu->rb[0]);
+ if (!a5xx_idle(gpu, gpu->rb[0]))
+ return -EINVAL;
+ } else {
+ /* Print a warning so if we die, we know why */
+ dev_warn_once(gpu->dev->dev,
+ "Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+ gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+ }
+
+ /* Next, start the power */
+ ret = a5xx_power_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Last step - yield the ringbuffer */
+ a5xx_preempt_start(gpu);
+
+ pm_qos_update_request(&gpu->pm_qos_req_dma, 501);
+
+ return 0;
+}
+
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+ adreno_dump_info(gpu);
+
+ msm_gpu_snapshot(gpu, gpu->snapshot);
+
+ /* Reset the GPU so it can work again */
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+ gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+ gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+
+ adreno_recover(gpu);
+}
+
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ DBG("%s", gpu->name);
+
+ a5xx_preempt_fini(gpu);
+
+ if (a5xx_gpu->pm4_bo) {
+ if (a5xx_gpu->pm4_iova)
+ msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+ }
+
+ if (a5xx_gpu->pfp_bo) {
+ if (a5xx_gpu->pfp_iova)
+ msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+ }
+
+ if (a5xx_gpu->gpmu_bo) {
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+ }
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a5xx_gpu);
+}
+
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+ if (gpu_read(gpu, REG_A5XX_RBBM_STATUS) & ~A5XX_RBBM_STATUS_HI_BUSY)
+ return false;
+
+ /*
+ * Nearly every abnormality ends up pausing the GPU and triggering a
+ * fault so we can safely just watch for this one interrupt to fire
+ */
+ return !(gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS) &
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT);
+}
+
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ if (ring != a5xx_gpu->cur_ring) {
+ WARN(1, "Tried to idle a non-current ringbuffer\n");
+ return false;
+ }
+
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a5xx_check_idle(gpu))) {
+ DRM_ERROR(
+ "%s: timeout waiting for GPU RB %d to idle: status %8.8X rptr/wptr: %4.4X/%4.4X irq %8.8X\n",
+ gpu->name, ring->id,
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+ return false;
+ }
+
+ return true;
+}
+
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+ if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+ /*
+ * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+ * read it twice
+ */
+
+ gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+ val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+ dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+ if (status & A5XX_CP_INT_CP_DMA_ERROR)
+ dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+ if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 24) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, val);
+ }
+
+ if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+ u32 status = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+ const char *access[16] = { "reserved", "reserved",
+ "timestamp lo", "timestamp hi", "pfp read", "pfp write",
+ "", "", "me read", "me write", "", "", "crashdump read",
+ "crashdump write" };
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+ status & 0xFFFFF, access[(status >> 24) & 0xF],
+ (status & (1 << 31)), status);
+ }
+}
+
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
+{
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+ u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+ dev_err_ratelimited(gpu->dev->dev,
+ "RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+ val & (1 << 28) ? "WRITE" : "READ",
+ (val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+ (val >> 24) & 0xF);
+
+ /* Clear the error */
+ gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+ /* Clear the interrupt */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+ }
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+ gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+ uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;
+
+ addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+ dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+ addr);
+}
+
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+ dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, adreno_submitted_fence(gpu, ring),
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+ gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+#define RBBM_ERROR_MASK \
+ (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+ /*
+ * Clear all the interrupts except for RBBM_AHB_ERROR
+ * which needs to be cleared after the error condition
+ * is cleared otherwise it will storm
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+ status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+
+ if (status & RBBM_ERROR_MASK)
+ a5xx_rbbm_err_irq(gpu, status);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a5xx_cp_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+ a5xx_fault_detect_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ a5xx_uche_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+ a5xx_gpmu_err_irq(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+ a5xx_preempt_irq(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A5XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
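+/*
+ * Pairs of inclusive (start, end) register ranges to dump for debugfs and
+ * snapshots, terminated by ~0
+ */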
+static const u32 a5xx_registers[] = {
+ 0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002b,
+ 0x002e, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+ 0x0097, 0x00bb, 0x03a0, 0x0464, 0x0469, 0x046f, 0x04d2, 0x04d3,
+ 0x04e0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081a, 0x081f, 0x0841,
+ 0x0860, 0x0860, 0x0880, 0x08a0, 0x0b00, 0x0b12, 0x0b14, 0x0b28,
+ 0x0b78, 0x0b7f, 0x0bb0, 0x0bbd, 0x0bc0, 0x0bc6, 0x0bd0, 0x0c53,
+ 0x0c60, 0x0c61, 0x0c80, 0x0c82, 0x0c84, 0x0c85, 0x0c90, 0x0c9b,
+ 0x0ca0, 0x0ca0, 0x0cb0, 0x0cb2, 0x0cc1, 0x0cc1, 0x0cc4, 0x0cc7,
+ 0x0ccc, 0x0ccc, 0x0cd0, 0x0cdb, 0x0ce0, 0x0ce5, 0x0ce8, 0x0ce8,
+ 0x0cec, 0x0cf1, 0x0cfb, 0x0d0e, 0x0d10, 0x0d17, 0x0d20, 0x0d23,
+ 0x0d30, 0x0d30, 0x0e40, 0x0e43, 0x0e4a, 0x0e4a, 0x0e50, 0x0e57,
+ 0x0e60, 0x0e7c, 0x0e80, 0x0e8e, 0x0e90, 0x0e96, 0x0ea0, 0x0eab,
+ 0x0eb0, 0x0eb2, 0x2100, 0x211e, 0x2140, 0x2145, 0x2180, 0x2185,
+ 0x2500, 0x251e, 0x2540, 0x2545, 0x2580, 0x2585, 0x3000, 0x3014,
+ 0x3018, 0x302c, 0x3030, 0x3030, 0x3034, 0x3036, 0x303c, 0x303d,
+ 0x3040, 0x3040, 0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058,
+ 0x305a, 0x3061, 0x3064, 0x3068, 0x306c, 0x306d, 0x3080, 0x3088,
+ 0x308b, 0x308c, 0x3090, 0x3094, 0x3098, 0x3098, 0x309c, 0x309c,
+ 0x3124, 0x3124, 0x340c, 0x340c, 0x3410, 0x3410, 0x3800, 0x3801,
+ 0xa800, 0xa800, 0xa820, 0xa828, 0xa840, 0xa87d, 0xa880, 0xa88d,
+ 0xa890, 0xa8a3, 0xa8a8, 0xa8aa, 0xa8c0, 0xa8c3, 0xa8c6, 0xa8ca,
+ 0xa8cc, 0xa8cf, 0xa8d1, 0xa8d8, 0xa8dc, 0xa8dc, 0xa8e0, 0xa8f5,
+ 0xac00, 0xac06, 0xac40, 0xac47, 0xac60, 0xac62, 0xac80, 0xac82,
+ 0xb800, 0xb808, 0xb80c, 0xb812, 0xb814, 0xb817, 0xb900, 0xb904,
+ 0xb906, 0xb90a, 0xb90c, 0xb90f, 0xb920, 0xb924, 0xb926, 0xb92a,
+ 0xb92c, 0xb92f, 0xb940, 0xb944, 0xb946, 0xb94a, 0xb94c, 0xb94f,
+ 0xb960, 0xb964, 0xb966, 0xb96a, 0xb96c, 0xb96f, 0xb980, 0xb984,
+ 0xb986, 0xb98a, 0xb98c, 0xb98f, 0xb9a0, 0xb9b0, 0xb9b8, 0xb9ba,
+ 0xd200, 0xd23f, 0xe000, 0xe006, 0xe010, 0xe09a, 0xe0a0, 0xe0a4,
+ 0xe0aa, 0xe0eb, 0xe100, 0xe105, 0xe140, 0xe147, 0xe150, 0xe187,
+ 0xe1a0, 0xe1a9, 0xe1b0, 0xe1b6, 0xe1c0, 0xe1c7, 0xe1d0, 0xe1d1,
+ 0xe200, 0xe201, 0xe210, 0xe21c, 0xe240, 0xe268, 0xe280, 0xe280,
+ 0xe282, 0xe2a3, 0xe2a5, 0xe2c2, 0xe380, 0xe38f, 0xe3b0, 0xe3b0,
+ 0xe400, 0xe405, 0xe408, 0xe4e9, 0xe4f0, 0xe4f0, 0xe800, 0xe806,
+ 0xe810, 0xe89a, 0xe8a0, 0xe8a4, 0xe8aa, 0xe8eb, 0xe900, 0xe905,
+ 0xe940, 0xe947, 0xe950, 0xe987, 0xe9a0, 0xe9a9, 0xe9b0, 0xe9b6,
+ 0xe9c0, 0xe9c7, 0xe9d0, 0xe9d1, 0xea00, 0xea01, 0xea10, 0xea1c,
+ 0xea40, 0xea68, 0xea80, 0xea80, 0xea82, 0xeaa3, 0xeaa5, 0xeac2,
+ 0xeb80, 0xeb8f, 0xebb0, 0xebb0, 0xec00, 0xec05, 0xec08, 0xece9,
+ 0xecf0, 0xecf0, 0xf400, 0xf400, 0xf800, 0xf807,
+ ~0
+};
+
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+ int ret;
+
+ /* Turn on the core power */
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ /* Turn on the RBCCU domain first to limit the chances of voltage droop */
+ gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+ /* Wait 3 usecs before polling */
+ udelay(3);
+
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret) {
+ DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+ gpu->name,
+ gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+ return ret;
+ }
+
+ /* Turn on the SP domain */
+ gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+ ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ (1 << 20), (1 << 20));
+ if (ret)
+ DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+ gpu->name);
+
+ return ret;
+}
+
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* Clear the VBIF pipe before shutting down */
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+ spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF) == 0xF);
+
+ gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ /*
+ * Reset the VBIF before power collapse to avoid issue with FIFO
+ * entries
+ */
+
+ if (adreno_is_a530(adreno_gpu)) {
+ /* These only need to be done for A530 */
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x003C0000);
+ gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD, 0x00000000);
+ }
+
+ return msm_gpu_pm_suspend(gpu);
+}
+
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A5XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A5XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+ gpu->funcs->pm_resume(gpu);
+
+ seq_printf(m, "status: %08x\n",
+ gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ gpu->funcs->pm_suspend(gpu);
+
+ adreno_show(gpu, m);
+}
+#endif
+
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+ return a5xx_gpu->cur_ring;
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a5xx_hw_init,
+ .pm_suspend = a5xx_pm_suspend,
+ .pm_resume = a5xx_pm_resume,
+ .recover = a5xx_recover,
+ .last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
+ .submit = a5xx_submit,
+ .flush = a5xx_flush,
+ .active_ring = a5xx_active_ring,
+ .irq = a5xx_irq,
+ .destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+ .show = a5xx_show,
+#endif
+ .snapshot = a5xx_snapshot,
+ },
+ .get_timestamp = a5xx_get_timestamp,
+};
+
+/* Read the limits management leakage from the efuses */
+static void a530_efuse_leakage(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu, void *base,
+ size_t size)
+{
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned int row0, row2;
+ unsigned int leakage_pwr_on, coeff;
+
+ if (size < 0x148)
+ return;
+
+ /* Leakage */
+ row0 = readl_relaxed(base + 0x134);
+ row2 = readl_relaxed(base + 0x144);
+
+ /* Read barrier to get the previous two reads */
+ rmb();
+
+ /* Get the leakage coefficient from device tree */
+ if (of_property_read_u32(pdev->dev.of_node,
+ "qcom,base-leakage-coefficent", &coeff))
+ return;
+
+ leakage_pwr_on = ((row2 >> 2) & 0xFF) * (1 << ((row0 >> 1) & 0x03));
+ a5xx_gpu->lm_leakage = (leakage_pwr_on << 16) |
+ ((leakage_pwr_on * coeff) / 100);
+}
+
+/* Read the speed bin from the efuses */
+static void a530_efuse_bin(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu, void *base,
+ size_t size)
+{
+ uint32_t speed_bin[3];
+ uint32_t val;
+
+ if (of_property_read_u32_array(pdev->dev.of_node,
+ "qcom,gpu-speed-bin", speed_bin, 3))
+ return;
+
+ if (size < speed_bin[0] + 4)
+ return;
+
+ val = readl_relaxed(base + speed_bin[0]);
+
+ adreno_gpu->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+/* Read target specific configuration from the efuses */
+static void a5xx_efuses_read(struct platform_device *pdev,
+ struct adreno_gpu *adreno_gpu)
+{
+ struct adreno_platform_config *config = pdev->dev.platform_data;
+ const struct adreno_info *info = adreno_info(config->rev);
+ struct resource *res;
+ void *base;
+
+ /*
+ * The adreno_gpu->revn mechanism isn't set up yet so we need to check
+ * it directly here
+ */
+ if (info->revn != 530)
+ return;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ "qfprom_memory");
+ if (!res)
+ return;
+
+ base = ioremap(res->start, resource_size(res));
+ if (!base)
+ return;
+
+ a530_efuse_bin(pdev, adreno_gpu, base, resource_size(res));
+ a530_efuse_leakage(pdev, adreno_gpu, base, resource_size(res));
+
+ iounmap(base);
+}
+
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct a5xx_gpu *a5xx_gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ if (!pdev) {
+ dev_err(dev->dev, "No A5XX device is defined\n");
+ return ERR_PTR(-ENXIO);
+ }
+
+ a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+ if (!a5xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a5xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ a5xx_gpu->pdev = pdev;
+ adreno_gpu->registers = a5xx_registers;
+ adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+ a5xx_gpu->lm_leakage = 0x4E001A;
+
+ /* Check the efuses for some configuration */
+ a5xx_efuses_read(pdev, adreno_gpu);
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 4);
+ if (ret) {
+ a5xx_destroy(&(a5xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Set up the preemption specific bits and pieces for each ringbuffer */
+ a5xx_preempt_init(gpu);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000000..3de14fe42a1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,187 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+enum {
+ A5XX_ZAP_SHADER_LOADED = 1,
+};
+
+struct a5xx_gpu {
+ unsigned long flags;
+
+ struct adreno_gpu base;
+ struct platform_device *pdev;
+
+ struct drm_gem_object *pm4_bo;
+ uint64_t pm4_iova;
+
+ struct drm_gem_object *pfp_bo;
+ uint64_t pfp_iova;
+
+ struct drm_gem_object *gpmu_bo;
+ uint64_t gpmu_iova;
+ uint32_t gpmu_dwords;
+
+ uint32_t lm_leakage;
+
+ struct msm_ringbuffer *cur_ring;
+ struct msm_ringbuffer *next_ring;
+
+ struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+ struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+ uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+ atomic_t preempt_state;
+ struct timer_list preempt_timer;
+
+ struct a5xx_smmu_info *smmu_info;
+ struct drm_gem_object *smmu_info_bo;
+ uint64_t smmu_info_iova;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - No preemption in progress. Next state: START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE.
+ * PREEMPT_TRIGGERED - A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING.
+ * PREEMPT_FAULTED - A preemption timed out (never completed). This will
+ * trigger recovery. Next state: N/A.
+ * PREEMPT_PENDING - The preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next states: FAULTED, NONE.
+ */
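+/*
+ * In short: NONE -> START -> TRIGGERED -> PENDING -> NONE on success, with
+ * START able to fall back to NONE, and TRIGGERED/PENDING able to end in
+ * FAULTED (which triggers recovery)
+ */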
+
+enum preempt_state {
+ PREEMPT_NONE = 0,
+ PREEMPT_START,
+ PREEMPT_TRIGGERED,
+ PREEMPT_FAULTED,
+ PREEMPT_PENDING,
+};
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+ uint32_t magic;
+ uint32_t info;
+ uint32_t data;
+ uint32_t cntl;
+ uint32_t rptr;
+ uint32_t wptr;
+ uint64_t rptr_addr;
+ uint64_t rbase;
+ uint64_t counter;
+};
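+/*
+ * A sketch of the CPU-side setup (the real initialization lives in the
+ * preemption code; the angle-bracketed values are placeholders):
+ *
+ *   record->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ *   record->info = 0;
+ *   record->data = 0;
+ *   record->cntl = <value programmed into RB_CNTL>;
+ *   record->rptr_addr = <GPU address the CP writes RB_RPTR back to>;
+ *   record->rbase = <GPU address of the ringbuffer>;
+ */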
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+/*
+ * This is a global structure that the preemption code uses to switch in the
+ * pagetable for the preempted process - the code switches in whatever
+ * pagetable is needed after preempting to a new ring.
+ */
+struct a5xx_smmu_info {
+ uint32_t magic;
+ uint32_t _pad4;
+ uint64_t ttbr0;
+ uint32_t asid;
+ uint32_t contextidr;
+};
+
+#define A5XX_SMMU_INFO_MAGIC 0x3618CDA3UL
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+ uint32_t reg, uint32_t mask, uint32_t value)
+{
+ while (usecs--) {
+ udelay(1);
+ if ((gpu_read(gpu, reg) & mask) == value)
+ return 0;
+ cpu_relax();
+ }
+
+ return -ETIMEDOUT;
+}
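+/*
+ * Example (mirroring the calls in a5xx_gpu.c): wait up to 20 usecs for bit 20
+ * of the SP power status register to latch:
+ *
+ *   ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+ *           (1 << 20), (1 << 20));
+ */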
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+ return !(atomic_read(&a5xx_gpu->preempt_state) == PREEMPT_NONE);
+}
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000000..e04feaadefb9
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,509 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are defined by convention
+ * with the GPMU firmware and are not bound to any specific hardware design
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
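+
+/*
+ * The message handshake, as used by the lm_setup functions below: write
+ * AGC_MSG_STATE and AGC_MSG_COMMAND, fill in the AGC_MSG_PAYLOAD dwords and
+ * AGC_MSG_PAYLOAD_SIZE, then write AGC_INIT_MSG_VALUE to AGC_INIT_MSG_MAGIC
+ * to signal the GPMU firmware that the message is ready
+ */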
+
+static struct {
+ uint32_t reg;
+ uint32_t value;
+} a5xx_sequence_regs[] = {
+ { 0xB9A1, 0x00010303 },
+ { 0xB9A2, 0x13000000 },
+ { 0xB9A3, 0x00460020 },
+ { 0xB9A4, 0x10000000 },
+ { 0xB9A5, 0x040A1707 },
+ { 0xB9A6, 0x00010000 },
+ { 0xB9A7, 0x0E000904 },
+ { 0xB9A8, 0x10000000 },
+ { 0xB9A9, 0x01165000 },
+ { 0xB9AA, 0x000E0002 },
+ { 0xB9AB, 0x03884141 },
+ { 0xB9AC, 0x10000840 },
+ { 0xB9AD, 0x572A5000 },
+ { 0xB9AE, 0x00000003 },
+ { 0xB9AF, 0x00000000 },
+ { 0xB9B0, 0x10000000 },
+ { 0xB828, 0x6C204010 },
+ { 0xB829, 0x6C204011 },
+ { 0xB82A, 0x6C204012 },
+ { 0xB82B, 0x6C204013 },
+ { 0xB82C, 0x6C204014 },
+ { 0xB90F, 0x00000004 },
+ { 0xB910, 0x00000002 },
+ { 0xB911, 0x00000002 },
+ { 0xB912, 0x00000002 },
+ { 0xB913, 0x00000002 },
+ { 0xB92F, 0x00000004 },
+ { 0xB930, 0x00000005 },
+ { 0xB931, 0x00000005 },
+ { 0xB932, 0x00000005 },
+ { 0xB933, 0x00000005 },
+ { 0xB96F, 0x00000001 },
+ { 0xB970, 0x00000003 },
+ { 0xB94F, 0x00000004 },
+ { 0xB950, 0x0000000B },
+ { 0xB951, 0x0000000B },
+ { 0xB952, 0x0000000B },
+ { 0xB953, 0x0000000B },
+ { 0xB907, 0x00000019 },
+ { 0xB927, 0x00000019 },
+ { 0xB947, 0x00000019 },
+ { 0xB967, 0x00000019 },
+ { 0xB987, 0x00000019 },
+ { 0xB906, 0x00220001 },
+ { 0xB926, 0x00220001 },
+ { 0xB946, 0x00220001 },
+ { 0xB966, 0x00220001 },
+ { 0xB986, 0x00300000 },
+ { 0xAC40, 0x0340FF41 },
+ { 0xAC41, 0x03BEFED0 },
+ { 0xAC42, 0x00331FED },
+ { 0xAC43, 0x021FFDD3 },
+ { 0xAC44, 0x5555AAAA },
+ { 0xAC45, 0x5555AAAA },
+ { 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the voltage (in millivolts) for the operating point at the specified
+ * frequency
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct dev_pm_opp *opp;
+
+ opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+ return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+/* Setup thermal limit management for A540 */
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ u32 max_power = 0;
+ u32 rate = gpu->gpufreq[gpu->active_level];
+ u32 config;
+
+ /* The battery current limiter isn't enabled for A540 */
+ config = AGC_LM_CONFIG_BCL_DISABLED;
+ config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+ /* For now disable GPMU side throttling */
+ config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+ /* Get the max-power from the device tree */
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+ gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+ PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Setup thermal limit management for A530 */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ uint32_t rate = gpu->gpufreq[gpu->active_level];
+ uint32_t tsens = 0;
+ uint32_t max_power = 0;
+ unsigned int i;
+
+ /* Write the block of sequence registers */
+ for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+ gpu_write(gpu, a5xx_sequence_regs[i].reg,
+ a5xx_sequence_regs[i].value);
+
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);
+
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
+ gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+ /* Write the voltage table */
+
+ /* Get the max-power from the device tree */
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+ gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+ gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+ gpu_write(gpu, AGC_MSG_STATE, 1);
+ gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+ /*
+ * For now just write the one voltage level - we will do more when we
+ * can do scaling
+ */
+ gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+ gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+ gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+ gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+ gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+ gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/* Enable the GPMU microcontroller */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ if (!a5xx_gpu->gpmu_dwords)
+ return 0;
+
+ /* Turn off protected mode for this operation */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Kick off the IB to load the GPMU microcode */
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+ OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ gpu->funcs->flush(gpu, ring);
+
+ /* This is "fatal" because the CP is left in a bad state */
+ if (!a5xx_idle(gpu, ring)) {
+ DRM_ERROR("%s: Unable to load GPMU firmwaren",
+ gpu->name);
+ return -EINVAL;
+ }
+
+ /* Clock gating setup for A530 targets */
+ if (adreno_is_a530(adreno_gpu))
+ gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+ /* Kick off the GPMU */
+ gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+ /*
+ * Wait for the GPMU to respond. It isn't fatal if it doesn't; we just
+ * won't have advanced power collapse.
+ */
+ if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+ 0xBABEFACE)) {
+ DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+ gpu->name);
+ return 0;
+ }
+
+ if (!adreno_is_a530(adreno_gpu)) {
+ u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+ if (val)
+ DRM_ERROR("%s: GPMU firmare initialization failed: %d\n",
+ gpu->name, val);
+ }
+
+ /* FIXME: Clear GPMU interrupts? */
+ return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ /* This init sequence only applies to A530 */
+ if (!adreno_is_a530(adreno_gpu))
+ return;
+
+ gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+ gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+ gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+ gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+ gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+ u32 lm_limit = 6000;
+
+ /*
+ * Set up the limits management.
+ * First, do some generic setup:
+ */
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+ of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);
+ gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);
+
+ /* Now do the target specific setup */
+ if (adreno_is_a530(adreno_gpu))
+ a530_lm_setup(gpu);
+ else
+ a540_lm_setup(gpu);
+
+ /* Set up SP/TP power collapse */
+ a5xx_pc_init(gpu);
+
+ /* Start the GPMU */
+ ret = a5xx_gpmu_init(gpu);
+ if (ret)
+ return ret;
+
+ /* Start the limits management */
+ a5xx_lm_enable(gpu);
+
+ return 0;
+}
+
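+/*
+ * The header is a size dword followed by (key, value) pairs, where key 1
+ * carries the major version and key 2 the minor. For illustration, feeding
+ * { 4, 1, 1, 2, 28 } through the parser below yields major = 1 and
+ * minor = 28 (the version numbers are invented for the example).
+ */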
+static int _read_header(unsigned int *data, uint32_t fwsize,
+ unsigned int *major, unsigned int *minor)
+{
+ uint32_t size;
+ unsigned int i;
+
+ /* First dword of the header is the header size */
+ if (fwsize < 4)
+ return -EINVAL;
+
+ size = data[0];
+
+ /* Make sure the header isn't too big and is a multiple of two */
+ if ((size % 2) || (size > 10) || size > (fwsize >> 2))
+ return -EINVAL;
+
+ /* Read the values in pairs */
+ for (i = 1; i < size; i += 2) {
+ switch (data[i]) {
+ case 1:
+ *major = data[i + 1];
+ break;
+ case 2:
+ *minor = data[i + 1];
+ break;
+ default:
+ /* Invalid values are non fatal */
+ break;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * Make sure cur_major and cur_minor are greater than or equal to the minimum
+ * allowable major/minor
+ */
+static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
+ uint32_t min_major, uint32_t min_minor)
+{
+ return ((cur_major > min_major) ||
+ ((cur_major == min_major) && (cur_minor >= min_minor)));
+}
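+
+/* For example, 1.28 satisfies a 1.25 minimum but 1.24 does not */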
+
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *drm = gpu->dev;
+ const char *name;
+ const struct firmware *fw;
+ uint32_t version[2] = { 0, 0 };
+ uint32_t dwords = 0, offset = 0;
+ uint32_t major = 0, minor = 0, bosize;
+ unsigned int *data, *ptr, *cmds;
+ unsigned int cmds_size;
+
+ if (a5xx_gpu->gpmu_bo)
+ return;
+
+ /*
+ * Read the firmware name from the device tree - if it doesn't exist
+ * then don't initialize the GPMU for this target
+ */
+ if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
+ &name))
+ return;
+
+ /*
+ * The version isn't mandatory, but if it exists, we need to enforce
+ * that the version of the GPMU firmware matches or is newer than the
+ * value from the device tree
+ */
+ of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
+ version, 2);
+
+ /* Get the firmware */
+ if (request_firmware(&fw, name, drm->dev)) {
+ DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+ gpu->name);
+ return;
+ }
+
+ data = (unsigned int *) fw->data;
+
+ /*
+ * The first dword is the size of the remaining data in dwords. Use it
+ * as a checksum of sorts and make sure it is consistent with the actual
+ * size of the firmware that we read
+ */
+
+ if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+ goto out;
+
+ /* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+ if (data[1] != 2)
+ goto out;
+
+ /* Read the header and get the major/minor of the read firmware */
+ if (_read_header(&data[2], fw->size - 8, &major, &minor))
+ goto out;
+
+ if (!_check_gpmu_version(major, minor, version[0], version[1])) {
+ DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
+ gpu->name, major, minor);
+ goto out;
+ }
+
+ cmds = data + data[2] + 3;
+ cmds_size = data[0] - data[2] - 2;
+
+ /*
+ * A single type4 opcode can only carry a limited number of payload values,
+ * so add enough opcodes to load all of the commands
+ */
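+ /*
+ * For example, if TYPE4_MAX_PAYLOAD were 127 dwords (the actual value
+ * is defined elsewhere), 300 dwords of commands would need three type4
+ * headers, and the buffer would be sized as (300 + 2 + 1) << 2 = 1212
+ * bytes
+ */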
+ bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+ mutex_lock(&drm->struct_mutex);
+ a5xx_gpu->gpmu_bo = msm_gem_new(drm, bosize,
+ MSM_BO_UNCACHED | MSM_BO_GPU_READONLY);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(a5xx_gpu->gpmu_bo))
+ goto err;
+
+ if (msm_gem_get_iova(a5xx_gpu->gpmu_bo, gpu->aspace,
+ &a5xx_gpu->gpmu_iova))
+ goto err;
+
+ ptr = msm_gem_vaddr(a5xx_gpu->gpmu_bo);
+ if (!ptr)
+ goto err;
+
+ while (cmds_size > 0) {
+ int i;
+ uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+ TYPE4_MAX_PAYLOAD : cmds_size;
+
+ ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+ _size);
+
+ for (i = 0; i < _size; i++)
+ ptr[dwords++] = *cmds++;
+
+ offset += _size;
+ cmds_size -= _size;
+ }
+
+ a5xx_gpu->gpmu_dwords = dwords;
+
+ goto out;
+
+err:
+ if (a5xx_gpu->gpmu_iova)
+ msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+ if (a5xx_gpu->gpmu_bo)
+ drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+ a5xx_gpu->gpmu_bo = NULL;
+ a5xx_gpu->gpmu_iova = 0;
+ a5xx_gpu->gpmu_dwords = 0;
+
+out:
+ /* No need to keep that firmware lying around anymore */
+ release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 000000000000..648494c75abc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,383 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+static void *alloc_kernel_bo(struct drm_device *drm, struct msm_gpu *gpu,
+ size_t size, uint32_t flags, struct drm_gem_object **bo,
+ u64 *iova)
+{
+ struct drm_gem_object *_bo;
+ u64 _iova;
+ void *ptr;
+ int ret;
+
+ mutex_lock(&drm->struct_mutex);
+ _bo = msm_gem_new(drm, size, flags);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(_bo))
+ return _bo;
+
+ ret = msm_gem_get_iova(_bo, gpu->aspace, &_iova);
+ if (ret)
+ goto out;
+
+ ptr = msm_gem_vaddr(_bo);
+ if (!ptr) {
+ ret = -ENOMEM;
+ goto out;
+ }
+
+ if (bo)
+ *bo = _bo;
+ if (iova)
+ *iova = _iova;
+
+ return ptr;
+out:
+ drm_gem_object_unreference_unlocked(_bo);
+ return ERR_PTR(ret);
+}
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+ enum preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+ /* ... and after */
+ smp_mb__after_atomic();
+}
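+
+/*
+ * Taken together, the helpers above implement a small state machine. The
+ * normal flow (with the actors in parentheses) is roughly:
+ *
+ * PREEMPT_NONE -> PREEMPT_START (trigger claims the switch)
+ * PREEMPT_START -> PREEMPT_TRIGGERED (trigger hands it to the CP)
+ * PREEMPT_TRIGGERED -> PREEMPT_PENDING (IRQ starts completion)
+ * PREEMPT_PENDING -> PREEMPT_NONE (IRQ finishes the switch)
+ *
+ * with PREEMPT_TRIGGERED or PREEMPT_PENDING moving to PREEMPT_FAULTED when
+ * the watchdog timer fires or the completion check fails
+ */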
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ if (!ring)
+ return;
+
+ spin_lock_irqsave(&ring->lock, flags);
+ wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ unsigned long flags;
+ int i;
+
+ for (i = gpu->nr_rings - 1; i >= 0; i--) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ spin_lock_irqsave(&ring->lock, flags);
+ empty = (get_wptr(ring) == adreno_gpu->memptrs->rptr[ring->id]);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ return NULL;
+}
+
+static void a5xx_preempt_timer(unsigned long data)
+{
+ struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct msm_gpu *gpu = &a5xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ return;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated or the highest priority ring is the current
+ * one, do nothing except update the wptr to the latest value
+ */
+ if (!ring || (a5xx_gpu->cur_ring == ring)) {
+ update_wptr(gpu, ring);
+
+ /* Set the state back to NONE */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ return;
+ }
+
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Do read barrier to make sure we have updated pagetable info */
+ rmb();
+
+ /* Set the SMMU info for the preemption */
+ if (a5xx_gpu->smmu_info) {
+ a5xx_gpu->smmu_info->ttbr0 =
+ adreno_gpu->memptrs->ttbr0[ring->id];
+ a5xx_gpu->smmu_info->contextidr =
+ adreno_gpu->memptrs->contextidr[ring->id];
+ }
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Make sure everything is written before hitting the button */
+ wmb();
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a5xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+ * firing the interrupt, but there is a non-zero chance of a hardware
+ * condition or a software race that could set it again before we have a
+ * chance to finish. If that happens, log and go for recovery
+ */
+ status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status)) {
+ set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+ dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+ return;
+ }
+
+ a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+ a5xx_gpu->next_ring = NULL;
+
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ if (gpu->nr_rings > 1) {
+ /* Clear the preemption records */
+ FOR_EACH_RING(gpu, ring, i) {
+ if (ring) {
+ a5xx_gpu->preempt[ring->id]->wptr = 0;
+ a5xx_gpu->preempt[ring->id]->rptr = 0;
+ a5xx_gpu->preempt[ring->id]->rbase = ring->iova;
+ }
+ }
+ }
+
+ /* Tell the CP where to find the smmu_info buffer */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ a5xx_gpu->smmu_info_iova);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+}
+
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a5xx_preempt_record *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+
+ ptr = alloc_kernel_bo(gpu->dev, gpu,
+ A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+ &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_iova[ring->id] = iova;
+ a5xx_gpu->preempt[ring->id] = ptr;
+
+ /* Set up the defaults on the preemption record */
+
+ ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ ptr->info = 0;
+ ptr->data = 0;
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+ ptr->rptr_addr = rbmemptr(adreno_gpu, ring->id, rptr);
+ ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+
+ return 0;
+}
+
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring || !a5xx_gpu->preempt_bo[i])
+ continue;
+
+ if (a5xx_gpu->preempt_iova[i])
+ msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
+
+ drm_gem_object_unreference_unlocked(a5xx_gpu->preempt_bo[i]);
+
+ a5xx_gpu->preempt_bo[i] = NULL;
+ }
+
+ if (a5xx_gpu->smmu_info_bo) {
+ if (a5xx_gpu->smmu_info_iova)
+ msm_gem_put_iova(a5xx_gpu->smmu_info_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->smmu_info_bo);
+ a5xx_gpu->smmu_info_bo = NULL;
+ }
+}
+
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ struct a5xx_smmu_info *ptr;
+ struct drm_gem_object *bo;
+ uint64_t iova;
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ if (preempt_init_ring(a5xx_gpu, ring))
+ goto fail;
+ }
+
+ if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
+ ptr = alloc_kernel_bo(gpu->dev, gpu,
+ sizeof(struct a5xx_smmu_info),
+ MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+ &bo, &iova);
+
+ if (IS_ERR(ptr))
+ goto fail;
+
+ ptr->magic = A5XX_SMMU_INFO_MAGIC;
+
+ a5xx_gpu->smmu_info_bo = bo;
+ a5xx_gpu->smmu_info_iova = iova;
+ a5xx_gpu->smmu_info = ptr;
+ }
+
+ setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
+ (unsigned long) a5xx_gpu);
+
+ return;
+fail:
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
new file mode 100644
index 000000000000..5a2edb0ea518
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
@@ -0,0 +1,796 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+#include "msm_snapshot_api.h"
+
+#define A5XX_NR_SHADER_BANKS 4
+
+/*
+ * This is a list of the registers that need to be read through the HLSQ
+ * aperture by the crashdumper. These are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xE00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xE780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xEF80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0EC0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xE580, 0x180 }, /* SP 3D context 0 */
+ { 0x3c, 0xED80, 0x180 }, /* SP 3D context 1 */
+ { 0x3a, 0x0F00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xE700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xEF00, 0x80 }, /* TP 3D context 1 */
+};
+
+/*
+ * The debugbus registers contain device state that presumably makes
+ * sense to the hardware designers. 'count' is the number of indexes to read,
+ * each index value is 64 bits
+ */
+static const struct {
+ enum a5xx_debugbus id;
+ u32 count;
+} a5xx_debugbus_blocks[] = {
+ { A5XX_RBBM_DBGBUS_CP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ, 0x100, },
+ { A5XX_RBBM_DBGBUS_UCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_DPM, 0x100, },
+ { A5XX_RBBM_DBGBUS_TESS, 0x100, },
+ { A5XX_RBBM_DBGBUS_PC, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFDP, 0x100, },
+ { A5XX_RBBM_DBGBUS_VPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_TSE, 0x100, },
+ { A5XX_RBBM_DBGBUS_RAS, 0x100, },
+ { A5XX_RBBM_DBGBUS_VSC, 0x100, },
+ { A5XX_RBBM_DBGBUS_COM, 0x100, },
+ { A5XX_RBBM_DBGBUS_DCOM, 0x100, },
+ { A5XX_RBBM_DBGBUS_LRZ, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_DSP, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCUFCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPMU, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBP, 0x100, },
+ { A5XX_RBBM_DBGBUS_HM, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM_CFG, 0x100, },
+ { A5XX_RBBM_DBGBUS_VBIF_CX, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_LARC, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ_SPTP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_3, 0x100, },
+};
+
+/*
+ * The shader blocks are read from the HLSQ aperture - each one has its own
+ * identifier for the aperture read
+ */
+static const struct {
+ enum a5xx_shader_blocks id;
+ u32 size;
+} a5xx_shader_blocks[] = {
+ {A5XX_TP_W_MEMOBJ, 0x200},
+ {A5XX_TP_W_MIPMAP_BASE, 0x3C0},
+ {A5XX_TP_W_SAMPLER_TAG, 0x40},
+ {A5XX_TP_S_3D_SAMPLER, 0x80},
+ {A5XX_TP_S_3D_SAMPLER_TAG, 0x20},
+ {A5XX_TP_S_CS_SAMPLER, 0x40},
+ {A5XX_TP_S_CS_SAMPLER_TAG, 0x10},
+ {A5XX_SP_W_CONST, 0x800},
+ {A5XX_SP_W_CB_SIZE, 0x30},
+ {A5XX_SP_W_CB_BASE, 0xF0},
+ {A5XX_SP_W_STATE, 0x1},
+ {A5XX_SP_S_3D_CONST, 0x800},
+ {A5XX_SP_S_3D_CB_SIZE, 0x28},
+ {A5XX_SP_S_3D_UAV_SIZE, 0x80},
+ {A5XX_SP_S_CS_CONST, 0x400},
+ {A5XX_SP_S_CS_CB_SIZE, 0x8},
+ {A5XX_SP_S_CS_UAV_SIZE, 0x80},
+ {A5XX_SP_S_3D_CONST_DIRTY, 0x12},
+ {A5XX_SP_S_3D_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_3D_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_CONST_DIRTY, 0xA},
+ {A5XX_SP_S_CS_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB_DIRTY, 0xB},
+ {A5XX_SP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_W_SAMPLER, 0x80},
+ {A5XX_TP_W_MEMOBJ_TAG, 0x40},
+ {A5XX_TP_S_3D_MEMOBJ, 0x200},
+ {A5XX_TP_S_3D_MEMOBJ_TAG, 0x20},
+ {A5XX_TP_S_CS_MEMOBJ, 0x100},
+ {A5XX_TP_S_CS_MEMOBJ_TAG, 0x10},
+ {A5XX_SP_W_INSTR, 0x800},
+ {A5XX_SP_W_UAV_SIZE, 0x80},
+ {A5XX_SP_W_UAV_BASE, 0x80},
+ {A5XX_SP_W_INST_TAG, 0x40},
+ {A5XX_SP_S_3D_INSTR, 0x800},
+ {A5XX_SP_S_3D_CB_BASE, 0xC8},
+ {A5XX_SP_S_3D_UAV_BASE, 0x80},
+ {A5XX_SP_S_CS_INSTR, 0x400},
+ {A5XX_SP_S_CS_CB_BASE, 0x28},
+ {A5XX_SP_S_CS_UAV_BASE, 0x80},
+ {A5XX_SP_S_3D_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_3D_CB_BASE_DIRTY, 0x5},
+ {A5XX_SP_S_3D_UAV_BASE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_CS_CB_BASE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_BASE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB, 0x200},
+ {A5XX_HLSQ_ICB_CB_BASE_DIRTY, 0x4},
+ {A5XX_SP_POWER_RESTORE_RAM, 0x140},
+ {A5XX_TP_POWER_RESTORE_RAM, 0x40},
+};
+
+/*
+ * The A5XX architecture has a built-in engine to asynchronously dump
+ * registers from the GPU. It is used to accelerate the copy of hundreds
+ * (thousands) of registers and as a safe way to access registers that might
+ * have secure data in them (if the GPU is in secure mode, the crashdumper
+ * returns bogus values for those registers). On a fully secured device the
+ * CPU will be blocked from accessing those registers directly and so the
+ * crashdump is the
+ * only way that we can access context registers and the shader banks for debug
+ * purposes.
+ *
+ * The downside of the crashdump is that it requires access to GPU accessible
+ * memory (so the VBIF and the bus and the SMMU need to be up and working) and
+ * you need enough memory to write the script for the crashdumper and to store
+ * the data that you are dumping so there is a balancing act between the work to
+ * set up a crash dumper and the value we get out of it.
+ */
+
+/*
+ * The crashdump uses a pseudo-script format to read and write registers. Each
+ * operation is two 64-bit values.
+ *
+ * READ:
+ * [qword 0] [63:00] - The absolute IOVA address target for the register value
+ * [qword 1] [63:44] - the dword address of the register offset to read
+ * [15:00] - Number of dwords to read at once
+ *
+ * WRITE:
+ * [qword 0] [31:00] - 32-bit value to write to the register
+ * [qword 1] [63:44] - the dword address of the register offset to write
+ * [21:21] - set 1 to write
+ * [15:00] - Number of dwords to write (usually 1)
+ *
+ * At the bottom of the script, write two zero quadwords to trigger the end.
+ */
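+
+/*
+ * For illustration, a minimal script that reads four dwords from register
+ * offset 0xE00 into the start of the data section and then ends looks like
+ * this (iova is the GPU address of the crashdump buffer):
+ *
+ * qword[0] = iova + CRASHDUMP_SCRIPT_SIZE (destination address)
+ * qword[1] = ((u64) 0xE00 << 44) | 4 (read 4 dwords at 0xE00)
+ * qword[2] = 0 (end of script)
+ * qword[3] = 0
+ */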
+struct crashdump {
+ struct drm_gem_object *bo;
+ void *ptr;
+ u64 iova;
+ u32 index;
+};
+
+#define CRASHDUMP_BO_SIZE (SZ_1M)
+#define CRASHDUMP_SCRIPT_SIZE (256 * SZ_1K)
+#define CRASHDUMP_DATA_SIZE (CRASHDUMP_BO_SIZE - CRASHDUMP_SCRIPT_SIZE)
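+
+/*
+ * The single 1MB buffer is split in two: the first 256K holds the script and
+ * the remaining 768K holds the data that the reads land in. The data region
+ * is addressed as iova + CRASHDUMP_SCRIPT_SIZE + offset throughout
+ */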
+
+static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ struct drm_device *drm = gpu->dev;
+ int ret = -ENOMEM;
+
+ crashdump->bo = msm_gem_new(drm, CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED);
+ if (IS_ERR(crashdump->bo)) {
+ ret = PTR_ERR(crashdump->bo);
+ crashdump->bo = NULL;
+ return ret;
+ }
+
+ crashdump->ptr = msm_gem_vaddr_locked(crashdump->bo);
+ if (!crashdump->ptr)
+ goto out;
+
+ ret = msm_gem_get_iova_locked(crashdump->bo, gpu->aspace,
+ &crashdump->iova);
+
+out:
+ if (ret) {
+ drm_gem_object_unreference(crashdump->bo);
+ crashdump->bo = NULL;
+ }
+
+ return ret;
+}
+
+static int crashdump_run(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ if (!crashdump->ptr || !crashdump->index)
+ return -EINVAL;
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ lower_32_bits(crashdump->iova));
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_HI,
+ upper_32_bits(crashdump->iova));
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return spin_until(gpu_read(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL) & 0x04);
+}
+
+static void crashdump_destroy(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ if (!crashdump->bo)
+ return;
+
+ if (crashdump->iova)
+ msm_gem_put_iova(crashdump->bo, gpu->aspace);
+
+ drm_gem_object_unreference(crashdump->bo);
+
+ memset(crashdump, 0, sizeof(*crashdump));
+}
+
+static inline void CRASHDUMP_SCRIPT_WRITE(struct crashdump *crashdump,
+ u32 reg, u32 val)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ /* This is the value to write */
+ ptr[0] = (u64) val;
+
+ /*
+ * This triggers a write to the specified register. 1 is the size of
+ * the write in dwords
+ */
+ ptr[1] = (((u64) reg) << 44) | (1 << 21) | 1;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+static inline void CRASHDUMP_SCRIPT_READ(struct crashdump *crashdump,
+ u32 reg, u32 count, u32 offset)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ if (WARN_ON(offset + (count * sizeof(u32)) >= CRASHDUMP_DATA_SIZE))
+ return;
+
+ ptr[0] = (u64) crashdump->iova + CRASHDUMP_SCRIPT_SIZE + offset;
+ ptr[1] = (((u64) reg) << 44) | count;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+static inline void *CRASHDUMP_DATA_PTR(struct crashdump *crashdump, u32 offset)
+{
+ if (WARN_ON(!crashdump->ptr || offset >= CRASHDUMP_DATA_SIZE))
+ return NULL;
+
+ return crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset;
+}
+
+static inline u32 CRASHDUMP_DATA_READ(struct crashdump *crashdump, u32 offset)
+{
+ return *((u32 *) CRASHDUMP_DATA_PTR(crashdump, offset));
+}
+
+static inline void CRASHDUMP_RESET(struct crashdump *crashdump)
+{
+ crashdump->index = 0;
+}
+
+static inline void CRASHDUMP_END(struct crashdump *crashdump)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON((crashdump->index + (2 * sizeof(u64)))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ ptr[0] = 0;
+ ptr[1] = 0;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+static u32 _crashdump_read_hlsq_aperture(struct crashdump *crashdump,
+ u32 offset, u32 statetype, u32 bank,
+ u32 count)
+{
+ CRASHDUMP_SCRIPT_WRITE(crashdump, REG_A5XX_HLSQ_DBG_READ_SEL,
+ A5XX_HLSQ_DBG_READ_SEL_STATETYPE(statetype) | bank);
+
+ CRASHDUMP_SCRIPT_READ(crashdump, REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE,
+ count, offset);
+
+ return count * sizeof(u32);
+}
+
+static u32 _copy_registers(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 reg, u32 count,
+ u32 offset)
+{
+ int i;
+ u32 *ptr = (u32 *) (crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset);
+ /*
+ * Write the offset of the first register of the group and the number of
+ * registers in the group
+ */
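+ /*
+ * For example, the 0x62 HLSQ 3D context 0 registers at 0xE780 from the
+ * table above encode as (0x62 << 16) | 0xE780 = 0x0062E780
+ */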
+ SNAPSHOT_WRITE_U32(snapshot, ((count << 16) | reg));
+
+ /* Followed by each register value in the group */
+ for (i = 0; i < count; i++)
+ SNAPSHOT_WRITE_U32(snapshot, ptr[i]);
+
+ return count * sizeof(u32);
+}
+
+/*
+ * Return the number of registers in a register group from the
+ * adreno_gpu->registers list
+ */
+static inline u32 REG_COUNT(const unsigned int *ptr)
+{
+ return (ptr[1] - ptr[0]) + 1;
+}
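+
+/*
+ * The list is laid out as inclusive { start, end } pairs terminated by ~0.
+ * For example, a hypothetical list { 0x0000, 0x0002, 0x0010, 0x0010, ~0 }
+ * describes two groups: 0x0000-0x0002 (REG_COUNT = 3) and 0x0010
+ * (REG_COUNT = 1)
+ */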
+
+/*
+ * Capture what registers we can from the CPU in case the crashdumper is
+ * unavailable or broken. This will omit the SP, TP and HLSQ registers, but
+ * you'll get everything else and that ain't bad
+ */
+static void a5xx_snapshot_registers_cpu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_regs header;
+ u32 regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * Before we write the section we need to figure out how big our data
+ * section will be
+ */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ regcount += REG_COUNT(&(adreno_gpu->registers[i]));
+ groups++;
+ }
+
+ header.count = groups;
+
+ /*
+ * We need one dword for each group and then one dword for each register
+ * value in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ regcount + groups))
+ return;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 count = REG_COUNT(&(adreno_gpu->registers[i]));
+ u32 reg = adreno_gpu->registers[i];
+ int j;
+
+ /* Write the offset and count for the group */
+ SNAPSHOT_WRITE_U32(snapshot, (count << 16) | reg);
+
+ /* Write each value in the group */
+ for (j = 0; j < count; j++)
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, reg++));
+ }
+}
+
+static void a5xx_snapshot_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_regs header;
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0, regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * First snapshot all the registers that we can from the CPU. Do this
+ * because the crashdumper has a tendency to "taint" the value of some
+ * of the registers (because the GPU implements the crashdumper) so we
+ * only want to use the crash dump facility if we have to
+ */
+ a5xx_snapshot_registers_cpu(gpu, snapshot);
+
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* HLSQ and context registers behind the aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 count = a5xx_hlsq_aperture_regs[i].count;
+
+ offset += _crashdump_read_hlsq_aperture(crashdump, offset,
+ a5xx_hlsq_aperture_regs[i].type, 0, count);
+ regcount += count;
+
+ groups++;
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ header.count = groups;
+
+ /*
+ * The size of the data will be one dword for each "group" of registers,
+ * and then one dword for each of the registers in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ groups + regcount))
+ return;
+
+ /* Copy the registers to the snapshot */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ offset += _copy_registers(snapshot, crashdump,
+ a5xx_hlsq_aperture_regs[i].regoffset,
+ a5xx_hlsq_aperture_regs[i].count, offset);
+}
+
+static void _a5xx_snapshot_shader_bank(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 block, u32 bank,
+ u32 size, u32 offset)
+{
+ void *src;
+
+ struct msm_snapshot_shader header = {
+ .type = block,
+ .index = bank,
+ .size = size,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_SHADER, size))
+ return;
+
+ src = CRASHDUMP_DATA_PTR(crashdump, offset);
+
+ if (src)
+ SNAPSHOT_MEMCPY(snapshot, src, size * sizeof(u32));
+}
+
+static void a5xx_snapshot_shader_memory(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0;
+ int i;
+
+ /* We can only get shader memory through the crashdump */
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* For each shader block */
+ for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ /* For each block, dump 4 banks */
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++)
+ offset += _crashdump_read_hlsq_aperture(crashdump,
+ offset, a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size);
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ /* If the crashdump fails we can't get shader memory any other way */
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ /* Each bank of each shader gets its own snapshot section */
+ for (offset = 0, i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++) {
+ _a5xx_snapshot_shader_bank(snapshot, crashdump,
+ a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size, offset);
+ offset += a5xx_shader_blocks[i].size * sizeof(u32);
+ }
+ }
+}
+
+#define A5XX_NUM_AXI_ARB_BLOCKS 2
+#define A5XX_NUM_XIN_BLOCKS 4
+#define VBIF_DATA_SIZE ((16 * A5XX_NUM_AXI_ARB_BLOCKS) + \
+ (18 * A5XX_NUM_XIN_BLOCKS) + (12 * A5XX_NUM_XIN_BLOCKS))
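+
+/*
+ * With the counts above this works out to (16 * 2) + (18 * 4) + (12 * 4) =
+ * 152 dwords, matching the three read loops below
+ */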
+
+static void a5xx_snapshot_debugbus_vbif(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = A5XX_RBBM_DBGBUS_VBIF,
+ .count = VBIF_DATA_SIZE,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ VBIF_DATA_SIZE))
+ return;
+
+ gpu_rmw(gpu, REG_A5XX_VBIF_CLKON, A5XX_VBIF_CLKON_FORCE_ON_TESTBUS,
+ A5XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 0);
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS_OUT_CTRL,
+ A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN);
+
+ for (i = 0; i < A5XX_NUM_AXI_ARB_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << (i + 16));
+ for (j = 0; j < 16; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
+ for (j = 0; j < 18; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
+ for (j = 0; j < 12; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL1,
+ A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+}
+
+static void a5xx_snapshot_debugbus_block(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 block, u32 count)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = block,
+ .count = count * 2, /* Each value is 2 dwords */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ (count * 2)))
+ return;
+
+ for (i = 0; i < count; i++) {
+ u32 reg = A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(i) |
+ A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_D, reg);
+
+ /* Each debugbus entry is a quad word */
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1));
+ }
+}
+
+static void a5xx_snapshot_debugbus(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_CNTLM,
+ A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(0xF));
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_debugbus_blocks); i++)
+ a5xx_snapshot_debugbus_block(gpu, snapshot,
+ a5xx_debugbus_blocks[i].id,
+ a5xx_debugbus_blocks[i].count);
+
+ /* VBIF is special and not in a good way */
+ a5xx_snapshot_debugbus_vbif(gpu, snapshot);
+}
+
+static void a5xx_snapshot_cp_merciu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ unsigned int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MERCIU,
+ .size = 64 << 1, /* Data size is 2 dwords per entry */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64 << 1))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++) {
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_1));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_2));
+ }
+}
+
+static void a5xx_snapshot_cp_roq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_ROQ,
+ .size = 512,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 512))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+ for (i = 0; i < 512; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_cp_meq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MEQ,
+ .size = 64,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+}
+
+static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 addr, u32 data,
+ u32 count)
+{
+ unsigned int i;
+ struct msm_snapshot_indexed_regs header = {
+ .index_reg = addr,
+ .data_reg = data,
+ .start = 0,
+ .count = count,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_INDEXED_REGS,
+ count))
+ return;
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, addr, i);
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, data));
+ }
+}
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct crashdump crashdump = { 0 };
+
+ if (!crashdump_init(gpu, &crashdump))
+ snapshot->priv = &crashdump;
+
+ /* To accurately read all registers, disable hardware clock gating */
+ a5xx_set_hwcg(gpu, false);
+
+ /* Kick it up to the generic level */
+ adreno_snapshot(gpu, snapshot);
+
+ /* Read the GPU registers */
+ a5xx_snapshot_registers(gpu, snapshot);
+
+ /* Read the shader memory banks */
+ a5xx_snapshot_shader_memory(gpu, snapshot);
+
+ /* Read the debugbus registers */
+ a5xx_snapshot_debugbus(gpu, snapshot);
+
+ /* PFP data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_STAT_ADDR, REG_A5XX_CP_PFP_STAT_DATA, 36);
+
+ /* ME data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_STAT_ADDR, REG_A5XX_CP_ME_STAT_DATA, 29);
+
+ /* DRAW_STATE data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_DRAW_STATE_ADDR, REG_A5XX_CP_DRAW_STATE_DATA,
+ 256);
+
+ /* ME cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_UCODE_DBG_ADDR, REG_A5XX_CP_ME_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* PFP cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_UCODE_DBG_ADDR, REG_A5XX_CP_PFP_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* ME queue */
+ a5xx_snapshot_cp_meq(gpu, snapshot);
+
+ /* CP ROQ */
+ a5xx_snapshot_cp_roq(gpu, snapshot);
+
+ /* CP MERCIU */
+ a5xx_snapshot_cp_merciu(gpu, snapshot);
+
+ crashdump_destroy(gpu, &crashdump);
+ snapshot->priv = NULL;
+
+ /* Re-enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index e81481d1b7df..1cf84479e447 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -127,11 +128,13 @@ enum a3xx_rop_code {
ROP_COPY_INVERTED = 3,
ROP_AND_REVERSE = 4,
ROP_INVERT = 5,
+ ROP_XOR = 6,
ROP_NAND = 7,
ROP_AND = 8,
ROP_EQUIV = 9,
ROP_NOOP = 10,
ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
ROP_OR_REVERSE = 13,
ROP_OR = 14,
ROP_SET = 15,
@@ -172,6 +175,14 @@ enum a3xx_color_swap {
XYZW = 3,
};
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 5127b75dbf40..a498a60cd52d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -27,6 +27,7 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
static const struct adreno_info gpulist[] = {
{
@@ -77,6 +78,22 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .revn = 530,
+ .name = "A530",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
+ .revn = 540,
+ .name = "A540",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
},
};
@@ -86,6 +103,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_fm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -148,12 +167,16 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
mutex_lock(&dev->struct_mutex);
gpu->funcs->pm_resume(gpu);
mutex_unlock(&dev->struct_mutex);
+
+ disable_irq(gpu->irq);
+
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
gpu->funcs->destroy(gpu);
gpu = NULL;
} else {
+ enable_irq(gpu->irq);
/* give inactive pm a chance to kick in: */
msm_gpu_retire(gpu);
}
@@ -172,11 +195,16 @@ static void set_gpu_pdev(struct drm_device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
- struct device_node *child, *node = dev->of_node;
- u32 val;
+ uint32_t val = 0;
int ret;
- ret = of_property_read_u32(node, "qcom,chipid", &val);
+ /*
+ * Read the chip ID from the device tree at bind time - we use this
+ * information to load the correct functions. All the rest of the
+ * (extensive) device tree probing should happen in the GPU specific
+ * code
+ */
+ ret = of_property_read_u32(dev->of_node, "qcom,chipid", &val);
if (ret) {
dev_err(dev, "could not find chipid: %d\n", ret);
return ret;
@@ -185,29 +213,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
config.rev = ADRENO_REV((val >> 24) & 0xff,
(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
- /* find clock rates: */
- config.fast_rate = 0;
- config.slow_rate = ~0;
- for_each_child_of_node(node, child) {
- if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
- struct device_node *pwrlvl;
- for_each_child_of_node(child, pwrlvl) {
- ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
- if (ret) {
- dev_err(dev, "could not find gpu-freq: %d\n", ret);
- return ret;
- }
- config.fast_rate = max(config.fast_rate, val);
- config.slow_rate = min(config.slow_rate, val);
- }
- }
- }
-
- if (!config.fast_rate) {
- dev_err(dev, "could not find clk rates\n");
- return -ENXIO;
- }
-
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 4b518bfdeee4..f1883825354e 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -17,12 +17,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/utsname.h>
#include "adreno_gpu.h"
+#include "msm_snapshot.h"
#include "msm_gem.h"
#include "msm_mmu.h"
-#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -35,6 +35,9 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = 0x100000;
+ return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
(adreno_gpu->rev.minor << 8) |
@@ -42,7 +45,7 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
(adreno_gpu->rev.core << 24);
return 0;
case MSM_PARAM_MAX_FREQ:
- *value = adreno_gpu->base.fast_rate;
+ *value = gpu->gpufreq[gpu->active_level];
return 0;
case MSM_PARAM_TIMESTAMP:
if (adreno_gpu->funcs->get_timestamp)
@@ -54,91 +57,125 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
}
}
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int ret;
+ int i;
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
- return ret;
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int ret = msm_gem_get_iova(gpu->rb[i]->bo, gpu->aspace,
+ &gpu->rb[i]->iova);
+ if (ret) {
+ gpu->rb[i]->iova = 0;
+ dev_err(gpu->dev->dev,
+ "could not map ringbuffer %d: %d\n", i, ret);
+ return ret;
+ }
}
- /* Setup REG_CP_RB_CNTL: */
+ /*
+ * Setup REG_CP_RB_CNTL. The same value is used across targets (with
+ * the exception of A430 that disables the RPTR shadow) - the calculation
+ * for the ringbuffer size and block size is moved to msm_gpu.h for the
+ * pre-processor to deal with and the A430 variant is ORed in here
+ */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- /* size is log2(quad-words): */
- AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)) |
- (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
+ MSM_GPU_RB_CNTL_DEFAULT |
+ (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
- /* Setup ringbuffer address: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
+ /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
- if (!adreno_is_a430(adreno_gpu))
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(adreno_gpu, 0, rptr));
return 0;
}
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
{
- return ring->cur - ring->start;
+ if (adreno_is_a430(adreno_gpu)) {
+ /*
+ * If index is anything but 0 this will probably break horribly,
+ * but I think that we have enough infrastructure in place to
+ * ensure that it won't be. If not then this is why your
+ * a430 stopped working.
+ */
+ return adreno_gpu->memptrs->rptr[ring->id] = adreno_gpu_read(
+ adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+ } else
+ return adreno_gpu->memptrs->rptr[ring->id];
}
-/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
-static uint32_t get_rptr(struct adreno_gpu *adreno_gpu)
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
- if (adreno_is_a430(adreno_gpu))
- return adreno_gpu->memptrs->rptr = adreno_gpu_read(
- adreno_gpu, REG_ADRENO_CP_RB_RPTR);
- else
- return adreno_gpu->memptrs->rptr;
+ return gpu->rb[0];
+}
+
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring)
+{
+ if (!ring)
+ return 0;
+
+ return ring->submitted_fence;
}
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- return adreno_gpu->memptrs->fence;
+
+ if (!ring)
+ return 0;
+
+ return adreno_gpu->memptrs->fence[ring->id];
}
void adreno_recover(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
- int ret;
+ struct msm_ringbuffer *ring;
+ int ret, i;
gpu->funcs->pm_suspend(gpu);
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
+ /* reset ringbuffer(s): */
- /* reset completed fence seqno, just discard anything pending: */
- adreno_gpu->memptrs->fence = gpu->submitted_fence;
- adreno_gpu->memptrs->rptr = 0;
- adreno_gpu->memptrs->wptr = 0;
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ /* No need for a lock here, nobody else is peeking in */
+ ring->cur = ring->start;
+ ring->next = ring->start;
+
+ /* reset completed fence seqno, discard anything pending: */
+ adreno_gpu->memptrs->fence[ring->id] =
+ adreno_submitted_fence(gpu, ring);
+ adreno_gpu->memptrs->rptr[ring->id] = 0;
+ }
gpu->funcs->pm_resume(gpu);
+
+ disable_irq(gpu->irq);
ret = gpu->funcs->hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
+ enable_irq(gpu->irq);
}
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
unsigned i, ibs = 0;
for (i = 0; i < submit->nr_cmds; i++) {
@@ -147,13 +184,11 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
/* ignore IB-targets */
break;
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- /* ignore if there has not been a ctx switch: */
- if (priv->lastctx == ctx)
break;
case MSM_SUBMIT_CMD_BUF:
OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
- OUT_RING(ring, submit->cmd[i].iova);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
ibs++;
break;
@@ -184,7 +219,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, rbmemptr(adreno_gpu, ring->id, fence));
OUT_RING(ring, submit->fence);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -211,15 +246,25 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
#endif
- gpu->funcs->flush(gpu);
+ gpu->funcs->flush(gpu, ring);
return 0;
}
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr;
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /*
+ * Mask the wptr value that we calculate to fit in the HW range. This is
+ * to account for the possibility that the last command fit exactly into
+ * the ringbuffer and rb->next hasn't wrapped to zero yet
+ */
+ wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -227,17 +272,19 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-bool adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
- if (!spin_until(get_rptr(adreno_gpu) == wptr))
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
return false;
}
@@ -245,6 +292,7 @@ bool adreno_idle(struct msm_gpu *gpu)
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
@@ -252,11 +300,18 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- seq_printf(m, "rptr: %d\n", get_rptr(adreno_gpu));
- seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
- seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ seq_printf(m, "rb %d: fence: %d/%d\n", i,
+ adreno_last_fence(gpu, ring),
+ adreno_submitted_fence(gpu, ring));
+
+ seq_printf(m, " rptr: %d\n",
+ get_rptr(adreno_gpu, ring));
+ seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ }
gpu->funcs->pm_resume(gpu);
@@ -285,22 +340,29 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
*/
void adreno_dump_info(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
- printk("revision: %d (%d.%d.%d.%d)\n",
+ dev_err(dev->dev, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- printk("rptr: %d\n", get_rptr(adreno_gpu));
- printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
- printk("rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ dev_err(dev->dev, " ring %d: fence %d/%d rptr/wptr %x/%x\n", i,
+ adreno_last_fence(gpu, ring),
+ adreno_submitted_fence(gpu, ring),
+ get_rptr(adreno_gpu, ring),
+ get_wptr(ring));
+ }
for (i = 0; i < 8; i++) {
- printk("CP_SCRATCH_REG%d: %u\n", i,
+ pr_err("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
}
@@ -325,30 +387,146 @@ void adreno_dump(struct msm_gpu *gpu)
}
}
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t size = gpu->rb->size / 4;
- uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = get_rptr(adreno_gpu);
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
return (rptr + (size - 1) - wptr) % size;
}
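[editor note: a standalone restatement of the free-space formula above with a worked example; not driver code, sizes are illustrative]

#include <assert.h>
#include <stdint.h>

/* Free dwords between wptr and rptr. One slot is kept as a guard so
 * that wptr == rptr always means "empty" rather than "full". */
static uint32_t freewords(uint32_t rptr, uint32_t wptr, uint32_t size)
{
	return (rptr + (size - 1) - wptr) % size;
}

int main(void)
{
	assert(freewords(2, 5, 8) == 4);  /* 3 dwords in flight + 1 guard */
	assert(freewords(3, 3, 8) == 7);  /* empty ring: all but the guard */
	return 0;
}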
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
- if (spin_until(ring_freewords(gpu) >= ndwords))
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_ERROR("%s: timeout waiting for space in ringubffer %d\n",
+ ring->gpu->name, ring->id);
}
static const char *iommu_ports[] = {
- "gfx3d_user", "gfx3d_priv",
- "gfx3d1_user", "gfx3d1_priv",
+ "gfx3d_user",
+};
+
+/* Read the set of powerlevels */
+static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
+{
+ struct device_node *child;
+
+ gpu->active_level = 1;
+
+ /* The device tree will tell us the best clock to initialize with */
+ of_property_read_u32(node, "qcom,initial-pwrlevel", &gpu->active_level);
+
+ if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
+ gpu->active_level = 1;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+
+ if (of_property_read_u32(child, "reg", &index))
+ return -EINVAL;
+
+ if (index >= ARRAY_SIZE(gpu->gpufreq))
+ continue;
+
+ gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);
+
+ of_property_read_u32(child, "qcom,gpu-freq",
+ &gpu->gpufreq[index]);
+ of_property_read_u32(child, "qcom,bus-freq",
+ &gpu->busfreq[index]);
+ }
+
+ DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+ gpu->gpufreq[gpu->active_level],
+ gpu->gpufreq[gpu->nr_pwrlevels - 1],
+ gpu->busfreq[gpu->active_level]);
+
+ return 0;
+}
+
+/*
+ * Escape valve for targets that don't define the binning nodes. Get the
+ * first powerlevel node and parse it
+ */
+static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
+ struct device_node *parent)
+{
+ struct device_node *child;
+
+ child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
+ if (child)
+ return _adreno_get_pwrlevels(gpu, child);
+
+ dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
+ return -EINVAL;
+}
+
+/* Get the powerlevels for the target */
+static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node, *child;
+
+ /* See if the target has defined a number of power bins */
+ node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
+ if (!node) {
+ /* If not look for the qcom,gpu-pwrlevels node */
+ return adreno_get_legacy_pwrlevels(gpu, parent);
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int bin;
+
+ if (of_property_read_u32(child, "qcom,speed-bin", &bin))
+ continue;
+
+ /*
+ * If the bin matches the bin specified by the fuses, then we
+ * have a winner - parse it
+ */
+ if (adreno_gpu->speed_bin == bin)
+ return _adreno_get_pwrlevels(gpu, child);
+ }
+
+ return -ENODEV;
+}
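[editor note: a hypothetical device-tree layout this parser would accept; node and property names are taken from the of_* calls above, the numeric values are invented]

/*
 *	qcom,gpu-pwrlevel-bins {
 *		qcom,gpu-pwrlevel-bin@0 {
 *			qcom,speed-bin = <0>;
 *			qcom,initial-pwrlevel = <1>;
 *			qcom,gpu-pwrlevel@0 {
 *				reg = <0>;
 *				qcom,gpu-freq = <650000000>;
 *				qcom,bus-freq = <12>;
 *			};
 *			qcom,gpu-pwrlevel@1 {
 *				reg = <1>;
 *				qcom,gpu-freq = <300000000>;
 *				qcom,bus-freq = <4>;
 *			};
 *		};
 *	};
 */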
+
+static const struct {
+ const char *str;
+ uint32_t flag;
+} quirks[] = {
+ { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+ { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
};
+/* Parse the target configuration (powerlevels, quirks) from the device tree */
+static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node = pdev->dev.of_node;
+ int i, ret;
+
+ /* Probe the powerlevels */
+ ret = adreno_get_pwrlevels(gpu, node);
+ if (ret)
+ return ret;
+
+ /* Check to see if any quirks were specified in the device tree */
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if (of_property_read_bool(node, quirks[i].str))
+ adreno_gpu->quirks |= quirks[i].flag;
+
+ return 0;
+}
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs, int nr_rings)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
+ struct msm_gpu_config adreno_gpu_config = { 0 };
struct msm_gpu *gpu = &adreno_gpu->base;
struct msm_mmu *mmu;
int ret;
@@ -359,19 +537,29 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
- gpu->fast_rate = config->fast_rate;
- gpu->slow_rate = config->slow_rate;
- gpu->bus_freq = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- gpu->bus_scale_table = config->bus_scale_table;
-#endif
+ /* Get the rest of the target configuration from the device tree */
+ adreno_of_parse(pdev, gpu);
- DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
- gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ adreno_gpu_config.ioname = "kgsl_3d0_reg_memory";
+ adreno_gpu_config.irqname = "kgsl_3d0_irq";
+ adreno_gpu_config.nr_rings = nr_rings;
+
+ if (adreno_gpu->revn >= 500) {
+ /* 5XX targets use a 64 bit region */
+ adreno_gpu_config.va_start = 0x800000000;
+ adreno_gpu_config.va_end = 0x8ffffffff;
+ } else {
+ adreno_gpu_config.va_start = 0x300000;
+ adreno_gpu_config.va_end = 0xffffffff;
+ }
+
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
- RB_SIZE);
+ adreno_gpu->info->name, &adreno_gpu_config);
if (ret)
return ret;
@@ -414,7 +602,7 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
return -ENOMEM;
}
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
+ ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->aspace,
&adreno_gpu->memptrs_iova);
if (ret) {
dev_err(drm->dev, "could not map memptrs: %d\n", ret);
@@ -430,7 +618,7 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
if (gpu->memptrs_bo) {
if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
+ msm_gem_put_iova(gpu->memptrs_bo, aspace);
drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
release_firmware(gpu->pm4);
@@ -439,8 +627,85 @@ void adreno_gpu_cleanup(struct adreno_gpu *gpu)
msm_gpu_cleanup(&gpu->base);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+}
+
+static void adreno_snapshot_os(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_linux header;
+
+ memset(&header, 0, sizeof(header));
+
+ header.osid = SNAPSHOT_OS_LINUX_V3;
+ strlcpy(header.release, utsname()->release, sizeof(header.release));
+ strlcpy(header.version, utsname()->version, sizeof(header.version));
+
+ header.seconds = get_seconds();
+ header.ctxtcount = 0;
+
+ SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_OS, 0);
+}
+
+static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_ringbuffer header;
+ unsigned int i, end = 0;
+ unsigned int *data = ring->start;
+
+ memset(&header, 0, sizeof(header));
+
+ /*
+ * We only want to copy the active contents of each ring, so find the
+ * last valid entry in the ringbuffer
+ */
+ for (i = 0; i < MSM_GPU_RINGBUFFER_SZ >> 2; i++) {
+ if (data[i])
+ end = i;
}
+
+ /* The dump always starts at 0 */
+ header.start = 0;
+ header.end = end;
+
+ /* This is the number of dwords being dumped */
+ header.count = end + 1;
+
+ /* This is the size of the actual ringbuffer */
+ header.rbsize = MSM_GPU_RINGBUFFER_SZ >> 2;
+
+ header.id = ring->id;
+ header.gpuaddr = ring->iova;
+ header.rptr = get_rptr(adreno_gpu, ring);
+ header.wptr = get_wptr(ring);
+ header.timestamp_queued = adreno_submitted_fence(gpu, ring);
+ header.timestamp_retired = adreno_last_fence(gpu, ring);
+
+ /* Write the header even if the ringbuffer data is empty */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2,
+ header.count))
+ return;
+
+ SNAPSHOT_MEMCPY(snapshot, ring->start, header.count * sizeof(u32));
+}
+
+static void adreno_snapshot_ringbuffers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_ringbuffer *ring;
+ int i;
+
+ /* Write a new section for each ringbuffer */
+ FOR_EACH_RING(gpu, ring, i)
+ adreno_snapshot_ringbuffer(gpu, snapshot, ring);
+}
+
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ adreno_snapshot_os(gpu, snapshot);
+ adreno_snapshot_ringbuffers(gpu, snapshot);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index aea39d357dc0..30461115281c 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -24,10 +24,17 @@
#include "msm_gpu.h"
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
@@ -35,73 +42,21 @@
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
- REG_ADRENO_CP_DEBUG,
- REG_ADRENO_CP_ME_RAM_WADDR,
- REG_ADRENO_CP_ME_RAM_DATA,
- REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_PROTECT_CTRL,
- REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_CP_IB1_BASE,
- REG_ADRENO_CP_IB1_BUFSZ,
- REG_ADRENO_CP_IB2_BASE,
- REG_ADRENO_CP_IB2_BUFSZ,
- REG_ADRENO_CP_TIMESTAMP,
- REG_ADRENO_CP_ME_RAM_RADDR,
- REG_ADRENO_CP_ROQ_ADDR,
- REG_ADRENO_CP_ROQ_DATA,
- REG_ADRENO_CP_MERCIU_ADDR,
- REG_ADRENO_CP_MERCIU_DATA,
- REG_ADRENO_CP_MERCIU_DATA2,
- REG_ADRENO_CP_MEQ_ADDR,
- REG_ADRENO_CP_MEQ_DATA,
- REG_ADRENO_CP_HW_FAULT,
- REG_ADRENO_CP_PROTECT_STATUS,
- REG_ADRENO_SCRATCH_ADDR,
- REG_ADRENO_SCRATCH_UMSK,
- REG_ADRENO_SCRATCH_REG2,
- REG_ADRENO_RBBM_STATUS,
- REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_ADRENO_RBBM_INT_0_MASK,
- REG_ADRENO_RBBM_INT_0_STATUS,
- REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_ADRENO_RBBM_AHB_CMD,
- REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_ADRENO_RBBM_CLOCK_CTL,
- REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_ADRENO_VFD_CONTROL_0,
- REG_ADRENO_VFD_INDEX_MAX,
- REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_ADRENO_PA_SC_AA_CONFIG,
- REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_ADRENO_TP0_CHICKEN,
- REG_ADRENO_RBBM_RBBM_CTL,
- REG_ADRENO_UCHE_INVALIDATE0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
+enum adreno_quirks {
+ ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+ ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -128,10 +83,20 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
+#define _sizeof(member) \
+ sizeof(((struct adreno_rbmemptrs *) 0)->member[0])
+
+#define _base(adreno_gpu, member) \
+ ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
+
+#define rbmemptr(adreno_gpu, index, member) \
+ (_base((adreno_gpu), member) + ((index) * _sizeof(member)))
+
struct adreno_rbmemptrs {
- volatile uint32_t rptr;
- volatile uint32_t wptr;
- volatile uint32_t fence;
+ volatile uint32_t rptr[MSM_GPU_MAX_RINGS];
+ volatile uint32_t fence[MSM_GPU_MAX_RINGS];
+ volatile uint64_t ttbr0[MSM_GPU_MAX_RINGS];
+ volatile unsigned int contextidr[MSM_GPU_MAX_RINGS];
};
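[editor note: a standalone sketch of what the rbmemptr() indexing above works out to; illustrative values, not driver code]

#include <stddef.h>
#include <stdint.h>

#define MAX_RINGS 4	/* stands in for MSM_GPU_MAX_RINGS */

struct rbmemptrs {
	volatile uint32_t rptr[MAX_RINGS];
	volatile uint32_t fence[MAX_RINGS];
};

/* GPU-visible address of memptrs->fence[ring_id], as rbmemptr() computes */
static uint64_t fence_slot(uint64_t base_iova, int ring_id)
{
	return base_iova + offsetof(struct rbmemptrs, fence) +
	       ring_id * sizeof(uint32_t);
}
/* fence_slot(0x1000, 2) == 0x1000 + 16 + 8 == 0x1018 */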
struct adreno_gpu {
@@ -153,7 +118,7 @@ struct adreno_gpu {
// different for z180..
struct adreno_rbmemptrs *memptrs;
struct drm_gem_object *memptrs_bo;
- uint32_t memptrs_iova;
+ uint64_t memptrs_iova;
/*
* Register offsets are different between some GPUs.
@@ -161,16 +126,15 @@ struct adreno_gpu {
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
+
+ uint32_t quirks;
+ uint32_t speed_bin;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, slow_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
-#endif
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -187,6 +151,9 @@ struct adreno_platform_config {
__ret; \
})
+#define GPU_OF_NODE(_g) \
+ (((struct msm_drm_private *) \
+ ((_g)->dev->dev_private))->gpu_pdev->dev.of_node)
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -234,32 +201,46 @@ static inline int adreno_is_a430(struct adreno_gpu *gpu)
return gpu->revn == 430;
}
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 540;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
+uint32_t adreno_last_fence(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-bool adreno_idle(struct msm_gpu *gpu);
+int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
@@ -267,19 +248,49 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
- adreno_wait_ring(ring->gpu, 1);
+ adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
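[editor note: PM4_PARITY() folds the eight nibbles of the value together and indexes the 0x9669 table, the complement of the classic 0x6996 parity table; the result is an odd-parity bit, 1 exactly when the value has an even number of set bits. A standalone check, not driver code]

#include <assert.h>
#include <stdint.h>

static uint32_t pm4_parity(uint32_t val)
{
	return (0x9669 >> (0xF & (val ^
		(val >> 4) ^ (val >> 8) ^ (val >> 12) ^
		(val >> 16) ^ (val >> 20) ^ (val >> 24) ^
		(val >> 28)))) & 1;
}

int main(void)
{
	uint32_t v;

	/* the bit complements the value's parity, making the total odd */
	for (v = 0; v < (1 << 20); v++)
		assert(pm4_parity(v) == !__builtin_parity(v));
	return 0;
}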
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
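[editor note: working the PKT4 encoding through by hand for one made-up register and count]

/*
 * PKT4(0x0200, 3), operands invented for illustration:
 *
 *   CP_TYPE4_PKT            0x40000000
 *   count                 | 0x00000003
 *   PM4_PARITY(3) << 7    | 0x00000080   (two set bits -> parity 1)
 *   reg << 8              | 0x00020000
 *   PM4_PARITY(0x200)<<27 | 0x00000000   (one set bit -> parity 0)
 *                         = 0x40020083
 */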
+
/*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
@@ -290,6 +301,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}
+
+ /*
+ * REG_SKIP is a special value that tells us that the register in
+ * question isn't implemented on the target and shouldn't trigger a BUG(). This
+ * is used to cleanly implement adreno_gpu_write64() and
+ * adreno_gpu_read64() in a generic fashion
+ */
+ if (gpu->reg_offsets[offset_name] == REG_SKIP)
+ return false;
+
return true;
}
@@ -311,4 +332,40 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+ enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+ adreno_gpu_write(gpu, lo, lower_32_bits(data));
+ adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
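[editor note: this pairs with REG_ADRENO_SKIP above - a 32-bit target can map the HI halves to REG_SKIP so adreno_gpu_write64() silently drops the upper word. A hypothetical offsets table; the hardware offsets are invented]

/* Hypothetical 32-bit target: with the HI halves skipped,
 * adreno_gpu_write64(gpu, REG_ADRENO_CP_RB_BASE,
 *                    REG_ADRENO_CP_RB_BASE_HI, iova)
 * only performs the low write. Offsets 0x0700/0x0704 are invented. */
static const unsigned int example_reg_offsets[REG_ADRENO_REGISTER_MAX] = {
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, 0x0700),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, 0x0704),
	REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
};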
+
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
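[editor note: the modulo here is what the wrap comment in adreno_flush() refers to; a quick illustration with an assumed ring size]

#include <assert.h>
#include <stdint.h>

#define RB_DWORDS 8192	/* e.g. a 32 KiB ring; size is illustrative */

int main(void)
{
	/* last command ended exactly at the ring's end: cur - start == size */
	uint32_t cur_minus_start = RB_DWORDS;

	assert(cur_minus_start % RB_DWORDS == 0);  /* wptr folds back to 0 */
	return 0;
}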
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
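[editor note: a standalone check of the encoding and of the alignment caveat described above; ilog2() is stubbed so the sketch builds outside the kernel]

#include <assert.h>

static int ilog2(unsigned int v)
{
	int n = -1;

	while (v) {
		v >>= 1;
		n++;
	}
	return n;
}

#define ADRENO_PROTECT_RW(_reg, _len) \
	((1 << 30) | (1 << 29) | \
	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))

int main(void)
{
	/*
	 * Asking for 0x07..0x0A with a length of 4: the hardware masks the
	 * base down to a multiple of the length, so the range actually
	 * protected is 0x04..0x07 - the same as asking for 0x04 directly.
	 */
	assert(ADRENO_PROTECT_RW(0x07, 4) == 0x6200001c);
	assert(ADRENO_PROTECT_RW(0x04, 4) == 0x62000010);
	return 0;
}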
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+ ((1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index d7477ff867c9..9911a181f9c2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,14 +8,15 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 11518 bytes, from 2016-02-10 21:03:25)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 16166 bytes, from 2016-02-11 21:20:31)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83967 bytes, from 2016-02-10 17:07:21)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 109916 bytes, from 2016-02-20 18:44:48)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
@@ -58,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
+ STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -82,7 +84,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES = 34,
};
enum pc_di_src_sel {
@@ -110,11 +111,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
};
enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
@@ -163,6 +168,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,
@@ -178,6 +184,21 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -196,6 +217,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
};
enum adreno_state_type {
@@ -389,7 +411,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -533,5 +560,78 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
index b8520aadbc0c..fb9617ce572a 100644
--- a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -1186,7 +1186,7 @@ static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
{
int rc = 0;
- u32 iova = 0;
+ u64 iova = 0;
dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
SZ_4K,
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 5f5a3732cdf6..4cb4764e7492 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -89,7 +89,7 @@ int msm_dsi_manager_phy_enable(int id,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
@@ -143,7 +143,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
- u32 iova, u32 len);
+ u64 iova, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 4c49868efcda..4580a6e3c877 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -836,7 +836,7 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
int ret;
- u32 iova;
+ u64 iova;
mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
@@ -974,7 +974,7 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
int ret;
- u32 iova;
+ uint64_t iova;
bool triggered;
ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
@@ -1750,11 +1750,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return ret;
}
-void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u64 iova, u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ /* FIXME: Verify that the iova < 32 bits? */
+ dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 0455ff75074a..2091b748abbb 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -774,7 +774,7 @@ restore_host0:
return ret;
}
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
new file mode 100644
index 000000000000..347b78886b24
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -0,0 +1,1352 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "sde-hdmi:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_hdmi.h"
+
+static DEFINE_MUTEX(sde_hdmi_list_lock);
+static LIST_HEAD(sde_hdmi_list);
+
+static const struct of_device_id sde_hdmi_dt_match[] = {
+ {.compatible = "qcom,hdmi-display"},
+ {}
+};
+
+static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_1K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf, SZ_1K, "name = %s\n", display->name);
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+
+static const struct file_operations dump_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_dump_info_read,
+};
+
+static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
+{
+ int rc = 0;
+ struct dentry *dir, *dump_file;
+
+ dir = debugfs_create_dir(display->name, NULL);
+ if (!dir) {
+ rc = -ENOMEM;
+ SDE_ERROR("[%s]debugfs create dir failed, rc = %d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ dump_file = debugfs_create_file("dump_info",
+ 0444,
+ dir,
+ display,
+ &dump_info_fops);
+ if (IS_ERR_OR_NULL(dump_file)) {
+ rc = PTR_ERR(dump_file);
+ SDE_ERROR("[%s]debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ display->root = dir;
+ return rc;
+error_remove_dir:
+ debugfs_remove(dir);
+error:
+ return rc;
+}
+
+static void _sde_hdmi_debugfs_deinit(struct sde_hdmi *display)
+{
+ debugfs_remove(display->root);
+}
+
+static void _sde_hdmi_phy_reset(struct hdmi *hdmi)
+{
+ unsigned int val;
+
+ val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET);
+
+ if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val | HDMI_PHY_CTRL_SW_RESET_PLL);
+ else
+ hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+ val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+}
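[editor note: a condensed restatement of the toggle logic above, not driver code - each reset line is driven to its asserted level and then released, with polarity taken from the corresponding *_LOW flag]

/* assert_reset = 1 drives the line to its asserted level; 0 releases it */
static unsigned int pulse_phase(unsigned int val, unsigned int bit,
				unsigned int low_flag, int assert_reset)
{
	int active_low = !!(val & low_flag);

	if (assert_reset ^ active_low)
		return val | bit;

	return val & ~bit;
}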
+
+static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on)
+{
+ const struct hdmi_platform_config *config = hdmi->config;
+ int ret;
+
+ if (on) {
+ if (config->ddc_clk_gpio != -1) {
+ ret = gpio_request(config->ddc_clk_gpio,
+ "HDMI_DDC_CLK");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_CLK", config->ddc_clk_gpio,
+ ret);
+ goto error_ddc_clk_gpio;
+ }
+ gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+ }
+
+ if (config->ddc_data_gpio != -1) {
+ ret = gpio_request(config->ddc_data_gpio,
+ "HDMI_DDC_DATA");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_DDC_DATA", config->ddc_data_gpio,
+ ret);
+ goto error_ddc_data_gpio;
+ }
+ gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+ }
+
+ ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD", config->hpd_gpio, ret);
+ goto error_hpd_gpio;
+ }
+ gpio_direction_output(config->hpd_gpio, 1);
+ if (config->hpd5v_gpio != -1) {
+ ret = gpio_request(config->hpd5v_gpio, "HDMI_HPD_5V");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_HPD_5V",
+ config->hpd5v_gpio,
+ ret);
+ goto error_hpd5v_gpio;
+ }
+ gpio_set_value_cansleep(config->hpd5v_gpio, 1);
+ }
+
+ if (config->mux_en_gpio != -1) {
+ ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_EN", config->mux_en_gpio,
+ ret);
+ goto error_en_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_en_gpio, 1);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ ret = gpio_request(config->mux_sel_gpio,
+ "HDMI_MUX_SEL");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_SEL", config->mux_sel_gpio,
+ ret);
+ goto error_sel_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_sel_gpio, 0);
+ }
+
+ if (config->mux_lpm_gpio != -1) {
+ ret = gpio_request(config->mux_lpm_gpio,
+ "HDMI_MUX_LPM");
+ if (ret) {
+ SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+ "HDMI_MUX_LPM",
+ config->mux_lpm_gpio, ret);
+ goto error_lpm_gpio;
+ }
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+ }
+ SDE_DEBUG("gpio on");
+ } else {
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+
+ gpio_free(config->hpd_gpio);
+
+ if (config->mux_en_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_en_gpio, 0);
+ gpio_free(config->mux_en_gpio);
+ }
+
+ if (config->mux_sel_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+ gpio_free(config->mux_sel_gpio);
+ }
+
+ if (config->mux_lpm_gpio != -1) {
+ gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+ gpio_free(config->mux_lpm_gpio);
+ }
+ SDE_DEBUG("gpio off");
+ }
+
+ return 0;
+
+error_lpm_gpio:
+ if (config->mux_sel_gpio != -1)
+ gpio_free(config->mux_sel_gpio);
+error_sel_gpio:
+ if (config->mux_en_gpio != -1)
+ gpio_free(config->mux_en_gpio);
+error_en_gpio:
+ gpio_free(config->hpd5v_gpio);
+error_hpd5v_gpio:
+ gpio_free(config->hpd_gpio);
+error_hpd_gpio:
+ if (config->ddc_data_gpio != -1)
+ gpio_free(config->ddc_data_gpio);
+error_ddc_data_gpio:
+ if (config->ddc_clk_gpio != -1)
+ gpio_free(config->ddc_clk_gpio);
+error_ddc_clk_gpio:
+ return ret;
+}
+
+static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ uint32_t hpd_ctrl;
+ int i, ret;
+ unsigned long flags;
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->hpd_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ goto fail;
+ }
+ }
+
+ ret = pinctrl_pm_select_default_state(dev);
+ if (ret) {
+ SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+ goto fail;
+ }
+
+ ret = _sde_hdmi_gpio_config(hdmi, true);
+ if (ret) {
+ SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+ goto fail;
+ }
+
+ for (i = 0; i < config->hpd_clk_cnt; i++) {
+ if (config->hpd_freq && config->hpd_freq[i]) {
+ ret = clk_set_rate(hdmi->hpd_clks[i],
+ config->hpd_freq[i]);
+ if (ret)
+ pr_warn("failed to set clk %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ }
+
+ ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
+ config->hpd_clk_names[i], ret);
+ goto fail;
+ }
+ }
+
+ sde_hdmi_set_mode(hdmi, false);
+ _sde_hdmi_phy_reset(hdmi);
+ sde_hdmi_set_mode(hdmi, true);
+
+ hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+ /* set timeout to 4.1ms (max) for hardware debounce */
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+ hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+ /* enable HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_CONNECT |
+ HDMI_HPD_INT_CTRL_INT_EN);
+
+ /* Toggle HPD circuit to trigger HPD sense */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ ~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+ HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+ return 0;
+
+fail:
+ return ret;
+}
+
+static void _sde_hdmi_hdp_disable(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ const struct hdmi_platform_config *config = hdmi->config;
+ struct device *dev = &hdmi->pdev->dev;
+ int i, ret = 0;
+
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+
+ sde_hdmi_set_mode(hdmi, false);
+
+ for (i = 0; i < config->hpd_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->hpd_clks[i]);
+
+ ret = _sde_hdmi_gpio_config(hdmi, false);
+ if (ret)
+ pr_warn("failed to unconfigure GPIOs: %d\n", ret);
+
+ ret = pinctrl_pm_select_sleep_state(dev);
+ if (ret)
+ pr_warn("pinctrl state chg failed: %d\n", ret);
+
+ for (i = 0; i < config->hpd_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->hpd_regs[i]);
+ if (ret)
+ pr_warn("failed to disable hpd regulator: %s (%d)\n",
+ config->hpd_reg_names[i], ret);
+ }
+}
+
+static void _sde_hdmi_hotplug_work(struct work_struct *work)
+{
+ struct sde_hdmi *sde_hdmi =
+ container_of(work, struct sde_hdmi, hpd_work);
+ struct drm_connector *connector;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl ||
+ !sde_hdmi->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ sde_hdmi);
+ return;
+ }
+
+ connector = sde_hdmi->ctrl.ctrl->connector;
+
+ if (sde_hdmi->connected)
+ sde_hdmi_get_edid(connector, sde_hdmi);
+ else
+ sde_hdmi_free_edid(sde_hdmi);
+
+ sde_hdmi_notify_clients(connector, sde_hdmi->connected);
+ drm_helper_hpd_irq_event(connector->dev);
+}
+
+static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
+{
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+ uint32_t hpd_int_status, hpd_int_ctrl;
+
+ /* Process HPD: */
+ hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+ hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+ if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+ (hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+ sde_hdmi->connected = !!(hpd_int_status &
+ HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+ /* ack & disable (temporarily) HPD events: */
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+ HDMI_HPD_INT_CTRL_INT_ACK);
+
+ DRM_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
+ hpd_int_ctrl);
+
+ /* detect disconnect if we are connected or vice versa: */
+ hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+ if (!sde_hdmi->connected)
+ hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+ if (!sde_hdmi->non_pluggable)
+ queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+ }
+}
+
+static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
+{
+ struct sde_hdmi *sde_hdmi = dev_id;
+ struct hdmi *hdmi;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+ SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+ return IRQ_NONE;
+ }
+ hdmi = sde_hdmi->ctrl.ctrl;
+ /* Process HPD: */
+ _sde_hdmi_connector_irq(sde_hdmi);
+
+ /* Process DDC: */
+ hdmi_i2c_irq(hdmi->i2c);
+
+ /* Process HDCP: */
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
+
+ /* TODO audio.. */
+
+ return IRQ_HANDLED;
+}
+
+static int _sde_hdmi_audio_info_setup(struct platform_device *pdev,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ int rc = -EPERM;
+ struct sde_hdmi *display = NULL;
+ struct hdmi *hdmi = NULL;
+
+ display = platform_get_drvdata(pdev);
+
+ if (!display || !params) {
+ SDE_ERROR("invalid param(s), display %pK, params %pK\n",
+ display, params);
+ return -ENODEV;
+ }
+
+ hdmi = display->ctrl.ctrl;
+
+ if (hdmi->hdmi_mode)
+ rc = sde_hdmi_audio_on(hdmi, params);
+
+ return rc;
+}
+
+static int _sde_hdmi_get_audio_edid_blk(struct platform_device *pdev,
+ struct msm_ext_disp_audio_edid_blk *blk)
+{
+ struct sde_hdmi *display = platform_get_drvdata(pdev);
+
+ if (!display || !blk) {
+ SDE_ERROR("invalid param(s), display %pK, blk %pK\n",
+ display, blk);
+ return -ENODEV;
+ }
+
+ blk->audio_data_blk = display->edid.audio_data_block;
+ blk->audio_data_blk_size = display->edid.adb_size;
+
+ blk->spk_alloc_data_blk = display->edid.spkr_alloc_data_block;
+ blk->spk_alloc_data_blk_size = display->edid.sadb_size;
+
+ return 0;
+}
+
+static int _sde_hdmi_get_cable_status(struct platform_device *pdev, u32 vote)
+{
+ struct sde_hdmi *display = NULL;
+ struct hdmi *hdmi = NULL;
+
+ display = platform_get_drvdata(pdev);
+
+ if (!display) {
+ SDE_ERROR("invalid param(s), display %pK\n", display);
+ return -ENODEV;
+ }
+
+ hdmi = display->ctrl.ctrl;
+
+ return hdmi->power_on && display->connected;
+}
+
+static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
+{
+ int rc = 0;
+ struct device_node *pd_np;
+ const char *phandle = "qcom,msm_ext_disp";
+
+ if (!display) {
+ SDE_ERROR("[%s]Invalid params\n", display->name);
+ return -EINVAL;
+ }
+
+ display->ext_audio_data.type = EXT_DISPLAY_TYPE_HDMI;
+ display->ext_audio_data.pdev = display->pdev;
+ display->ext_audio_data.codec_ops.audio_info_setup =
+ _sde_hdmi_audio_info_setup;
+ display->ext_audio_data.codec_ops.get_audio_edid_blk =
+ _sde_hdmi_get_audio_edid_blk;
+ display->ext_audio_data.codec_ops.cable_status =
+ _sde_hdmi_get_cable_status;
+
+ if (!display->pdev->dev.of_node) {
+ SDE_ERROR("[%s]cannot find sde_hdmi of_node\n", display->name);
+ return -ENODEV;
+ }
+
+ pd_np = of_parse_phandle(display->pdev->dev.of_node, phandle, 0);
+ if (!pd_np) {
+ SDE_ERROR("[%s]cannot find %s device node\n",
+ display->name, phandle);
+ return -ENODEV;
+ }
+
+ display->ext_pdev = of_find_device_by_node(pd_np);
+ if (!display->ext_pdev) {
+ SDE_ERROR("[%s]cannot find %s platform device\n",
+ display->name, phandle);
+ return -ENODEV;
+ }
+
+ rc = msm_ext_disp_register_intf(display->ext_pdev,
+ &display->ext_audio_data);
+ if (rc)
+ SDE_ERROR("[%s]failed to register disp\n", display->name);
+
+ return rc;
+}
+
+void sde_hdmi_notify_clients(struct drm_connector *connector,
+ bool connected)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ int state = connected ?
+ EXT_DISPLAY_CABLE_CONNECT : EXT_DISPLAY_CABLE_DISCONNECT;
+
+ if (display && display->ext_audio_data.intf_ops.hpd) {
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ u32 flags = MSM_EXT_DISP_HPD_VIDEO;
+
+ if (hdmi->hdmi_mode)
+ flags |= MSM_EXT_DISP_HPD_AUDIO;
+
+ display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
+ display->ext_audio_data.type, state, flags);
+ }
+}
+
+void sde_hdmi_ack_state(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ if (display) {
+ struct hdmi *hdmi = display->ctrl.ctrl;
+
+ if (hdmi->hdmi_mode && display->ext_audio_data.intf_ops.notify)
+ display->ext_audio_data.intf_ops.notify(
+ display->ext_pdev, status);
+ }
+}
+
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+ uint32_t ctrl = 0;
+ unsigned long flags;
+
+ spin_lock_irqsave(&hdmi->reg_lock, flags);
+ ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ if (power_on) {
+ ctrl |= HDMI_CTRL_ENABLE;
+ if (!hdmi->hdmi_mode) {
+ ctrl |= HDMI_CTRL_HDMI;
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ ctrl &= ~HDMI_CTRL_HDMI;
+ } else {
+ ctrl |= HDMI_CTRL_HDMI;
+ }
+ } else {
+ ctrl &= ~HDMI_CTRL_HDMI;
+ }
+
+ hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+ spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+ DRM_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+ power_on ? "Enable" : "Disable", ctrl);
+}
+
+int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display)
+{
+ int rc = 0;
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+
+ if (!display || !info) {
+ SDE_ERROR("display=%p or info=%p is NULL\n", display, info);
+ return -EINVAL;
+ }
+
+ hdmi = hdmi_display->ctrl.ctrl;
+
+ mutex_lock(&hdmi_display->display_lock);
+
+ info->intf_type = DRM_MODE_CONNECTOR_HDMIA;
+ info->num_of_h_tiles = 1;
+ info->h_tile_instance[0] = 0;
+ if (hdmi_display->non_pluggable) {
+ info->capabilities = MSM_DISPLAY_CAP_VID_MODE;
+ hdmi_display->connected = true;
+ hdmi->hdmi_mode = true;
+ } else {
+ info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG |
+ MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
+ }
+ info->is_connected = hdmi_display->connected;
+ info->max_width = 1920;
+ info->max_height = 1080;
+ info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+ mutex_unlock(&hdmi_display->display_lock);
+ return rc;
+}
+
+u32 sde_hdmi_get_num_of_displays(void)
+{
+ u32 count = 0;
+ struct sde_hdmi *display;
+
+ mutex_lock(&sde_hdmi_list_lock);
+
+ list_for_each_entry(display, &sde_hdmi_list, list)
+ count++;
+
+ mutex_unlock(&sde_hdmi_list_lock);
+ return count;
+}
+
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count)
+{
+ struct sde_hdmi *display;
+ int i = 0;
+
+ SDE_DEBUG("\n");
+
+ if (!display_array || !max_display_count) {
+ if (!display_array)
+ SDE_ERROR("invalid param\n");
+ return 0;
+ }
+
+ mutex_lock(&sde_hdmi_list_lock);
+ list_for_each_entry(display, &sde_hdmi_list, list) {
+ if (i >= max_display_count)
+ break;
+ display_array[i++] = display;
+ }
+ mutex_unlock(&sde_hdmi_list_lock);
+
+ return i;
+}
+
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display)
+{
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+
+ if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+ SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+ return -EINVAL;
+ }
+
+ _sde_hdmi_hdp_disable(sde_hdmi);
+
+ return 0;
+}
+
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ int rc = 0;
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+
+ if (!sde_hdmi) {
+ SDE_ERROR("sde_hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ hdmi = sde_hdmi->ctrl.ctrl;
+ if (!hdmi) {
+ SDE_ERROR("hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ if (info)
+ sde_kms_info_add_keystr(info,
+ "DISPLAY_TYPE",
+ sde_hdmi->display_type);
+
+ hdmi->connector = connector;
+ INIT_WORK(&sde_hdmi->hpd_work, _sde_hdmi_hotplug_work);
+
+ /* Enable HPD detection */
+ rc = _sde_hdmi_hpd_enable(sde_hdmi);
+ if (rc)
+ SDE_ERROR("failed to enable HPD: %d\n", rc);
+
+ return rc;
+}
+
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ enum drm_connector_status status = connector_status_unknown;
+ struct msm_display_info info;
+ int rc;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%p or display=%p is NULL\n",
+ connector, display);
+ return status;
+ }
+
+ SDE_DEBUG("\n");
+
+ /* get display dsi_info */
+ memset(&info, 0x0, sizeof(info));
+ rc = sde_hdmi_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("failed to get display info, rc=%d\n", rc);
+ return connector_status_disconnected;
+ }
+
+ if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+ status = (info.is_connected ? connector_status_connected :
+ connector_status_disconnected);
+ else
+ status = connector_status_connected;
+
+ connector->display_info.width_mm = info.width_mm;
+ connector->display_info.height_mm = info.height_mm;
+
+ return status;
+}
+
+int _sde_hdmi_update_modes(struct drm_connector *connector,
+ struct sde_hdmi *display)
+{
+ int rc = 0;
+ struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
+
+ if (edid_ctrl->edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid_ctrl->edid);
+
+ rc = drm_add_edid_modes(connector, edid_ctrl->edid);
+ return rc;
+ }
+
+ drm_mode_connector_update_edid_property(connector, NULL);
+
+ return rc;
+}
+
+int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct drm_display_mode *mode, *m;
+ int ret = 0;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%p or display=%p is NULL\n",
+ connector, display);
+ return 0;
+ }
+
+ SDE_DEBUG("\n");
+
+ if (hdmi_display->non_pluggable) {
+ list_for_each_entry(mode, &hdmi_display->mode_list, head) {
+ m = drm_mode_duplicate(connector->dev, mode);
+ if (!m) {
+ SDE_ERROR("failed to add hdmi mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ break;
+ }
+ drm_mode_probed_add(connector, m);
+ }
+ ret = hdmi_display->num_of_modes;
+ } else {
+ ret = _sde_hdmi_update_modes(connector, display);
+ }
+
+ return ret;
+}
+
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ long actual, requested;
+
+ if (!connector || !display || !mode) {
+ SDE_ERROR("connector=%pK or display=%pK or mode=%pK is NULL\n",
+ connector, display, mode);
+ return MODE_ERROR;
+ }
+
+ SDE_DEBUG("\n");
+
+ hdmi = hdmi_display->ctrl.ctrl;
+ priv = connector->dev->dev_private;
+ kms = priv->kms;
+ requested = 1000 * mode->clock;
+ actual = kms->funcs->round_pixclk(kms,
+ requested, hdmi->encoder);
+
+ SDE_DEBUG("requested=%ld, actual=%ld", requested, actual);
+
+ if (actual != requested)
+ return MODE_CLOCK_RANGE;
+
+ return MODE_OK;
+}
+
+int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+ if (!display) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+ if (!display) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ int rc = 0;
+ struct sde_hdmi_ctrl *display_ctrl = NULL;
+ struct sde_hdmi *display = NULL;
+ struct drm_device *drm = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ SDE_ERROR("E\n");
+ if (!dev || !pdev || !master) {
+ pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+ dev, pdev, master);
+ return -EINVAL;
+ }
+
+ drm = dev_get_drvdata(master);
+ display = platform_get_drvdata(pdev);
+ if (!drm || !display) {
+ pr_err("invalid param(s), drm %pK, display %pK\n",
+ drm, display);
+ return -EINVAL;
+ }
+
+ priv = drm->dev_private;
+ mutex_lock(&display->display_lock);
+
+ rc = _sde_hdmi_debugfs_init(display);
+ if (rc) {
+ SDE_ERROR("[%s]Debugfs init failed, rc=%d\n",
+ display->name, rc);
+ goto debug_error;
+ }
+
+ rc = _sde_hdmi_ext_disp_init(display);
+ if (rc) {
+ SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ rc = sde_hdmi_edid_init(display);
+ if (rc) {
+ SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ display_ctrl = &display->ctrl;
+ display_ctrl->ctrl = priv->hdmi;
+ SDE_ERROR("display_ctrl->ctrl=%p\n", display_ctrl->ctrl);
+ display->drm_dev = drm;
+
+ mutex_unlock(&display->display_lock);
+ return 0;
+
+error:
+ (void)_sde_hdmi_debugfs_deinit(display);
+debug_error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+static void sde_hdmi_unbind(struct device *dev, struct device *master,
+ void *data)
+{
+ struct sde_hdmi *display = NULL;
+
+ if (!dev) {
+ SDE_ERROR("invalid params\n");
+ return;
+ }
+
+ display = platform_get_drvdata(to_platform_device(dev));
+ if (!display) {
+ SDE_ERROR("Invalid display device\n");
+ return;
+ }
+ mutex_lock(&display->display_lock);
+ (void)_sde_hdmi_debugfs_deinit(display);
+ (void)sde_hdmi_edid_deinit(display);
+ display->drm_dev = NULL;
+ mutex_unlock(&display->display_lock);
+}
+
+static const struct component_ops sde_hdmi_comp_ops = {
+ .bind = sde_hdmi_bind,
+ .unbind = sde_hdmi_unbind,
+};
+
+static int _sde_hdmi_parse_dt_modes(struct device_node *np,
+ struct list_head *head,
+ u32 *num_of_modes)
+{
+ int rc = 0;
+ struct drm_display_mode *mode;
+ u32 mode_count = 0;
+ struct device_node *node = NULL;
+ struct device_node *root_node = NULL;
+ const char *name;
+ u32 h_front_porch, h_pulse_width, h_back_porch;
+ u32 v_front_porch, v_pulse_width, v_back_porch;
+ bool h_active_high, v_active_high;
+ u32 flags = 0;
+
+ root_node = of_get_child_by_name(np, "qcom,customize-modes");
+ if (!root_node) {
+ root_node = of_parse_phandle(np, "qcom,customize-modes", 0);
+ if (!root_node) {
+ DRM_INFO("No entry present for qcom,customize-modes");
+ goto end;
+ }
+ }
+ for_each_child_of_node(root_node, node) {
+ rc = 0;
+ mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+ if (!mode) {
+ rc = -ENOMEM;
+ break;
+ }
+
+ rc = of_property_read_string(node, "qcom,mode-name",
+ &name);
+ if (rc) {
+ SDE_ERROR("failed to read qcom,mode-name, rc=%d\n", rc);
+ goto fail;
+ }
+ strlcpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+ rc = of_property_read_u32(node, "qcom,mode-h-active",
+ &mode->hdisplay);
+ if (rc) {
+ SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+ &h_front_porch);
+ if (rc) {
+ SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+ &h_pulse_width);
+ if (rc) {
+ SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+ &h_back_porch);
+ if (rc) {
+ SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ h_active_high = of_property_read_bool(node,
+ "qcom,mode-h-active-high");
+
+ rc = of_property_read_u32(node, "qcom,mode-v-active",
+ &mode->vdisplay);
+ if (rc) {
+ SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+ &v_front_porch);
+ if (rc) {
+ SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+ &v_pulse_width);
+ if (rc) {
+ SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+ &v_back_porch);
+ if (rc) {
+ SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+ goto fail;
+ }
+
+ v_active_high = of_property_read_bool(node,
+ "qcom,mode-v-active-high");
+
+ rc = of_property_read_u32(node, "qcom,mode-refersh-rate",
+ &mode->vrefresh);
+ if (rc) {
+ SDE_ERROR("failed to read refersh-rate, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+ &mode->clock);
+ if (rc) {
+ SDE_ERROR("failed to read clock, rc=%d\n", rc);
+ goto fail;
+ }
+
+ mode->hsync_start = mode->hdisplay + h_front_porch;
+ mode->hsync_end = mode->hsync_start + h_pulse_width;
+ mode->htotal = mode->hsync_end + h_back_porch;
+ mode->vsync_start = mode->vdisplay + v_front_porch;
+ mode->vsync_end = mode->vsync_start + v_pulse_width;
+ mode->vtotal = mode->vsync_end + v_back_porch;
+ if (h_active_high)
+ flags |= DRM_MODE_FLAG_PHSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NHSYNC;
+ if (v_active_high)
+ flags |= DRM_MODE_FLAG_PVSYNC;
+ else
+ flags |= DRM_MODE_FLAG_NVSYNC;
+ mode->flags = flags;
+
+ if (!rc) {
+ mode_count++;
+ list_add_tail(&mode->head, head);
+ }
+
+ SDE_DEBUG("mode[%d] h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+ mode_count - 1, mode->hdisplay, mode->hsync_start,
+ mode->hsync_end, mode->htotal, mode->vdisplay,
+ mode->vsync_start, mode->vsync_end, mode->vtotal,
+ mode->vrefresh, mode->flags, mode->clock);
+fail:
+ if (rc) {
+ kfree(mode);
+ continue;
+ }
+ }
+
+ if (num_of_modes)
+ *num_of_modes = mode_count;
+
+end:
+ return rc;
+}
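+
+/*
+ * Illustrative qcom,customize-modes node for the parser above. The
+ * property names come from this file; the child node name and the
+ * timing values (a CEA 1080p60 entry) are just an example, not a
+ * requirement. Note the parser expects the "refersh" spelling as-is.
+ *
+ *	qcom,customize-modes {
+ *		qcom,customize-mode-id@0 {
+ *			qcom,mode-name = "1920x1080";
+ *			qcom,mode-h-active = <1920>;
+ *			qcom,mode-h-front-porch = <88>;
+ *			qcom,mode-h-pulse-width = <44>;
+ *			qcom,mode-h-back-porch = <148>;
+ *			qcom,mode-h-active-high;
+ *			qcom,mode-v-active = <1080>;
+ *			qcom,mode-v-front-porch = <4>;
+ *			qcom,mode-v-pulse-width = <5>;
+ *			qcom,mode-v-back-porch = <36>;
+ *			qcom,mode-v-active-high;
+ *			qcom,mode-refersh-rate = <60>;
+ *			qcom,mode-clock-in-khz = <148500>;
+ *		};
+ *	};
+ */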
+
+static int _sde_hdmi_parse_dt(struct device_node *node,
+ struct sde_hdmi *display)
+{
+ int rc = 0;
+
+ display->name = of_get_property(node, "label", NULL);
+
+ display->display_type = of_get_property(node,
+ "qcom,display-type", NULL);
+ if (!display->display_type)
+ display->display_type = "unknown";
+
+ display->non_pluggable = of_property_read_bool(node,
+ "qcom,non-pluggable");
+
+ rc = _sde_hdmi_parse_dt_modes(node, &display->mode_list,
+ &display->num_of_modes);
+ if (rc)
+ SDE_ERROR("parse_dt_modes failed rc=%d\n", rc);
+
+ return rc;
+}
+
+static int _sde_hdmi_dev_probe(struct platform_device *pdev)
+{
+ int rc;
+ struct sde_hdmi *display;
+ int ret = 0;
+
+ SDE_DEBUG("\n");
+
+ if (!pdev || !pdev->dev.of_node) {
+ SDE_ERROR("pdev not found\n");
+ return -ENODEV;
+ }
+
+ display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+ if (!display)
+ return -ENOMEM;
+
+ INIT_LIST_HEAD(&display->mode_list);
+ rc = _sde_hdmi_parse_dt(pdev->dev.of_node, display);
+ if (rc)
+ SDE_ERROR("parse dt failed, rc=%d\n", rc);
+
+ mutex_init(&display->display_lock);
+ display->pdev = pdev;
+ platform_set_drvdata(pdev, display);
+ mutex_lock(&sde_hdmi_list_lock);
+ list_add(&display->list, &sde_hdmi_list);
+ mutex_unlock(&sde_hdmi_list_lock);
+ ret = sde_hdmi_dev_init(display);
+ if (ret) {
+ SDE_ERROR("dev_init failed, ret=%d\n", ret);
+ goto out;
+ }
+ ret = component_add(&pdev->dev, &sde_hdmi_comp_ops);
+ if (ret) {
+ pr_err("component add failed, ret=%d\n", ret);
+ goto out;
+ }
+ return 0;
+
+out:
+ mutex_lock(&sde_hdmi_list_lock);
+ list_del(&display->list);
+ mutex_unlock(&sde_hdmi_list_lock);
+ devm_kfree(&pdev->dev, display);
+ return ret;
+}
+
+static int _sde_hdmi_dev_remove(struct platform_device *pdev)
+{
+ struct sde_hdmi *display;
+ struct sde_hdmi *pos, *tmp;
+ struct drm_display_mode *mode, *n;
+
+ if (!pdev) {
+ SDE_ERROR("Invalid device\n");
+ return -EINVAL;
+ }
+
+ display = platform_get_drvdata(pdev);
+ if (!display) {
+ SDE_ERROR("Invalid display device\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&sde_hdmi_list_lock);
+ list_for_each_entry_safe(pos, tmp, &sde_hdmi_list, list) {
+ if (pos == display) {
+ list_del(&display->list);
+ break;
+ }
+ }
+ mutex_unlock(&sde_hdmi_list_lock);
+
+ list_for_each_entry_safe(mode, n, &display->mode_list, head) {
+ list_del(&mode->head);
+ kfree(mode);
+ }
+
+ platform_set_drvdata(pdev, NULL);
+ devm_kfree(&pdev->dev, display);
+ return 0;
+}
+
+static struct platform_driver sde_hdmi_driver = {
+ .probe = _sde_hdmi_dev_probe,
+ .remove = _sde_hdmi_dev_remove,
+ .driver = {
+ .name = "sde_hdmi",
+ .of_match_table = sde_hdmi_dt_match,
+ },
+};
+
+int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
+{
+ int rc = 0;
+ struct msm_drm_private *priv = NULL;
+ struct hdmi *hdmi;
+ struct platform_device *pdev;
+
+ DBG("");
+ if (!display || !display->drm_dev || !enc) {
+ SDE_ERROR("display=%p or enc=%p or drm_dev is NULL\n",
+ display, enc);
+ return -EINVAL;
+ }
+
+ mutex_lock(&display->display_lock);
+ priv = display->drm_dev->dev_private;
+ hdmi = display->ctrl.ctrl;
+
+ if (!priv || !hdmi) {
+ SDE_ERROR("priv=%p or hdmi=%p is NULL\n",
+ priv, hdmi);
+ mutex_unlock(&display->display_lock);
+ return -EINVAL;
+ }
+
+ pdev = hdmi->pdev;
+ hdmi->dev = display->drm_dev;
+ hdmi->encoder = enc;
+
+ hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+ hdmi->bridge = sde_hdmi_bridge_init(hdmi);
+ if (IS_ERR(hdmi->bridge)) {
+ rc = PTR_ERR(hdmi->bridge);
+ SDE_ERROR("failed to create HDMI bridge: %d\n", rc);
+ hdmi->bridge = NULL;
+ goto error;
+ }
+ hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+ if (!hdmi->irq) {
+ /* irq_of_parse_and_map() returns 0 on failure, not a
+ * negative errno, so map that to -EINVAL here.
+ */
+ rc = -EINVAL;
+ SDE_ERROR("failed to get irq\n");
+ goto error;
+ }
+
+ rc = devm_request_irq(&pdev->dev, hdmi->irq,
+ _sde_hdmi_irq, IRQF_TRIGGER_HIGH,
+ "sde_hdmi_isr", display);
+ if (rc < 0) {
+ SDE_ERROR("failed to request IRQ%u: %d\n",
+ hdmi->irq, rc);
+ goto error;
+ }
+
+ enc->bridge = hdmi->bridge;
+ priv->bridges[priv->num_bridges++] = hdmi->bridge;
+
+ mutex_unlock(&display->display_lock);
+ return 0;
+
+error:
+ /* bridge is normally destroyed by drm: */
+ if (hdmi->bridge) {
+ hdmi_bridge_destroy(hdmi->bridge);
+ hdmi->bridge = NULL;
+ }
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ return rc;
+}
+
+static int __init sde_hdmi_register(void)
+{
+ int rc = 0;
+
+ DBG("");
+ rc = platform_driver_register(&sde_hdmi_driver);
+ return rc;
+}
+
+static void __exit sde_hdmi_unregister(void)
+{
+ platform_driver_unregister(&sde_hdmi_driver);
+}
+
+module_init(sde_hdmi_register);
+module_exit(sde_hdmi_unregister);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
new file mode 100644
index 000000000000..869d1bebf9db
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_H_
+#define _SDE_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/msm_ext_display.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "hdmi.h"
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+
+/**
+ * struct sde_hdmi_info - defines hdmi display properties
+ * @display_type: Display type as defined by device tree.
+ * @is_hot_pluggable: Can panel be hot plugged.
+ * @is_connected: Is panel connected.
+ * @is_edid_supported: Does panel support reading EDID information.
+ * @width_mm: Physical width of panel in millimeters.
+ * @height_mm: Physical height of panel in millimeters.
+ */
+struct sde_hdmi_info {
+ const char *display_type;
+
+ /* HPD */
+ bool is_hot_pluggable;
+ bool is_connected;
+ bool is_edid_supported;
+
+ /* Physical properties */
+ u32 width_mm;
+ u32 height_mm;
+};
+
+/**
+ * struct sde_hdmi_ctrl - hdmi ctrl/phy information for the display
+ * @ctrl: Handle to the HDMI controller device.
+ * @ctrl_of_node: pHandle to the HDMI controller device.
+ * @hdmi_ctrl_idx: HDMI controller instance id.
+ */
+struct sde_hdmi_ctrl {
+ /* controller info */
+ struct hdmi *ctrl;
+ struct device_node *ctrl_of_node;
+ u32 hdmi_ctrl_idx;
+};
+
+struct hdmi_edid_ctrl {
+ struct edid *edid;
+ u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+ int adb_size;
+ u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+ int sadb_size;
+};
+
+/**
+ * struct sde_hdmi - hdmi display information
+ * @pdev: Pointer to platform device.
+ * @drm_dev: DRM device associated with the display.
+ * @name: Name of the display.
+ * @display_type: Display type as defined in device tree.
+ * @list: List pointer.
+ * @display_lock: Mutex for sde_hdmi interface.
+ * @ctrl: Controller information for HDMI display.
+ * @non_pluggable: If HDMI display is non pluggable
+ * @num_of_modes: Number of modes supported by display if non pluggable.
+ * @mode_list: Mode list if non pluggable.
+ * @connected: If HDMI display is connected.
+ * @is_tpg_enabled: TPG state.
+ * @hpd_work: HPD work structure.
+ * @root: Debug fs root entry.
+ */
+struct sde_hdmi {
+ struct platform_device *pdev;
+ struct drm_device *drm_dev;
+
+ const char *name;
+ const char *display_type;
+ struct list_head list;
+ struct mutex display_lock;
+
+ struct sde_hdmi_ctrl ctrl;
+
+ struct platform_device *ext_pdev;
+ struct msm_ext_disp_init_data ext_audio_data;
+ struct hdmi_edid_ctrl edid;
+
+ bool non_pluggable;
+ u32 num_of_modes;
+ struct list_head mode_list;
+ bool connected;
+ bool is_tpg_enabled;
+
+ struct work_struct hpd_work;
+
+ /* DEBUG FS */
+ struct dentry *root;
+};
+
+#ifdef CONFIG_DRM_SDE_HDMI
+/**
+ * sde_hdmi_get_num_of_displays() - returns number of display devices
+ * supported.
+ *
+ * Return: number of displays.
+ */
+u32 sde_hdmi_get_num_of_displays(void);
+
+/**
+ * sde_hdmi_get_displays() - returns the display list that's available.
+ * @display_array: Pointer to display list
+ * @max_display_count: Number of maximum displays in the list
+ *
+ * Return: number of available displays.
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count);
+
+/**
+ * sde_hdmi_connector_pre_deinit()- perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_connector_post_init()- perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * sde_hdmi_connector_detect()- determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ *
+ * Return: connector status
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display);
+
+/**
+ * sde_hdmi_connector_get_modes - add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Number of modes added
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_mode_valid - determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+/**
+ * sde_hdmi_dev_init() - Initializes the display device
+ * @display: Handle to the display.
+ *
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_dev_deinit() - Deinitializes the display device
+ * @display: Handle to the display.
+ *
+ * All the resources acquired during device init will be released.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_drm_init() - initializes DRM objects for the display device.
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ * display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc);
+
+/**
+ * sde_hdmi_drm_deinit() - destroys DRM objects associated with the display
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_info() - returns the display properties
+ * @display: Handle to the display.
+ * @info: Pointer to the structure where info is stored.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display);
+
+/**
+ * sde_hdmi_bridge_init() - init sde hdmi bridge
+ * @hdmi: Handle to the hdmi.
+ *
+ * Return: struct drm_bridge *.
+ */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_set_mode() - Set HDMI mode API.
+ * @hdmi: Handle to the hdmi.
+ * @power_on: Power on/off request.
+ *
+ * Return: void.
+ */
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+
+/**
+ * sde_hdmi_audio_on() - enable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ * @params: audio setup parameters from codec.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params);
+
+/**
+ * sde_hdmi_audio_off() - disable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ *
+ * Return: void.
+ */
+void sde_hdmi_audio_off(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_config_avmute() - mute hdmi.
+ * @hdmi: Handle to the hdmi.
+ * @set: enable/disable avmute.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set);
+
+/**
+ * sde_hdmi_notify_clients() - notify hdmi clients of the connection status.
+ * @connector: Handle to the drm_connector.
+ * @connected: connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_notify_clients(struct drm_connector *connector,
+ bool connected);
+
+/**
+ * sde_hdmi_ack_state() - acknowledge the connection status.
+ * @connector: Handle to the drm_connector.
+ * @status: connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_ack_state(struct drm_connector *connector,
+ enum drm_connector_status status);
+
+/**
+ * sde_hdmi_edid_init() - init edid structure.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_edid_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_edid_deinit() - deinit edid structure.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_edid_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_edid() - get edid info.
+ * @connector: Handle to the drm_connector.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: void.
+ */
+void sde_hdmi_get_edid(struct drm_connector *connector,
+ struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_free_edid() - free edid structure.
+ * @display: Handle to the sde_hdmi.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_free_edid(struct sde_hdmi *display);
+
+#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
+
+static inline u32 sde_hdmi_get_num_of_displays(void)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_displays(void **display_array,
+ u32 max_display_count)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ return connector_status_disconnected;
+}
+
+static inline int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_mode_status sde_hdmi_mode_valid(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ return MODE_OK;
+}
+
+static inline int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display)
+{
+ return 0;
+}
+#endif /*#else of CONFIG_DRM_SDE_HDMI*/
+#endif /* _SDE_HDMI_H_ */
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
new file mode 100644
index 000000000000..13ea49cfa42d
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
@@ -0,0 +1,393 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+ AUDIO_CHANNEL_2 = 2,
+ AUDIO_CHANNEL_3,
+ AUDIO_CHANNEL_4,
+ AUDIO_CHANNEL_5,
+ AUDIO_CHANNEL_6,
+ AUDIO_CHANNEL_7,
+ AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+ u32 n;
+ u32 cts;
+};
+
+enum hdmi_audio_sample_rates {
+ AUDIO_SAMPLE_RATE_32KHZ,
+ AUDIO_SAMPLE_RATE_44_1KHZ,
+ AUDIO_SAMPLE_RATE_48KHZ,
+ AUDIO_SAMPLE_RATE_88_2KHZ,
+ AUDIO_SAMPLE_RATE_96KHZ,
+ AUDIO_SAMPLE_RATE_176_4KHZ,
+ AUDIO_SAMPLE_RATE_192KHZ,
+ AUDIO_SAMPLE_RATE_MAX
+};
+
+struct sde_hdmi_audio {
+ struct hdmi *hdmi;
+ struct msm_ext_disp_audio_setup_params params;
+ u32 pclk;
+};
+
+static void _sde_hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+ u32 rate = *sample_rate_hz;
+
+ switch (rate) {
+ case 32000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_32KHZ;
+ break;
+ case 44100:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_44_1KHZ;
+ break;
+ case 48000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_48KHZ;
+ break;
+ case 88200:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_88_2KHZ;
+ break;
+ case 96000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_96KHZ;
+ break;
+ case 176400:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_176_4KHZ;
+ break;
+ case 192000:
+ *sample_rate_hz = AUDIO_SAMPLE_RATE_192KHZ;
+ break;
+ default:
+ SDE_ERROR("%d unchanged\n", rate);
+ break;
+ }
+}
+
+static void _sde_hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+ struct hdmi_audio_acr *acr)
+{
+ u32 div, mul;
+
+ if (!acr) {
+ SDE_ERROR("invalid data\n");
+ return;
+ }
+
+ /*
+ * as per HDMI specification, N/CTS = (128*fs)/pclk.
+ * get the ratio using this formula.
+ */
+ acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+ acr->cts = pclk;
+
+ /* get the greatest common divisor for the ratio */
+ div = gcd(acr->n, acr->cts);
+
+ /* get the n and cts values wrt N/CTS formula */
+ acr->n /= div;
+ acr->cts /= div;
+
+ /*
+ * as per HDMI specification, 300 <= 128*fs/N <= 1500
+ * with a target of 128*fs/N = 1000. To get closest
+ * value without truncating fractional values, find
+ * the corresponding multiplier
+ */
+ mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+ + (acr->n - 1)) / acr->n;
+
+ acr->n *= mul;
+ acr->cts *= mul;
+}
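+
+/*
+ * Worked example for the reduction above (the end values match the
+ * HDMI spec ACR table, so this is a sanity check rather than new
+ * behaviour): for fs = 48000 Hz and pclk = 74250000 Hz (720p60),
+ * N/CTS starts as 6144000/74250000, gcd = 6000 reduces it to
+ * 1024/12375, and the multiplier (6144 + 1023)/1024 = 6 scales it
+ * back to the spec values N = 6144, CTS = 74250.
+ */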
+
+static void _sde_hdmi_audio_acr_enable(struct sde_hdmi_audio *audio)
+{
+ struct hdmi_audio_acr acr;
+ struct msm_ext_disp_audio_setup_params *params;
+ u32 pclk, layout, multiplier = 1, sample_rate;
+ u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+ struct hdmi *hdmi;
+
+ hdmi = audio->hdmi;
+ params = &audio->params;
+ pclk = audio->pclk;
+ sample_rate = params->sample_rate_hz;
+
+ _sde_hdmi_audio_get_acr_param(pclk, sample_rate, &acr);
+ _sde_hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+ layout = (params->num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+
+ SDE_DEBUG("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+ /* AUDIO_PRIORITY | SOURCE */
+ acr_pkt_ctl = BIT(31) | BIT(8);
+
+ switch (sample_rate) {
+ case AUDIO_SAMPLE_RATE_44_1KHZ:
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_48KHZ:
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_192KHZ:
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_176_4KHZ:
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_96KHZ:
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_88_2KHZ:
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ default:
+ multiplier = 1;
+
+ acr_pkt_ctl |= 0x1 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_32_0;
+ acr_reg_n = HDMI_ACR_32_1;
+ break;
+ }
+
+ aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+ /* N_MULTIPLE(multiplier) */
+ acr_pkt_ctl &= ~(7 << 16);
+ acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+ /* SEND | CONT */
+ acr_pkt_ctl |= BIT(0) | BIT(1);
+
+ hdmi_write(hdmi, acr_reg_cts, acr.cts);
+ hdmi_write(hdmi, acr_reg_n, acr.n);
+ hdmi_write(hdmi, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+static void _sde_hdmi_audio_acr_setup(struct sde_hdmi_audio *audio, bool on)
+{
+ if (on)
+ _sde_hdmi_audio_acr_enable(audio);
+ else
+ hdmi_write(audio->hdmi, HDMI_ACR_PKT_CTRL, 0);
+}
+
+static void _sde_hdmi_audio_infoframe_setup(struct sde_hdmi_audio *audio,
+ bool enabled)
+{
+ struct hdmi *hdmi = audio->hdmi;
+ u32 channels, channel_allocation, level_shift, down_mix, layout;
+ u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+ u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+ u32 check_sum, sample_present;
+
+ audio_info_ctrl_reg = hdmi_read(hdmi, HDMI_INFOFRAME_CTRL0);
+ audio_info_ctrl_reg &= ~0xF0;
+
+ if (!enabled)
+ goto end;
+
+ channels = audio->params.num_of_channels - 1;
+ channel_allocation = audio->params.channel_allocation;
+ level_shift = audio->params.level_shift;
+ down_mix = audio->params.down_mix;
+ sample_present = audio->params.sample_present;
+
+ layout = (audio->params.num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+ aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+ audio_info_1_reg |= channel_allocation & 0xFF;
+ audio_info_1_reg |= ((level_shift & 0xF) << 11);
+ audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
+ check_sum = 0;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+ check_sum += channels;
+ check_sum += channel_allocation;
+ check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+ check_sum &= 0xFF;
+ check_sum = (u8) (256 - check_sum);
+
+ audio_info_0_reg |= check_sum & 0xFF;
+ audio_info_0_reg |= ((channels & 0x7) << 8);
+
+ /* Enable Audio InfoFrame Transmission */
+ audio_info_ctrl_reg |= 0xF0;
+
+ if (layout) {
+ /* Set the Layout bit */
+ hdmi_debug_reg |= BIT(4);
+
+ /* Set the Sample Present bits */
+ hdmi_debug_reg |= sample_present & 0xF;
+ }
+end:
+ hdmi_write(hdmi, HDMI_DEBUG, hdmi_debug_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO0, audio_info_0_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO1, audio_info_1_reg);
+ hdmi_write(hdmi, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
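+
+/*
+ * Checksum sanity check (illustrative): for a 2-channel stream with
+ * channel_allocation = 0, level_shift = 0 and down_mix = 0, the
+ * running sum is 0x84 + 0x01 + 0x0A + 1 = 0x90, so the packet
+ * checksum byte becomes 256 - 0x90 = 0x70, which lands in bits [7:0]
+ * of HDMI_AUDIO_INFO0.
+ */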
+
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ struct sde_hdmi_audio audio;
+ int rc = 0;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ audio.pclk = hdmi->pixclock;
+ audio.params = *params;
+ audio.hdmi = hdmi;
+
+ if (!audio.params.num_of_channels) {
+ audio.params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+ audio.params.num_of_channels = AUDIO_CHANNEL_2;
+ }
+
+ _sde_hdmi_audio_acr_setup(&audio, true);
+ _sde_hdmi_audio_infoframe_setup(&audio, true);
+
+ SDE_DEBUG("HDMI Audio: Enabled\n");
+end:
+ return rc;
+}
+
+void sde_hdmi_audio_off(struct hdmi *hdmi)
+{
+ struct sde_hdmi_audio audio;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ return;
+ }
+
+ audio.hdmi = hdmi;
+
+ _sde_hdmi_audio_infoframe_setup(&audio, false);
+ _sde_hdmi_audio_acr_setup(&audio, false);
+
+ SDE_DEBUG("HDMI Audio: Disabled\n");
+}
+
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set)
+{
+ u32 av_mute_status;
+ bool av_pkt_en = false;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ return -ENODEV;
+ }
+
+ av_mute_status = hdmi_read(hdmi, HDMI_GC);
+
+ if (set) {
+ if (!(av_mute_status & BIT(0))) {
+ hdmi_write(hdmi, HDMI_GC, av_mute_status | BIT(0));
+ av_pkt_en = true;
+ }
+ } else {
+ if (av_mute_status & BIT(0)) {
+ hdmi_write(hdmi, HDMI_GC, av_mute_status & ~BIT(0));
+ av_pkt_en = true;
+ }
+ }
+
+ /* Enable AV Mute transmission: set both GC packet enable bits,
+ * BIT(4) | BIT(5). BIT(4) & BIT(5) is always zero and would turn
+ * the write into a no-op.
+ */
+ if (av_pkt_en)
+ hdmi_write(hdmi, HDMI_VBI_PKT_CTRL,
+ hdmi_read(hdmi, HDMI_VBI_PKT_CTRL) | BIT(4) | BIT(5));
+
+ SDE_DEBUG("AVMUTE %s\n", set ? "set" : "cleared");
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
new file mode 100644
index 000000000000..681dca501f9b
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -0,0 +1,417 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "sde_hdmi.h"
+#include "hdmi.h"
+
+struct sde_hdmi_bridge {
+ struct drm_bridge base;
+ struct hdmi *hdmi;
+};
+#define to_hdmi_bridge(x) container_of(x, struct sde_hdmi_bridge, base)
+
+/* for AVI program */
+#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
+#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6)
+#define HDMI_SPD_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE)
+#define HDMI_DEFAULT_VENDOR_NAME "unknown"
+#define HDMI_DEFAULT_PRODUCT_NAME "msm"
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+#define HDMI_AVI_IFRAME_LINE_NUMBER 1
+#define HDMI_VENDOR_IFRAME_LINE_NUMBER 3
+
+void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+static void _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_enable(hdmi->pwr_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+
+ if (config->pwr_clk_cnt > 0) {
+ DRM_DEBUG("pixclock: %lu", hdmi->pixclock);
+ ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+ if (ret) {
+ SDE_ERROR("failed to set pixel clk: %s (%d)\n",
+ config->pwr_clk_names[0], ret);
+ }
+ }
+
+ for (i = 0; i < config->pwr_clk_cnt; i++) {
+ ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+ if (ret) {
+ SDE_ERROR("failed to enable pwr clk: %s (%d)\n",
+ config->pwr_clk_names[i], ret);
+ }
+ }
+}
+
+static void _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ const struct hdmi_platform_config *config = hdmi->config;
+ int i, ret;
+
+ /* Wait for vsync */
+ msleep(20);
+
+ for (i = 0; i < config->pwr_clk_cnt; i++)
+ clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+ for (i = 0; i < config->pwr_reg_cnt; i++) {
+ ret = regulator_disable(hdmi->pwr_regs[i]);
+ if (ret) {
+ SDE_ERROR("failed to disable pwr regulator: %s (%d)\n",
+ config->pwr_reg_names[i], ret);
+ }
+ }
+}
+
+static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ DRM_DEBUG("power up");
+
+ if (!hdmi->power_on) {
+ _sde_hdmi_bridge_power_on(bridge);
+ hdmi->power_on = true;
+ }
+
+ if (phy)
+ phy->funcs->powerup(phy, hdmi->pixclock);
+
+ sde_hdmi_set_mode(hdmi, true);
+
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
+
+ sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_CONNECT);
+}
+
+static void sde_hdmi_force_update_audio(struct drm_connector *connector,
+ enum drm_connector_status status)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ if (display && display->non_pluggable) {
+ display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
+ display->ext_audio_data.type,
+ status,
+ MSM_EXT_DISP_HPD_AUDIO);
+ }
+}
+
+static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+
+ /* force update audio ops when there's no HPD event */
+ sde_hdmi_force_update_audio(hdmi->connector,
+ EXT_DISPLAY_CABLE_CONNECT);
+}
+
+static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+
+ /* force update audio ops when there's no HPD event */
+ sde_hdmi_force_update_audio(hdmi->connector,
+ EXT_DISPLAY_CABLE_DISCONNECT);
+}
+
+static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ struct hdmi_phy *phy = hdmi->phy;
+
+ if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+ hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
+
+ sde_hdmi_audio_off(hdmi);
+
+ DRM_DEBUG("power down");
+ sde_hdmi_set_mode(hdmi, false);
+
+ if (phy)
+ phy->funcs->powerdown(phy);
+
+ if (hdmi->power_on) {
+ _sde_hdmi_bridge_power_off(bridge);
+ hdmi->power_on = false;
+ }
+
+ sde_hdmi_ack_state(hdmi->connector, EXT_DISPLAY_CABLE_DISCONNECT);
+}
+
+static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+ u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+ u8 checksum;
+ u32 reg_val;
+ struct hdmi_avi_infoframe info;
+
+ drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+ hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+ checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+
+ reg_val = checksum |
+ LEFT_SHIFT_BYTE(avi_frame[0]) |
+ LEFT_SHIFT_WORD(avi_frame[1]) |
+ LEFT_SHIFT_24BITS(avi_frame[2]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), reg_val);
+
+ reg_val = avi_frame[3] |
+ LEFT_SHIFT_BYTE(avi_frame[4]) |
+ LEFT_SHIFT_WORD(avi_frame[5]) |
+ LEFT_SHIFT_24BITS(avi_frame[6]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), reg_val);
+
+ reg_val = avi_frame[7] |
+ LEFT_SHIFT_BYTE(avi_frame[8]) |
+ LEFT_SHIFT_WORD(avi_frame[9]) |
+ LEFT_SHIFT_24BITS(avi_frame[10]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(2), reg_val);
+
+ reg_val = avi_frame[11] |
+ LEFT_SHIFT_BYTE(avi_frame[12]) |
+ LEFT_SHIFT_24BITS(avi_iframe[1]);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(3), reg_val);
+
+ /* AVI InfoFrame enable (every frame) */
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+ hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ reg_val &= ~0x3F;
+ reg_val |= HDMI_AVI_IFRAME_LINE_NUMBER;
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+static void _sde_hdmi_bridge_set_vs_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 vs_iframe[HDMI_VS_INFOFRAME_BUFFER_SIZE] = {0};
+ u32 reg_val;
+ struct hdmi_vendor_infoframe info;
+ int rc = 0;
+
+ rc = drm_hdmi_vendor_infoframe_from_display_mode(&info, mode);
+ if (rc < 0) {
+ SDE_DEBUG("don't send vendor infoframe\n");
+ return;
+ }
+ hdmi_vendor_infoframe_pack(&info, vs_iframe, sizeof(vs_iframe));
+
+ reg_val = (info.s3d_struct << 24) | (info.vic << 16) |
+ (vs_iframe[3] << 8) | (vs_iframe[7] << 5) |
+ vs_iframe[2];
+ hdmi_write(hdmi, REG_HDMI_VENSPEC_INFO0, reg_val);
+
+ /* vendor specific info-frame enable (every frame) */
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+ hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+
+ reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+ reg_val &= ~0x3F000000;
+ reg_val |= (HDMI_VENDOR_IFRAME_LINE_NUMBER << 24);
+ hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+static void _sde_hdmi_bridge_set_spd_infoframe(struct hdmi *hdmi,
+ const struct drm_display_mode *mode)
+{
+ u8 spd_iframe[HDMI_SPD_INFOFRAME_BUFFER_SIZE] = {0};
+ u32 packet_payload, packet_control, packet_header;
+ struct hdmi_spd_infoframe info;
+ int i;
+
+ /* Need to query vendor and product name from platform setup */
+ hdmi_spd_infoframe_init(&info, HDMI_DEFAULT_VENDOR_NAME,
+ HDMI_DEFAULT_PRODUCT_NAME);
+ hdmi_spd_infoframe_pack(&info, spd_iframe, sizeof(spd_iframe));
+
+ packet_header = spd_iframe[0]
+ | LEFT_SHIFT_BYTE(spd_iframe[1] & 0x7f)
+ | LEFT_SHIFT_WORD(spd_iframe[2] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR, packet_header);
+
+ for (i = 0; i < MAX_REG_HDMI_GENERIC1_INDEX; i++) {
+ packet_payload = spd_iframe[3 + i * 4]
+ | LEFT_SHIFT_BYTE(spd_iframe[4 + i * 4] & 0x7f)
+ | LEFT_SHIFT_WORD(spd_iframe[5 + i * 4] & 0x7f)
+ | LEFT_SHIFT_24BITS(spd_iframe[6 + i * 4] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(i), packet_payload);
+ }
+
+ packet_payload = (spd_iframe[27] & 0x7f)
+ | LEFT_SHIFT_BYTE(spd_iframe[28] & 0x7f);
+ hdmi_write(hdmi, REG_HDMI_GENERIC1(MAX_REG_HDMI_GENERIC1_INDEX),
+ packet_payload);
+
+ /*
+ * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+ * Setup HDMI TX generic packet control
+ * Enable this packet to transmit every frame
+ * Enable HDMI TX engine to transmit Generic packet 1
+ */
+ packet_control = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+ packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+ hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+ struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+ int hstart, hend, vstart, vend;
+ uint32_t frame_ctrl;
+
+ mode = adjusted_mode;
+
+ hdmi->pixclock = mode->clock * 1000;
+
+ hstart = mode->htotal - mode->hsync_start;
+ hend = mode->htotal - mode->hsync_start + mode->hdisplay;
+
+ vstart = mode->vtotal - mode->vsync_start - 1;
+ vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+ DRM_DEBUG(
+ "htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+ mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+ hdmi_write(hdmi, REG_HDMI_TOTAL,
+ HDMI_TOTAL_H_TOTAL(mode->htotal - 1) |
+ HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+ HDMI_ACTIVE_HSYNC_START(hstart) |
+ HDMI_ACTIVE_HSYNC_END(hend));
+ hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+ HDMI_ACTIVE_VSYNC_START(vstart) |
+ HDMI_ACTIVE_VSYNC_END(vend));
+
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+ HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+ } else {
+ hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+ HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+ hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+ HDMI_VSYNC_ACTIVE_F2_START(0) |
+ HDMI_VSYNC_ACTIVE_F2_END(0));
+ }
+
+ frame_ctrl = 0;
+ if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+ frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+ if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+ frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+ DRM_DEBUG("frame_ctrl=%08x\n", frame_ctrl);
+ hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+ /*
+ * Setup info frame
+ * Current drm_edid driver doesn't have all CEA formats defined in
+ * latest CEA-861(CTA-861) spec. So, don't check if mode is CEA mode
+ * in here. Once core framework is updated, the check needs to be
+ * added back.
+ */
+ if (hdmi->hdmi_mode) {
+ _sde_hdmi_bridge_set_avi_infoframe(hdmi, mode);
+ _sde_hdmi_bridge_set_vs_infoframe(hdmi, mode);
+ _sde_hdmi_bridge_set_spd_infoframe(hdmi, mode);
+ DRM_DEBUG("hdmi setup info frame\n");
+ }
+}
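+
+/*
+ * Worked example for the sync math above (CEA-861 1080p60, given only
+ * to illustrate the register programming): hdisplay = 1920,
+ * hsync_start = 2008, htotal = 2200 gives hstart = 2200 - 2008 = 192
+ * and hend = 192 + 1920 = 2112; vdisplay = 1080, vsync_start = 1084,
+ * vtotal = 1125 gives vstart = 1125 - 1084 - 1 = 40 and
+ * vend = 40 + 1080 = 1120.
+ */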
+
+static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
+ .pre_enable = _sde_hdmi_bridge_pre_enable,
+ .enable = _sde_hdmi_bridge_enable,
+ .disable = _sde_hdmi_bridge_disable,
+ .post_disable = _sde_hdmi_bridge_post_disable,
+ .mode_set = _sde_hdmi_bridge_mode_set,
+};
+
+
+/* initialize bridge */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi)
+{
+ struct drm_bridge *bridge = NULL;
+ struct sde_hdmi_bridge *sde_hdmi_bridge;
+ int ret;
+
+ sde_hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
+ sizeof(*sde_hdmi_bridge), GFP_KERNEL);
+ if (!sde_hdmi_bridge) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ sde_hdmi_bridge->hdmi = hdmi;
+
+ bridge = &sde_hdmi_bridge->base;
+ bridge->funcs = &_sde_hdmi_bridge_funcs;
+
+ ret = drm_bridge_attach(hdmi->dev, bridge);
+ if (ret)
+ goto fail;
+
+ return bridge;
+
+fail:
+ if (bridge)
+ _sde_hdmi_bridge_destroy(bridge);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c
new file mode 100644
index 000000000000..57c79e2aa812
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_edid.c
@@ -0,0 +1,227 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+
+/* TODO: copy from drm_edid.c and mdss_hdmi_edid.c. remove if using ELD */
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+enum data_block_types {
+ RESERVED,
+ AUDIO_DATA_BLOCK,
+ VIDEO_DATA_BLOCK,
+ VENDOR_SPECIFIC_DATA_BLOCK,
+ SPEAKER_ALLOCATION_DATA_BLOCK,
+ VESA_DTC_DATA_BLOCK,
+ RESERVED2,
+ USE_EXTENDED_TAG
+};
+
+static u8 *_sde_hdmi_edid_find_cea_extension(struct edid *edid)
+{
+ u8 *edid_ext = NULL;
+ int i;
+
+ /* No EDID or EDID extensions */
+ if (edid == NULL || edid->extensions == 0)
+ return NULL;
+
+ /* Find CEA extension */
+ for (i = 0; i < edid->extensions; i++) {
+ edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+ if (edid_ext[0] == CEA_EXT)
+ break;
+ }
+
+ if (i == edid->extensions)
+ return NULL;
+
+ return edid_ext;
+}
+
+static const u8 *_sde_hdmi_edid_find_block(const u8 *in_buf, u32 start_offset,
+ u8 type, u8 *len)
+{
+ /* the start of data block collection, start of Video Data Block */
+ u32 offset = start_offset;
+ u32 dbc_offset = in_buf[2];
+
+ /*
+ * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
+ * collection present.
+ * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
+ * collection present and no DTD data present.
+ */
+ if ((dbc_offset == 0) || (dbc_offset == 4)) {
+ SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+ return NULL;
+ }
+
+ while (offset < dbc_offset) {
+ u8 block_len = in_buf[offset] & 0x1F;
+
+ if ((offset + block_len <= dbc_offset) &&
+ (in_buf[offset] >> 5) == type) {
+ *len = block_len;
+ SDE_DEBUG("EDID: block=%d found @ 0x%x w/ len=%d\n",
+ type, offset, block_len);
+
+ return in_buf + offset;
+ }
+ offset += 1 + block_len;
+ }
+
+ return NULL;
+}
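+
+/*
+ * CEA data block headers pack a 3-bit tag and a 5-bit payload length
+ * into one byte, which is what the shift/mask above decodes. For
+ * example, a header byte of 0x23 is tag 1 (AUDIO_DATA_BLOCK) with a
+ * 3-byte payload, i.e. one Short Audio Descriptor.
+ */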
+
+static void _sde_hdmi_extract_audio_data_blocks(
+ struct hdmi_edid_ctrl *edid_ctrl)
+{
+ u8 len = 0;
+ u8 adb_max = 0;
+ const u8 *adb = NULL;
+ u32 offset = DBC_START_OFFSET;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+
+ cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ edid_ctrl->adb_size = 0;
+
+ memset(edid_ctrl->audio_data_block, 0,
+ sizeof(edid_ctrl->audio_data_block));
+
+ do {
+ len = 0;
+ adb = _sde_hdmi_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+ &len);
+
+ if ((adb == NULL) || (len > MAX_AUDIO_DATA_BLOCK_SIZE ||
+ adb_max >= MAX_NUMBER_ADB)) {
+ if (!edid_ctrl->adb_size) {
+ SDE_DEBUG("No/Invalid Audio Data Block\n");
+ return;
+ }
+
+ /* stop here: offset is not advanced in this
+ * branch, so a continue would re-find the same
+ * block and never terminate.
+ */
+ break;
+ }
+
+ memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+ adb + 1, len);
+ offset = (adb - cea) + 1 + len;
+
+ edid_ctrl->adb_size += len;
+ adb_max++;
+ } while (adb);
+
+}
+
+static void _sde_hdmi_extract_speaker_allocation_data(
+ struct hdmi_edid_ctrl *edid_ctrl)
+{
+ u8 len;
+ const u8 *sadb = NULL;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+
+ cea = _sde_hdmi_edid_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ sadb = _sde_hdmi_edid_find_block(cea, DBC_START_OFFSET,
+ SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+ if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+ SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+ return;
+ }
+
+ memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+ edid_ctrl->sadb_size = len;
+
+ SDE_DEBUG("EDID: speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+ sadb[1],
+ (sadb[1] & BIT(0)) ? "FL/FR," : "",
+ (sadb[1] & BIT(1)) ? "LFE," : "",
+ (sadb[1] & BIT(2)) ? "FC," : "",
+ (sadb[1] & BIT(3)) ? "RL/RR," : "",
+ (sadb[1] & BIT(4)) ? "RC," : "",
+ (sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+ (sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+}
+
+int sde_hdmi_edid_init(struct sde_hdmi *display)
+{
+ int rc = 0;
+
+ if (!display) {
+ SDE_ERROR("invalid display\n");
+ return -EINVAL;
+ }
+
+ memset(&display->edid, 0, sizeof(display->edid));
+
+ return rc;
+}
+
+int sde_hdmi_free_edid(struct sde_hdmi *display)
+{
+ struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
+
+ kfree(edid_ctrl->edid);
+ edid_ctrl->edid = NULL;
+
+ return 0;
+}
+
+int sde_hdmi_edid_deinit(struct sde_hdmi *display)
+{
+ return sde_hdmi_free_edid(display);
+}
+
+void sde_hdmi_get_edid(struct drm_connector *connector,
+ struct sde_hdmi *display)
+{
+ u32 hdmi_ctrl;
+ struct hdmi_edid_ctrl *edid_ctrl = &display->edid;
+ struct hdmi *hdmi = display->ctrl.ctrl;
+
+ /* Read EDID */
+ hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+ edid_ctrl->edid = drm_get_edid(connector, hdmi->i2c);
+ hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+
+ if (edid_ctrl->edid) {
+ hdmi->hdmi_mode = drm_detect_hdmi_monitor(edid_ctrl->edid);
+
+ _sde_hdmi_extract_audio_data_blocks(edid_ctrl);
+ _sde_hdmi_extract_speaker_allocation_data(edid_ctrl);
+ }
+}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
new file mode 100644
index 000000000000..f1bff0f08051
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
@@ -0,0 +1,300 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HDMI_REGS_H
+#define _SDE_HDMI_REGS_H
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL (0x00000000)
+#define HDMI_TEST_PATTERN (0x00000010)
+#define HDMI_RANDOM_PATTERN (0x00000014)
+#define HDMI_PKT_BLK_CTRL (0x00000018)
+#define HDMI_STATUS (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL (0x00000020)
+#define HDMI_ACR_PKT_CTRL (0x00000024)
+#define HDMI_VBI_PKT_CTRL (0x00000028)
+#define HDMI_INFOFRAME_CTRL0 (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1 (0x00000030)
+#define HDMI_GEN_PKT_CTRL (0x00000034)
+#define HDMI_ACP (0x0000003C)
+#define HDMI_GC (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2 (0x00000044)
+#define HDMI_ISRC1_0 (0x00000048)
+#define HDMI_ISRC1_1 (0x0000004C)
+#define HDMI_ISRC1_2 (0x00000050)
+#define HDMI_ISRC1_3 (0x00000054)
+#define HDMI_ISRC1_4 (0x00000058)
+#define HDMI_ISRC2_0 (0x0000005C)
+#define HDMI_ISRC2_1 (0x00000060)
+#define HDMI_ISRC2_2 (0x00000064)
+#define HDMI_ISRC2_3 (0x00000068)
+#define HDMI_AVI_INFO0 (0x0000006C)
+#define HDMI_AVI_INFO1 (0x00000070)
+#define HDMI_AVI_INFO2 (0x00000074)
+#define HDMI_AVI_INFO3 (0x00000078)
+#define HDMI_MPEG_INFO0 (0x0000007C)
+#define HDMI_MPEG_INFO1 (0x00000080)
+#define HDMI_GENERIC0_HDR (0x00000084)
+#define HDMI_GENERIC0_0 (0x00000088)
+#define HDMI_GENERIC0_1 (0x0000008C)
+#define HDMI_GENERIC0_2 (0x00000090)
+#define HDMI_GENERIC0_3 (0x00000094)
+#define HDMI_GENERIC0_4 (0x00000098)
+#define HDMI_GENERIC0_5 (0x0000009C)
+#define HDMI_GENERIC0_6 (0x000000A0)
+#define HDMI_GENERIC1_HDR (0x000000A4)
+#define HDMI_GENERIC1_0 (0x000000A8)
+#define HDMI_GENERIC1_1 (0x000000AC)
+#define HDMI_GENERIC1_2 (0x000000B0)
+#define HDMI_GENERIC1_3 (0x000000B4)
+#define HDMI_GENERIC1_4 (0x000000B8)
+#define HDMI_GENERIC1_5 (0x000000BC)
+#define HDMI_GENERIC1_6 (0x000000C0)
+#define HDMI_ACR_32_0 (0x000000C4)
+#define HDMI_ACR_32_1 (0x000000C8)
+#define HDMI_ACR_44_0 (0x000000CC)
+#define HDMI_ACR_44_1 (0x000000D0)
+#define HDMI_ACR_48_0 (0x000000D4)
+#define HDMI_ACR_48_1 (0x000000D8)
+#define HDMI_ACR_STATUS_0 (0x000000DC)
+#define HDMI_ACR_STATUS_1 (0x000000E0)
+#define HDMI_AUDIO_INFO0 (0x000000E4)
+#define HDMI_AUDIO_INFO1 (0x000000E8)
+#define HDMI_CS_60958_0 (0x000000EC)
+#define HDMI_CS_60958_1 (0x000000F0)
+#define HDMI_RAMP_CTRL0 (0x000000F8)
+#define HDMI_RAMP_CTRL1 (0x000000FC)
+#define HDMI_RAMP_CTRL2 (0x00000100)
+#define HDMI_RAMP_CTRL3 (0x00000104)
+#define HDMI_CS_60958_2 (0x00000108)
+#define HDMI_HDCP_CTRL2 (0x0000010C)
+#define HDMI_HDCP_CTRL (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL (0x00000114)
+#define HDMI_HDCP_INT_CTRL (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0 (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1 (0x00000124)
+#define HDMI_HDCP_DDC_STATUS (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0 (0x0000012C)
+#define HDMI_HDCP_RESET (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0 (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1 (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0 (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1 (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3 (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4 (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5 (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6 (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7 (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8 (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9 (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10 (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11 (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12 (0x00000168)
+#define HDMI_VENSPEC_INFO0 (0x0000016C)
+#define HDMI_VENSPEC_INFO1 (0x00000170)
+#define HDMI_VENSPEC_INFO2 (0x00000174)
+#define HDMI_VENSPEC_INFO3 (0x00000178)
+#define HDMI_VENSPEC_INFO4 (0x0000017C)
+#define HDMI_VENSPEC_INFO5 (0x00000180)
+#define HDMI_VENSPEC_INFO6 (0x00000184)
+#define HDMI_HDCP_DEBUG (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01 (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23 (0x000001AC)
+#define HDMI_TMDS_DEBUG (0x000001B4)
+#define HDMI_TMDS_CTL_BITS (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN (0x000001CC)
+#define HDMI_AUDIO_CFG (0x000001D0)
+#define HDMI_DEBUG (0x00000204)
+#define HDMI_USEC_REFTIMER (0x00000208)
+#define HDMI_DDC_CTRL (0x0000020C)
+#define HDMI_DDC_ARBITRATION (0x00000210)
+#define HDMI_DDC_INT_CTRL (0x00000214)
+#define HDMI_DDC_SW_STATUS (0x00000218)
+#define HDMI_DDC_HW_STATUS (0x0000021C)
+#define HDMI_DDC_SPEED (0x00000220)
+#define HDMI_DDC_SETUP (0x00000224)
+#define HDMI_DDC_TRANS0 (0x00000228)
+#define HDMI_DDC_TRANS1 (0x0000022C)
+#define HDMI_DDC_TRANS2 (0x00000230)
+#define HDMI_DDC_TRANS3 (0x00000234)
+#define HDMI_DDC_DATA (0x00000238)
+#define HDMI_HDCP_SHA_CTRL (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS (0x00000240)
+#define HDMI_HDCP_SHA_DATA (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0 (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1 (0x0000024C)
+#define HDMI_HPD_INT_STATUS (0x00000250)
+#define HDMI_HPD_INT_CTRL (0x00000254)
+#define HDMI_HPD_CTRL (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1 (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN (0x00000264)
+#define HDMI_CRC_CTRL (0x00000268)
+#define HDMI_VID_CRC (0x0000026C)
+#define HDMI_AUD_CRC (0x00000270)
+#define HDMI_VBI_CRC (0x00000274)
+#define HDMI_DDC_REF (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV (0x00000288)
+#define HDMI_CEC_CTRL (0x0000028C)
+#define HDMI_CEC_WR_DATA (0x00000290)
+#define HDMI_CEC_RETRANSMIT (0x00000294)
+#define HDMI_CEC_STATUS (0x00000298)
+#define HDMI_CEC_INT (0x0000029C)
+#define HDMI_CEC_ADDR (0x000002A0)
+#define HDMI_CEC_TIME (0x000002A4)
+#define HDMI_CEC_REFTIMER (0x000002A8)
+#define HDMI_CEC_RD_DATA (0x000002AC)
+#define HDMI_CEC_RD_FILTER (0x000002B0)
+#define HDMI_ACTIVE_H (0x000002B4)
+#define HDMI_ACTIVE_V (0x000002B8)
+#define HDMI_ACTIVE_V_F2 (0x000002BC)
+#define HDMI_TOTAL (0x000002C0)
+#define HDMI_V_TOTAL_F2 (0x000002C4)
+#define HDMI_FRAME_CTRL (0x000002C8)
+#define HDMI_AUD_INT (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL (0x000002D0)
+#define HDMI_PHY_CTRL (0x000002D4)
+#define HDMI_CEC_WR_RANGE (0x000002DC)
+#define HDMI_CEC_RD_RANGE (0x000002E0)
+#define HDMI_VERSION (0x000002E4)
+#define HDMI_BIST_ENABLE (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN (0x000002F8)
+#define HDMI_INTF_CONFIG (0x000002FC)
+#define HDMI_HSYNC_CTL (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0 (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1 (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0 (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1 (0x00000310)
+#define HDMI_DISPLAY_V_START_F0 (0x00000314)
+#define HDMI_DISPLAY_V_START_F1 (0x00000318)
+#define HDMI_DISPLAY_V_END_F0 (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1 (0x00000320)
+#define HDMI_ACTIVE_V_START_F0 (0x00000324)
+#define HDMI_ACTIVE_V_START_F1 (0x00000328)
+#define HDMI_ACTIVE_V_END_F0 (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1 (0x00000330)
+#define HDMI_DISPLAY_HCTL (0x00000334)
+#define HDMI_ACTIVE_HCTL (0x00000338)
+#define HDMI_HSYNC_SKEW (0x0000033C)
+#define HDMI_POLARITY_CTL (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS (0x0000034C)
+#define HDMI_TPG_RECTANGLE (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES (0x00000358)
+#define HDMI_TPG_RGB_MAPPING (0x0000035C)
+#define HDMI_CEC_COMPL_CTL (0x00000360)
+#define HDMI_CEC_RD_START_RANGE (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG (0x00000370)
+#define HDMI_INTERNAL_TIMING_MODE (0x00000374)
+#define HDMI_CTRL_SW_RESET (0x00000378)
+#define HDMI_CTRL_AUDIO_RESET (0x0000037C)
+#define HDMI_SCRATCH (0x00000380)
+#define HDMI_CLK_CTRL (0x00000384)
+#define HDMI_CLK_ACTIVE (0x00000388)
+#define HDMI_VBI_CFG (0x0000038C)
+#define HDMI_DDC_INT_CTRL0 (0x00000430)
+#define HDMI_DDC_INT_CTRL1 (0x00000434)
+#define HDMI_DDC_INT_CTRL2 (0x00000438)
+#define HDMI_DDC_INT_CTRL3 (0x0000043C)
+#define HDMI_DDC_INT_CTRL4 (0x00000440)
+#define HDMI_DDC_INT_CTRL5 (0x00000444)
+#define HDMI_HDCP2P2_DDC_CTRL (0x0000044C)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL (0x00000450)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL2 (0x00000454)
+#define HDMI_HDCP2P2_DDC_STATUS (0x00000458)
+#define HDMI_SCRAMBLER_STATUS_DDC_CTRL (0x00000464)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL (0x00000468)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 (0x0000046C)
+#define HDMI_SCRAMBLER_STATUS_DDC_STATUS (0x00000470)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS (0x00000474)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 (0x00000478)
+#define HDMI_HW_DDC_CTRL (0x000004CC)
+#define HDMI_HDCP2P2_DDC_SW_TRIGGER (0x000004D0)
+#define HDMI_HDCP_STATUS (0x00000500)
+#define HDMI_HDCP_INT_CTRL2 (0x00000504)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0 (0x00000000)
+#define HDMI_PHY_ANA_CFG1 (0x00000004)
+#define HDMI_PHY_PD_CTRL0 (0x00000010)
+#define HDMI_PHY_PD_CTRL1 (0x00000014)
+#define HDMI_PHY_BIST_CFG0 (0x00000034)
+#define HDMI_PHY_BIST_PATN0 (0x0000003C)
+#define HDMI_PHY_BIST_PATN1 (0x00000040)
+#define HDMI_PHY_BIST_PATN2 (0x00000044)
+#define HDMI_PHY_BIST_PATN3 (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB (0x000000FC)
+#define QFPROM_RAW_VERSION_4 (0x000000A8)
+#define SEC_CTRL_HW_VERSION (0x00006000)
+#define HDCP_KSV_LSB (0x000060D8)
+#define HDCP_KSV_MSB (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION (0x50010000)
+
+#define LPASS_LPAIF_RDDMA_CTL0 (0xFE152000)
+#define LPASS_LPAIF_RDDMA_PER_CNT0 (0x00000014)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+
+/* TX major versions */
+#define HDMI_TX_VERSION_4 4
+#define HDMI_TX_VERSION_3 3
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_UPDATE_1 0x11
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS_0 0x40
+#define HDMI_SCDC_STATUS_FLAGS_1 0x41
+#define HDMI_SCDC_ERR_DET_0_L 0x50
+#define HDMI_SCDC_ERR_DET_0_H 0x51
+#define HDMI_SCDC_ERR_DET_1_L 0x52
+#define HDMI_SCDC_ERR_DET_1_H 0x53
+#define HDMI_SCDC_ERR_DET_2_L 0x54
+#define HDMI_SCDC_ERR_DET_2_H 0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56
+
+/* HDCP secure registers directly accessible to HLOS since HDMI controller
+ * major version 4.0
+ */
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x00000004)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x00000008)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x0000000C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x00000010)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x00000014)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x00000018)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x0000001C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x00000020)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL (0x00000024)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA (0x00000028)
+
+#endif /* _SDE_HDMI_REGS_H */
+
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index ba5921149ac3..7915562057d6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, 2016 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -234,6 +234,11 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
hdmi->hdcp_ctrl = NULL;
}
+	/* Set to false for now to avoid ifdefs; this flag will be
+	 * removed once HDCP SW support is added to the HDMI DRM
+	 * driver.
+	 */
+ hdmi->is_hdcp_supported = false;
return hdmi;
@@ -389,7 +394,34 @@ static struct hdmi_platform_config hdmi_tx_8996_config = {
.hpd_freq = hpd_clk_freq_8x74,
};
+/* TODO */
+static const char *pwr_reg_names_8x98[] = {"core-vdda", "core-vcc"};
+/* TODO */
+static const char *hpd_reg_names_8x98[] = {"hpd-gdsc", "hpd-5v"};
+
+static const char *pwr_clk_names_8x98[] = {"core_extp_clk",
+ "hpd_alt_iface_clk"};
+
+static const char *hpd_clk_names_8x98[] = {"hpd_iface_clk",
+ "hpd_core_clk",
+ "hpd_mdp_core_clk",
+ "mnoc_clk",
+ "hpd_misc_ahb_clk",
+ "hpd_bus_clk"};
+
+static unsigned long hpd_clk_freq_8x98[] = {0, 19200000, 0, 0, 0, 0};
+
+static struct hdmi_platform_config hdmi_tx_8998_config = {
+ .phy_init = NULL,
+ HDMI_CFG(pwr_reg, 8x98),
+ HDMI_CFG(hpd_reg, 8x98),
+ HDMI_CFG(pwr_clk, 8x98),
+ HDMI_CFG(hpd_clk, 8x98),
+ .hpd_freq = hpd_clk_freq_8x98,
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8998_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -425,7 +457,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
const struct of_device_id *match;
-
match = of_match_node(dt_match, of_node);
if (match && match->data) {
hdmi_cfg = (struct hdmi_platform_config *)match->data;
@@ -443,12 +474,13 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
-
+ hdmi_cfg->hpd5v_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd5v");
#else
static struct hdmi_platform_config config = {};
static const char *hpd_clk_names[] = {
"core_clk", "master_iface_clk", "slave_iface_clk",
};
+
if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index e22ddcd51248..9ce8ff513210 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -70,7 +70,7 @@ struct hdmi {
struct drm_encoder *encoder;
bool hdmi_mode; /* are we in hdmi mode? */
-
+ bool is_hdcp_supported;
int irq;
struct workqueue_struct *workq;
@@ -110,7 +110,9 @@ struct hdmi_platform_config {
int pwr_clk_cnt;
/* gpio's: */
- int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+ int ddc_clk_gpio, ddc_data_gpio;
+ int hpd_gpio, mux_en_gpio;
+ int mux_sel_gpio, hpd5v_gpio;
int mux_lpm_gpio;
};
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 767debc02f36..aef20f76bf02 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -111,6 +111,8 @@ static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+#define REG_HDMI_INFOFRAME_CTRL1 0x00000030
+
#define REG_HDMI_GEN_PKT_CTRL 0x00000034
#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
@@ -150,6 +152,7 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
#define REG_HDMI_GENERIC1_HDR 0x000000a4
+#define MAX_REG_HDMI_GENERIC1_INDEX 6
static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 6ac9aa165768..caad2be87ae4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -40,7 +40,7 @@ struct mdp4_crtc {
uint32_t x, y;
/* next cursor to scan-out: */
- uint32_t next_iova;
+ uint64_t next_iova;
struct drm_gem_object *next_bo;
/* current cursor being scanned out: */
@@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -387,25 +387,28 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
- uint32_t iova = mdp4_crtc->cursor.next_iova;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova_locked(next_bo, mdp4_kms->aspace,
+ &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ /* FIXME: Make sure iova < 32 bits */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ lower_32_bits(iova));
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
- mdp4_kms->blank_cursor_iova);
+ lower_32_bits(mdp4_kms->blank_cursor_iova));
}
/* and drop the iova ref + obj rev when done scanning out: */
@@ -432,7 +435,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
- uint32_t iova;
+ uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -449,7 +452,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 6d8e174236ea..b6cddee0cf34 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -197,14 +197,14 @@ static void mdp4_destroy(struct msm_kms *kms)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
if (mdp4_kms->blank_cursor_bo)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
if (aspace) {
aspace->mmu->funcs->detach(aspace->mmu,
iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
+ msm_gem_address_space_put(aspace);
}
kfree(mdp4_kms);
@@ -541,13 +541,6 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
aspace = NULL;
}
- mdp4_kms->id = msm_register_address_space(dev, aspace);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp4_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);
@@ -564,7 +557,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 923a028db452..5cf03e58a4f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,8 +33,6 @@ struct mdp4_kms {
int rev;
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
-
void __iomem *mmio;
struct regulator *dsi_pll_vdda;
@@ -51,7 +49,7 @@ struct mdp4_kms {
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
- uint32_t blank_cursor_iova;
+ uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 30d57e74c42f..bc1ece2a5b7e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}
@@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
index bb1225aa2f75..89305ad3cde2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_cfg.c
@@ -547,7 +547,7 @@ fail:
if (cfg_handler)
mdp5_cfg_destroy(cfg_handler);
- return NULL;
+ return ERR_PTR(ret);
}
static struct mdp5_cfg_platform *mdp5_get_config(struct platform_device *dev)
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 7f9f4ac88029..422a9a78f3b2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -509,7 +509,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, cursor_addr, stride;
+ uint32_t blendcfg, stride;
+ uint64_t cursor_addr;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
@@ -536,7 +537,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index cc5a73505537..e4e69ebd116e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -136,9 +136,8 @@ static void mdp5_destroy(struct msm_kms *kms)
mdp5_irq_domain_fini(mdp5_kms);
if (aspace) {
- aspace->mmu->funcs->detach(aspace->mmu,
- iommu_ports, ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
}
if (mdp5_kms->ctlm)
@@ -627,13 +626,6 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
aspace = NULL;
}
- mdp5_kms->id = msm_register_address_space(dev, aspace);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
- }
-
ret = modeset_init(mdp5_kms);
if (ret) {
dev_err(dev->dev, "modeset_init failed: %d\n", ret);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 6cb43ad0c1d7..c1aa86d21416 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -36,7 +36,6 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 81cd49045ffc..873ab11d34d2 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -260,7 +260,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -274,7 +274,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -400,13 +400,13 @@ static void set_scanout_locked(struct drm_plane *plane,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 4b67edf5a77b..f821a81c53a6 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -23,6 +23,8 @@
#include "sde_wb.h"
#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+#include "msm_gem.h"
+#include "msm_mmu.h"
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
@@ -38,20 +40,6 @@ static const struct drm_mode_config_funcs mode_config_funcs = {
.atomic_commit = msm_atomic_commit,
};
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_aspaces++;
-
- if (WARN_ON(idx >= ARRAY_SIZE(priv->aspace)))
- return -EINVAL;
-
- priv->aspace[idx] = aspace;
-
- return idx;
-}
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -566,29 +554,65 @@ static void load_gpu(struct drm_device *dev)
}
#endif
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
{
struct msm_file_private *ctx;
+ if (!priv || !priv->gpu)
+ return NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->aspace = msm_gem_address_space_create_instance(
+ priv->gpu->aspace->mmu, "gpu", 0x100000000, 0x1ffffffff);
+
+ if (IS_ERR(ctx->aspace)) {
+ int ret = PTR_ERR(ctx->aspace);
+
+ /*
+ * If dynamic domains are not supported, everybody uses the
+ * same pagetable
+ */
+ if (ret != -EOPNOTSUPP) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
+ ctx->aspace = priv->gpu->aspace;
+ }
+
+ ctx->aspace->mmu->funcs->attach(ctx->aspace->mmu, NULL, 0);
+ return ctx;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx = NULL;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!dev || !dev->dev_private)
+ return -ENODEV;
+
+ priv = dev->dev_private;
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = setup_pagetable(priv);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
file->driver_priv = ctx;
- if (dev && dev->dev_private) {
- struct msm_drm_private *priv = dev->dev_private;
- struct msm_kms *kms;
+ kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->postopen)
+ kms->funcs->postopen(kms, file);
- kms = priv->kms;
- if (kms && kms->funcs && kms->funcs->postopen)
- kms->funcs->postopen(kms, file);
- }
return 0;
}
@@ -611,8 +635,10 @@ static void msm_postclose(struct drm_device *dev, struct drm_file *file)
kms->funcs->postclose(kms, file);
mutex_lock(&dev->struct_mutex);
- if (ctx == priv->lastctx)
- priv->lastctx = NULL;
+ if (ctx && ctx->aspace && ctx->aspace != priv->gpu->aspace) {
+ ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
+ msm_gem_address_space_put(ctx->aspace);
+ }
mutex_unlock(&dev->struct_mutex);
kfree(ctx);
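The open/close hunks above establish a simple lifetime: each file tries to
get a private pagetable and falls back to the GPU's shared address space
when dynamic domains are unsupported, and close only tears down what open
actually created. A self-contained model of that fallback idiom (all names
here are illustrative, not driver API):

#include <errno.h>
#include <stdio.h>
#include <stdlib.h>

struct aspace { const char *name; };

static struct aspace shared_aspace = { "shared gpu aspace" };

/* Stand-in for msm_gem_address_space_create_instance(). */
static struct aspace *create_private_aspace(int supported, int *err)
{
	struct aspace *as;

	if (!supported) {
		*err = -EOPNOTSUPP;	/* dynamic domains unavailable */
		return NULL;
	}
	as = calloc(1, sizeof(*as));
	if (as)
		as->name = "per-process aspace";
	return as;
}

int main(void)
{
	int err = 0;
	struct aspace *as = create_private_aspace(0, &err);

	if (!as && err == -EOPNOTSUPP)
		as = &shared_aspace;	/* everybody shares one pagetable */

	printf("open: using %s\n", as->name);

	if (as != &shared_aspace)	/* close: only free what we made */
		free(as);
	return 0;
}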
@@ -811,6 +837,13 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
return 0;
}
+static int msm_snapshot_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ return msm_snapshot_write(priv->gpu, m);
+}
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -875,11 +908,22 @@ static int show_locked(struct seq_file *m, void *arg)
return ret;
}
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+
+ return show(dev, m);
+}
+
static struct drm_info_list msm_debugfs_list[] = {
{"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
+ { "snapshot", show_unlocked, 0, msm_snapshot_show },
};
static int late_init_minor(struct drm_minor *minor)
@@ -953,14 +997,23 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout , bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int index = FENCE_RING(fence);
+ uint32_t submitted;
int ret;
- if (!priv->gpu)
- return 0;
+ if (!gpu)
+ return -ENXIO;
+
+ if (index > MSM_GPU_MAX_RINGS || index >= gpu->nr_rings ||
+ !gpu->rb[index])
+ return -EINVAL;
+
+ submitted = gpu->funcs->submitted_fence(gpu, gpu->rb[index]);
- if (fence > priv->gpu->submitted_fence) {
+ if (fence > submitted) {
DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, priv->gpu->submitted_fence);
+ fence, submitted);
return -EINVAL;
}
@@ -990,7 +1043,7 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
if (ret == 0) {
DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
+ fence, priv->completed_fence[index]);
ret = -ETIMEDOUT;
} else if (ret != -ERESTARTSYS) {
ret = 0;
@@ -1004,12 +1057,13 @@ int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ int index = FENCE_RING(fence);
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
- } else if (fence > priv->completed_fence) {
+ } else if (fence > priv->completed_fence[index]) {
cb->fence = fence;
list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
@@ -1024,21 +1078,21 @@ int msm_queue_fence_cb(struct drm_device *dev,
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fence_cb *cb, *tmp;
+ int index = FENCE_RING(fence);
- mutex_lock(&dev->struct_mutex);
- priv->completed_fence = max(fence, priv->completed_fence);
-
- while (!list_empty(&priv->fence_cbs)) {
- struct msm_fence_cb *cb;
-
- cb = list_first_entry(&priv->fence_cbs,
- struct msm_fence_cb, work.entry);
+ if (index >= MSM_GPU_MAX_RINGS)
+ return;
- if (cb->fence > priv->completed_fence)
- break;
+ mutex_lock(&dev->struct_mutex);
+ priv->completed_fence[index] = max(fence, priv->completed_fence[index]);
- list_del_init(&cb->work.entry);
- queue_work(priv->wq, &cb->work);
+ list_for_each_entry_safe(cb, tmp, &priv->fence_cbs, work.entry) {
+ if (COMPARE_FENCE_LTE(cb->fence,
+ priv->completed_fence[index])) {
+ list_del_init(&cb->work.entry);
+ queue_work(priv->wq, &cb->work);
+ }
}
mutex_unlock(&dev->struct_mutex);
@@ -1143,16 +1197,28 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
+ struct msm_file_private *ctx = file->driver_priv;
int ret = 0;
- if (args->pad)
+ if (args->flags & ~MSM_INFO_FLAGS)
+ return -EINVAL;
+
+ if (!ctx || !ctx->aspace)
return -EINVAL;
obj = drm_gem_object_lookup(dev, file, args->handle);
if (!obj)
return -ENOENT;
- args->offset = msm_gem_mmap_offset(obj);
+ if (args->flags & MSM_INFO_IOVA) {
+ uint64_t iova;
+
+ ret = msm_gem_get_iova(obj, ctx->aspace, &iova);
+ if (!ret)
+ args->offset = iova;
+ } else {
+ args->offset = msm_gem_mmap_offset(obj);
+ }
drm_gem_object_unreference_unlocked(obj);
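A hedged userspace counterpart to the flags handling above, querying a
buffer's per-context GPU address (MSM_INFO_IOVA, DRM_IOCTL_MSM_GEM_INFO,
and the struct layout are assumed to come from this series' msm_drm.h):

#include <stdint.h>
#include <xf86drm.h>
#include "msm_drm.h"	/* struct drm_msm_gem_info, MSM_INFO_IOVA */

static int msm_bo_iova(int fd, uint32_t handle, uint64_t *iova)
{
	struct drm_msm_gem_info req = {
		.handle = handle,
		.flags  = MSM_INFO_IOVA,
	};
	int ret = drmIoctl(fd, DRM_IOCTL_MSM_GEM_INFO, &req);

	if (!ret)
		*iova = req.offset;	/* the iova is returned in .offset */
	return ret;
}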
@@ -1163,13 +1229,24 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_wait_fence *args = data;
- ktime_t timeout = to_ktime(args->timeout);
+ ktime_t timeout;
+
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
+ /*
+ * Special case - if the user passes a timeout of 0.0 just return the
+ * current fence status (0 for retired, -EBUSY for active) with no
+ * accompanying kernel logs. This can be a poor man's way of
+ * determining the status of a fence.
+ */
+ if (args->timeout.tv_sec == 0 && args->timeout.tv_nsec == 0)
+ return msm_wait_fence(dev, args->fence, NULL, true);
+
+ timeout = to_ktime(args->timeout);
return msm_wait_fence(dev, args->fence, &timeout, true);
}
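A minimal userspace sketch of the new zero-timeout fast path, assuming the
usual libdrm ioctl wrapper and the struct drm_msm_wait_fence layout from
msm_drm.h (not part of the patch):

#include <errno.h>
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include "msm_drm.h"	/* struct drm_msm_wait_fence, DRM_IOCTL_MSM_WAIT_FENCE */

/* Poll a fence without blocking or logging: returns 0 once the fence
 * has retired, -1 with errno == EBUSY while it is still active.
 */
static int msm_fence_poll(int fd, uint32_t fence)
{
	struct drm_msm_wait_fence req;

	memset(&req, 0, sizeof(req));
	req.fence = fence;
	/* timeout left at 0.0 selects the status-only special case */
	return drmIoctl(fd, DRM_IOCTL_MSM_WAIT_FENCE, &req);
}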
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 4ed5953d07f9..d8a4c34e9be0 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -32,7 +32,8 @@
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
-#include <linux/mdss_io_util.h>
+#include <linux/of_device.h>
+#include <linux/sde_io_util.h>
#include <asm/sizes.h>
#include <linux/kthread.h>
@@ -74,11 +75,7 @@ struct msm_gem_vma;
#define MAX_CONNECTORS 8
struct msm_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
- int dummy;
+ struct msm_gem_address_space *aspace;
};
enum msm_mdp_plane_property {
@@ -250,6 +247,8 @@ struct msm_drm_commit {
struct kthread_worker worker;
};
+#define MSM_GPU_MAX_RINGS 4
+
struct msm_drm_private {
struct msm_kms *kms;
@@ -276,11 +275,12 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
- struct msm_file_private *lastctx;
struct drm_fb_helper *fbdev;
- uint32_t next_fence, completed_fence;
+ uint32_t next_fence[MSM_GPU_MAX_RINGS];
+ uint32_t completed_fence[MSM_GPU_MAX_RINGS];
+
wait_queue_head_t fence_event;
struct msm_rd_state *rd;
@@ -351,6 +351,31 @@ struct msm_format {
uint32_t pixel_format;
};
+/*
+ * Some GPU targets can support multiple ringbuffers and preempt between them.
+ * In order to do this without massive API changes we will steal two bits from
+ * the top of the fence and use them to identify the ringbuffer (0x00000001 for
+ * ring 0, 0x40000001 for ring 1, 0x80000001 for ring 2, etc). If you are going
+ * to do a fence comparison you have to make sure you are only comparing
+ * against fences from the same ring, but since fences within a ringbuffer are
+ * still contiguous you can still use straight comparisons (i.e. 0x40000001 is
+ * older than 0x40000002). Mathematically there will be 0x3FFFFFFF timestamps
+ * per ring, or ~103 days at 120 interrupts per second (two interrupts per
+ * frame at 60 FPS).
+ */
+#define FENCE_RING(_fence) ((_fence >> 30) & 3)
+#define FENCE(_ring, _fence) ((((_ring) & 3) << 30) | ((_fence) & 0x3FFFFFFF))
+
+static inline bool COMPARE_FENCE_LTE(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a <= b);
+}
+
+static inline bool COMPARE_FENCE_LT(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a < b);
+}
+
/* callback from wq once fence has passed: */
struct msm_fence_cb {
struct work_struct work;
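To make the ring/fence packing above concrete, here is a small standalone
sketch (not part of the patch) exercising the FENCE() and FENCE_RING()
macros from this hunk; the macro bodies are copied verbatim, everything
else is illustrative:

/* Standalone illustration of the two-bit ring encoding used above. */
#include <stdint.h>
#include <stdio.h>

#define FENCE_RING(_fence) ((_fence >> 30) & 3)
#define FENCE(_ring, _fence) ((((_ring) & 3) << 30) | ((_fence) & 0x3FFFFFFF))

int main(void)
{
	uint32_t a = FENCE(1, 1);	/* 0x40000001: fence 1 on ring 1 */
	uint32_t b = FENCE(1, 2);	/* 0x40000002: fence 2 on ring 1 */
	uint32_t c = FENCE(2, 1);	/* 0x80000001: fence 1 on ring 2 */

	/* Same ring, so a straight comparison is valid: a is older. */
	printf("a older than b: %d\n",
	       FENCE_RING(a) == FENCE_RING(b) && a < b);

	/* Different rings: never compare these values directly. */
	printf("a and c share a ring: %d\n", FENCE_RING(a) == FENCE_RING(c));
	return 0;
}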
@@ -374,20 +399,22 @@ int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
-int msm_register_address_space(struct drm_device *dev,
- struct msm_gem_address_space *aspace);
void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
void *priv);
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv);
-void msm_gem_address_space_destroy(struct msm_gem_address_space *aspace);
+ void *priv, unsigned int flags);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
/* For GPU and legacy display */
struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end);
/* For SDE display */
struct msm_gem_address_space *
@@ -402,13 +429,16 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -439,9 +469,12 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
uint32_t size, struct sg_table *sgt);
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -525,7 +558,8 @@ u32 msm_readl(const void __iomem *addr);
static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
- return priv->completed_fence >= fence;
+
+ return priv->completed_fence[FENCE_RING(fence)] >= fence;
}
static inline int align_pitch(int width, int bpp)
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index dca4de382581..a3f0392c2f88 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -92,15 +92,16 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int ret, i, n = drm_format_num_planes(fb->pixel_format);
- uint32_t iova;
+ uint64_t iova;
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
- DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
@@ -108,21 +109,30 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
int i, n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+/* FIXME: Leave this as a uint32_t and just return the lower 32 bits? */
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ uint64_t iova;
+
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+
+ iova = msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+
+ /* FIXME: Make sure it is < 32 bits */
+ return lower_32_bits(iova);
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3f6ec077b51d..cf127250c0d0 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -85,7 +85,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- uint32_t paddr;
+ uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
@@ -160,11 +160,12 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
- dev->mode_config.fb_base = paddr;
+ /* FIXME: Verify paddr < 32 bits? */
+ dev->mode_config.fb_base = lower_32_bits(paddr);
fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
- fbi->fix.smem_start = paddr;
+ fbi->fix.smem_start = lower_32_bits(paddr);
fbi->fix.smem_len = fbdev->bo->size;
DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 5aa08cf4d6d8..63128d11767e 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -273,22 +273,67 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
return offset;
}
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
static void
put_iova(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
+ struct msm_gem_vma *domain, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- msm_gem_unmap_vma(priv->aspace[id], &msm_obj->domain[id],
- msm_obj->sgt, get_dmabuf_ptr(obj));
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ if (iommu_present(&platform_bus_type)) {
+ msm_gem_unmap_vma(domain->aspace, domain,
+ msm_obj->sgt, get_dmabuf_ptr(obj));
+ }
+
+ obj_remove_domain(domain);
+ }
+}
+
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
+
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
+}
+
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
+
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
}
+
+ return NULL;
}
+#ifndef IOMMU_PRIV
+#define IOMMU_PRIV 0
+#endif
+
/* should be called under struct_mutex.. although it can be called
* from atomic context without struct_mutex to acquire an extra
* iova ref if you know one is already held.
@@ -296,49 +341,64 @@ put_iova(struct drm_gem_object *obj)
* That means when I do eventually need to add support for unpinning
* the refcnt counter needs to be atomic_t.
*/
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+int msm_gem_get_iova_locked(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
int ret = 0;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
if (IS_ERR(pages))
return PTR_ERR(pages);
- if (iommu_present(&platform_bus_type)) {
- ret = msm_gem_map_vma(priv->aspace[id],
- &msm_obj->domain[id], msm_obj->sgt,
- get_dmabuf_ptr(obj));
- } else
- msm_obj->domain[id].iova = physaddr(obj);
+ *iova = (uint64_t) physaddr(obj);
+ return 0;
+ }
+
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain))
+ return PTR_ERR(domain);
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ return PTR_ERR(pages);
+ }
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ get_dmabuf_ptr(obj), msm_obj->flags);
}
if (!ret)
- *iova = msm_obj->domain[id].iova;
+ *iova = domain->iova;
+ else
+ obj_remove_domain(domain);
return ret;
}
/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
int ret;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ domain = obj_get_domain(obj, aspace);
+ if (domain) {
+ *iova = domain->iova;
return 0;
}
mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
+ ret = msm_gem_get_iova_locked(obj, aspace, iova);
mutex_unlock(&obj->dev->struct_mutex);
return ret;
}
@@ -346,14 +406,18 @@ int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
- struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain = obj_get_domain(obj, aspace);
+
+ WARN_ON(!domain);
+
+ return domain ? domain->iova : 0;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -487,9 +551,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- struct msm_drm_private *priv = obj->dev->dev_private;
+ struct msm_gem_vma *domain;
uint64_t off = drm_vma_node_start(&obj->vma_node);
- int id;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p\t",
@@ -498,8 +561,9 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
obj->name, obj->refcount.refcount.counter,
off, msm_obj->vaddr);
- for (id = 0; id < priv->num_aspaces; id++)
- seq_printf(m, " %08llx", msm_obj->domain[id].iova);
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
seq_puts(m, "\n");
}
@@ -618,8 +682,12 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (!msm_obj)
return -ENOMEM;
- if (use_vram)
- msm_obj->vram_node = &msm_obj->domain[0].node;
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
+
+ if (!IS_ERR(domain))
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
@@ -627,6 +695,8 @@ static int msm_gem_new_impl(struct drm_device *dev,
reservation_object_init(msm_obj->resv);
INIT_LIST_HEAD(&msm_obj->submit_entry);
+ INIT_LIST_HEAD(&msm_obj->domains);
+
list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
*obj = &msm_obj->base;
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 86c297c6de32..ac46c473791f 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -18,6 +18,7 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include <linux/kref.h>
#include <linux/reservation.h>
#include "msm_drv.h"
@@ -26,7 +27,7 @@
struct msm_gem_aspace_ops {
int (*map)(struct msm_gem_address_space *, struct msm_gem_vma *,
- struct sg_table *sgt, void *priv);
+ struct sg_table *sgt, void *priv, unsigned int flags);
void (*unmap)(struct msm_gem_address_space *, struct msm_gem_vma *,
struct sg_table *sgt, void *priv);
@@ -38,12 +39,15 @@ struct msm_gem_address_space {
const char *name;
struct msm_mmu *mmu;
const struct msm_gem_aspace_ops *ops;
+ struct kref kref;
};
struct msm_gem_vma {
/* Node used by the GPU address space, but not the SDE address space */
struct drm_mm_node node;
+ struct msm_gem_address_space *aspace;
uint64_t iova;
+ struct list_head list;
};
struct msm_gem_object {
@@ -74,7 +78,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct msm_gem_vma domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -114,24 +118,25 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
*/
struct msm_gem_submit {
struct drm_device *dev;
- struct msm_gpu *gpu;
+ struct msm_gem_address_space *aspace;
struct list_head node; /* node in gpu submit_list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
uint32_t fence;
+ int ring;
bool valid;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
uint32_t type;
uint32_t size; /* in dwords */
- uint32_t iova;
+ uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
} cmd[MAX_CMDS];
struct {
uint32_t flags;
struct msm_gem_object *obj;
- uint32_t iova;
+ uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 1847f83b1e33..0566cefaae81 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -34,7 +34,7 @@ static inline void __user *to_user_ptr(u64 address)
}
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, int nr)
+ struct msm_gem_address_space *aspace, int nr)
{
struct msm_gem_submit *submit;
int sz = sizeof(*submit) + (nr * sizeof(submit->bos[0]));
@@ -42,7 +42,7 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
- submit->gpu = gpu;
+ submit->aspace = aspace;
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
@@ -90,7 +90,8 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
pagefault_disable();
}
- if (submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) {
+ if ((submit_bo.flags & ~MSM_SUBMIT_BO_FLAGS) ||
+ !(submit_bo.flags & MSM_SUBMIT_BO_FLAGS)) {
DRM_ERROR("invalid flags: %x\n", submit_bo.flags);
ret = -EINVAL;
goto out_unlock;
@@ -141,7 +142,7 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
struct msm_gem_object *msm_obj = submit->bos[i].obj;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, submit->aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -162,7 +163,7 @@ retry:
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
+ uint64_t iova;
if (slow_locked == i)
slow_locked = -1;
@@ -180,7 +181,7 @@ retry:
/* if locking succeeded, pin bo: */
ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->aspace, &iova);
/* this would break the logic in the fail path.. there is no
* reason for this to happen, but just to be on the safe side
@@ -229,7 +230,7 @@ fail:
}
static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
@@ -275,7 +276,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
to_user_ptr(relocs + (i * sizeof(submit_reloc)));
- uint32_t iova, off;
+ uint64_t iova;
+ uint32_t off;
bool valid;
ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
@@ -347,17 +349,19 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
- if (args->pipe != MSM_PIPE_3D0)
+ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
return -EINVAL;
gpu = priv->gpu;
+ if (!gpu)
+ return -ENXIO;
if (args->nr_cmds > MAX_CMDS)
return -EINVAL;
mutex_lock(&dev->struct_mutex);
- submit = submit_create(dev, gpu, args->nr_bos);
+ submit = submit_create(dev, ctx->aspace, args->nr_bos);
if (!submit) {
ret = -ENOMEM;
goto out;
@@ -376,7 +380,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
void __user *userptr =
to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
- uint32_t iova;
+ uint64_t iova;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
@@ -408,8 +412,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- if ((submit_cmd.size + submit_cmd.submit_offset) >=
- msm_obj->base.size) {
+ if (!(submit_cmd.size) ||
+ ((submit_cmd.size + submit_cmd.submit_offset) >
+ msm_obj->base.size)) {
DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
ret = -EINVAL;
goto out;
@@ -431,7 +436,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->nr_cmds = i;
- ret = msm_gpu_submit(gpu, submit, ctx);
+ /* Clamp the user submitted ring to the range of available rings */
+ submit->ring = clamp_t(uint32_t,
+ (args->flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT,
+ 0, gpu->nr_rings - 1);
+
+ ret = msm_gpu_submit(gpu, submit);
args->fence = submit->fence;
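For completeness, a sketch of how a client would name a target ring in the
submit flags, mirroring the clamp above. The MSM_SUBMIT_RING_* and
MSM_PIPE_3D0 definitions are assumed to ship with this series' msm_drm.h;
the values used here are only illustrative:

#include <stdint.h>

#define MSM_PIPE_3D0          0x10	/* illustrative value */
#define MSM_SUBMIT_RING_SHIFT 28	/* illustrative value */
#define MSM_SUBMIT_RING_MASK  (0x3 << MSM_SUBMIT_RING_SHIFT)

/* Build drm_msm_gem_submit.flags for a 3D submission on 'ring'.
 * The kernel clamps out-of-range rings, so a stale value degrades
 * gracefully instead of failing the submit.
 */
static uint32_t submit_flags_for_ring(uint32_t ring)
{
	return MSM_PIPE_3D0 |
	       ((ring << MSM_SUBMIT_RING_SHIFT) & MSM_SUBMIT_RING_MASK);
}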
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
index 53e70263e03c..7ca96831a9b3 100644
--- a/drivers/gpu/drm/msm/msm_gem_vma.c
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -19,6 +19,24 @@
#include "msm_gem.h"
#include "msm_mmu.h"
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+ struct msm_gem_address_space *aspace = container_of(kref,
+ struct msm_gem_address_space, kref);
+
+ if (aspace->ops->destroy)
+ aspace->ops->destroy(aspace);
+
+ kfree(aspace);
+}
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+ if (aspace)
+ kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
/* SDE address space operations */
static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
@@ -34,12 +52,14 @@ static void smmu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
DMA_BIDIRECTIONAL);
vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
}
static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv)
+ void *priv, unsigned int flags)
{
struct dma_buf *buf = priv;
int ret;
@@ -54,6 +74,9 @@ static int smmu_aspace_map_vma(struct msm_gem_address_space *aspace,
if (!ret)
vma->iova = sg_dma_address(sgt->sgl);
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
+
return ret;
}
@@ -79,6 +102,8 @@ msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
aspace->mmu = mmu;
aspace->ops = &smmu_aspace_ops;
+ kref_init(&aspace->kref);
+
return aspace;
}
@@ -104,16 +129,25 @@ static void iommu_aspace_unmap_vma(struct msm_gem_address_space *aspace,
drm_mm_remove_node(&vma->node);
vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
}
static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
- struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv)
+ struct msm_gem_vma *vma, struct sg_table *sgt, void *priv,
+ unsigned int flags)
{
struct msm_iommu_aspace *local = to_iommu_aspace(aspace);
size_t size = 0;
struct scatterlist *sg;
- int ret = 0, i;
+ int ret, i;
+ int iommu_flags = IOMMU_READ;
+
+ if (!(flags & MSM_BO_GPU_READONLY))
+ iommu_flags |= IOMMU_WRITE;
+
+ if (flags & MSM_BO_PRIVILEGED)
+ iommu_flags |= IOMMU_PRIV;
if (WARN_ON(drm_mm_node_allocated(&vma->node)))
return 0;
@@ -129,8 +163,11 @@ static int iommu_aspace_map_vma(struct msm_gem_address_space *aspace,
vma->iova = vma->node.start << PAGE_SHIFT;
if (aspace->mmu)
- ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova,
- sgt, IOMMU_READ | IOMMU_WRITE);
+ ret = aspace->mmu->funcs->map(aspace->mmu, vma->iova, sgt,
+ iommu_flags);
+
+ /* Get a reference to the aspace to keep it around */
+ kref_get(&aspace->kref);
return ret;
}
@@ -169,15 +206,17 @@ msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
local->base.mmu = mmu;
local->base.ops = &msm_iommu_aspace_ops;
+ kref_init(&local->base.kref);
+
return &local->base;
}
int msm_gem_map_vma(struct msm_gem_address_space *aspace,
struct msm_gem_vma *vma, struct sg_table *sgt,
- void *priv)
+ void *priv, unsigned int flags)
{
if (aspace && aspace->ops->map)
- return aspace->ops->map(aspace, vma, sgt, priv);
+ return aspace->ops->map(aspace, vma, sgt, priv, flags);
return -EINVAL;
}
@@ -203,11 +242,15 @@ msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
domain->geometry.aperture_end);
}
-void
-msm_gem_address_space_destroy(struct msm_gem_address_space *aspace)
+/* Create a new dynamic instance */
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end)
{
- if (aspace && aspace->ops->destroy)
- aspace->ops->destroy(aspace);
+ struct msm_mmu *child = msm_iommu_new_dynamic(parent);
- kfree(aspace);
+ if (IS_ERR(child))
+ return ERR_CAST(child);
+
+ return msm_gem_address_space_new(child, name, start, end);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 7b8f2d02b56d..3176f301e7a8 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -90,21 +90,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->active_level];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
- clk_prepare(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
- if (rate_clk && gpu->fast_rate)
- clk_set_rate(rate_clk, gpu->fast_rate);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 19200000);
+
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ clk_prepare(gpu->grp_clks[i]);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -113,24 +112,23 @@ static int enable_clk(struct msm_gpu *gpu)
static int disable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->nr_pwrlevels - 1];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
- if (rate_clk && gpu->slow_rate)
- clk_set_rate(rate_clk, gpu->slow_rate);
-
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
+
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 0);
+
return 0;
}
@@ -138,8 +136,9 @@ static int enable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_prepare_enable(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, gpu->bus_freq);
+
+ if (gpu->busfreq[gpu->active_level])
+ bs_set(gpu, gpu->busfreq[gpu->active_level]);
return 0;
}
@@ -147,7 +146,8 @@ static int disable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_disable_unprepare(gpu->ebi1_clk);
- if (gpu->bus_freq)
+
+ if (gpu->busfreq[gpu->active_level])
bs_set(gpu, 0);
return 0;
}
@@ -155,6 +155,8 @@ static int disable_axi(struct msm_gpu *gpu)
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -167,6 +169,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (WARN_ON(gpu->active_cnt <= 0))
return -EINVAL;
+ WARN_ON(pm_runtime_get_sync(&pdev->dev) < 0);
+
ret = enable_pwrrail(gpu);
if (ret)
return ret;
@@ -185,6 +189,8 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
int ret;
DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
@@ -209,6 +215,7 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
if (ret)
return ret;
+ pm_runtime_put(&pdev->dev);
return 0;
}
@@ -277,17 +284,35 @@ static void recover_worker(struct work_struct *work)
mutex_lock(&dev->struct_mutex);
if (msm_gpu_active(gpu)) {
struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
-
- /* retire completed submits, plus the one that hung: */
- retire_submits(gpu, fence + 1);
+ struct msm_ringbuffer *ring;
+ int i;
inactive_cancel(gpu);
+
+ FOR_EACH_RING(gpu, ring, i) {
+ uint32_t fence;
+
+ if (!ring)
+ continue;
+
+ fence = gpu->funcs->last_fence(gpu, ring);
+
+ /*
+ * Retire the faulting command on the active ring and
+ * make sure the other rings are cleaned up
+ */
+ if (ring == gpu->funcs->active_ring(gpu))
+ retire_submits(gpu, fence + 1);
+ else
+ retire_submits(gpu, fence);
+ }
+
+ /* Recover the GPU */
gpu->funcs->recover(gpu);
- /* replay the remaining submits after the one that hung: */
+ /* replay the remaining submits for all rings: */
list_for_each_entry(submit, &gpu->submit_list, node) {
- gpu->funcs->submit(gpu, submit, NULL);
+ gpu->funcs->submit(gpu, submit);
}
}
mutex_unlock(&dev->struct_mutex);
@@ -307,25 +332,28 @@ static void hangcheck_handler(unsigned long data)
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ uint32_t fence = gpu->funcs->last_fence(gpu, ring);
+ uint32_t submitted = gpu->funcs->submitted_fence(gpu, ring);
- if (fence != gpu->hangcheck_fence) {
+ if (fence != gpu->hangcheck_fence[ring->id]) {
/* some progress has been made.. ya! */
- gpu->hangcheck_fence = fence;
- } else if (fence < gpu->submitted_fence) {
+ gpu->hangcheck_fence[ring->id] = fence;
+ } else if (fence < submitted) {
/* no progress and not done.. hung! */
- gpu->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
- gpu->name);
+ gpu->hangcheck_fence[ring->id] = fence;
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->submitted_fence);
+ gpu->name, submitted);
+
queue_work(priv->wq, &gpu->recover_work);
}
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->submitted_fence > gpu->hangcheck_fence)
+ if (submitted > gpu->hangcheck_fence[ring->id])
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -434,54 +462,66 @@ out:
static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
{
struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- while (!list_empty(&gpu->submit_list)) {
- struct msm_gem_submit *submit;
-
- submit = list_first_entry(&gpu->submit_list,
- struct msm_gem_submit, node);
+ /*
+ * Find and retire all the submits in the same ring that are older than
+ * or equal to 'fence'
+ */
- if (submit->fence <= fence) {
+ list_for_each_entry_safe(submit, tmp, &gpu->submit_list, node) {
+ if (COMPARE_FENCE_LTE(submit->fence, fence)) {
list_del(&submit->node);
kfree(submit);
- } else {
- break;
}
}
}
-static void retire_worker(struct work_struct *work)
+static bool _fence_signaled(struct msm_gem_object *obj, uint32_t fence)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
- struct drm_device *dev = gpu->dev;
- uint32_t fence = gpu->funcs->last_fence(gpu);
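+ /*
+ * Masking with 0x3FFFFFFF strips what appears to be the ring id from the
+ * upper two bits; a non-zero result means the object has a pending GPU
+ * write, the stricter fence to retire on. Otherwise compare the read
+ * fence.
+ */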
+ if (obj->write_fence & 0x3FFFFFFF)
+ return COMPARE_FENCE_LTE(obj->write_fence, fence);
- msm_update_fence(gpu->dev, fence);
+ return COMPARE_FENCE_LTE(obj->read_fence, fence);
+}
- mutex_lock(&dev->struct_mutex);
+static void _retire_ring(struct msm_gpu *gpu, uint32_t fence)
+{
+ struct msm_gem_object *obj, *tmp;
retire_submits(gpu, fence);
- while (!list_empty(&gpu->active_list)) {
- struct msm_gem_object *obj;
-
- obj = list_first_entry(&gpu->active_list,
- struct msm_gem_object, mm_list);
-
- if ((obj->read_fence <= fence) &&
- (obj->write_fence <= fence)) {
- /* move to inactive: */
+ list_for_each_entry_safe(obj, tmp, &gpu->active_list, mm_list) {
+ if (_fence_signaled(obj, fence)) {
msm_gem_move_to_inactive(&obj->base);
- msm_gem_put_iova(&obj->base, gpu->id);
+ msm_gem_put_iova(&obj->base, gpu->aspace);
drm_gem_object_unreference(&obj->base);
- } else {
- break;
}
}
+}
- mutex_unlock(&dev->struct_mutex);
+static void retire_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+ struct drm_device *dev = gpu->dev;
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ uint32_t fence;
+
+ if (!ring)
+ continue;
+
+ fence = gpu->funcs->last_fence(gpu, ring);
+ msm_update_fence(gpu->dev, fence);
+
+ mutex_lock(&dev->struct_mutex);
+ _retire_ring(gpu, fence);
+ mutex_unlock(&dev->struct_mutex);
+ }
if (!msm_gpu_active(gpu))
inactive_start(gpu);
@@ -496,18 +536,16 @@ void msm_gpu_retire(struct msm_gpu *gpu)
}
/* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
int i, ret;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- submit->fence = ++priv->next_fence;
-
- gpu->submitted_fence = submit->fence;
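+ /*
+ * Fences are now allocated per ring: FENCE() folds the ring id together
+ * with that ring's private sequence counter, so a completed fence can be
+ * attributed back to the ring that produced it.
+ */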
+ submit->fence = FENCE(submit->ring, ++priv->next_fence[submit->ring]);
inactive_cancel(gpu);
@@ -515,7 +553,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_rd_dump_submit(submit);
- gpu->submitted_fence = submit->fence;
+ ring->submitted_fence = submit->fence;
update_sw_cntrs(gpu);
@@ -528,12 +566,12 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
if (!is_active(msm_obj)) {
- uint32_t iova;
+ uint64_t iova;
/* ring takes a reference to the bo and iova: */
drm_gem_object_reference(&msm_obj->base);
msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ submit->aspace, &iova);
}
if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
@@ -543,8 +581,7 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
- ret = gpu->funcs->submit(gpu, submit, ctx);
- priv->lastctx = ctx;
+ ret = gpu->funcs->submit(gpu, submit);
hangcheck_timer_reset(gpu);
@@ -561,17 +598,54 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static const char *clk_names[] = {
- "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
- "alt_mem_iface_clk",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+ struct clk *clk = devm_clk_get(dev, name);
+
+ DBG("clks[%s]: %p", name, clk);
+
+ return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct device *dev = &pdev->dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+ if (gpu->nr_clocks < 1) {
+ gpu->nr_clocks = 0;
+ return 0;
+ }
+
+ gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks,
+ sizeof(struct clk *), GFP_KERNEL);
+ if (!gpu->grp_clks)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ gpu->grp_clks[i] = get_clock(dev, name);
+
+ /* Remember the key clocks that we need to control later */
+ if (!strcmp(name, "core_clk"))
+ gpu->core_clk = gpu->grp_clks[i];
+ else if (!strcmp(name, "rbbmtimer_clk"))
+ gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+ ++i;
+ }
+
+ return 0;
+}
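+
+/*
+ * get_clocks() sizes gpu->grp_clks from the "clock-names" DT property and
+ * remembers core_clk and rbbmtimer_clk for later rate control. A purely
+ * illustrative GPU node might carry:
+ *
+ *	clock-names = "core_clk", "iface_clk", "rbbmtimer_clk";
+ *
+ * Absent clocks are tolerated because get_clock() maps lookup errors to
+ * NULL.
+ */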
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz)
+ const char *name, struct msm_gpu_config *config)
{
struct iommu_domain *iommu;
- int i, ret;
+ int i, ret, nr_rings;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -595,17 +669,16 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
spin_lock_init(&gpu->perf_lock);
- BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
- gpu->mmio = msm_ioremap(pdev, ioname, name);
+ gpu->mmio = msm_ioremap(pdev, config->ioname, name);
if (IS_ERR(gpu->mmio)) {
ret = PTR_ERR(gpu->mmio);
goto fail;
}
/* Get Interrupt: */
- gpu->irq = platform_get_irq_byname(pdev, irqname);
+ gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
dev_err(drm->dev, "failed to get irq: %d\n", ret);
@@ -619,13 +692,11 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
goto fail;
}
- /* Acquire clocks: */
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
- DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
- if (IS_ERR(gpu->grp_clks[i]))
- gpu->grp_clks[i] = NULL;
- }
+ pm_runtime_enable(&pdev->dev);
+
+ ret = get_clocks(pdev, gpu);
+ if (ret)
+ goto fail;
gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -650,8 +721,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
iommu = iommu_domain_alloc(&platform_bus_type);
if (iommu) {
/* TODO 32b vs 64b address space.. */
- iommu->geometry.aperture_start = 0x1000;
- iommu->geometry.aperture_end = 0xffffffff;
+ iommu->geometry.aperture_start = config->va_start;
+ iommu->geometry.aperture_end = config->va_end;
dev_info(drm->dev, "%s: using IOMMU\n", name);
gpu->aspace = msm_gem_address_space_create(&pdev->dev,
@@ -667,39 +738,80 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
} else {
dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
}
- gpu->id = msm_register_address_space(drm, gpu->aspace);
+ nr_rings = config->nr_rings;
- /* Create ringbuffer: */
- mutex_lock(&drm->struct_mutex);
- gpu->rb = msm_ringbuffer_new(gpu, ringsz);
- mutex_unlock(&drm->struct_mutex);
- if (IS_ERR(gpu->rb)) {
- ret = PTR_ERR(gpu->rb);
- gpu->rb = NULL;
- dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
- goto fail;
+ if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+ WARN(1, "Only creating %zu ringbuffers\n", ARRAY_SIZE(gpu->rb));
+ nr_rings = ARRAY_SIZE(gpu->rb);
}
+ /* Create ringbuffer(s): */
+ for (i = 0; i < nr_rings; i++) {
+ mutex_lock(&drm->struct_mutex);
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i);
+ mutex_unlock(&drm->struct_mutex);
+
+ if (IS_ERR(gpu->rb[i])) {
+ ret = PTR_ERR(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ dev_err(drm->dev,
+ "could not create ringbuffer %d: %d\n", i, ret);
+ goto fail;
+ }
+ }
+
+ gpu->nr_rings = nr_rings;
+
+#ifdef CONFIG_SMP
+ gpu->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+ gpu->pm_qos_req_dma.irq = gpu->irq;
+#endif
+
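+ /*
+ * Register a CPU DMA latency request affined to the GPU interrupt (on
+ * SMP) so that only the core servicing GPU IRQs is held to the latency
+ * bound rather than every CPU in the system.
+ */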
+ pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+
bs_init(gpu);
+ gpu->snapshot = msm_snapshot_new(gpu);
+ if (IS_ERR(gpu->snapshot))
+ gpu->snapshot = NULL;
+
return 0;
fail:
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ if (gpu->rb[i])
+ msm_ringbuffer_destroy(gpu->rb[i]);
+ }
+
+ pm_runtime_disable(&pdev->dev);
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int i;
+
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
bs_fini(gpu);
- if (gpu->rb) {
- if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
- msm_ringbuffer_destroy(gpu->rb);
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++) {
+ if (!gpu->rb[i])
+ continue;
+
+ if (gpu->rb[i]->iova)
+ msm_gem_put_iova(gpu->rb[i]->bo, gpu->aspace);
+
+ msm_ringbuffer_destroy(gpu->rb[i]);
}
+
+ msm_snapshot_destroy(gpu, gpu->snapshot);
+ pm_runtime_disable(&pdev->dev);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 309422b73a49..06dfaabbfcfe 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,14 +19,24 @@
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
#include "msm_ringbuffer.h"
+#include "msm_snapshot.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_config {
+ const char *ioname;
+ const char *irqname;
+ int nr_rings;
+ uint64_t va_start;
+ uint64_t va_end;
+};
+
/* So far, with hardware that I've seen to date, we can have:
* + zero, one, or two z180 2d cores
* + a3xx or a2xx 3d core, which share a common CP (the firmware
@@ -46,18 +56,21 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
- int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
- void (*flush)(struct msm_gpu *gpu);
- bool (*idle)(struct msm_gpu *gpu);
+ int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+ void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
- uint32_t (*last_fence)(struct msm_gpu *gpu);
+ uint32_t (*last_fence)(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
+ uint32_t (*submitted_fence)(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
#endif
+ int (*snapshot)(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
};
struct msm_gpu {
@@ -77,14 +90,12 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
- struct msm_ringbuffer *rb;
- uint32_t rb_iova;
+ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+ int nr_rings;
/* list of GEM active objects: */
struct list_head active_list;
- uint32_t submitted_fence;
-
/* is gpu powered/active? */
int active_cnt;
bool inactive;
@@ -96,12 +107,19 @@ struct msm_gpu {
int irq;
struct msm_gem_address_space *aspace;
- int id;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk *ebi1_clk, *grp_clks[6];
- uint32_t fast_rate, slow_rate, bus_freq;
+ struct clk **grp_clks;
+ struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+ int nr_clocks;
+
+ uint32_t gpufreq[10];
+ uint32_t busfreq[10];
+ uint32_t nr_pwrlevels;
+ uint32_t active_level;
+
+ struct pm_qos_request pm_qos_req_dma;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
@@ -117,15 +135,44 @@ struct msm_gpu {
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
- uint32_t hangcheck_fence;
+ uint32_t hangcheck_fence[MSM_GPU_MAX_RINGS];
struct work_struct recover_work;
struct list_head submit_list;
+
+ struct msm_snapshot *snapshot;
};
+/* It turns out that all targets use the same ringbuffer size. */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
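+/*
+ * The ring control fields are expressed as ilog2 of 8-byte units: with
+ * SZ_32K and a 32 byte block size this evaluates to
+ * BUFSZ(ilog2(32768 / 8)) = BUFSZ(12) and BLKSZ(ilog2(32 / 8)) = BLKSZ(2).
+ */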
+#define MSM_GPU_RB_CNTL_DEFAULT \
+ (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
+static inline struct msm_ringbuffer *__get_ring(struct msm_gpu *gpu, int index)
+{
+ return (index < ARRAY_SIZE(gpu->rb) ? gpu->rb[index] : NULL);
+}
+
+#define FOR_EACH_RING(gpu, ring, index) \
+ for (index = 0, ring = (gpu)->rb[0]; \
+ index < (gpu)->nr_rings && index < ARRAY_SIZE((gpu)->rb); \
+ index++, ring = __get_ring(gpu, index))
+
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (gpu->funcs->submitted_fence(gpu, ring) >
+ gpu->funcs->last_fence(gpu, ring))
+ return true;
+ }
+
+ return false;
}
/* Perf-Counters:
@@ -151,6 +198,45 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
return msm_readl(gpu->mmio + (reg << 2));
}
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ uint32_t val = gpu_read(gpu, reg);
+
+ val &= ~mask;
+ gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+ * wanted to, but really is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+ val = (u64) msm_readl(gpu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
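+
+/*
+ * Example usage (register names purely illustrative):
+ *
+ *	u64 ts = gpu_read64(gpu, REG_TIMESTAMP_LO, REG_TIMESTAMP_HI);
+ *	gpu_write64(gpu, REG_SCRATCH_LO, REG_SCRATCH_HI, ts);
+ */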
+
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
@@ -160,12 +246,12 @@ int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz);
+ const char *name, struct msm_gpu_config *config);
+
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7e64a6255c11..3c16222b8890 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -15,41 +15,189 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_platform.h>
#include "msm_drv.h"
-#include "msm_mmu.h"
-
-struct msm_iommu {
- struct msm_mmu base;
- struct iommu_domain *domain;
-};
-#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+#include "msm_iommu.h"
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
- pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+ pr_warn_ratelimited("*** fault: iova=%16llX, flags=%d\n", (u64) iova, flags);
return 0;
}
+/*
+ * Get and enable the IOMMU clocks so that we can make
+ * sure they stay on the entire duration so that we can
+ * safely change the pagetable from the GPU
+ */
+static void _get_iommu_clocks(struct msm_mmu *mmu, struct platform_device *pdev)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct device *dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ if (WARN_ON(!pdev))
+ return;
+
+ dev = &pdev->dev;
+
+ iommu->nr_clocks =
+ of_property_count_strings(dev->of_node, "clock-names");
+
+ if (iommu->nr_clocks < 0) {
+ iommu->nr_clocks = 0;
+ return;
+ }
+
+ if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
+ iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ if (i == iommu->nr_clocks)
+ break;
+
+ iommu->clocks[i] = clk_get(dev, name);
+ if (IS_ERR(iommu->clocks[i]))
+ iommu->clocks[i] = NULL;
+ else
+ clk_prepare_enable(iommu->clocks[i]);
+
+ i++;
+ }
+}
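+
+/*
+ * The clocks grabbed above are left enabled on purpose; the matching
+ * disable happens in msm_iommu_detach() so the IOMMU stays clocked for as
+ * long as the GPU might switch pagetables underneath it.
+ */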
+
+static int _attach_iommu_device(struct msm_mmu *mmu,
+ struct iommu_domain *domain, const char **names, int cnt)
+{
+ int i;
+
+ /* See if there is an "iommus" property on the current device. If not,
+ * look through the named children for a node that has one.
+ */
+
+ if (of_find_property(mmu->dev->of_node, "iommus", NULL))
+ return iommu_attach_device(domain, mmu->dev);
+
+ /* Look through the list of names for a target */
+ for (i = 0; i < cnt; i++) {
+ struct device_node *node =
+ of_find_node_by_name(mmu->dev->of_node, names[i]);
+
+ if (!node)
+ continue;
+
+ if (of_find_property(node, "iommus", NULL)) {
+ struct platform_device *pdev;
+
+ /* Get the platform device for the node */
+ of_platform_populate(node->parent, NULL, NULL,
+ mmu->dev);
+
+ pdev = of_find_device_by_node(node);
+
+ if (!pdev)
+ continue;
+
+ _get_iommu_clocks(mmu,
+ of_find_device_by_node(node->parent));
+
+ mmu->dev = &pdev->dev;
+
+ return iommu_attach_device(domain, mmu->dev);
+ }
+ }
+
+ dev_err(mmu->dev, "Couldn't find a IOMMU device\n");
+ return -ENODEV;
+}
+
static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- return iommu_attach_device(iommu->domain, mmu->dev);
+ int val = 1, ret;
+
+ /* Hope springs eternal */
+ iommu->allow_dynamic = true;
+
+ /* per-instance pagetables need TTBR1 support in the IOMMU driver */
+ ret = iommu_domain_set_attr(iommu->domain,
+ DOMAIN_ATTR_ENABLE_TTBR1, &val);
+ if (ret)
+ iommu->allow_dynamic = false;
+
+ /* Attach the device to the domain */
+ ret = _attach_iommu_device(mmu, iommu->domain, names, cnt);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the context bank for the base domain; this will be shared with
+ * the children.
+ */
+ iommu->cb = -1;
+ if (iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &iommu->cb))
+ iommu->allow_dynamic = false;
+
+ return 0;
+}
+
+static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ static unsigned int procid;
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int ret;
+ unsigned int id;
+
+ /* Assign a unique procid for the domain to cut down on TLB churn */
+ id = ++procid;
+
+ iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_PROCID, &id);
+
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the TTBR0 and the CONTEXTIDR - these will be used by the GPU to
+ * switch the pagetable on its own.
+ */
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_TTBR0,
+ &iommu->ttbr0);
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXTIDR,
+ &iommu->contextidr);
+
+ return 0;
+}
+
+static void msm_iommu_detach(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ iommu_detach_device(iommu->domain, mmu->dev);
+
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_disable_unprepare(iommu->clocks[i]);
+ }
}
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
iommu_detach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, int prot)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
unsigned int i, j;
int ret;
@@ -60,7 +208,7 @@ static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
phys_addr_t pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %pa(%zx)", i, iova, &pa, bytes);
+ VERB("map[%d]: %016llx %pa(%zx)", i, iova, &pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -82,13 +230,13 @@ fail:
return ret;
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
int i;
for_each_sg(sgt->sgl, sg, sgt->nents, i) {
@@ -99,7 +247,7 @@ static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %016llx(%zx)", i, iova, bytes);
BUG_ON(!PAGE_ALIGNED(bytes));
@@ -124,7 +272,16 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+static const struct msm_mmu_funcs dynamic_funcs = {
+ .attach = msm_iommu_attach_dynamic,
+ .detach = msm_iommu_detach_dynamic,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+struct msm_mmu *_msm_iommu_new(struct device *dev, struct iommu_domain *domain,
+ const struct msm_mmu_funcs *funcs)
{
struct msm_iommu *iommu;
@@ -133,8 +290,54 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
- msm_mmu_init(&iommu->base, dev, &funcs);
+ msm_mmu_init(&iommu->base, dev, funcs);
iommu_set_fault_handler(domain, msm_fault_handler, dev);
return &iommu->base;
}
+struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+{
+ return _msm_iommu_new(dev, domain, &funcs);
+}
+
+/*
+ * Given a base domain that is attached to an IOMMU device try to create a
+ * dynamic domain that is also attached to the same device but allocates a new
+ * pagetable. This is used to allow multiple pagetables to be attached to the
+ * same device.
+ */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
+{
+ struct msm_iommu *base_iommu = to_msm_iommu(base);
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
+ int ret, val = 1;
+
+ /* Don't continue if the base domain didn't have the support we need */
+ if (!base || !base_iommu->allow_dynamic)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return ERR_PTR(-ENODEV);
+
+ mmu = _msm_iommu_new(base->dev, domain, &dynamic_funcs);
+
+ if (IS_ERR(mmu)) {
+ if (domain)
+ iommu_domain_free(domain);
+ return mmu;
+ }
+
+ ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &val);
+ if (ret) {
+ msm_iommu_destroy(mmu);
+ return ERR_PTR(ret);
+ }
+
+ /* Set the context bank to match the base domain */
+ iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &base_iommu->cb);
+
+ return mmu;
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.h b/drivers/gpu/drm/msm/msm_iommu.h
new file mode 100644
index 000000000000..d005cfb9758f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.h
@@ -0,0 +1,37 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_IOMMU_H_
+#define _MSM_IOMMU_H_
+
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+ int cb;
+ phys_addr_t ttbr0;
+ uint32_t contextidr;
+ bool allow_dynamic;
+
+ struct clk *clocks[5];
+ int nr_clocks;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static inline bool msm_iommu_allow_dynamic(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return iommu->allow_dynamic;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 3d750eca5cae..501f12bef00d 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -32,10 +32,10 @@ enum msm_mmu_domain_type {
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
- int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
+ void (*detach)(struct msm_mmu *mmu);
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt);
+ int (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt);
int (*map_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
enum dma_data_direction dir);
void (*unmap_sg)(struct msm_mmu *mmu, struct sg_table *sgt,
@@ -62,5 +62,6 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
struct msm_mmu *msm_smmu_new(struct device *dev,
enum msm_mmu_domain_type domain);
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 9a78c48817c6..6dbb516cd4dc 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -307,7 +307,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
for (i = 0; i < submit->nr_cmds; i++) {
uint32_t idx = submit->cmd[i].idx;
- uint32_t iova = submit->cmd[i].iova;
+ uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
struct msm_gem_object *obj = submit->bos[idx].obj;
const char *buf = msm_gem_vaddr_locked(&obj->base);
@@ -315,7 +315,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
buf += iova - submit->bos[idx].iova;
rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, szd * 4 }, 8);
+ (uint64_t[2]) { iova, szd * 4 }, 16);
rd_write_section(rd, RD_BUFFER_CONTENTS,
buf, szd * 4);
@@ -329,7 +329,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
- (uint32_t[2]){ iova, szd }, 8);
+ (uint64_t[2]) { iova, szd }, 16);
break;
}
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index 1f14b908b221..14a16c4578d9 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,12 +18,13 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id)
{
struct msm_ringbuffer *ring;
int ret;
- size = ALIGN(size, 4); /* size should be dword aligned */
+ /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
@@ -32,7 +33,8 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
- ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+ ring->id = id;
+ ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ, MSM_BO_WC);
if (IS_ERR(ring->bo)) {
ret = PTR_ERR(ring->bo);
ring->bo = NULL;
@@ -40,10 +42,11 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->start = msm_gem_vaddr_locked(ring->bo);
- ring->end = ring->start + (size / 4);
+ ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+ ring->next = ring->start;
ring->cur = ring->start;
- ring->size = size;
+ spin_lock_init(&ring->lock);
return ring;
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 6e0e1049fa4f..1e84905073bf 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -22,12 +22,15 @@
struct msm_ringbuffer {
struct msm_gpu *gpu;
- int size;
+ int id;
struct drm_gem_object *bo;
- uint32_t *start, *end, *cur;
+ uint32_t *start, *end, *cur, *next;
+ uint64_t iova;
+ uint32_t submitted_fence;
+ spinlock_t lock;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +38,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
- if (ring->cur == ring->end)
- ring->cur = ring->start;
- *(ring->cur++) = data;
+ /*
+ * ring->next points to the current command being written - it won't be
+ * committed as ring->cur until the flush
+ */
+ if (ring->next == ring->end)
+ ring->next = ring->start;
+ *(ring->next++) = data;
}
#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
index e3db6690e38a..c99f51e09700 100644
--- a/drivers/gpu/drm/msm/msm_smmu.c
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -86,7 +86,7 @@ static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
return 0;
}
-static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static void msm_smmu_detach(struct msm_mmu *mmu)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
@@ -104,14 +104,14 @@ static void msm_smmu_detach(struct msm_mmu *mmu, const char **names, int cnt)
dev_dbg(client->dev, "iommu domain detached\n");
}
-static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
+static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt, int prot)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
unsigned int i, j;
int ret;
@@ -126,7 +126,7 @@ static int msm_smmu_map(struct msm_mmu *mmu, uint32_t iova,
u32 pa = sg_phys(sg) - sg->offset;
size_t bytes = sg->length + sg->offset;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ VERB("map[%d]: %16llx %08x(%zx)", i, iova, pa, bytes);
ret = iommu_map(domain, da, pa, bytes, prot);
if (ret)
@@ -172,14 +172,14 @@ static void msm_smmu_unmap_sg(struct msm_mmu *mmu, struct sg_table *sgt,
dma_unmap_sg(client->dev, sgt->sgl, sgt->nents, dir);
}
-static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
+static int msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
struct sg_table *sgt)
{
struct msm_smmu *smmu = to_msm_smmu(mmu);
struct msm_smmu_client *client = msm_smmu_to_client(smmu);
struct iommu_domain *domain;
struct scatterlist *sg;
- unsigned int da = iova;
+ uint64_t da = iova;
int i;
if (!client)
@@ -197,7 +197,7 @@ static int msm_smmu_unmap(struct msm_mmu *mmu, uint32_t iova,
if (unmapped < bytes)
return unmapped;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ VERB("unmap[%d]: %16llx(%zx)", i, iova, bytes);
WARN_ON(!PAGE_ALIGNED(bytes));
diff --git a/drivers/gpu/drm/msm/msm_snapshot.c b/drivers/gpu/drm/msm/msm_snapshot.c
new file mode 100644
index 000000000000..30f3e5c64ebd
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_snapshot_api.h"
+
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+
+ if (!snapshot)
+ return;
+
+ dma_free_coherent(&pdev->dev, SZ_1M, snapshot->ptr,
+ snapshot->physaddr);
+
+ kfree(snapshot);
+}
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+ if (!snapshot)
+ return ERR_PTR(-ENOMEM);
+
+ snapshot->ptr = dma_alloc_coherent(&pdev->dev, SZ_1M,
+ &snapshot->physaddr, GFP_KERNEL);
+
+ if (!snapshot->ptr) {
+ kfree(snapshot);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ return snapshot;
+}
+
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ int ret;
+ struct msm_snapshot_header header;
+ uint64_t val;
+
+ if (!snapshot)
+ return -ENOMEM;
+
+ /*
+ * For now, blow away the snapshot and take a new one - the most
+ * interesting hang is the last one we saw
+ */
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ header.magic = SNAPSHOT_MAGIC;
+ gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+ header.gpuid = lower_32_bits(val);
+
+ gpu->funcs->get_param(gpu, MSM_PARAM_CHIP_ID, &val);
+ header.chipid = lower_32_bits(val);
+
+ seq_buf_putmem(&snapshot->buf, &header, sizeof(header));
+
+ ret = gpu->funcs->snapshot(gpu, snapshot);
+
+ if (!ret) {
+ struct msm_snapshot_section_header end;
+
+ end.magic = SNAPSHOT_SECTION_MAGIC;
+ end.id = SNAPSHOT_SECTION_END;
+ end.size = sizeof(end);
+
+ seq_buf_putmem(&snapshot->buf, &end, sizeof(end));
+
+ dev_info(gpu->dev->dev, "GPU snapshot created [%pa (%u bytes)]\n",
+ &snapshot->physaddr, seq_buf_used(&snapshot->buf));
+ }
+
+ return ret;
+}
+
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m)
+{
+ if (gpu && gpu->snapshot)
+ seq_write(m, gpu->snapshot->ptr,
+ seq_buf_used(&gpu->snapshot->buf));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/msm_snapshot.h b/drivers/gpu/drm/msm/msm_snapshot.h
new file mode 100644
index 000000000000..247e1358c885
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_H_
+#define MSM_SNAPSHOT_H_
+
+#include <linux/string.h>
+#include <linux/seq_buf.h>
+#include "msm_snapshot_api.h"
+
+struct msm_snapshot {
+ void *ptr;
+ struct seq_buf buf;
+ phys_addr_t physaddr;
+ uint32_t index;
+ uint32_t remain;
+ unsigned long timestamp;
+ void *priv;
+};
+
+/* Write a uint32_t value to the next position in the snapshot buffer */
+static inline void SNAPSHOT_WRITE_U32(struct msm_snapshot *snapshot,
+ uint32_t value)
+{
+ seq_buf_putmem(&snapshot->buf, &value, sizeof(value));
+}
+
+/* Copy a block of memory to the next position in the snapshot buffer */
+static inline void SNAPSHOT_MEMCPY(struct msm_snapshot *snapshot,
+ const void *src, uint32_t size)
+{
+ if (size)
+ seq_buf_putmem(&snapshot->buf, src, size);
+}
+
+static inline bool _snapshot_header(struct msm_snapshot *snapshot,
+ struct msm_snapshot_section_header *header,
+ u32 headsz, u32 datasz, u32 id)
+{
+ u32 size = headsz + datasz;
+
+ if (seq_buf_buffer_left(&snapshot->buf) <= size)
+ return false;
+
+ /* Fill in the section header */
+ header->magic = SNAPSHOT_SECTION_MAGIC;
+ header->id = id;
+ header->size = size;
+
+ /* Copy the header into the snapshot buffer */
+ seq_buf_putmem(&snapshot->buf, header, headsz);
+
+ /* The caller will fill in the data from here */
+ return true;
+}
+
+/* SNAPSHOT_HEADER
+ * _snapshot: pointer to struct msm_snapshot
+ * _header: Local variable containing the sub-section header
+ * _id: Section ID to write
+ * _dwords: Size of the data section (in dwords)
+ */
+#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
+ _snapshot_header((_snapshot), \
+ (struct msm_snapshot_section_header *) &(_header), \
+ sizeof(_header), (_dwords) << 2, (_id))
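+
+/*
+ * Usage sketch (section choice and payload purely illustrative): a dump
+ * routine declares the section header on the stack, fills in its fields,
+ * reserves room, then streams the payload:
+ *
+ *	struct msm_snapshot_regs header = { .count = count };
+ *
+ *	if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ *			count * 2))
+ *		return 0;
+ *	then SNAPSHOT_WRITE_U32() each register/value pair
+ */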
+
+struct msm_gpu;
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu);
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m);
+
+#endif
+
diff --git a/drivers/gpu/drm/msm/msm_snapshot_api.h b/drivers/gpu/drm/msm/msm_snapshot_api.h
new file mode 100644
index 000000000000..9f0adb9ee784
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot_api.h
@@ -0,0 +1,121 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_API_H_
+#define MSM_SNAPSHOT_API_H_
+
+#include <linux/types.h>
+
+/* High word is the magic, low word is the snapshot header version */
+#define SNAPSHOT_MAGIC 0x504D0002
+
+struct msm_snapshot_header {
+ __u32 magic;
+ __u32 gpuid;
+ __u32 chipid;
+} __packed;
+
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct msm_snapshot_section_header {
+ __u16 magic;
+ __u16 id;
+ __u32 size;
+} __packed;
+
+/* Section identifiers */
+#define SNAPSHOT_SECTION_OS 0x0101
+#define SNAPSHOT_SECTION_REGS_V2 0x0202
+#define SNAPSHOT_SECTION_RB_V2 0x0302
+#define SNAPSHOT_SECTION_IB_V2 0x0402
+#define SNAPSHOT_SECTION_INDEXED_REGS 0x0501
+#define SNAPSHOT_SECTION_DEBUG 0x0901
+#define SNAPSHOT_SECTION_DEBUGBUS 0x0A01
+#define SNAPSHOT_SECTION_GPU_OBJECT_V2 0x0B02
+#define SNAPSHOT_SECTION_MEMLIST_V2 0x0E02
+#define SNAPSHOT_SECTION_SHADER 0x1201
+#define SNAPSHOT_SECTION_END 0xFFFF
+
+#define SNAPSHOT_OS_LINUX_V3 0x00000202
+
+struct msm_snapshot_linux {
+ struct msm_snapshot_section_header header;
+ int osid;
+ __u32 seconds;
+ __u32 power_flags;
+ __u32 power_level;
+ __u32 power_interval_timeout;
+ __u32 grpclk;
+ __u32 busclk;
+ __u64 ptbase;
+ __u32 pid;
+ __u32 current_context;
+ __u32 ctxtcount;
+ unsigned char release[32];
+ unsigned char version[32];
+ unsigned char comm[16];
+} __packed;
+
+struct msm_snapshot_ringbuffer {
+ struct msm_snapshot_section_header header;
+ int start;
+ int end;
+ int rbsize;
+ int wptr;
+ int rptr;
+ int count;
+ __u32 timestamp_queued;
+ __u32 timestamp_retired;
+ __u64 gpuaddr;
+ __u32 id;
+} __packed;
+
+struct msm_snapshot_regs {
+ struct msm_snapshot_section_header header;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_indexed_regs {
+ struct msm_snapshot_section_header header;
+ __u32 index_reg;
+ __u32 data_reg;
+ __u32 start;
+ __u32 count;
+} __packed;
+
+#define SNAPSHOT_DEBUG_CP_MEQ 7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM 8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM 9
+#define SNAPSHOT_DEBUG_CP_ROQ 10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY 11
+#define SNAPSHOT_DEBUG_CP_MERCIU 12
+
+struct msm_snapshot_debug {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 size;
+} __packed;
+
+struct msm_snapshot_debugbus {
+ struct msm_snapshot_section_header header;
+ __u32 id;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_shader {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 index;
+ __u32 size;
+} __packed;
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
index ac9997c238cd..31cf25ab5691 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.c
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -65,8 +65,12 @@ static void sde_connector_destroy(struct drm_connector *connector)
c_conn = to_sde_connector(connector);
+ if (c_conn->ops.pre_deinit)
+ c_conn->ops.pre_deinit(connector, c_conn->display);
+
if (c_conn->blob_caps)
drm_property_unreference_blob(c_conn->blob_caps);
+
msm_property_destroy(&c_conn->property_info);
drm_connector_unregister(connector);
@@ -88,8 +92,7 @@ static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
return;
}
- msm_framebuffer_cleanup(c_state->out_fb,
- c_state->mmu_id);
+ msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
drm_framebuffer_unreference(c_state->out_fb);
c_state->out_fb = NULL;
@@ -193,7 +196,7 @@ sde_connector_atomic_duplicate_state(struct drm_connector *connector)
if (c_state->out_fb) {
drm_framebuffer_reference(c_state->out_fb);
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("failed to prepare fb, %d\n", rc);
}
@@ -241,14 +244,14 @@ static int sde_connector_atomic_set_property(struct drm_connector *connector,
rc = -EFAULT;
} else {
if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
else
- c_state->mmu_id =
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ c_state->aspace =
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
rc = msm_framebuffer_prepare(c_state->out_fb,
- c_state->mmu_id);
+ c_state->aspace);
if (rc)
SDE_ERROR("prep fb failed, %d\n", rc);
}
@@ -492,18 +495,17 @@ struct drm_connector *sde_connector_init(struct drm_device *dev,
c_conn->panel = panel;
c_conn->display = display;
- /* cache mmu_id's for later */
sde_kms = to_sde_kms(priv->kms);
if (sde_kms->vbif[VBIF_NRT]) {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- c_conn->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
if (ops)
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
index 9580282291db..3f26ee7d5965 100644
--- a/drivers/gpu/drm/msm/sde/sde_connector.h
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -44,6 +44,15 @@ struct sde_connector_ops {
void *display);
/**
+ * pre_deinit - perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+ int (*pre_deinit)(struct drm_connector *connector,
+ void *display);
+
+ /**
* detect - determine if connector is connected
* @connector: Pointer to drm connector structure
* @force: Force detect setting from drm framework
@@ -140,7 +149,7 @@ struct sde_connector {
struct drm_panel *panel;
void *display;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
char name[SDE_CONNECTOR_NAME_SIZE];
@@ -195,13 +204,13 @@ struct sde_connector {
* struct sde_connector_state - private connector status structure
* @base: Base drm connector structure
* @out_fb: Pointer to output frame buffer, if applicable
- * @mmu_id: MMU ID for accessing frame buffer objects, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
* @property_values: Local cache of current connector property values
*/
struct sde_connector_state {
struct drm_connector_state base;
struct drm_framebuffer *out_fb;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
uint64_t property_values[CONNECTOR_PROP_COUNT];
};
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
index b2853e874d92..dbfc2dd11a17 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -432,6 +432,99 @@ void sde_core_irq_uninstall(struct sde_kms *sde_kms)
sde_kms->irq_obj.total_irqs = 0;
}
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms;
+
+ if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+ SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+ return;
+ }
+ sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+ struct sde_kms *sde_kms;
+
+ if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+ SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+ return;
+ }
+ sde_kms = irq_data_get_irq_chip_data(irqd);
+
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip sde_hw_irq_chip = {
+ .name = "sde",
+ .irq_mask = sde_hw_irq_mask,
+ .irq_unmask = sde_hw_irq_unmask,
+};
+
+static int sde_hw_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct sde_kms *sde_kms;
+ int rc;
+
+ if (!domain || !domain->host_data) {
+ SDE_ERROR("invalid parameters domain %d\n", domain != 0);
+ return -EINVAL;
+ }
+ sde_kms = domain->host_data;
+
+ irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+ rc = irq_set_chip_data(irq, sde_kms);
+
+ return rc;
+}
+
+static const struct irq_domain_ops sde_hw_irqdomain_ops = {
+ .map = sde_hw_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ if (!sde_kms->dev || !sde_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return -EINVAL;
+ }
+
+ dev = sde_kms->dev->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &sde_hw_irqdomain_ops, sde_kms);
+ if (!domain) {
+ pr_err("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ sde_kms->irq_controller.enabled_mask = 0;
+ sde_kms->irq_controller.domain = domain;
+
+ return 0;
+}
+
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+ if (sde_kms->irq_controller.domain) {
+ irq_domain_remove(sde_kms->irq_controller.domain);
+ sde_kms->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
{
/*
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
index 92642e73daa8..ee1b9bd1d32b 100644
--- a/drivers/gpu/drm/msm/sde/sde_core_irq.h
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -38,6 +38,20 @@ int sde_core_irq_postinstall(struct sde_kms *sde_kms);
void sde_core_irq_uninstall(struct sde_kms *sde_kms);
/**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
* sde_core_irq - core IRQ handler
* @sde_kms: SDE handle
* @return: interrupt handling status
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
index ed4b7be34281..2205dd98a927 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -264,7 +264,7 @@ struct sde_encoder_phys_cmd {
* @wb_fmt: Writeback pixel format
* @frame_count: Counter of completed writeback operations
* @kickoff_count: Counter of issued writeback operations
- * @mmu_id: mmu identifier for non-secure/secure domain
+ * @aspace: address space identifier for non-secure/secure domain
* @wb_dev: Pointer to writeback device
* @start_time: Start time of writeback latest request
* @end_time: End time of writeback latest request
@@ -285,7 +285,7 @@ struct sde_encoder_phys_wb {
const struct sde_format *wb_fmt;
u32 frame_count;
u32 kickoff_count;
- int mmu_id[SDE_IOMMU_DOMAIN_MAX];
+ struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
struct sde_wb_device *wb_dev;
ktime_t start_time;
ktime_t end_time;
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
index 9943e3906df0..9368c4974126 100644
--- a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -180,7 +180,8 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
struct sde_hw_wb *hw_wb;
struct sde_hw_wb_cfg *wb_cfg;
const struct msm_format *format;
- int ret, mmu_id;
+ int ret;
+ struct msm_gem_address_space *aspace;
if (!phys_enc) {
SDE_ERROR("invalid encoder\n");
@@ -193,9 +194,9 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->intf_mode = phys_enc->intf_mode;
wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
- mmu_id = (wb_cfg->is_secure) ?
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] :
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE];
+ aspace = (wb_cfg->is_secure) ?
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
@@ -217,7 +218,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->roi = *wb_roi;
if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
- ret = sde_format_populate_layout(mmu_id, fb, &wb_cfg->dest);
+ ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
if (ret) {
SDE_DEBUG("failed to populate layout %d\n", ret);
return;
@@ -226,7 +227,7 @@ static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
wb_cfg->dest.height = fb->height;
wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
} else {
- ret = sde_format_populate_layout_with_roi(mmu_id, fb, wb_roi,
+ ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
&wb_cfg->dest);
if (ret) {
/* this error should be detected during atomic_check */
@@ -1017,15 +1018,15 @@ struct sde_encoder_phys *sde_encoder_phys_wb_init(
phys_enc = &wb_enc->base;
if (p->sde_kms->vbif[VBIF_NRT]) {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_NRT_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
} else {
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_UNSECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
- wb_enc->mmu_id[SDE_IOMMU_DOMAIN_SECURE] =
- p->sde_kms->mmu_id[MSM_SMMU_DOMAIN_SECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+ wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+ p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
}
hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
index 41180f5dec12..dc7827872276 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.c
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -102,169 +102,169 @@ flg, fm, np) \
static const struct sde_format sde_format_map[] = {
INTERLEAVED_RGB_FMT(ARGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 4, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 3, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR888,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 3, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGB565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR1555,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX5551,
COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ARGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(ABGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRA4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
true, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XRGB4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(XBGR4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(RGBX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
INTERLEAVED_RGB_FMT(BGRX4444,
COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
- C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
false, 2, 0,
SDE_FETCH_LINEAR, 1),
@@ -366,13 +366,13 @@ static const struct sde_format sde_format_map[] = {
PLANAR_YUV_FMT(YUV420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C0_G_Y, C1_B_Cb, C2_R_Cr,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
SDE_FETCH_LINEAR, 3),
PLANAR_YUV_FMT(YVU420,
0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
- C0_G_Y, C2_R_Cr, C1_B_Cb,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
SDE_FETCH_LINEAR, 3),
};
@@ -384,19 +384,19 @@ static const struct sde_format sde_format_map[] = {
* the data will be passed by user-space.
*/
static const struct sde_format sde_format_map_ubwc[] = {
- INTERLEAVED_RGB_FMT(RGB565,
+ INTERLEAVED_RGB_FMT(BGR565,
0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
false, 2, 0,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBA8888,
+ INTERLEAVED_RGB_FMT(ABGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
true, 4, 0,
SDE_FETCH_UBWC, 2),
- INTERLEAVED_RGB_FMT(RGBX8888,
+ INTERLEAVED_RGB_FMT(XBGR8888,
COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
false, 4, 0,
@@ -513,14 +513,15 @@ static int _sde_format_get_plane_sizes_ubwc(
ALIGN(DIV_ROUND_UP(height / 2, uv_tile_height), 16),
4096);
- } else if (fmt->base.pixel_format == DRM_FORMAT_RGBA8888 ||
- fmt->base.pixel_format == DRM_FORMAT_RGBX8888 ||
- fmt->base.pixel_format == DRM_FORMAT_RGBA1010102 ||
- fmt->base.pixel_format == DRM_FORMAT_RGBX1010102 ||
- fmt->base.pixel_format == DRM_FORMAT_RGB565) {
+ } else if (fmt->base.pixel_format == DRM_FORMAT_ABGR8888 ||
+ fmt->base.pixel_format == DRM_FORMAT_XBGR8888 ||
+ fmt->base.pixel_format == DRM_FORMAT_BGRA1010102 ||
+ fmt->base.pixel_format == DRM_FORMAT_BGRX1010102 ||
+ fmt->base.pixel_format == DRM_FORMAT_BGR565) {
+
uint32_t stride_alignment, aligned_bitstream_width;
- if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
+ if (fmt->base.pixel_format == DRM_FORMAT_BGR565)
stride_alignment = 128;
else
stride_alignment = 64;
@@ -630,7 +631,7 @@ static int _sde_format_get_plane_sizes(
}
static int _sde_format_populate_addrs_ubwc(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -641,7 +642,7 @@ static int _sde_format_populate_addrs_ubwc(
return -EINVAL;
}
- base_addr = msm_framebuffer_iova(fb, mmu_id, 0);
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
if (!base_addr) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -711,7 +712,7 @@ static int _sde_format_populate_addrs_ubwc(
}
static int _sde_format_populate_addrs_linear(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -728,7 +729,7 @@ static int _sde_format_populate_addrs_linear(
/* Populate addresses for simple formats here */
for (i = 0; i < layout->num_planes; ++i) {
- layout->plane_addr[i] = msm_framebuffer_iova(fb, mmu_id, i);
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
if (!layout->plane_addr[i]) {
DRM_ERROR("failed to retrieve base addr\n");
return -EFAULT;
@@ -739,7 +740,7 @@ static int _sde_format_populate_addrs_linear(
}
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *layout)
{
@@ -770,9 +771,9 @@ int sde_format_populate_layout(
/* Populate the addresses given the fb */
if (SDE_FORMAT_IS_UBWC(layout->format))
- ret = _sde_format_populate_addrs_ubwc(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
else
- ret = _sde_format_populate_addrs_linear(mmu_id, fb, layout);
+ ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
/* check if anything changed */
if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
@@ -814,14 +815,14 @@ static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
}
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *layout)
{
int ret;
- ret = sde_format_populate_layout(mmu_id, fb, layout);
+ ret = sde_format_populate_layout(aspace, fb, layout);
if (ret || !roi)
return ret;
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
index 5dcdfbb653ed..0de081d619b7 100644
--- a/drivers/gpu/drm/msm/sde/sde_formats.h
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#define _SDE_FORMATS_H
#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
#include "sde_hw_mdss.h"
/**
@@ -76,7 +77,7 @@ int sde_format_check_modified_format(
/**
* sde_format_populate_layout - populate the given format layout based on
* mmu, fb, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @fmtl: format layout structure to populate
*
@@ -84,14 +85,14 @@ int sde_format_check_modified_format(
* are the same as before or 0 if new addresses were populated
*/
int sde_format_populate_layout(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_hw_fmt_layout *fmtl);
/**
* sde_format_populate_layout_with_roi - populate the given format layout
* based on mmu, fb, roi, and format found in the fb
- * @mmu_id: mmu id handle
+ * @aspace: address space pointer
* @fb: framebuffer pointer
* @roi: region of interest (optional)
* @fmtl: format layout structure to populate
@@ -99,7 +100,7 @@ int sde_format_populate_layout(
* Return: error code on failure, 0 on success
*/
int sde_format_populate_layout_with_roi(
- int mmu_id,
+ struct msm_gem_address_space *aspace,
struct drm_framebuffer *fb,
struct sde_rect *roi,
struct sde_hw_fmt_layout *fmtl);
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
index 909d6df38260..eeb7a0002eab 100644
--- a/drivers/gpu/drm/msm/sde/sde_irq.c
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -49,86 +49,14 @@ irqreturn_t sde_irq(struct msm_kms *kms)
return IRQ_HANDLED;
}
-static void sde_hw_irq_mask(struct irq_data *irqd)
-{
- struct sde_kms *sde_kms;
-
- if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
- SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
- return;
- }
- sde_kms = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static void sde_hw_irq_unmask(struct irq_data *irqd)
-{
- struct sde_kms *sde_kms;
-
- if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
- SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
- return;
- }
- sde_kms = irq_data_get_irq_chip_data(irqd);
-
- smp_mb__before_atomic();
- set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
- smp_mb__after_atomic();
-}
-
-static struct irq_chip sde_hw_irq_chip = {
- .name = "sde",
- .irq_mask = sde_hw_irq_mask,
- .irq_unmask = sde_hw_irq_unmask,
-};
-
-static int sde_hw_irqdomain_map(struct irq_domain *domain,
- unsigned int irq, irq_hw_number_t hwirq)
-{
- struct sde_kms *sde_kms;
- int rc;
-
- if (!domain || !domain->host_data) {
- SDE_ERROR("invalid parameters domain %d\n", domain != 0);
- return -EINVAL;
- }
- sde_kms = domain->host_data;
-
- irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
- rc = irq_set_chip_data(irq, sde_kms);
-
- return rc;
-}
-
-static struct irq_domain_ops sde_hw_irqdomain_ops = {
- .map = sde_hw_irqdomain_map,
- .xlate = irq_domain_xlate_onecell,
-};
-
void sde_irq_preinstall(struct msm_kms *kms)
{
struct sde_kms *sde_kms = to_sde_kms(kms);
- struct device *dev;
- struct irq_domain *domain;
if (!sde_kms->dev || !sde_kms->dev->dev) {
pr_err("invalid device handles\n");
return;
}
- dev = sde_kms->dev->dev;
-
- domain = irq_domain_add_linear(dev->of_node, 32,
- &sde_hw_irqdomain_ops, sde_kms);
- if (!domain) {
- pr_err("failed to add irq_domain\n");
- return;
- }
-
- sde_kms->irq_controller.enabled_mask = 0;
- sde_kms->irq_controller.domain = domain;
sde_core_irq_preinstall(sde_kms);
}
@@ -158,9 +86,5 @@ void sde_irq_uninstall(struct msm_kms *kms)
}
sde_core_irq_uninstall(sde_kms);
-
- if (sde_kms->irq_controller.domain) {
- irq_domain_remove(sde_kms->irq_controller.domain);
- sde_kms->irq_controller.domain = NULL;
- }
+ sde_core_irq_domain_fini(sde_kms);
}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
index e40e33f11fd9..581918da183f 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.c
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -27,6 +27,7 @@
#include "dsi_display.h"
#include "dsi_drm.h"
#include "sde_wb.h"
+#include "sde_hdmi.h"
#include "sde_kms.h"
#include "sde_core_irq.h"
@@ -504,8 +505,30 @@ static int _sde_kms_get_displays(struct sde_kms *sde_kms)
wb_display_get_displays(sde_kms->wb_displays,
sde_kms->wb_display_count);
}
+
+ /* hdmi */
+ sde_kms->hdmi_displays = NULL;
+ sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
+ SDE_DEBUG("hdmi display count=%d", sde_kms->hdmi_display_count);
+ if (sde_kms->hdmi_display_count) {
+ sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
+ sizeof(void *),
+ GFP_KERNEL);
+ if (!sde_kms->hdmi_displays) {
+ SDE_ERROR("failed to allocate hdmi displays\n");
+ goto exit_deinit_hdmi;
+ }
+ sde_kms->hdmi_display_count =
+ sde_hdmi_get_displays(sde_kms->hdmi_displays,
+ sde_kms->hdmi_display_count);
+ }
+
return 0;
+exit_deinit_hdmi:
+ sde_kms->hdmi_display_count = 0;
+ sde_kms->hdmi_displays = NULL;
+
exit_deinit_wb:
kfree(sde_kms->wb_displays);
sde_kms->wb_display_count = 0;
@@ -528,6 +551,9 @@ static void _sde_kms_release_displays(struct sde_kms *sde_kms)
SDE_ERROR("invalid sde kms\n");
return;
}
+ kfree(sde_kms->hdmi_displays);
+ sde_kms->hdmi_display_count = 0;
+ sde_kms->hdmi_displays = NULL;
kfree(sde_kms->wb_displays);
sde_kms->wb_displays = NULL;
@@ -565,18 +591,30 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
.set_property = sde_wb_connector_set_property,
.get_info = sde_wb_get_info,
};
- struct msm_display_info info;
+ static const struct sde_connector_ops hdmi_ops = {
+ .pre_deinit = sde_hdmi_connector_pre_deinit,
+ .post_init = sde_hdmi_connector_post_init,
+ .detect = sde_hdmi_connector_detect,
+ .get_modes = sde_hdmi_connector_get_modes,
+ .mode_valid = sde_hdmi_mode_valid,
+ .get_info = sde_hdmi_get_info,
+ };
+ struct msm_display_info info = {0};
struct drm_encoder *encoder;
void *display, *connector;
int i, max_encoders;
int rc = 0;
+ int connector_poll;
if (!dev || !priv || !sde_kms) {
SDE_ERROR("invalid argument(s)\n");
return -EINVAL;
}
- max_encoders = sde_kms->dsi_display_count + sde_kms->wb_display_count;
+ max_encoders = sde_kms->dsi_display_count +
+ sde_kms->wb_display_count +
+ sde_kms->hdmi_display_count;
+
if (max_encoders > ARRAY_SIZE(priv->encoders)) {
max_encoders = ARRAY_SIZE(priv->encoders);
SDE_ERROR("capping number of displays to %d", max_encoders);
@@ -666,6 +704,57 @@ static int _sde_kms_setup_displays(struct drm_device *dev,
}
}
+ /* hdmi */
+ for (i = 0; i < sde_kms->hdmi_display_count &&
+ priv->num_encoders < max_encoders; ++i) {
+ display = sde_kms->hdmi_displays[i];
+ encoder = NULL;
+
+ memset(&info, 0x0, sizeof(info));
+ rc = sde_hdmi_dev_init(display);
+ if (rc) {
+ SDE_ERROR("hdmi dev_init %d failed\n", i);
+ continue;
+ }
+ rc = sde_hdmi_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("hdmi get_info %d failed\n", i);
+ continue;
+ }
+ if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+ connector_poll = DRM_CONNECTOR_POLL_HPD;
+ else
+ connector_poll = 0;
+ encoder = sde_encoder_init(dev, &info);
+ if (IS_ERR_OR_NULL(encoder)) {
+ SDE_ERROR("encoder init failed for hdmi %d\n", i);
+ continue;
+ }
+
+ rc = sde_hdmi_drm_init(display, encoder);
+ if (rc) {
+ SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
+ sde_encoder_destroy(encoder);
+ continue;
+ }
+
+ connector = sde_connector_init(dev,
+ encoder,
+ 0,
+ display,
+ &hdmi_ops,
+ connector_poll,
+ DRM_MODE_CONNECTOR_HDMIA);
+ if (connector) {
+ priv->encoders[priv->num_encoders++] = encoder;
+ } else {
+ SDE_ERROR("hdmi %d connector init failed\n", i);
+ sde_hdmi_dev_deinit(display);
+ sde_hdmi_drm_deinit(display);
+ sde_encoder_destroy(encoder);
+ }
+ }
+
return 0;
}
@@ -726,6 +815,9 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
priv = dev->dev_private;
catalog = sde_kms->catalog;
+ ret = sde_core_irq_domain_add(sde_kms);
+ if (ret)
+ goto fail_irq;
/*
* Query for underlying display drivers, and create connectors,
* bridges and encoders for them.
@@ -784,6 +876,8 @@ static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
return 0;
fail:
_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+ sde_core_irq_domain_fini(sde_kms);
return ret;
}
@@ -940,17 +1034,16 @@ static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
struct msm_mmu *mmu;
int i;
- for (i = ARRAY_SIZE(sde_kms->mmu_id) - 1; i >= 0; i--) {
- mmu = sde_kms->aspace[i]->mmu;
-
- if (!mmu)
+ for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+ if (!sde_kms->aspace[i])
continue;
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(sde_kms->aspace[i]);
+ mmu = sde_kms->aspace[i]->mmu;
- sde_kms->mmu_id[i] = 0;
+ mmu->funcs->detach(mmu);
+ msm_gem_address_space_put(sde_kms->aspace[i]);
+
+ sde_kms->aspace[i] = NULL;
}
return 0;
@@ -987,21 +1080,10 @@ static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
ARRAY_SIZE(iommu_ports));
if (ret) {
SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
- msm_gem_address_space_destroy(aspace);
+ msm_gem_address_space_put(aspace);
goto fail;
}
- sde_kms->mmu_id[i] = msm_register_address_space(sde_kms->dev,
- aspace);
- if (sde_kms->mmu_id[i] < 0) {
- ret = sde_kms->mmu_id[i];
- SDE_ERROR("failed to register sde iommu %d: %d\n",
- i, ret);
- mmu->funcs->detach(mmu, (const char **)iommu_ports,
- ARRAY_SIZE(iommu_ports));
- msm_gem_address_space_destroy(aspace);
- goto fail;
- }
}
return 0;
@@ -1146,6 +1228,14 @@ static int sde_kms_hw_init(struct msm_kms *kms)
goto perf_err;
}
+ sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+ if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+ rc = PTR_ERR(sde_kms->hw_intr);
+ SDE_ERROR("hw_intr init failed: %d\n", rc);
+ sde_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
/*
* _sde_kms_drm_obj_init should create the DRM related objects
* i.e. CRTCs, planes, encoders, connectors and so forth
@@ -1171,21 +1261,12 @@ static int sde_kms_hw_init(struct msm_kms *kms)
*/
dev->mode_config.allow_fb_modifiers = true;
- sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
- if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
- rc = PTR_ERR(sde_kms->hw_intr);
- SDE_ERROR("hw_intr init failed: %d\n", rc);
- sde_kms->hw_intr = NULL;
- goto hw_intr_init_err;
- }
-
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
return 0;
-hw_intr_init_err:
- _sde_kms_drm_obj_destroy(sde_kms);
drm_obj_init_err:
sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
perf_err:
power_error:
sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
index 8663c632699b..44f6be959ac9 100644
--- a/drivers/gpu/drm/msm/sde/sde_kms.h
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -123,7 +123,6 @@ struct sde_kms {
struct sde_mdss_cfg *catalog;
struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
- int mmu_id[MSM_SMMU_DOMAIN_MAX];
struct sde_power_client *core_client;
/* directory entry for debugfs */
@@ -155,8 +154,9 @@ struct sde_kms {
void **dsi_displays;
int wb_display_count;
void **wb_displays;
-
bool has_danger_ctrl;
+ void **hdmi_displays;
+ int hdmi_display_count;
};
struct vsync_info {
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
index b3de45302dea..114acfd7a173 100644
--- a/drivers/gpu/drm/msm/sde/sde_plane.c
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -86,7 +86,7 @@ enum sde_plane_qos {
struct sde_plane {
struct drm_plane base;
- int mmu_id;
+ struct msm_gem_address_space *aspace;
struct mutex lock;
@@ -580,7 +580,7 @@ static inline void _sde_plane_set_scanout(struct drm_plane *plane,
return;
}
- ret = sde_format_populate_layout(psde->mmu_id, fb, &pipe_cfg->layout);
+ ret = sde_format_populate_layout(psde->aspace, fb, &pipe_cfg->layout);
if (ret == -EAGAIN)
SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
else if (ret)
@@ -1285,7 +1285,7 @@ static int sde_plane_prepare_fb(struct drm_plane *plane,
return 0;
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
- return msm_framebuffer_prepare(fb, psde->mmu_id);
+ return msm_framebuffer_prepare(fb, psde->aspace);
}
static void sde_plane_cleanup_fb(struct drm_plane *plane,
@@ -1294,11 +1294,11 @@ static void sde_plane_cleanup_fb(struct drm_plane *plane,
struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
- if (!fb)
+ if (!fb || !psde)
return;
SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
- msm_framebuffer_cleanup(fb, psde->mmu_id);
+ msm_framebuffer_cleanup(fb, psde->aspace);
}
static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
@@ -2384,7 +2384,7 @@ struct drm_plane *sde_plane_init(struct drm_device *dev,
/* cache local stuff for later */
plane = &psde->base;
psde->pipe = pipe;
- psde->mmu_id = kms->mmu_id[MSM_SMMU_DOMAIN_UNSECURE];
+ psde->aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
/* initialize underlying h/w driver */
psde->pipe_hw = sde_hw_sspp_init(pipe, kms->mmio, kms->catalog);
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
new file mode 100644
index 000000000000..70a42254a909
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/sde_io_util.h>
+
+#define MAX_I2C_CMDS 16
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+ u32 in_val;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ writel_relaxed(value, io->base + offset);
+ if (debug) {
+ in_val = readl_relaxed(io->base + offset);
+ DEV_DBG("[%08x] => %08x [%08x]\n",
+ (u32)(unsigned long)(io->base + offset),
+ value, in_val);
+ }
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+ u32 value;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ value = readl_relaxed(io->base + offset);
+ if (debug)
+ DEV_DBG("[%08x] <= %08x\n",
+ (u32)(unsigned long)(io->base + offset), value);
+
+ return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+ u32 debug)
+{
+ if (debug)
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+ (void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+ unsigned int type, const char *name)
+{
+ struct resource *res = NULL;
+
+ res = platform_get_resource_byname(pdev, type, name);
+ if (!res)
+ DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+ return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+ struct dss_io_data *io_data, const char *name)
+{
+ struct resource *res = NULL;
+
+ if (!pdev || !io_data) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
+ __builtin_return_address(0), __func__, name);
+ return -ENODEV;
+ }
+
+ io_data->len = (u32)resource_size(res);
+ io_data->base = ioremap(res->start, io_data->len);
+ if (!io_data->base) {
+ DEV_ERR("%pS->%s: '%s' ioremap failed\n",
+ __builtin_return_address(0), __func__, name);
+ return -EIO;
+ }
+
+ return 0;
+} /* msm_dss_ioremap_byname */
+EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+ if (!io_data) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (io_data->base) {
+ iounmap(io_data->base);
+ io_data->base = NULL;
+ }
+ io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+ int num_vreg, int config)
+{
+ int i = 0, rc = 0;
+ struct dss_vreg *curr_vreg = NULL;
+ enum dss_vreg_type type;
+
+ if (!in_vreg || !num_vreg)
+ return rc;
+
+ if (config) {
+ for (i = 0; i < num_vreg; i++) {
+ curr_vreg = &in_vreg[i];
+ curr_vreg->vreg = regulator_get(dev,
+ curr_vreg->vreg_name);
+ rc = PTR_RET(curr_vreg->vreg);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ curr_vreg->vreg_name, rc);
+ curr_vreg->vreg = NULL;
+ goto vreg_get_fail;
+ }
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ rc = regulator_set_voltage(
+ curr_vreg->vreg,
+ curr_vreg->min_voltage,
+ curr_vreg->max_voltage);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s set vltg fail\n",
+ __builtin_return_address(0),
+ __func__,
+ curr_vreg->vreg_name);
+ goto vreg_set_voltage_fail;
+ }
+ }
+ }
+ } else {
+ for (i = num_vreg-1; i >= 0; i--) {
+ curr_vreg = &in_vreg[i];
+ if (curr_vreg->vreg) {
+ type = (regulator_count_voltages(
+ curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ if (type == DSS_REG_LDO) {
+ regulator_set_voltage(curr_vreg->vreg,
+ 0, curr_vreg->max_voltage);
+ }
+ regulator_put(curr_vreg->vreg);
+ curr_vreg->vreg = NULL;
+ }
+ }
+ }
+ return 0;
+
+vreg_unconfig:
+ if (type == DSS_REG_LDO)
+ regulator_set_load(curr_vreg->vreg, 0);
+
+vreg_set_voltage_fail:
+ regulator_put(curr_vreg->vreg);
+ curr_vreg->vreg = NULL;
+
+vreg_get_fail:
+ for (i--; i >= 0; i--) {
+ curr_vreg = &in_vreg[i];
+ type = (regulator_count_voltages(curr_vreg->vreg) > 0)
+ ? DSS_REG_LDO : DSS_REG_VS;
+ goto vreg_unconfig;
+ }
+ return rc;
+} /* msm_dss_config_vreg */
+EXPORT_SYMBOL(msm_dss_config_vreg);
+
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
+{
+ int i = 0, rc = 0;
+ bool need_sleep;
+
+ if (enable) {
+ for (i = 0; i < num_vreg; i++) {
+ rc = PTR_RET(in_vreg[i].vreg);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name, rc);
+ goto vreg_set_opt_mode_fail;
+ }
+ need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
+ if (in_vreg[i].pre_on_sleep && need_sleep)
+ usleep_range(in_vreg[i].pre_on_sleep * 1000,
+ in_vreg[i].pre_on_sleep * 1000);
+ rc = regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].enable_load);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s set opt m fail\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name);
+ goto vreg_set_opt_mode_fail;
+ }
+ rc = regulator_enable(in_vreg[i].vreg);
+ if (in_vreg[i].post_on_sleep && need_sleep)
+ usleep_range(in_vreg[i].post_on_sleep * 1000,
+ in_vreg[i].post_on_sleep * 1000);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s enable failed\n",
+ __builtin_return_address(0), __func__,
+ in_vreg[i].vreg_name);
+ goto disable_vreg;
+ }
+ }
+ } else {
+ for (i = num_vreg-1; i >= 0; i--) {
+ if (in_vreg[i].pre_off_sleep)
+ usleep_range(in_vreg[i].pre_off_sleep * 1000,
+ in_vreg[i].pre_off_sleep * 1000);
+ regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].disable_load);
+ regulator_disable(in_vreg[i].vreg);
+ if (in_vreg[i].post_off_sleep)
+ usleep_range(in_vreg[i].post_off_sleep * 1000,
+ in_vreg[i].post_off_sleep * 1000);
+ }
+ }
+ return rc;
+
+disable_vreg:
+ regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);
+
+vreg_set_opt_mode_fail:
+ for (i--; i >= 0; i--) {
+ if (in_vreg[i].pre_off_sleep)
+ usleep_range(in_vreg[i].pre_off_sleep * 1000,
+ in_vreg[i].pre_off_sleep * 1000);
+ regulator_set_load(in_vreg[i].vreg,
+ in_vreg[i].disable_load);
+ regulator_disable(in_vreg[i].vreg);
+ if (in_vreg[i].post_off_sleep)
+ usleep_range(in_vreg[i].post_off_sleep * 1000,
+ in_vreg[i].post_off_sleep * 1000);
+ }
+
+ return rc;
+} /* msm_dss_enable_vreg */
+EXPORT_SYMBOL(msm_dss_enable_vreg);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
+{
+ int i = 0, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_gpio; i++) {
+ DEV_DBG("%pS->%s: %s enable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+
+ rc = gpio_request(in_gpio[i].gpio,
+ in_gpio[i].gpio_name);
+ if (rc < 0) {
+ DEV_ERR("%pS->%s: %s enable failed\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+ goto disable_gpio;
+ }
+ gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
+ }
+ } else {
+ for (i = num_gpio-1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: %s disable\n",
+ __builtin_return_address(0), __func__,
+ in_gpio[i].gpio_name);
+ if (in_gpio[i].gpio)
+ gpio_free(in_gpio[i].gpio);
+ }
+ }
+ return rc;
+
+disable_gpio:
+ for (i--; i >= 0; i--)
+ if (in_gpio[i].gpio)
+ gpio_free(in_gpio[i].gpio);
+
+ return rc;
+} /* msm_dss_enable_gpio */
+EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_RET(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ msm_dss_put_clk(clk_arry, num_clk);
+
+ return rc;
+} /* msm_dss_get_clk */
+EXPORT_SYMBOL(msm_dss_get_clk);
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
+
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf)
+{
+ struct i2c_msg msgs[2];
+ int ret = -1;
+
+ pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].buf = &reg_offset;
+ msgs[0].len = 1;
+
+ msgs[1].addr = slave_addr >> 1;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = read_buf;
+ msgs[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 1) {
+ pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+ return -EACCES;
+ }
+ pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+ return 0;
+}
+EXPORT_SYMBOL(sde_i2c_byte_read);
+
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value)
+{
+ struct i2c_msg msgs[1];
+ uint8_t data[2];
+ int status = -EACCES;
+
+ pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ data[0] = reg_offset;
+ data[1] = *value;
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].len = 2;
+ msgs[0].buf = data;
+
+ status = i2c_transfer(client->adapter, msgs, 1);
+ if (status < 1) {
+ pr_err("I2C WRITE FAILED=[%d]\n", status);
+ return -EACCES;
+ }
+ pr_debug("%s: I2C write status=%x\n", __func__, status);
+ return status;
+}
+EXPORT_SYMBOL(sde_i2c_byte_write);
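[Editor's note] Both helpers take the 8-bit (write-form) slave address and shift it down to the 7-bit address the I2C core expects, so callers pass the address exactly as documented for the part. A minimal read-modify-write sketch (the client, slave address 0x72 and register 0x0a are hypothetical):

static int example_update_register(struct i2c_client *client)
{
	uint8_t val;
	int rc;

	rc = sde_i2c_byte_read(client, 0x72, 0x0a, &val);
	if (rc)
		return rc;

	val |= 0x01;	/* set the hypothetical enable bit */
	rc = sde_i2c_byte_write(client, 0x72, 0x0a, &val);

	/* byte_write returns the positive transfer count on success */
	return (rc < 0) ? rc : 0;
}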
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
index 3c82a261e3fb..ab65283ceafc 100644
--- a/drivers/gpu/drm/msm/sde_power_handle.c
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -24,7 +24,7 @@
#include <linux/msm-bus.h>
#include <linux/msm-bus-board.h>
-#include <linux/mdss_io_util.h>
+#include <linux/sde_io_util.h>
#include "sde_power_handle.h"
#include "sde_trace.h"
diff --git a/drivers/gpu/msm/adreno_a5xx.c b/drivers/gpu/msm/adreno_a5xx.c
index 973884c2c5e7..b58391adf3ab 100644
--- a/drivers/gpu/msm/adreno_a5xx.c
+++ b/drivers/gpu/msm/adreno_a5xx.c
@@ -161,6 +161,7 @@ static const struct {
{ adreno_is_a530, a530_efuse_speed_bin },
{ adreno_is_a505, a530_efuse_speed_bin },
{ adreno_is_a512, a530_efuse_speed_bin },
+ { adreno_is_a508, a530_efuse_speed_bin },
};
static void a5xx_check_features(struct adreno_device *adreno_dev)
diff --git a/drivers/gpu/msm/adreno_debugfs.c b/drivers/gpu/msm/adreno_debugfs.c
index fffe08038bcd..5306303b8d15 100644
--- a/drivers/gpu/msm/adreno_debugfs.c
+++ b/drivers/gpu/msm/adreno_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -168,6 +168,7 @@ static const struct flag_entry context_flags[] = {KGSL_CONTEXT_FLAGS};
* KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC so it is ok to cross the streams here.
*/
static const struct flag_entry context_priv[] = {
+ { KGSL_CONTEXT_PRIV_SUBMITTED, "submitted"},
{ KGSL_CONTEXT_PRIV_DETACHED, "detached"},
{ KGSL_CONTEXT_PRIV_INVALID, "invalid"},
{ KGSL_CONTEXT_PRIV_PAGEFAULT, "pagefault"},
diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c
index 1cbd2ef4b6b4..21a6399ba38e 100644
--- a/drivers/gpu/msm/adreno_drawctxt.c
+++ b/drivers/gpu/msm/adreno_drawctxt.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -217,10 +217,12 @@ static int adreno_drawctxt_wait_rb(struct adreno_device *adreno_dev,
BUG_ON(!mutex_is_locked(&device->mutex));
/*
- * If the context is invalid then return immediately - we may end up
- * waiting for a timestamp that will never come
+ * If the context is invalid or has not submitted any commands to
+ * the GPU, then return immediately - we may end up waiting for a
+ * timestamp that will never come
*/
- if (kgsl_context_invalid(context))
+ if (kgsl_context_invalid(context) ||
+ !test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv))
goto done;
trace_adreno_drawctxt_wait_start(drawctxt->rb->id, context->id,
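[Editor's note] KGSL_CONTEXT_PRIV_SUBMITTED is produced and consumed at exactly two sites in this series; in sketch form:

/*
 * adreno_ringbuffer_submitcmd(): on a successful submission,
 *     set_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv);
 *
 * adreno_drawctxt_wait_rb(): bail out early when
 *     !test_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv),
 * since such a context can never produce the awaited timestamp.
 */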
diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c
index fc0602a60ac1..161b718b8a38 100644
--- a/drivers/gpu/msm/adreno_ringbuffer.c
+++ b/drivers/gpu/msm/adreno_ringbuffer.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -959,6 +959,7 @@ int adreno_ringbuffer_submitcmd(struct adreno_device *adreno_dev,
drawobj->timestamp, time);
if (!ret) {
+ set_bit(KGSL_CONTEXT_PRIV_SUBMITTED, &context->priv);
cmdobj->global_ts = drawctxt->internal_timestamp;
/* Put the timevalues in the profiling buffer */
diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c
index 601e7a23101b..1de8e212a703 100644
--- a/drivers/gpu/msm/kgsl.c
+++ b/drivers/gpu/msm/kgsl.c
@@ -4753,6 +4753,7 @@ error_close_mmu:
error_pwrctrl_close:
kgsl_pwrctrl_close(device);
error:
+ kgsl_device_debugfs_close(device);
_unregister_device(device);
return status;
}
@@ -4782,6 +4783,7 @@ void kgsl_device_platform_remove(struct kgsl_device *device)
kgsl_pwrctrl_close(device);
+ kgsl_device_debugfs_close(device);
_unregister_device(device);
}
EXPORT_SYMBOL(kgsl_device_platform_remove);
diff --git a/drivers/gpu/msm/kgsl_debugfs.c b/drivers/gpu/msm/kgsl_debugfs.c
index 7758fc956055..37d92428f02c 100644
--- a/drivers/gpu/msm/kgsl_debugfs.c
+++ b/drivers/gpu/msm/kgsl_debugfs.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2008-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -87,6 +87,11 @@ void kgsl_device_debugfs_init(struct kgsl_device *device)
&pwr_log_fops);
}
+void kgsl_device_debugfs_close(struct kgsl_device *device)
+{
+ debugfs_remove_recursive(device->d_debugfs);
+}
+
struct type_entry {
int type;
const char *str;
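[Editor's note] kgsl_device_debugfs_close() gives the per-device debugfs tree a teardown to match kgsl_device_debugfs_init(); debugfs_remove_recursive() is safe to call on a NULL dentry, so the close path works even if init never ran. The lifecycle pairing added by this change, in sketch form:

/*
 * probe success: kgsl_device_debugfs_init(device);
 * probe failure: kgsl_device_debugfs_close(device);  (error label)
 * removal:       kgsl_device_debugfs_close(device);
 */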
diff --git a/drivers/gpu/msm/kgsl_debugfs.h b/drivers/gpu/msm/kgsl_debugfs.h
index 34875954bb8b..949aed81581c 100644
--- a/drivers/gpu/msm/kgsl_debugfs.h
+++ b/drivers/gpu/msm/kgsl_debugfs.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2008-2011,2013,2015 The Linux Foundation.
+/* Copyright (c) 2002,2008-2011,2013,2015,2017 The Linux Foundation.
* All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
@@ -23,6 +23,7 @@ void kgsl_core_debugfs_init(void);
void kgsl_core_debugfs_close(void);
void kgsl_device_debugfs_init(struct kgsl_device *device);
+void kgsl_device_debugfs_close(struct kgsl_device *device);
extern struct dentry *kgsl_debugfs_dir;
static inline struct dentry *kgsl_get_debugfs_dir(void)
@@ -34,6 +35,7 @@ void kgsl_process_init_debugfs(struct kgsl_process_private *);
#else
static inline void kgsl_core_debugfs_init(void) { }
static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
+static inline void kgsl_device_debugfs_close(struct kgsl_device *device) { }
static inline void kgsl_core_debugfs_close(void) { }
static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
static inline void kgsl_process_init_debugfs(struct kgsl_process_private *priv)
diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h
index cb7ffd51460a..0a2c39b82781 100644
--- a/drivers/gpu/msm/kgsl_device.h
+++ b/drivers/gpu/msm/kgsl_device.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2002,2007-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2002,2007-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -322,6 +322,7 @@ struct kgsl_device {
/**
* enum bits for struct kgsl_context.priv
+ * @KGSL_CONTEXT_PRIV_SUBMITTED - The context has submitted commands to the gpu.
* @KGSL_CONTEXT_PRIV_DETACHED - The context has been destroyed by userspace
* and is no longer using the gpu.
* @KGSL_CONTEXT_PRIV_INVALID - The context has been destroyed by the kernel
@@ -331,7 +332,8 @@ struct kgsl_device {
* reserved for devices specific use.
*/
enum kgsl_context_priv {
- KGSL_CONTEXT_PRIV_DETACHED = 0,
+ KGSL_CONTEXT_PRIV_SUBMITTED = 0,
+ KGSL_CONTEXT_PRIV_DETACHED,
KGSL_CONTEXT_PRIV_INVALID,
KGSL_CONTEXT_PRIV_PAGEFAULT,
KGSL_CONTEXT_PRIV_DEVICE_SPECIFIC = 16,
diff --git a/drivers/gpu/msm/kgsl_snapshot.c b/drivers/gpu/msm/kgsl_snapshot.c
index 1caa673db6ff..7de43dd27ffe 100644
--- a/drivers/gpu/msm/kgsl_snapshot.c
+++ b/drivers/gpu/msm/kgsl_snapshot.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -156,8 +156,10 @@ static size_t snapshot_os(struct kgsl_device *device,
header->osid = KGSL_SNAPSHOT_OS_LINUX_V3;
/* Get the kernel build information */
- strlcpy(header->release, utsname()->release, sizeof(header->release));
- strlcpy(header->version, utsname()->version, sizeof(header->version));
+ strlcpy(header->release, init_utsname()->release,
+ sizeof(header->release));
+ strlcpy(header->version, init_utsname()->version,
+ sizeof(header->version));
/* Get the Unix time for the timestamp */
header->seconds = get_seconds();
diff --git a/drivers/hwtracing/coresight/Kconfig b/drivers/hwtracing/coresight/Kconfig
index ed70a980d9ac..6e72cda433db 100644
--- a/drivers/hwtracing/coresight/Kconfig
+++ b/drivers/hwtracing/coresight/Kconfig
@@ -136,6 +136,17 @@ config CORESIGHT_CTI
hardware component to another. It can also be used to pass
software generated events.
+config CORESIGHT_CTI_SAVE_DISABLE
+ bool "Turn off CTI save and restore"
+ depends on CORESIGHT_CTI
+ help
+ Turns off CoreSight CTI save and restore support for cpu CTIs. This
+ avoids voting for the clocks during probe as well as the associated
+ save and restore latency at the cost of breaking cpu CTI support on
+ targets where cpu CTIs have to be preserved across power collapse.
+
+ If unsure, say 'N' here to avoid breaking cpu CTI support.
+
config CORESIGHT_TPDA
bool "CoreSight Trace, Profiling & Diagnostics Aggregator driver"
help
diff --git a/drivers/i2c/busses/i2c-msm-v2.c b/drivers/i2c/busses/i2c-msm-v2.c
index 04b1b62f85c3..bf2a1dd7cf15 100644
--- a/drivers/i2c/busses/i2c-msm-v2.c
+++ b/drivers/i2c/busses/i2c-msm-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -50,6 +50,8 @@ static int i2c_msm_xfer_wait_for_completion(struct i2c_msm_ctrl *ctrl,
static int i2c_msm_pm_resume(struct device *dev);
static void i2c_msm_pm_suspend(struct device *dev);
static void i2c_msm_clk_path_init(struct i2c_msm_ctrl *ctrl);
+static void i2c_msm_pm_pinctrl_state(struct i2c_msm_ctrl *ctrl,
+ bool runtime_active);
/* string table for enum i2c_msm_xfer_mode_id */
const char * const i2c_msm_mode_str_tbl[] = {
@@ -2157,27 +2159,54 @@ static bool i2c_msm_xfer_next_buf(struct i2c_msm_ctrl *ctrl)
return true;
}
-static void i2c_msm_pm_clk_disable_unprepare(struct i2c_msm_ctrl *ctrl)
+static void i2c_msm_pm_clk_unprepare(struct i2c_msm_ctrl *ctrl)
{
- clk_disable_unprepare(ctrl->rsrcs.core_clk);
- clk_disable_unprepare(ctrl->rsrcs.iface_clk);
+ clk_unprepare(ctrl->rsrcs.core_clk);
+ clk_unprepare(ctrl->rsrcs.iface_clk);
}
-static int i2c_msm_pm_clk_prepare_enable(struct i2c_msm_ctrl *ctrl)
+static int i2c_msm_pm_clk_prepare(struct i2c_msm_ctrl *ctrl)
{
int ret;
- ret = clk_prepare_enable(ctrl->rsrcs.iface_clk);
+ ret = clk_prepare(ctrl->rsrcs.iface_clk);
if (ret) {
dev_err(ctrl->dev,
- "error on clk_prepare_enable(iface_clk):%d\n", ret);
+ "error on clk_prepare(iface_clk):%d\n", ret);
return ret;
}
- ret = clk_prepare_enable(ctrl->rsrcs.core_clk);
+ ret = clk_prepare(ctrl->rsrcs.core_clk);
+ if (ret) {
+ clk_unprepare(ctrl->rsrcs.iface_clk);
+ dev_err(ctrl->dev,
+ "error clk_prepare(core_clk):%d\n", ret);
+ }
+ return ret;
+}
+
+static void i2c_msm_pm_clk_disable(struct i2c_msm_ctrl *ctrl)
+{
+ clk_disable(ctrl->rsrcs.core_clk);
+ clk_disable(ctrl->rsrcs.iface_clk);
+}
+
+static int i2c_msm_pm_clk_enable(struct i2c_msm_ctrl *ctrl)
+{
+ int ret;
+
+ ret = clk_enable(ctrl->rsrcs.iface_clk);
+ if (ret) {
+ dev_err(ctrl->dev,
+ "error on clk_enable(iface_clk):%d\n", ret);
+ i2c_msm_pm_clk_unprepare(ctrl);
+ return ret;
+ }
+ ret = clk_enable(ctrl->rsrcs.core_clk);
if (ret) {
- clk_disable_unprepare(ctrl->rsrcs.iface_clk);
+ clk_disable(ctrl->rsrcs.iface_clk);
+ i2c_msm_pm_clk_unprepare(ctrl);
dev_err(ctrl->dev,
- "error clk_prepare_enable(core_clk):%d\n", ret);
+ "error clk_enable(core_clk):%d\n", ret);
}
return ret;
}
@@ -2198,6 +2227,7 @@ static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
return -EIO;
}
+ i2c_msm_pm_pinctrl_state(ctrl, true);
pm_runtime_get_sync(ctrl->dev);
/*
* if runtime PM callback was not invoked (when both runtime-pm
@@ -2208,7 +2238,7 @@ static int i2c_msm_pm_xfer_start(struct i2c_msm_ctrl *ctrl)
i2c_msm_pm_resume(ctrl->dev);
}
- ret = i2c_msm_pm_clk_prepare_enable(ctrl);
+ ret = i2c_msm_pm_clk_enable(ctrl);
if (ret) {
mutex_unlock(&ctrl->xfer.mtx);
return ret;
@@ -2235,13 +2265,14 @@ static void i2c_msm_pm_xfer_end(struct i2c_msm_ctrl *ctrl)
if (ctrl->xfer.mode_id == I2C_MSM_XFER_MODE_DMA)
i2c_msm_dma_free_channels(ctrl);
- i2c_msm_pm_clk_disable_unprepare(ctrl);
+ i2c_msm_pm_clk_disable(ctrl);
if (!pm_runtime_enabled(ctrl->dev))
i2c_msm_pm_suspend(ctrl->dev);
pm_runtime_mark_last_busy(ctrl->dev);
pm_runtime_put_autosuspend(ctrl->dev);
+ i2c_msm_pm_pinctrl_state(ctrl, false);
mutex_unlock(&ctrl->xfer.mtx);
}
@@ -2663,7 +2694,7 @@ static void i2c_msm_pm_suspend(struct device *dev)
return;
}
i2c_msm_dbg(ctrl, MSM_DBG, "suspending...");
- i2c_msm_pm_pinctrl_state(ctrl, false);
+ i2c_msm_pm_clk_unprepare(ctrl);
i2c_msm_clk_path_unvote(ctrl);
/*
@@ -2690,7 +2721,7 @@ static int i2c_msm_pm_resume(struct device *dev)
i2c_msm_dbg(ctrl, MSM_DBG, "resuming...");
i2c_msm_clk_path_vote(ctrl);
- i2c_msm_pm_pinctrl_state(ctrl, true);
+ i2c_msm_pm_clk_prepare(ctrl);
ctrl->pwr_state = I2C_MSM_PM_RT_ACTIVE;
return 0;
}
@@ -2870,9 +2901,13 @@ static int i2c_msm_probe(struct platform_device *pdev)
/* vote for clock to enable reading the version number off the HW */
i2c_msm_clk_path_vote(ctrl);
- ret = i2c_msm_pm_clk_prepare_enable(ctrl);
+ ret = i2c_msm_pm_clk_prepare(ctrl);
+ if (ret)
+ goto clk_err;
+
+ ret = i2c_msm_pm_clk_enable(ctrl);
if (ret) {
- dev_err(ctrl->dev, "error in enabling clocks:%d\n", ret);
+ i2c_msm_pm_clk_unprepare(ctrl);
goto clk_err;
}
@@ -2884,7 +2919,8 @@ static int i2c_msm_probe(struct platform_device *pdev)
if (ret)
dev_err(ctrl->dev, "error error on qup software reset\n");
- i2c_msm_pm_clk_disable_unprepare(ctrl);
+ i2c_msm_pm_clk_disable(ctrl);
+ i2c_msm_pm_clk_unprepare(ctrl);
i2c_msm_clk_path_unvote(ctrl);
ret = i2c_msm_rsrcs_gpio_pinctrl_init(ctrl);
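[Editor's note] The rework splits clk_prepare_enable() into its two halves so the potentially sleeping prepare step runs once per runtime-resume, while the lightweight enable/disable pair brackets each transfer. The resulting call pattern, in sketch form:

/*
 * runtime resume:   clk_prepare(iface_clk);   clk_prepare(core_clk);
 * transfer start:   clk_enable(iface_clk);    clk_enable(core_clk);
 * transfer end:     clk_disable(core_clk);    clk_disable(iface_clk);
 * runtime suspend:  clk_unprepare(core_clk);  clk_unprepare(iface_clk);
 */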
diff --git a/drivers/iommu/arm-smmu.c b/drivers/iommu/arm-smmu.c
index ce15e150277e..ce1eb562be36 100644
--- a/drivers/iommu/arm-smmu.c
+++ b/drivers/iommu/arm-smmu.c
@@ -249,17 +249,6 @@
#define RESUME_RETRY (0 << 0)
#define RESUME_TERMINATE (1 << 0)
-#define TTBCR2_SEP_SHIFT 15
-#define TTBCR2_SEP_UPSTREAM (0x7 << TTBCR2_SEP_SHIFT)
-
-#define TTBCR2_SEP_31 0
-#define TTBCR2_SEP_35 1
-#define TTBCR2_SEP_39 2
-#define TTBCR2_SEP_41 3
-#define TTBCR2_SEP_43 4
-#define TTBCR2_SEP_47 5
-#define TTBCR2_SEP_NOSIGN 7
-
#define TTBRn_ASID_SHIFT 48
#define FSR_MULTI (1 << 31)
@@ -1614,7 +1603,6 @@ static void arm_smmu_init_context_bank(struct arm_smmu_domain *smmu_domain,
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR);
if (smmu->version > ARM_SMMU_V1) {
reg = pgtbl_cfg->arm_lpae_s1_cfg.tcr >> 32;
- reg |= TTBCR2_SEP_UPSTREAM;
writel_relaxed(reg, cb_base + ARM_SMMU_CB_TTBCR2);
}
} else {
@@ -1745,7 +1733,9 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
struct arm_smmu_domain *smmu_domain = to_smmu_domain(domain);
struct arm_smmu_cfg *cfg = &smmu_domain->cfg;
bool is_fast = smmu_domain->attributes & (1 << DOMAIN_ATTR_FAST);
- unsigned long quirks = 0;
+ unsigned long quirks =
+ smmu_domain->attributes & (1 << DOMAIN_ATTR_ENABLE_TTBR1) ?
+ IO_PGTABLE_QUIRK_ARM_TTBR1 : 0;
if (smmu_domain->smmu)
goto out;
@@ -1837,6 +1827,7 @@ static int arm_smmu_init_domain_context(struct iommu_domain *domain,
};
fmt = ARM_MSM_SECURE;
} else {
+
smmu_domain->pgtbl_cfg = (struct io_pgtable_cfg) {
.quirks = quirks,
.pgsize_bitmap = arm_smmu_ops.pgsize_bitmap,
@@ -3140,6 +3131,12 @@ static int arm_smmu_domain_get_attr(struct iommu_domain *domain,
& (1 << DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT));
ret = 0;
break;
+ case DOMAIN_ATTR_ENABLE_TTBR1:
+ *((int *)data) = !!(smmu_domain->attributes
+ & (1 << DOMAIN_ATTR_ENABLE_TTBR1));
+ ret = 0;
+ break;
+
default:
ret = -ENODEV;
break;
@@ -3283,6 +3280,12 @@ static int arm_smmu_domain_set_attr(struct iommu_domain *domain,
ret = 0;
break;
}
+ case DOMAIN_ATTR_ENABLE_TTBR1:
+ if (*((int *)data))
+ smmu_domain->attributes |=
+ 1 << DOMAIN_ATTR_ENABLE_TTBR1;
+ ret = 0;
+ break;
default:
ret = -ENODEV;
break;
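[Editor's note] DOMAIN_ATTR_ENABLE_TTBR1 follows the usual domain-attribute pattern: it must be set before the domain is attached, because the quirk is latched into pgtbl_cfg in arm_smmu_init_domain_context(). A minimal sketch, assuming the iommu_domain_set_attr() interface of this kernel generation:

static int example_enable_ttbr1(struct iommu_domain *domain,
		struct device *dev)
{
	int one = 1;
	int ret;

	ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_ENABLE_TTBR1, &one);
	if (ret)
		return ret;

	/* the attach builds the page tables with the TTBR1 quirk set */
	return iommu_attach_device(domain, dev);
}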
diff --git a/drivers/iommu/io-pgtable-arm.c b/drivers/iommu/io-pgtable-arm.c
index 0d057ca92972..5f2b66286c0c 100644
--- a/drivers/iommu/io-pgtable-arm.c
+++ b/drivers/iommu/io-pgtable-arm.c
@@ -131,14 +131,21 @@
#define ARM_LPAE_TCR_TG0_64K (1 << 14)
#define ARM_LPAE_TCR_TG0_16K (2 << 14)
+#define ARM_LPAE_TCR_TG1_16K 1ULL
+#define ARM_LPAE_TCR_TG1_4K 2ULL
+#define ARM_LPAE_TCR_TG1_64K 3ULL
+
#define ARM_LPAE_TCR_SH0_SHIFT 12
#define ARM_LPAE_TCR_SH0_MASK 0x3
+#define ARM_LPAE_TCR_SH1_SHIFT 28
#define ARM_LPAE_TCR_SH_NS 0
#define ARM_LPAE_TCR_SH_OS 2
#define ARM_LPAE_TCR_SH_IS 3
#define ARM_LPAE_TCR_ORGN0_SHIFT 10
+#define ARM_LPAE_TCR_ORGN1_SHIFT 26
#define ARM_LPAE_TCR_IRGN0_SHIFT 8
+#define ARM_LPAE_TCR_IRGN1_SHIFT 24
#define ARM_LPAE_TCR_RGN_MASK 0x3
#define ARM_LPAE_TCR_RGN_NC 0
#define ARM_LPAE_TCR_RGN_WBWA 1
@@ -151,6 +158,9 @@
#define ARM_LPAE_TCR_T0SZ_SHIFT 0
#define ARM_LPAE_TCR_SZ_MASK 0xf
+#define ARM_LPAE_TCR_T1SZ_SHIFT 16
+#define ARM_LPAE_TCR_T1SZ_MASK 0x3f
+
#define ARM_LPAE_TCR_PS_SHIFT 16
#define ARM_LPAE_TCR_PS_MASK 0x7
@@ -167,6 +177,16 @@
#define ARM_LPAE_TCR_EPD1_SHIFT 23
#define ARM_LPAE_TCR_EPD1_FAULT 1
+#define ARM_LPAE_TCR_SEP_SHIFT (15 + 32)
+
+#define ARM_LPAE_TCR_SEP_31 0ULL
+#define ARM_LPAE_TCR_SEP_35 1ULL
+#define ARM_LPAE_TCR_SEP_39 2ULL
+#define ARM_LPAE_TCR_SEP_41 3ULL
+#define ARM_LPAE_TCR_SEP_43 4ULL
+#define ARM_LPAE_TCR_SEP_47 5ULL
+#define ARM_LPAE_TCR_SEP_UPSTREAM 7ULL
+
#define ARM_LPAE_MAIR_ATTR_SHIFT(n) ((n) << 3)
#define ARM_LPAE_MAIR_ATTR_MASK 0xff
#define ARM_LPAE_MAIR_ATTR_DEVICE 0x04
@@ -206,7 +226,7 @@ struct arm_lpae_io_pgtable {
unsigned long pg_shift;
unsigned long bits_per_level;
- void *pgd;
+ void *pgd[2];
};
typedef u64 arm_lpae_iopte;
@@ -524,14 +544,26 @@ static arm_lpae_iopte arm_lpae_prot_to_pte(struct arm_lpae_io_pgtable *data,
return pte;
}
+static inline arm_lpae_iopte *arm_lpae_get_table(
+ struct arm_lpae_io_pgtable *data, unsigned long iova)
+{
+ struct io_pgtable_cfg *cfg = &data->iop.cfg;
+
+ return ((cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) &&
+ (iova & (1UL << (cfg->ias - 1)))) ?
+ data->pgd[1] : data->pgd[0];
+}
+
static int arm_lpae_map(struct io_pgtable_ops *ops, unsigned long iova,
phys_addr_t paddr, size_t size, int iommu_prot)
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int ret, lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
+ ptep = arm_lpae_get_table(data, iova);
+
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
return 0;
@@ -554,7 +586,7 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
{
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable_cfg *cfg = &data->iop.cfg;
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int lvl = ARM_LPAE_START_LVL(data);
arm_lpae_iopte prot;
struct scatterlist *s;
@@ -563,6 +595,8 @@ static int arm_lpae_map_sg(struct io_pgtable_ops *ops, unsigned long iova,
unsigned int min_pagesz;
struct map_state ms;
+ ptep = arm_lpae_get_table(data, iova);
+
/* If no access, then nothing to do */
if (!(iommu_prot & (IOMMU_READ | IOMMU_WRITE)))
goto out_err;
@@ -672,7 +706,10 @@ static void arm_lpae_free_pgtable(struct io_pgtable *iop)
{
struct arm_lpae_io_pgtable *data = io_pgtable_to_data(iop);
- __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd);
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data), data->pgd[0]);
+ if (data->pgd[1])
+ __arm_lpae_free_pgtable(data, ARM_LPAE_START_LVL(data),
+ data->pgd[1]);
kfree(data);
}
@@ -800,9 +837,11 @@ static size_t arm_lpae_unmap(struct io_pgtable_ops *ops, unsigned long iova,
size_t unmapped = 0;
struct arm_lpae_io_pgtable *data = io_pgtable_ops_to_data(ops);
struct io_pgtable *iop = &data->iop;
- arm_lpae_iopte *ptep = data->pgd;
+ arm_lpae_iopte *ptep;
int lvl = ARM_LPAE_START_LVL(data);
+ ptep = arm_lpae_get_table(data, iova);
+
while (unmapped < size) {
size_t ret, size_to_unmap, remaining;
@@ -828,7 +867,10 @@ static int arm_lpae_iova_to_pte(struct arm_lpae_io_pgtable *data,
unsigned long iova, int *plvl_ret,
arm_lpae_iopte *ptep_ret)
{
- arm_lpae_iopte pte, *ptep = data->pgd;
+ arm_lpae_iopte pte, *ptep;
+
+ ptep = arm_lpae_get_table(data, iova);
+
*plvl_ret = ARM_LPAE_START_LVL(data);
*ptep_ret = 0;
@@ -994,6 +1036,71 @@ arm_lpae_alloc_pgtable(struct io_pgtable_cfg *cfg)
return data;
}
+static u64 arm64_lpae_setup_ttbr1(struct io_pgtable_cfg *cfg,
+ struct arm_lpae_io_pgtable *data)
+
+{
+ u64 reg;
+
+ /* If TTBR1 is disabled, disable speculative walks through TTBR1 */
+ if (!(cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1)) {
+ reg = ARM_LPAE_TCR_EPD1;
+ reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+ return reg;
+ }
+
+ if (cfg->iommu_dev && cfg->iommu_dev->archdata.dma_coherent)
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_IRGN1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_WBWA << ARM_LPAE_TCR_ORGN1_SHIFT);
+ else
+ reg = (ARM_LPAE_TCR_SH_OS << ARM_LPAE_TCR_SH1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_IRGN1_SHIFT) |
+ (ARM_LPAE_TCR_RGN_NC << ARM_LPAE_TCR_ORGN1_SHIFT);
+
+ switch (1 << data->pg_shift) {
+ case SZ_4K:
+ reg |= (ARM_LPAE_TCR_TG1_4K << 30);
+ break;
+ case SZ_16K:
+ reg |= (ARM_LPAE_TCR_TG1_16K << 30);
+ break;
+ case SZ_64K:
+ reg |= (ARM_LPAE_TCR_TG1_64K << 30);
+ break;
+ }
+
+ /* Set T1SZ */
+ reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T1SZ_SHIFT;
+
+ /* Set the SEP bit based on the size */
+ switch (cfg->ias) {
+ case 32:
+ reg |= (ARM_LPAE_TCR_SEP_31 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 36:
+ reg |= (ARM_LPAE_TCR_SEP_35 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 40:
+ reg |= (ARM_LPAE_TCR_SEP_39 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 42:
+ reg |= (ARM_LPAE_TCR_SEP_41 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 44:
+ reg |= (ARM_LPAE_TCR_SEP_43 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ case 48:
+ reg |= (ARM_LPAE_TCR_SEP_47 << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ default:
+ reg |= (ARM_LPAE_TCR_SEP_UPSTREAM << ARM_LPAE_TCR_SEP_SHIFT);
+ break;
+ }
+
+ return reg;
+}
+
static struct io_pgtable *
arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
{
@@ -1050,8 +1157,9 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
reg |= (64ULL - cfg->ias) << ARM_LPAE_TCR_T0SZ_SHIFT;
- /* Disable speculative walks through TTBR1 */
- reg |= ARM_LPAE_TCR_EPD1;
+ /* Bring in the TTBR1 configuration */
+ reg |= arm64_lpae_setup_ttbr1(cfg, data);
+
cfg->arm_lpae_s1_cfg.tcr = reg;
/* MAIRs */
@@ -1066,16 +1174,33 @@ arm_64_lpae_alloc_pgtable_s1(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s1_cfg.mair[1] = 0;
/* Looking good; allocate a pgd */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
- if (!data->pgd)
+ data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+ cookie);
+ if (!data->pgd[0])
goto out_free_data;
+
+ if (cfg->quirks & IO_PGTABLE_QUIRK_ARM_TTBR1) {
+ data->pgd[1] = __arm_lpae_alloc_pages(data->pgd_size,
+ GFP_KERNEL, cfg, cookie);
+ if (!data->pgd[1]) {
+ __arm_lpae_free_pages(data->pgd[0], data->pgd_size, cfg,
+ cookie);
+ goto out_free_data;
+ }
+ } else {
+ data->pgd[1] = NULL;
+ }
+
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
/* TTBRs */
- cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd);
- cfg->arm_lpae_s1_cfg.ttbr[1] = 0;
+ cfg->arm_lpae_s1_cfg.ttbr[0] = virt_to_phys(data->pgd[0]);
+
+ if (data->pgd[1])
+ cfg->arm_lpae_s1_cfg.ttbr[1] = virt_to_phys(data->pgd[1]);
+
return &data->iop;
out_free_data:
@@ -1155,15 +1280,16 @@ arm_64_lpae_alloc_pgtable_s2(struct io_pgtable_cfg *cfg, void *cookie)
cfg->arm_lpae_s2_cfg.vtcr = reg;
/* Allocate pgd pages */
- data->pgd = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg, cookie);
- if (!data->pgd)
+ data->pgd[0] = __arm_lpae_alloc_pages(data->pgd_size, GFP_KERNEL, cfg,
+ cookie);
+ if (!data->pgd[0])
goto out_free_data;
/* Ensure the empty pgd is visible before any actual TTBR write */
wmb();
/* VTTBR */
- cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd);
+ cfg->arm_lpae_s2_cfg.vttbr = virt_to_phys(data->pgd[0]);
return &data->iop;
out_free_data:
@@ -1261,7 +1387,7 @@ static void __init arm_lpae_dump_ops(struct io_pgtable_ops *ops)
cfg->pgsize_bitmap, cfg->ias);
pr_err("data: %d levels, 0x%zx pgd_size, %lu pg_shift, %lu bits_per_level, pgd @ %p\n",
data->levels, data->pgd_size, data->pg_shift,
- data->bits_per_level, data->pgd);
+ data->bits_per_level, data->pgd[0]);
}
#define __FAIL(ops, i) ({ \
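arm_lpae_get_table() routes an IOVA to pgd[1] only when the TTBR1 quirk is set and the top bit of the input address space is set; everything else stays on pgd[0]. A standalone userspace check of that select logic (ias and the sample IOVAs are examples; assumes 64-bit long):

#include <stdio.h>

int main(void)
{
	unsigned int ias = 48;			/* input address size in bits */
	unsigned long sign_bit = 1UL << (ias - 1);
	unsigned long iovas[] = { 0x1000UL, 0x7fffffff0000UL,
				  0x800000001000UL };
	int i;

	for (i = 0; i < 3; i++)
		printf("iova 0x%012lx -> pgd[%d]\n", iovas[i],
		       (iovas[i] & sign_bit) ? 1 : 0);
	return 0;
}

The second IOVA sits just below the bit-47 boundary and stays on pgd[0]; the third has bit 47 set and is walked through the TTBR1 table.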
diff --git a/drivers/iommu/io-pgtable.h b/drivers/iommu/io-pgtable.h
index a3f366f559a7..f4533040806f 100644
--- a/drivers/iommu/io-pgtable.h
+++ b/drivers/iommu/io-pgtable.h
@@ -62,6 +62,7 @@ struct io_pgtable_cfg {
*/
#define IO_PGTABLE_QUIRK_ARM_NS (1 << 0) /* Set NS bit in PTEs */
#define IO_PGTABLE_QUIRK_PAGE_TABLE_COHERENT (1 << 1)
+ #define IO_PGTABLE_QUIRK_ARM_TTBR1 (1 << 2) /* Allocate TTBR1 PT */
int quirks;
unsigned long pgsize_bitmap;
unsigned int ias;
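For reference, the TTBR1 half of the TCR built by arm64_lpae_setup_ttbr1() in io-pgtable-arm.c above packs T1SZ = 64 - IAS at bit 16, the TG1 granule field at bit 30, and the SEP field at bit 47. A userspace sketch of that packing for a 4K granule and a 48-bit IAS (constants mirror the macros added above):

#include <stdio.h>

#define TCR_T1SZ_SHIFT	16
#define TCR_TG1_4K	2ULL		/* ARM_LPAE_TCR_TG1_4K */
#define TCR_SEP_SHIFT	(15 + 32)	/* ARM_LPAE_TCR_SEP_SHIFT */
#define TCR_SEP_47	5ULL		/* ARM_LPAE_TCR_SEP_47 */

int main(void)
{
	unsigned int ias = 48;
	unsigned long long reg = 0;

	reg |= (64ULL - ias) << TCR_T1SZ_SHIFT;	/* T1SZ = 16 */
	reg |= TCR_TG1_4K << 30;		/* TTBR1 uses a 4K granule */
	reg |= TCR_SEP_47 << TCR_SEP_SHIFT;	/* sign-extend from bit 47 */

	printf("ttbr1 tcr bits = 0x%llx\n", reg);
	return 0;
}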
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
index 840d84388a17..bb3f0dca9d92 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -588,6 +588,12 @@ int vfe_hw_probe(struct platform_device *pdev)
}
vfe_dev->hw_info =
(struct msm_vfe_hardware_info *) match_dev->data;
+ /* Cx ipeak support */
+ if (of_find_property(pdev->dev.of_node,
+ "qcom,vfe_cx_ipeak", NULL)) {
+ vfe_dev->vfe_cx_ipeak = cx_ipeak_register(
+ pdev->dev.of_node, "qcom,vfe_cx_ipeak");
+ }
} else {
vfe_dev->hw_info = (struct msm_vfe_hardware_info *)
platform_get_device_id(pdev)->driver_data;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
index f6fabc61620d..aca8e99650ba 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp.h
@@ -29,6 +29,7 @@
#include "msm_buf_mgr.h"
#include "cam_hw_ops.h"
+#include <soc/qcom/cx_ipeak.h>
#define VFE40_8974V1_VERSION 0x10000018
#define VFE40_8974V2_VERSION 0x1001001A
@@ -767,6 +768,8 @@ struct vfe_device {
size_t num_hvx_clk;
size_t num_norm_clk;
enum cam_ahb_clk_vote ahb_vote;
+ bool turbo_vote;
+ struct cx_ipeak_client *vfe_cx_ipeak;
/* Sync variables*/
struct completion reset_complete;
diff --git a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
index efad8b740a3b..57373c1fc74c 100644
--- a/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
+++ b/drivers/media/platform/msm/camera_v2/isp/msm_isp47.c
@@ -331,6 +331,7 @@ int msm_vfe47_init_hardware(struct vfe_device *vfe_dev)
goto ahb_vote_fail;
}
vfe_dev->ahb_vote = CAM_AHB_SVS_VOTE;
+ vfe_dev->turbo_vote = 0;
vfe_dev->common_data->dual_vfe_res->vfe_base[vfe_dev->pdev->id] =
vfe_dev->vfe_base;
@@ -2556,6 +2557,7 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
{
int rc = 0;
int clk_idx = vfe_dev->hw_info->vfe_clk_idx;
+ int ret;
rc = msm_camera_clk_set_rate(&vfe_dev->pdev->dev,
vfe_dev->vfe_clk[clk_idx], *rate);
@@ -2563,7 +2565,26 @@ int msm_vfe47_set_clk_rate(struct vfe_device *vfe_dev, long *rate)
return rc;
*rate = clk_round_rate(vfe_dev->vfe_clk[clk_idx], *rate);
vfe_dev->msm_isp_vfe_clk_rate = *rate;
-
+ if (vfe_dev->vfe_cx_ipeak) {
+ if (vfe_dev->msm_isp_vfe_clk_rate >=
+ vfe_dev->vfe_clk_rates[MSM_VFE_CLK_RATE_TURBO]
+ [vfe_dev->hw_info->vfe_clk_idx] &&
+ vfe_dev->turbo_vote == 0) {
+ ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, true);
+ if (ret)
+ pr_debug("%s: cx_ipeak_update failed %d\n",
+ __func__, ret);
+ else
+ vfe_dev->turbo_vote = 1;
+ } else if (vfe_dev->turbo_vote == 1) {
+ ret = cx_ipeak_update(vfe_dev->vfe_cx_ipeak, false);
+ if (ret)
+ pr_debug("%s: cx_ipeak_update failed %d\n",
+ __func__, ret);
+ else
+ vfe_dev->turbo_vote = 0;
+ }
+ }
if (vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg)
vfe_dev->hw_info->vfe_ops.core_ops.ahb_clk_cfg(vfe_dev, NULL);
return 0;
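The cx_ipeak hunk above casts a peak-current vote once when the VFE clock reaches the turbo rate and drops it once a vote is held. A standalone sketch of that one-shot vote (the unvote branch here also re-checks the rate, which I read as the intent; rates are example numbers):

#include <stdbool.h>
#include <stdio.h>

static bool turbo_vote;

static void set_clk_rate(long rate, long turbo_rate)
{
	if (rate >= turbo_rate && !turbo_vote) {
		/* stands in for cx_ipeak_update(client, true) */
		turbo_vote = true;
		printf("rate %ld: turbo vote cast\n", rate);
	} else if (rate < turbo_rate && turbo_vote) {
		/* stands in for cx_ipeak_update(client, false) */
		turbo_vote = false;
		printf("rate %ld: turbo vote dropped\n", rate);
	}
}

int main(void)
{
	set_clk_rate(600000000, 576000000);	/* casts the vote */
	set_clk_rate(600000000, 576000000);	/* already voted: no-op */
	set_clk_rate(320000000, 576000000);	/* drops the vote */
	return 0;
}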
diff --git a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
index 3b38882c4c45..88d90d0a7c08 100644
--- a/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
+++ b/drivers/media/platform/msm/camera_v2/jpeg_dma/msm_jpeg_dma_hw.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -72,6 +72,18 @@ static const struct msm_jpegdma_block msm_jpegdma_block_sel[] = {
};
/*
+ * jpegdma_do_div - long division.
+ * @num: dividend
+ * @den: divisor
+ *
+ * Returns the quotient.
+ */
+static inline long long jpegdma_do_div(long long num, long long den)
+{
+ do_div(num, den);
+ return num;
+}
+
+/*
* msm_jpegdma_hw_read_reg - dma read from register.
* @dma: Pointer to dma device.
* @base_idx: dma memory resource index.
@@ -819,9 +831,9 @@ static int msm_jpegdma_hw_calc_speed(struct msm_jpegdma_device *dma,
}
speed->bus_ab = calc_rate * 2;
- speed->bus_ib = (real_clock *
- (MSM_JPEGDMA_BW_NUM + MSM_JPEGDMA_BW_DEN - 1)) /
- MSM_JPEGDMA_BW_DEN;
+ speed->bus_ib = jpegdma_do_div((real_clock *
+ (MSM_JPEGDMA_BW_NUM + MSM_JPEGDMA_BW_DEN - 1)),
+ MSM_JPEGDMA_BW_DEN);
speed->core_clock = real_clock;
dev_dbg(dma->dev, "Speed core clk %llu ab %llu ib %llu fps %d\n",
speed->core_clock, speed->bus_ab, speed->bus_ib, size->fps);
@@ -923,13 +935,15 @@ static int msm_jpegdma_hw_calc_config(struct msm_jpegdma_size_config *size_cfg,
in_width = size_cfg->in_size.width;
out_width = size_cfg->out_size.width;
- scale_hor = (in_width * MSM_JPEGDMA_SCALE_UNI) / out_width;
+ scale_hor = jpegdma_do_div((in_width * MSM_JPEGDMA_SCALE_UNI),
+ out_width);
if (scale_hor != MSM_JPEGDMA_SCALE_UNI)
config->scale_cfg.enable = 1;
in_height = size_cfg->in_size.height;
out_height = size_cfg->out_size.height;
- scale_ver = (in_height * MSM_JPEGDMA_SCALE_UNI) / out_height;
+ scale_ver = jpegdma_do_div((in_height * MSM_JPEGDMA_SCALE_UNI),
+ out_height);
if (scale_ver != MSM_JPEGDMA_SCALE_UNI)
config->scale_cfg.enable = 1;
@@ -946,23 +960,23 @@ static int msm_jpegdma_hw_calc_config(struct msm_jpegdma_size_config *size_cfg,
config->block_cfg.block = msm_jpegdma_block_sel[i];
if (plane->active_pipes > 1) {
- phase = (out_height * scale_ver + (plane->active_pipes - 1)) /
- plane->active_pipes;
+ phase = jpegdma_do_div((out_height * scale_ver +
+ (plane->active_pipes - 1)), plane->active_pipes);
phase &= (MSM_JPEGDMA_SCALE_UNI - 1);
- out_height = (out_height + (plane->active_pipes - 1)) /
- plane->active_pipes;
+ out_height = jpegdma_do_div((out_height +
+ (plane->active_pipes - 1)), plane->active_pipes);
in_height = (out_height * scale_ver) / MSM_JPEGDMA_SCALE_UNI;
}
- config->block_cfg.blocks_per_row = out_width /
- config->block_cfg.block.width;
+ config->block_cfg.blocks_per_row = (uint32_t) jpegdma_do_div(out_width,
+ config->block_cfg.block.width);
config->block_cfg.blocks_per_col = out_height;
config->block_cfg.h_step = config->block_cfg.block.width;
-
- config->block_cfg.h_step_last = out_width %
- config->block_cfg.block.width;
+ config->size_cfg.out_size.width = out_width;
+ config->block_cfg.h_step_last = (uint32_t) do_div(out_width,
+ config->block_cfg.block.width);
if (!config->block_cfg.h_step_last)
config->block_cfg.h_step_last = config->block_cfg.h_step;
else
@@ -974,7 +988,6 @@ static int msm_jpegdma_hw_calc_config(struct msm_jpegdma_size_config *size_cfg,
config->size_cfg = *size_cfg;
config->size_cfg.in_size.width = in_width;
config->size_cfg.in_size.height = in_height;
- config->size_cfg.out_size.width = out_width;
config->size_cfg.out_size.height = out_height;
config->in_offset = 0;
config->out_offset = 0;
@@ -1013,14 +1026,16 @@ int msm_jpegdma_hw_check_config(struct msm_jpegdma_device *dma,
in_width = size_cfg->in_size.width;
out_width = size_cfg->out_size.width;
- scale = ((in_width * MSM_JPEGDMA_SCALE_UNI)) / out_width;
+ scale = jpegdma_do_div(((in_width * MSM_JPEGDMA_SCALE_UNI)),
+ out_width);
if (scale < MSM_JPEGDMA_SCALE_UNI)
return -EINVAL;
in_height = size_cfg->in_size.height;
out_height = size_cfg->out_size.height;
- scale = (in_height * MSM_JPEGDMA_SCALE_UNI) / out_height;
+ scale = jpegdma_do_div((in_height * MSM_JPEGDMA_SCALE_UNI),
+ out_height);
if (scale < MSM_JPEGDMA_SCALE_UNI)
return -EINVAL;
@@ -1827,7 +1842,7 @@ int msm_jpegdma_hw_map_buffer(struct msm_jpegdma_device *dma, int fd,
buf->fd = fd;
ret = cam_smmu_get_phy_addr(dma->iommu_hndl, buf->fd,
- CAM_SMMU_MAP_RW, &buf->addr, &buf->size);
+ CAM_SMMU_MAP_RW, &buf->addr, (size_t *)&buf->size);
if (ret < 0) {
dev_err(dma->dev, "Can not get physical address\n");
goto error_get_phy;
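jpegdma_do_div() exists because the kernel's do_div(n, base) is not a plain division: the macro stores the quotient back into n and evaluates to the remainder, which is also why the h_step_last hunk saves out_size.width before calling do_div() on out_width. A userspace model of both behaviours (the local do_div only mimics the kernel macro; builds with the GNU statement-expression extension, as the kernel does):

#include <stdio.h>

/* Userspace stand-in for the kernel macro: quotient into n, remainder out. */
#define do_div(n, base) ({				\
	unsigned long long __rem = (n) % (base);	\
	(n) /= (base);					\
	__rem;						\
})

static long long jpegdma_do_div(long long num, long long den)
{
	do_div(num, den);
	return num;	/* the wrapper returns the quotient instead */
}

int main(void)
{
	unsigned long long out_width = 1930;

	printf("quotient  = %lld\n", jpegdma_do_div(1930, 128));	/* 15 */
	printf("remainder = %llu\n", do_div(out_width, 128));		/* 10 */
	printf("out_width = %llu\n", out_width);			/* 15 */
	return 0;
}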
diff --git a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
index 730f8b32ff1a..76a7c6942c68 100644
--- a/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
+++ b/drivers/media/platform/msm/camera_v2/msm_buf_mgr/msm_generic_buf_mgr.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -149,10 +149,7 @@ static int32_t msm_buf_mngr_buf_done(struct msm_buf_mngr_device *buf_mngr_dev,
list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
if ((bufs->session_id == buf_info->session_id) &&
(bufs->stream_id == buf_info->stream_id) &&
- (bufs->vb2_v4l2_buf->vb2_buf.index ==
- buf_info->index)) {
- bufs->vb2_v4l2_buf->sequence = buf_info->frame_id;
- bufs->vb2_v4l2_buf->timestamp = buf_info->timestamp;
+ (bufs->index == buf_info->index)) {
ret = buf_mngr_dev->vb2_ops.buf_done
(bufs->vb2_v4l2_buf,
buf_info->session_id,
@@ -181,7 +178,7 @@ static int32_t msm_buf_mngr_put_buf(struct msm_buf_mngr_device *buf_mngr_dev,
list_for_each_entry_safe(bufs, save, &buf_mngr_dev->buf_qhead, entry) {
if ((bufs->session_id == buf_info->session_id) &&
(bufs->stream_id == buf_info->stream_id) &&
- (bufs->vb2_v4l2_buf->vb2_buf.index == buf_info->index)) {
+ (bufs->index == buf_info->index)) {
ret = buf_mngr_dev->vb2_ops.put_buf(bufs->vb2_v4l2_buf,
buf_info->session_id, buf_info->stream_id);
list_del_init(&bufs->entry);
@@ -214,7 +211,7 @@ static int32_t msm_generic_buf_mngr_flush(
buf_info->session_id,
buf_info->stream_id, 0, &ts, 0);
pr_err("Bufs not flushed: str_id = %d buf_index = %d ret = %d\n",
- buf_info->stream_id, bufs->vb2_v4l2_buf->vb2_buf.index,
+ buf_info->stream_id, bufs->index,
ret);
list_del_init(&bufs->entry);
kfree(bufs);
diff --git a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
index da31dcd84b11..064c1e8c5bab 100644
--- a/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
+++ b/drivers/media/platform/msm/camera_v2/pproc/cpp/msm_cpp.c
@@ -3835,6 +3835,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
struct msm_cpp_frame_info32_t k32_frame_info;
struct msm_cpp_frame_info_t k64_frame_info;
uint32_t identity_k = 0;
+ bool is_copytouser_req = true;
void __user *up = (void __user *)arg;
if (sd == NULL) {
@@ -3969,9 +3970,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
break;
}
}
- if (copy_to_user(
- (void __user *)kp_ioctl.ioctl_ptr, &inst_info,
- sizeof(struct msm_cpp_frame_info32_t))) {
+ if (copy_to_user((void __user *)kp_ioctl.ioctl_ptr,
+ &inst_info, sizeof(struct msm_cpp_frame_info32_t))) {
mutex_unlock(&cpp_dev->mutex);
return -EFAULT;
}
@@ -4007,6 +4007,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
sizeof(struct msm_cpp_stream_buff_info_t);
}
}
+ is_copytouser_req = false;
if (cmd == VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO32)
cmd = VIDIOC_MSM_CPP_ENQUEUE_STREAM_BUFF_INFO;
else if (cmd == VIDIOC_MSM_CPP_DELETE_STREAM_BUFF32)
@@ -4021,6 +4022,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
get_user(identity_k, identity_u);
kp_ioctl.ioctl_ptr = (void *)&identity_k;
kp_ioctl.len = sizeof(uint32_t);
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_DEQUEUE_STREAM_BUFF_INFO;
break;
}
@@ -4079,6 +4081,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
sizeof(struct msm_cpp_clock_settings_t);
}
}
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_SET_CLOCK;
break;
}
@@ -4104,6 +4107,7 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
kp_ioctl.ioctl_ptr = (void *)&k_queue_buf;
kp_ioctl.len = sizeof(struct msm_pproc_queue_buf_info);
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_QUEUE_BUF;
break;
}
@@ -4128,6 +4132,8 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
k64_frame_info.frame_id = k32_frame_info.frame_id;
kp_ioctl.ioctl_ptr = (void *)&k64_frame_info;
+
+ is_copytouser_req = false;
cmd = VIDIOC_MSM_CPP_POP_STREAM_BUFFER;
break;
}
@@ -4181,13 +4187,16 @@ static long msm_cpp_subdev_fops_compat_ioctl(struct file *file,
break;
}
- up32_ioctl.id = kp_ioctl.id;
- up32_ioctl.len = kp_ioctl.len;
- up32_ioctl.trans_code = kp_ioctl.trans_code;
- up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
+ if (is_copytouser_req) {
+ up32_ioctl.id = kp_ioctl.id;
+ up32_ioctl.len = kp_ioctl.len;
+ up32_ioctl.trans_code = kp_ioctl.trans_code;
+ up32_ioctl.ioctl_ptr = ptr_to_compat(kp_ioctl.ioctl_ptr);
- if (copy_to_user((void __user *)up, &up32_ioctl, sizeof(up32_ioctl)))
- return -EFAULT;
+ if (copy_to_user((void __user *)up, &up32_ioctl,
+ sizeof(up32_ioctl)))
+ return -EFAULT;
+ }
return rc;
}
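The is_copytouser_req flag added above suppresses the final copy_to_user() for commands that repoint kp_ioctl.ioctl_ptr at kernel stack storage: translating such a pointer with ptr_to_compat() would hand userspace a meaningless, and leaky, kernel address. A condensed sketch of the guard (the struct and field names here are hypothetical stand-ins for the driver's compat types):

#include <linux/compat.h>
#include <linux/uaccess.h>

/* Hypothetical compat header mirroring the driver's 32-bit ioctl layout. */
struct ioctl32_hdr {
	unsigned int id;
	unsigned int len;
	int trans_code;
	compat_uptr_t ioctl_ptr;
};

static long copy_hdr_back(bool is_copytouser_req, unsigned int id,
			  unsigned int len, int trans_code,
			  void __user *ioctl_ptr,
			  struct ioctl32_hdr __user *up)
{
	struct ioctl32_hdr hdr;

	if (!is_copytouser_req)
		return 0;	/* ioctl_ptr points at kernel data: skip */

	hdr.id = id;
	hdr.len = len;
	hdr.trans_code = trans_code;
	hdr.ioctl_ptr = ptr_to_compat(ioctl_ptr);

	return copy_to_user(up, &hdr, sizeof(hdr)) ? -EFAULT : 0;
}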
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
index 07d522cb01bb..9120a4cc85ca 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_1_hwreg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -99,6 +99,7 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_1_3ph = {
{0x70C, 0xA5},
{0x38, 0xFE},
{0x81c, 0x2},
+ {0x700, 0x80},
};
struct csiphy_settings_t csiphy_combo_mode_v5_0_1 = {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
index 198d130b24fc..8591f0646080 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/include/msm_csiphy_5_0_hwreg.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -100,6 +100,7 @@ struct csiphy_reg_3ph_parms_t csiphy_v5_0_3ph = {
{0x70C, 0x16},
{0x38, 0xFE},
{0x81c, 0x6},
+ {0x700, 0x80},
};
struct csiphy_settings_t csiphy_combo_mode_v5_0 = {
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
index a7cd44636d1d..be266641a105 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -46,6 +46,7 @@
#define MSM_CSIPHY_DRV_NAME "msm_csiphy"
#define CLK_LANE_OFFSET 1
#define NUM_LANES_OFFSET 4
+#define CLOCK_LANE 0x02
#define CSI_3PHASE_HW 1
#define MAX_DPHY_DATA_LN 4
@@ -683,12 +684,21 @@ static int msm_csiphy_2phase_lane_config_v50(
csiphybase + csiphy_dev->ctrl_reg->
csiphy_3ph_reg.
mipi_csiphy_2ph_lnn_ctrl15.addr + offset);
- msm_camera_io_w(csiphy_dev->ctrl_reg->
- csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl0.data,
- csiphybase + csiphy_dev->ctrl_reg->
- csiphy_3ph_reg.
- mipi_csiphy_2ph_lnn_ctrl0.addr + offset);
+ if (mask == CLOCK_LANE)
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnck_ctrl0.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnck_ctrl0.addr);
+ else
+ msm_camera_io_w(csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl0.data,
+ csiphybase + csiphy_dev->ctrl_reg->
+ csiphy_3ph_reg.
+ mipi_csiphy_2ph_lnn_ctrl0.addr +
+ offset);
msm_camera_io_w(csiphy_dev->ctrl_reg->
csiphy_3ph_reg.
mipi_csiphy_2ph_lnn_cfg1.data,
diff --git a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
index 70462dcd3b12..c1a9748e8af5 100644
--- a/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
+++ b/drivers/media/platform/msm/camera_v2/sensor/csiphy/msm_csiphy.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2011-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -141,6 +141,7 @@ struct csiphy_reg_3ph_parms_t {
struct csiphy_reg_t mipi_csiphy_2ph_lnck_ctrl3;
struct csiphy_reg_t mipi_csiphy_2ph_lnn_ctrl14;
struct csiphy_reg_t mipi_csiphy_3ph_cmn_ctrl7_cphy;
+ struct csiphy_reg_t mipi_csiphy_2ph_lnck_ctrl0;
};
struct csiphy_ctrl_t {
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
index a2da663e2046..f41382b5b20c 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.c
@@ -990,11 +990,14 @@ static int sde_rotator_debug_base_release(struct inode *inode,
{
struct sde_rotator_debug_base *dbg = file->private_data;
- if (dbg && dbg->buf) {
+ if (dbg) {
+ mutex_lock(&dbg->buflock);
kfree(dbg->buf);
dbg->buf_len = 0;
dbg->buf = NULL;
+ mutex_unlock(&dbg->buflock);
}
+
return 0;
}
@@ -1026,8 +1029,10 @@ static ssize_t sde_rotator_debug_base_offset_write(struct file *file,
if (cnt > (dbg->max_offset - off))
cnt = dbg->max_offset - off;
+ mutex_lock(&dbg->buflock);
dbg->off = off;
dbg->cnt = cnt;
+ mutex_unlock(&dbg->buflock);
SDEROT_DBG("offset=%x cnt=%x\n", off, cnt);
@@ -1047,7 +1052,10 @@ static ssize_t sde_rotator_debug_base_offset_read(struct file *file,
if (*ppos)
return 0; /* the end */
+ mutex_lock(&dbg->buflock);
len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+ mutex_unlock(&dbg->buflock);
+
if (len < 0 || len >= sizeof(buf))
return 0;
@@ -1086,6 +1094,8 @@ static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
if (off >= dbg->max_offset)
return -EFAULT;
+ mutex_lock(&dbg->buflock);
+
/* Enable Clock for register access */
sde_rotator_clk_ctrl(dbg->mgr, true);
@@ -1094,6 +1104,8 @@ static ssize_t sde_rotator_debug_base_reg_write(struct file *file,
/* Disable Clock after register access */
sde_rotator_clk_ctrl(dbg->mgr, false);
+ mutex_unlock(&dbg->buflock);
+
SDEROT_DBG("addr=%zx data=%x\n", off, data);
return count;
@@ -1104,12 +1116,14 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
{
struct sde_rotator_debug_base *dbg = file->private_data;
size_t len;
+ int rc = 0;
if (!dbg) {
SDEROT_ERR("invalid handle\n");
return -ENODEV;
}
+ mutex_lock(&dbg->buflock);
if (!dbg->buf) {
char dump_buf[64];
char *ptr;
@@ -1121,7 +1135,8 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
if (!dbg->buf) {
SDEROT_ERR("not enough memory to hold reg dump\n");
- return -ENOMEM;
+ rc = -ENOMEM;
+ goto debug_read_error;
}
ptr = dbg->base + dbg->off;
@@ -1151,18 +1166,26 @@ static ssize_t sde_rotator_debug_base_reg_read(struct file *file,
dbg->buf_len = tot;
}
- if (*ppos >= dbg->buf_len)
- return 0; /* done reading */
+ if (*ppos >= dbg->buf_len) {
+ rc = 0; /* done reading */
+ goto debug_read_error;
+ }
len = min(count, dbg->buf_len - (size_t) *ppos);
if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
SDEROT_ERR("failed to copy to user\n");
- return -EFAULT;
+ rc = -EFAULT;
+ goto debug_read_error;
}
*ppos += len; /* increase offset */
+ mutex_unlock(&dbg->buflock);
return len;
+
+debug_read_error:
+ mutex_unlock(&dbg->buflock);
+ return rc;
}
static const struct file_operations sde_rotator_off_fops = {
@@ -1196,6 +1219,9 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
if (!dbg)
return -ENOMEM;
+ mutex_init(&dbg->buflock);
+ mutex_lock(&dbg->buflock);
+
if (name)
strlcpy(dbg->name, name, sizeof(dbg->name));
dbg->base = io_data->base;
@@ -1217,6 +1243,7 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
dbg->base += rot_dev->mdata->regdump ?
rot_dev->mdata->regdump[0].offset : 0;
}
+ mutex_unlock(&dbg->buflock);
strlcpy(dbgname + prefix_len, "off", sizeof(dbgname) - prefix_len);
ent_off = debugfs_create_file(dbgname, 0644, debugfs_root, dbg,
@@ -1234,7 +1261,9 @@ int sde_rotator_debug_register_base(struct sde_rotator_device *rot_dev,
goto reg_fail;
}
+ mutex_lock(&dbg->buflock);
dbg->mgr = rot_dev->mgr;
+ mutex_unlock(&dbg->buflock);
return 0;
reg_fail:
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
index c2c6f9775602..c6d0151d37de 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_debug.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -53,6 +53,7 @@ struct sde_rotator_debug_base {
char *buf;
size_t buf_len;
struct sde_rot_mgr *mgr;
+ struct mutex buflock;
};
#if defined(CONFIG_DEBUG_FS)
diff --git a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
index 915126f09a27..58cb160f118e 100644
--- a/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
+++ b/drivers/media/platform/msm/sde/rotator/sde_rotator_smmu.c
@@ -45,6 +45,13 @@ struct sde_smmu_domain {
unsigned long size;
};
+#ifndef CONFIG_FB_MSM_MDSS
+int mdss_smmu_request_mappings(msm_smmu_handler_t callback)
+{
+ return 0;
+}
+#endif
+
int sde_smmu_set_dma_direction(int dir)
{
struct sde_rot_data_type *mdata = sde_rot_get_mdata();
@@ -544,11 +551,18 @@ static int sde_smmu_fault_handler(struct iommu_domain *domain,
sde_smmu = (struct sde_smmu_client *)token;
- /* trigger rotator panic and dump */
- SDEROT_ERR("trigger rotator panic and dump, iova=0x%08lx\n", iova);
+ /* trigger rotator dump */
+ SDEROT_ERR("trigger rotator dump, iova=0x%08lx, flags=0x%x\n",
+ iova, flags);
+ SDEROT_ERR("SMMU device:%s", sde_smmu->dev->kobj.name);
- sde_rot_dump_panic();
+ /* generate dump, but no panic */
+ sde_rot_evtlog_tout_handler(false, __func__, "rot", "vbif_dbg_bus");
+ /*
+ * return -ENOSYS to allow the smmu driver to dump
+ * out useful debug info.
+ */
return rc;
}
diff --git a/drivers/misc/hdcp.c b/drivers/misc/hdcp.c
index bd21f8cca2aa..460ffc79f566 100644
--- a/drivers/misc/hdcp.c
+++ b/drivers/misc/hdcp.c
@@ -2103,7 +2103,8 @@ static void hdcp_lib_msg_recvd(struct hdcp_lib_handle *handle)
(rc == 0) && (rsp_buf->status == 0)) {
pr_debug("Got Auth_Stream_Ready, nothing sent to rx\n");
- if (!hdcp_lib_enable_encryption(handle)) {
+ if (!handle->authenticated &&
+ !hdcp_lib_enable_encryption(handle)) {
handle->authenticated = true;
cdata.cmd = HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS;
diff --git a/drivers/misc/qseecom.c b/drivers/misc/qseecom.c
index 134995c9cd3c..8d03c36858b3 100644
--- a/drivers/misc/qseecom.c
+++ b/drivers/misc/qseecom.c
@@ -7043,7 +7043,11 @@ long qseecom_ioctl(struct file *file, unsigned cmd, unsigned long arg)
break;
}
pr_debug("SET_MEM_PARAM: qseecom addr = 0x%pK\n", data);
+ mutex_lock(&app_access_lock);
+ atomic_inc(&data->ioctl_count);
ret = qseecom_set_client_mem_param(data, argp);
+ atomic_dec(&data->ioctl_count);
+ mutex_unlock(&app_access_lock);
if (ret)
pr_err("failed Qqseecom_set_mem_param request: %d\n",
ret);
diff --git a/drivers/mmc/card/block.c b/drivers/mmc/card/block.c
index 52e3e9b5b778..60b02f28a8ff 100644
--- a/drivers/mmc/card/block.c
+++ b/drivers/mmc/card/block.c
@@ -3482,15 +3482,23 @@ static void mmc_blk_cmdq_err(struct mmc_queue *mq)
/* RED error - Fatal: requires reset */
if (mrq->cmdq_req->resp_err) {
err = mrq->cmdq_req->resp_err;
+ goto reset;
+ }
+
+ /*
+ * TIMEOUT errors can happen because of an execution error
+ * in the last command, so send CMD13 to get the device status.
+ */
+ if ((mrq->cmd && (mrq->cmd->error == -ETIMEDOUT)) ||
+ (mrq->data && (mrq->data->error == -ETIMEDOUT))) {
if (mmc_host_halt(host) || mmc_host_cq_disable(host)) {
ret = get_card_status(host->card, &status, 0);
if (ret)
pr_err("%s: CMD13 failed with err %d\n",
mmc_hostname(host), ret);
}
- pr_err("%s: Response error detected with device status 0x%08x\n",
+ pr_err("%s: Timeout error detected with device status 0x%08x\n",
mmc_hostname(host), status);
- goto reset;
}
/*
diff --git a/drivers/mmc/host/cmdq_hci.c b/drivers/mmc/host/cmdq_hci.c
index 849722128ad2..96d4fbf1a823 100644
--- a/drivers/mmc/host/cmdq_hci.c
+++ b/drivers/mmc/host/cmdq_hci.c
@@ -33,8 +33,8 @@
#define DCMD_SLOT 31
#define NUM_SLOTS 32
-/* 1 sec */
-#define HALT_TIMEOUT_MS 1000
+/* 10 sec */
+#define HALT_TIMEOUT_MS 10000
static int cmdq_halt_poll(struct mmc_host *mmc, bool halt);
static int cmdq_halt(struct mmc_host *mmc, bool halt);
@@ -197,6 +197,10 @@ static void cmdq_dump_adma_mem(struct cmdq_host *cq_host)
static void cmdq_dumpregs(struct cmdq_host *cq_host)
{
struct mmc_host *mmc = cq_host->mmc;
+ int offset = 0;
+
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
MMC_TRACE(mmc,
"%s: 0x0C=0x%08x 0x10=0x%08x 0x14=0x%08x 0x18=0x%08x 0x28=0x%08x 0x2C=0x%08x 0x30=0x%08x 0x34=0x%08x 0x54=0x%08x 0x58=0x%08x 0x5C=0x%08x 0x48=0x%08x\n",
@@ -243,7 +247,7 @@ static void cmdq_dumpregs(struct cmdq_host *cq_host)
cmdq_readl(cq_host, CQCRI),
cmdq_readl(cq_host, CQCRA));
pr_err(DRV_NAME": Vendor cfg 0x%08x\n",
- cmdq_readl(cq_host, CQ_VENDOR_CFG));
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset));
pr_err(DRV_NAME ": ===========================================\n");
cmdq_dump_task_history(cq_host);
@@ -384,6 +388,12 @@ static int cmdq_enable(struct mmc_host *mmc)
cq_host->caps |= CMDQ_CAP_CRYPTO_SUPPORT |
CMDQ_TASK_DESC_SZ_128;
cqcfg |= CQ_ICE_ENABLE;
+ /*
+ * For SDHC v5.0 onwards, ICE 3.0 specific registers are added
+ * in the CQ register space, due to which a few CQ registers are
+ * shifted. Set the offset_changed flag to use the updated addresses.
+ */
+ cq_host->offset_changed = true;
}
cmdq_writel(cq_host, cqcfg, CQCFG);
@@ -818,14 +828,18 @@ static void cmdq_finish_data(struct mmc_host *mmc, unsigned int tag)
{
struct mmc_request *mrq;
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
+ int offset = 0;
+ if (cq_host->offset_changed)
+ offset = CQ_V5_VENDOR_CFG;
mrq = get_req_by_tag(cq_host, tag);
if (tag == cq_host->dcmd_slot)
mrq->cmd->resp[0] = cmdq_readl(cq_host, CQCRDCT);
if (mrq->cmdq_req->cmdq_req_flags & DCMD)
- cmdq_writel(cq_host, cmdq_readl(cq_host, CQ_VENDOR_CFG) |
- CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG);
+ cmdq_writel(cq_host,
+ cmdq_readl(cq_host, CQ_VENDOR_CFG + offset) |
+ CMDQ_SEND_STATUS_TRIGGER, CQ_VENDOR_CFG + offset);
cmdq_runtime_pm_put(cq_host);
if (!(cq_host->caps & CMDQ_CAP_CRYPTO_SUPPORT) &&
@@ -1008,6 +1022,9 @@ skip_cqterri:
if (status & CQIS_HAC) {
if (cq_host->ops->post_cqe_halt)
cq_host->ops->post_cqe_halt(mmc);
+ /* halt done: re-enable legacy interrupts */
+ if (cq_host->ops->clear_set_irqs)
+ cq_host->ops->clear_set_irqs(mmc, false);
/* halt is completed, wakeup waiting thread */
complete(&cq_host->halt_comp);
}
@@ -1065,6 +1082,7 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
{
struct cmdq_host *cq_host = (struct cmdq_host *)mmc_cmdq_private(mmc);
u32 ret = 0;
+ u32 config = 0;
int retries = 3;
cmdq_runtime_pm_get(cq_host);
@@ -1074,16 +1092,31 @@ static int cmdq_halt(struct mmc_host *mmc, bool halt)
CQCTL);
ret = wait_for_completion_timeout(&cq_host->halt_comp,
msecs_to_jiffies(HALT_TIMEOUT_MS));
- if (!ret && !(cmdq_readl(cq_host, CQCTL) & HALT)) {
- retries--;
- continue;
+ if (!ret) {
+ pr_warn("%s: %s: HAC int timeout\n",
+ mmc_hostname(mmc), __func__);
+ if ((cmdq_readl(cq_host, CQCTL) & HALT)) {
+ /*
+ * Don't retry if the CQE is halted but the irq
+ * was not triggered within the timeout period.
+ * Since we are returning an error, un-halt the
+ * CQE; the irq never fired, so no other
+ * parameters need to be set.
+ */
+ retries = 0;
+ config = cmdq_readl(cq_host, CQCTL);
+ config &= ~HALT;
+ cmdq_writel(cq_host, config, CQCTL);
+ } else {
+ pr_warn("%s: %s: retryng halt (%d)\n",
+ mmc_hostname(mmc), __func__,
+ retries);
+ retries--;
+ continue;
+ }
} else {
MMC_TRACE(mmc, "%s: halt done , retries: %d\n",
__func__, retries);
- /* halt done: re-enable legacy interrupts */
- if (cq_host->ops->clear_set_irqs)
- cq_host->ops->clear_set_irqs(mmc,
- false);
break;
}
}
diff --git a/drivers/mmc/host/cmdq_hci.h b/drivers/mmc/host/cmdq_hci.h
index 05c924ae0935..6c10ab3859d1 100644
--- a/drivers/mmc/host/cmdq_hci.h
+++ b/drivers/mmc/host/cmdq_hci.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -143,6 +143,11 @@
#define DAT_ADDR_LO(x) ((x & 0xFFFFFFFF) << 32)
#define DAT_ADDR_HI(x) ((x & 0xFFFFFFFF) << 0)
+/*
+ * Updated CQ vendor-specific register address
+ * for SDHC v5.0 onwards.
+ */
+#define CQ_V5_VENDOR_CFG 0x900
#define CQ_VENDOR_CFG 0x100
#define CMDQ_SEND_STATUS_TRIGGER (1 << 31)
@@ -175,6 +180,7 @@ struct cmdq_host {
bool enabled;
bool halted;
bool init_done;
+ bool offset_changed;
u8 *desc_base;
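Note that CQ_V5_VENDOR_CFG is an additive offset, not a replacement address: every CQ_VENDOR_CFG access above becomes CQ_VENDOR_CFG + offset once offset_changed is set, landing the register at 0xA00 on v5.0 controllers. A trivial userspace check of that arithmetic:

#include <stdio.h>

#define CQ_VENDOR_CFG		0x100
#define CQ_V5_VENDOR_CFG	0x900

int main(void)
{
	int offset_changed = 1;	/* SDHC v5.0+: ICE 3.0 shifted CQ registers */
	int offset = offset_changed ? CQ_V5_VENDOR_CFG : 0;

	printf("vendor cfg register at 0x%x\n", CQ_VENDOR_CFG + offset);
	return 0;
}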
diff --git a/drivers/mmc/host/sdhci-msm.c b/drivers/mmc/host/sdhci-msm.c
index 2cad8776411c..15d1eface2d4 100644
--- a/drivers/mmc/host/sdhci-msm.c
+++ b/drivers/mmc/host/sdhci-msm.c
@@ -3289,6 +3289,8 @@ static void sdhci_msm_cmdq_dump_debug_ram(struct sdhci_host *host)
/* registers offset changed starting from 4.2.0 */
int offset = minor >= SDHCI_MSM_VER_420 ? 0 : 0x48;
+ if (cq_host->offset_changed)
+ offset += CQ_V5_VENDOR_CFG;
pr_err("---- Debug RAM dump ----\n");
pr_err(DRV_NAME ": Debug RAM wrap-around: 0x%08x | Debug RAM overlap: 0x%08x\n",
cmdq_readl(cq_host, CQ_CMD_DBG_RAM_WA + offset),
diff --git a/drivers/net/ethernet/msm/msm_rmnet_mhi.c b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
index a55a26be4273..50d8e72a96c8 100644
--- a/drivers/net/ethernet/msm/msm_rmnet_mhi.c
+++ b/drivers/net/ethernet/msm/msm_rmnet_mhi.c
@@ -625,6 +625,9 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
tx_ring_full_count[rmnet_mhi_ptr->dev_index]++;
netif_stop_queue(dev);
rmnet_log(MSG_VERBOSE, "Stopping Queue\n");
+ write_unlock_irqrestore(
+ &rmnet_mhi_ptr->out_chan_full_lock,
+ flags);
goto rmnet_mhi_xmit_error_cleanup;
} else {
retry = 1;
@@ -652,7 +655,6 @@ static int rmnet_mhi_xmit(struct sk_buff *skb, struct net_device *dev)
rmnet_mhi_xmit_error_cleanup:
rmnet_log(MSG_VERBOSE, "Ring full\n");
- write_unlock_irqrestore(&rmnet_mhi_ptr->out_chan_full_lock, flags);
return NETDEV_TX_BUSY;
}
diff --git a/drivers/net/wireless/ath/ath10k/ce.c b/drivers/net/wireless/ath/ath10k/ce.c
index 7e5e25e96762..f1ead7c28823 100644
--- a/drivers/net/wireless/ath/ath10k/ce.c
+++ b/drivers/net/wireless/ath/ath10k/ce.c
@@ -1,6 +1,6 @@
/*
* Copyright (c) 2005-2011 Atheros Communications Inc.
- * Copyright (c) 2011-2013, 2017 Qualcomm Atheros, Inc.
+ * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
diff --git a/drivers/net/wireless/ath/wil6210/Makefile b/drivers/net/wireless/ath/wil6210/Makefile
index a0f76581e6eb..4874c5ba1e61 100644
--- a/drivers/net/wireless/ath/wil6210/Makefile
+++ b/drivers/net/wireless/ath/wil6210/Makefile
@@ -5,6 +5,7 @@ wil6210-y += netdev.o
wil6210-y += cfg80211.o
wil6210-y += pcie_bus.o
wil6210-y += debugfs.o
+wil6210-y += sysfs.o
wil6210-y += wmi.o
wil6210-y += interrupt.o
wil6210-y += txrx.o
diff --git a/drivers/net/wireless/ath/wil6210/pcie_bus.c b/drivers/net/wireless/ath/wil6210/pcie_bus.c
index b38515fc7ce7..d472e13fb9d9 100644
--- a/drivers/net/wireless/ath/wil6210/pcie_bus.c
+++ b/drivers/net/wireless/ath/wil6210/pcie_bus.c
@@ -303,7 +303,7 @@ static int wil_pcie_probe(struct pci_dev *pdev, const struct pci_device_id *id)
#endif /* CONFIG_PM */
wil6210_debugfs_init(wil);
-
+ wil6210_sysfs_init(wil);
return 0;
@@ -337,6 +337,7 @@ static void wil_pcie_remove(struct pci_dev *pdev)
#endif /* CONFIG_PM_SLEEP */
#endif /* CONFIG_PM */
+ wil6210_sysfs_remove(wil);
wil6210_debugfs_remove(wil);
rtnl_lock();
wil_p2p_wdev_free(wil);
diff --git a/drivers/net/wireless/ath/wil6210/sysfs.c b/drivers/net/wireless/ath/wil6210/sysfs.c
new file mode 100644
index 000000000000..0faa26ca41c9
--- /dev/null
+++ b/drivers/net/wireless/ath/wil6210/sysfs.c
@@ -0,0 +1,126 @@
+/*
+ * Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/device.h>
+#include <linux/sysfs.h>
+
+#include "wil6210.h"
+#include "wmi.h"
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_tof_get_tx_rx_offset_event evt;
+ } __packed reply;
+ int rc;
+ ssize_t len;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_TOF_GET_TX_RX_OFFSET_CMDID, NULL, 0,
+ WMI_TOF_GET_TX_RX_OFFSET_EVENTID,
+ &reply, sizeof(reply), 100);
+ if (rc < 0)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "get_tof_tx_rx_offset failed, error %d\n",
+ reply.evt.status);
+ return -EIO;
+ }
+ len = snprintf(buf, PAGE_SIZE, "%u %u\n",
+ le32_to_cpu(reply.evt.tx_offset),
+ le32_to_cpu(reply.evt.rx_offset));
+ return len;
+}
+
+static ssize_t
+wil_ftm_txrx_offset_sysfs_store(struct device *dev,
+ struct device_attribute *attr,
+ const char *buf, size_t count)
+{
+ struct wil6210_priv *wil = dev_get_drvdata(dev);
+ struct wmi_tof_set_tx_rx_offset_cmd cmd;
+ struct {
+ struct wmi_cmd_hdr wmi;
+ struct wmi_tof_set_tx_rx_offset_event evt;
+ } __packed reply;
+ unsigned int tx_offset, rx_offset;
+ int rc;
+
+ if (sscanf(buf, "%u %u", &tx_offset, &rx_offset) != 2)
+ return -EINVAL;
+
+ if (!test_bit(WMI_FW_CAPABILITY_FTM, wil->fw_capabilities))
+ return -EOPNOTSUPP;
+
+ memset(&cmd, 0, sizeof(cmd));
+ cmd.tx_offset = cpu_to_le32(tx_offset);
+ cmd.rx_offset = cpu_to_le32(rx_offset);
+ memset(&reply, 0, sizeof(reply));
+ rc = wmi_call(wil, WMI_TOF_SET_TX_RX_OFFSET_CMDID, &cmd, sizeof(cmd),
+ WMI_TOF_SET_TX_RX_OFFSET_EVENTID,
+ &reply, sizeof(reply), 100);
+ if (rc < 0)
+ return rc;
+ if (reply.evt.status) {
+ wil_err(wil, "set_tof_tx_rx_offset failed, error %d\n",
+ reply.evt.status);
+ return -EIO;
+ }
+ return count;
+}
+
+static DEVICE_ATTR(ftm_txrx_offset, 0644,
+ wil_ftm_txrx_offset_sysfs_show,
+ wil_ftm_txrx_offset_sysfs_store);
+
+static struct attribute *wil6210_sysfs_entries[] = {
+ &dev_attr_ftm_txrx_offset.attr,
+ NULL
+};
+
+static struct attribute_group wil6210_attribute_group = {
+ .name = "wil6210",
+ .attrs = wil6210_sysfs_entries,
+};
+
+int wil6210_sysfs_init(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+ int err;
+
+ err = sysfs_create_group(&dev->kobj, &wil6210_attribute_group);
+ if (err) {
+ wil_err(wil, "failed to create sysfs group: %d\n", err);
+ return err;
+ }
+
+ return 0;
+}
+
+void wil6210_sysfs_remove(struct wil6210_priv *wil)
+{
+ struct device *dev = wil_to_dev(wil);
+
+ sysfs_remove_group(&dev->kobj, &wil6210_attribute_group);
+}
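Since the group is created on the device's kobject with .name = "wil6210", the attribute appears as <device sysfs dir>/wil6210/ftm_txrx_offset, readable and writable as "<tx> <rx>". A userspace sketch (the PCI address in the path is only an example; reading and writing typically require root):

#include <stdio.h>

int main(void)
{
	const char *path =
		"/sys/bus/pci/devices/0000:01:00.0/wil6210/ftm_txrx_offset";
	unsigned int tx, rx;
	FILE *f = fopen(path, "r+");

	if (!f) {
		perror("fopen");
		return 1;
	}
	if (fscanf(f, "%u %u", &tx, &rx) == 2) {
		printf("tx_offset=%u rx_offset=%u\n", tx, rx);
		rewind(f);
		fprintf(f, "%u %u\n", tx, rx);	/* write back unchanged */
	}
	fclose(f);
	return 0;
}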
diff --git a/drivers/net/wireless/ath/wil6210/wil6210.h b/drivers/net/wireless/ath/wil6210/wil6210.h
index 96062be9a471..fcfbcf7fbd7d 100644
--- a/drivers/net/wireless/ath/wil6210/wil6210.h
+++ b/drivers/net/wireless/ath/wil6210/wil6210.h
@@ -893,6 +893,8 @@ int wil_cfg80211_mgmt_tx(struct wiphy *wiphy, struct wireless_dev *wdev,
int wil6210_debugfs_init(struct wil6210_priv *wil);
void wil6210_debugfs_remove(struct wil6210_priv *wil);
+int wil6210_sysfs_init(struct wil6210_priv *wil);
+void wil6210_sysfs_remove(struct wil6210_priv *wil);
int wil_cid_fill_sinfo(struct wil6210_priv *wil, int cid,
struct station_info *sinfo);
diff --git a/drivers/pci/host/pci-msm.c b/drivers/pci/host/pci-msm.c
index 7c8b5e3e57a1..cd105a0bf5d1 100644
--- a/drivers/pci/host/pci-msm.c
+++ b/drivers/pci/host/pci-msm.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -2414,8 +2414,16 @@ static void msm_pcie_sel_debug_testcase(struct msm_pcie_dev_t *dev,
dev->res[base_sel - 1].base,
wr_offset, wr_mask, wr_value);
- msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
- wr_offset, wr_mask, wr_value);
+ base_sel_size = resource_size(dev->res[base_sel - 1].resource);
+
+ if (wr_offset > base_sel_size - 4 ||
+ msm_pcie_check_align(dev, wr_offset))
+ PCIE_DBG_FS(dev,
+ "PCIe: RC%d: Invalid wr_offset: 0x%x. wr_offset should be no more than 0x%x\n",
+ dev->rc_idx, wr_offset, base_sel_size - 4);
+ else
+ msm_pcie_write_reg_field(dev->res[base_sel - 1].base,
+ wr_offset, wr_mask, wr_value);
break;
case 13: /* dump all registers of base_sel */
diff --git a/drivers/platform/msm/Kconfig b/drivers/platform/msm/Kconfig
index 024c66ac8e57..66bdc593f811 100644
--- a/drivers/platform/msm/Kconfig
+++ b/drivers/platform/msm/Kconfig
@@ -217,4 +217,11 @@ config USB_BAM
Enabling this option adds USB BAM Driver.
USB BAM driver was added to support SPS Peripheral-to-Peripheral
transfers between USB and other peripherals.
+
+config MSM_EXT_DISPLAY
+ bool "MSM External Display Driver"
+ help
+ Enabling this option adds MSM External Display Driver.
+ External Display driver was added to support the communication
+ between the external display driver and its counterparts.
endmenu
diff --git a/drivers/platform/msm/Makefile b/drivers/platform/msm/Makefile
index d5e87c209c21..d985aa81a3bb 100644
--- a/drivers/platform/msm/Makefile
+++ b/drivers/platform/msm/Makefile
@@ -16,3 +16,4 @@ obj-$(CONFIG_SEEMP_CORE) += seemp_core/
obj-$(CONFIG_SSM) += ssm.o
obj-$(CONFIG_USB_BAM) += usb_bam.o
obj-$(CONFIG_MSM_MHI_DEV) += mhi_dev/
+obj-$(CONFIG_MSM_EXT_DISPLAY) += msm_ext_display.o
diff --git a/drivers/platform/msm/gsi/gsi.c b/drivers/platform/msm/gsi/gsi.c
index 188388bc97a0..f48182cc04df 100644
--- a/drivers/platform/msm/gsi/gsi.c
+++ b/drivers/platform/msm/gsi/gsi.c
@@ -22,7 +22,7 @@
#include "gsi_reg.h"
#define GSI_CMD_TIMEOUT (5*HZ)
-#define GSI_STOP_CMD_TIMEOUT_MS 1
+#define GSI_STOP_CMD_TIMEOUT_MS 10
#define GSI_MAX_CH_LOW_WEIGHT 15
#define GSI_MHI_ER_START 10
#define GSI_MHI_ER_END 16
@@ -264,6 +264,11 @@ static void gsi_handle_glob_err(uint32_t err)
}
}
+static void gsi_handle_gp_int1(void)
+{
+ complete(&gsi_ctx->gen_ee_cmd_compl);
+}
+
static void gsi_handle_glob_ee(int ee)
{
uint32_t val;
@@ -288,8 +293,7 @@ static void gsi_handle_glob_ee(int ee)
}
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT1_BMSK) {
- notify.evt_id = GSI_PER_EVT_GLOB_GP1;
- gsi_ctx->per.notify_cb(&notify);
+ gsi_handle_gp_int1();
}
if (val & GSI_EE_n_CNTXT_GLOB_IRQ_EN_GP_INT2_BMSK) {
@@ -2745,6 +2749,67 @@ void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
}
EXPORT_SYMBOL(gsi_get_inst_ram_offset_and_size);
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code)
+{
+ enum gsi_generic_ee_cmd_opcode op = GSI_GEN_EE_CMD_HALT_CHANNEL;
+ uint32_t val;
+ int res;
+
+ if (!gsi_ctx) {
+ pr_err("%s:%d gsi context not allocated\n", __func__, __LINE__);
+ return -GSI_STATUS_NODEV;
+ }
+
+ if (chan_idx >= gsi_ctx->max_ch || !code) {
+ GSIERR("bad params chan_idx=%d\n", chan_idx);
+ return -GSI_STATUS_INVALID_PARAMS;
+ }
+
+ mutex_lock(&gsi_ctx->mlock);
+ reinit_completion(&gsi_ctx->gen_ee_cmd_compl);
+
+ /* invalidate the response */
+ gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+ gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code = 0;
+ gsi_writel(gsi_ctx->scratch.word0.val, gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+
+ gsi_ctx->gen_ee_cmd_dbg.halt_channel++;
+ val = (((op << GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK) |
+ ((chan_idx << GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK) |
+ ((ee << GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT) &
+ GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK));
+ gsi_writel(val, gsi_ctx->base +
+ GSI_EE_n_GSI_EE_GENERIC_CMD_OFFS(gsi_ctx->per.ee));
+
+ res = wait_for_completion_timeout(&gsi_ctx->gen_ee_cmd_compl,
+ msecs_to_jiffies(GSI_CMD_TIMEOUT));
+ if (res == 0) {
+ GSIERR("chan_idx=%u ee=%u timed out\n", chan_idx, ee);
+ res = -GSI_STATUS_TIMED_OUT;
+ goto free_lock;
+ }
+
+ gsi_ctx->scratch.word0.val = gsi_readl(gsi_ctx->base +
+ GSI_EE_n_CNTXT_SCRATCH_0_OFFS(gsi_ctx->per.ee));
+ if (gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code == 0) {
+ GSIERR("No response received\n");
+ res = -GSI_STATUS_ERROR;
+ goto free_lock;
+ }
+
+ res = GSI_STATUS_SUCCESS;
+ *code = gsi_ctx->scratch.word0.s.generic_ee_cmd_return_code;
+free_lock:
+ mutex_unlock(&gsi_ctx->mlock);
+
+ return res;
+}
+EXPORT_SYMBOL(gsi_halt_channel_ee);
+
static int msm_gsi_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
@@ -2757,6 +2822,7 @@ static int msm_gsi_probe(struct platform_device *pdev)
}
gsi_ctx->dev = dev;
+ init_completion(&gsi_ctx->gen_ee_cmd_compl);
gsi_debugfs_init();
return 0;
diff --git a/drivers/platform/msm/gsi/gsi.h b/drivers/platform/msm/gsi/gsi.h
index 750ae2b329d3..d0eb162c49cf 100644
--- a/drivers/platform/msm/gsi/gsi.h
+++ b/drivers/platform/msm/gsi/gsi.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -115,9 +115,12 @@ struct gsi_evt_ctx {
struct gsi_ee_scratch {
union __packed {
struct {
- uint32_t resvd1:15;
+ uint32_t inter_ee_cmd_return_code:3;
+ uint32_t resvd1:2;
+ uint32_t generic_ee_cmd_return_code:3;
+ uint32_t resvd2:7;
uint32_t max_usb_pkt_size:1;
- uint32_t resvd2:8;
+ uint32_t resvd3:8;
uint32_t mhi_base_chan_idx:8;
} s;
uint32_t val;
@@ -135,6 +138,10 @@ struct ch_debug_stats {
unsigned long cmd_completed;
};
+struct gsi_generic_ee_cmd_debug_stats {
+ unsigned long halt_channel;
+};
+
struct gsi_ctx {
void __iomem *base;
struct device *dev;
@@ -143,6 +150,7 @@ struct gsi_ctx {
struct gsi_chan_ctx chan[GSI_CHAN_MAX];
struct ch_debug_stats ch_dbg[GSI_CHAN_MAX];
struct gsi_evt_ctx evtr[GSI_EVT_RING_MAX];
+ struct gsi_generic_ee_cmd_debug_stats gen_ee_cmd_dbg;
struct mutex mlock;
spinlock_t slock;
unsigned long evt_bmap;
@@ -154,6 +162,7 @@ struct gsi_ctx {
struct workqueue_struct *dp_stat_wq;
u32 max_ch;
u32 max_ev;
+ struct completion gen_ee_cmd_compl;
};
enum gsi_re_type {
@@ -227,6 +236,18 @@ enum gsi_evt_ch_cmd_opcode {
GSI_EVT_DE_ALLOC = 0xa,
};
+enum gsi_generic_ee_cmd_opcode {
+ GSI_GEN_EE_CMD_HALT_CHANNEL = 0x1,
+};
+
+enum gsi_generic_ee_cmd_return_code {
+ GSI_GEN_EE_CMD_RETURN_CODE_SUCCESS = 0x1,
+ GSI_GEN_EE_CMD_RETURN_CODE_CHANNEL_NOT_RUNNING = 0x2,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_DIRECTION = 0x3,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_TYPE = 0x4,
+ GSI_GEN_EE_CMD_RETURN_CODE_INCORRECT_CHANNEL_INDEX = 0x5,
+};
+
extern struct gsi_ctx *gsi_ctx;
void gsi_debugfs_init(void);
uint16_t gsi_find_idx_from_addr(struct gsi_ring_ctx *ctx, uint64_t addr);
diff --git a/drivers/platform/msm/gsi/gsi_reg.h b/drivers/platform/msm/gsi/gsi_reg.h
index 1acaf74a0968..d0462aad72d2 100644
--- a/drivers/platform/msm/gsi/gsi_reg.h
+++ b/drivers/platform/msm/gsi/gsi_reg.h
@@ -1365,8 +1365,12 @@
(GSI_GSI_REG_BASE_OFFS + 0x0001f018 + 0x4000 * (n))
#define GSI_EE_n_GSI_EE_GENERIC_CMD_RMSK 0xffffffff
#define GSI_EE_n_GSI_EE_GENERIC_CMD_MAXn 3
-#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0xffffffff
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_BMSK 0x1f
#define GSI_EE_n_GSI_EE_GENERIC_CMD_OPCODE_SHFT 0x0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_BMSK 0x3e0
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_VIRT_CHAN_IDX_SHFT 0x5
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_BMSK 0x3c00
+#define GSI_EE_n_GSI_EE_GENERIC_CMD_EE_SHFT 0xa
/* v1.0 */
#define GSI_V1_0_EE_n_GSI_HW_PARAM_OFFS(n) \
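gsi_halt_channel_ee() builds its GENERIC_CMD value from the three fields defined above: a 5-bit opcode at bit 0, a 5-bit virtual channel index at bit 5, and a 4-bit EE at bit 10. A userspace check of the packing (channel and EE values are examples):

#include <stdint.h>
#include <stdio.h>

#define OPCODE_BMSK		0x1f
#define OPCODE_SHFT		0x0
#define VIRT_CHAN_IDX_BMSK	0x3e0
#define VIRT_CHAN_IDX_SHFT	0x5
#define EE_BMSK			0x3c00
#define EE_SHFT			0xa

int main(void)
{
	uint32_t op = 0x1;	/* GSI_GEN_EE_CMD_HALT_CHANNEL */
	uint32_t chan = 7, ee = 2;
	uint32_t val = ((op << OPCODE_SHFT) & OPCODE_BMSK) |
		       ((chan << VIRT_CHAN_IDX_SHFT) & VIRT_CHAN_IDX_BMSK) |
		       ((ee << EE_SHFT) & EE_BMSK);

	printf("GENERIC_CMD = 0x%x\n", val);	/* prints 0x8e1 */
	return 0;
}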
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa.c b/drivers/platform/msm/ipa/ipa_v3/ipa.c
index 8ca42c7c2846..aa681d3eacaa 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa.c
@@ -1878,6 +1878,45 @@ static void ipa3_q6_avoid_holb(void)
}
}
+static void ipa3_halt_q6_cons_gsi_channels(void)
+{
+ int ep_idx;
+ int client_idx;
+ struct ipa_gsi_ep_config *gsi_ep_cfg;
+ int ret;
+ int code = 0;
+
+ for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++) {
+ if (IPA_CLIENT_IS_Q6_CONS(client_idx)) {
+ ep_idx = ipa3_get_ep_mapping(client_idx);
+ if (ep_idx == -1)
+ continue;
+
+ gsi_ep_cfg = ipa3_get_gsi_ep_info(ep_idx);
+ if (!gsi_ep_cfg) {
+ IPAERR("failed to get GSI config\n");
+ ipa_assert();
+ return;
+ }
+
+ ret = gsi_halt_channel_ee(
+ gsi_ep_cfg->ipa_gsi_chan_num, gsi_ep_cfg->ee,
+ &code);
+ if (ret == GSI_STATUS_SUCCESS)
+ IPADBG("halted gsi ch %d ee %d with code %d\n",
+ gsi_ep_cfg->ipa_gsi_chan_num,
+ gsi_ep_cfg->ee,
+ code);
+ else
+ IPAERR("failed to halt ch %d ee %d code %d\n",
+ gsi_ep_cfg->ipa_gsi_chan_num,
+ gsi_ep_cfg->ee,
+ code);
+ }
+ }
+}
+
+
static int ipa3_q6_clean_q6_flt_tbls(enum ipa_ip_type ip,
enum ipa_rule_type rlt)
{
@@ -2312,6 +2351,7 @@ void ipa3_q6_post_shutdown_cleanup(void)
/* Handle the issue where SUSPEND was removed for some reason */
ipa3_q6_avoid_holb();
+ ipa3_halt_q6_cons_gsi_channels();
for (client_idx = 0; client_idx < IPA_CLIENT_MAX; client_idx++)
if (IPA_CLIENT_IS_Q6_PROD(client_idx)) {
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
index e3dfe8927682..81eae05d7ed9 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_client.c
@@ -1777,7 +1777,8 @@ dealloc_chan_fail:
int ipa3_xdci_suspend(u32 ul_clnt_hdl, u32 dl_clnt_hdl,
bool should_force_clear, u32 qmi_req_id, bool is_dpl)
{
- struct ipa3_ep_context *ul_ep, *dl_ep;
+ struct ipa3_ep_context *ul_ep = NULL;
+ struct ipa3_ep_context *dl_ep;
int result = -EFAULT;
u32 source_pipe_bitmask = 0;
bool dl_data_pending = true;
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
index 483b2ca118fa..06f65906841d 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dma.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -853,6 +853,10 @@ void ipa3_dma_async_memcpy_notify_cb(void *priv
mem_info = (struct ipa_mem_buffer *)data;
ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_MEMCPY_DMA_ASYNC_CONS);
+ if (ep_idx < 0) {
+ IPADMA_ERR("IPA Client mapping failed\n");
+ return;
+ }
sys = ipa3_ctx->ep[ep_idx].sys;
spin_lock_irqsave(&ipa3_dma_ctx->async_lock, flags);
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
index 5c678f1cfc28..8ec0974711a4 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_dp.c
@@ -337,7 +337,7 @@ int ipa3_send_one(struct ipa3_sys_context *sys, struct ipa3_desc *desc,
int result;
u16 sps_flags = SPS_IOVEC_FLAG_EOT;
dma_addr_t dma_address;
- u16 len;
+ u16 len = 0;
u32 mem_flag = GFP_ATOMIC;
if (unlikely(!in_atomic))
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
index c3a12dd0b17c..362294b0f695 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_flt.c
@@ -14,7 +14,6 @@
#include "ipahal/ipahal.h"
#include "ipahal/ipahal_fltrt.h"
-#define IPA_FLT_TABLE_INDEX_NOT_FOUND (-1)
#define IPA_FLT_STATUS_OF_ADD_FAILED (-1)
#define IPA_FLT_STATUS_OF_DEL_FAILED (-1)
#define IPA_FLT_STATUS_OF_MDFY_FAILED (-1)
@@ -1001,7 +1000,7 @@ error:
static int __ipa_add_flt_get_ep_idx(enum ipa_client_type ep, int *ipa_ep_idx)
{
*ipa_ep_idx = ipa3_get_ep_mapping(ep);
- if (*ipa_ep_idx == IPA_FLT_TABLE_INDEX_NOT_FOUND) {
+ if (*ipa_ep_idx < 0) {
IPAERR("ep not valid ep=%d\n", ep);
return -EINVAL;
}
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
index 4ef1a96c8450..9e2ffe70170c 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_mhi.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, 2016 The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -153,10 +153,16 @@ int ipa3_mhi_reset_channel_internal(enum ipa_client_type client)
int ipa3_mhi_start_channel_internal(enum ipa_client_type client)
{
int res;
+ int ipa_ep_idx;
IPA_MHI_FUNC_ENTRY();
- res = ipa3_enable_data_path(ipa3_get_ep_mapping(client));
+ ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
+ res = ipa3_enable_data_path(ipa_ep_idx);
if (res) {
IPA_MHI_ERR("ipa3_enable_data_path failed %d\n", res);
return res;
@@ -521,6 +527,10 @@ int ipa3_mhi_resume_channels_internal(enum ipa_client_type client,
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (brstmode_enabled && !LPTransitionRejected) {
@@ -557,11 +567,14 @@ int ipa3_mhi_query_ch_info(enum ipa_client_type client,
IPA_MHI_FUNC_ENTRY();
ipa_ep_idx = ipa3_get_ep_mapping(client);
-
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
res = gsi_query_channel_info(ep->gsi_chan_hdl, ch_info);
if (res) {
- IPAERR("gsi_query_channel_info failed\n");
+ IPA_MHI_ERR("gsi_query_channel_info failed\n");
return res;
}
@@ -596,7 +609,10 @@ int ipa3_mhi_destroy_channel(enum ipa_client_type client)
struct ipa3_ep_context *ep;
ipa_ep_idx = ipa3_get_ep_mapping(client);
-
+ if (ipa_ep_idx < 0) {
+ IPA_MHI_ERR("Invalid client %d\n", client);
+ return -EINVAL;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
IPA_MHI_DBG("reset event ring (hdl: %lu, ep: %d)\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
index 21ce28204069..c1d1d9659850 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_uc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -498,7 +498,7 @@ static int ipa3_uc_send_cmd_64b_param(u32 cmd_lo, u32 cmd_hi, u32 opcode,
{
int index;
union IpaHwCpuCmdCompletedResponseData_t uc_rsp;
- unsigned long flags;
+ unsigned long flags = 0;
int retries = 0;
send_cmd_lock:
@@ -775,7 +775,7 @@ int ipa3_uc_send_cmd(u32 cmd, u32 opcode, u32 expected_status,
void ipa3_uc_register_handlers(enum ipa3_hw_features feature,
struct ipa3_uc_hdlrs *hdlrs)
{
- unsigned long flags;
+ unsigned long flags = 0;
if (0 > feature || IPA_HW_FEATURE_MAX <= feature) {
IPAERR("Feature %u is invalid, not registering hdlrs\n",
diff --git a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
index 9f38af1b520b..2f28ba673d5a 100644
--- a/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
+++ b/drivers/platform/msm/ipa/ipa_v3/ipa_utils.c
@@ -929,7 +929,7 @@ int ipa3_get_ep_mapping(enum ipa_client_type client)
if (client >= IPA_CLIENT_MAX || client < 0) {
IPAERR("Bad client number! client =%d\n", client);
- return -EINVAL;
+ return IPA_EP_NOT_ALLOCATED;
}
ipa_ep_idx = ipa3_ep_mapping[ipa3_get_hw_type_index()][client].pipe_num;
@@ -3446,6 +3446,11 @@ void ipa3_suspend_apps_pipes(bool suspend)
cfg.ipa_ep_suspend = suspend;
ipa_ep_idx = ipa3_get_ep_mapping(IPA_CLIENT_APPS_LAN_CONS);
+ if (ipa_ep_idx < 0) {
+ IPAERR("IPA client mapping failed\n");
+ ipa_assert();
+ return;
+ }
ep = &ipa3_ctx->ep[ipa_ep_idx];
if (ep->valid) {
IPADBG("%s pipe %d\n", suspend ? "suspend" : "unsuspend",
diff --git a/drivers/platform/msm/mhi/mhi.h b/drivers/platform/msm/mhi/mhi.h
index c6fa39d6e0e2..3d40d114437a 100644
--- a/drivers/platform/msm/mhi/mhi.h
+++ b/drivers/platform/msm/mhi/mhi.h
@@ -20,6 +20,7 @@
#include <linux/completion.h>
#include <linux/atomic.h>
#include <linux/spinlock.h>
+#include <linux/interrupt.h>
#include <linux/sched.h>
#include <linux/cdev.h>
#include <linux/msm_pcie.h>
@@ -29,6 +30,7 @@
#include <linux/dma-mapping.h>
extern struct mhi_pcie_devices mhi_devices;
+struct mhi_device_ctxt;
enum MHI_DEBUG_LEVEL {
MHI_MSG_RAW = 0x1,
@@ -125,6 +127,31 @@ enum MHI_STATE {
MHI_STATE_reserved = 0x80000000
};
+enum MHI_BRSTMODE {
+	/* Default: burst mode enabled for HW channels, disabled for SW channels */
+ MHI_BRSTMODE_DEFAULT = 0x0,
+ MHI_BRSTMODE_RESERVED = 0x1,
+ MHI_BRSTMODE_DISABLE = 0x2,
+ MHI_BRSTMODE_ENABLE = 0x3
+};
+
+enum MHI_PM_STATE {
+ MHI_PM_DISABLE = 0x0, /* MHI is not enabled */
+ MHI_PM_POR = 0x1, /* Power On Reset State */
+ MHI_PM_M0 = 0x2,
+ MHI_PM_M1 = 0x4,
+ MHI_PM_M1_M2_TRANSITION = 0x8, /* Register access not allowed */
+ MHI_PM_M2 = 0x10,
+ MHI_PM_M3_ENTER = 0x20,
+ MHI_PM_M3 = 0x40,
+ MHI_PM_M3_EXIT = 0x80,
+};
+
+#define MHI_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | MHI_PM_M1))
+#define MHI_WAKE_DB_ACCESS_VALID(pm_state) (pm_state & (MHI_PM_M0 | \
+ MHI_PM_M1 | MHI_PM_M2))
+#define MHI_REG_ACCESS_VALID(pm_state) ((pm_state > MHI_PM_DISABLE) && \
+ (pm_state < MHI_PM_M3_EXIT))
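These macros gate every MMIO access on the host PM state; the recurring check-under-lock idiom (see bhi_write() in mhi_bhi.c below for a real instance) looks roughly like:

	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
	if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
		read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
		return -EIO;	/* registers not accessible in this state */
	}
	/* ... mhi_reg_read()/mhi_reg_write() ... */
	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);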
struct __packed mhi_event_ctxt {
u32 mhi_intmodt;
u32 mhi_event_er_type;
@@ -136,8 +163,11 @@ struct __packed mhi_event_ctxt {
};
struct __packed mhi_chan_ctxt {
- enum MHI_CHAN_STATE mhi_chan_state;
- enum MHI_CHAN_DIR mhi_chan_type;
+ u32 chstate : 8;
+ u32 brstmode : 2;
+ u32 pollcfg : 6;
+ u32 reserved : 16;
+ u32 chtype;
u32 mhi_event_ring_index;
u64 mhi_trb_ring_base_addr;
u64 mhi_trb_ring_len;
@@ -172,11 +202,11 @@ enum MHI_PKT_TYPE {
MHI_PKT_TYPE_RESET_CHAN_CMD = 0x10,
MHI_PKT_TYPE_STOP_CHAN_CMD = 0x11,
MHI_PKT_TYPE_START_CHAN_CMD = 0x12,
- MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD = 0x1F,
MHI_PKT_TYPE_STATE_CHANGE_EVENT = 0x20,
MHI_PKT_TYPE_CMD_COMPLETION_EVENT = 0x21,
MHI_PKT_TYPE_TX_EVENT = 0x22,
MHI_PKT_TYPE_EE_EVENT = 0x40,
+ MHI_PKT_TYPE_STALE_EVENT, /* Internal event */
MHI_PKT_TYPE_SYS_ERR_EVENT = 0xFF,
};
@@ -261,6 +291,17 @@ enum MHI_EVENT_CCS {
MHI_EVENT_CC_BAD_TRE = 0x11,
};
+struct db_mode {
+ /* if set do not reset DB_Mode during M0 resume */
+ u32 preserve_db_state : 1;
+ u32 db_mode : 1;
+ enum MHI_BRSTMODE brstmode;
+ void (*process_db)(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
+};
+
struct mhi_ring {
void *base;
void *wp;
@@ -270,6 +311,11 @@ struct mhi_ring {
uintptr_t el_size;
u32 overwrite_en;
enum MHI_CHAN_DIR dir;
+ enum MHI_CHAN_STATE ch_state;
+ struct db_mode db_mode;
+ u32 msi_disable_cntr;
+ u32 msi_enable_cntr;
+ spinlock_t ring_lock;
};
enum MHI_CMD_STATUS {
@@ -327,12 +373,19 @@ struct mhi_chan_info {
u32 flags;
};
+struct mhi_chan_cfg {
+ enum MHI_COMMAND current_cmd;
+ struct mutex chan_lock;
+ spinlock_t event_lock; /* completion event lock */
+ struct completion cmd_complete;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+};
+
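struct mhi_chan_cfg replaces the old per-handle completions with one serialized command handshake per channel. A sketch of the pattern, as used by mhi_open_channel()/mhi_close_channel() in mhi_main.c below (variable declarations elided):

	mutex_lock(&cfg->chan_lock);	/* one command in flight per channel */
	init_completion(&cfg->cmd_complete);
	ret = mhi_send_cmd(mhi_dev_ctxt, MHI_COMMAND_START_CHAN, chan);
	if (!ret && wait_for_completion_timeout(&cfg->cmd_complete,
				msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT))) {
		spin_lock_irq(&cfg->event_lock);
		cmd_event_pkt = cfg->cmd_event_pkt; /* filled by the event path */
		spin_unlock_irq(&cfg->event_lock);
	}
	mutex_unlock(&cfg->chan_lock);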
struct mhi_client_handle {
struct mhi_chan_info chan_info;
struct mhi_device_ctxt *mhi_dev_ctxt;
struct mhi_client_info_t client_info;
- struct completion chan_reset_complete;
- struct completion chan_open_complete;
void *user_data;
struct mhi_result result;
u32 device_index;
@@ -344,12 +397,6 @@ struct mhi_client_handle {
int event_ring_index;
};
-enum MHI_EVENT_POLLING {
- MHI_EVENT_POLLING_DISABLED = 0x0,
- MHI_EVENT_POLLING_ENABLED = 0x1,
- MHI_EVENT_POLLING_reserved = 0x80000000
-};
-
enum MHI_TYPE_EVENT_RING {
MHI_ER_DATA_TYPE = 0x1,
MHI_ER_CTRL_TYPE = 0x2,
@@ -375,46 +422,27 @@ struct mhi_buf_info {
struct mhi_counters {
u32 m0_m1;
- u32 m1_m0;
u32 m1_m2;
u32 m2_m0;
u32 m0_m3;
- u32 m3_m0;
u32 m1_m3;
- u32 mhi_reset_cntr;
- u32 mhi_ready_cntr;
- u32 m3_event_timeouts;
- u32 m0_event_timeouts;
- u32 m2_event_timeouts;
- u32 msi_disable_cntr;
- u32 msi_enable_cntr;
- u32 nr_irq_migrations;
- u32 *msi_counter;
- u32 *ev_counter;
- atomic_t outbound_acks;
+ u32 m3_m0;
u32 chan_pkts_xferd[MHI_MAX_CHANNELS];
u32 bb_used[MHI_MAX_CHANNELS];
+ atomic_t device_wake;
+ atomic_t outbound_acks;
+ atomic_t events_pending;
+ u32 *msi_counter;
+ u32 mhi_reset_cntr;
};
struct mhi_flags {
u32 mhi_initialized;
- u32 pending_M3;
- u32 pending_M0;
u32 link_up;
- u32 kill_threads;
- atomic_t data_pending;
- atomic_t events_pending;
- atomic_t pending_resume;
- atomic_t pending_ssr;
- atomic_t pending_powerup;
- atomic_t m2_transition;
int stop_threads;
- atomic_t device_wake;
- u32 ssr;
+ u32 kill_threads;
u32 ev_thread_stopped;
u32 st_thread_stopped;
- u32 uldl_enabled;
- u32 db_mode[MHI_MAX_CHANNELS];
};
struct mhi_wait_queues {
@@ -458,44 +486,35 @@ struct mhi_dev_space {
};
struct mhi_device_ctxt {
- enum MHI_STATE mhi_state;
+ enum MHI_PM_STATE mhi_pm_state; /* Host driver state */
+ enum MHI_STATE mhi_state; /* protocol state */
enum MHI_EXEC_ENV dev_exec_env;
struct mhi_dev_space dev_space;
struct mhi_pcie_dev_info *dev_info;
struct pcie_core_info *dev_props;
struct mhi_ring chan_bb_list[MHI_MAX_CHANNELS];
-
struct mhi_ring mhi_local_chan_ctxt[MHI_MAX_CHANNELS];
struct mhi_ring *mhi_local_event_ctxt;
struct mhi_ring mhi_local_cmd_ctxt[NR_OF_CMD_RINGS];
+ struct mhi_chan_cfg mhi_chan_cfg[MHI_MAX_CHANNELS];
+
- struct mutex *mhi_chan_mutex;
- struct mutex mhi_link_state;
- spinlock_t *mhi_ev_spinlock_list;
- struct mutex *mhi_cmd_mutex_list;
struct mhi_client_handle *client_handle_list[MHI_MAX_CHANNELS];
struct mhi_event_ring_cfg *ev_ring_props;
struct task_struct *event_thread_handle;
struct task_struct *st_thread_handle;
+ struct tasklet_struct ev_task; /* Process control Events */
+ struct work_struct process_m1_worker;
struct mhi_wait_queues mhi_ev_wq;
struct dev_mmio_info mmio_info;
- u32 mhi_chan_db_order[MHI_MAX_CHANNELS];
- u32 mhi_ev_db_order[MHI_MAX_CHANNELS];
- spinlock_t *db_write_lock;
-
struct mhi_state_work_queue state_change_work_item_list;
- enum MHI_CMD_STATUS mhi_chan_pend_cmd_ack[MHI_MAX_CHANNELS];
- u32 cmd_ring_order;
struct mhi_counters counters;
struct mhi_flags flags;
- u32 device_wake_asserted;
-
- rwlock_t xfer_lock;
struct hrtimer m1_timer;
ktime_t m1_timeout;
@@ -508,11 +527,12 @@ struct mhi_device_ctxt {
unsigned long esoc_notif;
enum STATE_TRANSITION base_state;
- atomic_t outbound_acks;
+
+ rwlock_t pm_xfer_lock; /* lock to control PM State */
+ spinlock_t dev_wake_lock; /* lock to set wake bit */
struct mutex pm_lock;
struct wakeup_source w_lock;
- int enable_lpm;
char *chan_info;
struct dentry *mhi_parent_folder;
};
@@ -577,7 +597,9 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
enum MHI_CHAN_DIR chan_type,
u32 event_ring,
struct mhi_ring *ring,
- enum MHI_CHAN_STATE chan_state);
+ enum MHI_CHAN_STATE chan_state,
+ bool preserve_db_state,
+ enum MHI_BRSTMODE brstmode);
int mhi_populate_event_cfg(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_get_event_ring_for_channel(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan);
@@ -622,8 +644,9 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_CB_REASON reason);
void mhi_notify_client(struct mhi_client_handle *client_handle,
enum MHI_CB_REASON reason);
-int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt);
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set);
int mhi_reg_notifiers(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_cpu_notifier_cb(struct notifier_block *nfb, unsigned long action,
void *hcpu);
@@ -635,6 +658,14 @@ int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt);
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
int start_chan_sync(struct mhi_client_handle *client_handle);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val);
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt, void __iomem *io_addr,
uintptr_t io_offset, u32 val);
void mhi_reg_write_field(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -651,12 +682,19 @@ int mhi_runtime_suspend(struct device *dev);
int get_chan_props(struct mhi_device_ctxt *mhi_dev_ctxt, int chan,
struct mhi_chan_info *chan_info);
int mhi_runtime_resume(struct device *dev);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt);
+int mhi_runtime_idle(struct device *dev);
int init_ev_rings(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_TYPE_EVENT_RING type);
void mhi_reset_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
int index);
void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt);
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt);
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt);
+void process_m1_transition(struct work_struct *work);
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev);
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+ enum MHI_STATE new_state);
+const char *state_transition_str(enum STATE_TRANSITION state);
+void mhi_ctrl_ev_task(unsigned long data);
#endif
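mhi_assert_device_wake() gains a force_set flag and, like mhi_deassert_device_wake(), no longer returns a status; wake votes are taken under the pm_xfer_lock read side so a PM state transition cannot race them. The recurring idiom, sketched:

	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
	mhi_assert_device_wake(mhi_dev_ctxt, false);	/* take a wake vote */
	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);

	/* ... ring doorbells, process events ... */

	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
	mhi_deassert_device_wake(mhi_dev_ctxt);		/* drop the vote */
	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);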
diff --git a/drivers/platform/msm/mhi/mhi_bhi.c b/drivers/platform/msm/mhi/mhi_bhi.c
index 4b63e880a44f..113791a62c38 100644
--- a/drivers/platform/msm/mhi/mhi_bhi.c
+++ b/drivers/platform/msm/mhi/mhi_bhi.c
@@ -41,6 +41,9 @@ static ssize_t bhi_write(struct file *file,
size_t amount_copied = 0;
uintptr_t align_len = 0x1000;
u32 tx_db_val = 0;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
+ const long bhi_timeout_ms = 1000;
+ long timeout;
if (buf == NULL || 0 == count)
return -EIO;
@@ -48,8 +51,12 @@ static ssize_t bhi_write(struct file *file,
if (count > BHI_MAX_IMAGE_SIZE)
return -ENOMEM;
- wait_event_interruptible(*mhi_dev_ctxt->mhi_ev_wq.bhi_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_BHI);
+ timeout = wait_event_interruptible_timeout(
+ *mhi_dev_ctxt->mhi_ev_wq.bhi_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_BHI,
+ msecs_to_jiffies(bhi_timeout_ms));
+ if (timeout <= 0 && mhi_dev_ctxt->mhi_state != MHI_STATE_BHI)
+ return -EIO;
mhi_log(MHI_MSG_INFO, "Entered. User Image size 0x%zx\n", count);
@@ -95,6 +102,11 @@ static ssize_t bhi_write(struct file *file,
bhi_ctxt->image_size = count;
/* Write the image size */
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ goto bhi_copy_error;
+ }
pcie_word_val = HIGH_WORD(bhi_ctxt->phy_image_loc);
mhi_reg_write_field(mhi_dev_ctxt, bhi_ctxt->bhi_base,
BHI_IMGADDR_HIGH,
@@ -119,10 +131,15 @@ static ssize_t bhi_write(struct file *file,
BHI_IMGTXDB, 0xFFFFFFFF, 0, ++pcie_word_val);
mhi_reg_write(mhi_dev_ctxt, bhi_ctxt->bhi_base, BHI_INTVEC, 0);
-
+ read_unlock_bh(pm_xfer_lock);
for (i = 0; i < BHI_POLL_NR_RETRIES; ++i) {
u32 err = 0, errdbg1 = 0, errdbg2 = 0, errdbg3 = 0;
+ read_lock_bh(pm_xfer_lock);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_bh(pm_xfer_lock);
+ goto bhi_copy_error;
+ }
err = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRCODE);
errdbg1 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG1);
errdbg2 = mhi_reg_read(bhi_ctxt->bhi_base, BHI_ERRDBG2);
@@ -131,6 +148,7 @@ static ssize_t bhi_write(struct file *file,
BHI_STATUS,
BHI_STATUS_MASK,
BHI_STATUS_SHIFT);
+ read_unlock_bh(pm_xfer_lock);
mhi_log(MHI_MSG_CRITICAL,
"BHI STATUS 0x%x, err:0x%x errdbg1:0x%x errdbg2:0x%x errdbg3:0x%x\n",
tx_db_val, err, errdbg1, errdbg2, errdbg3);
@@ -176,9 +194,6 @@ int bhi_probe(struct mhi_pcie_dev_info *mhi_pcie_device)
|| 0 == mhi_pcie_device->core.bar0_end)
return -EIO;
- mhi_log(MHI_MSG_INFO,
- "Successfully registered char dev. bhi base is: 0x%p.\n",
- bhi_ctxt->bhi_base);
ret_val = alloc_chrdev_region(&bhi_ctxt->bhi_dev, 0, 1, "bhi");
if (IS_ERR_VALUE(ret_val)) {
mhi_log(MHI_MSG_CRITICAL,
diff --git a/drivers/platform/msm/mhi/mhi_event.c b/drivers/platform/msm/mhi/mhi_event.c
index aa8500d277cd..fe163f3895a5 100644
--- a/drivers/platform/msm/mhi/mhi_event.c
+++ b/drivers/platform/msm/mhi/mhi_event.c
@@ -89,32 +89,31 @@ dt_error:
int create_local_ev_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
+ int i;
mhi_dev_ctxt->mhi_local_event_ctxt = kzalloc(sizeof(struct mhi_ring)*
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
-
if (!mhi_dev_ctxt->mhi_local_event_ctxt)
return -ENOMEM;
- mhi_dev_ctxt->counters.ev_counter = kzalloc(sizeof(u32) *
- mhi_dev_ctxt->mmio_info.nr_event_rings,
- GFP_KERNEL);
- if (!mhi_dev_ctxt->counters.ev_counter) {
- r = -ENOMEM;
- goto free_local_ec_list;
- }
mhi_dev_ctxt->counters.msi_counter = kzalloc(sizeof(u32) *
mhi_dev_ctxt->mmio_info.nr_event_rings,
GFP_KERNEL);
if (!mhi_dev_ctxt->counters.msi_counter) {
r = -ENOMEM;
- goto free_ev_counter;
+ goto free_local_ec_list;
}
+
+ for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; i++) {
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[i];
+
+ spin_lock_init(&mhi_ring->ring_lock);
+ }
+
return r;
-free_ev_counter:
- kfree(mhi_dev_ctxt->counters.ev_counter);
free_local_ec_list:
kfree(mhi_dev_ctxt->mhi_local_event_ctxt);
return r;
@@ -123,19 +122,27 @@ void ring_ev_db(struct mhi_device_ctxt *mhi_dev_ctxt, u32 event_ring_index)
{
struct mhi_ring *event_ctxt = NULL;
u64 db_value = 0;
+ unsigned long flags;
event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[event_ring_index];
+ spin_lock_irqsave(&event_ctxt->ring_lock, flags);
db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
event_ring_index,
(uintptr_t) event_ctxt->wp);
- mhi_process_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.event_db_addr,
- event_ring_index, db_value);
+ event_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ event_ring_index,
+ db_value);
+ spin_unlock_irqrestore(&event_ctxt->ring_lock, flags);
}
static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
- struct mhi_ring *ring, u32 el_per_ring,
- u32 intmodt_val, u32 msi_vec)
+ struct mhi_ring *ring,
+ u32 el_per_ring,
+ u32 intmodt_val,
+ u32 msi_vec,
+ enum MHI_BRSTMODE brstmode)
{
ev_list->mhi_event_er_type = MHI_EVENT_RING_TYPE_VALID;
ev_list->mhi_msi_vector = msi_vec;
@@ -144,6 +151,20 @@ static int mhi_event_ring_init(struct mhi_event_ctxt *ev_list,
ring->len = ((size_t)(el_per_ring)*sizeof(union mhi_event_pkt));
ring->el_size = sizeof(union mhi_event_pkt);
ring->overwrite_en = 0;
+
+ ring->db_mode.db_mode = 1;
+ ring->db_mode.brstmode = brstmode;
+ switch (ring->db_mode.brstmode) {
+ case MHI_BRSTMODE_ENABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode;
+ break;
+ case MHI_BRSTMODE_DISABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+ break;
+ default:
+ ring->db_mode.process_db = mhi_process_db;
+ }
+
/* Flush writes to MMIO */
wmb();
return 0;
@@ -159,9 +180,12 @@ void init_event_ctxt_array(struct mhi_device_ctxt *mhi_dev_ctxt)
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
mhi_local_event_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
mhi_event_ring_init(event_ctxt, mhi_local_event_ctxt,
- mhi_dev_ctxt->ev_ring_props[i].nr_desc,
- mhi_dev_ctxt->ev_ring_props[i].intmod,
- mhi_dev_ctxt->ev_ring_props[i].msi_vec);
+ mhi_dev_ctxt->ev_ring_props[i].nr_desc,
+ mhi_dev_ctxt->ev_ring_props[i].intmod,
+ mhi_dev_ctxt->ev_ring_props[i].msi_vec,
+ GET_EV_PROPS(EV_BRSTMODE,
+ mhi_dev_ctxt->
+ ev_ring_props[i].flags));
}
}
@@ -219,10 +243,9 @@ int mhi_init_local_event_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 i = 0;
unsigned long flags = 0;
int ret_val = 0;
- spinlock_t *lock =
- &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
struct mhi_ring *event_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ring_index];
+ spinlock_t *lock = &event_ctxt->ring_lock;
if (NULL == mhi_dev_ctxt || 0 == nr_ev_el) {
mhi_log(MHI_MSG_ERROR, "Bad Input data, quitting\n");
diff --git a/drivers/platform/msm/mhi/mhi_hwio.h b/drivers/platform/msm/mhi/mhi_hwio.h
index 370ee0ebab7e..88a6a74566e6 100644
--- a/drivers/platform/msm/mhi/mhi_hwio.h
+++ b/drivers/platform/msm/mhi/mhi_hwio.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014, 2016, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,14 +23,14 @@
#define MHICFG (0x10)
-#define MHICFG_RESERVED_BITS31_24_MASK 0xff000000
-#define MHICFG_RESERVED_BITS31_24_SHIFT 0x18
-#define MHICFG_NER_MASK 0xff0000
-#define MHICFG_NER_SHIFT 0x10
-#define MHICFG_RESERVED_BITS15_8_MASK 0xff00
-#define MHICFG_RESERVED_BITS15_8_SHIFT 0x8
-#define MHICFG_NCH_MASK 0xff
-#define MHICFG_NCH_SHIFT 0x0
+#define MHICFG_NHWER_MASK (0xff000000)
+#define MHICFG_NHWER_SHIFT (24)
+#define MHICFG_NER_MASK (0xff0000)
+#define MHICFG_NER_SHIFT (16)
+#define MHICFG_NHWCH_MASK (0xff00)
+#define MHICFG_NHWCH_SHIFT (8)
+#define MHICFG_NCH_MASK (0xff)
+#define MHICFG_NCH_SHIFT (0)
#define CHDBOFF (0x18)
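The renamed masks make the MHICFG layout explicit: channel counts in the low half-word, event ring counts in the high half-word. A decoding sketch (mhi_reg_read() signature assumed from its use in mhi_bhi.c):

	u32 mhicfg   = mhi_reg_read(mmio_addr, MHICFG);
	u32 nr_ch    = (mhicfg & MHICFG_NCH_MASK) >> MHICFG_NCH_SHIFT;
	u32 nr_hw_ch = (mhicfg & MHICFG_NHWCH_MASK) >> MHICFG_NHWCH_SHIFT;
	u32 nr_er    = (mhicfg & MHICFG_NER_MASK) >> MHICFG_NER_SHIFT;
	u32 nr_hw_er = (mhicfg & MHICFG_NHWER_MASK) >> MHICFG_NHWER_SHIFT;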
diff --git a/drivers/platform/msm/mhi/mhi_iface.c b/drivers/platform/msm/mhi/mhi_iface.c
index b3fff19d8fa4..395e19c91f35 100644
--- a/drivers/platform/msm/mhi/mhi_iface.c
+++ b/drivers/platform/msm/mhi/mhi_iface.c
@@ -96,22 +96,6 @@ int mhi_ctxt_init(struct mhi_pcie_dev_info *mhi_pcie_dev)
"Failed to register with esoc ret %d.\n",
ret_val);
}
- mhi_pcie_dev->mhi_ctxt.bus_scale_table =
- msm_bus_cl_get_pdata(mhi_pcie_dev->plat_dev);
- mhi_pcie_dev->mhi_ctxt.bus_client =
- msm_bus_scale_register_client(
- mhi_pcie_dev->mhi_ctxt.bus_scale_table);
- if (!mhi_pcie_dev->mhi_ctxt.bus_client) {
- mhi_log(MHI_MSG_CRITICAL,
- "Could not register for bus control ret: %d.\n",
- mhi_pcie_dev->mhi_ctxt.bus_client);
- } else {
- ret_val = mhi_set_bus_request(&mhi_pcie_dev->mhi_ctxt, 1);
- if (ret_val)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- ret_val);
- }
device_disable_async_suspend(&pcie_device->dev);
ret_val = pci_enable_msi_range(pcie_device, 1, requested_msi_number);
@@ -188,9 +172,7 @@ mhi_state_transition_error:
mhi_dev_ctxt->dev_space.dev_mem_len,
mhi_dev_ctxt->dev_space.dev_mem_start,
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
+
kfree(mhi_dev_ctxt->ev_ring_props);
mhi_rem_pm_sysfs(&pcie_device->dev);
sysfs_config_err:
@@ -203,7 +185,9 @@ msi_config_err:
}
static const struct dev_pm_ops pm_ops = {
- SET_RUNTIME_PM_OPS(mhi_runtime_suspend, mhi_runtime_resume, NULL)
+ SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+ mhi_runtime_resume,
+ mhi_runtime_idle)
SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
};
@@ -217,14 +201,15 @@ static struct pci_driver mhi_pcie_driver = {
};
static int mhi_pci_probe(struct pci_dev *pcie_device,
- const struct pci_device_id *mhi_device_id)
+ const struct pci_device_id *mhi_device_id)
{
int ret_val = 0;
struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
struct platform_device *plat_dev;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
u32 nr_dev = mhi_devices.nr_of_devices;
- mhi_log(MHI_MSG_INFO, "Entering.\n");
+ mhi_log(MHI_MSG_INFO, "Entering\n");
mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
if (mhi_devices.nr_of_devices + 1 > MHI_MAX_SUPPORTED_DEVICES) {
mhi_log(MHI_MSG_ERROR, "Error: Too many devices\n");
@@ -234,29 +219,120 @@ static int mhi_pci_probe(struct pci_dev *pcie_device,
mhi_devices.nr_of_devices++;
plat_dev = mhi_devices.device_list[nr_dev].plat_dev;
pcie_device->dev.of_node = plat_dev->dev.of_node;
- pm_runtime_put_noidle(&pcie_device->dev);
+ mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_DISABLE;
+ INIT_WORK(&mhi_dev_ctxt->process_m1_worker, process_m1_transition);
+ mutex_init(&mhi_dev_ctxt->pm_lock);
+ rwlock_init(&mhi_dev_ctxt->pm_xfer_lock);
+ spin_lock_init(&mhi_dev_ctxt->dev_wake_lock);
+ tasklet_init(&mhi_dev_ctxt->ev_task,
+ mhi_ctrl_ev_task,
+ (unsigned long)mhi_dev_ctxt);
+
+ mhi_dev_ctxt->flags.link_up = 1;
+ ret_val = mhi_set_bus_request(mhi_dev_ctxt, 1);
mhi_pcie_dev->pcie_device = pcie_device;
mhi_pcie_dev->mhi_pcie_driver = &mhi_pcie_driver;
mhi_pcie_dev->mhi_pci_link_event.events =
- (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_LINKUP |
- MSM_PCIE_EVENT_WAKEUP);
+ (MSM_PCIE_EVENT_LINKDOWN | MSM_PCIE_EVENT_WAKEUP);
mhi_pcie_dev->mhi_pci_link_event.user = pcie_device;
mhi_pcie_dev->mhi_pci_link_event.callback = mhi_link_state_cb;
mhi_pcie_dev->mhi_pci_link_event.notify.data = mhi_pcie_dev;
ret_val = msm_pcie_register_event(&mhi_pcie_dev->mhi_pci_link_event);
- if (ret_val)
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to register for link notifications %d.\n",
ret_val);
+ return ret_val;
+ }
+
+ /* Initialize MHI CNTXT */
+ ret_val = mhi_ctxt_init(mhi_pcie_dev);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "MHI Initialization failed, ret %d\n",
+ ret_val);
+ goto deregister_pcie;
+ }
+ pci_set_master(mhi_pcie_dev->pcie_device);
+
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_POR;
+ ret_val = set_mhi_base_state(mhi_pcie_dev);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error Setting MHI Base State %d\n", ret_val);
+ goto unlock_pm_lock;
+ }
+
+ if (mhi_dev_ctxt->base_state == STATE_TRANSITION_BHI) {
+ ret_val = bhi_probe(mhi_pcie_dev);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error with bhi_probe ret:%d", ret_val);
+ goto unlock_pm_lock;
+ }
+ }
+
+ init_mhi_base_state(mhi_dev_ctxt);
+
+ pm_runtime_set_autosuspend_delay(&pcie_device->dev,
+ MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
+ pm_runtime_use_autosuspend(&pcie_device->dev);
+ pm_suspend_ignore_children(&pcie_device->dev, true);
+
+ /*
+	 * The PCI framework increments the usage count twice before
+	 * calling the device driver's probe function:
+	 * 1st: pci.c pci_pm_init() calls pm_runtime_forbid
+	 * 2nd: pci-driver.c local_pci_probe() calls pm_runtime_get_sync
+	 * The framework expects the driver to call pm_runtime_put_noidle
+	 * to decrement the usage count after a successful probe and to
+	 * call pm_runtime_allow to enable runtime suspend.
+	 * MHI will allow runtime suspend after entering AMSS state.
+ */
+ pm_runtime_mark_last_busy(&pcie_device->dev);
+ pm_runtime_put_noidle(&pcie_device->dev);
+
+ /*
+	 * Keep MHI in the Active (M0) state until AMSS: the EP raises an
+	 * error fatal if it is moved to M1 before reaching AMSS state.
+ */
+ read_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+
+ return 0;
+
+unlock_pm_lock:
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+deregister_pcie:
+ msm_pcie_deregister_event(&mhi_pcie_dev->mhi_pci_link_event);
return ret_val;
}
static int mhi_plat_probe(struct platform_device *pdev)
{
u32 nr_dev = mhi_devices.nr_of_devices;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
int r = 0;
mhi_log(MHI_MSG_INFO, "Entered\n");
+ mhi_dev_ctxt = &mhi_devices.device_list[nr_dev].mhi_ctxt;
+
+ mhi_dev_ctxt->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (!mhi_dev_ctxt->bus_scale_table)
+ return -ENODATA;
+ mhi_dev_ctxt->bus_client = msm_bus_scale_register_client
+ (mhi_dev_ctxt->bus_scale_table);
+ if (!mhi_dev_ctxt->bus_client)
+ return -EINVAL;
+
mhi_devices.device_list[nr_dev].plat_dev = pdev;
r = dma_set_mask(&pdev->dev, MHI_DMA_MASK);
if (r)
diff --git a/drivers/platform/msm/mhi/mhi_init.c b/drivers/platform/msm/mhi/mhi_init.c
index 93c3de35473b..a496c81239bf 100644
--- a/drivers/platform/msm/mhi/mhi_init.c
+++ b/drivers/platform/msm/mhi/mhi_init.c
@@ -27,46 +27,21 @@ static int mhi_init_sync(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int i;
- mhi_dev_ctxt->mhi_ev_spinlock_list = kmalloc(sizeof(spinlock_t) *
- mhi_dev_ctxt->mmio_info.nr_event_rings,
- GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_ev_spinlock_list)
- goto ev_mutex_free;
- mhi_dev_ctxt->mhi_chan_mutex = kmalloc(sizeof(struct mutex) *
- MHI_MAX_CHANNELS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_chan_mutex)
- goto chan_mutex_free;
- mhi_dev_ctxt->mhi_cmd_mutex_list = kmalloc(sizeof(struct mutex) *
- NR_OF_CMD_RINGS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->mhi_cmd_mutex_list)
- goto cmd_mutex_free;
-
- mhi_dev_ctxt->db_write_lock = kmalloc(sizeof(spinlock_t) *
- MHI_MAX_CHANNELS, GFP_KERNEL);
- if (NULL == mhi_dev_ctxt->db_write_lock)
- goto db_write_lock_free;
- for (i = 0; i < MHI_MAX_CHANNELS; ++i)
- mutex_init(&mhi_dev_ctxt->mhi_chan_mutex[i]);
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i)
- spin_lock_init(&mhi_dev_ctxt->mhi_ev_spinlock_list[i]);
- for (i = 0; i < NR_OF_CMD_RINGS; ++i)
- mutex_init(&mhi_dev_ctxt->mhi_cmd_mutex_list[i]);
- for (i = 0; i < MHI_MAX_CHANNELS; ++i)
- spin_lock_init(&mhi_dev_ctxt->db_write_lock[i]);
- rwlock_init(&mhi_dev_ctxt->xfer_lock);
- mutex_init(&mhi_dev_ctxt->mhi_link_state);
- mutex_init(&mhi_dev_ctxt->pm_lock);
- atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
- return 0;
+ for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
+ struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
-db_write_lock_free:
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
-cmd_mutex_free:
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
-chan_mutex_free:
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
-ev_mutex_free:
- return -ENOMEM;
+ mutex_init(&mhi_dev_ctxt->mhi_chan_cfg[i].chan_lock);
+ spin_lock_init(&mhi_dev_ctxt->mhi_chan_cfg[i].event_lock);
+ spin_lock_init(&ring->ring_lock);
+ }
+
+ for (i = 0; i < NR_OF_CMD_RINGS; i++) {
+ struct mhi_ring *ring = &mhi_dev_ctxt->mhi_local_cmd_ctxt[i];
+
+ spin_lock_init(&ring->ring_lock);
+ }
+
+ return 0;
}
size_t calculate_mhi_space(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -115,7 +90,7 @@ void init_dev_chan_ctxt(struct mhi_chan_ctxt *chan_ctxt,
chan_ctxt->mhi_trb_write_ptr = p_base_addr;
chan_ctxt->mhi_trb_ring_len = len;
	/* Prepopulate the channel ctxt */
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
chan_ctxt->mhi_event_ring_index = ev_index;
}
@@ -173,6 +148,8 @@ static int mhi_cmd_ring_init(struct mhi_cmd_ctxt *cmd_ctxt,
ring[PRIMARY_CMD_RING].len = ring_size;
ring[PRIMARY_CMD_RING].el_size = sizeof(union mhi_cmd_pkt);
ring[PRIMARY_CMD_RING].overwrite_en = 0;
+ ring[PRIMARY_CMD_RING].db_mode.process_db =
+ mhi_process_db_brstmode_disable;
return 0;
}
@@ -547,7 +524,6 @@ int mhi_init_device_ctxt(struct mhi_pcie_dev_info *dev_info,
}
init_event_ctxt_array(mhi_dev_ctxt);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- mhi_dev_ctxt->enable_lpm = 1;
r = mhi_spawn_threads(mhi_dev_ctxt);
if (r) {
@@ -573,9 +549,6 @@ error_wq_init:
mhi_dev_ctxt->dev_space.dma_dev_mem_start);
error_during_dev_mem_init:
error_during_local_ev_ctxt:
- kfree(mhi_dev_ctxt->mhi_cmd_mutex_list);
- kfree(mhi_dev_ctxt->mhi_chan_mutex);
- kfree(mhi_dev_ctxt->mhi_ev_spinlock_list);
error_during_sync:
kfree(mhi_dev_ctxt->ev_ring_props);
error_during_props:
@@ -585,24 +558,28 @@ error_during_props:
/**
* @brief Initialize the channel context and shadow context
*
- * @cc_list: Context to initialize
- * @trb_list_phy: Physical base address for the TRE ring
- * @trb_list_virt: Virtual base address for the TRE ring
- * @el_per_ring: Number of TREs this ring will contain
- * @chan_type: Type of channel IN/OUT
- * @event_ring: Event ring to be mapped to this channel context
- * @ring: Shadow context to be initialized alongside
- *
+ * @cc_list: Context to initialize
+ * @trb_list_phy: Physical base address for the TRE ring
+ * @trb_list_virt: Virtual base address for the TRE ring
+ * @el_per_ring: Number of TREs this ring will contain
+ * @chan_type: Type of channel IN/OUT
+ * @event_ring: Event ring to be mapped to this channel context
+ * @ring: Shadow context to be initialized alongside
+ * @chan_state: Channel state
+ * @preserve_db_state: Do not reset DB state during resume
+ * @brstmode: Doorbell burst mode for the channel
+ * @Return errno
*/
int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
- uintptr_t trb_list_phy, uintptr_t trb_list_virt,
- u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
- u32 event_ring, struct mhi_ring *ring,
- enum MHI_CHAN_STATE chan_state)
+ uintptr_t trb_list_phy, uintptr_t trb_list_virt,
+ u64 el_per_ring, enum MHI_CHAN_DIR chan_type,
+ u32 event_ring, struct mhi_ring *ring,
+ enum MHI_CHAN_STATE chan_state,
+ bool preserve_db_state,
+ enum MHI_BRSTMODE brstmode)
{
- cc_list->mhi_chan_state = chan_state;
- cc_list->mhi_chan_type = chan_type;
+ cc_list->brstmode = brstmode;
+ cc_list->chstate = chan_state;
+ cc_list->chtype = chan_type;
cc_list->mhi_event_ring_index = event_ring;
cc_list->mhi_trb_ring_base_addr = trb_list_phy;
cc_list->mhi_trb_ring_len =
@@ -617,6 +594,22 @@ int mhi_init_chan_ctxt(struct mhi_chan_ctxt *cc_list,
ring->el_size = sizeof(struct mhi_tx_pkt);
ring->overwrite_en = 0;
ring->dir = chan_type;
+ ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ ring->db_mode.db_mode = 1;
+ ring->db_mode.preserve_db_state = (preserve_db_state) ? 1 : 0;
+ ring->db_mode.brstmode = brstmode;
+
+ switch (ring->db_mode.brstmode) {
+ case MHI_BRSTMODE_ENABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode;
+ break;
+ case MHI_BRSTMODE_DISABLE:
+ ring->db_mode.process_db = mhi_process_db_brstmode_disable;
+ break;
+ default:
+ ring->db_mode.process_db = mhi_process_db;
+ }
+
/* Flush writes to MMIO */
wmb();
return 0;
diff --git a/drivers/platform/msm/mhi/mhi_isr.c b/drivers/platform/msm/mhi/mhi_isr.c
index 5336b9ff0c11..d7c604419593 100644
--- a/drivers/platform/msm/mhi/mhi_isr.c
+++ b/drivers/platform/msm/mhi/mhi_isr.c
@@ -15,57 +15,6 @@
#include "mhi_sys.h"
#include "mhi_trace.h"
-irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
-{
- struct device *mhi_device = dev_id;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
-
- if (!mhi_dev_ctxt) {
- mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
- return IRQ_HANDLED;
- }
- mhi_dev_ctxt->counters.msi_counter[
- IRQ_TO_MSI(mhi_dev_ctxt, irq_number)]++;
- mhi_log(MHI_MSG_VERBOSE,
- "Got MSI 0x%x\n", IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
- trace_mhi_msi(IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
- atomic_inc(&mhi_dev_ctxt->flags.events_pending);
- wake_up_interruptible(
- mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- return IRQ_HANDLED;
-}
-
-irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
-{
- struct device *mhi_device = dev_id;
- u32 client_index;
- struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
- struct mhi_client_handle *client_handle;
- struct mhi_client_info_t *client_info;
- struct mhi_cb_info cb_info;
- int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
-
- mhi_dev_ctxt->counters.msi_counter[msi_num]++;
- mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
- trace_mhi_msi(msi_num);
- client_index = MHI_MAX_CHANNELS -
- (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
- client_handle = mhi_dev_ctxt->client_handle_list[client_index];
- client_info = &client_handle->client_info;
- if (likely(NULL != client_handle)) {
- client_handle->result.user_data =
- client_handle->user_data;
- if (likely(NULL != &client_info->mhi_client_cb)) {
- cb_info.result = &client_handle->result;
- cb_info.cb_reason = MHI_CB_XFER;
- cb_info.chan = client_handle->chan_info.chan_nr;
- cb_info.result->transaction_status = 0;
- client_info->mhi_client_cb(&cb_info);
- }
- }
- return IRQ_HANDLED;
-}
-
static int mhi_process_event_ring(
struct mhi_device_ctxt *mhi_dev_ctxt,
u32 ev_index,
@@ -76,14 +25,21 @@ static int mhi_process_event_ring(
union mhi_event_pkt event_to_process;
int ret_val = 0;
struct mhi_event_ctxt *ev_ctxt = NULL;
- union mhi_cmd_pkt *cmd_pkt = NULL;
- union mhi_event_pkt *ev_ptr = NULL;
+ unsigned long flags;
struct mhi_ring *local_ev_ctxt =
&mhi_dev_ctxt->mhi_local_event_ctxt[ev_index];
- u32 event_code;
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+ mhi_log(MHI_MSG_ERROR, "Invalid MHI PM State\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ return -EIO;
+ }
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ev_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[ev_index];
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
@@ -91,64 +47,105 @@ static int mhi_process_event_ring(
ev_ctxt->mhi_event_read_ptr);
local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp;
-
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
BUG_ON(validate_ev_el_addr(local_ev_ctxt, (uintptr_t)device_rp));
while ((local_rp != device_rp) && (event_quota > 0) &&
(device_rp != NULL) && (local_rp != NULL)) {
+
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
event_to_process = *local_rp;
- ev_ptr = &event_to_process;
- event_code = get_cmd_pkt(mhi_dev_ctxt,
- ev_ptr, &cmd_pkt, ev_index);
- if (((MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process)) ==
- MHI_PKT_TYPE_CMD_COMPLETION_EVENT)) &&
- (event_code == MHI_EVENT_CC_SUCCESS)) {
- mhi_log(MHI_MSG_INFO, "Command Completion event\n");
- if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt) ==
- MHI_PKT_TYPE_RESET_CHAN_CMD)) {
- mhi_log(MHI_MSG_INFO, "First Reset CC event\n");
- MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
- MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD);
- ret_val = -EINPROGRESS;
- break;
- } else if ((MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)
- == MHI_PKT_TYPE_RESET_CHAN_DEFER_CMD)) {
- MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt,
- MHI_PKT_TYPE_RESET_CHAN_CMD);
- mhi_log(MHI_MSG_INFO,
- "Processing Reset CC event\n");
- }
- }
- if (unlikely(0 != recycle_trb_and_ring(mhi_dev_ctxt,
- local_ev_ctxt,
- MHI_RING_TYPE_EVENT_RING,
- ev_index)))
- mhi_log(MHI_MSG_ERROR, "Failed to recycle ev pkt\n");
- switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, (&event_to_process))) {
+ recycle_trb_and_ring(mhi_dev_ctxt,
+ local_ev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ ev_index);
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
+
+ switch (MHI_TRB_READ_INFO(EV_TRB_TYPE, &event_to_process)) {
case MHI_PKT_TYPE_CMD_COMPLETION_EVENT:
- mhi_log(MHI_MSG_INFO,
- "MHI CCE received ring 0x%x\n",
- ev_index);
+ {
+ union mhi_cmd_pkt *cmd_pkt;
+ u32 chan;
+ struct mhi_chan_cfg *cfg;
+ unsigned long flags;
+ struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- ret_val = parse_cmd_event(mhi_dev_ctxt,
- &event_to_process, ev_index);
+ get_cmd_pkt(mhi_dev_ctxt,
+ &event_to_process,
+ &cmd_pkt, ev_index);
+ MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+ mhi_log(MHI_MSG_INFO,
+ "MHI CCE received ring 0x%x chan:%u\n",
+ ev_index,
+ chan);
+ spin_lock_irqsave(&cfg->event_lock, flags);
+ cfg->cmd_pkt = *cmd_pkt;
+ cfg->cmd_event_pkt =
+ event_to_process.cmd_complete_event_pkt;
+ complete(&cfg->cmd_complete);
+ spin_unlock_irqrestore(&cfg->event_lock, flags);
+ spin_lock_irqsave(&cmd_ring->ring_lock,
+ flags);
+ ctxt_del_element(cmd_ring, NULL);
+ spin_unlock_irqrestore(&cmd_ring->ring_lock,
+ flags);
break;
+ }
case MHI_PKT_TYPE_TX_EVENT:
+ {
+ u32 chan;
+ struct mhi_ring *ring;
+
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
- parse_xfer_event(mhi_dev_ctxt,
- &event_to_process, ev_index);
+ chan = MHI_EV_READ_CHID(EV_CHID, &event_to_process);
+ if (unlikely(!VALID_CHAN_NR(chan))) {
+ mhi_log(MHI_MSG_ERROR,
+ "Invalid chan:%d\n",
+ chan);
+ break;
+ }
+ ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ spin_lock_bh(&ring->ring_lock);
+ if (ring->ch_state == MHI_CHAN_STATE_ENABLED)
+ parse_xfer_event(mhi_dev_ctxt,
+ &event_to_process,
+ ev_index);
+ spin_unlock_bh(&ring->ring_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
break;
+ }
case MHI_PKT_TYPE_STATE_CHANGE_EVENT:
{
enum STATE_TRANSITION new_state;
-
+ unsigned long flags;
new_state = MHI_READ_STATE(&event_to_process);
mhi_log(MHI_MSG_INFO,
- "MHI STE received ring 0x%x\n",
- ev_index);
- mhi_init_state_transition(mhi_dev_ctxt, new_state);
+ "MHI STE received ring 0x%x State:%s\n",
+ ev_index,
+ state_transition_str(new_state));
+
+ /* If transitioning to M1 schedule worker thread */
+ if (new_state == STATE_TRANSITION_M1) {
+ write_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock,
+ flags);
+ mhi_dev_ctxt->mhi_state =
+ mhi_get_m_state(mhi_dev_ctxt);
+ if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1;
+ mhi_dev_ctxt->counters.m0_m1++;
+ schedule_work(&mhi_dev_ctxt->
+ process_m1_worker);
+ }
+ write_unlock_irqrestore(&mhi_dev_ctxt->
+ pm_xfer_lock,
+ flags);
+ } else {
+ mhi_init_state_transition(mhi_dev_ctxt,
+ new_state);
+ }
break;
}
case MHI_PKT_TYPE_EE_EVENT:
@@ -174,14 +171,16 @@ static int mhi_process_event_ring(
}
break;
}
+ case MHI_PKT_TYPE_STALE_EVENT:
+ mhi_log(MHI_MSG_INFO,
+ "Stale Event received for chan:%u\n",
+ MHI_EV_READ_CHID(EV_CHID, local_rp));
+ break;
case MHI_PKT_TYPE_SYS_ERR_EVENT:
mhi_log(MHI_MSG_INFO,
"MHI System Error Detected. Triggering Reset\n");
BUG();
- if (!mhi_trigger_reset(mhi_dev_ctxt))
- mhi_log(MHI_MSG_ERROR,
- "Failed to reset for SYSERR recovery\n");
- break;
+ break;
default:
mhi_log(MHI_MSG_ERROR,
"Unsupported packet type code 0x%x\n",
@@ -189,15 +188,20 @@ static int mhi_process_event_ring(
&event_to_process));
break;
}
+ spin_lock_irqsave(&local_ev_ctxt->ring_lock, flags);
local_rp = (union mhi_event_pkt *)local_ev_ctxt->rp;
device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
mhi_dev_ctxt,
MHI_RING_TYPE_EVENT_RING,
ev_index,
ev_ctxt->mhi_event_read_ptr);
+ spin_unlock_irqrestore(&local_ev_ctxt->ring_lock, flags);
ret_val = 0;
--event_quota;
}
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
return ret_val;
}
@@ -207,7 +211,7 @@ int parse_event_thread(void *ctxt)
u32 i = 0;
int ret_val = 0;
int ret_val_process_event = 0;
- atomic_t *ev_pen_ptr = &mhi_dev_ctxt->flags.events_pending;
+ atomic_t *ev_pen_ptr = &mhi_dev_ctxt->counters.events_pending;
/* Go through all event rings */
for (;;) {
@@ -215,7 +219,7 @@ int parse_event_thread(void *ctxt)
wait_event_interruptible(
*mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq,
((atomic_read(
- &mhi_dev_ctxt->flags.events_pending) > 0) &&
+ &mhi_dev_ctxt->counters.events_pending) > 0) &&
!mhi_dev_ctxt->flags.stop_threads) ||
mhi_dev_ctxt->flags.kill_threads ||
(mhi_dev_ctxt->flags.stop_threads &&
@@ -237,27 +241,45 @@ int parse_event_thread(void *ctxt)
break;
}
mhi_dev_ctxt->flags.ev_thread_stopped = 0;
- atomic_dec(&mhi_dev_ctxt->flags.events_pending);
- for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
+ atomic_dec(&mhi_dev_ctxt->counters.events_pending);
+ for (i = 1; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
if (mhi_dev_ctxt->mhi_state == MHI_STATE_SYS_ERR) {
mhi_log(MHI_MSG_INFO,
- "SYS_ERR detected, not processing events\n");
- atomic_set(&mhi_dev_ctxt->flags.events_pending,
+ "SYS_ERR detected, not processing events\n");
+ atomic_set(&mhi_dev_ctxt->
+ counters.events_pending,
0);
break;
}
if (GET_EV_PROPS(EV_MANAGED,
- mhi_dev_ctxt->ev_ring_props[i].flags)){
+ mhi_dev_ctxt->ev_ring_props[i].flags)) {
ret_val_process_event =
- mhi_process_event_ring(mhi_dev_ctxt, i,
- mhi_dev_ctxt->ev_ring_props[i].nr_desc);
- if (ret_val_process_event ==
- -EINPROGRESS)
+ mhi_process_event_ring(mhi_dev_ctxt,
+ i,
+ mhi_dev_ctxt->
+ ev_ring_props[i].nr_desc);
+ if (ret_val_process_event == -EINPROGRESS)
atomic_inc(ev_pen_ptr);
}
}
}
- return ret_val;
+	return ret_val;
+}
+
+void mhi_ctrl_ev_task(unsigned long data)
+{
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ (struct mhi_device_ctxt *)data;
+ const unsigned CTRL_EV_RING = 0;
+ struct mhi_event_ring_cfg *ring_props =
+ &mhi_dev_ctxt->ev_ring_props[CTRL_EV_RING];
+
+ mhi_log(MHI_MSG_VERBOSE, "Enter\n");
+ /* Process control event ring */
+ mhi_process_event_ring(mhi_dev_ctxt,
+ CTRL_EV_RING,
+ ring_props->nr_desc);
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, CTRL_EV_RING));
+ mhi_log(MHI_MSG_VERBOSE, "Exit\n");
}
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
@@ -268,8 +290,8 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
client_handle->result.bytes_xferd = 0;
client_handle->result.transaction_status = 0;
ret_val = mhi_process_event_ring(client_handle->mhi_dev_ctxt,
- client_handle->event_ring_index,
- 1);
+ client_handle->event_ring_index,
+ 1);
if (ret_val)
mhi_log(MHI_MSG_INFO, "NAPI failed to process event ring\n");
return &(client_handle->result);
@@ -277,20 +299,79 @@ struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle)
void mhi_mask_irq(struct mhi_client_handle *client_handle)
{
- disable_irq_nosync(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
- client_handle->msi_vec));
- client_handle->mhi_dev_ctxt->counters.msi_disable_cntr++;
- if (client_handle->mhi_dev_ctxt->counters.msi_disable_cntr >
- (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr + 1))
- mhi_log(MHI_MSG_INFO, "No nested IRQ disable Allowed\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ client_handle->mhi_dev_ctxt;
+ struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[client_handle->event_ring_index];
+
+ disable_irq_nosync(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+ ev_ring->msi_disable_cntr++;
}
void mhi_unmask_irq(struct mhi_client_handle *client_handle)
{
- client_handle->mhi_dev_ctxt->counters.msi_enable_cntr++;
- enable_irq(MSI_TO_IRQ(client_handle->mhi_dev_ctxt,
- client_handle->msi_vec));
- if (client_handle->mhi_dev_ctxt->counters.msi_enable_cntr >
- client_handle->mhi_dev_ctxt->counters.msi_disable_cntr)
- mhi_log(MHI_MSG_INFO, "No nested IRQ enable Allowed\n");
+ struct mhi_device_ctxt *mhi_dev_ctxt =
+ client_handle->mhi_dev_ctxt;
+ struct mhi_ring *ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[client_handle->event_ring_index];
+
+ ev_ring->msi_enable_cntr++;
+ enable_irq(MSI_TO_IRQ(mhi_dev_ctxt, client_handle->msi_vec));
+}
+
+irqreturn_t mhi_msi_handlr(int irq_number, void *dev_id)
+{
+ struct device *mhi_device = dev_id;
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+	int msi;
+
+	if (!mhi_dev_ctxt) {
+		mhi_log(MHI_MSG_ERROR, "Failed to get a proper context\n");
+		return IRQ_HANDLED;
+	}
+	/* only derive the MSI number once the context is known valid */
+	msi = IRQ_TO_MSI(mhi_dev_ctxt, irq_number);
+	mhi_dev_ctxt->counters.msi_counter[msi]++;
+	mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi);
+	trace_mhi_msi(msi);
+
+ if (msi) {
+ atomic_inc(&mhi_dev_ctxt->counters.events_pending);
+ wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
+ } else {
+ disable_irq_nosync(irq_number);
+ tasklet_schedule(&mhi_dev_ctxt->ev_task);
+ }
+
+ return IRQ_HANDLED;
+}
+
+irqreturn_t mhi_msi_ipa_handlr(int irq_number, void *dev_id)
+{
+ struct device *mhi_device = dev_id;
+ u32 client_index;
+ struct mhi_device_ctxt *mhi_dev_ctxt = mhi_device->platform_data;
+ struct mhi_client_handle *client_handle;
+ struct mhi_client_info_t *client_info;
+ struct mhi_cb_info cb_info;
+ int msi_num = (IRQ_TO_MSI(mhi_dev_ctxt, irq_number));
+
+ mhi_dev_ctxt->counters.msi_counter[msi_num]++;
+ mhi_log(MHI_MSG_VERBOSE, "Got MSI 0x%x\n", msi_num);
+ trace_mhi_msi(msi_num);
+ client_index = MHI_MAX_CHANNELS -
+ (mhi_dev_ctxt->mmio_info.nr_event_rings - msi_num);
+	client_handle = mhi_dev_ctxt->client_handle_list[client_index];
+	if (likely(client_handle)) {
+		client_info = &client_handle->client_info;
+ client_handle->result.user_data =
+ client_handle->user_data;
+ if (likely(client_info->mhi_client_cb)) {
+ cb_info.result = &client_handle->result;
+ cb_info.cb_reason = MHI_CB_XFER;
+ cb_info.chan = client_handle->chan_info.chan_nr;
+ cb_info.result->transaction_status = 0;
+ client_info->mhi_client_cb(&cb_info);
+ }
+ }
+ return IRQ_HANDLED;
}
diff --git a/drivers/platform/msm/mhi/mhi_macros.h b/drivers/platform/msm/mhi/mhi_macros.h
index bb47f53e244d..133c0eeb034e 100644
--- a/drivers/platform/msm/mhi/mhi_macros.h
+++ b/drivers/platform/msm/mhi/mhi_macros.h
@@ -96,7 +96,6 @@
((_mhi_dev_ctxt)->mmio_info.nr_event_rings - \
((_mhi_dev_ctxt)->mmio_info.nr_hw_event_rings)))
-
/* MHI Transfer Ring Elements 7.4.1*/
#define TX_TRB_LEN
#define MHI_TX_TRB_LEN__SHIFT (0)
@@ -244,9 +243,21 @@
#define MHI_CHAN_TYPE__MASK (3)
#define MHI_CHAN_TYPE__SHIFT (6)
+#define PRESERVE_DB_STATE
+#define MHI_PRESERVE_DB_STATE__MASK (1)
+#define MHI_PRESERVE_DB_STATE__SHIFT (8)
+
+#define BRSTMODE
+#define MHI_BRSTMODE__MASK (3)
+#define MHI_BRSTMODE__SHIFT (9)
+
#define GET_CHAN_PROPS(_FIELD, _VAL) \
(((_VAL) >> MHI_##_FIELD ## __SHIFT) & MHI_##_FIELD ## __MASK)
+#define EV_BRSTMODE
+#define MHI_EV_BRSTMODE__MASK (3)
+#define MHI_EV_BRSTMODE__SHIFT (5)
+
#define EV_TYPE
#define MHI_EV_TYPE__MASK (3)
#define MHI_EV_TYPE__SHIFT (3)
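The new PRESERVE_DB_STATE, BRSTMODE and EV_BRSTMODE fields are extracted with the existing GET_CHAN_PROPS()/GET_EV_PROPS() accessors (the latter assumed defined alongside these macros); a usage sketch:

	enum MHI_BRSTMODE brst =
			GET_CHAN_PROPS(BRSTMODE, chan_info->flags);
	bool preserve_db =
			GET_CHAN_PROPS(PRESERVE_DB_STATE, chan_info->flags);
	u32 ev_brst = GET_EV_PROPS(EV_BRSTMODE, ev_ring_props->flags);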
diff --git a/drivers/platform/msm/mhi/mhi_main.c b/drivers/platform/msm/mhi/mhi_main.c
index c949745f5af7..430dc918af7e 100644
--- a/drivers/platform/msm/mhi/mhi_main.c
+++ b/drivers/platform/msm/mhi/mhi_main.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2014-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -29,6 +29,9 @@
#include "mhi_macros.h"
#include "mhi_trace.h"
+static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
+ union mhi_cmd_pkt *cmd_pkt);
+
static int enable_bb_ctxt(struct mhi_ring *bb_ctxt, int nr_el)
{
bb_ctxt->el_size = sizeof(struct mhi_buf_info);
@@ -212,7 +215,9 @@ int mhi_release_chan_ctxt(struct mhi_device_ctxt *mhi_dev_ctxt,
ring->len, ring->base,
cc_list->mhi_trb_ring_base_addr);
mhi_init_chan_ctxt(cc_list, 0, 0, 0, 0, 0, ring,
- MHI_CHAN_STATE_DISABLED);
+ MHI_CHAN_STATE_DISABLED,
+ false,
+ MHI_BRSTMODE_DEFAULT);
return 0;
}
@@ -259,7 +264,11 @@ static int populate_tre_ring(struct mhi_client_handle *client_handle)
client_handle->chan_info.flags),
client_handle->chan_info.ev_ring,
&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
- MHI_CHAN_STATE_ENABLED);
+ MHI_CHAN_STATE_ENABLED,
+ GET_CHAN_PROPS(PRESERVE_DB_STATE,
+ client_handle->chan_info.flags),
+ GET_CHAN_PROPS(BRSTMODE,
+ client_handle->chan_info.flags));
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
}
@@ -268,48 +277,60 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
{
int ret_val = 0;
struct mhi_device_ctxt *mhi_dev_ctxt;
- int r = 0;
+ struct mhi_ring *chan_ring;
int chan;
+ struct mhi_chan_cfg *cfg;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+ enum MHI_EVENT_CCS ev_code;
- if (NULL == client_handle ||
- client_handle->magic != MHI_HANDLE_MAGIC)
+ if (!client_handle || client_handle->magic != MHI_HANDLE_MAGIC)
return -EINVAL;
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
- r = get_chan_props(mhi_dev_ctxt,
- client_handle->chan_info.chan_nr,
- &client_handle->chan_info);
- if (r)
- return r;
+ ret_val = get_chan_props(mhi_dev_ctxt,
+ client_handle->chan_info.chan_nr,
+ &client_handle->chan_info);
+ if (ret_val)
+ return ret_val;
chan = client_handle->chan_info.chan_nr;
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
+ chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ mutex_lock(&cfg->chan_lock);
mhi_log(MHI_MSG_INFO,
"Entered: Client opening chan 0x%x\n", chan);
if (mhi_dev_ctxt->dev_exec_env <
GET_CHAN_PROPS(CHAN_BRINGUP_STAGE,
- client_handle->chan_info.flags)) {
+ client_handle->chan_info.flags)) {
mhi_log(MHI_MSG_INFO,
"Chan %d, MHI exec_env %d, not ready!\n",
- chan, mhi_dev_ctxt->dev_exec_env);
+ chan,
+ mhi_dev_ctxt->dev_exec_env);
+ mutex_unlock(&cfg->chan_lock);
return -ENOTCONN;
}
- r = populate_tre_ring(client_handle);
- if (r) {
+ ret_val = populate_tre_ring(client_handle);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize tre ring chan %d ret %d\n",
- chan, r);
- return r;
+ chan,
+ ret_val);
+ mutex_unlock(&cfg->chan_lock);
+ return ret_val;
}
client_handle->event_ring_index =
- mhi_dev_ctxt->dev_space.ring_ctxt.
- cc_list[chan].mhi_event_ring_index;
- r = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
- client_handle->chan_info.max_desc);
- if (r) {
+ mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].
+ mhi_event_ring_index;
+ ret_val = enable_bb_ctxt(&mhi_dev_ctxt->chan_bb_list[chan],
+ client_handle->chan_info.max_desc);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
"Failed to initialize bb ctxt chan %d ret %d\n",
- chan, r);
- return r;
+ chan,
+ ret_val);
+ mutex_unlock(&cfg->chan_lock);
+ return ret_val;
}
client_handle->msi_vec =
@@ -319,16 +340,70 @@ int mhi_open_channel(struct mhi_client_handle *client_handle)
mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[
client_handle->event_ring_index].mhi_intmodt;
- init_completion(&client_handle->chan_open_complete);
- ret_val = start_chan_sync(client_handle);
+ init_completion(&cfg->cmd_complete);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ if (unlikely(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE)) {
+ mhi_log(MHI_MSG_ERROR,
+ "MHI State is disabled\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&cfg->chan_lock);
+ return -EIO;
+ }
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (0 != ret_val)
+ spin_lock_irq(&chan_ring->ring_lock);
+ chan_ring->ch_state = MHI_CHAN_STATE_ENABLED;
+ spin_unlock_irq(&chan_ring->ring_lock);
+ ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ MHI_COMMAND_START_CHAN,
+ chan);
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR,
- "Failed to start chan 0x%x, ret %d\n", chan, ret_val);
- BUG_ON(ret_val);
+ "Failed to send start cmd for chan %d ret %d\n",
+ chan, ret_val);
+ goto error_completion;
+ }
+ ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
+ msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
+	if (!ret_val) {
+		mhi_log(MHI_MSG_ERROR,
+			"Failed to receive cmd completion for %d\n",
+			chan);
+		/*
+		 * wait_for_completion_timeout() returns 0 on timeout; do
+		 * not let the open succeed with ret_val == 0 here
+		 */
+		ret_val = -EIO;
+		goto error_completion;
+	} else {
+		ret_val = 0;
+	}
+
+ spin_lock(&cfg->event_lock);
+ cmd_event_pkt = cfg->cmd_event_pkt;
+ cmd_pkt = cfg->cmd_pkt;
+ spin_unlock(&cfg->event_lock);
+
+ ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+ ((union mhi_event_pkt *)&cmd_event_pkt));
+ if (ev_code != MHI_EVENT_CC_SUCCESS) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error to receive event completion ev_code:0x%x\n",
+ ev_code);
+ ret_val = -EIO;
+ goto error_completion;
+ }
+
client_handle->chan_status = 1;
+
+error_completion:
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mutex_unlock(&cfg->chan_lock);
+
mhi_log(MHI_MSG_INFO,
- "Exited chan 0x%x\n", chan);
+ "Exited chan 0x%x ret:%d\n", chan, ret_val);
return ret_val;
}
EXPORT_SYMBOL(mhi_open_channel);
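/*
 * Illustrative client flow (hedged sketch; assumed usage, not defined
 * by this patch). A client registers for its channel, opens it, queues
 * transfers, and closes it on teardown. Error handling is elided and
 * buf, len and flags are placeholders.
 *
 *	ret = mhi_open_channel(client_handle);
 *	if (!ret) {
 *		ret = mhi_queue_xfer(client_handle, buf, len, flags);
 *		...
 *		mhi_close_channel(client_handle);
 *	}
 */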
@@ -387,46 +462,86 @@ EXPORT_SYMBOL(mhi_register_channel);
void mhi_close_channel(struct mhi_client_handle *client_handle)
{
u32 chan;
- int r = 0;
int ret_val = 0;
+ struct mhi_chan_cfg *cfg;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ struct mhi_cmd_complete_event_pkt cmd_event_pkt;
+ union mhi_cmd_pkt cmd_pkt;
+ struct mhi_ring *chan_ring;
+ enum MHI_EVENT_CCS ev_code;
if (!client_handle ||
client_handle->magic != MHI_HANDLE_MAGIC ||
!client_handle->chan_status)
return;
+ mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
mhi_log(MHI_MSG_INFO, "Client attempting to close chan 0x%x\n", chan);
- init_completion(&client_handle->chan_reset_complete);
- if (!atomic_read(&client_handle->mhi_dev_ctxt->flags.pending_ssr)) {
- ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_RESET_CHAN, chan);
- if (ret_val != 0) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to send reset cmd for chan %d ret %d\n",
- chan, ret_val);
- }
- r = wait_for_completion_timeout(
- &client_handle->chan_reset_complete,
+ chan_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ mutex_lock(&cfg->chan_lock);
+
+	/* Stop processing events for this channel */
+ spin_lock_irq(&chan_ring->ring_lock);
+ chan_ring->ch_state = MHI_CHAN_STATE_DISABLED;
+ spin_unlock_irq(&chan_ring->ring_lock);
+ init_completion(&cfg->cmd_complete);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ ret_val = mhi_send_cmd(client_handle->mhi_dev_ctxt,
+ MHI_COMMAND_RESET_CHAN,
+ chan);
+ if (ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to send reset cmd for chan %d ret %d\n",
+ chan,
+ ret_val);
+ goto error_completion;
+ }
+ ret_val = wait_for_completion_timeout(&cfg->cmd_complete,
msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
- if (!r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to reset chan %d ret %d\n",
- chan, r);
- } else {
- /*
- * Assumption: Device is not playing with our
- * buffers after BEFORE_SHUTDOWN
- */
- mhi_log(MHI_MSG_INFO,
- "Pending SSR local free only chan %d.\n", chan);
+ if (!ret_val) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to receive cmd completion for %d\n",
+ chan);
+ goto error_completion;
}
+ spin_lock_irq(&cfg->event_lock);
+ cmd_event_pkt = cfg->cmd_event_pkt;
+ cmd_pkt = cfg->cmd_pkt;
+ spin_unlock_irq(&cfg->event_lock);
+ ev_code = MHI_EV_READ_CODE(EV_TRB_CODE,
+ ((union mhi_event_pkt *)&cmd_event_pkt));
+ if (ev_code != MHI_EVENT_CC_SUCCESS) {
+ mhi_log(MHI_MSG_ERROR,
+ "Error to receive event completion ev_cod:0x%x\n",
+ ev_code);
+ goto error_completion;
+ }
+
+ ret_val = reset_chan_cmd(mhi_dev_ctxt, &cmd_pkt);
+ if (ret_val)
+ mhi_log(MHI_MSG_ERROR,
+ "Error resetting cmd ret:%d\n",
+ ret_val);
+
+error_completion:
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
mhi_log(MHI_MSG_INFO, "Freeing ring for chan 0x%x\n", chan);
free_tre_ring(client_handle);
mhi_log(MHI_MSG_INFO, "Chan 0x%x confirmed closed.\n", chan);
client_handle->chan_status = 0;
+ mutex_unlock(&cfg->chan_lock);
}
EXPORT_SYMBOL(mhi_close_channel);
@@ -439,93 +554,47 @@ void mhi_update_chan_db(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
db_value = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_XFER_RING, chan,
(uintptr_t) chan_ctxt->wp);
- mhi_dev_ctxt->mhi_chan_db_order[chan]++;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- chan, db_value);
-}
-
-int mhi_check_m2_transition(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int ret_val = 0;
+ chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ chan,
+ db_value);
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
- mhi_log(MHI_MSG_INFO, "M2 Transition flag value = %d\n",
- (atomic_read(&mhi_dev_ctxt->flags.m2_transition)));
- if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
- if (mhi_dev_ctxt->flags.link_up) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- ret_val = -ENOTCONN;
- }
- } else{
- mhi_log(MHI_MSG_INFO, "M2 transition flag is set\n");
- ret_val = -ENOTCONN;
- }
- } else {
- ret_val = 0;
- }
-
- return ret_val;
}
static inline int mhi_queue_tre(struct mhi_device_ctxt
- *mhi_dev_ctxt,
- u32 chan,
- enum MHI_RING_TYPE type)
+ *mhi_dev_ctxt,
+ u32 chan,
+ enum MHI_RING_TYPE type)
{
struct mhi_chan_ctxt *chan_ctxt;
unsigned long flags = 0;
- int ret_val = 0;
u64 db_value = 0;
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
- mhi_dev_ctxt->counters.m1_m0++;
- if (type == MHI_RING_TYPE_CMD_RING)
- atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
+ if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ return -EACCES;
- ret_val = mhi_check_m2_transition(mhi_dev_ctxt);
- if (likely(((ret_val == 0) &&
- (((mhi_dev_ctxt->mhi_state == MHI_STATE_M0) ||
- (mhi_dev_ctxt->mhi_state == MHI_STATE_M1))) &&
- (chan_ctxt->mhi_chan_state != MHI_CHAN_STATE_ERROR)) &&
- (!mhi_dev_ctxt->flags.pending_M3))) {
- if (likely(type == MHI_RING_TYPE_XFER_RING)) {
- spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan],
- flags);
- db_value =
- mhi_v2p_addr(
- mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING,
- chan,
- (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
- mhi_dev_ctxt->mhi_chan_db_order[chan]++;
- mhi_update_chan_db(mhi_dev_ctxt, chan);
- spin_unlock_irqrestore(
- &mhi_dev_ctxt->db_write_lock[chan], flags);
- } else if (type == MHI_RING_TYPE_CMD_RING) {
- db_value = mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_CMD_RING,
- PRIMARY_CMD_RING,
- (uintptr_t)
- mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING].wp);
- mhi_dev_ctxt->cmd_ring_order++;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- 0, db_value);
- } else {
- mhi_log(MHI_MSG_VERBOSE,
- "Wrong type of packet = %d\n", type);
- ret_val = -EPROTO;
- }
+ if (likely(type == MHI_RING_TYPE_XFER_RING)) {
+ struct mhi_ring *mhi_ring =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ mhi_update_chan_db(mhi_dev_ctxt, chan);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
} else {
- mhi_log(MHI_MSG_VERBOSE,
- "Wakeup, pending data state %d chan state %d\n",
- mhi_dev_ctxt->mhi_state,
- chan_ctxt->mhi_chan_state);
- ret_val = 0;
+ struct mhi_ring *cmd_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_CMD_RING,
+ PRIMARY_CMD_RING,
+ (uintptr_t)cmd_ring->wp);
+ cmd_ring->db_mode.process_db
+ (mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.cmd_db_addr,
+ 0,
+ db_value);
}
- return ret_val;
+ return 0;
}
static int create_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
int chan, void *buf, size_t buf_len,
@@ -645,17 +714,11 @@ static int mhi_queue_dma_xfer(
int ret_val;
enum MHI_CLIENT_CHANNEL chan;
struct mhi_device_ctxt *mhi_dev_ctxt;
- unsigned long flags;
-
- if (!client_handle || !buf || !buf_len)
- return -EINVAL;
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
MHI_ASSERT(VALID_BUF(buf, buf_len, mhi_dev_ctxt),
"Client buffer is of invalid length\n");
chan = client_handle->chan_info.chan_nr;
- mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
pkt_loc = mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp;
pkt_loc->data_tx_pkt.buffer_ptr = buf;
@@ -682,24 +745,14 @@ static int mhi_queue_dma_xfer(
(void *)&pkt_loc);
if (unlikely(0 != ret_val)) {
mhi_log(MHI_MSG_VERBOSE,
- "Failed to insert trb in xfer ring\n");
- goto error;
+ "Failed to insert trb in xfer ring\n");
+ return ret_val;
}
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
+
if (MHI_OUT ==
GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
atomic_inc(&mhi_dev_ctxt->counters.outbound_acks);
- ret_val = mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
- if (unlikely(ret_val))
- mhi_log(MHI_MSG_VERBOSE, "Failed queue TRE.\n");
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-error:
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return ret_val;
}
@@ -709,10 +762,28 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
int r;
enum dma_data_direction dma_dir;
struct mhi_buf_info *bb;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+ u32 chan;
+ unsigned long flags;
if (!client_handle || !buf || !buf_len)
return -EINVAL;
+ mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ chan = client_handle->chan_info.chan_nr;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE) {
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_log(MHI_MSG_ERROR,
+ "MHI is not in active state\n");
+ return -EINVAL;
+ }
+
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
if (MHI_OUT == GET_CHAN_PROPS(CHAN_DIR, client_handle->chan_info.flags))
dma_dir = DMA_TO_DEVICE;
else
@@ -723,8 +794,16 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
buf, buf_len, dma_dir, &bb);
if (r) {
mhi_log(MHI_MSG_VERBOSE,
- "Failed to create BB, chan %d ret %d\n",
- client_handle->chan_info.chan_nr, r);
+ "Failed to create BB, chan %d ret %d\n",
+ chan,
+ r);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->
+ pcie_device->dev);
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
return r;
}
@@ -733,9 +812,9 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
buf, buf_len, (u64)bb->bb_p_addr,
client_handle->chan_info.chan_nr);
r = mhi_queue_dma_xfer(client_handle,
- bb->bb_p_addr,
- bb->buf_len,
- mhi_flags);
+ bb->bb_p_addr,
+ bb->buf_len,
+ mhi_flags);
/*
* Assumption: If create_bounce_buffer did not fail, we do not
@@ -743,103 +822,76 @@ int mhi_queue_xfer(struct mhi_client_handle *client_handle,
* out of sync with the descriptor list which is problematic.
*/
BUG_ON(r);
- return r;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ mhi_queue_tre(mhi_dev_ctxt, chan, MHI_RING_TYPE_XFER_RING);
+ if (dma_dir == DMA_FROM_DEVICE) {
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ }
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ return 0;
}
EXPORT_SYMBOL(mhi_queue_xfer);
int mhi_send_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_COMMAND cmd, u32 chan)
{
- unsigned long flags = 0;
union mhi_cmd_pkt *cmd_pkt = NULL;
- enum MHI_CHAN_STATE from_state = MHI_CHAN_STATE_DISABLED;
- enum MHI_CHAN_STATE to_state = MHI_CHAN_STATE_DISABLED;
enum MHI_PKT_TYPE ring_el_type = MHI_PKT_TYPE_NOOP_CMD;
- struct mutex *chan_mutex = NULL;
int ret_val = 0;
+ unsigned long flags, flags2;
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
- if (chan >= MHI_MAX_CHANNELS ||
- cmd >= MHI_COMMAND_MAX_NR || mhi_dev_ctxt == NULL) {
+ if (chan >= MHI_MAX_CHANNELS || cmd >= MHI_COMMAND_MAX_NR) {
mhi_log(MHI_MSG_ERROR,
- "Invalid channel id, received id: 0x%x", chan);
+ "Invalid channel id, received id: 0x%x",
+ chan);
return -EINVAL;
}
mhi_log(MHI_MSG_INFO,
- "Entered, MHI state %d dev_exec_env %d chan %d cmd %d\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->dev_exec_env,
- chan, cmd);
- mhi_log(MHI_MSG_INFO, "Getting Reference %d", chan);
- pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- /*
- * If there is a cmd pending a device confirmation,
- * do not send anymore for this channel
- */
- if (MHI_CMD_PENDING == mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan]) {
- mhi_log(MHI_MSG_ERROR, "Cmd Pending on chan %d", chan);
- ret_val = -EALREADY;
- goto error_invalid;
- }
-
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- from_state =
- mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan].mhi_chan_state;
+ "Entered, MHI state %s dev_exec_env %d chan %d cmd %d\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ mhi_dev_ctxt->dev_exec_env, chan, cmd);
switch (cmd) {
case MHI_COMMAND_RESET_CHAN:
- to_state = MHI_CHAN_STATE_DISABLED;
ring_el_type = MHI_PKT_TYPE_RESET_CHAN_CMD;
break;
case MHI_COMMAND_START_CHAN:
- switch (from_state) {
- case MHI_CHAN_STATE_DISABLED:
- case MHI_CHAN_STATE_ENABLED:
- case MHI_CHAN_STATE_STOP:
- to_state = MHI_CHAN_STATE_RUNNING;
- break;
- default:
- mhi_log(MHI_MSG_ERROR,
- "Invalid stt cmd 0x%x, from_state 0x%x\n",
- cmd, from_state);
- ret_val = -EPERM;
- goto error_invalid;
- }
ring_el_type = MHI_PKT_TYPE_START_CHAN_CMD;
break;
default:
mhi_log(MHI_MSG_ERROR, "Bad command received\n");
+ return -EINVAL;
}
- mutex_lock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
- ret_val = ctxt_add_element(mhi_dev_ctxt->mhi_local_cmd_ctxt,
- (void *)&cmd_pkt);
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ ret_val = ctxt_add_element(mhi_ring, (void *)&cmd_pkt);
if (ret_val) {
mhi_log(MHI_MSG_ERROR, "Failed to insert element\n");
- goto error_general;
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
+ return ret_val;
}
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
- mutex_lock(chan_mutex);
+
MHI_TRB_SET_INFO(CMD_TRB_TYPE, cmd_pkt, ring_el_type);
MHI_TRB_SET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- mutex_unlock(chan_mutex);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_PENDING;
-
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags2);
mhi_queue_tre(mhi_dev_ctxt, 0, MHI_RING_TYPE_CMD_RING);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags2);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
- mhi_log(MHI_MSG_VERBOSE, "Sent command 0x%x for chan %d\n",
- cmd, chan);
-error_general:
- mutex_unlock(&mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING]);
-error_invalid:
- pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Putting Reference %d", chan);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ mhi_log(MHI_MSG_VERBOSE,
+ "Sent command 0x%x for chan %d\n",
+ cmd,
+ chan);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
mhi_log(MHI_MSG_INFO, "Exited ret %d.\n", ret_val);
return ret_val;
}
@@ -870,7 +922,6 @@ static void parse_inbound_bb(struct mhi_device_ctxt *mhi_dev_ctxt,
result->buf_addr = bb->client_buf;
result->bytes_xferd = bb->filled_size;
- result->transaction_status = 0;
/* At this point the bounce buffer is no longer necessary
* Whatever was received from the device was copied back to the
@@ -922,6 +973,9 @@ static int parse_outbound(struct mhi_device_ctxt *mhi_dev_ctxt,
mhi_log(MHI_MSG_RAW, "Removing BB from head, chan %d\n", chan);
atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
ret_val = ctxt_del_element(&mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
NULL);
BUG_ON(ret_val);
@@ -1018,16 +1072,8 @@ static int parse_inbound(struct mhi_device_ctxt *mhi_dev_ctxt,
ctxt_index_rp, ctxt_index_wp, chan);
BUG_ON(bb_index != ctxt_index_rp);
} else {
- /* Hardware Channel, no client registerered,
- drop data */
- recycle_trb_and_ring(mhi_dev_ctxt,
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan],
- MHI_RING_TYPE_XFER_RING,
- chan);
BUG();
- /* No bounce buffer to recycle as no user request
- * can be present.
- */
}
}
return 0;
@@ -1068,13 +1114,11 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
u32 nr_trb_to_parse;
u32 i = 0;
u32 ev_code;
+ struct mhi_ring *local_chan_ctxt;
trace_mhi_ev(event);
chan = MHI_EV_READ_CHID(EV_CHID, event);
- if (unlikely(!VALID_CHAN_NR(chan))) {
- mhi_log(MHI_MSG_ERROR, "Bad ring id.\n");
- return -EINVAL;
- }
+ local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
ev_code = MHI_EV_READ_CODE(EV_TRB_CODE, event);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
client_handle->pkt_count++;
@@ -1095,10 +1139,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
dma_addr_t trb_data_loc;
u32 ieot_flag;
int ret_val;
- struct mhi_ring *local_chan_ctxt;
- local_chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
phy_ev_trb_loc = MHI_EV_READ_PTR(EV_PTR, event);
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
@@ -1120,6 +1161,7 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
mhi_log(MHI_MSG_CRITICAL,
"Failed to get nr available trbs ret: %d.\n",
ret_val);
+ panic("critical error");
return ret_val;
}
do {
@@ -1164,26 +1206,22 @@ int parse_xfer_event(struct mhi_device_ctxt *ctxt,
case MHI_EVENT_CC_OOB:
case MHI_EVENT_CC_DB_MODE:
{
- struct mhi_ring *chan_ctxt = NULL;
u64 db_value = 0;
- mhi_dev_ctxt->flags.uldl_enabled = 1;
- chan = MHI_EV_READ_CHID(EV_CHID, event);
- mhi_dev_ctxt->flags.db_mode[chan] = 1;
- chan_ctxt =
- &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
mhi_log(MHI_MSG_INFO, "DB_MODE/OOB Detected chan %d.\n", chan);
- if (chan_ctxt->wp != chan_ctxt->rp) {
+
+ local_chan_ctxt->db_mode.db_mode = 1;
+ if (local_chan_ctxt->wp != local_chan_ctxt->rp) {
db_value = mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING, chan,
- (uintptr_t) chan_ctxt->wp);
- mhi_process_db(mhi_dev_ctxt,
+ MHI_RING_TYPE_XFER_RING, chan,
+ (uintptr_t) local_chan_ctxt->wp);
+ local_chan_ctxt->db_mode.process_db(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr, chan,
db_value);
}
client_handle = mhi_dev_ctxt->client_handle_list[chan];
- if (NULL != client_handle)
- result->transaction_status = -ENOTCONN;
+ if (client_handle)
+ result->transaction_status = -ENOTCONN;
break;
}
case MHI_EVENT_CC_BAD_TRE:
@@ -1216,6 +1254,9 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
u64 db_value = 0;
void *removed_element = NULL;
void *added_element = NULL;
+ unsigned long flags;
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[ring_index];
ret_val = ctxt_del_element(ring, &removed_element);
@@ -1224,102 +1265,26 @@ int recycle_trb_and_ring(struct mhi_device_ctxt *mhi_dev_ctxt,
return ret_val;
}
ret_val = ctxt_add_element(ring, &added_element);
- if (0 != ret_val)
+ if (ret_val) {
mhi_log(MHI_MSG_ERROR, "Could not add element to ring\n");
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
- (uintptr_t) ring->wp);
- if (0 != ret_val)
return ret_val;
- if (MHI_RING_TYPE_XFER_RING == ring_type) {
- union mhi_xfer_pkt *removed_xfer_pkt =
- (union mhi_xfer_pkt *)removed_element;
- union mhi_xfer_pkt *added_xfer_pkt =
- (union mhi_xfer_pkt *)added_element;
- added_xfer_pkt->data_tx_pkt =
- *(struct mhi_tx_pkt *)removed_xfer_pkt;
- } else if (MHI_RING_TYPE_EVENT_RING == ring_type) {
-
- spinlock_t *lock;
- unsigned long flags;
-
- if (ring_index >= mhi_dev_ctxt->mmio_info.nr_event_rings)
- return -ERANGE;
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
- spin_lock_irqsave(lock, flags);
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type, ring_index,
- (uintptr_t) ring->wp);
- mhi_log(MHI_MSG_INFO,
- "Updating ctxt, ring index %d\n", ring_index);
- mhi_update_ctxt(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- ring_index, db_value);
- mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
- mhi_dev_ctxt->counters.ev_counter[ring_index]++;
- spin_unlock_irqrestore(lock, flags);
}
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- /* Asserting Device Wake here, will imediately wake mdm */
- if ((MHI_STATE_M0 == mhi_dev_ctxt->mhi_state ||
- MHI_STATE_M1 == mhi_dev_ctxt->mhi_state) &&
- mhi_dev_ctxt->flags.link_up) {
- switch (ring_type) {
- case MHI_RING_TYPE_CMD_RING:
- {
- struct mutex *cmd_mutex = NULL;
-
- cmd_mutex =
- &mhi_dev_ctxt->
- mhi_cmd_mutex_list[PRIMARY_CMD_RING];
- mutex_lock(cmd_mutex);
- mhi_dev_ctxt->cmd_ring_order = 1;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- ring_index, db_value);
- mutex_unlock(cmd_mutex);
- break;
- }
- case MHI_RING_TYPE_EVENT_RING:
- {
- spinlock_t *lock = NULL;
- unsigned long flags = 0;
-
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[ring_index];
- spin_lock_irqsave(lock, flags);
- mhi_dev_ctxt->mhi_ev_db_order[ring_index] = 1;
- if ((mhi_dev_ctxt->counters.ev_counter[ring_index] %
- MHI_EV_DB_INTERVAL) == 0) {
- db_value = mhi_v2p_addr(mhi_dev_ctxt, ring_type,
- ring_index,
- (uintptr_t) ring->wp);
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- ring_index, db_value);
- }
- spin_unlock_irqrestore(lock, flags);
- break;
- }
- case MHI_RING_TYPE_XFER_RING:
- {
- unsigned long flags = 0;
-
- spin_lock_irqsave(
- &mhi_dev_ctxt->db_write_lock[ring_index],
- flags);
- mhi_dev_ctxt->mhi_chan_db_order[ring_index] = 1;
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- ring_index, db_value);
- spin_unlock_irqrestore(
- &mhi_dev_ctxt->db_write_lock[ring_index],
- flags);
- break;
- }
- default:
- mhi_log(MHI_MSG_ERROR, "Bad ring type\n");
- }
- }
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- return ret_val;
+
+ if (!MHI_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ return -EACCES;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ ring_type,
+ ring_index,
+ (uintptr_t) ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ ring_index, db_value);
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+ return 0;
}
static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -1328,15 +1293,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
u32 chan = 0;
int ret_val = 0;
struct mhi_ring *local_chan_ctxt;
+ struct mhi_ring *ev_ring;
struct mhi_chan_ctxt *chan_ctxt;
+ struct mhi_event_ctxt *ev_ctxt = NULL;
struct mhi_client_handle *client_handle = NULL;
- struct mutex *chan_mutex;
- int pending_el = 0;
+ int pending_el = 0, i;
struct mhi_ring *bb_ctxt;
+ unsigned long flags;
+ union mhi_event_pkt *local_rp = NULL;
+ union mhi_event_pkt *device_rp = NULL;
MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
-
if (!VALID_CHAN_NR(chan)) {
mhi_log(MHI_MSG_ERROR,
"Bad channel number for CCE\n");
@@ -1344,13 +1312,41 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
}
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
- mutex_lock(chan_mutex);
client_handle = mhi_dev_ctxt->client_handle_list[chan];
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
chan_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[chan];
+ ev_ring = &mhi_dev_ctxt->
+ mhi_local_event_ctxt[chan_ctxt->mhi_event_ring_index];
+ ev_ctxt = &mhi_dev_ctxt->
+ dev_space.ring_ctxt.ec_list[chan_ctxt->mhi_event_ring_index];
mhi_log(MHI_MSG_INFO, "Processed cmd reset event\n");
+	/* Clear all stale events related to the channel */
+ spin_lock_irqsave(&ev_ring->ring_lock, flags);
+ device_rp = (union mhi_event_pkt *)mhi_p2v_addr(
+ mhi_dev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ chan_ctxt->mhi_event_ring_index,
+ ev_ctxt->mhi_event_read_ptr);
+ local_rp = (union mhi_event_pkt *)ev_ring->rp;
+ while (device_rp != local_rp) {
+ if (MHI_TRB_READ_INFO(EV_TRB_TYPE, local_rp) ==
+ MHI_PKT_TYPE_TX_EVENT) {
+ u32 ev_chan = MHI_EV_READ_CHID(EV_CHID, local_rp);
+
+ /* Mark as stale event */
+ if (ev_chan == chan)
+ MHI_TRB_SET_INFO(EV_TRB_TYPE,
+ local_rp,
+ MHI_PKT_TYPE_STALE_EVENT);
+ }
+
+ local_rp++;
+ if (local_rp == (ev_ring->base + ev_ring->len))
+ local_rp = ev_ring->base;
+ }
+ spin_unlock_irqrestore(&ev_ring->ring_lock, flags);
+
/*
* If outbound elements are pending, they must be cleared since
* they will never be acked after a channel reset.
@@ -1365,6 +1361,18 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
chan, pending_el);
atomic_sub(pending_el, &mhi_dev_ctxt->counters.outbound_acks);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ for (i = 0; i < pending_el; i++)
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ for (i = 0; i < pending_el; i++) {
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ }
/* Reset the local channel context */
local_chan_ctxt->rp = local_chan_ctxt->base;
@@ -1372,37 +1380,17 @@ static int reset_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
local_chan_ctxt->ack_rp = local_chan_ctxt->base;
/* Reset the mhi channel context */
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
chan_ctxt->mhi_trb_read_ptr = chan_ctxt->mhi_trb_ring_base_addr;
chan_ctxt->mhi_trb_write_ptr = chan_ctxt->mhi_trb_ring_base_addr;
mhi_log(MHI_MSG_INFO, "Cleaning up BB list\n");
reset_bb_ctxt(mhi_dev_ctxt, bb_ctxt);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
- mutex_unlock(chan_mutex);
mhi_log(MHI_MSG_INFO, "Reset complete.\n");
- if (NULL != client_handle)
- complete(&client_handle->chan_reset_complete);
return ret_val;
}
-static int start_chan_cmd(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_cmd_pkt *cmd_pkt)
-{
- u32 chan;
-
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- if (!VALID_CHAN_NR(chan))
- mhi_log(MHI_MSG_ERROR, "Bad chan: 0x%x\n", chan);
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] =
- MHI_CMD_NOT_PENDING;
- mhi_log(MHI_MSG_INFO, "Processed START CMD chan %d\n", chan);
- if (NULL != mhi_dev_ctxt->client_handle_list[chan])
- complete(
- &mhi_dev_ctxt->client_handle_list[chan]->chan_open_complete);
- return 0;
-}
enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
union mhi_event_pkt *ev_pkt,
union mhi_cmd_pkt **cmd_pkt,
@@ -1421,68 +1409,13 @@ enum MHI_EVENT_CCS get_cmd_pkt(struct mhi_device_ctxt *mhi_dev_ctxt,
return MHI_EV_READ_CODE(EV_TRB_CODE, ev_pkt);
}
-int parse_cmd_event(struct mhi_device_ctxt *mhi_dev_ctxt,
- union mhi_event_pkt *ev_pkt, u32 event_index)
-{
- int ret_val = 0;
- union mhi_cmd_pkt *cmd_pkt = NULL;
- u32 event_code = 0;
-
- event_code = get_cmd_pkt(mhi_dev_ctxt, ev_pkt, &cmd_pkt, event_index);
- switch (event_code) {
- case MHI_EVENT_CC_SUCCESS:
- {
- u32 chan = 0;
-
- MHI_TRB_GET_INFO(CMD_TRB_CHID, cmd_pkt, chan);
- switch (MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt)) {
- mhi_log(MHI_MSG_INFO, "CCE chan %d cmd %d\n", chan,
- MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
- case MHI_PKT_TYPE_RESET_CHAN_CMD:
- ret_val = reset_chan_cmd(mhi_dev_ctxt, cmd_pkt);
- if (ret_val)
- mhi_log(MHI_MSG_INFO,
- "Failed to process reset cmd ret %d\n",
- ret_val);
- break;
- case MHI_PKT_TYPE_STOP_CHAN_CMD:
- if (ret_val) {
- mhi_log(MHI_MSG_INFO,
- "Failed to set chan state\n");
- return ret_val;
- }
- break;
- case MHI_PKT_TYPE_START_CHAN_CMD:
- ret_val = start_chan_cmd(mhi_dev_ctxt, cmd_pkt);
- if (ret_val)
- mhi_log(MHI_MSG_INFO,
- "Failed to process reset cmd\n");
- break;
- default:
- mhi_log(MHI_MSG_INFO,
- "Bad cmd type 0x%x\n",
- MHI_TRB_READ_INFO(CMD_TRB_TYPE, cmd_pkt));
- break;
- }
- mhi_dev_ctxt->mhi_chan_pend_cmd_ack[chan] = MHI_CMD_NOT_PENDING;
- atomic_dec(&mhi_dev_ctxt->counters.outbound_acks);
- break;
- }
- default:
- mhi_log(MHI_MSG_INFO, "Unhandled mhi completion code\n");
- break;
- }
- ctxt_del_element(mhi_dev_ctxt->mhi_local_cmd_ctxt, NULL);
- return 0;
-}
-
int mhi_poll_inbound(struct mhi_client_handle *client_handle,
struct mhi_result *result)
{
struct mhi_tx_pkt *pending_trb = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
struct mhi_ring *local_chan_ctxt = NULL;
- struct mutex *chan_mutex = NULL;
+ struct mhi_chan_cfg *cfg;
struct mhi_ring *bb_ctxt = NULL;
struct mhi_buf_info *bb = NULL;
int chan = 0, r = 0;
@@ -1495,10 +1428,10 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
local_chan_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
- chan_mutex = &mhi_dev_ctxt->mhi_chan_mutex[chan];
+ cfg = &mhi_dev_ctxt->mhi_chan_cfg[chan];
bb_ctxt = &mhi_dev_ctxt->chan_bb_list[chan];
- mutex_lock(chan_mutex);
+ mutex_lock(&cfg->chan_lock);
if (bb_ctxt->rp != bb_ctxt->ack_rp) {
pending_trb = (struct mhi_tx_pkt *)(local_chan_ctxt->ack_rp);
result->flags = pending_trb->info;
@@ -1524,7 +1457,7 @@ int mhi_poll_inbound(struct mhi_client_handle *client_handle,
result->bytes_xferd = 0;
r = -ENODATA;
}
- mutex_unlock(chan_mutex);
+ mutex_unlock(&cfg->chan_lock);
mhi_log(MHI_MSG_VERBOSE,
"Exited Result: Buf addr: 0x%p Bytes xfed 0x%zx chan %d\n",
result->buf_addr, result->bytes_xferd, chan);
@@ -1581,49 +1514,80 @@ int mhi_get_epid(struct mhi_client_handle *client_handle)
return MHI_EPID;
}
-int mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+/*
+ * mhi_assert_device_wake - Set the WAKE_DB register
+ * @force_set: if true, set the bit regardless of the wake count
+ */
+void mhi_assert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool force_set)
{
- if ((mhi_dev_ctxt->mmio_info.chan_db_addr) &&
- (mhi_dev_ctxt->flags.link_up)) {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n",
- mhi_dev_ctxt->enable_lpm);
- atomic_set(&mhi_dev_ctxt->flags.device_wake, 1);
+ unsigned long flags;
+
+ if (unlikely(force_set)) {
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ atomic_inc(&mhi_dev_ctxt->counters.device_wake);
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB, 1);
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
+ } else {
+ if (likely(atomic_add_unless(&mhi_dev_ctxt->
+ counters.device_wake,
+ 1,
+ 0)))
+ return;
+
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ if ((atomic_inc_return(&mhi_dev_ctxt->counters.device_wake)
+ == 1) &&
+ MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
mhi_write_db(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 1);
- mhi_dev_ctxt->device_wake_asserted = 1;
- } else {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
+ MHI_DEV_WAKE_DB,
+ 1);
+ }
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
}
- return 0;
}
-inline int mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
+void mhi_deassert_device_wake(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- if ((mhi_dev_ctxt->enable_lpm) &&
- (atomic_read(&mhi_dev_ctxt->flags.device_wake)) &&
- (mhi_dev_ctxt->mmio_info.chan_db_addr != NULL) &&
- (mhi_dev_ctxt->flags.link_up)) {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d\n", mhi_dev_ctxt->enable_lpm);
- atomic_set(&mhi_dev_ctxt->flags.device_wake, 0);
- mhi_write_db(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.chan_db_addr,
- MHI_DEV_WAKE_DB, 0);
- mhi_dev_ctxt->device_wake_asserted = 0;
- } else {
- mhi_log(MHI_MSG_VERBOSE, "LPM %d DEV_WAKE %d link %d\n",
- mhi_dev_ctxt->enable_lpm,
- atomic_read(&mhi_dev_ctxt->flags.device_wake),
- mhi_dev_ctxt->flags.link_up);
- }
- return 0;
+ unsigned long flags;
+
+ WARN_ON(atomic_read(&mhi_dev_ctxt->counters.device_wake) == 0);
+
+ if (likely(atomic_add_unless
+ (&mhi_dev_ctxt->counters.device_wake, -1, 1)))
+ return;
+
+ spin_lock_irqsave(&mhi_dev_ctxt->dev_wake_lock, flags);
+ if ((atomic_dec_return(&mhi_dev_ctxt->counters.device_wake) == 0) &&
+ MHI_WAKE_DB_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state))
+ mhi_write_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ MHI_DEV_WAKE_DB,
+ 0);
+ spin_unlock_irqrestore(&mhi_dev_ctxt->dev_wake_lock, flags);
}
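/*
 * Hedged sketch of the voting pattern used by callers in this file:
 * device access is bracketed with balanced wake votes, taken under the
 * pm_xfer_lock read side (see mhi_open_channel/mhi_close_channel).
 *
 *	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 *	mhi_assert_device_wake(mhi_dev_ctxt, false);
 *	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 *	... access the device ...
 *	read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 *	mhi_deassert_device_wake(mhi_dev_ctxt);
 *	read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
 */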
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm)
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm)
{
- mhi_log(MHI_MSG_VERBOSE, "LPM Set %d\n", enable_lpm);
- client_handle->mhi_dev_ctxt->enable_lpm = enable_lpm ? 1 : 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt = client_handle->mhi_dev_ctxt;
+ unsigned long flags;
+
+ read_lock_irqsave(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
+ /* Disable low power mode by asserting Wake */
+	if (!enable_lpm)
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ else
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+
+ read_unlock_irqrestore(&mhi_dev_ctxt->pm_xfer_lock, flags);
+
return 0;
}
+EXPORT_SYMBOL(mhi_set_lpm);
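+/*
+ * Hedged usage sketch (assumed client code, not part of this patch):
+ * low power mode is disabled around latency-sensitive traffic and
+ * re-enabled afterwards; the calls must stay balanced because they
+ * map onto the device_wake vote counter.
+ *
+ *	mhi_set_lpm(client_handle, false);	(hold the device awake)
+ *	... latency-sensitive transfers ...
+ *	mhi_set_lpm(client_handle, true);	(allow low power again)
+ */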
int mhi_set_bus_request(struct mhi_device_ctxt *mhi_dev_ctxt,
int index)
@@ -1648,10 +1612,57 @@ int mhi_deregister_channel(struct mhi_client_handle
}
EXPORT_SYMBOL(mhi_deregister_channel);
+void mhi_process_db_brstmode(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val)
+{
+	struct mhi_ring *ring_ctxt;
+
+	if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr)
+		ring_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+	else
+		ring_ctxt = &mhi_dev_ctxt->mhi_local_event_ctxt[chan];
+
+ mhi_log(MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
+
+ mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+
+ if (ring_ctxt->db_mode.db_mode) {
+ mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+ ring_ctxt->db_mode.db_mode = 0;
+ } else {
+ mhi_log(MHI_MSG_INFO,
+ "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ chan,
+ ring_ctxt->db_mode.brstmode,
+ ring_ctxt->db_mode.db_mode);
+ }
+}
+
+void mhi_process_db_brstmode_disable(struct mhi_device_ctxt *mhi_dev_ctxt,
+ void __iomem *io_addr,
+ uintptr_t chan,
+ u32 val)
+{
+ mhi_log(MHI_MSG_VERBOSE,
+ "db.set addr: %p io_offset 0x%lx val:0x%x\n",
+ io_addr, chan, val);
+
+ mhi_update_ctxt(mhi_dev_ctxt, io_addr, chan, val);
+ mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
+}
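+/*
+ * Hedged sketch: ring setup code outside this hunk is assumed to pick
+ * one of the two doorbell handlers above per ring, based on the
+ * burst-mode setting carried in db_mode.brstmode:
+ *
+ *	ring->db_mode.process_db = ring->db_mode.brstmode ?
+ *		mhi_process_db_brstmode :
+ *		mhi_process_db_brstmode_disable;
+ */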
+
void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
void __iomem *io_addr,
uintptr_t chan, u32 val)
{
mhi_log(MHI_MSG_VERBOSE,
"db.set addr: %p io_offset 0x%lx val:0x%x\n",
io_addr, chan, val);
@@ -1660,28 +1671,29 @@ void mhi_process_db(struct mhi_device_ctxt *mhi_dev_ctxt,
/* Channel Doorbell and Polling Mode Disabled or Software Channel*/
if (io_addr == mhi_dev_ctxt->mmio_info.chan_db_addr) {
+ struct mhi_ring *chan_ctxt =
+ &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
if (!(IS_HARDWARE_CHANNEL(chan) &&
- mhi_dev_ctxt->flags.uldl_enabled &&
- !mhi_dev_ctxt->flags.db_mode[chan])) {
+ !chan_ctxt->db_mode.db_mode)) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
+ chan_ctxt->db_mode.db_mode = 0;
} else {
mhi_log(MHI_MSG_INFO,
- "Not ringing xfer db, chan %ld, ul_dl %d db_mode %d\n",
- chan, mhi_dev_ctxt->flags.uldl_enabled,
- mhi_dev_ctxt->flags.db_mode[chan]);
+ "Not ringing xfer db, chan %ld, brstmode %d db_mode %d\n",
+ chan, chan_ctxt->db_mode.brstmode,
+ chan_ctxt->db_mode.db_mode);
}
/* Event Doorbell and Polling mode Disabled */
} else if (io_addr == mhi_dev_ctxt->mmio_info.event_db_addr) {
- /* Only ring for software channel */
- if (IS_SW_EV_RING(mhi_dev_ctxt, chan) ||
- !mhi_dev_ctxt->flags.uldl_enabled) {
+ struct mhi_ring *ev_ctxt =
+ &mhi_dev_ctxt->mhi_local_event_ctxt[chan];
+		/* Only ring for software channel or db mode */
+ if (!(IS_HW_EV_RING(mhi_dev_ctxt, chan) &&
+ !ev_ctxt->db_mode.db_mode)) {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
} else {
mhi_write_db(mhi_dev_ctxt, io_addr, chan, val);
- mhi_dev_ctxt->flags.db_mode[chan] = 0;
}
}
diff --git a/drivers/platform/msm/mhi/mhi_mmio_ops.c b/drivers/platform/msm/mhi/mhi_mmio_ops.c
index ddf18e466cd3..b4447378683e 100644
--- a/drivers/platform/msm/mhi/mhi_mmio_ops.c
+++ b/drivers/platform/msm/mhi/mhi_mmio_ops.c
@@ -9,6 +9,19 @@
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
+#include <linux/completion.h>
+#include <linux/of_device.h>
+#include <linux/of_platform.h>
+#include <linux/of_gpio.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/msm-bus.h>
+#include <linux/cpu.h>
+#include <linux/kthread.h>
+#include <linux/slab.h>
+#include <linux/pm_runtime.h>
#include "mhi_sys.h"
#include "mhi_hwio.h"
#include "mhi.h"
@@ -17,25 +30,40 @@ int mhi_test_for_device_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
	u32 expiry_counter = 0;
+ unsigned long flags;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO RESET bit to be cleared.\n");
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
- MHISTATUS);
+ MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHICTRL_RESET_MASK,
MHICTRL_RESET_SHIFT);
+	read_unlock_irqrestore(pm_xfer_lock, flags);
if (pcie_word_val == 0xFFFFFFFF)
return -ENOTCONN;
+
while (MHI_STATE_RESET != pcie_word_val && expiry_counter < 100) {
expiry_counter++;
mhi_log(MHI_MSG_ERROR,
"Device is not RESET, sleeping and retrying.\n");
msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHICTRL);
MHI_READ_FIELD(pcie_word_val,
MHICTRL_RESET_MASK,
MHICTRL_RESET_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
}
if (MHI_STATE_READY != pcie_word_val)
@@ -47,15 +75,23 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
{
u32 pcie_word_val = 0;
	u32 expiry_counter = 0;
+ unsigned long flags;
+ rwlock_t *pm_xfer_lock = &mhi_dev_ctxt->pm_xfer_lock;
mhi_log(MHI_MSG_INFO, "Waiting for MMIO Ready bit to be set\n");
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
/* Read MMIO and poll for READY bit to be set */
pcie_word_val = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHISTATUS_READY_MASK,
MHISTATUS_READY_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
if (pcie_word_val == 0xFFFFFFFF)
return -ENOTCONN;
@@ -65,10 +101,16 @@ int mhi_test_for_device_ready(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(MHI_MSG_ERROR,
"Device is not ready, sleeping and retrying.\n");
msleep(MHI_READY_STATUS_TIMEOUT_MS);
+ read_lock_irqsave(pm_xfer_lock, flags);
+ if (!MHI_REG_ACCESS_VALID(mhi_dev_ctxt->mhi_pm_state)) {
+ read_unlock_irqrestore(pm_xfer_lock, flags);
+ return -EIO;
+ }
pcie_word_val = mhi_reg_read(mhi_dev_ctxt->mmio_info.mmio_addr,
MHISTATUS);
MHI_READ_FIELD(pcie_word_val,
MHISTATUS_READY_MASK, MHISTATUS_READY_SHIFT);
+ read_unlock_irqrestore(pm_xfer_lock, flags);
}
if (pcie_word_val != MHI_STATE_READY)
@@ -102,21 +144,20 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_dev_ctxt->dev_props->mhi_ver = mhi_reg_read(
mhi_dev_ctxt->mmio_info.mmio_addr, MHIVER);
if (MHI_VERSION != mhi_dev_ctxt->dev_props->mhi_ver) {
- mhi_log(MHI_MSG_CRITICAL, "Bad MMIO version, 0x%x\n",
- mhi_dev_ctxt->dev_props->mhi_ver);
- if (mhi_dev_ctxt->dev_props->mhi_ver == 0xFFFFFFFF)
- ret_val = mhi_wait_for_mdm(mhi_dev_ctxt);
- if (ret_val)
+ mhi_log(MHI_MSG_CRITICAL,
+ "Bad MMIO version, 0x%x\n",
+ mhi_dev_ctxt->dev_props->mhi_ver);
return ret_val;
}
+
/* Enable the channels */
for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
struct mhi_chan_ctxt *chan_ctxt =
&mhi_dev_ctxt->dev_space.ring_ctxt.cc_list[i];
if (VALID_CHAN_NR(i))
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_ENABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_ENABLED;
else
- chan_ctxt->mhi_chan_state = MHI_CHAN_STATE_DISABLED;
+ chan_ctxt->chstate = MHI_CHAN_STATE_DISABLED;
}
mhi_log(MHI_MSG_INFO,
"Read back MMIO Ready bit successfully. Moving on..\n");
@@ -144,6 +185,11 @@ int mhi_init_mmio(struct mhi_device_ctxt *mhi_dev_ctxt)
MHICFG,
MHICFG_NER_MASK, MHICFG_NER_SHIFT,
mhi_dev_ctxt->mmio_info.nr_event_rings);
+ mhi_reg_write_field(mhi_dev_ctxt, mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHICFG,
+ MHICFG_NHWER_MASK,
+ MHICFG_NHWER_SHIFT,
+ mhi_dev_ctxt->mmio_info.nr_hw_event_rings);
pcie_dword_val = mhi_dev_ctxt->dev_space.ring_ctxt.dma_cc_list;
pcie_word_val = HIGH_WORD(pcie_dword_val);
diff --git a/drivers/platform/msm/mhi/mhi_pm.c b/drivers/platform/msm/mhi/mhi_pm.c
index a928c05b805a..2f44601e225e 100644
--- a/drivers/platform/msm/mhi/mhi_pm.c
+++ b/drivers/platform/msm/mhi/mhi_pm.c
@@ -22,16 +22,12 @@
#include "mhi_hwio.h"
/* Write only sysfs attributes */
-static DEVICE_ATTR(MHI_M3, S_IWUSR, NULL, sysfs_init_m3);
static DEVICE_ATTR(MHI_M0, S_IWUSR, NULL, sysfs_init_m0);
-static DEVICE_ATTR(MHI_RESET, S_IWUSR, NULL, sysfs_init_mhi_reset);
/* Sysfs attribute list */
static struct attribute *mhi_attributes[] = {
- &dev_attr_MHI_M3.attr,
&dev_attr_MHI_M0.attr,
- &dev_attr_MHI_RESET.attr,
NULL,
};
@@ -42,21 +38,20 @@ static struct attribute_group mhi_attribute_group = {
int mhi_pci_suspend(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- if (NULL == mhi_dev_ctxt)
- return -EINVAL;
- mhi_log(MHI_MSG_INFO, "Entered, MHI state %d\n",
- mhi_dev_ctxt->mhi_state);
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 1);
+ mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m3(mhi_dev_ctxt);
+	/* If the runtime PM status is still active, force a suspend */
+ if (!pm_runtime_status_suspended(dev)) {
+ r = mhi_runtime_suspend(dev);
+ if (r)
+ return r;
+ }
- if (!r)
- return r;
+ pm_runtime_set_suspended(dev);
+ pm_runtime_disable(dev);
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
- mhi_log(MHI_MSG_INFO, "Exited, ret %d\n", r);
+ mhi_log(MHI_MSG_INFO, "Exit\n");
return r;
}
@@ -65,61 +60,150 @@ int mhi_runtime_suspend(struct device *dev)
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m3(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Init M3 failed ret %d\n", r);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mhi_log(MHI_MSG_INFO, "Entered with State:0x%x %s\n",
+ mhi_dev_ctxt->mhi_pm_state,
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+ /* Link is already disabled */
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_DISABLE ||
+ mhi_dev_ctxt->mhi_pm_state == MHI_PM_M3) {
+ mhi_log(MHI_MSG_INFO, "Already in active state, exiting\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return 0;
+ }
+
+ if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+ mhi_log(MHI_MSG_INFO, "Busy, Aborting Runtime Suspend\n");
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return -EBUSY;
+ }
+
+ mhi_assert_device_wake(mhi_dev_ctxt, false);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to get M0||M1 event, timeout, current state:%s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ r = -EIO;
+ goto rpm_suspend_exit;
+ }
+
+ mhi_log(MHI_MSG_INFO, "Allowing M3 State\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_ENTER;
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_log(MHI_MSG_INFO,
+ "Waiting for M3 completion.\n");
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
+ msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to get M3 event, timeout, current state:%s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ r = -EIO;
+ goto rpm_suspend_exit;
+ }
+
+ r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
+ if (r) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to Turn off link ret:%d\n", r);
+ }
- pm_runtime_mark_last_busy(dev);
+rpm_suspend_exit:
mhi_log(MHI_MSG_INFO, "Exited\n");
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
return r;
}
+int mhi_runtime_idle(struct device *dev)
+{
+ mhi_log(MHI_MSG_INFO, "Entered returning -EBUSY\n");
+
+	/*
+	 * When dev.power usage_count drops to 0, the runtime PM
+	 * framework calls the rpm_idle callback to ask whether the
+	 * device is ready to suspend. If the callback returns 0 (or
+	 * is not defined), the framework assumes the driver is ready
+	 * and schedules a runtime suspend.
+	 * In MHI power management, the host must enter runtime
+	 * suspend only after the device has entered MHI state M2,
+	 * even if the usage count is 0. Return -EBUSY to disable
+	 * automatic suspend.
+	 */
+ return -EBUSY;
+}
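+/*
+ * Hedged sketch of how these callbacks are assumed to be wired into
+ * the driver's dev_pm_ops (the registration itself is outside this
+ * hunk); SET_SYSTEM_SLEEP_PM_OPS and SET_RUNTIME_PM_OPS are the
+ * standard helpers from <linux/pm.h>:
+ *
+ *	static const struct dev_pm_ops mhi_dev_pm_ops = {
+ *		SET_SYSTEM_SLEEP_PM_OPS(mhi_pci_suspend, mhi_pci_resume)
+ *		SET_RUNTIME_PM_OPS(mhi_runtime_suspend,
+ *				   mhi_runtime_resume, mhi_runtime_idle)
+ *	};
+ */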
+
int mhi_runtime_resume(struct device *dev)
{
int r = 0;
struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- mhi_log(MHI_MSG_INFO, "Entered\n");
- r = mhi_initiate_m0(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Init M0 failed ret %d\n", r);
- pm_runtime_mark_last_busy(dev);
- mhi_log(MHI_MSG_INFO, "Exited\n");
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ WARN_ON(mhi_dev_ctxt->mhi_pm_state != MHI_PM_M3);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ /* turn on link */
+ r = mhi_turn_on_pcie_link(mhi_dev_ctxt);
+ if (r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to resume link\n");
+ goto rpm_resume_exit;
+ }
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3_EXIT;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
+ /* Set and wait for M0 Event */
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ r = wait_event_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
+ mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
+ msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
+ if (!r) {
+ mhi_log(MHI_MSG_ERROR,
+ "Failed to get M0 event, timeout\n");
+ r = -EIO;
+ goto rpm_resume_exit;
+ }
+ r = 0; /* no errors */
+
+rpm_resume_exit:
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ mhi_log(MHI_MSG_INFO, "Exited with :%d\n", r);
return r;
}
int mhi_pci_resume(struct device *dev)
{
int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt = dev->platform_data;
- r = mhi_initiate_m0(mhi_dev_ctxt);
- if (r)
- goto exit;
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "Timeout: No M0 event after %d ms\n",
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m0_event_timeouts++;
- r = -ETIME;
- break;
- case -ERESTARTSYS:
+ r = mhi_runtime_resume(dev);
+ if (r) {
mhi_log(MHI_MSG_CRITICAL,
- "Going Down...\n");
- break;
- default:
- mhi_log(MHI_MSG_INFO,
- "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
- r = 0;
+ "Failed to resume link\n");
+ } else {
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
}
-exit:
- atomic_set(&mhi_dev_ctxt->flags.pending_resume, 0);
+
return r;
}
@@ -133,57 +217,15 @@ void mhi_rem_pm_sysfs(struct device *dev)
return sysfs_remove_group(&dev->kobj, &mhi_attribute_group);
}
-ssize_t sysfs_init_m3(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- int r = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- r = mhi_initiate_m3(mhi_dev_ctxt);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to suspend %d\n", r);
- return r;
- }
- r = mhi_turn_off_pcie_link(mhi_dev_ctxt);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to turn off link ret %d\n", r);
-
- return count;
-}
-ssize_t sysfs_init_mhi_reset(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- int r = 0;
-
- mhi_log(MHI_MSG_INFO, "Triggering MHI Reset.\n");
- r = mhi_trigger_reset(mhi_dev_ctxt);
- if (r != 0)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to trigger MHI RESET ret %d\n",
- r);
- else
- mhi_log(MHI_MSG_INFO, "Triggered! MHI RESET\n");
- return count;
-}
ssize_t sysfs_init_m0(struct device *dev, struct device_attribute *attr,
const char *buf, size_t count)
{
struct mhi_device_ctxt *mhi_dev_ctxt =
&mhi_devices.device_list[0].mhi_ctxt;
- if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume link\n");
- return count;
- }
- mhi_initiate_m0(mhi_dev_ctxt);
- mhi_log(MHI_MSG_CRITICAL,
- "Current mhi_state = 0x%x\n",
- mhi_dev_ctxt->mhi_state);
+ pm_runtime_get(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return count;
}
@@ -194,35 +236,42 @@ int mhi_turn_off_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
mhi_log(MHI_MSG_INFO, "Entered...\n");
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
- mutex_lock(&mhi_dev_ctxt->mhi_link_state);
+
if (0 == mhi_dev_ctxt->flags.link_up) {
mhi_log(MHI_MSG_CRITICAL,
"Link already marked as down, nothing to do\n");
goto exit;
}
- /* Disable shadow to avoid restoring D3 hot struct device */
- r = msm_pcie_shadow_control(mhi_dev_ctxt->dev_info->pcie_device, 0);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to stop shadow config space: %d\n", r);
- r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device, PCI_D3hot);
+ r = pci_save_state(pcie_dev);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
- "Failed to set pcie power state to D3 hotret: %x\n", r);
- goto exit;
+ "Failed to save pcie state ret: %d\n",
+ r);
}
+ mhi_dev_ctxt->dev_props->pcie_state = pci_store_saved_state(pcie_dev);
+ pci_disable_device(pcie_dev);
+ r = pci_set_power_state(pcie_dev, PCI_D3hot);
+ if (r) {
+ mhi_log(MHI_MSG_CRITICAL,
+ "Failed to set pcie power state to D3 hot ret: %d\n",
+ r);
+ }
+
r = msm_pcie_pm_control(MSM_PCIE_SUSPEND,
- mhi_dev_ctxt->dev_info->pcie_device->bus->number,
- mhi_dev_ctxt->dev_info->pcie_device,
+ pcie_dev->bus->number,
+ pcie_dev,
NULL,
0);
if (r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to suspend pcie bus ret 0x%x\n", r);
+ "Failed to suspend pcie bus ret 0x%x\n", r);
+
+ r = mhi_set_bus_request(mhi_dev_ctxt, 0);
+ if (r)
+ mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
mhi_dev_ctxt->flags.link_up = 0;
exit:
- mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Exited...\n");
return 0;
}
@@ -234,37 +283,40 @@ int mhi_turn_on_pcie_link(struct mhi_device_ctxt *mhi_dev_ctxt)
pcie_dev = mhi_dev_ctxt->dev_info->pcie_device;
- mutex_lock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Entered...\n");
if (mhi_dev_ctxt->flags.link_up)
goto exit;
+
+ r = mhi_set_bus_request(mhi_dev_ctxt, 1);
+ if (r)
+ mhi_log(MHI_MSG_CRITICAL,
+ "Could not set bus frequency ret: %d\n",
+ r);
+
r = msm_pcie_pm_control(MSM_PCIE_RESUME,
- mhi_dev_ctxt->dev_info->pcie_device->bus->number,
- mhi_dev_ctxt->dev_info->pcie_device,
- NULL, 0);
+ pcie_dev->bus->number,
+ pcie_dev,
+ NULL,
+ 0);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to resume pcie bus ret %d\n", r);
goto exit;
}
- r = pci_set_power_state(mhi_dev_ctxt->dev_info->pcie_device,
- PCI_D0);
- if (r) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to load stored state %d\n", r);
- goto exit;
- }
- r = msm_pcie_recover_config(mhi_dev_ctxt->dev_info->pcie_device);
- if (r) {
+ r = pci_enable_device(pcie_dev);
+ if (r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to Recover config space ret: %d\n", r);
- goto exit;
- }
+ "Failed to enable device ret:%d\n",
+ r);
+
+ pci_load_and_free_saved_state(pcie_dev,
+ &mhi_dev_ctxt->dev_props->pcie_state);
+ pci_restore_state(pcie_dev);
+ pci_set_master(pcie_dev);
+
mhi_dev_ctxt->flags.link_up = 1;
exit:
- mutex_unlock(&mhi_dev_ctxt->mhi_link_state);
mhi_log(MHI_MSG_INFO, "Exited...\n");
return r;
}
-
diff --git a/drivers/platform/msm/mhi/mhi_ring_ops.c b/drivers/platform/msm/mhi/mhi_ring_ops.c
index e5a6dd51f51a..07d0098a1b61 100644
--- a/drivers/platform/msm/mhi/mhi_ring_ops.c
+++ b/drivers/platform/msm/mhi/mhi_ring_ops.c
@@ -48,6 +48,9 @@ static int add_element(struct mhi_ring *ring, void **rp,
*assigned_addr = (char *)ring->wp;
*wp = (void *)(((d_wp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
+
+	/* make the update visible to other cores */
+ smp_wmb();
return 0;
}
@@ -101,6 +104,9 @@ int delete_element(struct mhi_ring *ring, void **rp,
*rp = (void *)(((d_rp + 1) % ring_size) * ring->el_size +
(uintptr_t)ring->base);
+
+	/* make the update visible to other cores */
+ smp_wmb();
return 0;
}
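/*
 * Hedged note on the two smp_wmb() calls above: they order the ring
 * element write against the read/write pointer update. The consumer is
 * assumed to pair them with a read barrier before dereferencing a
 * newly visible element; process_element() below is a placeholder.
 *
 *	if (ring->rp != ring->wp) {
 *		smp_rmb();
 *		process_element(ring->rp);
 *	}
 */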
@@ -108,6 +114,7 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
{
u32 chan;
struct mhi_device_ctxt *ctxt;
+ int bb_ring, ch_ring;
if (!client_handle || MHI_HANDLE_MAGIC != client_handle->magic ||
!client_handle->mhi_dev_ctxt)
@@ -115,7 +122,10 @@ int mhi_get_free_desc(struct mhi_client_handle *client_handle)
ctxt = client_handle->mhi_dev_ctxt;
chan = client_handle->chan_info.chan_nr;
- return get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+ bb_ring = get_nr_avail_ring_elements(&ctxt->chan_bb_list[chan]);
+ ch_ring = get_nr_avail_ring_elements(&ctxt->mhi_local_chan_ctxt[chan]);
+
+ return min(bb_ring, ch_ring);
}
EXPORT_SYMBOL(mhi_get_free_desc);
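/*
 * Hedged usage sketch (assumed client code): since the free descriptor
 * count is now the minimum of the bounce-buffer and channel rings, a
 * client can batch submissions against it; have_data(), buf, len and
 * flags are placeholders.
 *
 *	int nr = mhi_get_free_desc(client_handle);
 *
 *	while (nr-- > 0 && have_data())
 *		mhi_queue_xfer(client_handle, buf, len, flags);
 */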
diff --git a/drivers/platform/msm/mhi/mhi_ssr.c b/drivers/platform/msm/mhi/mhi_ssr.c
index 8ee3deddbb95..defd6f4fd137 100644
--- a/drivers/platform/msm/mhi/mhi_ssr.c
+++ b/drivers/platform/msm/mhi/mhi_ssr.c
@@ -10,6 +10,7 @@
* GNU General Public License for more details.
*/
+#include <linux/pm_runtime.h>
#include <mhi_sys.h>
#include <mhi.h>
#include <mhi_bhi.h>
@@ -23,24 +24,11 @@
static int mhi_ssr_notify_cb(struct notifier_block *nb,
unsigned long action, void *data)
{
- int ret_val = 0;
- struct mhi_device_ctxt *mhi_dev_ctxt =
- &mhi_devices.device_list[0].mhi_ctxt;
- struct mhi_pcie_dev_info *mhi_pcie_dev = NULL;
- mhi_pcie_dev = &mhi_devices.device_list[mhi_devices.nr_of_devices];
- if (NULL != mhi_dev_ctxt)
- mhi_dev_ctxt->esoc_notif = action;
switch (action) {
case SUBSYS_BEFORE_POWERUP:
mhi_log(MHI_MSG_INFO,
"Received Subsystem event BEFORE_POWERUP\n");
- atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 1);
- ret_val = init_mhi_base_state(mhi_dev_ctxt);
- if (0 != ret_val)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to transition to base state %d.\n",
- ret_val);
break;
case SUBSYS_AFTER_POWERUP:
mhi_log(MHI_MSG_INFO,
@@ -148,7 +136,7 @@ void mhi_notify_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
}
}
-static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
+int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
{
u32 pcie_word_val = 0;
int r = 0;
@@ -159,13 +147,11 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
mhi_pcie_dev->bhi_ctxt.bhi_base += pcie_word_val;
pcie_word_val = mhi_reg_read(mhi_pcie_dev->bhi_ctxt.bhi_base,
BHI_EXECENV);
+ mhi_dev_ctxt->dev_exec_env = pcie_word_val;
if (pcie_word_val == MHI_EXEC_ENV_AMSS) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_RESET;
} else if (pcie_word_val == MHI_EXEC_ENV_PBL) {
mhi_dev_ctxt->base_state = STATE_TRANSITION_BHI;
- r = bhi_probe(mhi_pcie_dev);
- if (r)
- mhi_log(MHI_MSG_ERROR, "Failed to initialize BHI.\n");
} else {
mhi_log(MHI_MSG_ERROR, "Invalid EXEC_ENV: 0x%x\n",
pcie_word_val);
@@ -178,10 +164,9 @@ static int set_mhi_base_state(struct mhi_pcie_dev_info *mhi_pcie_dev)
void mhi_link_state_cb(struct msm_pcie_notify *notify)
{
- int ret_val = 0;
+
struct mhi_pcie_dev_info *mhi_pcie_dev;
struct mhi_device_ctxt *mhi_dev_ctxt = NULL;
- int r = 0;
if (NULL == notify || NULL == notify->data) {
mhi_log(MHI_MSG_CRITICAL,
@@ -198,32 +183,6 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
case MSM_PCIE_EVENT_LINKUP:
mhi_log(MHI_MSG_INFO,
"Received MSM_PCIE_EVENT_LINKUP\n");
- if (0 == mhi_pcie_dev->link_up_cntr) {
- mhi_log(MHI_MSG_INFO,
- "Initializing MHI for the first time\n");
- r = mhi_ctxt_init(mhi_pcie_dev);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "MHI initialization failed, ret %d.\n",
- r);
- r = msm_pcie_register_event(
- &mhi_pcie_dev->mhi_pci_link_event);
- mhi_log(MHI_MSG_ERROR,
- "Deregistered from PCIe notif r %d.\n",
- r);
- return;
- }
- mhi_dev_ctxt = &mhi_pcie_dev->mhi_ctxt;
- mhi_pcie_dev->mhi_ctxt.flags.link_up = 1;
- pci_set_master(mhi_pcie_dev->pcie_device);
- r = set_mhi_base_state(mhi_pcie_dev);
- if (r)
- return;
- init_mhi_base_state(mhi_dev_ctxt);
- } else {
- mhi_log(MHI_MSG_INFO,
- "Received Link Up Callback\n");
- }
mhi_pcie_dev->link_up_cntr++;
break;
case MSM_PCIE_EVENT_WAKEUP:
@@ -231,17 +190,14 @@ void mhi_link_state_cb(struct msm_pcie_notify *notify)
"Received MSM_PCIE_EVENT_WAKE\n");
__pm_stay_awake(&mhi_dev_ctxt->w_lock);
__pm_relax(&mhi_dev_ctxt->w_lock);
- if (atomic_read(&mhi_dev_ctxt->flags.pending_resume)) {
- mhi_log(MHI_MSG_INFO,
- "There is a pending resume, doing nothing.\n");
- return;
- }
- ret_val = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_WAKE);
- if (0 != ret_val) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to init state transition, to %d\n",
- STATE_TRANSITION_WAKE);
+
+ if (mhi_dev_ctxt->flags.mhi_initialized) {
+ pm_runtime_get(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_runtime_put_noidle(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
}
break;
default:
@@ -255,12 +211,6 @@ int init_mhi_base_state(struct mhi_device_ctxt *mhi_dev_ctxt)
{
int r = 0;
- mhi_assert_device_wake(mhi_dev_ctxt);
- mhi_dev_ctxt->flags.link_up = 1;
- r = mhi_set_bus_request(mhi_dev_ctxt, 1);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to scale bus request to active set.\n");
r = mhi_init_state_transition(mhi_dev_ctxt, mhi_dev_ctxt->base_state);
if (r) {
mhi_log(MHI_MSG_CRITICAL,
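The reworked WAKEUP branch above uses a standard runtime-PM idiom: take a usage reference to request an asynchronous resume, refresh the last-busy timestamp, then drop the reference without an immediate idle check so the autosuspend timer restarts from now. A hedged sketch of the same pattern against a generic struct device:

#include <linux/device.h>
#include <linux/pm_runtime.h>

/* Nudge a device awake and rearm its autosuspend window (sketch). */
static void demo_wake_nudge(struct device *dev)
{
	pm_runtime_get(dev);		/* async resume request, usage +1 */
	pm_runtime_mark_last_busy(dev);	/* restart the autosuspend timer */
	pm_runtime_put_noidle(dev);	/* usage -1, no idle check now */
}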
diff --git a/drivers/platform/msm/mhi/mhi_states.c b/drivers/platform/msm/mhi/mhi_states.c
index ca4520a3c57d..1021a56d1b3d 100644
--- a/drivers/platform/msm/mhi/mhi_states.c
+++ b/drivers/platform/msm/mhi/mhi_states.c
@@ -17,7 +17,40 @@
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
-static inline void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
+const char *state_transition_str(enum STATE_TRANSITION state)
+{
+ static const char * const mhi_states_transition_str[] = {
+ "RESET",
+ "READY",
+ "M0",
+ "M1",
+ "M2",
+ "M3",
+ "BHI",
+ "SBL",
+ "AMSS",
+ "LINK_DOWN",
+ "WAKE"
+ };
+
+ if (state == STATE_TRANSITION_SYS_ERR)
+ return "SYS_ERR";
+
+ return (state <= STATE_TRANSITION_WAKE) ?
+ mhi_states_transition_str[state] : "Invalid";
+}
+
+enum MHI_STATE mhi_get_m_state(struct mhi_device_ctxt *mhi_dev_ctxt)
+{
+ u32 state = mhi_reg_read_field(mhi_dev_ctxt->mmio_info.mmio_addr,
+ MHISTATUS,
+ MHISTATUS_MHISTATE_MASK,
+ MHISTATUS_MHISTATE_SHIFT);
+
+ return (state >= MHI_STATE_LIMIT) ? MHI_STATE_LIMIT : state;
+}
+
+void mhi_set_m_state(struct mhi_device_ctxt *mhi_dev_ctxt,
enum MHI_STATE new_state)
{
if (MHI_STATE_RESET == new_state) {
@@ -41,23 +74,22 @@ static void conditional_chan_db_write(
{
u64 db_value;
unsigned long flags;
-
- mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
- spin_lock_irqsave(&mhi_dev_ctxt->db_write_lock[chan], flags);
- if (0 == mhi_dev_ctxt->mhi_chan_db_order[chan]) {
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt,
- MHI_RING_TYPE_XFER_RING, chan,
- (uintptr_t)mhi_dev_ctxt->mhi_local_chan_ctxt[chan].wp);
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.chan_db_addr,
- chan, db_value);
- }
- mhi_dev_ctxt->mhi_chan_db_order[chan] = 0;
- spin_unlock_irqrestore(&mhi_dev_ctxt->db_write_lock[chan], flags);
+ struct mhi_ring *mhi_ring = &mhi_dev_ctxt->mhi_local_chan_ctxt[chan];
+
+ spin_lock_irqsave(&mhi_ring->ring_lock, flags);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_XFER_RING,
+ chan,
+ (uintptr_t)mhi_ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.chan_db_addr,
+ chan,
+ db_value);
+ spin_unlock_irqrestore(&mhi_ring->ring_lock, flags);
}
-static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
+static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt,
+ bool reset_db_mode)
{
u32 i = 0;
struct mhi_ring *local_ctxt = NULL;
@@ -66,40 +98,38 @@ static void ring_all_chan_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
for (i = 0; i < MHI_MAX_CHANNELS; ++i)
if (VALID_CHAN_NR(i)) {
local_ctxt = &mhi_dev_ctxt->mhi_local_chan_ctxt[i];
- if (IS_HARDWARE_CHANNEL(i))
- mhi_dev_ctxt->flags.db_mode[i] = 1;
- if ((local_ctxt->wp != local_ctxt->rp) ||
- ((local_ctxt->wp != local_ctxt->rp) &&
- (local_ctxt->dir == MHI_IN)))
+
+			/* Restore DB mode unless the channel preserves its DB state */
+ if (local_ctxt->db_mode.preserve_db_state == 0
+ && reset_db_mode)
+ local_ctxt->db_mode.db_mode = 1;
+
+ if (local_ctxt->wp != local_ctxt->rp)
conditional_chan_db_write(mhi_dev_ctxt, i);
}
}
static void ring_all_cmd_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
{
- struct mutex *cmd_mutex = NULL;
u64 db_value;
u64 rp = 0;
struct mhi_ring *local_ctxt = NULL;
mhi_log(MHI_MSG_VERBOSE, "Ringing chan dbs\n");
- cmd_mutex = &mhi_dev_ctxt->mhi_cmd_mutex_list[PRIMARY_CMD_RING];
- mhi_dev_ctxt->cmd_ring_order = 0;
- mutex_lock(cmd_mutex);
+
local_ctxt = &mhi_dev_ctxt->mhi_local_cmd_ctxt[PRIMARY_CMD_RING];
rp = mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
PRIMARY_CMD_RING,
(uintptr_t)local_ctxt->rp);
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_CMD_RING,
- PRIMARY_CMD_RING,
- (uintptr_t)mhi_dev_ctxt->mhi_local_cmd_ctxt[0].wp);
- if (0 == mhi_dev_ctxt->cmd_ring_order && rp != db_value)
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.cmd_db_addr,
- 0, db_value);
- mhi_dev_ctxt->cmd_ring_order = 0;
- mutex_unlock(cmd_mutex);
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_CMD_RING,
+ PRIMARY_CMD_RING,
+ (uintptr_t)local_ctxt->wp);
+ if (rp != db_value)
+ local_ctxt->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.cmd_db_addr,
+ 0,
+ db_value);
}
static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
@@ -107,24 +137,23 @@ static void ring_all_ev_dbs(struct mhi_device_ctxt *mhi_dev_ctxt)
u32 i;
u64 db_value = 0;
struct mhi_event_ctxt *event_ctxt = NULL;
+ struct mhi_ring *mhi_ring;
spinlock_t *lock = NULL;
unsigned long flags;
for (i = 0; i < mhi_dev_ctxt->mmio_info.nr_event_rings; ++i) {
- lock = &mhi_dev_ctxt->mhi_ev_spinlock_list[i];
- mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+ mhi_ring = &mhi_dev_ctxt->mhi_local_event_ctxt[i];
+ lock = &mhi_ring->ring_lock;
spin_lock_irqsave(lock, flags);
event_ctxt = &mhi_dev_ctxt->dev_space.ring_ctxt.ec_list[i];
- db_value =
- mhi_v2p_addr(mhi_dev_ctxt, MHI_RING_TYPE_EVENT_RING,
- i,
- (uintptr_t)mhi_dev_ctxt->mhi_local_event_ctxt[i].wp);
- if (0 == mhi_dev_ctxt->mhi_ev_db_order[i]) {
- mhi_process_db(mhi_dev_ctxt,
- mhi_dev_ctxt->mmio_info.event_db_addr,
- i, db_value);
- }
- mhi_dev_ctxt->mhi_ev_db_order[i] = 0;
+ db_value = mhi_v2p_addr(mhi_dev_ctxt,
+ MHI_RING_TYPE_EVENT_RING,
+ i,
+ (uintptr_t)mhi_ring->wp);
+ mhi_ring->db_mode.process_db(mhi_dev_ctxt,
+ mhi_dev_ctxt->mmio_info.event_db_addr,
+ i,
+ db_value);
spin_unlock_irqrestore(lock, flags);
}
}
@@ -133,168 +162,121 @@ static int process_m0_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- unsigned long flags;
- int r = 0;
- mhi_log(MHI_MSG_INFO, "Entered\n");
+	mhi_log(MHI_MSG_INFO, "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M2) {
+ switch (mhi_dev_ctxt->mhi_state) {
+ case MHI_STATE_M2:
mhi_dev_ctxt->counters.m2_m0++;
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M3) {
- mhi_dev_ctxt->counters.m3_m0++;
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_READY) {
- mhi_log(MHI_MSG_INFO,
- "Transitioning from READY.\n");
- } else if (mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_log(MHI_MSG_INFO,
- "Transitioning from M1.\n");
- } else {
- mhi_log(MHI_MSG_INFO,
- "MHI State %d link state %d. Quitting\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.link_up);
+ break;
+ case MHI_STATE_M3:
+ mhi_dev_ctxt->counters.m3_m0++;
+ break;
+ default:
+ break;
}
- read_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_M0;
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- mhi_assert_device_wake(mhi_dev_ctxt);
- read_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M0;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, true);
if (mhi_dev_ctxt->flags.mhi_initialized) {
ring_all_ev_dbs(mhi_dev_ctxt);
- ring_all_chan_dbs(mhi_dev_ctxt);
+ ring_all_chan_dbs(mhi_dev_ctxt, true);
ring_all_cmd_dbs(mhi_dev_ctxt);
}
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- r = mhi_set_bus_request(mhi_dev_ctxt, 1);
- if (r)
- mhi_log(MHI_MSG_CRITICAL,
- "Could not set bus frequency ret: %d\n",
- r);
- mhi_dev_ctxt->flags.pending_M0 = 0;
- if (atomic_read(&mhi_dev_ctxt->flags.pending_powerup)) {
- atomic_set(&mhi_dev_ctxt->flags.pending_ssr, 0);
- atomic_set(&mhi_dev_ctxt->flags.pending_powerup, 0);
- }
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m0_event);
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (!mhi_dev_ctxt->flags.pending_M3 &&
- mhi_dev_ctxt->flags.link_up &&
- mhi_dev_ctxt->flags.mhi_initialized)
- mhi_deassert_device_wake(mhi_dev_ctxt);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m0_event);
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
}
-static int process_m1_transition(
- struct mhi_device_ctxt *mhi_dev_ctxt,
- enum STATE_TRANSITION cur_work_item)
+void process_m1_transition(struct work_struct *work)
{
- unsigned long flags = 0;
- int r = 0;
+ struct mhi_device_ctxt *mhi_dev_ctxt;
+
+ mhi_dev_ctxt = container_of(work,
+ struct mhi_device_ctxt,
+ process_m1_worker);
+ mutex_lock(&mhi_dev_ctxt->pm_lock);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(MHI_MSG_INFO,
- "Processing M1 state transition from state %d\n",
- mhi_dev_ctxt->mhi_state);
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (!mhi_dev_ctxt->flags.pending_M3) {
- mhi_log(MHI_MSG_INFO, "Setting M2 Transition flag\n");
- atomic_inc(&mhi_dev_ctxt->flags.m2_transition);
- mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
- mhi_log(MHI_MSG_INFO, "Allowing transition to M2\n");
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
- mhi_dev_ctxt->counters.m1_m2++;
+ "Processing M1 state transition from state %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+
+	/* We either entered M3 or completed an M3->M0 exit */
+ if (mhi_dev_ctxt->mhi_pm_state != MHI_PM_M1) {
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
+ return;
}
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO, "Failed to update bus request\n");
- mhi_log(MHI_MSG_INFO, "Debouncing M2\n");
+	mhi_log(MHI_MSG_INFO, "Entering M1->M2 transition\n");
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M1_M2_TRANSITION;
+ mhi_dev_ctxt->counters.m1_m2++;
+ mhi_dev_ctxt->mhi_state = MHI_STATE_M2;
+ mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M2);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
msleep(MHI_M2_DEBOUNCE_TMR_MS);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_log(MHI_MSG_INFO, "Pending acks %d\n",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
- if (atomic_read(&mhi_dev_ctxt->counters.outbound_acks) ||
- mhi_dev_ctxt->flags.pending_M3) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- } else {
- pm_runtime_mark_last_busy(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- r = pm_request_autosuspend(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (r && r != -EAGAIN) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to remove counter ret %d\n", r);
- BUG_ON(mhi_dev_ctxt->dev_info->
- pcie_device->dev.power.runtime_error);
- }
+	/* An M0 event may have arrived during the debounce window */
+ if (mhi_dev_ctxt->mhi_pm_state == MHI_PM_M1_M2_TRANSITION) {
+ mhi_log(MHI_MSG_INFO, "Entered M2 State\n");
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M2;
}
- atomic_set(&mhi_dev_ctxt->flags.m2_transition, 0);
- mhi_log(MHI_MSG_INFO, "M2 transition complete.\n");
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- BUG_ON(atomic_read(&mhi_dev_ctxt->outbound_acks) < 0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
- return 0;
+ if (unlikely(atomic_read(&mhi_dev_ctxt->counters.device_wake))) {
+ mhi_log(MHI_MSG_INFO, "Exiting M2 Immediately, count:%d\n",
+ atomic_read(&mhi_dev_ctxt->counters.device_wake));
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_assert_device_wake(mhi_dev_ctxt, true);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ } else {
+		mhi_log(MHI_MSG_INFO, "Schedule RPM suspend\n");
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ pm_request_autosuspend(&mhi_dev_ctxt->
+ dev_info->pcie_device->dev);
+ }
+ mutex_unlock(&mhi_dev_ctxt->pm_lock);
}
static int process_m3_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- unsigned long flags;
mhi_log(MHI_MSG_INFO,
- "Processing M3 state transition\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
- mhi_dev_ctxt->flags.pending_M3 = 0;
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.m3_event);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->counters.m0_m3++;
- return 0;
-}
-
-static int mhi_process_link_down(
- struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- unsigned long flags;
- int r;
-
- mhi_log(MHI_MSG_INFO, "Entered.\n");
- if (NULL == mhi_dev_ctxt)
- return -EINVAL;
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->flags.mhi_initialized = 0;
- mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- mhi_deassert_device_wake(mhi_dev_ctxt);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
- mhi_dev_ctxt->flags.stop_threads = 1;
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
- while (!mhi_dev_ctxt->flags.ev_thread_stopped) {
- wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.mhi_event_wq);
- mhi_log(MHI_MSG_INFO,
- "Waiting for threads to SUSPEND EVT: %d, STT: %d\n",
- mhi_dev_ctxt->flags.st_thread_stopped,
- mhi_dev_ctxt->flags.ev_thread_stopped);
- msleep(20);
+ switch (mhi_dev_ctxt->mhi_state) {
+ case MHI_STATE_M1:
+ mhi_dev_ctxt->counters.m1_m3++;
+ break;
+ case MHI_STATE_M0:
+ mhi_dev_ctxt->counters.m0_m3++;
+ break;
+ default:
+ break;
}
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to scale bus request to sleep set.\n");
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- mhi_dev_ctxt->dev_info->link_down_cntr++;
- atomic_set(&mhi_dev_ctxt->flags.data_pending, 0);
- mhi_log(MHI_MSG_INFO, "Exited.\n");
-
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_M3;
+ mhi_dev_ctxt->mhi_pm_state = MHI_PM_M3;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ wake_up(mhi_dev_ctxt->mhi_ev_wq.m3_event);
return 0;
}
@@ -302,51 +284,20 @@ static int process_link_down_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_log(MHI_MSG_INFO, "Entered\n");
- if (0 !=
- mhi_process_link_down(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to process link down\n");
- }
- mhi_log(MHI_MSG_INFO, "Exited.\n");
- return 0;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
static int process_wake_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
-
- mhi_log(MHI_MSG_INFO, "Entered\n");
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
-
- if (atomic_read(&mhi_dev_ctxt->flags.pending_ssr)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Pending SSR, Ignoring.\n");
- goto exit;
- }
- if (mhi_dev_ctxt->flags.mhi_initialized) {
- r = pm_request_resume(
- &mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_VERBOSE,
- "MHI is initialized, transitioning to M0, ret %d\n", r);
- }
-
- if (!mhi_dev_ctxt->flags.mhi_initialized) {
- mhi_log(MHI_MSG_INFO,
- "MHI is not initialized transitioning to base.\n");
- r = init_mhi_base_state(mhi_dev_ctxt);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to transition to base state %d.\n",
- r);
- }
-
-exit:
- __pm_relax(&mhi_dev_ctxt->w_lock);
- mhi_log(MHI_MSG_INFO, "Exited.\n");
- return r;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
@@ -354,9 +305,10 @@ static int process_bhi_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- mhi_turn_on_pcie_link(mhi_dev_ctxt);
mhi_log(MHI_MSG_INFO, "Entered\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_BHI;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.bhi_event);
mhi_log(MHI_MSG_INFO, "Exited\n");
return 0;
@@ -369,36 +321,42 @@ static int process_ready_transition(
int r = 0;
mhi_log(MHI_MSG_INFO, "Processing READY state transition\n");
- mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
r = mhi_reset_all_thread_queues(mhi_dev_ctxt);
-
- if (r)
+ if (r) {
mhi_log(MHI_MSG_ERROR,
"Failed to reset thread queues\n");
+ return r;
+ }
+
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_dev_ctxt->mhi_state = MHI_STATE_READY;
r = mhi_init_mmio(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
/* Initialize MMIO */
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failure during MMIO initialization\n");
return r;
}
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
-
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
if (r) {
mhi_log(MHI_MSG_ERROR,
"Failure during event ring init\n");
return r;
}
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->flags.stop_threads = 0;
- mhi_assert_device_wake(mhi_dev_ctxt);
mhi_reg_write_field(mhi_dev_ctxt,
mhi_dev_ctxt->mmio_info.mmio_addr, MHICTRL,
MHICTRL_MHISTATE_MASK,
MHICTRL_MHISTATE_SHIFT,
MHI_STATE_M0);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
return r;
}
@@ -421,37 +379,22 @@ static int process_reset_transition(
enum STATE_TRANSITION cur_work_item)
{
int r = 0, i = 0;
- unsigned long flags = 0;
-
mhi_log(MHI_MSG_INFO, "Processing RESET state transition\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
mhi_dev_ctxt->counters.mhi_reset_cntr++;
- mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_PBL;
r = mhi_test_for_device_reset(mhi_dev_ctxt);
if (r)
mhi_log(MHI_MSG_INFO, "Device not RESET ret %d\n", r);
r = mhi_test_for_device_ready(mhi_dev_ctxt);
- switch (r) {
- case 0:
- break;
- case -ENOTCONN:
- mhi_log(MHI_MSG_CRITICAL, "Link down detected\n");
- break;
- case -ETIMEDOUT:
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans\n",
- STATE_TRANSITION_RESET);
- break;
- default:
- mhi_log(MHI_MSG_CRITICAL,
- "Unexpected ret code detected for\n");
- break;
+ if (r) {
+ mhi_log(MHI_MSG_ERROR, "timed out waiting for ready ret:%d\n",
+ r);
+ return r;
}
+
for (i = 0; i < NR_OF_CMD_RINGS; ++i) {
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].rp =
mhi_dev_ctxt->mhi_local_cmd_ctxt[i].base;
@@ -475,8 +418,8 @@ static int process_reset_transition(
STATE_TRANSITION_READY);
if (0 != r)
mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans\n",
- STATE_TRANSITION_READY);
+ "Failed to initiate %s state trans\n",
+ state_transition_str(STATE_TRANSITION_READY));
return r;
}
@@ -484,45 +427,10 @@ static int process_syserr_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
-
- mhi_log(MHI_MSG_CRITICAL, "Received SYS ERROR. Resetting MHI\n");
- mhi_dev_ctxt->mhi_state = MHI_STATE_RESET;
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to init state transition to RESET ret %d\n", r);
- mhi_log(MHI_MSG_CRITICAL, "Failed to reset mhi\n");
- }
- return r;
-}
-
-int start_chan_sync(struct mhi_client_handle *client_handle)
-{
- int r = 0;
- int chan = client_handle->chan_info.chan_nr;
-
- init_completion(&client_handle->chan_open_complete);
- r = mhi_send_cmd(client_handle->mhi_dev_ctxt,
- MHI_COMMAND_START_CHAN,
- chan);
- if (r != 0) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to send start command for chan %d ret %d\n",
- chan, r);
- return r;
- }
- r = wait_for_completion_timeout(
- &client_handle->chan_open_complete,
- msecs_to_jiffies(MHI_MAX_CMD_TIMEOUT));
- if (!r) {
- mhi_log(MHI_MSG_ERROR,
- "Timed out waiting for chan %d start completion\n",
- chan);
- r = -ETIME;
- }
- return 0;
+ mhi_log(MHI_MSG_INFO,
+ "Entered with State %s\n",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state));
+ return -EIO;
}
static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
@@ -546,8 +454,7 @@ static void enable_clients(struct mhi_device_ctxt *mhi_dev_ctxt,
chan_info.flags))
mhi_notify_client(client_handle, MHI_CB_MHI_ENABLED);
}
- if (exec_env == MHI_EXEC_ENV_AMSS)
- mhi_deassert_device_wake(mhi_dev_ctxt);
+
mhi_log(MHI_MSG_INFO, "Done.\n");
}
@@ -555,36 +462,25 @@ static int process_sbl_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0;
- pm_runtime_set_autosuspend_delay(
- &mhi_dev_ctxt->dev_info->pcie_device->dev,
- MHI_RPM_AUTOSUSPEND_TMR_VAL_MS);
- pm_runtime_use_autosuspend(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- r = pm_runtime_set_active(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- if (r) {
- mhi_log(MHI_MSG_ERROR,
- "Failed to activate runtime pm ret %d\n", r);
- }
- pm_runtime_enable(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Enabled runtime pm autosuspend\n");
+	mhi_log(MHI_MSG_INFO, "Processing SBL state transition\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_SBL;
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
- pm_runtime_put_noidle(&mhi_dev_ctxt->dev_info->pcie_device->dev);
return 0;
-
}
static int process_amss_transition(
struct mhi_device_ctxt *mhi_dev_ctxt,
enum STATE_TRANSITION cur_work_item)
{
- int r = 0, i = 0;
- struct mhi_client_handle *client_handle = NULL;
+ int r = 0;
mhi_log(MHI_MSG_INFO, "Processing AMSS state transition\n");
+ write_lock_irq(&mhi_dev_ctxt->pm_xfer_lock);
mhi_dev_ctxt->dev_exec_env = MHI_EXEC_ENV_AMSS;
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- mhi_assert_device_wake(mhi_dev_ctxt);
+ write_unlock_irq(&mhi_dev_ctxt->pm_xfer_lock);
+
if (!mhi_dev_ctxt->flags.mhi_initialized) {
r = mhi_add_elements_to_event_rings(mhi_dev_ctxt,
cur_work_item);
@@ -592,54 +488,40 @@ static int process_amss_transition(
if (r) {
mhi_log(MHI_MSG_CRITICAL,
"Failed to set local chan state ret %d\n", r);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
return r;
}
- ring_all_chan_dbs(mhi_dev_ctxt);
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ ring_all_chan_dbs(mhi_dev_ctxt, true);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
mhi_log(MHI_MSG_INFO,
"Notifying clients that MHI is enabled\n");
enable_clients(mhi_dev_ctxt, mhi_dev_ctxt->dev_exec_env);
} else {
mhi_log(MHI_MSG_INFO, "MHI is initialized\n");
- for (i = 0; i < MHI_MAX_CHANNELS; ++i) {
- client_handle = mhi_dev_ctxt->client_handle_list[i];
- if (client_handle && client_handle->chan_status)
- r = start_chan_sync(client_handle);
- WARN(r, "Failed to start chan %d ret %d\n",
- i, r);
- return r;
- }
- ring_all_chan_dbs(mhi_dev_ctxt);
}
+
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
ring_all_ev_dbs(mhi_dev_ctxt);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- if (!mhi_dev_ctxt->flags.pending_M3 &&
- mhi_dev_ctxt->flags.link_up)
- mhi_deassert_device_wake(mhi_dev_ctxt);
- mhi_log(MHI_MSG_INFO, "Exited\n");
- return 0;
-}
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
-int mhi_trigger_reset(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int r = 0;
- unsigned long flags = 0;
+	/*
+	 * pm_runtime_allow() decrements the usage count; the count was
+	 * incremented either by the PCI framework in pci_pm_init() or by
+	 * the MHI shutdown/SSR APIs.
+	 */
+ mhi_log(MHI_MSG_INFO, "Allow runtime suspend\n");
- mhi_log(MHI_MSG_INFO, "Entered\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->mhi_state = MHI_STATE_SYS_ERR;
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
+ pm_runtime_mark_last_busy(&mhi_dev_ctxt->dev_info->pcie_device->dev);
+ pm_runtime_allow(&mhi_dev_ctxt->dev_info->pcie_device->dev);
- mhi_log(MHI_MSG_INFO, "Setting RESET to MDM.\n");
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_RESET);
- mhi_log(MHI_MSG_INFO, "Transitioning state to RESET\n");
- r = mhi_init_state_transition(mhi_dev_ctxt,
- STATE_TRANSITION_RESET);
- if (0 != r)
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to initiate 0x%x state trans ret %d\n",
- STATE_TRANSITION_RESET, r);
- mhi_log(MHI_MSG_INFO, "Exiting\n");
- return r;
+	/* Release the device wake vote taken during probe */
+ read_lock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+ mhi_deassert_device_wake(mhi_dev_ctxt);
+ read_unlock_bh(&mhi_dev_ctxt->pm_xfer_lock);
+
+ mhi_log(MHI_MSG_INFO, "Exited\n");
+ return 0;
}
static int process_stt_work_item(
@@ -648,8 +530,8 @@ static int process_stt_work_item(
{
int r = 0;
- mhi_log(MHI_MSG_INFO, "Transitioning to %d\n",
- (int)cur_work_item);
+ mhi_log(MHI_MSG_INFO, "Transitioning to %s\n",
+ state_transition_str(cur_work_item));
trace_mhi_state(cur_work_item);
switch (cur_work_item) {
case STATE_TRANSITION_BHI:
@@ -670,9 +552,6 @@ static int process_stt_work_item(
case STATE_TRANSITION_M0:
r = process_m0_transition(mhi_dev_ctxt, cur_work_item);
break;
- case STATE_TRANSITION_M1:
- r = process_m1_transition(mhi_dev_ctxt, cur_work_item);
- break;
case STATE_TRANSITION_M3:
r = process_m3_transition(mhi_dev_ctxt, cur_work_item);
break;
@@ -689,7 +568,8 @@ static int process_stt_work_item(
break;
default:
mhi_log(MHI_MSG_ERROR,
- "Unrecongized state: %d\n", cur_work_item);
+			"Unrecognized state: %s\n",
+ state_transition_str(cur_work_item));
break;
}
return r;
@@ -762,8 +642,8 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
BUG_ON(nr_avail_work_items <= 0);
mhi_log(MHI_MSG_VERBOSE,
- "Processing state transition %x\n",
- new_state);
+ "Processing state transition %s\n",
+ state_transition_str(new_state));
*(enum STATE_TRANSITION *)stt_ring->wp = new_state;
r = ctxt_add_element(stt_ring, (void **)&cur_work_item);
BUG_ON(r);
@@ -771,216 +651,3 @@ int mhi_init_state_transition(struct mhi_device_ctxt *mhi_dev_ctxt,
wake_up_interruptible(mhi_dev_ctxt->mhi_ev_wq.state_change_event);
return r;
}
-
-int mhi_initiate_m0(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
- int r = 0;
- unsigned long flags;
-
- mhi_log(MHI_MSG_INFO,
- "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
- mhi_dev_ctxt->flags.pending_M3);
- mutex_lock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO,
- "Waiting for M0 M1 or M3. Currently %d...\n",
- mhi_dev_ctxt->mhi_state);
-
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M3 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "Timeout: State %d after %d ms\n",
- mhi_dev_ctxt->mhi_state,
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m0_event_timeouts++;
- r = -ETIME;
- goto exit;
- case -ERESTARTSYS:
- mhi_log(MHI_MSG_CRITICAL,
- "Going Down...\n");
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "Wait complete state: %d\n", mhi_dev_ctxt->mhi_state);
- r = 0;
- break;
- }
- if (mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1) {
- mhi_assert_device_wake(mhi_dev_ctxt);
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, done\n",
- mhi_dev_ctxt->mhi_state);
- goto exit;
- } else {
- if (0 != mhi_turn_on_pcie_link(mhi_dev_ctxt)) {
- mhi_log(MHI_MSG_CRITICAL,
- "Failed to resume link\n");
- r = -EIO;
- goto exit;
- }
-
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_log(MHI_MSG_VERBOSE, "Setting M0 ...\n");
- if (mhi_dev_ctxt->flags.pending_M3) {
- mhi_log(MHI_MSG_INFO,
- "Pending M3 detected, aborting M0 procedure\n");
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock,
- flags);
- r = -EPERM;
- goto exit;
- }
- if (mhi_dev_ctxt->flags.link_up) {
- mhi_dev_ctxt->flags.pending_M0 = 1;
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M0);
- }
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = wait_event_interruptible_timeout(
- *mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- WARN_ON(!r || -ERESTARTSYS == r);
- if (!r || -ERESTARTSYS == r)
- mhi_log(MHI_MSG_ERROR,
- "Failed to get M0 event ret %d\n", r);
- r = 0;
- }
-exit:
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- mhi_log(MHI_MSG_INFO, "Exited...\n");
- return r;
-}
-
-int mhi_initiate_m3(struct mhi_device_ctxt *mhi_dev_ctxt)
-{
-
- unsigned long flags;
- int r = 0, abort_m3 = 0;
-
- mhi_log(MHI_MSG_INFO,
- "Entered MHI state %d, Pending M0 %d Pending M3 %d\n",
- mhi_dev_ctxt->mhi_state, mhi_dev_ctxt->flags.pending_M0,
- mhi_dev_ctxt->flags.pending_M3);
- mutex_lock(&mhi_dev_ctxt->pm_lock);
- switch (mhi_dev_ctxt->mhi_state) {
- case MHI_STATE_RESET:
- mhi_log(MHI_MSG_INFO,
- "MHI in RESET turning link off and quitting\n");
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO,
- "Failed to set bus freq ret %d\n", r);
- goto exit;
- case MHI_STATE_M0:
- case MHI_STATE_M1:
- case MHI_STATE_M2:
- mhi_log(MHI_MSG_INFO,
- "Triggering wake out of M2\n");
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- mhi_dev_ctxt->flags.pending_M3 = 1;
- if ((atomic_read(&mhi_dev_ctxt->flags.m2_transition)) == 0) {
- mhi_log(MHI_MSG_INFO,
- "M2 transition not set\n");
- mhi_assert_device_wake(mhi_dev_ctxt);
- }
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = wait_event_interruptible_timeout(
- *mhi_dev_ctxt->mhi_ev_wq.m0_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M0 ||
- mhi_dev_ctxt->mhi_state == MHI_STATE_M1,
- msecs_to_jiffies(MHI_MAX_RESUME_TIMEOUT));
- if (0 == r || -ERESTARTSYS == r) {
- mhi_log(MHI_MSG_CRITICAL,
- "MDM failed to come out of M2.\n");
- mhi_dev_ctxt->counters.m2_event_timeouts++;
- r = -EAGAIN;
- goto exit;
- }
- break;
- case MHI_STATE_M3:
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, link state %d.\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->flags.link_up);
- if (mhi_dev_ctxt->flags.link_up)
- r = -EAGAIN;
- else
- r = 0;
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "MHI state %d, link state %d.\n",
- mhi_dev_ctxt->mhi_state,
- mhi_dev_ctxt->flags.link_up);
- break;
- }
- while (atomic_read(&mhi_dev_ctxt->counters.outbound_acks)) {
- mhi_log(MHI_MSG_INFO,
- "There are still %d acks pending from device\n",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
- __pm_stay_awake(&mhi_dev_ctxt->w_lock);
- __pm_relax(&mhi_dev_ctxt->w_lock);
- abort_m3 = 1;
- r = -EAGAIN;
- goto exit;
- }
-
- if (atomic_read(&mhi_dev_ctxt->flags.data_pending)) {
- abort_m3 = 1;
- r = -EAGAIN;
- goto exit;
- }
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- if (mhi_dev_ctxt->flags.pending_M0) {
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- r = -EAGAIN;
- goto exit;
- }
- mhi_dev_ctxt->flags.pending_M3 = 1;
-
- mhi_set_m_state(mhi_dev_ctxt, MHI_STATE_M3);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
-
- mhi_log(MHI_MSG_INFO,
- "Waiting for M3 completion.\n");
- r = wait_event_interruptible_timeout(*mhi_dev_ctxt->mhi_ev_wq.m3_event,
- mhi_dev_ctxt->mhi_state == MHI_STATE_M3,
- msecs_to_jiffies(MHI_MAX_SUSPEND_TIMEOUT));
- switch (r) {
- case 0:
- mhi_log(MHI_MSG_CRITICAL,
- "MDM failed to suspend after %d ms\n",
- MHI_MAX_SUSPEND_TIMEOUT);
- mhi_dev_ctxt->counters.m3_event_timeouts++;
- mhi_dev_ctxt->flags.pending_M3 = 0;
- goto exit;
- default:
- mhi_log(MHI_MSG_INFO,
- "M3 completion received\n");
- break;
- }
- mhi_turn_off_pcie_link(mhi_dev_ctxt);
- r = mhi_set_bus_request(mhi_dev_ctxt, 0);
- if (r)
- mhi_log(MHI_MSG_INFO, "Failed to set bus freq ret %d\n", r);
-exit:
- if (abort_m3) {
- write_lock_irqsave(&mhi_dev_ctxt->xfer_lock, flags);
- atomic_inc(&mhi_dev_ctxt->flags.data_pending);
- write_unlock_irqrestore(&mhi_dev_ctxt->xfer_lock, flags);
- ring_all_chan_dbs(mhi_dev_ctxt);
- ring_all_cmd_dbs(mhi_dev_ctxt);
- atomic_dec(&mhi_dev_ctxt->flags.data_pending);
- mhi_deassert_device_wake(mhi_dev_ctxt);
- }
- mhi_dev_ctxt->flags.pending_M3 = 0;
- mutex_unlock(&mhi_dev_ctxt->pm_lock);
- return r;
-}
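Across mhi_states.c the old xfer_lock gives way to pm_xfer_lock with a consistent discipline: state changes take the lock as a writer with interrupts disabled, while data and doorbell paths take it as readers in bottom-half context. A minimal sketch of that reader/writer split; every name below is illustrative:

#include <linux/spinlock.h>
#include <linux/types.h>

static DEFINE_RWLOCK(demo_pm_lock);
static int demo_pm_state;	/* e.g. 0 == M0 */

/* Writer: a state transition is exclusive and runs with IRQs off. */
static void demo_set_state(int new_state)
{
	write_lock_irq(&demo_pm_lock);
	demo_pm_state = new_state;
	write_unlock_irq(&demo_pm_lock);
}

/* Reader: a doorbell path only needs a stable view of the state. */
static bool demo_can_ring_db(void)
{
	bool ok;

	read_lock_bh(&demo_pm_lock);
	ok = (demo_pm_state == 0);
	read_unlock_bh(&demo_pm_lock);
	return ok;
}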
diff --git a/drivers/platform/msm/mhi/mhi_sys.c b/drivers/platform/msm/mhi/mhi_sys.c
index b8652770275e..c5c025b8585a 100644
--- a/drivers/platform/msm/mhi/mhi_sys.c
+++ b/drivers/platform/msm/mhi/mhi_sys.c
@@ -21,9 +21,9 @@
enum MHI_DEBUG_LEVEL mhi_msg_lvl = MHI_MSG_ERROR;
#ifdef CONFIG_MSM_MHI_DEBUG
- enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_VERBOSE;
#else
- enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
+enum MHI_DEBUG_LEVEL mhi_ipc_log_lvl = MHI_MSG_ERROR;
#endif
unsigned int mhi_log_override;
@@ -34,6 +34,18 @@ MODULE_PARM_DESC(mhi_msg_lvl, "dbg lvl");
module_param(mhi_ipc_log_lvl, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(mhi_ipc_log_lvl, "dbg lvl");
+const char * const mhi_states_str[MHI_STATE_LIMIT] = {
+ "RESET",
+ "READY",
+ "M0",
+ "M1",
+ "M2",
+ "M3",
+ "Reserved: 0x06",
+ "BHI",
+ "SYS_ERR",
+};
+
static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
size_t count, loff_t *offp)
{
@@ -46,6 +58,7 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
int valid_chan = 0;
struct mhi_chan_ctxt *cc_list;
struct mhi_client_handle *client_handle;
+ int pkts_queued;
if (NULL == mhi_dev_ctxt)
return -EIO;
@@ -74,35 +87,37 @@ static ssize_t mhi_dbgfs_chan_read(struct file *fp, char __user *buf,
mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
&v_wp_index);
+ pkts_queued = client_handle->chan_info.max_desc -
+ get_nr_avail_ring_elements(&mhi_dev_ctxt->
+ mhi_local_chan_ctxt[*offp]) - 1;
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
- MHI_LOG_SIZE,
- "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
- "chan:",
- (unsigned int)*offp,
- "pkts from dev:",
- mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
- "state:",
- chan_ctxt->mhi_chan_state,
- "p_base:",
- chan_ctxt->mhi_trb_ring_base_addr,
- "v_base:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
- "v_wp:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
- "index:",
- v_wp_index,
- "v_rp:",
- mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
- "index:",
- v_rp_index,
- "pkts_queued",
- get_nr_avail_ring_elements(
- &mhi_dev_ctxt->mhi_local_chan_ctxt[*offp]),
- "/",
- client_handle->chan_info.max_desc,
- "bb_used:",
- mhi_dev_ctxt->counters.bb_used[*offp]);
+ MHI_LOG_SIZE,
+ "%s0x%x %s %d %s 0x%x %s 0x%llx %s %p %s %p %s %lu %s %p %s %lu %s %d %s %d %s %u\n",
+ "chan:",
+ (unsigned int)*offp,
+ "pkts from dev:",
+ mhi_dev_ctxt->counters.chan_pkts_xferd[*offp],
+ "state:",
+ chan_ctxt->chstate,
+ "p_base:",
+ chan_ctxt->mhi_trb_ring_base_addr,
+ "v_base:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].base,
+ "v_wp:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].wp,
+ "index:",
+ v_wp_index,
+ "v_rp:",
+ mhi_dev_ctxt->mhi_local_chan_ctxt[*offp].rp,
+ "index:",
+ v_rp_index,
+			"pkts_queued:",
+ pkts_queued,
+ "/",
+ client_handle->chan_info.max_desc,
+ "bb_used:",
+ mhi_dev_ctxt->counters.bb_used[*offp]);
*offp += 1;
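The pkts_queued value above follows from the one-slot-reserved ring convention: of max_desc descriptors at most max_desc - 1 can ever be occupied, so the number in flight is the capacity minus the free count minus one. A worked example with assumed numbers:

/* Assumed ring: 128 descriptors, 100 currently reported free. */
int max_desc = 128;
int avail = 100;
int pkts_queued = max_desc - avail - 1;	/* 27 packets in flight */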
@@ -224,39 +239,37 @@ static ssize_t mhi_dbgfs_state_read(struct file *fp, char __user *buf,
msleep(100);
amnt_copied =
scnprintf(mhi_dev_ctxt->chan_info,
- MHI_LOG_SIZE,
- "%s %u %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d %s %d, %s, %d, %s %d\n",
- "Our State:",
- mhi_dev_ctxt->mhi_state,
- "M0->M1:",
- mhi_dev_ctxt->counters.m0_m1,
- "M0<-M1:",
- mhi_dev_ctxt->counters.m1_m0,
- "M1->M2:",
- mhi_dev_ctxt->counters.m1_m2,
- "M0<-M2:",
- mhi_dev_ctxt->counters.m2_m0,
- "M0->M3:",
- mhi_dev_ctxt->counters.m0_m3,
- "M0<-M3:",
- mhi_dev_ctxt->counters.m3_m0,
- "M3_ev_TO:",
- mhi_dev_ctxt->counters.m3_event_timeouts,
- "M0_ev_TO:",
- mhi_dev_ctxt->counters.m0_event_timeouts,
- "MSI_d:",
- mhi_dev_ctxt->counters.msi_disable_cntr,
- "MSI_e:",
- mhi_dev_ctxt->counters.msi_enable_cntr,
- "outstanding_acks:",
- atomic_read(&mhi_dev_ctxt->counters.outbound_acks),
- "LPM:",
- mhi_dev_ctxt->enable_lpm);
+ MHI_LOG_SIZE,
+ "%s %s %s 0x%02x %s %u %s %u %s %u %s %u %s %u %s %u %s %d %s %d %s %d\n",
+ "MHI State:",
+ TO_MHI_STATE_STR(mhi_dev_ctxt->mhi_state),
+ "PM State:",
+ mhi_dev_ctxt->mhi_pm_state,
+ "M0->M1:",
+ mhi_dev_ctxt->counters.m0_m1,
+ "M1->M2:",
+ mhi_dev_ctxt->counters.m1_m2,
+ "M2->M0:",
+ mhi_dev_ctxt->counters.m2_m0,
+ "M0->M3:",
+ mhi_dev_ctxt->counters.m0_m3,
+ "M1->M3:",
+ mhi_dev_ctxt->counters.m1_m3,
+ "M3->M0:",
+ mhi_dev_ctxt->counters.m3_m0,
+ "device_wake:",
+ atomic_read(&mhi_dev_ctxt->counters.device_wake),
+ "usage_count:",
+ atomic_read(&mhi_dev_ctxt->dev_info->pcie_device->dev.
+ power.usage_count),
+ "outbound_acks:",
+ atomic_read(&mhi_dev_ctxt->counters.outbound_acks));
if (amnt_copied < count)
return amnt_copied - copy_to_user(buf,
mhi_dev_ctxt->chan_info, amnt_copied);
else
return -ENOMEM;
+ return 0;
}
static const struct file_operations mhi_dbgfs_state_fops = {
diff --git a/drivers/platform/msm/mhi/mhi_sys.h b/drivers/platform/msm/mhi/mhi_sys.h
index 765fd46ef2dd..a948a2354de7 100644
--- a/drivers/platform/msm/mhi/mhi_sys.h
+++ b/drivers/platform/msm/mhi/mhi_sys.h
@@ -46,6 +46,10 @@ extern void *mhi_ipc_log;
"[%s] " _msg, __func__, ##__VA_ARGS__); \
} while (0)
+extern const char * const mhi_states_str[MHI_STATE_LIMIT];
+#define TO_MHI_STATE_STR(state) (((state) >= MHI_STATE_LIMIT) ? \
+ "INVALID_STATE" : mhi_states_str[state])
+
irqreturn_t mhi_msi_handlr(int msi_number, void *dev_id);
struct mhi_meminfo {
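Because TO_MHI_STATE_STR() range-checks before indexing, a raw or corrupted register value degrades to a readable placeholder rather than an out-of-bounds read. A hedged usage sketch, assuming it runs inside a driver function where mhi_dev_ctxt is in scope (mhi_get_m_state() already saturates out-of-range values to MHI_STATE_LIMIT, which the macro maps to "INVALID_STATE"):

enum MHI_STATE state = mhi_get_m_state(mhi_dev_ctxt);

/* Any value >= MHI_STATE_LIMIT prints as "INVALID_STATE". */
mhi_log(MHI_MSG_INFO, "Current MHI state: %s\n", TO_MHI_STATE_STR(state));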
diff --git a/drivers/platform/msm/mhi_uci/mhi_uci.c b/drivers/platform/msm/mhi_uci/mhi_uci.c
index d6eb96a3e89e..88a213b1b241 100644
--- a/drivers/platform/msm/mhi_uci/mhi_uci.c
+++ b/drivers/platform/msm/mhi_uci/mhi_uci.c
@@ -895,6 +895,8 @@ static int uci_init_client_attributes(struct mhi_uci_ctxt_t
case MHI_CLIENT_BL_IN:
case MHI_CLIENT_DUN_OUT:
case MHI_CLIENT_DUN_IN:
+ case MHI_CLIENT_TF_OUT:
+ case MHI_CLIENT_TF_IN:
chan_attrib->uci_ownership = 1;
break;
default:
diff --git a/drivers/video/fbdev/msm/msm_ext_display.c b/drivers/platform/msm/msm_ext_display.c
index d74b1432ea71..bb1259e3cfa1 100644
--- a/drivers/video/fbdev/msm/msm_ext_display.c
+++ b/drivers/platform/msm/msm_ext_display.c
@@ -23,9 +23,6 @@
#include <linux/of_platform.h>
#include <linux/msm_ext_display.h>
-#include "mdss_hdmi_util.h"
-#include "mdss_fb.h"
-
struct msm_ext_disp_list {
struct msm_ext_disp_init_data *data;
struct list_head list;
@@ -48,7 +45,6 @@ struct msm_ext_disp {
static int msm_ext_disp_get_intf_data(struct msm_ext_disp *ext_disp,
enum msm_ext_disp_type type,
struct msm_ext_disp_init_data **data);
-static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack);
static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
enum msm_ext_disp_type type,
enum msm_ext_disp_cable_state state, u32 flags);
@@ -103,128 +99,6 @@ end:
return;
}
-static void msm_ext_disp_get_pdev_by_name(struct device *dev,
- const char *phandle, struct platform_device **pdev)
-{
- struct device_node *pd_np;
-
- if (!dev) {
- pr_err("Invalid device\n");
- return;
- }
-
- if (!dev->of_node) {
- pr_err("Invalid of_node\n");
- return;
- }
-
- pd_np = of_parse_phandle(dev->of_node, phandle, 0);
- if (!pd_np) {
- pr_err("Cannot find %s dev\n", phandle);
- return;
- }
-
- *pdev = of_find_device_by_node(pd_np);
-}
-
-static void msm_ext_disp_get_fb_pdev(struct device *device,
- struct platform_device **fb_pdev)
-{
- struct msm_fb_data_type *mfd = NULL;
- struct fb_info *fbi = dev_get_drvdata(device);
-
- if (!fbi) {
- pr_err("fb_info is null\n");
- return;
- }
-
- mfd = (struct msm_fb_data_type *)fbi->par;
-
- *fb_pdev = mfd->pdev;
-}
-static ssize_t msm_ext_disp_sysfs_wta_audio_cb(struct device *dev,
- struct device_attribute *attr, const char *buf, size_t count)
-{
- int ack, ret = 0;
- ssize_t size = strnlen(buf, PAGE_SIZE);
- const char *ext_phandle = "qcom,msm_ext_disp";
- struct platform_device *ext_pdev = NULL;
- const char *intf_phandle = "qcom,mdss-intf";
- struct platform_device *intf_pdev = NULL;
- struct platform_device *fb_pdev = NULL;
-
- ret = kstrtoint(buf, 10, &ack);
- if (ret) {
- pr_err("kstrtoint failed. ret=%d\n", ret);
- goto end;
- }
-
- msm_ext_disp_get_fb_pdev(dev, &fb_pdev);
- if (!fb_pdev) {
- pr_err("failed to get fb pdev\n");
- goto end;
- }
-
- msm_ext_disp_get_pdev_by_name(&fb_pdev->dev, intf_phandle, &intf_pdev);
- if (!intf_pdev) {
- pr_err("failed to get display intf pdev\n");
- goto end;
- }
-
- msm_ext_disp_get_pdev_by_name(&intf_pdev->dev, ext_phandle, &ext_pdev);
- if (!ext_pdev) {
- pr_err("failed to get ext_pdev\n");
- goto end;
- }
-
- ret = msm_ext_disp_audio_ack(ext_pdev, ack);
- if (ret)
- pr_err("Failed to process ack. ret=%d\n", ret);
-
-end:
- return size;
-}
-
-static DEVICE_ATTR(hdmi_audio_cb, S_IWUSR, NULL,
- msm_ext_disp_sysfs_wta_audio_cb);
-
-static struct attribute *msm_ext_disp_fs_attrs[] = {
- &dev_attr_hdmi_audio_cb.attr,
- NULL,
-};
-
-static struct attribute_group msm_ext_disp_fs_attrs_group = {
- .attrs = msm_ext_disp_fs_attrs,
-};
-
-static int msm_ext_disp_sysfs_create(struct msm_ext_disp_init_data *data)
-{
- int ret = 0;
-
- if (!data || !data->kobj) {
- pr_err("Invalid params\n");
- ret = -EINVAL;
- goto end;
- }
-
- ret = sysfs_create_group(data->kobj, &msm_ext_disp_fs_attrs_group);
- if (ret)
- pr_err("Failed, ret=%d\n", ret);
-
-end:
- return ret;
-}
-
-static void msm_ext_disp_sysfs_remove(struct msm_ext_disp_init_data *data)
-{
- if (!data || !data->kobj) {
- pr_err("Invalid params\n");
- return;
- }
-
- sysfs_remove_group(data->kobj, &msm_ext_disp_fs_attrs_group);
-}
-
static const char *msm_ext_disp_name(enum msm_ext_disp_type type)
{
switch (type) {
@@ -293,31 +167,6 @@ end:
return ret;
}
-static void msm_ext_disp_remove_intf_data(struct msm_ext_disp *ext_disp,
- enum msm_ext_disp_type type)
-{
- struct msm_ext_disp_list *node;
- struct list_head *position = NULL;
- struct list_head *temp = NULL;
-
- if (!ext_disp) {
- pr_err("Invalid params\n");
- return;
- }
-
- list_for_each_safe(position, temp, &ext_disp->display_list) {
- node = list_entry(position, struct msm_ext_disp_list, list);
- if (node->data->type == type) {
- msm_ext_disp_sysfs_remove(node->data);
- list_del(&node->list);
- pr_debug("Removed display (%s)\n",
- msm_ext_disp_name(type));
- kfree(node);
- break;
- }
- }
-}
-
static int msm_ext_disp_send_cable_notification(struct msm_ext_disp *ext_disp,
enum msm_ext_disp_cable_state new_state)
{
@@ -661,6 +510,46 @@ static void msm_ext_disp_teardown_done(struct platform_device *pdev)
complete_all(&ext_disp->hpd_comp);
}
+static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
+{
+ u32 ack_hpd;
+ int ret = 0;
+ struct msm_ext_disp *ext_disp = NULL;
+
+ if (!pdev) {
+ pr_err("Invalid platform device\n");
+ return -EINVAL;
+ }
+
+ ext_disp = platform_get_drvdata(pdev);
+ if (!ext_disp) {
+ pr_err("Invalid drvdata\n");
+ return -EINVAL;
+ }
+
+ if (ack & AUDIO_ACK_SET_ENABLE) {
+ ext_disp->ack_enabled = ack & AUDIO_ACK_ENABLE ?
+ true : false;
+
+ pr_debug("audio ack feature %s\n",
+ ext_disp->ack_enabled ? "enabled" : "disabled");
+ goto end;
+ }
+
+ if (!ext_disp->ack_enabled)
+ goto end;
+
+ ack_hpd = ack & AUDIO_ACK_CONNECT;
+
+ pr_debug("%s acknowledging audio (%d)\n",
+ msm_ext_disp_name(ext_disp->current_disp), ack_hpd);
+
+ if (!ext_disp->audio_session_on)
+ complete_all(&ext_disp->hpd_comp);
+end:
+ return ret;
+}
+
static int msm_ext_disp_get_intf_id(struct platform_device *pdev)
{
int ret = 0;
@@ -710,12 +599,14 @@ static int msm_ext_disp_update_audio_ops(struct msm_ext_disp *ext_disp,
ops->cable_status = msm_ext_disp_cable_status;
ops->get_intf_id = msm_ext_disp_get_intf_id;
ops->teardown_done = msm_ext_disp_teardown_done;
+ ops->acknowledge = msm_ext_disp_audio_ack;
} else {
ops->audio_info_setup = NULL;
ops->get_audio_edid_blk = NULL;
ops->cable_status = NULL;
ops->get_intf_id = NULL;
ops->teardown_done = NULL;
+ ops->acknowledge = NULL;
}
end:
return ret;
@@ -755,46 +646,6 @@ end:
return ret;
}
-static int msm_ext_disp_audio_ack(struct platform_device *pdev, u32 ack)
-{
- u32 ack_hpd;
- int ret = 0;
- struct msm_ext_disp *ext_disp = NULL;
-
- if (!pdev) {
- pr_err("Invalid platform device\n");
- return -EINVAL;
- }
-
- ext_disp = platform_get_drvdata(pdev);
- if (!ext_disp) {
- pr_err("Invalid drvdata\n");
- return -EINVAL;
- }
-
- if (ack & AUDIO_ACK_SET_ENABLE) {
- ext_disp->ack_enabled = ack & AUDIO_ACK_ENABLE ?
- true : false;
-
- pr_debug("audio ack feature %s\n",
- ext_disp->ack_enabled ? "enabled" : "disabled");
- goto end;
- }
-
- if (!ext_disp->ack_enabled)
- goto end;
-
- ack_hpd = ack & AUDIO_ACK_CONNECT;
-
- pr_debug("%s acknowledging audio (%d)\n",
- msm_ext_disp_name(ext_disp->current_disp), ack_hpd);
-
- if (!ext_disp->audio_session_on)
- complete_all(&ext_disp->hpd_comp);
-end:
- return ret;
-}
-
int msm_hdmi_register_audio_codec(struct platform_device *pdev,
struct msm_ext_disp_audio_codec_ops *ops)
{
@@ -849,11 +700,6 @@ static int msm_ext_disp_validate_intf(struct msm_ext_disp_init_data *init_data)
return -EINVAL;
}
- if (!init_data->kobj) {
- pr_err("Invalid display intf kobj\n");
- return -EINVAL;
- }
-
if (!init_data->codec_ops.get_audio_edid_blk ||
!init_data->codec_ops.cable_status ||
!init_data->codec_ops.audio_info_setup) {
@@ -899,10 +745,6 @@ int msm_ext_disp_register_intf(struct platform_device *pdev,
if (ret)
goto end;
- ret = msm_ext_disp_sysfs_create(init_data);
- if (ret)
- goto sysfs_failure;
-
init_data->intf_ops.hpd = msm_ext_disp_hpd;
init_data->intf_ops.notify = msm_ext_disp_notify;
@@ -913,8 +755,6 @@ int msm_ext_disp_register_intf(struct platform_device *pdev,
return ret;
-sysfs_failure:
- msm_ext_disp_remove_intf_data(ext_disp, init_data->type);
end:
mutex_unlock(&ext_disp->lock);
@@ -1035,7 +875,7 @@ static void __exit msm_ext_disp_exit(void)
platform_driver_unregister(&this_driver);
}
-module_init(msm_ext_disp_init);
+subsys_initcall(msm_ext_disp_init);
module_exit(msm_ext_disp_exit);
MODULE_LICENSE("GPL v2");
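The relocated msm_ext_disp_audio_ack() implements a two-step handshake: a write carrying AUDIO_ACK_SET_ENABLE latches whether acknowledgements are honoured at all, and later writes carry AUDIO_ACK_CONNECT to acknowledge a cable event so the HPD completion can fire. A sketch of a codec driver exercising the new acknowledge op; the function, its caller, and the surrounding driver state are assumptions:

#include <linux/msm_ext_display.h>
#include <linux/platform_device.h>

/* Sketch only: ext_pdev is the msm_ext_disp platform device. */
static void demo_codec_ack(struct platform_device *ext_pdev,
			   struct msm_ext_disp_audio_codec_ops *ops)
{
	/* Opt in to the ack handshake once. */
	ops->acknowledge(ext_pdev, AUDIO_ACK_SET_ENABLE | AUDIO_ACK_ENABLE);

	/* Acknowledge a subsequent connect notification. */
	ops->acknowledge(ext_pdev, AUDIO_ACK_CONNECT);
}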
diff --git a/drivers/power/supply/qcom/battery.c b/drivers/power/supply/qcom/battery.c
index 4039b4312e93..4973f91c8af1 100644
--- a/drivers/power/supply/qcom/battery.c
+++ b/drivers/power/supply/qcom/battery.c
@@ -635,14 +635,16 @@ static void handle_main_charge_type(struct pl_data *chip)
}
#define MIN_ICL_CHANGE_DELTA_UA 300000
-static void handle_settled_aicl_split(struct pl_data *chip)
+static void handle_settled_icl_change(struct pl_data *chip)
{
union power_supply_propval pval = {0, };
int rc;
- if (!get_effective_result(chip->pl_disable_votable)
- && (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
- || chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT)) {
+ if (get_effective_result(chip->pl_disable_votable))
+ return;
+
+ if (chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN
+ || chip->pl_mode == POWER_SUPPLY_PL_USBIN_USBIN_EXT) {
/*
* call aicl split only when USBIN_USBIN and enabled
* and if aicl changed
@@ -659,6 +661,8 @@ static void handle_settled_aicl_split(struct pl_data *chip)
if (abs((chip->main_settled_ua - chip->pl_settled_ua)
- pval.intval) > MIN_ICL_CHANGE_DELTA_UA)
split_settled(chip);
+ } else {
+ rerun_election(chip->fcc_votable);
}
}
@@ -705,7 +709,7 @@ static void status_change_work(struct work_struct *work)
is_parallel_available(chip);
handle_main_charge_type(chip);
- handle_settled_aicl_split(chip);
+ handle_settled_icl_change(chip);
handle_parallel_in_taper(chip);
}
@@ -719,7 +723,8 @@ static int pl_notifier_call(struct notifier_block *nb,
return NOTIFY_OK;
if ((strcmp(psy->desc->name, "parallel") == 0)
- || (strcmp(psy->desc->name, "battery") == 0))
+ || (strcmp(psy->desc->name, "battery") == 0)
+ || (strcmp(psy->desc->name, "main") == 0))
schedule_work(&chip->status_change_work);
return NOTIFY_OK;
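For the USBIN-USBIN modes, handle_settled_icl_change() re-splits the input current only when the fresh AICL reading has drifted from the cached split by more than MIN_ICL_CHANGE_DELTA_UA (300 mA). A worked example with assumed readings, in microamps:

int main_settled_ua = 1500000;	/* cached main ICL at the last split */
int pl_settled_ua = 900000;	/* cached parallel ICL */
int new_main_ua = 1100000;	/* fresh settled reading on main */

/* |(1500000 - 900000) - 1100000| = 500000 > 300000, so re-split. */
if (abs((main_settled_ua - pl_settled_ua) - new_main_ua)
		> MIN_ICL_CHANGE_DELTA_UA)
	split_settled(chip);	/* redistribute between main and parallel */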
diff --git a/drivers/power/supply/qcom/qpnp-smb2.c b/drivers/power/supply/qcom/qpnp-smb2.c
index 7e5b239cea96..64f4d46df9a0 100644
--- a/drivers/power/supply/qcom/qpnp-smb2.c
+++ b/drivers/power/supply/qcom/qpnp-smb2.c
@@ -992,6 +992,9 @@ static int smb2_batt_set_prop(struct power_supply *psy,
/* Not in ship mode as long as the device is active */
if (!val->intval)
break;
+ if (chg->pl.psy)
+ power_supply_set_property(chg->pl.psy,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE, val);
rc = smblib_set_prop_ship_mode(chg, val);
break;
case POWER_SUPPLY_PROP_RERUN_AICL:
diff --git a/drivers/power/supply/qcom/smb-lib.c b/drivers/power/supply/qcom/smb-lib.c
index ce2e077c4f07..5d4b46469e9c 100644
--- a/drivers/power/supply/qcom/smb-lib.c
+++ b/drivers/power/supply/qcom/smb-lib.c
@@ -381,13 +381,13 @@ static int smblib_set_opt_freq_buck(struct smb_charger *chg, int fsw_khz)
if (chg->mode == PARALLEL_MASTER && chg->pl.psy) {
pval.intval = fsw_khz;
- rc = power_supply_set_property(chg->pl.psy,
+		/*
+		 * Some parallel charging implementations may not support
+		 * POWER_SUPPLY_PROP_BUCK_FREQ; they may run at a fixed
+		 * switching frequency, so any error here is ignored.
+		 */
+ power_supply_set_property(chg->pl.psy,
POWER_SUPPLY_PROP_BUCK_FREQ, &pval);
- if (rc < 0) {
- dev_err(chg->dev,
- "Could not set parallel buck_freq rc=%d\n", rc);
- return rc;
- }
}
return rc;
@@ -921,7 +921,6 @@ override_suspend_config:
enable_icl_changed_interrupt:
enable_irq(chg->irq_info[USBIN_ICL_CHANGE_IRQ].irq);
-
return rc;
}
@@ -1720,33 +1719,54 @@ int smblib_set_prop_system_temp_level(struct smb_charger *chg,
int smblib_rerun_aicl(struct smb_charger *chg)
{
- int rc = 0;
- u8 val;
+ int rc, settled_icl_ua;
+ u8 stat;
- /*
- * Use restart_AICL instead of trigger_AICL as it runs the
- * complete AICL instead of starting from the last settled value.
- *
- * 8998 only supports trigger_AICL return error for 8998
- */
+ rc = smblib_read(chg, POWER_PATH_STATUS_REG, &stat);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't read POWER_PATH_STATUS rc=%d\n",
+ rc);
+ return rc;
+ }
+
+ /* USB is suspended so skip re-running AICL */
+ if (stat & USBIN_SUSPEND_STS_BIT)
+ return rc;
+
+ smblib_dbg(chg, PR_MISC, "re-running AICL\n");
switch (chg->smb_version) {
case PMI8998_SUBTYPE:
- smblib_dbg(chg, PR_PARALLEL, "AICL rerun not supported\n");
- return -EINVAL;
+ rc = smblib_get_charge_param(chg, &chg->param.icl_stat,
+ &settled_icl_ua);
+ if (rc < 0) {
+ smblib_err(chg, "Couldn't get settled ICL rc=%d\n", rc);
+ return rc;
+ }
+
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
+ max(settled_icl_ua - chg->param.usb_icl.step_u,
+ chg->param.usb_icl.step_u));
+ vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);
+ break;
case PM660_SUBTYPE:
- val = RESTART_AICL_BIT;
+ /*
+ * Use restart_AICL instead of trigger_AICL as it runs the
+ * complete AICL instead of starting from the last settled
+ * value.
+ */
+ rc = smblib_masked_write(chg, CMD_HVDCP_2_REG,
+ RESTART_AICL_BIT, RESTART_AICL_BIT);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
+ rc);
break;
default:
smblib_dbg(chg, PR_PARALLEL, "unknown SMB chip %d\n",
chg->smb_version);
return -EINVAL;
}
- rc = smblib_masked_write(chg, CMD_HVDCP_2_REG, val, val);
- if (rc < 0)
- smblib_err(chg, "Couldn't write to CMD_HVDCP_2_REG rc=%d\n",
- rc);
- return rc;
+ return 0;
}
static int smblib_dp_pulse(struct smb_charger *chg)
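For PMI8998, which has no usable RESTART_AICL control, the rerun above is forced through the ICL votable instead: vote the limit down one step below the settled value (floored at a single step), then withdraw the vote; the dip and recovery make the hardware collapse and re-run input current limiting. A sketch of that vote/unvote pattern with assumed numbers:

int settled_icl_ua = 1500000;	/* assumed settled ICL */
int step_ua = 25000;		/* assumed ICL step size */

/* Pull the ICL down one step, but never below a single step... */
vote(chg->usb_icl_votable, AICL_RERUN_VOTER, true,
     max(settled_icl_ua - step_ua, step_ua));
/* ...then release the vote so the effective ICL snaps back. */
vote(chg->usb_icl_votable, AICL_RERUN_VOTER, false, 0);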
@@ -2403,6 +2423,10 @@ int smblib_set_prop_usb_voltage_max(struct smb_charger *chg,
}
chg->voltage_max_uv = max_uv;
+ rc = smblib_rerun_aicl(chg);
+ if (rc < 0)
+ smblib_err(chg, "Couldn't re-run AICL rc=%d\n", rc);
+
return rc;
}
diff --git a/drivers/power/supply/qcom/smb-lib.h b/drivers/power/supply/qcom/smb-lib.h
index fa6fd3eb310d..21ccd3ce57c7 100644
--- a/drivers/power/supply/qcom/smb-lib.h
+++ b/drivers/power/supply/qcom/smb-lib.h
@@ -57,6 +57,7 @@ enum print_reason {
#define PL_DELAY_HVDCP_VOTER "PL_DELAY_HVDCP_VOTER"
#define CTM_VOTER "CTM_VOTER"
#define SW_QC3_VOTER "SW_QC3_VOTER"
+#define AICL_RERUN_VOTER "AICL_RERUN_VOTER"
#define VCONN_MAX_ATTEMPTS 3
#define OTG_MAX_ATTEMPTS 3
diff --git a/drivers/power/supply/qcom/smb138x-charger.c b/drivers/power/supply/qcom/smb138x-charger.c
index fe1aced4f5c6..1c7c1e78699f 100644
--- a/drivers/power/supply/qcom/smb138x-charger.c
+++ b/drivers/power/supply/qcom/smb138x-charger.c
@@ -44,6 +44,8 @@
#define SMB2CHG_DC_TM_SREFGEN (DCIN_BASE + 0xE2)
#define STACKED_DIODE_EN_BIT BIT(2)
+#define TDIE_AVG_COUNT 10
+
enum {
OOB_COMP_WA_BIT = BIT(0),
};
@@ -118,6 +120,27 @@ irqreturn_t smb138x_handle_slave_chg_state_change(int irq, void *data)
return IRQ_HANDLED;
}
+static int smb138x_get_prop_charger_temp(struct smb138x *chip,
+ union power_supply_propval *val)
+{
+ union power_supply_propval pval;
+ int rc = 0, avg = 0, i;
+ struct smb_charger *chg = &chip->chg;
+
+ for (i = 0; i < TDIE_AVG_COUNT; i++) {
+ pval.intval = 0;
+ rc = smblib_get_prop_charger_temp(chg, &pval);
+ if (rc < 0) {
+			pr_err("Couldn't read chg temp on iteration %d, rc = %d\n",
+ i + 1, rc);
+ return rc;
+ }
+ avg += pval.intval;
+ }
+ val->intval = avg / TDIE_AVG_COUNT;
+ return rc;
+}
+
static int smb138x_parse_dt(struct smb138x *chip)
{
struct smb_charger *chg = &chip->chg;
@@ -312,6 +335,7 @@ static enum power_supply_property smb138x_batt_props[] = {
POWER_SUPPLY_PROP_CAPACITY,
POWER_SUPPLY_PROP_CHARGER_TEMP,
POWER_SUPPLY_PROP_CHARGER_TEMP_MAX,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
};
static int smb138x_batt_get_prop(struct power_supply *psy,
@@ -342,11 +366,15 @@ static int smb138x_batt_get_prop(struct power_supply *psy,
rc = smblib_get_prop_batt_capacity(chg, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
- rc = smblib_get_prop_charger_temp(chg, val);
+ rc = smb138x_get_prop_charger_temp(chip, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
rc = smblib_get_prop_charger_temp_max(chg, val);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as device is active */
+ val->intval = 0;
+ break;
default:
pr_err("batt power supply get prop %d not supported\n", prop);
return -EINVAL;
@@ -375,6 +403,12 @@ static int smb138x_batt_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CAPACITY:
rc = smblib_set_prop_batt_capacity(chg, val);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as the device is active */
+ if (!val->intval)
+ break;
+ rc = smblib_set_prop_ship_mode(chg, val);
+ break;
default:
pr_err("batt power supply set prop %d not supported\n", prop);
return -EINVAL;
@@ -494,6 +528,7 @@ static enum power_supply_property smb138x_parallel_props[] = {
POWER_SUPPLY_PROP_MODEL_NAME,
POWER_SUPPLY_PROP_PARALLEL_MODE,
POWER_SUPPLY_PROP_CONNECTOR_HEALTH,
+ POWER_SUPPLY_PROP_SET_SHIP_MODE,
};
static int smb138x_parallel_get_prop(struct power_supply *psy,
@@ -535,7 +570,7 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
rc = smblib_get_prop_slave_current_now(chg, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP:
- rc = smblib_get_prop_charger_temp(chg, val);
+ rc = smb138x_get_prop_charger_temp(chip, val);
break;
case POWER_SUPPLY_PROP_CHARGER_TEMP_MAX:
rc = smblib_get_prop_charger_temp_max(chg, val);
@@ -549,6 +584,10 @@ static int smb138x_parallel_get_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONNECTOR_HEALTH:
val->intval = smb138x_get_prop_connector_health(chip);
break;
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as device is active */
+ val->intval = 0;
+ break;
default:
pr_err("parallel power supply get prop %d not supported\n",
prop);
@@ -605,12 +644,14 @@ static int smb138x_parallel_set_prop(struct power_supply *psy,
case POWER_SUPPLY_PROP_CONSTANT_CHARGE_CURRENT_MAX:
rc = smblib_set_charge_param(chg, &chg->param.fcc, val->intval);
break;
- case POWER_SUPPLY_PROP_BUCK_FREQ:
- rc = smblib_set_charge_param(chg, &chg->param.freq_buck,
- val->intval);
+ case POWER_SUPPLY_PROP_SET_SHIP_MODE:
+ /* Not in ship mode as long as the device is active */
+ if (!val->intval)
+ break;
+ rc = smblib_set_prop_ship_mode(chg, val);
break;
default:
- pr_err("parallel power supply set prop %d not supported\n",
+ pr_debug("parallel power supply set prop %d not supported\n",
prop);
return -EINVAL;
}
@@ -889,6 +930,13 @@ static int smb138x_init_hw(struct smb138x *chip)
chg->dcp_icl_ua = chip->dt.usb_icl_ua;
+	/* configure a fixed 700 kHz buck frequency to avoid Tdie errors */
+ rc = smblib_set_charge_param(chg, &chg->param.freq_buck, 700);
+ if (rc < 0) {
+		pr_err("Couldn't configure 700 kHz switch freq rc=%d\n", rc);
+ return rc;
+ }
+
/* configure charge enable for software control; active high */
rc = smblib_masked_write(chg, CHGR_CFG2_REG,
CHG_EN_POLARITY_BIT | CHG_EN_SRC_BIT, 0);
diff --git a/drivers/regulator/cprh-kbss-regulator.c b/drivers/regulator/cprh-kbss-regulator.c
index 9be8bb94b257..2307da9497dc 100644
--- a/drivers/regulator/cprh-kbss-regulator.c
+++ b/drivers/regulator/cprh-kbss-regulator.c
@@ -37,7 +37,8 @@
#define MSM8998_KBSS_FUSE_CORNERS 4
#define SDM660_KBSS_FUSE_CORNERS 5
-#define SDM630_KBSS_FUSE_CORNERS 4
+#define SDM630_POWER_KBSS_FUSE_CORNERS 3
+#define SDM630_PERF_KBSS_FUSE_CORNERS 5
/**
* struct cprh_kbss_fuses - KBSS specific fuse data
@@ -131,18 +132,32 @@ static const char * const cprh_sdm660_perf_kbss_fuse_corner_name[] = {
[CPRH_SDM660_PERF_KBSS_FUSE_CORNER_TURBO_L2] = "TURBO_L2",
};
-enum cprh_sdm630_kbss_fuse_corner {
- CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS = 0,
- CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS = 1,
- CPRH_SDM630_KBSS_FUSE_CORNER_NOM = 2,
- CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1 = 3,
+enum cprh_sdm630_power_kbss_fuse_corner {
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS = 0,
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS = 1,
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1 = 2,
};
-static const char * const cprh_sdm630_kbss_fuse_corner_name[] = {
- [CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
- [CPRH_SDM630_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
- [CPRH_SDM630_KBSS_FUSE_CORNER_NOM] = "NOM",
- [CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+static const char * const cprh_sdm630_power_kbss_fuse_corner_name[] = {
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
+ [CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1] = "TURBO_L1",
+};
+
+enum cprh_sdm630_perf_kbss_fuse_corner {
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS = 0,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS = 1,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM = 2,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO = 3,
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2 = 4,
+};
+
+static const char * const cprh_sdm630_perf_kbss_fuse_corner_name[] = {
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS] = "LowSVS",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_SVSPLUS] = "SVSPLUS",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_NOM] = "NOM",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO] = "TURBO",
+ [CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2] = "TURBO_L2",
};
/* KBSS cluster IDs */
@@ -202,17 +217,17 @@ sdm660_kbss_ro_sel_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_ro_sel_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_ro_sel_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{67, 12, 15}, {} },
{{65, 56, 59}, {} },
- {{67, 4, 7}, {} },
{{67, 0, 3}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{68, 61, 63}, {69, 0, 0} },
{{69, 1, 4}, {} },
{{68, 57, 60}, {} },
+ {{68, 53, 56}, {} },
{{66, 14, 17}, {} },
},
};
@@ -252,17 +267,17 @@ sdm660_kbss_init_voltage_param[2][SDM660_KBSS_FUSE_CORNERS][2] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_init_voltage_param[2][SDM630_KBSS_FUSE_CORNERS][2] = {
+sdm630_kbss_init_voltage_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][2] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{67, 34, 39}, {} },
{{71, 3, 8}, {} },
- {{67, 22, 27}, {} },
{{67, 16, 21}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{69, 17, 22}, {} },
{{69, 23, 28}, {} },
{{69, 11, 16}, {} },
+ {{69, 5, 10}, {} },
{{70, 42, 47}, {} },
},
};
@@ -302,17 +317,17 @@ sdm660_kbss_target_quot_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_target_quot_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_target_quot_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{68, 12, 23}, {} },
{{71, 9, 20}, {} },
- {{67, 52, 63}, {} },
{{67, 40, 51}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{69, 53, 63}, {70, 0, 0}, {} },
{{70, 1, 12}, {} },
{{69, 41, 52}, {} },
+ {{69, 29, 40}, {} },
{{70, 48, 59}, {} },
},
};
@@ -352,17 +367,17 @@ sdm660_kbss_quot_offset_param[2][SDM660_KBSS_FUSE_CORNERS][3] = {
};
static const struct cpr3_fuse_param
-sdm630_kbss_quot_offset_param[2][SDM630_KBSS_FUSE_CORNERS][3] = {
+sdm630_kbss_quot_offset_param[2][SDM630_PERF_KBSS_FUSE_CORNERS][3] = {
[CPRH_KBSS_POWER_CLUSTER_ID] = {
{{} },
{{71, 21, 27}, {} },
- {{68, 31, 37}, {} },
{{68, 24, 30}, {} },
},
[CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
{{} },
{{70, 27, 33}, {} },
{{70, 20, 26}, {} },
+ {{70, 13, 19}, {} },
{{70, 60, 63}, {71, 0, 2}, {} },
},
};
@@ -484,18 +499,27 @@ sdm660_kbss_fuse_ref_volt[2][SDM660_KBSS_FUSE_CORNERS] = {
* Open loop voltage fuse reference voltages in microvolts for SDM630
*/
static const int
-sdm630_kbss_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
- 644000,
- 788000,
- 868000,
- 1068000,
+sdm630_kbss_fuse_ref_volt[2][SDM630_PERF_KBSS_FUSE_CORNERS] = {
+ [CPRH_KBSS_POWER_CLUSTER_ID] = {
+ 644000,
+ 788000,
+ 1068000,
+ },
+ [CPRH_KBSS_PERFORMANCE_CLUSTER_ID] = {
+ 644000,
+ 788000,
+ 868000,
+ 988000,
+ 1068000,
+ },
};
static const int
-sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
+sdm630_perf_kbss_speed_bin_2_fuse_ref_volt[SDM630_PERF_KBSS_FUSE_CORNERS] = {
644000,
788000,
868000,
+ 988000,
1140000,
};
@@ -552,7 +576,7 @@ sdm630_kbss_speed_bin_2_fuse_ref_volt[SDM630_KBSS_FUSE_CORNERS] = {
* sdm630 configuration
*/
#define SDM630_KBSS_POWER_CPR_SENSOR_COUNT 6
-#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 9
+#define SDM630_KBSS_PERFORMANCE_CPR_SENSOR_COUNT 6
/*
* SOC IDs
@@ -760,7 +784,7 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
struct cprh_kbss_fuses *fuse)
{
void __iomem *base = vreg->thread->ctrl->fuse_base;
- int i, id, rc;
+ int i, id, rc, fuse_corners;
rc = cpr3_read_fuse_param(base, sdm630_cpr_fusing_rev_param,
&fuse->cpr_fusing_rev);
@@ -772,7 +796,12 @@ static int cprh_sdm630_kbss_read_fuse_data(struct cpr3_regulator *vreg,
cpr3_info(vreg, "CPR fusing revision = %llu\n", fuse->cpr_fusing_rev);
id = vreg->thread->ctrl->ctrl_id;
- for (i = 0; i < SDM630_KBSS_FUSE_CORNERS; i++) {
+ if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+ fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+ else
+ fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
+
+ for (i = 0; i < fuse_corners; i++) {
rc = cpr3_read_fuse_param(base,
sdm630_kbss_init_voltage_param[id][i],
&fuse->init_voltage[i]);
@@ -856,7 +885,10 @@ static int cprh_kbss_read_fuse_data(struct cpr3_regulator *vreg)
fuse_corners = SDM660_KBSS_FUSE_CORNERS;
break;
case SDM630_SOC_ID:
- fuse_corners = SDM630_KBSS_FUSE_CORNERS;
+ if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID)
+ fuse_corners = SDM630_POWER_KBSS_FUSE_CORNERS;
+ else
+ fuse_corners = SDM630_PERF_KBSS_FUSE_CORNERS;
break;
case MSM8998_V1_SOC_ID:
case MSM8998_V2_SOC_ID:
@@ -1008,10 +1040,15 @@ static int cprh_kbss_calculate_open_loop_voltages(struct cpr3_regulator *vreg)
corner_name = cprh_sdm660_perf_kbss_fuse_corner_name;
break;
case SDM630_SOC_ID:
- ref_volt = sdm630_kbss_fuse_ref_volt;
- if (vreg->speed_bin_fuse == 2)
- ref_volt = sdm630_kbss_speed_bin_2_fuse_ref_volt;
- corner_name = cprh_sdm630_kbss_fuse_corner_name;
+ ref_volt = sdm630_kbss_fuse_ref_volt[id];
+ if (id == CPRH_KBSS_PERFORMANCE_CLUSTER_ID
+ && vreg->speed_bin_fuse == 2)
+ ref_volt = sdm630_perf_kbss_speed_bin_2_fuse_ref_volt;
+
+ if (id == CPRH_KBSS_POWER_CLUSTER_ID)
+ corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+ else
+ corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
break;
case MSM8998_V1_SOC_ID:
ref_volt = msm8998_v1_kbss_fuse_ref_volt;
@@ -1581,11 +1618,19 @@ static int cprh_kbss_calculate_target_quotients(struct cpr3_regulator *vreg)
}
break;
case SDM630_SOC_ID:
- corner_name = cprh_sdm630_kbss_fuse_corner_name;
- lowest_fuse_corner =
- CPRH_SDM630_KBSS_FUSE_CORNER_LOWSVS;
- highest_fuse_corner =
- CPRH_SDM630_KBSS_FUSE_CORNER_TURBO_L1;
+ if (vreg->thread->ctrl->ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID) {
+ corner_name = cprh_sdm630_power_kbss_fuse_corner_name;
+ lowest_fuse_corner =
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_LOWSVS;
+ highest_fuse_corner =
+ CPRH_SDM630_POWER_KBSS_FUSE_CORNER_TURBO_L1;
+ } else {
+ corner_name = cprh_sdm630_perf_kbss_fuse_corner_name;
+ lowest_fuse_corner =
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_LOWSVS;
+ highest_fuse_corner =
+ CPRH_SDM630_PERF_KBSS_FUSE_CORNER_TURBO_L2;
+ }
break;
case MSM8998_V1_SOC_ID:
case MSM8998_V2_SOC_ID:
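
With this split, SDM630 no longer has a single fuse-corner count: the power
cluster uses 3 corners (LowSVS, SVSPLUS, TURBO_L1) while the performance
cluster uses 5 (LowSVS, SVSPLUS, NOM, TURBO, TURBO_L2). Every lookup in the
patch reduces to the same cluster check, sketched here with the names taken
from the diff:

/* Select the SDM630 fuse-corner count for a given CPR controller. */
static inline int sdm630_kbss_fuse_corners(int ctrl_id)
{
	return ctrl_id == CPRH_KBSS_POWER_CLUSTER_ID ?
		SDM630_POWER_KBSS_FUSE_CORNERS :	/* 3 */
		SDM630_PERF_KBSS_FUSE_CORNERS;		/* 5 */
}
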
diff --git a/drivers/regulator/qpnp-labibb-regulator.c b/drivers/regulator/qpnp-labibb-regulator.c
index cf8f00085a0c..dbe2a08f1776 100644
--- a/drivers/regulator/qpnp-labibb-regulator.c
+++ b/drivers/regulator/qpnp-labibb-regulator.c
@@ -19,16 +19,19 @@
#include <linux/kernel.h>
#include <linux/regmap.h>
#include <linux/module.h>
+#include <linux/notifier.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/of_irq.h>
#include <linux/spmi.h>
#include <linux/platform_device.h>
#include <linux/string.h>
+#include <linux/workqueue.h>
#include <linux/regulator/driver.h>
#include <linux/regulator/machine.h>
#include <linux/regulator/of_regulator.h>
#include <linux/qpnp/qpnp-revid.h>
+#include <linux/regulator/qpnp-labibb-regulator.h>
#define QPNP_LABIBB_REGULATOR_DRIVER_NAME "qcom,qpnp-labibb-regulator"
@@ -594,6 +597,7 @@ struct qpnp_labibb {
const struct lab_ver_ops *lab_ver_ops;
struct mutex bus_mutex;
enum qpnp_labibb_mode mode;
+ struct work_struct lab_vreg_ok_work;
bool standalone;
bool ttw_en;
bool in_ttw_mode;
@@ -603,10 +607,13 @@ struct qpnp_labibb {
bool ttw_force_lab_on;
bool skip_2nd_swire_cmd;
bool pfm_enable;
+ bool notify_lab_vreg_ok_sts;
u32 swire_2nd_cmd_delay;
u32 swire_ibb_ps_enable_delay;
};
+static RAW_NOTIFIER_HEAD(labibb_notifier);
+
struct ibb_ver_ops {
int (*set_default_voltage)(struct qpnp_labibb *labibb,
bool use_default);
@@ -2124,6 +2131,36 @@ static int qpnp_labibb_regulator_ttw_mode_exit(struct qpnp_labibb *labibb)
return rc;
}
+static void qpnp_lab_vreg_notifier_work(struct work_struct *work)
+{
+ int rc = 0;
+ int retries = 1000;
+ u16 dly = 5000;
+ u8 val;
+ struct qpnp_labibb *labibb = container_of(work, struct qpnp_labibb,
+ lab_vreg_ok_work);
+
+ while (retries--) {
+ rc = qpnp_labibb_read(labibb, labibb->lab_base +
+ REG_LAB_STATUS1, &val, 1);
+ if (rc < 0) {
+ pr_err("read register %x failed rc = %d\n",
+ REG_LAB_STATUS1, rc);
+ return;
+ }
+
+ if (val & LAB_STATUS1_VREG_OK) {
+ raw_notifier_call_chain(&labibb_notifier,
+ LAB_VREG_OK, NULL);
+ break;
+ }
+
+ usleep_range(dly, dly + 100);
+ }
+
+ if (retries < 0)
+ pr_err("LAB_VREG_OK not set, failed to notify\n");
+}
+
static int qpnp_labibb_regulator_enable(struct qpnp_labibb *labibb)
{
int rc;
@@ -2326,6 +2363,9 @@ static int qpnp_lab_regulator_enable(struct regulator_dev *rdev)
labibb->lab_vreg.vreg_enabled = 1;
}
+ if (labibb->notify_lab_vreg_ok_sts)
+ schedule_work(&labibb->lab_vreg_ok_work);
+
return 0;
}
@@ -2578,6 +2618,9 @@ static int register_qpnp_lab_regulator(struct qpnp_labibb *labibb,
return rc;
}
+ labibb->notify_lab_vreg_ok_sts = of_property_read_bool(of_node,
+ "qcom,notify-lab-vreg-ok-sts");
+
rc = of_property_read_u32(of_node, "qcom,qpnp-lab-soft-start",
&(labibb->lab_vreg.soft_start));
if (!rc) {
@@ -3817,6 +3860,8 @@ static int qpnp_labibb_regulator_probe(struct platform_device *pdev)
goto fail_registration;
}
}
+
+ INIT_WORK(&labibb->lab_vreg_ok_work, qpnp_lab_vreg_notifier_work);
dev_set_drvdata(&pdev->dev, labibb);
pr_info("LAB/IBB registered successfully, lab_vreg enable=%d ibb_vreg enable=%d swire_control=%d\n",
labibb->lab_vreg.vreg_enabled,
@@ -3834,6 +3879,18 @@ fail_registration:
return rc;
}
+int qpnp_labibb_notifier_register(struct notifier_block *nb)
+{
+ return raw_notifier_chain_register(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_register);
+
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb)
+{
+ return raw_notifier_chain_unregister(&labibb_notifier, nb);
+}
+EXPORT_SYMBOL(qpnp_labibb_notifier_unregister);
+
static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
{
struct qpnp_labibb *labibb = dev_get_drvdata(&pdev->dev);
@@ -3843,6 +3900,8 @@ static int qpnp_labibb_regulator_remove(struct platform_device *pdev)
regulator_unregister(labibb->lab_vreg.rdev);
if (labibb->ibb_vreg.rdev)
regulator_unregister(labibb->ibb_vreg.rdev);
+
+ cancel_work_sync(&labibb->lab_vreg_ok_work);
}
return 0;
}
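
A consumer of the new notifier chain (for instance a panel driver that must
wait for LAB power-good before enabling its backlight) would hook in roughly
as below. Only the register/unregister API and the LAB_VREG_OK event come
from this patch; the callback body is illustrative:

#include <linux/notifier.h>
#include <linux/regulator/qpnp-labibb-regulator.h>

static int example_labibb_event(struct notifier_block *nb,
				unsigned long event, void *data)
{
	if (event == LAB_VREG_OK)
		pr_debug("LAB output reached VREG_OK\n");
	return NOTIFY_OK;
}

static struct notifier_block example_labibb_nb = {
	.notifier_call = example_labibb_event,
};

static int example_client_init(void)
{
	return qpnp_labibb_notifier_register(&example_labibb_nb);
}

static void example_client_exit(void)
{
	qpnp_labibb_notifier_unregister(&example_labibb_nb);
}
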
diff --git a/drivers/regulator/qpnp-lcdb-regulator.c b/drivers/regulator/qpnp-lcdb-regulator.c
index a08ade61e057..19b1d319ef45 100644
--- a/drivers/regulator/qpnp-lcdb-regulator.c
+++ b/drivers/regulator/qpnp-lcdb-regulator.c
@@ -190,6 +190,9 @@ struct qpnp_lcdb {
bool ttw_enable;
bool ttw_mode_sw;
+ /* top level DT params */
+ bool force_module_reenable;
+
/* status parameters */
bool lcdb_enabled;
bool settings_saved;
@@ -588,6 +591,23 @@ static int qpnp_lcdb_enable(struct qpnp_lcdb *lcdb)
goto fail_enable;
}
+ if (lcdb->force_module_reenable) {
+ val = 0;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to enable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ val = MODULE_EN_BIT;
+ rc = qpnp_lcdb_write(lcdb, lcdb->base + LCDB_ENABLE_CTL1_REG,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to disable lcdb rc= %d\n", rc);
+ goto fail_enable;
+ }
+ }
+
/* poll for vreg_ok */
timeout = 10;
delay = lcdb->bst.soft_start_us + lcdb->ldo.soft_start_us +
@@ -1590,6 +1610,9 @@ static int qpnp_lcdb_parse_dt(struct qpnp_lcdb *lcdb)
}
}
+ lcdb->force_module_reenable = of_property_read_bool(node,
+ "qcom,force-module-reenable");
+
if (of_property_read_bool(node, "qcom,ttw-enable")) {
rc = qpnp_lcdb_parse_ttw(lcdb);
if (rc < 0) {
diff --git a/drivers/sensors/sensors_ssc.c b/drivers/sensors/sensors_ssc.c
index 0910ef34e777..0e299efece94 100644
--- a/drivers/sensors/sensors_ssc.c
+++ b/drivers/sensors/sensors_ssc.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -26,6 +26,8 @@
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
#include <soc/qcom/subsystem_restart.h>
#define IMAGE_LOAD_CMD 1
@@ -64,10 +66,11 @@ static struct attribute *attrs[] = {
};
static struct platform_device *slpi_private;
+static struct work_struct slpi_ldr_work;
-static void slpi_loader_do(struct platform_device *pdev)
+static void slpi_load_fw(struct work_struct *slpi_ldr_work)
{
-
+ struct platform_device *pdev = slpi_private;
struct slpi_loader_private *priv = NULL;
int ret;
const char *firmware_name = NULL;
@@ -111,6 +114,12 @@ fail:
dev_err(&pdev->dev, "%s: SLPI image loading failed\n", __func__);
}
+static void slpi_loader_do(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "%s: scheduling work to load SLPI fw\n", __func__);
+ schedule_work(&slpi_ldr_work);
+}
+
static void slpi_loader_unload(struct platform_device *pdev)
{
struct slpi_loader_private *priv = NULL;
@@ -336,6 +345,8 @@ static int sensors_ssc_probe(struct platform_device *pdev)
goto cdev_add_err;
}
+ INIT_WORK(&slpi_ldr_work, slpi_load_fw);
+
return 0;
cdev_add_err:
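
The SLPI loader now defers the actual firmware load to a workqueue so the
sysfs store returns immediately; the same pattern is applied to the ADSP and
CDSP loaders further below. Its core, with illustrative names:

#include <linux/platform_device.h>
#include <linux/workqueue.h>

static struct work_struct example_ldr_work;
static struct platform_device *example_private;

static void example_load_fw(struct work_struct *work)
{
	struct platform_device *pdev = example_private;

	/* process context: safe to sleep while PIL loads the image */
	dev_dbg(&pdev->dev, "loading firmware\n");
}

static void example_loader_do(struct platform_device *pdev)
{
	/* called from the sysfs store: just queue and return */
	schedule_work(&example_ldr_work);
}

static int example_probe(struct platform_device *pdev)
{
	example_private = pdev;
	INIT_WORK(&example_ldr_work, example_load_fw);
	return 0;
}
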
diff --git a/drivers/soc/qcom/Kconfig b/drivers/soc/qcom/Kconfig
index d3f1050499f3..75bebf66376d 100644
--- a/drivers/soc/qcom/Kconfig
+++ b/drivers/soc/qcom/Kconfig
@@ -46,6 +46,15 @@ config QPNP_HAPTIC
module provides haptic feedback for user actions such as a long press
on the touch screen. It uses the Android timed-output framework.
+config QPNP_PBS
+ tristate "PBS trigger support for QPNP PMIC"
+ depends on SPMI
+ help
+ This driver supports configuring a software PBS trigger event through
+ PBS RAM on Qualcomm Technologies, Inc. QPNP PMICs. It provides APIs
+ to client drivers that want to send a PBS trigger event to the PBS
+ RAM.
+
config MSM_SMD
depends on MSM_SMEM
bool "MSM Shared Memory Driver (SMD)"
diff --git a/drivers/soc/qcom/Makefile b/drivers/soc/qcom/Makefile
index 2605107e2dbd..fa350d122384 100644
--- a/drivers/soc/qcom/Makefile
+++ b/drivers/soc/qcom/Makefile
@@ -52,6 +52,7 @@ endif
obj-$(CONFIG_QPNP_HAPTIC) += qpnp-haptic.o
obj-$(CONFIG_QCOM_GSBI) += qcom_gsbi.o
obj-$(CONFIG_QCOM_CPUSS_DUMP) += cpuss_dump.o
+obj-$(CONFIG_QPNP_PBS) += qpnp-pbs.o
obj-$(CONFIG_QCOM_PM) += spm.o
obj-$(CONFIG_QCOM_SMD) += smd.o
obj-$(CONFIG_QCOM_SMD_RPM) += smd-rpm.o
diff --git a/drivers/soc/qcom/glink.c b/drivers/soc/qcom/glink.c
index 33cf48454414..09c50a81a943 100644
--- a/drivers/soc/qcom/glink.c
+++ b/drivers/soc/qcom/glink.c
@@ -2048,7 +2048,7 @@ static struct glink_core_xprt_ctx *find_open_transport(const char *edge,
uint16_t *best_id)
{
struct glink_core_xprt_ctx *xprt;
- struct glink_core_xprt_ctx *best_xprt;
+ struct glink_core_xprt_ctx *best_xprt = NULL;
struct glink_core_xprt_ctx *ret;
bool first = true;
@@ -5502,7 +5502,7 @@ static void tx_func(struct kthread_work *work)
{
struct channel_ctx *ch_ptr;
uint32_t prio;
- uint32_t tx_ready_head_prio;
+ uint32_t tx_ready_head_prio = 0;
int ret;
struct channel_ctx *tx_ready_head = NULL;
bool transmitted_successfully = true;
diff --git a/drivers/soc/qcom/msm_smem.c b/drivers/soc/qcom/msm_smem.c
index d3f44ab75f67..a94f741c2056 100644
--- a/drivers/soc/qcom/msm_smem.c
+++ b/drivers/soc/qcom/msm_smem.c
@@ -1157,7 +1157,7 @@ static void smem_module_init_notify(uint32_t state, void *data)
static void smem_init_security_partition(struct smem_toc_entry *entry,
uint32_t num)
{
- uint16_t remote_host;
+ uint16_t remote_host = 0;
struct smem_partition_header *hdr;
bool is_comm_partition = false;
diff --git a/drivers/soc/qcom/peripheral-loader.c b/drivers/soc/qcom/peripheral-loader.c
index 4d9767b6f8a3..21e9f17e6a7e 100644
--- a/drivers/soc/qcom/peripheral-loader.c
+++ b/drivers/soc/qcom/peripheral-loader.c
@@ -595,6 +595,22 @@ static void pil_release_mmap(struct pil_desc *desc)
struct pil_priv *priv = desc->priv;
struct pil_seg *p, *tmp;
u64 zero = 0ULL;
+
+ if (priv->info) {
+ __iowrite32_copy(&priv->info->start, &zero,
+ sizeof(zero) / 4);
+ writel_relaxed(0, &priv->info->size);
+ }
+
+ list_for_each_entry_safe(p, tmp, &priv->segs, list) {
+ list_del(&p->list);
+ kfree(p);
+ }
+}
+
+static void pil_clear_segment(struct pil_desc *desc)
+{
+ struct pil_priv *priv = desc->priv;
u8 __iomem *buf;
struct pil_map_fw_info map_fw_info = {
@@ -613,16 +629,6 @@ static void pil_release_mmap(struct pil_desc *desc)
desc->unmap_fw_mem(buf, (priv->region_end - priv->region_start),
map_data);
- if (priv->info) {
- __iowrite32_copy(&priv->info->start, &zero,
- sizeof(zero) / 4);
- writel_relaxed(0, &priv->info->size);
- }
-
- list_for_each_entry_safe(p, tmp, &priv->segs, list) {
- list_del(&p->list);
- kfree(p);
- }
}
#define IOMAP_SIZE SZ_1M
@@ -914,6 +920,7 @@ out:
&desc->attrs);
priv->region = NULL;
}
+ pil_clear_segment(desc);
pil_release_mmap(desc);
}
return ret;
diff --git a/drivers/soc/qcom/qdsp6v2/adsp-loader.c b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
index 51539a36a74f..b45eac88ed12 100644
--- a/drivers/soc/qcom/qdsp6v2/adsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/adsp-loader.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -20,6 +20,8 @@
#include <linux/qdsp6v2/apr.h>
#include <linux/of_device.h>
#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
#include <soc/qcom/subsystem_restart.h>
#define Q6_PIL_GET_DELAY_MS 100
@@ -44,12 +46,13 @@ static struct attribute *attrs[] = {
NULL,
};
+static struct work_struct adsp_ldr_work;
static struct platform_device *adsp_private;
static void adsp_loader_unload(struct platform_device *pdev);
-static void adsp_loader_do(struct platform_device *pdev)
+static void adsp_load_fw(struct work_struct *adsp_ldr_work)
{
-
+ struct platform_device *pdev = adsp_private;
struct adsp_loader_private *priv = NULL;
const char *adsp_dt = "qcom,adsp-state";
@@ -146,6 +149,11 @@ fail:
return;
}
+static void adsp_loader_do(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "%s: scheduling work to load ADSP fw\n", __func__);
+ schedule_work(&adsp_ldr_work);
+}
static ssize_t adsp_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -270,6 +278,8 @@ static int adsp_loader_probe(struct platform_device *pdev)
return ret;
}
+ INIT_WORK(&adsp_ldr_work, adsp_load_fw);
+
return 0;
}
diff --git a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
index 19974b61ec1c..d11ffdde23be 100644
--- a/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
+++ b/drivers/soc/qcom/qdsp6v2/apr_tal_glink.c
@@ -218,6 +218,7 @@ static void apr_tal_notify_remote_rx_intent(void *handle, const void *priv,
*/
pr_debug("%s: remote queued an intent\n", __func__);
apr_ch->if_remote_intent_ready = true;
+ wake_up(&apr_ch->wait);
}
void apr_tal_notify_state(void *handle, const void *priv, unsigned event)
diff --git a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
index 0b801c5cd7dd..cae26e3913d6 100644
--- a/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
+++ b/drivers/soc/qcom/qdsp6v2/cdsp-loader.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2012-2014,2017, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2012-2014, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -19,6 +19,8 @@
#include <linux/platform_device.h>
#include <linux/of_device.h>
#include <linux/sysfs.h>
+#include <linux/workqueue.h>
+
#include <soc/qcom/subsystem_restart.h>
#define BOOT_CMD 1
@@ -47,11 +49,12 @@ static struct attribute *attrs[] = {
static u32 cdsp_state = CDSP_SUBSYS_DOWN;
static struct platform_device *cdsp_private;
+static struct work_struct cdsp_ldr_work;
static void cdsp_loader_unload(struct platform_device *pdev);
-static int cdsp_loader_do(struct platform_device *pdev)
+static void cdsp_load_fw(struct work_struct *cdsp_ldr_work)
{
-
+ struct platform_device *pdev = cdsp_private;
struct cdsp_loader_private *priv = NULL;
int rc = 0;
@@ -101,14 +104,19 @@ static int cdsp_loader_do(struct platform_device *pdev)
}
dev_dbg(&pdev->dev, "%s: CDSP image is loaded\n", __func__);
- return rc;
+ return;
}
fail:
dev_err(&pdev->dev, "%s: CDSP image loading failed\n", __func__);
- return rc;
+ return;
}
+static void cdsp_loader_do(struct platform_device *pdev)
+{
+ dev_info(&pdev->dev, "%s: scheduling work to load CDSP fw\n", __func__);
+ schedule_work(&cdsp_ldr_work);
+}
static ssize_t cdsp_boot_store(struct kobject *kobj,
struct kobj_attribute *attr,
@@ -126,7 +134,7 @@ static ssize_t cdsp_boot_store(struct kobject *kobj,
pr_debug("%s: going to call cdsp_loader_do\n", __func__);
cdsp_loader_do(cdsp_private);
} else if (boot == IMAGE_UNLOAD_CMD) {
- pr_debug("%s: going to call adsp_unloader\n", __func__);
+ pr_debug("%s: going to call cdsp_unloader\n", __func__);
cdsp_loader_unload(cdsp_private);
}
return count;
@@ -238,6 +246,8 @@ static int cdsp_loader_probe(struct platform_device *pdev)
return ret;
}
+ INIT_WORK(&cdsp_ldr_work, cdsp_load_fw);
+
return 0;
}
diff --git a/drivers/soc/qcom/qpnp-haptic.c b/drivers/soc/qcom/qpnp-haptic.c
index 39070561d7e4..cf0b7ff25201 100644
--- a/drivers/soc/qcom/qpnp-haptic.c
+++ b/drivers/soc/qcom/qpnp-haptic.c
@@ -141,9 +141,7 @@
#define QPNP_HAP_CYCLS 5
#define QPNP_TEST_TIMER_MS 5
-#define AUTO_RES_ENABLE_TIMEOUT 20000
-#define AUTO_RES_ERR_CAPTURE_RES 5
-#define AUTO_RES_ERR_MAX 15
+#define QPNP_HAP_TIME_REQ_FOR_BACK_EMF_GEN 20000
#define MISC_TRIM_ERROR_RC19P2_CLK 0x09F5
#define MISC_SEC_ACCESS 0x09D0
@@ -152,8 +150,22 @@
#define POLL_TIME_AUTO_RES_ERR_NS (5 * NSEC_PER_MSEC)
-#define LRA_POS_FREQ_COUNT 6
-int lra_play_rate_code[LRA_POS_FREQ_COUNT];
+#define MAX_POSITIVE_VARIATION_LRA_FREQ 30
+#define MAX_NEGATIVE_VARIATION_LRA_FREQ -30
+#define FREQ_VARIATION_STEP 5
+#define AUTO_RES_ERROR_CAPTURE_RES 5
+#define AUTO_RES_ERROR_MAX 30
+#define ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE \
+ ((MAX_POSITIVE_VARIATION_LRA_FREQ - MAX_NEGATIVE_VARIATION_LRA_FREQ) \
+ / FREQ_VARIATION_STEP)
+#define LRA_DRIVE_PERIOD_POS_ERR(hap, rc_clk_err_percent_x10) \
+ (hap->init_drive_period_code = (hap->init_drive_period_code * \
+ (1000 + rc_clk_err_percent_x10)) / 1000)
+#define LRA_DRIVE_PERIOD_NEG_ERR(hap, rc_clk_err_percent_x10) \
+ (hap->init_drive_period_code = (hap->init_drive_period_code * \
+ (1000 - rc_clk_err_percent_x10)) / 1000)
+
+u32 adjusted_lra_play_rate_code[ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE];
/* haptic debug register set */
static u8 qpnp_hap_dbg_regs[] = {
@@ -246,10 +258,21 @@ struct qpnp_pwm_info {
* @ pwm_info - pwm info
* @ lock - mutex lock
* @ wf_lock - mutex lock for waveform
+ * @ init_drive_period_code - the initial LRA drive period code
+ * @ drive_period_code_max_limit_percent_variation - maximum limit of
+ percentage variation of drive period code
+ * @ drive_period_code_min_limit_percent_variation - minimum limit of
+ percentage variation of drive period code
+ * @ drive_period_code_max_limit - calculated drive period code with
+ percentage variation on the higher side.
+ * @ drive_period_code_min_limit - calculated drive period code with
+ percentage variation on the lower side
* @ play_mode - play mode
* @ auto_res_mode - auto resonance mode
* @ lra_high_z - high z option line
* @ timeout_ms - max timeout in ms
+ * @ time_required_to_generate_back_emf_us - the time required for sufficient
+ back-emf to be generated for auto resonance to be successful
* @ vmax_mv - max voltage in mv
* @ ilim_ma - limiting current in ma
* @ sc_deb_cycles - short circuit debounce cycles
@@ -280,6 +303,8 @@ struct qpnp_pwm_info {
* @ sup_brake_pat - support custom brake pattern
* @ correct_lra_drive_freq - correct LRA Drive Frequency
* @ misc_trim_error_rc19p2_clk_reg_present - if MISC Trim Error reg is present
+ * @ perform_lra_auto_resonance_search - whether the LRA auto resonance
+ * search algorithm should be performed or not.
*/
struct qpnp_hap {
struct platform_device *pdev;
@@ -300,7 +325,9 @@ struct qpnp_hap {
enum qpnp_hap_mode play_mode;
enum qpnp_hap_auto_res_mode auto_res_mode;
enum qpnp_hap_high_z lra_high_z;
+ u32 init_drive_period_code;
u32 timeout_ms;
+ u32 time_required_to_generate_back_emf_us;
u32 vmax_mv;
u32 ilim_ma;
u32 sc_deb_cycles;
@@ -312,16 +339,21 @@ struct qpnp_hap {
u32 play_irq;
u32 sc_irq;
u16 base;
+ u16 drive_period_code_max_limit;
+ u16 drive_period_code_min_limit;
+ u8 drive_period_code_max_limit_percent_variation;
+ u8 drive_period_code_min_limit_percent_variation;
u8 act_type;
u8 wave_shape;
- u8 wave_samp[QPNP_HAP_WAV_SAMP_LEN];
- u8 shadow_wave_samp[QPNP_HAP_WAV_SAMP_LEN];
- u8 brake_pat[QPNP_HAP_BRAKE_PAT_LEN];
+ u8 wave_samp[QPNP_HAP_WAV_SAMP_LEN];
+ u8 shadow_wave_samp[QPNP_HAP_WAV_SAMP_LEN];
+ u8 brake_pat[QPNP_HAP_BRAKE_PAT_LEN];
u8 reg_en_ctl;
u8 reg_play;
u8 lra_res_cal_period;
u8 sc_duration;
u8 ext_pwm_dtest_line;
+ bool vcc_pon_enabled;
bool state;
bool use_play_irq;
bool use_sc_irq;
@@ -333,6 +365,7 @@ struct qpnp_hap {
bool sup_brake_pat;
bool correct_lra_drive_freq;
bool misc_trim_error_rc19p2_clk_reg_present;
+ bool perform_lra_auto_resonance_search;
};
static struct qpnp_hap *ghap;
@@ -1314,29 +1347,62 @@ static struct device_attribute qpnp_hap_attrs[] = {
qpnp_hap_min_max_test_data_store),
};
-static void calculate_lra_code(struct qpnp_hap *hap)
+static int calculate_lra_code(struct qpnp_hap *hap)
{
- u8 play_rate_code_lo, play_rate_code_hi;
- int play_rate_code, neg_idx = 0, pos_idx = LRA_POS_FREQ_COUNT-1;
- int lra_init_freq, freq_variation, start_variation = AUTO_RES_ERR_MAX;
+ u8 lra_drive_period_code_lo = 0, lra_drive_period_code_hi = 0;
+ u32 lra_drive_period_code, lra_drive_frequency_hz, freq_variation;
+ u8 start_variation = AUTO_RES_ERROR_MAX, i;
+ u8 neg_idx = 0, pos_idx = ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE - 1;
+ int rc = 0;
- qpnp_hap_read_reg(hap, &play_rate_code_lo,
- QPNP_HAP_RATE_CFG1_REG(hap->base));
- qpnp_hap_read_reg(hap, &play_rate_code_hi,
- QPNP_HAP_RATE_CFG2_REG(hap->base));
+ rc = qpnp_hap_read_reg(hap, &lra_drive_period_code_lo,
+ QPNP_HAP_RATE_CFG1_REG(hap->base));
+ if (rc) {
+ dev_err(&hap->pdev->dev,
+ "Error while reading RATE_CFG1 register\n");
+ return rc;
+ }
+
+ rc = qpnp_hap_read_reg(hap, &lra_drive_period_code_hi,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
+ if (rc) {
+ dev_err(&hap->pdev->dev,
+ "Error while reading RATE_CFG2 register\n");
+ return rc;
+ }
- play_rate_code = (play_rate_code_hi << 8) | (play_rate_code_lo & 0xff);
+ if (!lra_drive_period_code_lo && !lra_drive_period_code_hi) {
+ dev_err(&hap->pdev->dev,
+ "Unexpected Error: both RATE_CFG1 and RATE_CFG2 read 0\n");
+ return -EINVAL;
+ }
- lra_init_freq = 200000 / play_rate_code;
+ lra_drive_period_code =
+ (lra_drive_period_code_hi << 8) | (lra_drive_period_code_lo & 0xff);
+ lra_drive_frequency_hz = 200000 / lra_drive_period_code;
- while (start_variation >= AUTO_RES_ERR_CAPTURE_RES) {
- freq_variation = (lra_init_freq * start_variation) / 100;
- lra_play_rate_code[neg_idx++] = 200000 / (lra_init_freq -
- freq_variation);
- lra_play_rate_code[pos_idx--] = 200000 / (lra_init_freq +
- freq_variation);
- start_variation -= AUTO_RES_ERR_CAPTURE_RES;
+ while (start_variation >= AUTO_RES_ERROR_CAPTURE_RES) {
+ freq_variation =
+ (lra_drive_frequency_hz * start_variation) / 100;
+ adjusted_lra_play_rate_code[neg_idx++] =
+ 200000 / (lra_drive_frequency_hz - freq_variation);
+ adjusted_lra_play_rate_code[pos_idx--] =
+ 200000 / (lra_drive_frequency_hz + freq_variation);
+ start_variation -= AUTO_RES_ERROR_CAPTURE_RES;
}
+
+ dev_dbg(&hap->pdev->dev,
+ "lra_drive_period_code_lo = 0x%x lra_drive_period_code_hi = 0x%x\n"
+ "lra_drive_period_code = 0x%x, lra_drive_frequency_hz = 0x%x\n"
+ "Calculated play rate code values are :\n",
+ lra_drive_period_code_lo, lra_drive_period_code_hi,
+ lra_drive_period_code, lra_drive_frequency_hz);
+
+ for (i = 0; i < ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE; ++i)
+ dev_dbg(&hap->pdev->dev,
+ " 0x%x", adjusted_lra_play_rate_code[i]);
+
+ return 0;
}
static int qpnp_hap_auto_res_enable(struct qpnp_hap *hap, int enable)
@@ -1369,20 +1435,37 @@ static int qpnp_hap_auto_res_enable(struct qpnp_hap *hap, int enable)
static void update_lra_frequency(struct qpnp_hap *hap)
{
u8 lra_auto_res_lo = 0, lra_auto_res_hi = 0;
+ u32 play_rate_code;
qpnp_hap_read_reg(hap, &lra_auto_res_lo,
QPNP_HAP_LRA_AUTO_RES_LO(hap->base));
qpnp_hap_read_reg(hap, &lra_auto_res_hi,
QPNP_HAP_LRA_AUTO_RES_HI(hap->base));
- if (lra_auto_res_lo && lra_auto_res_hi) {
- qpnp_hap_write_reg(hap, &lra_auto_res_lo,
- QPNP_HAP_RATE_CFG1_REG(hap->base));
+ play_rate_code =
+ (lra_auto_res_hi & 0xF0) << 4 | (lra_auto_res_lo & 0xFF);
- lra_auto_res_hi = lra_auto_res_hi >> 4;
- qpnp_hap_write_reg(hap, &lra_auto_res_hi,
- QPNP_HAP_RATE_CFG2_REG(hap->base));
- }
+ dev_dbg(&hap->pdev->dev,
+ "lra_auto_res_lo = 0x%x lra_auto_res_hi = 0x%x play_rate_code = 0x%x\n",
+ lra_auto_res_lo, lra_auto_res_hi, play_rate_code);
+
+ /*
+ * If the drive period code read from the AUTO_RES_LO and AUTO_RES_HI
+ * registers lies outside the min/max limits derived from the percent
+ * variations read from DT, the RATE_CFG registers are not updated.
+ */
+
+ if ((play_rate_code <= hap->drive_period_code_min_limit) ||
+ (play_rate_code >= hap->drive_period_code_max_limit))
+ return;
+
+ qpnp_hap_write_reg(hap, &lra_auto_res_lo,
+ QPNP_HAP_RATE_CFG1_REG(hap->base));
+
+ lra_auto_res_hi = lra_auto_res_hi >> 4;
+ qpnp_hap_write_reg(hap, &lra_auto_res_hi,
+ QPNP_HAP_RATE_CFG2_REG(hap->base));
}
static enum hrtimer_restart detect_auto_res_error(struct hrtimer *timer)
@@ -1412,36 +1495,38 @@ static void correct_auto_res_error(struct work_struct *auto_res_err_work)
struct qpnp_hap, auto_res_err_work);
u8 lra_code_lo, lra_code_hi, disable_hap = 0x00;
- static int lra_freq_index;
- ktime_t currtime, remaining_time;
- int temp, rem = 0, index = lra_freq_index % LRA_POS_FREQ_COUNT;
+ static u8 lra_freq_index;
+ ktime_t currtime = ktime_set(0, 0), remaining_time = ktime_set(0, 0);
- if (hrtimer_active(&hap->hap_timer)) {
+ if (hrtimer_active(&hap->hap_timer))
remaining_time = hrtimer_get_remaining(&hap->hap_timer);
- rem = (int)ktime_to_us(remaining_time);
- }
qpnp_hap_play(hap, 0);
qpnp_hap_write_reg(hap, &disable_hap,
QPNP_HAP_EN_CTL_REG(hap->base));
- lra_code_lo = lra_play_rate_code[index] & QPNP_HAP_RATE_CFG1_MASK;
- qpnp_hap_write_reg(hap, &lra_code_lo,
- QPNP_HAP_RATE_CFG1_REG(hap->base));
+ if (hap->perform_lra_auto_resonance_search) {
+ lra_code_lo =
+ adjusted_lra_play_rate_code[lra_freq_index]
+ & QPNP_HAP_RATE_CFG1_MASK;
- qpnp_hap_read_reg(hap, &lra_code_hi,
- QPNP_HAP_RATE_CFG2_REG(hap->base));
+ qpnp_hap_write_reg(hap, &lra_code_lo,
+ QPNP_HAP_RATE_CFG1_REG(hap->base));
- lra_code_hi &= QPNP_HAP_RATE_CFG2_MASK;
- temp = lra_play_rate_code[index] >> QPNP_HAP_RATE_CFG2_SHFT;
- lra_code_hi |= temp;
+ lra_code_hi = adjusted_lra_play_rate_code[lra_freq_index]
+ >> QPNP_HAP_RATE_CFG2_SHFT;
- qpnp_hap_write_reg(hap, &lra_code_hi,
+ qpnp_hap_write_reg(hap, &lra_code_hi,
QPNP_HAP_RATE_CFG2_REG(hap->base));
- lra_freq_index++;
+ lra_freq_index = (lra_freq_index+1) %
+ ADJUSTED_LRA_PLAY_RATE_CODE_ARRSIZE;
+ }
- if (rem > 0) {
+ dev_dbg(&hap->pdev->dev, "Remaining time is %lld\n",
+ ktime_to_us(remaining_time));
+
+ if ((ktime_to_us(remaining_time)) > 0) {
currtime = ktime_get();
hap->state = 1;
hrtimer_forward(&hap->hap_timer, currtime, remaining_time);
@@ -1455,6 +1540,7 @@ static int qpnp_hap_set(struct qpnp_hap *hap, int on)
int rc = 0;
u8 val = 0;
unsigned long timeout_ns = POLL_TIME_AUTO_RES_ERR_NS;
+ u32 back_emf_delay_us = hap->time_required_to_generate_back_emf_us;
if (hap->play_mode == QPNP_HAP_PWM) {
if (on)
@@ -1464,8 +1550,21 @@ static int qpnp_hap_set(struct qpnp_hap *hap, int on)
} else if (hap->play_mode == QPNP_HAP_BUFFER ||
hap->play_mode == QPNP_HAP_DIRECT) {
if (on) {
- if (hap->correct_lra_drive_freq ||
- hap->auto_res_mode == QPNP_HAP_AUTO_RES_QWD)
+ /*
+ * For auto resonance detection to work properly,
+ * sufficient back-emf has to be generated. In general,
+ * back-emf takes some time to build up. When the auto
+ * resonance mode is chosen as QWD, high-z will be
+ * applied for every LRA cycle and hence there won't be
+ * enough back-emf at the start-up. Hence, the motor
+ * needs to vibrate for a few LRA cycles after the PLAY
+ * bit is asserted. So disable the auto resonance here
+ * and enable it after the sleep of
+ * 'time_required_to_generate_back_emf_us' is completed.
+ */
+ if ((hap->act_type == QPNP_HAP_LRA) &&
+ (hap->correct_lra_drive_freq ||
+ hap->auto_res_mode == QPNP_HAP_AUTO_RES_QWD))
qpnp_hap_auto_res_enable(hap, 0);
rc = qpnp_hap_mod_enable(hap, on);
@@ -1474,17 +1573,18 @@ static int qpnp_hap_set(struct qpnp_hap *hap, int on)
rc = qpnp_hap_play(hap, on);
- if ((hap->act_type == QPNP_HAP_LRA &&
- hap->correct_lra_drive_freq) ||
- hap->auto_res_mode == QPNP_HAP_AUTO_RES_QWD) {
- usleep_range(AUTO_RES_ENABLE_TIMEOUT,
- (AUTO_RES_ENABLE_TIMEOUT + 1));
+ if ((hap->act_type == QPNP_HAP_LRA) &&
+ (hap->correct_lra_drive_freq ||
+ hap->auto_res_mode == QPNP_HAP_AUTO_RES_QWD)) {
+ usleep_range(back_emf_delay_us,
+ (back_emf_delay_us + 1));
rc = qpnp_hap_auto_res_enable(hap, 1);
if (rc < 0)
return rc;
}
- if (hap->correct_lra_drive_freq) {
+ if (hap->act_type == QPNP_HAP_LRA &&
+ hap->correct_lra_drive_freq) {
/*
* Start timer to poll Auto Resonance error bit
*/
@@ -1500,7 +1600,8 @@ static int qpnp_hap_set(struct qpnp_hap *hap, int on)
if (rc < 0)
return rc;
- if (hap->correct_lra_drive_freq) {
+ if (hap->act_type == QPNP_HAP_LRA &&
+ hap->correct_lra_drive_freq) {
rc = qpnp_hap_read_reg(hap, &val,
QPNP_HAP_STATUS(hap->base));
if (!(val & AUTO_RES_ERR_BIT))
@@ -1511,7 +1612,6 @@ static int qpnp_hap_set(struct qpnp_hap *hap, int on)
if (hap->act_type == QPNP_HAP_LRA &&
hap->correct_lra_drive_freq) {
hrtimer_cancel(&hap->auto_res_err_poll_timer);
- calculate_lra_code(hap);
}
}
}
@@ -1619,13 +1719,15 @@ static void qpnp_hap_worker(struct work_struct *work)
struct qpnp_hap *hap = container_of(work, struct qpnp_hap,
work);
u8 val = 0x00;
- int rc, reg_en = 0;
+ int rc;
- if (hap->vcc_pon) {
- reg_en = regulator_enable(hap->vcc_pon);
- if (reg_en)
- pr_err("%s: could not enable vcc_pon regulator\n",
- __func__);
+ if (hap->vcc_pon && hap->state && !hap->vcc_pon_enabled) {
+ rc = regulator_enable(hap->vcc_pon);
+ if (rc < 0)
+ pr_err("%s: could not enable vcc_pon regulator rc=%d\n",
+ __func__, rc);
+ else
+ hap->vcc_pon_enabled = true;
}
/* Disable haptics module if the duration of short circuit
@@ -1640,11 +1742,13 @@ static void qpnp_hap_worker(struct work_struct *work)
qpnp_hap_set(hap, hap->state);
}
- if (hap->vcc_pon && !reg_en) {
+ if (hap->vcc_pon && !hap->state && hap->vcc_pon_enabled) {
rc = regulator_disable(hap->vcc_pon);
if (rc)
- pr_err("%s: could not disable vcc_pon regulator\n",
- __func__);
+ pr_err("%s: could not disable vcc_pon regulator rc=%d\n",
+ __func__, rc);
+ else
+ hap->vcc_pon_enabled = false;
}
}
@@ -1706,10 +1810,16 @@ static SIMPLE_DEV_PM_OPS(qpnp_haptic_pm_ops, qpnp_haptic_suspend, NULL);
/* Configuration api for haptics registers */
static int qpnp_hap_config(struct qpnp_hap *hap)
{
- u8 reg = 0, unlock_val, error_value;
- int rc, i, temp;
+ u8 reg = 0, unlock_val;
+ u32 temp;
+ int rc, i;
uint error_code = 0;
+ /*
+ * This denotes the percentage error in the RC clock multiplied by 10
+ */
+ u8 rc_clk_err_percent_x10;
+
/* Configure the ACTUATOR TYPE register */
rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_ACT_TYPE_REG(hap->base));
if (rc < 0)
@@ -1838,16 +1948,22 @@ static int qpnp_hap_config(struct qpnp_hap *hap)
else if (hap->wave_play_rate_us > QPNP_HAP_WAV_PLAY_RATE_US_MAX)
hap->wave_play_rate_us = QPNP_HAP_WAV_PLAY_RATE_US_MAX;
- temp = hap->wave_play_rate_us / QPNP_HAP_RATE_CFG_STEP_US;
+ hap->init_drive_period_code =
+ hap->wave_play_rate_us / QPNP_HAP_RATE_CFG_STEP_US;
/*
- * The frequency of 19.2Mzhz RC clock is subject to variation.
- * In PMI8950, TRIM_ERROR_RC19P2_CLK register in MISC module
- * holds the frequency error in 19.2Mhz RC clock
+ * The frequency of the 19.2 MHz RC clock is subject to variation.
+ * Currently a few PMI modules have the MISC_TRIM_ERROR_RC19P2_CLK
+ * register present in their MISC block. This register holds the
+ * frequency error in the 19.2 MHz RC clock.
*/
if ((hap->act_type == QPNP_HAP_LRA) && hap->correct_lra_drive_freq
&& hap->misc_trim_error_rc19p2_clk_reg_present) {
unlock_val = MISC_SEC_UNLOCK;
+ /*
+ * This SID value may change depending on the PMI chip where
+ * the MISC block is present.
+ */
rc = regmap_write(hap->regmap, MISC_SEC_ACCESS, unlock_val);
if (rc)
dev_err(&hap->pdev->dev,
@@ -1855,36 +1971,69 @@ static int qpnp_hap_config(struct qpnp_hap *hap)
regmap_read(hap->regmap, MISC_TRIM_ERROR_RC19P2_CLK,
&error_code);
+ dev_dbg(&hap->pdev->dev, "TRIM register = 0x%x\n", error_code);
- error_value = (error_code & 0x0F) * 7;
+ /*
+ * Extract the 4 LSBs and multiply by 7 to get
+ * the %error in RC clock multiplied by 10
+ */
+ rc_clk_err_percent_x10 = (error_code & 0x0F) * 7;
- if (error_code & 0x80)
- temp = (temp * (1000 - error_value)) / 1000;
+ /*
+ * If the TRIM register holds value less than 0x80,
+ * then there is a positive error in the RC clock.
+ * If the TRIM register holds value greater than or equal to
+ * 0x80, then there is a negative error in the RC clock.
+ *
+ * The adjusted play rate code is calculated as follows:
+ * LRA drive period code (RATE_CFG) =
+ * 200 kHz * (1 / LRA drive frequency) * (1 + %error / 100)
+ *
+ * This can be rewritten as:
+ * LRA drive period code (RATE_CFG) =
+ * 200 kHz * (1 / LRA drive frequency) * (1 + (%error * 10) / 1000)
+ *
+ * Since 200 kHz * (1 / LRA drive frequency) is already calculated
+ * above, we only do the rest of the scaling here.
+ */
+ if (error_code >= 128)
+ LRA_DRIVE_PERIOD_NEG_ERR(hap, rc_clk_err_percent_x10);
else
- temp = (temp * (1000 + error_value)) / 1000;
+ LRA_DRIVE_PERIOD_POS_ERR(hap, rc_clk_err_percent_x10);
}
- reg = temp & QPNP_HAP_RATE_CFG1_MASK;
+ dev_dbg(&hap->pdev->dev,
+ "Play rate code 0x%x\n", hap->init_drive_period_code);
+
+ reg = hap->init_drive_period_code & QPNP_HAP_RATE_CFG1_MASK;
rc = qpnp_hap_write_reg(hap, &reg,
QPNP_HAP_RATE_CFG1_REG(hap->base));
if (rc)
return rc;
- rc = qpnp_hap_read_reg(hap, &reg,
- QPNP_HAP_RATE_CFG2_REG(hap->base));
- if (rc < 0)
- return rc;
- reg &= QPNP_HAP_RATE_CFG2_MASK;
- temp = temp >> QPNP_HAP_RATE_CFG2_SHFT;
- reg |= temp;
+ reg = (hap->init_drive_period_code & 0xF00) >> QPNP_HAP_RATE_CFG2_SHFT;
rc = qpnp_hap_write_reg(hap, &reg,
QPNP_HAP_RATE_CFG2_REG(hap->base));
if (rc)
return rc;
- if ((hap->act_type == QPNP_HAP_LRA) && hap->correct_lra_drive_freq)
+ if (hap->act_type == QPNP_HAP_LRA &&
+ hap->perform_lra_auto_resonance_search)
calculate_lra_code(hap);
+ if (hap->act_type == QPNP_HAP_LRA && hap->correct_lra_drive_freq) {
+ hap->drive_period_code_max_limit =
+ (hap->init_drive_period_code * 100) /
+ (100 - hap->drive_period_code_max_limit_percent_variation);
+ hap->drive_period_code_min_limit =
+ (hap->init_drive_period_code * 100) /
+ (100 + hap->drive_period_code_min_limit_percent_variation);
+ dev_dbg(&hap->pdev->dev, "Drive period code max limit %x\n"
+ "Drive period code min limit %x\n",
+ hap->drive_period_code_max_limit,
+ hap->drive_period_code_min_limit);
+ }
+
/* Configure BRAKE register */
rc = qpnp_hap_read_reg(hap, &reg, QPNP_HAP_EN_CTL2_REG(hap->base));
if (rc < 0)
@@ -2031,13 +2180,44 @@ static int qpnp_hap_parse_dt(struct qpnp_hap *hap)
return rc;
}
+ hap->perform_lra_auto_resonance_search =
+ of_property_read_bool(pdev->dev.of_node,
+ "qcom,perform-lra-auto-resonance-search");
+
hap->correct_lra_drive_freq =
of_property_read_bool(pdev->dev.of_node,
"qcom,correct-lra-drive-freq");
+ hap->drive_period_code_max_limit_percent_variation = 25;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,drive-period-code-max-limit-percent-variation", &temp);
+ if (!rc)
+ hap->drive_period_code_max_limit_percent_variation =
+ (u8) temp;
+
+ hap->drive_period_code_min_limit_percent_variation = 25;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,drive-period-code-min-limit-percent-variation", &temp);
+ if (!rc)
+ hap->drive_period_code_min_limit_percent_variation =
+ (u8) temp;
+
hap->misc_trim_error_rc19p2_clk_reg_present =
of_property_read_bool(pdev->dev.of_node,
"qcom,misc-trim-error-rc19p2-clk-reg-present");
+
+ if (hap->auto_res_mode == QPNP_HAP_AUTO_RES_QWD) {
+ hap->time_required_to_generate_back_emf_us =
+ QPNP_HAP_TIME_REQ_FOR_BACK_EMF_GEN;
+ rc = of_property_read_u32(pdev->dev.of_node,
+ "qcom,time-required-to-generate-back-emf-us",
+ &temp);
+ if (!rc)
+ hap->time_required_to_generate_back_emf_us =
+ temp;
+ } else {
+ hap->time_required_to_generate_back_emf_us = 0;
+ }
}
rc = of_property_read_string(pdev->dev.of_node,
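
To make the drive-period arithmetic above concrete, a worked example with
assumed values (none of these numbers come from the patch): a 235 Hz LRA
with wave_play_rate_us = 4255 gives init_drive_period_code = 4255 / 5 = 851,
and a TRIM register reading of 0x83 is >= 0x80, so the RC clock error is
negative with rc_clk_err_percent_x10 = (0x83 & 0x0F) * 7 = 21, i.e. 2.1%:

/* Sketch of the TRIM-based correction done in qpnp_hap_config(). */
static u32 example_adjust_period(u32 init_code, u8 trim)
{
	u32 err_x10 = (trim & 0x0F) * 7;	/* %error multiplied by 10 */

	if (trim >= 0x80)	/* negative clock error */
		return init_code * (1000 - err_x10) / 1000;
	return init_code * (1000 + err_x10) / 1000;
}

With init_code = 851 and trim = 0x83 this yields 851 * 979 / 1000 = 833; the
default 25% variations then give drive_period_code_max_limit =
833 * 100 / 75 = 1110 and drive_period_code_min_limit = 833 * 100 / 125 =
666, so auto-resonance results outside (666, 1110) are discarded by
update_lra_frequency().
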
diff --git a/drivers/soc/qcom/qpnp-pbs.c b/drivers/soc/qcom/qpnp-pbs.c
new file mode 100644
index 000000000000..287c8a25b391
--- /dev/null
+++ b/drivers/soc/qcom/qpnp-pbs.c
@@ -0,0 +1,361 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "PBS: %s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/spmi.h>
+#include <linux/platform_device.h>
+#include <linux/regmap.h>
+#include <linux/err.h>
+#include <linux/of.h>
+#include <linux/qpnp/qpnp-pbs.h>
+
+#define QPNP_PBS_DEV_NAME "qcom,qpnp-pbs"
+
+#define PBS_CLIENT_TRIG_CTL 0x42
+#define PBS_CLIENT_SW_TRIG_BIT BIT(7)
+#define PBS_CLIENT_SCRATCH1 0x50
+#define PBS_CLIENT_SCRATCH2 0x51
+
+static LIST_HEAD(pbs_dev_list);
+static DEFINE_MUTEX(pbs_list_lock);
+
+struct qpnp_pbs {
+ struct platform_device *pdev;
+ struct device *dev;
+ struct device_node *dev_node;
+ struct regmap *regmap;
+ struct mutex pbs_lock;
+ struct list_head link;
+
+ u32 base;
+};
+
+static int qpnp_pbs_read(struct qpnp_pbs *pbs, u32 address,
+ u8 *val, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = pbs->pdev;
+
+ rc = regmap_bulk_read(pbs->regmap, address, val, count);
+ if (rc)
+ pr_err("Failed to read address=0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
+
+ return rc;
+}
+
+static int qpnp_pbs_write(struct qpnp_pbs *pbs, u16 address,
+ u8 *val, int count)
+{
+ int rc = 0;
+ struct platform_device *pdev = pbs->pdev;
+
+ rc = regmap_bulk_write(pbs->regmap, address, val, count);
+ if (rc < 0)
+ pr_err("Failed to write address =0x%02x sid=0x%02x rc=%d\n",
+ address, to_spmi_device(pdev->dev.parent)->usid, rc);
+ else
+ pr_debug("Wrote 0x%02X to addr 0x%04x\n", *val, address);
+
+ return rc;
+}
+
+static int qpnp_pbs_masked_write(struct qpnp_pbs *pbs, u16 address,
+ u8 mask, u8 val)
+{
+ int rc;
+
+ rc = regmap_update_bits(pbs->regmap, address, mask, val);
+ if (rc < 0)
+ pr_err("Failed to write address 0x%04X, rc = %d\n",
+ address, rc);
+ else
+ pr_debug("Wrote 0x%02X to addr 0x%04X\n",
+ val, address);
+
+ return rc;
+}
+
+static struct qpnp_pbs *get_pbs_client_node(struct device_node *dev_node)
+{
+ struct qpnp_pbs *pbs;
+
+ mutex_lock(&pbs_list_lock);
+ list_for_each_entry(pbs, &pbs_dev_list, link) {
+ if (dev_node == pbs->dev_node) {
+ mutex_unlock(&pbs_list_lock);
+ return pbs;
+ }
+ }
+
+ mutex_unlock(&pbs_list_lock);
+ return ERR_PTR(-EINVAL);
+}
+
+static int qpnp_pbs_wait_for_ack(struct qpnp_pbs *pbs, u8 bit_pos)
+{
+ int rc = 0;
+ int retries = 2000;
+ u16 dly = 1000;
+ u8 val;
+
+ while (retries--) {
+ rc = qpnp_pbs_read(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to read register %x rc = %d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ return rc;
+ }
+
+ if (val == 0xFF) {
+ /* PBS error - clear SCRATCH2 register */
+ val = 0;
+ rc = qpnp_pbs_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to clear register %x rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ return rc;
+ }
+
+ pr_err("NACK from PBS for bit %d\n", bit_pos);
+ return -EINVAL;
+ }
+
+ if (val & BIT(bit_pos)) {
+ pr_debug("PBS sequence for bit %d executed!\n",
+ bit_pos);
+ break;
+ }
+
+ usleep_range(dly, dly + 100);
+ }
+
+ if (retries < 0) {
+ pr_err("Timeout for PBS ACK/NACK for bit %d\n", bit_pos);
+ return -ETIMEDOUT;
+ }
+
+ return 0;
+}
+
+/**
+ * qpnp_pbs_trigger_event - Trigger the PBS RAM sequence
+ *
+ * Returns 0 if the PBS RAM sequence executed successfully.
+ *
+ * Returns < 0 for errors.
+ *
+ * This function is used by client drivers to trigger execution of
+ * a PBS RAM sequence.
+ *
+ * The PBS trigger sequence involves
+ * 1. Setting the PBS sequence bit in PBS_CLIENT_SCRATCH1
+ * 2. Initiating the SW PBS trigger
+ * 3. Checking the equivalent bit in PBS_CLIENT_SCRATCH2 for the
+ * completion of the sequence.
+ * 4. If PBS_CLIENT_SCRATCH2 == 0xFF, the PBS sequence failed to execute
+ */
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap)
+{
+ struct qpnp_pbs *pbs;
+ int rc = 0, clear_rc;
+ u16 bit_pos = 0;
+ u8 val, mask = 0;
+
+ if (!dev_node)
+ return -EINVAL;
+
+ if (!bitmap) {
+ pr_err("Invalid bitmap passed by client\n");
+ return -EINVAL;
+ }
+
+ pbs = get_pbs_client_node(dev_node);
+ if (IS_ERR_OR_NULL(pbs)) {
+ pr_err("Unable to find the PBS dev_node\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&pbs->pbs_lock);
+ rc = qpnp_pbs_read(pbs, pbs->base + PBS_CLIENT_SCRATCH2, &val, 1);
+ if (rc < 0) {
+ pr_err("read register %x failed rc = %d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto out;
+ }
+
+ if (val == 0xFF) {
+ /* PBS error - clear SCRATCH2 register */
+ val = 0;
+ rc = qpnp_pbs_write(pbs, pbs->base + PBS_CLIENT_SCRATCH2,
+ &val, 1);
+ if (rc < 0) {
+ pr_err("Failed to clear register %x rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto out;
+ }
+ }
+
+ for (bit_pos = 0; bit_pos < 8; bit_pos++) {
+ if (bitmap & BIT(bit_pos)) {
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH2 mask register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto error;
+ }
+
+ /*
+ * Set the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH1 register.
+ */
+ val = mask = BIT(bit_pos);
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH1, mask, val);
+ if (rc < 0) {
+ pr_err("Failed to set %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+ goto error;
+ }
+
+ /* Initiate the SW trigger */
+ val = mask = PBS_CLIENT_SW_TRIG_BIT;
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_TRIG_CTL, mask, val);
+ if (rc < 0) {
+ pr_err("Failed to write register %x rc=%d\n",
+ PBS_CLIENT_TRIG_CTL, rc);
+ goto error;
+ }
+
+ rc = qpnp_pbs_wait_for_ack(pbs, bit_pos);
+ if (rc < 0) {
+ pr_err("Error during wait_for_ack\n");
+ goto error;
+ }
+
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH1 register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH1, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH1, rc);
+ goto error;
+ }
+
+ /*
+ * Clear the PBS sequence bit position in
+ * PBS_CLIENT_SCRATCH2 mask register.
+ */
+ rc = qpnp_pbs_masked_write(pbs, pbs->base +
+ PBS_CLIENT_SCRATCH2, BIT(bit_pos), 0);
+ if (rc < 0) {
+ pr_err("Failed to clear %x reg bit rc=%d\n",
+ PBS_CLIENT_SCRATCH2, rc);
+ goto error;
+ }
+
+ }
+ }
+
+error:
+ /* Clear all the requested bits; don't clobber an earlier error */
+ clear_rc = qpnp_pbs_masked_write(pbs, pbs->base + PBS_CLIENT_SCRATCH1,
+ bitmap, 0);
+ if (clear_rc < 0)
+ pr_err("Failed to clear %x reg bits rc=%d\n",
+ PBS_CLIENT_SCRATCH1, clear_rc);
+ if (!rc)
+ rc = clear_rc;
+out:
+ mutex_unlock(&pbs->pbs_lock);
+
+ return rc;
+}
+EXPORT_SYMBOL(qpnp_pbs_trigger_event);
+
+static int qpnp_pbs_probe(struct platform_device *pdev)
+{
+ int rc = 0;
+ u32 val = 0;
+ struct qpnp_pbs *pbs;
+
+ pbs = devm_kzalloc(&pdev->dev, sizeof(*pbs), GFP_KERNEL);
+ if (!pbs)
+ return -ENOMEM;
+
+ pbs->pdev = pdev;
+ pbs->dev = &pdev->dev;
+ pbs->dev_node = pdev->dev.of_node;
+ pbs->regmap = dev_get_regmap(pdev->dev.parent, NULL);
+ if (!pbs->regmap) {
+ dev_err(&pdev->dev, "Couldn't get parent's regmap\n");
+ return -EINVAL;
+ }
+
+ rc = of_property_read_u32(pdev->dev.of_node, "reg", &val);
+ if (rc < 0) {
+ dev_err(&pdev->dev,
+ "Couldn't find reg in node = %s rc = %d\n",
+ pdev->dev.of_node->full_name, rc);
+ return rc;
+ }
+
+ pbs->base = val;
+ mutex_init(&pbs->pbs_lock);
+
+ dev_set_drvdata(&pdev->dev, pbs);
+
+ mutex_lock(&pbs_list_lock);
+ list_add(&pbs->link, &pbs_dev_list);
+ mutex_unlock(&pbs_list_lock);
+
+ return 0;
+}
+
+static const struct of_device_id qpnp_pbs_match_table[] = {
+ { .compatible = QPNP_PBS_DEV_NAME },
+ {}
+};
+
+static struct platform_driver qpnp_pbs_driver = {
+ .driver = {
+ .name = QPNP_PBS_DEV_NAME,
+ .owner = THIS_MODULE,
+ .of_match_table = qpnp_pbs_match_table,
+ },
+ .probe = qpnp_pbs_probe,
+};
+
+static int __init qpnp_pbs_init(void)
+{
+ return platform_driver_register(&qpnp_pbs_driver);
+}
+arch_initcall(qpnp_pbs_init);
+
+static void __exit qpnp_pbs_exit(void)
+{
+ return platform_driver_unregister(&qpnp_pbs_driver);
+}
+module_exit(qpnp_pbs_exit);
+
+MODULE_DESCRIPTION("QPNP PBS DRIVER");
+MODULE_LICENSE("GPL v2");
+MODULE_ALIAS("platform:" QPNP_PBS_DEV_NAME);
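
A client would typically reference the PBS device through a phandle and fire
one of the eight SCRATCH1-mapped sequences as sketched below. The
"qcom,pbs-client" property name is an assumption for illustration; only
qpnp_pbs_trigger_event() itself is defined by this patch:

#include <linux/bitops.h>
#include <linux/of.h>
#include <linux/qpnp/qpnp-pbs.h>

static int example_fire_pbs(struct device *dev)
{
	struct device_node *pbs_node;
	int rc;

	pbs_node = of_parse_phandle(dev->of_node, "qcom,pbs-client", 0);
	if (!pbs_node)
		return -ENODEV;

	/* trigger the PBS sequence wired to SCRATCH1 bit 0 */
	rc = qpnp_pbs_trigger_event(pbs_node, BIT(0));
	of_node_put(pbs_node);
	return rc;
}
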
diff --git a/drivers/soc/qcom/service-locator.c b/drivers/soc/qcom/service-locator.c
index 2b708732760f..8581ed587ead 100644
--- a/drivers/soc/qcom/service-locator.c
+++ b/drivers/soc/qcom/service-locator.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -111,6 +111,7 @@ static void service_locator_svc_arrive(struct work_struct *work)
qmi_handle_create(service_locator_clnt_notify, NULL);
if (!service_locator.clnt_handle) {
service_locator.clnt_handle = NULL;
+ complete_all(&service_locator.service_available);
mutex_unlock(&service_locator.service_mutex);
pr_err("Service locator QMI client handle alloc failed!\n");
return;
@@ -123,6 +124,7 @@ static void service_locator_svc_arrive(struct work_struct *work)
if (rc) {
qmi_handle_destroy(service_locator.clnt_handle);
service_locator.clnt_handle = NULL;
+ complete_all(&service_locator.service_available);
mutex_unlock(&service_locator.service_mutex);
pr_err("Unable to connnect to service rc:%d\n", rc);
return;
@@ -138,6 +140,7 @@ static void service_locator_svc_exit(struct work_struct *work)
mutex_lock(&service_locator.service_mutex);
qmi_handle_destroy(service_locator.clnt_handle);
service_locator.clnt_handle = NULL;
+ complete_all(&service_locator.service_available);
mutex_unlock(&service_locator.service_mutex);
pr_info("Connection with service locator lost\n");
}
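
The three complete_all() additions guarantee that a thread blocked on
service_locator.service_available is woken even when QMI handle creation or
the server connect fails, rather than waiting forever. Reduced to a sketch
(everything except complete_all() is illustrative):

#include <linux/completion.h>

static DECLARE_COMPLETION(example_ready);

static int example_setup(void)
{
	return -ENODEV;		/* hypothetical setup step that fails */
}

static void example_svc_arrive(void)
{
	int rc = example_setup();

	/* wake waiters on success and on failure alike */
	complete_all(&example_ready);
	if (rc)
		pr_err("setup failed rc=%d\n", rc);
}
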
diff --git a/drivers/soc/qcom/smcinvoke.c b/drivers/soc/qcom/smcinvoke.c
index d0fef9d31755..99ae24735f05 100644
--- a/drivers/soc/qcom/smcinvoke.c
+++ b/drivers/soc/qcom/smcinvoke.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -207,12 +207,11 @@ static int prepare_send_scm_msg(const uint8_t *in_buf, size_t in_buf_len,
ret = qseecom_process_listener_from_smcinvoke(&desc);
*smcinvoke_result = (int32_t)desc.ret[1];
- if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0]) {
+ if (ret || desc.ret[1] || desc.ret[2] || desc.ret[0])
pr_err("SCM call failed with ret val = %d %d %d %d\n",
ret, (int)desc.ret[0],
(int)desc.ret[1], (int)desc.ret[2]);
- ret = ret | desc.ret[0] | desc.ret[1] | desc.ret[2];
- }
+
dmac_inv_range(in_buf, in_buf + inbuf_flush_size);
dmac_inv_range(out_buf, out_buf + outbuf_flush_size);
return ret;
diff --git a/drivers/soc/qcom/spcom.c b/drivers/soc/qcom/spcom.c
index 6a60e3624420..07610877f140 100644
--- a/drivers/soc/qcom/spcom.c
+++ b/drivers/soc/qcom/spcom.c
@@ -886,10 +886,11 @@ static int spcom_rx(struct spcom_channel *ch,
if (timeleft == 0) {
pr_err("rx_done timeout [%d] msec expired.\n", timeout_msec);
- goto exit_err;
+ mutex_unlock(&ch->lock);
+ return -ETIMEDOUT;
} else if (ch->rx_abort) {
- pr_err("rx aborted.\n");
- goto exit_err;
+ mutex_unlock(&ch->lock);
+ return -ERESTART; /* probably SSR */
} else if (ch->actual_rx_size) {
pr_debug("actual_rx_size is [%d].\n", ch->actual_rx_size);
} else {
@@ -1976,7 +1977,8 @@ static int spcom_handle_read_req_resp(struct spcom_channel *ch,
ret = spcom_rx(ch, rx_buf, rx_buf_size, timeout_msec);
if (ret < 0) {
pr_err("rx error %d.\n", ret);
- goto exit_err;
+ kfree(rx_buf);
+ return ret;
} else {
size = ret; /* actual_rx_size */
}
@@ -2269,8 +2271,14 @@ static ssize_t spcom_device_read(struct file *filp, char __user *user_buff,
if (buf == NULL)
return -ENOMEM;
- actual_size = spcom_handle_read(ch, buf, size);
- if ((actual_size <= 0) || (actual_size > size)) {
+ ret = spcom_handle_read(ch, buf, size);
+ if (ret < 0) {
+ pr_err("read error [%d].\n", ret);
+ kfree(buf);
+ return ret;
+ }
+ actual_size = ret;
+ if ((actual_size == 0) || (actual_size > size)) {
pr_err("invalid actual_size [%d].\n", actual_size);
kfree(buf);
return -EFAULT;
diff --git a/drivers/spi/spi_qsd.c b/drivers/spi/spi_qsd.c
index 2d52fcaf954a..6f75eee8f921 100644
--- a/drivers/spi/spi_qsd.c
+++ b/drivers/spi/spi_qsd.c
@@ -1442,9 +1442,9 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
u32 spi_ioc_orig;
int rc;
- rc = pm_runtime_get_sync(dd->dev);
- if (rc < 0) {
- dev_err(dd->dev, "Failure during runtime get");
+ if (dd->suspended) {
+ dev_err(dd->dev, "%s: SPI operational state not valid %d\n",
+ __func__, dd->suspended);
return;
}
@@ -1470,8 +1470,6 @@ static inline void msm_spi_set_cs(struct spi_device *spi, bool set_flag)
writel_relaxed(spi_ioc, dd->base + SPI_IO_CONTROL);
if (dd->pdata->is_shared)
put_local_resources(dd);
- pm_runtime_mark_last_busy(dd->dev);
- pm_runtime_put_autosuspend(dd->dev);
}
static void reset_core(struct msm_spi *dd)
@@ -1655,23 +1653,16 @@ static int msm_spi_prepare_transfer_hardware(struct spi_master *master)
struct msm_spi *dd = spi_master_get_devdata(master);
int resume_state = 0;
- resume_state = pm_runtime_get_sync(dd->dev);
- if (resume_state < 0)
+ if (!pm_runtime_enabled(dd->dev)) {
+ dev_err(dd->dev, "Runtime PM not available\n");
+ resume_state = -EBUSY;
goto spi_finalize;
+ }
- /*
- * Counter-part of system-suspend when runtime-pm is not enabled.
- * This way, resume can be left empty and device will be put in
- * active mode only if client requests anything on the bus
- */
- if (!pm_runtime_enabled(dd->dev))
- resume_state = msm_spi_pm_resume_runtime(dd->dev);
+ resume_state = pm_runtime_get_sync(dd->dev);
if (resume_state < 0)
goto spi_finalize;
- if (dd->suspended) {
- resume_state = -EBUSY;
- goto spi_finalize;
- }
+
return 0;
spi_finalize:
@@ -1683,6 +1674,11 @@ static int msm_spi_unprepare_transfer_hardware(struct spi_master *master)
{
struct msm_spi *dd = spi_master_get_devdata(master);
+ if (!pm_runtime_enabled(dd->dev)) {
+ dev_err(dd->dev, "Runtime PM not available\n");
+ return -EBUSY;
+ }
+
pm_runtime_mark_last_busy(dd->dev);
pm_runtime_put_autosuspend(dd->dev);
return 0;
@@ -2621,27 +2617,10 @@ resume_exit:
#ifdef CONFIG_PM_SLEEP
static int msm_spi_suspend(struct device *device)
{
- if (!pm_runtime_enabled(device) || !pm_runtime_suspended(device)) {
- struct platform_device *pdev = to_platform_device(device);
- struct spi_master *master = platform_get_drvdata(pdev);
- struct msm_spi *dd;
-
- dev_dbg(device, "system suspend");
- if (!master)
- goto suspend_exit;
- dd = spi_master_get_devdata(master);
- if (!dd)
- goto suspend_exit;
- msm_spi_pm_suspend_runtime(device);
-
- /*
- * set the device's runtime PM status to 'suspended'
- */
- pm_runtime_disable(device);
- pm_runtime_set_suspended(device);
- pm_runtime_enable(device);
+ if (!pm_runtime_status_suspended(device)) {
+ dev_err(device, "Runtime not suspended, deny sys suspend");
+ return -EBUSY;
}
-suspend_exit:
return 0;
}
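
The spi_qsd changes move the runtime-PM reference out of the per-chip-select path and into the prepare/unprepare transfer hooks, so a single get/put brackets each message, and system suspend is simply refused (-EBUSY) while the controller is still runtime-active. A sketch of that pairing, assuming a hypothetical demo_ prefix:

#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>

static int demo_prepare_transfer_hardware(struct spi_master *master)
{
	struct device *dev = master->dev.parent;
	int ret;

	if (!pm_runtime_enabled(dev))
		return -EBUSY;			/* no runtime PM, refuse to start */

	ret = pm_runtime_get_sync(dev);		/* resume the controller */
	if (ret < 0) {
		pm_runtime_put_noidle(dev);	/* balance the failed get */
		return ret;
	}
	return 0;
}

static int demo_unprepare_transfer_hardware(struct spi_master *master)
{
	struct device *dev = master->dev.parent;

	if (!pm_runtime_enabled(dev))
		return -EBUSY;

	pm_runtime_mark_last_busy(dev);		/* matching put for the get above */
	pm_runtime_put_autosuspend(dev);
	return 0;
}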
diff --git a/drivers/usb/gadget/function/f_gsi.c b/drivers/usb/gadget/function/f_gsi.c
index 2f08a6c9d476..9473e7e31eaa 100644
--- a/drivers/usb/gadget/function/f_gsi.c
+++ b/drivers/usb/gadget/function/f_gsi.c
@@ -1086,9 +1086,9 @@ static ssize_t gsi_ctrl_dev_write(struct file *fp, const char __user *buf,
list_add_tail(&cpkt->list, &c_port->cpkt_resp_q);
spin_unlock_irqrestore(&c_port->lock, flags);
- ret = gsi_ctrl_send_notification(gsi);
+ if (!gsi_ctrl_send_notification(gsi))
+ c_port->modem_to_host++;
- c_port->modem_to_host++;
log_event_dbg("Exit %zu", count);
return ret ? ret : count;
@@ -1392,33 +1392,18 @@ static int queue_notification_request(struct f_gsi *gsi)
{
int ret;
unsigned long flags;
- struct usb_cdc_notification *event;
- struct gsi_ctrl_pkt *cpkt;
ret = usb_func_ep_queue(&gsi->function, gsi->c_port.notify,
gsi->c_port.notify_req, GFP_ATOMIC);
- if (ret == -ENOTSUPP || (ret < 0 && ret != -EAGAIN)) {
+ if (ret < 0) {
spin_lock_irqsave(&gsi->c_port.lock, flags);
gsi->c_port.notify_req_queued = false;
- /* check if device disconnected while we dropped lock */
- if (atomic_read(&gsi->connected) &&
- !list_empty(&gsi->c_port.cpkt_resp_q)) {
- cpkt = list_first_entry(&gsi->c_port.cpkt_resp_q,
- struct gsi_ctrl_pkt, list);
- list_del(&cpkt->list);
- log_event_err("%s: drop ctrl pkt of len %d error %d",
- __func__, cpkt->len, ret);
- gsi_ctrl_pkt_free(cpkt);
- }
- gsi->c_port.cpkt_drop_cnt++;
spin_unlock_irqrestore(&gsi->c_port.lock, flags);
- } else {
- ret = 0;
- event = gsi->c_port.notify_req->buf;
- log_event_dbg("%s: Queued Notify type %02x", __func__,
- event->bNotificationType);
}
+ log_event_dbg("%s: ret:%d req_queued:%d",
+ __func__, ret, gsi->c_port.notify_req_queued);
+
return ret;
}
@@ -3132,7 +3117,7 @@ MODULE_DESCRIPTION("GSI function driver");
static int fgsi_init(void)
{
ipa_usb_wq = alloc_workqueue("k_ipa_usb",
- WQ_UNBOUND | WQ_MEM_RECLAIM, 1);
+ WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 1);
if (!ipa_usb_wq) {
log_event_err("Failed to create workqueue for IPA");
return -ENOMEM;
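
WQ_FREEZABLE is the functional part of the f_gsi change: a freezable workqueue is frozen during system suspend, so in-flight items finish and nothing new runs until resume, keeping queued IPA work from racing the suspend path. Minimal usage, with a hypothetical demo_wq:

#include <linux/workqueue.h>

static struct workqueue_struct *demo_wq;

static int __init demo_init(void)
{
	/* frozen across system suspend; max_active of 1 in-flight item */
	demo_wq = alloc_workqueue("demo_wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_FREEZABLE, 1);
	return demo_wq ? 0 : -ENOMEM;
}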
diff --git a/drivers/usb/gadget/function/f_mass_storage.c b/drivers/usb/gadget/function/f_mass_storage.c
index 97d86b6ac69b..e309dec68a75 100644
--- a/drivers/usb/gadget/function/f_mass_storage.c
+++ b/drivers/usb/gadget/function/f_mass_storage.c
@@ -2289,8 +2289,11 @@ reset:
}
common->running = 0;
- if (!new_fsg || rc)
+ if (!new_fsg || rc) {
+ /* allow usb LPM after eps are disabled */
+ usb_gadget_autopm_put_async(common->gadget);
return rc;
+ }
common->fsg = new_fsg;
fsg = common->fsg;
@@ -2333,6 +2336,9 @@ reset:
bh->outreq->complete = bulk_out_complete;
}
+ /* prevents usb LPM until thread runs to completion */
+ usb_gadget_autopm_get_noresume(common->gadget);
+
common->running = 1;
for (i = 0; i < ARRAY_SIZE(common->luns); ++i)
if (common->luns[i])
diff --git a/drivers/usb/pd/policy_engine.c b/drivers/usb/pd/policy_engine.c
index ea278781440c..935bd0778bfb 100644
--- a/drivers/usb/pd/policy_engine.c
+++ b/drivers/usb/pd/policy_engine.c
@@ -665,7 +665,7 @@ static void phy_msg_received(struct usbpd *pd, enum pd_msg_type type,
rx_msg->type = PD_MSG_HDR_TYPE(header);
rx_msg->len = PD_MSG_HDR_COUNT(header);
- memcpy(&rx_msg->payload, buf, len);
+ memcpy(&rx_msg->payload, buf, min(len, sizeof(rx_msg->payload)));
spin_lock_irqsave(&pd->rx_lock, flags);
list_add_tail(&rx_msg->entry, &pd->rx_q);
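
The one-line policy_engine fix clamps the copy length so an oversized PD message from the PHY cannot overrun the fixed payload buffer. The same pattern in isolation, with a hypothetical demo_msg layout:

#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/types.h>

struct demo_msg {
	size_t len;
	u8 payload[64];
};

static void demo_store(struct demo_msg *msg, const u8 *buf, size_t len)
{
	/* never trust the sender's length: copy at most sizeof(payload) */
	msg->len = min(len, sizeof(msg->payload));
	memcpy(msg->payload, buf, msg->len);
}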
diff --git a/drivers/video/fbdev/msm/Kconfig b/drivers/video/fbdev/msm/Kconfig
index ef5c96214c19..03ee89ad0d99 100644
--- a/drivers/video/fbdev/msm/Kconfig
+++ b/drivers/video/fbdev/msm/Kconfig
@@ -63,6 +63,7 @@ config FB_MSM_MDSS_WRITEBACK
config FB_MSM_MDSS_HDMI_PANEL
depends on FB_MSM_MDSS
+ select MSM_EXT_DISPLAY
bool "MDSS HDMI Tx Panel"
default n
---help---
@@ -98,6 +99,7 @@ config FB_MSM_MDSS_DSI_CTRL_STATUS
config FB_MSM_MDSS_DP_PANEL
depends on FB_MSM_MDSS
+ select MSM_EXT_DISPLAY
bool "MDSS DP Panel"
---help---
The MDSS DP Panel provides support for DP host controller driver
diff --git a/drivers/video/fbdev/msm/Makefile b/drivers/video/fbdev/msm/Makefile
index b905c0e855dd..e101b873f361 100644
--- a/drivers/video/fbdev/msm/Makefile
+++ b/drivers/video/fbdev/msm/Makefile
@@ -49,7 +49,6 @@ obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_aux.o
obj-$(CONFIG_FB_MSM_MDSS_DP_PANEL) += mdss_dp_hdcp2p2.o
obj-$(CONFIG_FB_MSM_MDSS) += mdss_io_util.o
-obj-$(CONFIG_FB_MSM_MDSS) += msm_ext_display.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_tx.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_panel.o
obj-$(CONFIG_FB_MSM_MDSS_HDMI_PANEL) += mdss_hdmi_hdcp2p2.o
diff --git a/drivers/video/fbdev/msm/mdss.h b/drivers/video/fbdev/msm/mdss.h
index bf4dc39f57ee..d9a4bd91f3eb 100644
--- a/drivers/video/fbdev/msm/mdss.h
+++ b/drivers/video/fbdev/msm/mdss.h
@@ -27,6 +27,7 @@
#include <linux/msm-bus.h>
#include <linux/file.h>
#include <linux/dma-direction.h>
+#include <soc/qcom/cx_ipeak.h>
#include "mdss_panel.h"
@@ -535,6 +536,7 @@ struct mdss_data_type {
u32 sec_cam_en;
u32 sec_session_cnt;
wait_queue_head_t secure_waitq;
+ struct cx_ipeak_client *mdss_cx_ipeak;
};
extern struct mdss_data_type *mdss_res;
diff --git a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
index 3bcacf945761..5a677dfe7484 100644
--- a/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
+++ b/drivers/video/fbdev/msm/mdss_dp_hdcp2p2.c
@@ -449,7 +449,7 @@ static struct attribute *dp_hdcp2p2_fs_attrs[] = {
};
static struct attribute_group dp_hdcp2p2_fs_attr_group = {
- .name = "dp_hdcp2p2",
+ .name = "hdcp2p2",
.attrs = dp_hdcp2p2_fs_attrs,
};
diff --git a/drivers/video/fbdev/msm/mdss_fb.c b/drivers/video/fbdev/msm/mdss_fb.c
index a183fd7cd247..db27842eaccc 100644
--- a/drivers/video/fbdev/msm/mdss_fb.c
+++ b/drivers/video/fbdev/msm/mdss_fb.c
@@ -892,6 +892,12 @@ static ssize_t mdss_fb_get_persist_mode(struct device *dev,
return ret;
}
+static ssize_t mdss_fb_idle_pc_notify(struct device *dev,
+ struct device_attribute *attr, char *buf)
+{
+ return scnprintf(buf, PAGE_SIZE, "idle power collapsed\n");
+}
+
static DEVICE_ATTR(msm_fb_type, S_IRUGO, mdss_fb_get_type, NULL);
static DEVICE_ATTR(msm_fb_split, S_IRUGO | S_IWUSR, mdss_fb_show_split,
mdss_fb_store_split);
@@ -912,6 +918,8 @@ static DEVICE_ATTR(measured_fps, S_IRUGO | S_IWUSR | S_IWGRP,
mdss_fb_get_fps_info, NULL);
static DEVICE_ATTR(msm_fb_persist_mode, S_IRUGO | S_IWUSR,
mdss_fb_get_persist_mode, mdss_fb_change_persist_mode);
+static DEVICE_ATTR(idle_power_collapse, S_IRUGO, mdss_fb_idle_pc_notify, NULL);
+
static struct attribute *mdss_fb_attrs[] = {
&dev_attr_msm_fb_type.attr,
&dev_attr_msm_fb_split.attr,
@@ -925,6 +933,7 @@ static struct attribute *mdss_fb_attrs[] = {
&dev_attr_msm_fb_dfps_mode.attr,
&dev_attr_measured_fps.attr,
&dev_attr_msm_fb_persist_mode.attr,
+ &dev_attr_idle_power_collapse.attr,
NULL,
};
@@ -4276,8 +4285,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_2;
}
- sync_fence_install(rel_fence, rel_fen_fd);
-
ret = copy_to_user(buf_sync->rel_fen_fd, &rel_fen_fd, sizeof(int));
if (ret) {
pr_err("%s: copy_to_user failed\n", sync_pt_data->fence_name);
@@ -4314,8 +4321,6 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_3;
}
- sync_fence_install(retire_fence, retire_fen_fd);
-
ret = copy_to_user(buf_sync->retire_fen_fd, &retire_fen_fd,
sizeof(int));
if (ret) {
@@ -4326,6 +4331,9 @@ static int mdss_fb_handle_buf_sync_ioctl(struct msm_sync_pt_data *sync_pt_data,
goto buf_sync_err_3;
}
+ sync_fence_install(rel_fence, rel_fen_fd);
+ sync_fence_install(retire_fence, retire_fen_fd);
+
skip_retire_fence:
mutex_unlock(&sync_pt_data->sync_mutex);
@@ -4471,7 +4479,7 @@ err:
static int __mdss_fb_copy_destscaler_data(struct fb_info *info,
struct mdp_layer_commit *commit)
{
- int i;
+ int i = 0;
int ret = 0;
u32 data_size;
struct mdp_destination_scaler_data __user *ds_data_user;
@@ -4544,6 +4552,7 @@ static int __mdss_fb_copy_destscaler_data(struct fb_info *info,
data_size);
if (ret) {
pr_err("scale data copy from user failed\n");
+ kfree(scale_data);
goto err;
}
}
@@ -4553,7 +4562,7 @@ static int __mdss_fb_copy_destscaler_data(struct fb_info *info,
err:
if (ds_data) {
- for (i = 0; i < commit->commit_v1.dest_scaler_cnt; i++) {
+ for (i--; i >= 0; i--) {
scale_data = to_user_ptr(ds_data[i].scale);
kfree(scale_data);
}
@@ -5177,3 +5186,16 @@ void mdss_fb_calc_fps(struct msm_fb_data_type *mfd)
mfd->fps_info.frame_count = 0;
}
}
+
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd)
+{
+ struct mdss_overlay_private *mdp5_data = mfd_to_mdp5_data(mfd);
+
+ if (mdss_fb_is_power_off(mfd))
+ return;
+
+ if ((mfd->panel_info->type == MIPI_CMD_PANEL) && mdp5_data) {
+ pr_debug("Notify fb%d idle power collapsed\n", mfd->index);
+ sysfs_notify(&mfd->fbi->dev->kobj, NULL, "idle_power_collapse");
+ }
+}
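
The new idle_power_collapse attribute is a pure notification channel: mdss_fb_idle_pc() calls sysfs_notify() and userspace catches it by polling the attribute for POLLPRI. A userspace sketch (the fb0 path is an assumption for illustration):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char buf[64];
	struct pollfd pfd;

	pfd.fd = open("/sys/class/graphics/fb0/idle_power_collapse", O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI | POLLERR;

	if (read(pfd.fd, buf, sizeof(buf)) < 0)	/* drain the initial value */
		return 1;
	while (poll(&pfd, 1, -1) > 0) {
		lseek(pfd.fd, 0, SEEK_SET);	/* sysfs: rewind, then re-read */
		if (read(pfd.fd, buf, sizeof(buf)) < 0)
			break;
		printf("fb0 idle power collapse notified\n");
	}
	close(pfd.fd);
	return 0;
}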
diff --git a/drivers/video/fbdev/msm/mdss_fb.h b/drivers/video/fbdev/msm/mdss_fb.h
index 111d7cfc7c9a..d64580a35775 100644
--- a/drivers/video/fbdev/msm/mdss_fb.h
+++ b/drivers/video/fbdev/msm/mdss_fb.h
@@ -468,4 +468,5 @@ void mdss_fb_report_panel_dead(struct msm_fb_data_type *mfd);
void mdss_panelinfo_to_fb_var(struct mdss_panel_info *pinfo,
struct fb_var_screeninfo *var);
void mdss_fb_calc_fps(struct msm_fb_data_type *mfd);
+void mdss_fb_idle_pc(struct msm_fb_data_type *mfd);
#endif /* MDSS_FB_H */
diff --git a/drivers/video/fbdev/msm/mdss_mdp.c b/drivers/video/fbdev/msm/mdss_mdp.c
index d8d11f21f3b2..3cadfa4ecef7 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.c
+++ b/drivers/video/fbdev/msm/mdss_mdp.c
@@ -1173,6 +1173,31 @@ irqreturn_t mdss_mdp_isr(int irq, void *ptr)
return IRQ_HANDLED;
}
+static void mdss_mdp_cxipeak_vote(bool set_vote, unsigned long new_rate,
+ unsigned long prev_rate)
+{
+ struct mdss_data_type *mdata = mdss_mdp_get_mdata();
+ int ret = 0;
+
+ if (!mdata->mdss_cx_ipeak)
+ return;
+
+ /* fmax threshold for mdp in sdm660 is max MDP clk */
+ if (set_vote) {
+ if ((new_rate >= mdata->max_mdp_clk_rate) &&
+ (prev_rate < mdata->max_mdp_clk_rate))
+ ret = cx_ipeak_update(mdata->mdss_cx_ipeak, true);
+ } else {
+ if ((new_rate < mdata->max_mdp_clk_rate) &&
+ (prev_rate >= mdata->max_mdp_clk_rate))
+ ret = cx_ipeak_update(mdata->mdss_cx_ipeak, false);
+ }
+ if (ret) {
+ pr_err("cxipeak api fail ret:%d set_vote :%d new_rate:%lu prev_rate:%lu\n",
+ ret, (int)set_vote, new_rate, prev_rate);
+ }
+}
+
static int mdss_mdp_clk_update(u32 clk_idx, u32 enable)
{
int ret = -ENODEV;
@@ -1234,7 +1259,7 @@ void mdss_mdp_set_clk_rate(unsigned long rate, bool locked)
struct mdss_data_type *mdata = mdss_res;
unsigned long clk_rate;
struct clk *clk = mdss_mdp_get_clk(MDSS_CLK_MDP_CORE);
- unsigned long min_clk_rate;
+ unsigned long min_clk_rate, curr_clk_rate;
min_clk_rate = max(rate, mdata->perf_tune.min_mdp_clk);
@@ -1246,15 +1271,20 @@ void mdss_mdp_set_clk_rate(unsigned long rate, bool locked)
clk_rate = clk_round_rate(clk, min_clk_rate);
else
clk_rate = mdata->max_mdp_clk_rate;
+
+ curr_clk_rate = clk_get_rate(clk);
if (IS_ERR_VALUE(clk_rate)) {
pr_err("unable to round rate err=%ld\n", clk_rate);
- } else if (clk_rate != clk_get_rate(clk)) {
-
+ } else if (clk_rate != curr_clk_rate) {
+ mdss_mdp_cxipeak_vote(true, clk_rate, curr_clk_rate);
mdata->mdp_clk_rate = clk_rate;
- if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate)))
+ if (IS_ERR_VALUE(clk_set_rate(clk, clk_rate))) {
pr_err("clk_set_rate failed\n");
- else
+ } else {
+ mdss_mdp_cxipeak_vote(false, clk_rate,
+ curr_clk_rate);
pr_debug("mdp clk rate=%lu\n", clk_rate);
+ }
}
if (!locked)
mutex_unlock(&mdp_clk_lock);
@@ -1355,6 +1385,68 @@ static inline void __mdss_mdp_reg_access_clk_enable(
}
}
+/*
+ * __mdss_mdp_clk_control - Overall MDSS clock control for power on/off
+ */
+static void __mdss_mdp_clk_control(struct mdss_data_type *mdata, bool enable)
+{
+ int rc = 0;
+ unsigned long flags;
+
+ if (enable) {
+ pm_runtime_get_sync(&mdata->pdev->dev);
+
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_LOW);
+
+ rc = mdss_iommu_ctrl(1);
+ if (IS_ERR_VALUE(rc))
+ pr_err("IOMMU attach failed\n");
+
+ /* Active+Sleep */
+ msm_bus_scale_client_update_context(mdata->bus_hdl,
+ false, mdata->curr_bw_uc_idx);
+
+ spin_lock_irqsave(&mdp_lock, flags);
+ mdata->clk_ena = enable;
+ spin_unlock_irqrestore(&mdp_lock, flags);
+
+ mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 1);
+ mdss_mdp_clk_update(MDSS_CLK_AHB, 1);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, 1);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 1);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 1);
+ if (mdata->vsync_ena)
+ mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 1);
+ } else {
+ spin_lock_irqsave(&mdp_lock, flags);
+ mdata->clk_ena = enable;
+ spin_unlock_irqrestore(&mdp_lock, flags);
+
+ if (mdata->vsync_ena)
+ mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, 0);
+
+ mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, 0);
+ mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, 0);
+ mdss_mdp_clk_update(MDSS_CLK_AXI, 0);
+ mdss_mdp_clk_update(MDSS_CLK_AHB, 0);
+ mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, 0);
+
+ /* release iommu control */
+ mdss_iommu_ctrl(0);
+
+ /* Active-Only */
+ msm_bus_scale_client_update_context(mdata->bus_hdl,
+ true, mdata->ao_bw_uc_idx);
+
+ mdss_update_reg_bus_vote(mdata->reg_bus_clt,
+ VOTE_INDEX_DISABLE);
+
+ pm_runtime_mark_last_busy(&mdata->pdev->dev);
+ pm_runtime_put_autosuspend(&mdata->pdev->dev);
+ }
+}
+
int __mdss_mdp_vbif_halt(struct mdss_data_type *mdata, bool is_nrt)
{
int rc = 0;
@@ -1646,9 +1738,7 @@ void mdss_mdp_clk_ctrl(int enable)
{
struct mdss_data_type *mdata = mdss_mdp_get_mdata();
static int mdp_clk_cnt;
- unsigned long flags;
int changed = 0;
- int rc = 0;
mutex_lock(&mdp_clk_lock);
if (enable) {
@@ -1672,49 +1762,8 @@ void mdss_mdp_clk_ctrl(int enable)
__builtin_return_address(0), current->group_leader->comm,
mdata->bus_ref_cnt, changed, enable);
- if (changed) {
- if (enable) {
- pm_runtime_get_sync(&mdata->pdev->dev);
-
- mdss_update_reg_bus_vote(mdata->reg_bus_clt,
- VOTE_INDEX_LOW);
-
- rc = mdss_iommu_ctrl(1);
- if (IS_ERR_VALUE(rc))
- pr_err("IOMMU attach failed\n");
-
- /* Active+Sleep */
- msm_bus_scale_client_update_context(mdata->bus_hdl,
- false, mdata->curr_bw_uc_idx);
- }
-
- spin_lock_irqsave(&mdp_lock, flags);
- mdata->clk_ena = enable;
- spin_unlock_irqrestore(&mdp_lock, flags);
-
- mdss_mdp_clk_update(MDSS_CLK_MNOC_AHB, enable);
- mdss_mdp_clk_update(MDSS_CLK_AHB, enable);
- mdss_mdp_clk_update(MDSS_CLK_AXI, enable);
- mdss_mdp_clk_update(MDSS_CLK_MDP_CORE, enable);
- mdss_mdp_clk_update(MDSS_CLK_MDP_LUT, enable);
- if (mdata->vsync_ena)
- mdss_mdp_clk_update(MDSS_CLK_MDP_VSYNC, enable);
-
- if (!enable) {
- /* release iommu control */
- mdss_iommu_ctrl(0);
-
- /* Active-Only */
- msm_bus_scale_client_update_context(mdata->bus_hdl,
- true, mdata->ao_bw_uc_idx);
-
- mdss_update_reg_bus_vote(mdata->reg_bus_clt,
- VOTE_INDEX_DISABLE);
-
- pm_runtime_mark_last_busy(&mdata->pdev->dev);
- pm_runtime_put_autosuspend(&mdata->pdev->dev);
- }
- }
+ if (changed)
+ __mdss_mdp_clk_control(mdata, enable);
if (enable && changed)
mdss_mdp_idle_pc_restore();
@@ -4519,6 +4568,10 @@ static int mdss_mdp_parse_dt_misc(struct platform_device *pdev)
pr_debug("max pipe width not specified. Using default value\n");
mdata->max_pipe_width = DEFAULT_MDP_PIPE_WIDTH;
}
+
+ if (of_find_property(pdev->dev.of_node, "qcom,mdss-cx-ipeak", NULL))
+ mdata->mdss_cx_ipeak = cx_ipeak_register(pdev->dev.of_node,
+ "qcom,mdss-cx-ipeak");
return 0;
}
@@ -5092,6 +5145,22 @@ vreg_set_voltage_fail:
}
/**
+ * mdss_mdp_notify_idle_pc() - Notify fb driver of idle power collapse
+ * @mdata: MDP private data
+ *
+ * This function is called if there are active overlays.
+ */
+static void mdss_mdp_notify_idle_pc(struct mdss_data_type *mdata)
+{
+ int i;
+
+ for (i = 0; i < mdata->nctl; i++)
+ if ((mdata->ctl_off[i].ref_cnt) &&
+ !mdss_mdp_ctl_is_power_off(&mdata->ctl_off[i]))
+ mdss_fb_idle_pc(mdata->ctl_off[i].mfd);
+}
+
+/**
* mdss_mdp_footswitch_ctrl() - Disable/enable MDSS GDSC and CX/Batfet rails
* @mdata: MDP private data
* @on: 1 to turn on footswitch, 0 to turn off footswitch
@@ -5155,6 +5224,7 @@ static void mdss_mdp_footswitch_ctrl(struct mdss_data_type *mdata, int on)
mdss_mdp_memory_retention_ctrl(MEM_RETAIN_ON,
PERIPH_RETAIN_OFF);
mdata->idle_pc = true;
+ mdss_mdp_notify_idle_pc(mdata);
pr_debug("idle pc. active overlays=%d\n",
active_cnt);
} else {
@@ -5487,6 +5557,8 @@ static int mdss_mdp_remove(struct platform_device *pdev)
pm_runtime_disable(&pdev->dev);
mdss_mdp_pp_term(&pdev->dev);
mdss_mdp_bus_scale_unregister(mdata);
+ if (mdata->mdss_cx_ipeak)
+ cx_ipeak_unregister(mdata->mdss_cx_ipeak);
mdss_debugfs_remove(mdata);
if (mdata->regulator_notif_register)
regulator_unregister_notifier(mdata->fs, &(mdata->gdsc_cb));
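
The cx_ipeak additions order the vote around the frequency change: take the vote before crossing the fmax threshold upward, and release it only after dropping back below. A condensed sketch of that ordering, with a hypothetical demo_ctx carrying the client registered via cx_ipeak_register() above:

#include <linux/clk.h>
#include <soc/qcom/cx_ipeak.h>

struct demo_ctx {
	struct clk *clk;
	struct cx_ipeak_client *ipeak;
	unsigned long fmax;			/* voting threshold */
};

static void demo_set_rate(struct demo_ctx *ctx, unsigned long new_rate)
{
	unsigned long cur = clk_get_rate(ctx->clk);

	/* crossing fmax upward: vote before raising the clock */
	if (new_rate >= ctx->fmax && cur < ctx->fmax)
		cx_ipeak_update(ctx->ipeak, true);

	clk_set_rate(ctx->clk, new_rate);

	/* crossing downward: release the vote after lowering */
	if (new_rate < ctx->fmax && cur >= ctx->fmax)
		cx_ipeak_update(ctx->ipeak, false);
}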
diff --git a/drivers/video/fbdev/msm/mdss_mdp.h b/drivers/video/fbdev/msm/mdss_mdp.h
index 5e98de043e55..36a866685f21 100644
--- a/drivers/video/fbdev/msm/mdss_mdp.h
+++ b/drivers/video/fbdev/msm/mdss_mdp.h
@@ -1842,7 +1842,7 @@ int mdss_mdp_calib_mode(struct msm_fb_data_type *mfd,
int mdss_mdp_pipe_handoff(struct mdss_mdp_pipe *pipe);
int mdss_mdp_smp_handoff(struct mdss_data_type *mdata);
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
- u32 type, struct mdss_mdp_pipe *left_blend_pipe);
+ u32 off, u32 type, struct mdss_mdp_pipe *left_blend_pipe);
struct mdss_mdp_pipe *mdss_mdp_pipe_get(u32 ndx,
enum mdss_mdp_pipe_rect rect_num);
struct mdss_mdp_pipe *mdss_mdp_pipe_search(struct mdss_data_type *mdata,
diff --git a/drivers/video/fbdev/msm/mdss_mdp_ctl.c b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
index e258f258aeca..7b0207de101a 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_ctl.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_ctl.c
@@ -892,7 +892,7 @@ static u32 __calc_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
{
- u32 vbp_min = 0;
+ u32 vbp_min = UINT_MAX;
int i;
struct mdss_data_type *mdata;
@@ -914,6 +914,9 @@ static u32 __get_min_prefill_line_time_us(struct mdss_mdp_ctl *ctl)
}
}
+ if (vbp_min == UINT_MAX)
+ vbp_min = 0;
+
return vbp_min;
}
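
The vbp_min fix is the classic empty-set-minimum bug: seeded with 0, the running minimum could never be lowered. Seeding with UINT_MAX and mapping the untouched sentinel back to 0 afterwards gives the intended result; the same pattern in isolation:

#include <linux/kernel.h>

static u32 demo_min_nonzero(const u32 *vals, int n)
{
	u32 min_val = UINT_MAX;		/* sentinel: "nothing seen yet" */
	int i;

	for (i = 0; i < n; i++)
		if (vals[i] && vals[i] < min_val)
			min_val = vals[i];

	return (min_val == UINT_MAX) ? 0 : min_val;
}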
diff --git a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
index 663d63092ebf..5173567a3420 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_intf_video.c
@@ -1896,7 +1896,6 @@ static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl,
struct mdss_mdp_format_params *fmt;
struct mdss_data_type *mdata = ctl->mdata;
struct dsc_desc *dsc = NULL;
- u32 hdmi_dp_core;
ctx->ctl = ctl;
ctx->intf_type = ctl->intf_type;
@@ -2033,10 +2032,19 @@ static int mdss_mdp_video_ctx_setup(struct mdss_mdp_ctl *ctl,
mdp_video_write(ctx, MDSS_MDP_REG_INTF_PANEL_FORMAT, ctl->dst_format);
- hdmi_dp_core = (ctx->intf_type == MDSS_INTF_EDP) ? 1 : 0;
-
- writel_relaxed(hdmi_dp_core, mdata->mdp_base +
+ /* select HDMI or DP core usage */
+ switch (ctx->intf_type) {
+ case MDSS_INTF_EDP:
+ writel_relaxed(0x1, mdata->mdp_base +
+ MDSS_MDP_HDMI_DP_CORE_SELECT);
+ break;
+ case MDSS_INTF_HDMI:
+ writel_relaxed(0x0, mdata->mdp_base +
MDSS_MDP_HDMI_DP_CORE_SELECT);
+ break;
+ default:
+ break;
+ }
return 0;
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_overlay.c b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
index da08917d334b..5d80c80ebcef 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_overlay.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_overlay.c
@@ -609,6 +609,7 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
bool is_vig_needed = false;
u32 left_lm_w = left_lm_w_from_mfd(mfd);
u32 flags = 0;
+ u32 off = 0;
if (mdp5_data->ctl == NULL)
return -ENODEV;
@@ -692,18 +693,29 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
break;
case PIPE_TYPE_AUTO:
default:
- if (req->flags & MDP_OV_PIPE_FORCE_DMA)
+ if (req->flags & MDP_OV_PIPE_FORCE_DMA) {
pipe_type = MDSS_MDP_PIPE_TYPE_DMA;
- else if (fmt->is_yuv ||
+ /*
+ * For paths using legacy APIs for pipe
+ * allocation, use an offset of 2 when allocating
+ * the right pipe for pipe type DMA. This is
+ * because from SDM 3.x.x onwards one DMA
+ * pipe has two instances for multirect.
+ */
+ off = (mixer_mux == MDSS_MDP_MIXER_MUX_RIGHT)
+ ? 2 : 0;
+ } else if (fmt->is_yuv ||
(req->flags & MDP_OV_PIPE_SHARE) ||
- is_vig_needed)
+ is_vig_needed) {
pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
- else
+ } else {
pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
+ }
break;
}
- pipe = mdss_mdp_pipe_alloc(mixer, pipe_type, left_blend_pipe);
+ pipe = mdss_mdp_pipe_alloc(mixer, off,
+ pipe_type, left_blend_pipe);
/* RGB pipes can be used instead of DMA */
if (IS_ERR_OR_NULL(pipe) &&
@@ -712,7 +724,7 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
pr_debug("giving RGB pipe for fb%d. flags:0x%x\n",
mfd->index, req->flags);
pipe_type = MDSS_MDP_PIPE_TYPE_RGB;
- pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+ pipe = mdss_mdp_pipe_alloc(mixer, off, pipe_type,
left_blend_pipe);
}
@@ -723,7 +735,7 @@ int mdss_mdp_overlay_pipe_setup(struct msm_fb_data_type *mfd,
pr_debug("giving ViG pipe for fb%d. flags:0x%x\n",
mfd->index, req->flags);
pipe_type = MDSS_MDP_PIPE_TYPE_VIG;
- pipe = mdss_mdp_pipe_alloc(mixer, pipe_type,
+ pipe = mdss_mdp_pipe_alloc(mixer, off, pipe_type,
left_blend_pipe);
}
diff --git a/drivers/video/fbdev/msm/mdss_mdp_pipe.c b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
index 965a6533dfcb..8d7bd60318ad 100644
--- a/drivers/video/fbdev/msm/mdss_mdp_pipe.c
+++ b/drivers/video/fbdev/msm/mdss_mdp_pipe.c
@@ -1250,11 +1250,12 @@ cursor_done:
}
struct mdss_mdp_pipe *mdss_mdp_pipe_alloc(struct mdss_mdp_mixer *mixer,
- u32 type, struct mdss_mdp_pipe *left_blend_pipe)
+ u32 off, u32 type, struct mdss_mdp_pipe *left_blend_pipe)
{
struct mdss_mdp_pipe *pipe;
+
mutex_lock(&mdss_mdp_sspp_lock);
- pipe = mdss_mdp_pipe_init(mixer, type, 0, left_blend_pipe);
+ pipe = mdss_mdp_pipe_init(mixer, type, off, left_blend_pipe);
mutex_unlock(&mdss_mdp_sspp_lock);
return pipe;
}
diff --git a/drivers/virtio/virtio_balloon.c b/drivers/virtio/virtio_balloon.c
index 56f7e2521202..8959b320d472 100644
--- a/drivers/virtio/virtio_balloon.c
+++ b/drivers/virtio/virtio_balloon.c
@@ -30,6 +30,7 @@
#include <linux/balloon_compaction.h>
#include <linux/oom.h>
#include <linux/wait.h>
+#include <linux/mount.h>
/*
* Balloon device works in 4K page units. So each page is pointed to by
@@ -45,6 +46,10 @@ static int oom_pages = OOM_VBALLOON_DEFAULT_PAGES;
module_param(oom_pages, int, S_IRUSR | S_IWUSR);
MODULE_PARM_DESC(oom_pages, "pages to free on OOM");
+#ifdef CONFIG_BALLOON_COMPACTION
+static struct vfsmount *balloon_mnt;
+#endif
+
struct virtio_balloon {
struct virtio_device *vdev;
struct virtqueue *inflate_vq, *deflate_vq, *stats_vq;
@@ -486,6 +491,24 @@ static int virtballoon_migratepage(struct balloon_dev_info *vb_dev_info,
return MIGRATEPAGE_SUCCESS;
}
+
+static struct dentry *balloon_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+
+ return mount_pseudo(fs_type, "balloon-kvm:", NULL, &ops,
+ BALLOON_KVM_MAGIC);
+}
+
+static struct file_system_type balloon_fs = {
+ .name = "balloon-kvm",
+ .mount = balloon_mount,
+ .kill_sb = kill_anon_super,
+};
+
#endif /* CONFIG_BALLOON_COMPACTION */
static int virtballoon_probe(struct virtio_device *vdev)
@@ -513,9 +536,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->need_stats_update = 0;
balloon_devinfo_init(&vb->vb_dev_info);
-#ifdef CONFIG_BALLOON_COMPACTION
- vb->vb_dev_info.migratepage = virtballoon_migratepage;
-#endif
err = init_vqs(vb);
if (err)
@@ -525,7 +545,27 @@ static int virtballoon_probe(struct virtio_device *vdev)
vb->nb.priority = VIRTBALLOON_OOM_NOTIFY_PRIORITY;
err = register_oom_notifier(&vb->nb);
if (err < 0)
- goto out_oom_notify;
+ goto out_del_vqs;
+
+#ifdef CONFIG_BALLOON_COMPACTION
+ balloon_mnt = kern_mount(&balloon_fs);
+ if (IS_ERR(balloon_mnt)) {
+ err = PTR_ERR(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ goto out_del_vqs;
+ }
+
+ vb->vb_dev_info.migratepage = virtballoon_migratepage;
+ vb->vb_dev_info.inode = alloc_anon_inode(balloon_mnt->mnt_sb);
+ if (IS_ERR(vb->vb_dev_info.inode)) {
+ err = PTR_ERR(vb->vb_dev_info.inode);
+ kern_unmount(balloon_mnt);
+ unregister_oom_notifier(&vb->nb);
+ vb->vb_dev_info.inode = NULL;
+ goto out_del_vqs;
+ }
+ vb->vb_dev_info.inode->i_mapping->a_ops = &balloon_aops;
+#endif
virtio_device_ready(vdev);
@@ -539,7 +579,6 @@ static int virtballoon_probe(struct virtio_device *vdev)
out_del_vqs:
unregister_oom_notifier(&vb->nb);
-out_oom_notify:
vdev->config->del_vqs(vdev);
out_free_vb:
kfree(vb);
@@ -567,6 +606,8 @@ static void virtballoon_remove(struct virtio_device *vdev)
unregister_oom_notifier(&vb->nb);
kthread_stop(vb->thread);
remove_common(vb);
+ if (vb->vb_dev_info.inode)
+ iput(vb->vb_dev_info.inode);
kfree(vb);
}
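
The virtio_balloon probe path above exists to give balloon pages a real address_space: mount a private pseudo-filesystem, allocate an anonymous inode on it, and point the inode's a_ops at the migration callbacks. The skeleton of that setup, with demo_ names standing in for the driver's (BALLOON_KVM_MAGIC is the real constant this series adds to magic.h):

#include <linux/err.h>
#include <linux/fs.h>
#include <linux/magic.h>

static struct vfsmount *demo_mnt;

static struct dentry *demo_mount(struct file_system_type *fs_type,
				 int flags, const char *dev_name, void *data)
{
	return mount_pseudo(fs_type, "balloon-kvm:", NULL, NULL,
			    BALLOON_KVM_MAGIC);
}

static struct file_system_type demo_fs = {
	.name = "balloon-kvm",
	.mount = demo_mount,
	.kill_sb = kill_anon_super,
};

static int demo_setup(struct inode **inode)
{
	demo_mnt = kern_mount(&demo_fs);
	if (IS_ERR(demo_mnt))
		return PTR_ERR(demo_mnt);

	*inode = alloc_anon_inode(demo_mnt->mnt_sb);
	if (IS_ERR(*inode)) {
		kern_unmount(demo_mnt);		/* unwind in reverse order */
		return PTR_ERR(*inode);
	}
	return 0;
}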
diff --git a/include/dt-bindings/clock/msm-clocks-8998.h b/include/dt-bindings/clock/msm-clocks-8998.h
index 42617016188d..cd36374a04a7 100644
--- a/include/dt-bindings/clock/msm-clocks-8998.h
+++ b/include/dt-bindings/clock/msm-clocks-8998.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -105,7 +105,6 @@
#define clk_gcc_gpu_gpll0_div_clk 0x07d16c6a
#define clk_gpll4 0xb3b5d85b
#define clk_gpll4_out_main 0xa9a0ab9d
-#define clk_hmss_ahb_clk_src 0xaec8450f
#define clk_usb30_master_clk_src 0xc6262f89
#define clk_pcie_aux_clk_src 0xebc50566
#define clk_ufs_axi_clk_src 0x297ca380
@@ -201,7 +200,6 @@
#define clk_gcc_gpu_bimc_gfx_clk 0x3909459b
#define clk_gcc_gpu_cfg_ahb_clk 0x72f20a57
#define clk_gcc_gpu_iref_clk 0xfd82abad
-#define clk_gcc_hmss_ahb_clk 0x62818713
#define clk_gcc_hmss_dvm_bus_clk 0x17cc8b53
#define clk_gcc_hmss_rbcpr_clk 0x699183be
#define clk_hmss_gpll0_clk_src 0x17eb05d0
diff --git a/include/dt-bindings/clock/msm-clocks-hwio-8998.h b/include/dt-bindings/clock/msm-clocks-hwio-8998.h
index 8dfa36362e8c..fc42ac3d49a0 100644
--- a/include/dt-bindings/clock/msm-clocks-hwio-8998.h
+++ b/include/dt-bindings/clock/msm-clocks-hwio-8998.h
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -66,7 +66,6 @@
#define CLOCK_FRQ_MEASURE_STATUS 0x62008
#define GCC_GPLL0_MODE 0x00000
#define GCC_GPLL4_MODE 0x77000
-#define GCC_HMSS_AHB_CMD_RCGR 0x48014
#define GCC_USB30_MASTER_CMD_RCGR 0x0F014
#define GCC_PCIE_AUX_CMD_RCGR 0x6C000
#define GCC_UFS_AXI_CMD_RCGR 0x75018
@@ -169,7 +168,6 @@
#define GCC_GPU_BIMC_GFX_CBCR 0x71010
#define GCC_GPU_CFG_AHB_CBCR 0x71004
#define GCC_GPU_IREF_EN 0x88010
-#define GCC_HMSS_AHB_CBCR 0x48000
#define GCC_HMSS_DVM_BUS_CBCR 0x4808C
#define GCC_HMSS_RBCPR_CBCR 0x48008
#define GCC_MMSS_SYS_NOC_AXI_CBCR 0x09000
diff --git a/include/linux/balloon_compaction.h b/include/linux/balloon_compaction.h
index 9b0a15d06a4f..79542b2698ec 100644
--- a/include/linux/balloon_compaction.h
+++ b/include/linux/balloon_compaction.h
@@ -48,6 +48,7 @@
#include <linux/migrate.h>
#include <linux/gfp.h>
#include <linux/err.h>
+#include <linux/fs.h>
/*
* Balloon device information descriptor.
@@ -62,6 +63,7 @@ struct balloon_dev_info {
struct list_head pages; /* Pages enqueued & handled to Host */
int (*migratepage)(struct balloon_dev_info *, struct page *newpage,
struct page *page, enum migrate_mode mode);
+ struct inode *inode;
};
extern struct page *balloon_page_enqueue(struct balloon_dev_info *b_dev_info);
@@ -73,45 +75,19 @@ static inline void balloon_devinfo_init(struct balloon_dev_info *balloon)
spin_lock_init(&balloon->pages_lock);
INIT_LIST_HEAD(&balloon->pages);
balloon->migratepage = NULL;
+ balloon->inode = NULL;
}
#ifdef CONFIG_BALLOON_COMPACTION
-extern bool balloon_page_isolate(struct page *page);
+extern const struct address_space_operations balloon_aops;
+extern bool balloon_page_isolate(struct page *page,
+ isolate_mode_t mode);
extern void balloon_page_putback(struct page *page);
-extern int balloon_page_migrate(struct page *newpage,
+extern int balloon_page_migrate(struct address_space *mapping,
+ struct page *newpage,
struct page *page, enum migrate_mode mode);
/*
- * __is_movable_balloon_page - helper to perform @page PageBalloon tests
- */
-static inline bool __is_movable_balloon_page(struct page *page)
-{
- return PageBalloon(page);
-}
-
-/*
- * balloon_page_movable - test PageBalloon to identify balloon pages
- * and PagePrivate to check that the page is not
- * isolated and can be moved by compaction/migration.
- *
- * As we might return false positives in the case of a balloon page being just
- * released under us, this need to be re-tested later, under the page lock.
- */
-static inline bool balloon_page_movable(struct page *page)
-{
- return PageBalloon(page) && PagePrivate(page);
-}
-
-/*
- * isolated_balloon_page - identify an isolated balloon page on private
- * compaction/migration page lists.
- */
-static inline bool isolated_balloon_page(struct page *page)
-{
- return PageBalloon(page);
-}
-
-/*
* balloon_page_insert - insert a page into the balloon's page list and make
* the page->private assignment accordingly.
* @balloon : pointer to balloon device
@@ -124,7 +100,7 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
struct page *page)
{
__SetPageBalloon(page);
- SetPagePrivate(page);
+ __SetPageMovable(page, balloon->inode->i_mapping);
set_page_private(page, (unsigned long)balloon);
list_add(&page->lru, &balloon->pages);
}
@@ -140,11 +116,14 @@ static inline void balloon_page_insert(struct balloon_dev_info *balloon,
static inline void balloon_page_delete(struct page *page)
{
__ClearPageBalloon(page);
+ __ClearPageMovable(page);
set_page_private(page, 0);
- if (PagePrivate(page)) {
- ClearPagePrivate(page);
+ /*
+ * Do not touch the page->lru field once @page has been
+ * isolated, because the VM is using that field.
+ */
+ if (!PageIsolated(page))
list_del(&page->lru);
- }
}
/*
diff --git a/include/linux/compaction.h b/include/linux/compaction.h
index 4cd4ddf64cc7..e864751d870a 100644
--- a/include/linux/compaction.h
+++ b/include/linux/compaction.h
@@ -52,6 +52,10 @@ extern void compaction_defer_reset(struct zone *zone, int order,
bool alloc_success);
extern bool compaction_restarting(struct zone *zone, int order);
+extern int kcompactd_run(int nid);
+extern void kcompactd_stop(int nid);
+extern void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx);
+
#else
static inline unsigned long try_to_compact_pages(gfp_t gfp_mask,
unsigned int order, int alloc_flags,
@@ -84,9 +88,22 @@ static inline bool compaction_deferred(struct zone *zone, int order)
return true;
}
+static inline int kcompactd_run(int nid)
+{
+ return 0;
+}
+static inline void kcompactd_stop(int nid)
+{
+}
+
+static inline void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+}
+
#endif /* CONFIG_COMPACTION */
#if defined(CONFIG_COMPACTION) && defined(CONFIG_SYSFS) && defined(CONFIG_NUMA)
+struct node;
extern int compaction_register_node(struct node *node);
extern void compaction_unregister_node(struct node *node);
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 94edbb64f1c6..db16b3fd48c0 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -404,6 +404,8 @@ struct address_space_operations {
*/
int (*migratepage) (struct address_space *,
struct page *, struct page *, enum migrate_mode);
+ bool (*isolate_page)(struct page *, isolate_mode_t);
+ void (*putback_page)(struct page *);
int (*launder_page) (struct page *);
int (*is_partially_uptodate) (struct page *, unsigned long,
unsigned long);
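
The two new hooks complete the driver-side contract for non-LRU movable pages: isolate_page claims a page for migration, migratepage (already present) moves its contents, and putback_page restores it if migration is aborted. A skeletal aops wiring, with hypothetical demo_ callbacks:

#include <linux/fs.h>
#include <linux/migrate.h>
#include <linux/mm.h>

static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
{
	/* take the page off the driver's internal list */
	return true;
}

static int demo_migratepage(struct address_space *mapping,
			    struct page *newpage, struct page *page,
			    enum migrate_mode mode)
{
	/* copy contents and switch the driver's reference to newpage */
	return MIGRATEPAGE_SUCCESS;
}

static void demo_putback_page(struct page *page)
{
	/* isolation was undone by the VM: relink the page */
}

static const struct address_space_operations demo_aops = {
	.isolate_page	= demo_isolate_page,
	.migratepage	= demo_migratepage,
	.putback_page	= demo_putback_page,
};

Drivers opt individual pages in with __SetPageMovable(page, mapping), as the balloon code later in this series does.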
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 1b3f20e8fb74..c4c25651ff21 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -136,6 +136,7 @@ enum iommu_attr {
DOMAIN_ATTR_EARLY_MAP,
DOMAIN_ATTR_PAGE_TABLE_IS_COHERENT,
DOMAIN_ATTR_PAGE_TABLE_FORCE_COHERENT,
+ DOMAIN_ATTR_ENABLE_TTBR1,
DOMAIN_ATTR_MAX,
};
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index 7ae216a39c9e..481c8c4627ca 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -43,8 +43,7 @@ static inline struct stable_node *page_stable_node(struct page *page)
static inline void set_page_stable_node(struct page *page,
struct stable_node *stable_node)
{
- page->mapping = (void *)stable_node +
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
/*
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index cac1c0904d5f..5219df44cfec 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -33,6 +33,8 @@ extern int migrate_page(struct address_space *,
struct page *, struct page *, enum migrate_mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
+extern bool isolate_movable_page(struct page *page, isolate_mode_t mode);
+extern void putback_movable_page(struct page *page);
extern int migrate_prep(void);
extern int migrate_prep_local(void);
@@ -65,6 +67,21 @@ static inline int migrate_huge_page_move_mapping(struct address_space *mapping,
#endif /* CONFIG_MIGRATION */
+#ifdef CONFIG_COMPACTION
+extern int PageMovable(struct page *page);
+extern void __SetPageMovable(struct page *page, struct address_space *mapping);
+extern void __ClearPageMovable(struct page *page);
+#else
+static inline int PageMovable(struct page *page) { return 0; };
+static inline void __SetPageMovable(struct page *page,
+ struct address_space *mapping)
+{
+}
+static inline void __ClearPageMovable(struct page *page)
+{
+}
+#endif
+
#ifdef CONFIG_NUMA_BALANCING
extern bool pmd_trans_migrating(pmd_t pmd);
extern int migrate_misplaced_page(struct page *page,
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 97b11c9fd48a..87a4bb8ed512 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -995,6 +995,7 @@ static inline int page_mapped(struct page *page)
{
return atomic_read(&(page)->_mapcount) >= 0;
}
+struct address_space *page_mapping(struct page *page);
/*
* Return true only if the page has been allocated with
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index ddb3b927de39..45037a3a8e8e 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -689,6 +689,12 @@ typedef struct pglist_data {
mem_hotplug_begin/end() */
int kswapd_max_order;
enum zone_type classzone_idx;
+#ifdef CONFIG_COMPACTION
+ int kcompactd_max_order;
+ enum zone_type kcompactd_classzone_idx;
+ wait_queue_head_t kcompactd_wait;
+ struct task_struct *kcompactd;
+#endif
#ifdef CONFIG_NUMA_BALANCING
/* Lock serializing the migrate rate limiting window */
spinlock_t numabalancing_migrate_lock;
diff --git a/include/linux/msm_ext_display.h b/include/linux/msm_ext_display.h
index 4378080da0d9..d9831d7cbb4e 100644
--- a/include/linux/msm_ext_display.h
+++ b/include/linux/msm_ext_display.h
@@ -109,6 +109,7 @@ struct msm_ext_disp_intf_ops {
* @get_audio_edid_blk: retrieve audio edid block
* @cable_status: cable connected/disconnected
* @get_intf_id: id of connected interface
+ * @acknowledge: acknowledge audio status
*/
struct msm_ext_disp_audio_codec_ops {
int (*audio_info_setup)(struct platform_device *pdev,
@@ -118,6 +119,7 @@ struct msm_ext_disp_audio_codec_ops {
int (*cable_status)(struct platform_device *pdev, u32 vote);
int (*get_intf_id)(struct platform_device *pdev);
void (*teardown_done)(struct platform_device *pdev);
+ int (*acknowledge)(struct platform_device *pdev, u32 ack);
};
/*
diff --git a/include/linux/msm_gsi.h b/include/linux/msm_gsi.h
index 6037fbf00a23..d4b4cc7f8737 100644
--- a/include/linux/msm_gsi.h
+++ b/include/linux/msm_gsi.h
@@ -1053,6 +1053,18 @@ int gsi_enable_fw(phys_addr_t gsi_base_addr, u32 gsi_size);
void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
unsigned long *size);
+/**
+ * gsi_halt_channel_ee - Peripheral should call this function
+ * to stop other EE's channel. This is usually used in SSR cleanup.
+ *
+ * @chan_idx: Virtual channel index
+ * @ee: EE
+ * @code: [out] response code for operation
+ *
+ * @Return gsi_status
+ */
+int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee, int *code);
+
/*
* Here is a typical sequence of calls
*
@@ -1250,5 +1262,11 @@ static inline void gsi_get_inst_ram_offset_and_size(unsigned long *base_offset,
unsigned long *size)
{
}
+
+static inline int gsi_halt_channel_ee(unsigned int chan_idx, unsigned int ee,
+ int *code)
+{
+ return -GSI_STATUS_UNSUPPORTED_OP;
+}
#endif
#endif
diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h
index 68cfe76e8652..5991485cdea4 100644
--- a/include/linux/msm_kgsl.h
+++ b/include/linux/msm_kgsl.h
@@ -3,11 +3,32 @@
#include <uapi/linux/msm_kgsl.h>
+#ifdef CONFIG_QCOM_KGSL
/* Limits mitigations APIs */
void *kgsl_pwr_limits_add(enum kgsl_deviceid id);
void kgsl_pwr_limits_del(void *limit);
int kgsl_pwr_limits_set_freq(void *limit, unsigned int freq);
void kgsl_pwr_limits_set_default(void *limit);
unsigned int kgsl_pwr_limits_get_freq(enum kgsl_deviceid id);
+#else
+static inline void *kgsl_pwr_limits_add(enum kgsl_deviceid id)
+{
+ return NULL;
+}
+
+static inline void kgsl_pwr_limits_del(void *limit) { }
+
+static inline int kgsl_pwr_limits_set_freq(void *limit, unsigned int freq)
+{
+ return -EINVAL;
+}
+
+static inline void kgsl_pwr_limits_set_default(void *limit) { }
+
+static inline unsigned int kgsl_pwr_limits_get_freq(enum kgsl_deviceid id)
+{
+ return 0;
+}
+#endif
#endif /* _MSM_KGSL_H */
diff --git a/include/linux/msm_mhi.h b/include/linux/msm_mhi.h
index b8b2226940a4..f8ba31ea7573 100644
--- a/include/linux/msm_mhi.h
+++ b/include/linux/msm_mhi.h
@@ -63,9 +63,10 @@ enum MHI_CLIENT_CHANNEL {
MHI_CLIENT_RESERVED_1_UPPER = 99,
MHI_CLIENT_IP_HW_0_OUT = 100,
MHI_CLIENT_IP_HW_0_IN = 101,
- MHI_CLIENT_RESERVED_2_LOWER = 102,
+ MHI_CLIENT_IP_HW_ADPL_IN = 102,
+ MHI_CLIENT_RESERVED_2_LOWER = 103,
MHI_CLIENT_RESERVED_2_UPPER = 127,
- MHI_MAX_CHANNELS = 102
+ MHI_MAX_CHANNELS = 103
};
enum MHI_CB_REASON {
@@ -214,7 +215,7 @@ int mhi_get_max_desc(struct mhi_client_handle *client_handle);
/* RmNET Reserved APIs, This APIs are reserved for use by the linux network
* stack only. Use by other clients will introduce system wide issues
*/
-int mhi_set_lpm(struct mhi_client_handle *client_handle, int enable_lpm);
+int mhi_set_lpm(struct mhi_client_handle *client_handle, bool enable_lpm);
int mhi_get_epid(struct mhi_client_handle *mhi_handle);
struct mhi_result *mhi_poll(struct mhi_client_handle *client_handle);
void mhi_mask_irq(struct mhi_client_handle *client_handle);
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 4b115168607f..86c233be1cfc 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -132,6 +132,9 @@ enum pageflags {
/* SLOB */
PG_slob_free = PG_private,
+
+ /* non-lru isolated movable page */
+ PG_isolated = PG_reclaim,
};
#ifndef __GENERATING_BOUNDS_H
@@ -309,25 +312,38 @@ PAGEFLAG(Idle, idle)
* with the PAGE_MAPPING_ANON bit set to distinguish it. See rmap.h.
*
* On an anonymous page in a VM_MERGEABLE area, if CONFIG_KSM is enabled,
- * the PAGE_MAPPING_KSM bit may be set along with the PAGE_MAPPING_ANON bit;
- * and then page->mapping points, not to an anon_vma, but to a private
+ * the PAGE_MAPPING_MOVABLE bit may be set along with the PAGE_MAPPING_ANON
+ * bit; and then page->mapping points, not to an anon_vma, but to a private
* structure which KSM associates with that merged page. See ksm.h.
*
- * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is currently never used.
+ * PAGE_MAPPING_KSM without PAGE_MAPPING_ANON is used for non-lru movable
+ * page and then page->mapping points a struct address_space.
*
* Please note that, confusingly, "page_mapping" refers to the inode
* address_space which maps the page from disk; whereas "page_mapped"
* refers to user virtual address space into which the page is mapped.
*/
-#define PAGE_MAPPING_ANON 1
-#define PAGE_MAPPING_KSM 2
-#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM)
+#define PAGE_MAPPING_ANON 0x1
+#define PAGE_MAPPING_MOVABLE 0x2
+#define PAGE_MAPPING_KSM (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+#define PAGE_MAPPING_FLAGS (PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE)
+
+static __always_inline int PageMappingFlags(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) != 0;
+}
static inline int PageAnon(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_ANON) != 0;
}
+static __always_inline int __PageMovable(struct page *page)
+{
+ return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
+ PAGE_MAPPING_MOVABLE;
+}
+
#ifdef CONFIG_KSM
/*
* A KSM page is one of those write-protected "shared pages" or "merged pages"
@@ -338,7 +354,7 @@ static inline int PageAnon(struct page *page)
static inline int PageKsm(struct page *page)
{
return ((unsigned long)page->mapping & PAGE_MAPPING_FLAGS) ==
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ PAGE_MAPPING_KSM;
}
#else
TESTPAGEFLAG_FALSE(Ksm)
@@ -557,6 +573,8 @@ static inline void __ClearPageBalloon(struct page *page)
atomic_set(&page->_mapcount, -1);
}
+__PAGEFLAG(Isolated, isolated);
+
/*
* If network-based swap is enabled, sl*b must keep track of whether pages
* were allocated from pfmemalloc reserves.
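
After this change the low two bits of page->mapping form a complete tag namespace: 01 anon, 10 non-LRU movable, 11 KSM (which is why PAGE_MAPPING_KSM is now the OR of the other two). A standalone decoding demo using the same bit values:

#include <stdio.h>

#define MAPPING_ANON    0x1UL
#define MAPPING_MOVABLE 0x2UL
#define MAPPING_FLAGS   (MAPPING_ANON | MAPPING_MOVABLE)

static const char *decode(unsigned long mapping)
{
	switch (mapping & MAPPING_FLAGS) {
	case MAPPING_ANON:			return "anon";
	case MAPPING_MOVABLE:			return "movable";
	case MAPPING_ANON | MAPPING_MOVABLE:	return "ksm";
	default:				return "file/none";
	}
}

int main(void)
{
	unsigned long base = 0x1000;	/* stand-in for a struct pointer */

	printf("%s\n", decode(base | MAPPING_ANON));	/* anon */
	printf("%s\n", decode(base | MAPPING_MOVABLE));	/* movable */
	printf("%s\n", decode(base | MAPPING_FLAGS));	/* ksm */
	return 0;
}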
diff --git a/include/linux/qpnp/qpnp-pbs.h b/include/linux/qpnp/qpnp-pbs.h
new file mode 100644
index 000000000000..39497ac4b552
--- /dev/null
+++ b/include/linux/qpnp/qpnp-pbs.h
@@ -0,0 +1,25 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_PBS_H
+#define _QPNP_PBS_H
+
+#ifdef CONFIG_QPNP_PBS
+int qpnp_pbs_trigger_event(struct device_node *dev_node, u8 bitmap);
+#else
+static inline int qpnp_pbs_trigger_event(struct device_node *dev_node,
+ u8 bitmap) {
+ return -ENODEV;
+}
+#endif
+
+#endif
diff --git a/include/linux/regulator/qpnp-labibb-regulator.h b/include/linux/regulator/qpnp-labibb-regulator.h
new file mode 100644
index 000000000000..247069507fd9
--- /dev/null
+++ b/include/linux/regulator/qpnp-labibb-regulator.h
@@ -0,0 +1,23 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _QPNP_LABIBB_REGULATOR_H
+#define _QPNP_LABIBB_REGULATOR_H
+
+enum labibb_notify_event {
+ LAB_VREG_OK = 1,
+};
+
+int qpnp_labibb_notifier_register(struct notifier_block *nb);
+int qpnp_labibb_notifier_unregister(struct notifier_block *nb);
+
+#endif
diff --git a/include/linux/sde_io_util.h b/include/linux/sde_io_util.h
new file mode 100644
index 000000000000..6bd5c168ecd8
--- /dev/null
+++ b/include/linux/sde_io_util.h
@@ -0,0 +1,113 @@
+/* Copyright (c) 2012, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IO_UTIL_H__
+#define __SDE_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#ifdef DEBUG
+#define DEV_DBG(fmt, args...) pr_err(fmt, ##args)
+#else
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#endif
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct dss_io_data {
+ u32 len;
+ void __iomem *base;
+};
+
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug);
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug);
+void dss_reg_dump(void __iomem *base, u32 len, const char *prefix, u32 debug);
+
+#define DSS_REG_W_ND(io, offset, val) dss_reg_w(io, offset, val, false)
+#define DSS_REG_W(io, offset, val) dss_reg_w(io, offset, val, true)
+#define DSS_REG_R_ND(io, offset) dss_reg_r(io, offset, false)
+#define DSS_REG_R(io, offset) dss_reg_r(io, offset, true)
+
+enum dss_vreg_type {
+ DSS_REG_LDO,
+ DSS_REG_VS,
+};
+
+struct dss_vreg {
+ struct regulator *vreg; /* vreg handle */
+ char vreg_name[32];
+ int min_voltage;
+ int max_voltage;
+ int enable_load;
+ int disable_load;
+ int pre_on_sleep;
+ int post_on_sleep;
+ int pre_off_sleep;
+ int post_off_sleep;
+};
+
+struct dss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum dss_clk_type {
+ DSS_CLK_AHB, /* no set rate. rate controlled through rpm */
+ DSS_CLK_PCLK,
+ DSS_CLK_OTHER,
+};
+
+struct dss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum dss_clk_type type;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+struct dss_module_power {
+ unsigned int num_vreg;
+ struct dss_vreg *vreg_config;
+ unsigned int num_gpio;
+ struct dss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct dss_clk *clk_config;
+};
+
+int msm_dss_ioremap_byname(struct platform_device *pdev,
+ struct dss_io_data *io_data, const char *name);
+void msm_dss_iounmap(struct dss_io_data *io_data);
+
+int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable);
+int msm_dss_gpio_enable(struct dss_gpio *in_gpio, int num_gpio, int enable);
+
+int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
+ int num_vreg, int config);
+int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable);
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf);
+int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *value);
+
+#endif /* __SDE_IO_UTIL_H__ */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index 2a03a062a395..1534086e16d0 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -52,6 +52,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PGPGOUTCLEAN, PSWPIN, PSWPOUT,
COMPACTMIGRATE_SCANNED, COMPACTFREE_SCANNED,
COMPACTISOLATED,
COMPACTSTALL, COMPACTFAIL, COMPACTSUCCESS,
+ KCOMPACTD_WAKE,
#endif
#ifdef CONFIG_HUGETLB_PAGE
HTLB_BUDDY_PGALLOC, HTLB_BUDDY_PGALLOC_FAIL,
diff --git a/include/linux/zsmalloc.h b/include/linux/zsmalloc.h
index 34eb16098a33..57a8e98f2708 100644
--- a/include/linux/zsmalloc.h
+++ b/include/linux/zsmalloc.h
@@ -41,10 +41,10 @@ struct zs_pool_stats {
struct zs_pool;
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags);
+struct zs_pool *zs_create_pool(const char *name);
void zs_destroy_pool(struct zs_pool *pool);
-unsigned long zs_malloc(struct zs_pool *pool, size_t size);
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t flags);
void zs_free(struct zs_pool *pool, unsigned long obj);
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
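
The zsmalloc signature change moves the gfp mask from pool creation to each allocation, letting callers pick flags per call site. Caller-side, the migration looks roughly like this (demo_alloc is illustrative):

#include <linux/gfp.h>
#include <linux/zsmalloc.h>

static unsigned long demo_alloc(struct zs_pool *pool, size_t size)
{
	/* was: pool = zs_create_pool("demo", GFP_KERNEL); zs_malloc(pool, size); */
	return zs_malloc(pool, size, GFP_KERNEL | __GFP_HIGHMEM);
}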
diff --git a/include/trace/events/compaction.h b/include/trace/events/compaction.h
index c92d1e1cbad9..223450aeb49e 100644
--- a/include/trace/events/compaction.h
+++ b/include/trace/events/compaction.h
@@ -350,6 +350,61 @@ DEFINE_EVENT(mm_compaction_defer_template, mm_compaction_defer_reset,
);
#endif
+TRACE_EVENT(mm_compaction_kcompactd_sleep,
+
+ TP_PROTO(int nid),
+
+ TP_ARGS(nid),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ ),
+
+ TP_printk("nid=%d", __entry->nid)
+);
+
+DECLARE_EVENT_CLASS(kcompactd_wake_template,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx),
+
+ TP_STRUCT__entry(
+ __field(int, nid)
+ __field(int, order)
+ __field(enum zone_type, classzone_idx)
+ ),
+
+ TP_fast_assign(
+ __entry->nid = nid;
+ __entry->order = order;
+ __entry->classzone_idx = classzone_idx;
+ ),
+
+ TP_printk("nid=%d order=%d classzone_idx=%-8s",
+ __entry->nid,
+ __entry->order,
+ __print_symbolic(__entry->classzone_idx, ZONE_TYPE))
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_wakeup_kcompactd,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx)
+);
+
+DEFINE_EVENT(kcompactd_wake_template, mm_compaction_kcompactd_wake,
+
+ TP_PROTO(int nid, int order, enum zone_type classzone_idx),
+
+ TP_ARGS(nid, order, classzone_idx)
+);
+
#endif /* _TRACE_COMPACTION_H */
/* This part must be outside protection */
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 4fe580f0680d..d2f19ac6f536 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -40,6 +40,15 @@
#define MSM_PIPE_2D1 0x02
#define MSM_PIPE_3D0 0x10
+/* The pipe-id just uses the lower bits, so can be OR'd with flags in
+ * the upper 16 bits (which could be extended further, if needed, maybe
+ * we extend/overload the pipe-id some day to deal with multiple rings,
+ * but even then I don't think we need the full lower 16 bits).
+ */
+#define MSM_PIPE_ID_MASK 0xffff
+#define MSM_PIPE_ID(x) ((x) & MSM_PIPE_ID_MASK)
+#define MSM_PIPE_FLAGS(x) ((x) & ~MSM_PIPE_ID_MASK)
+
/* timeouts are specified in clock-monotonic absolute times (to simplify
* restarting interrupted ioctls). The following struct is logically the
* same as 'struct timespec' but 32/64b ABI safe.
@@ -54,6 +63,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CHIP_ID 0x03
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
+#define MSM_PARAM_GMEM_BASE 0x06
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
@@ -67,6 +77,7 @@ struct drm_msm_param {
#define MSM_BO_SCANOUT 0x00000001 /* scanout capable */
#define MSM_BO_GPU_READONLY 0x00000002
+#define MSM_BO_PRIVILEGED 0x00000004
#define MSM_BO_CACHE_MASK 0x000f0000
/* cache modes */
#define MSM_BO_CACHED 0x00010000
@@ -85,10 +96,14 @@ struct drm_msm_gem_new {
__u32 handle; /* out */
};
+#define MSM_INFO_IOVA 0x01
+
+#define MSM_INFO_FLAGS (MSM_INFO_IOVA)
+
struct drm_msm_gem_info {
__u32 handle; /* in */
- __u32 pad;
- __u64 offset; /* out, offset to pass to mmap() */
+ __u32 flags; /* in - combination of MSM_INFO_* flags */
+ __u64 offset; /* out, mmap() offset or iova */
};
#define MSM_PREP_READ 0x01
@@ -173,12 +188,18 @@ struct drm_msm_gem_submit_bo {
__u64 presumed; /* in/out, presumed buffer address */
};
+/* Valid submit ioctl flags: */
+#define MSM_SUBMIT_RING_MASK 0x000F0000
+#define MSM_SUBMIT_RING_SHIFT 16
+
+#define MSM_SUBMIT_FLAGS (MSM_SUBMIT_RING_MASK)
+
/* Each cmdstream submit consists of a table of buffers involved, and
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
struct drm_msm_gem_submit {
- __u32 pipe; /* in, MSM_PIPE_x */
+ __u32 flags; /* MSM_PIPE_x | MSM_SUBMIT_x */
__u32 fence; /* out */
__u32 nr_bos; /* in, number of submit_bo's */
__u32 nr_cmds; /* in, number of submit_cmd's */
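
With the submit ioctl's pipe field repurposed as a flags word, the ring number rides in bits 16-19 and the pipe id stays in the low 16 bits. Packing and unpacking, userspace-side (macro values copied from the header above):

#include <stdint.h>

#define MSM_PIPE_3D0           0x10
#define MSM_PIPE_ID_MASK       0xffff
#define MSM_PIPE_ID(x)         ((x) & MSM_PIPE_ID_MASK)
#define MSM_SUBMIT_RING_MASK   0x000F0000
#define MSM_SUBMIT_RING_SHIFT  16

static uint32_t submit_flags(uint32_t pipe, uint32_t ring)
{
	return MSM_PIPE_ID(pipe) |
	       ((ring << MSM_SUBMIT_RING_SHIFT) & MSM_SUBMIT_RING_MASK);
}

static uint32_t submit_ring(uint32_t flags)
{
	return (flags & MSM_SUBMIT_RING_MASK) >> MSM_SUBMIT_RING_SHIFT;
}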
diff --git a/include/uapi/linux/magic.h b/include/uapi/linux/magic.h
index cfb5c406f344..0bdfe727d6e0 100644
--- a/include/uapi/linux/magic.h
+++ b/include/uapi/linux/magic.h
@@ -78,5 +78,7 @@
#define BTRFS_TEST_MAGIC 0x73727279
#define NSFS_MAGIC 0x6e736673
#define BPF_FS_MAGIC 0xcafe4a11
+#define BALLOON_KVM_MAGIC 0x13661366
+#define ZSMALLOC_MAGIC 0x58295829
#endif /* __LINUX_MAGIC_H__ */
diff --git a/include/uapi/sound/compress_offload.h b/include/uapi/sound/compress_offload.h
index e04ccf0b6e8b..e050bc758b3b 100644
--- a/include/uapi/sound/compress_offload.h
+++ b/include/uapi/sound/compress_offload.h
@@ -70,7 +70,7 @@ struct snd_compr_tstamp {
__u32 pcm_frames;
__u32 pcm_io_frames;
__u32 sampling_rate;
- uint64_t timestamp;
+ __u64 timestamp;
} __attribute__((packed, aligned(4)));
/**
@@ -128,8 +128,8 @@ struct snd_compr_codec_caps {
* @reserved: reserved for future use
*/
struct snd_compr_audio_info {
- uint32_t frame_size;
- uint32_t reserved[15];
+ __u32 frame_size;
+ __u32 reserved[15];
} __attribute__((packed, aligned(4)));
/**
diff --git a/kernel/sched/core_ctl.c b/kernel/sched/core_ctl.c
index 133b412eabc7..1e3accddd103 100644
--- a/kernel/sched/core_ctl.c
+++ b/kernel/sched/core_ctl.c
@@ -277,9 +277,6 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
for_each_possible_cpu(cpu) {
c = &per_cpu(cpu_state, cpu);
- if (!c->cluster)
- continue;
-
cluster = c->cluster;
if (!cluster || !cluster->inited)
continue;
@@ -301,6 +298,9 @@ static ssize_t show_global_state(const struct cluster_data *state, char *buf)
count += snprintf(buf + count, PAGE_SIZE - count,
"\tIs busy: %u\n", c->is_busy);
count += snprintf(buf + count, PAGE_SIZE - count,
+ "\tNot preferred: %u\n",
+ c->not_preferred);
+ count += snprintf(buf + count, PAGE_SIZE - count,
"\tNr running: %u\n", cluster->nrrun);
count += snprintf(buf + count, PAGE_SIZE - count,
"\tActive CPUs: %u\n", get_active_cpu_count(cluster));
@@ -323,13 +323,14 @@ static ssize_t store_not_preferred(struct cluster_data *state,
int ret;
ret = sscanf(buf, "%u %u %u %u\n", &val[0], &val[1], &val[2], &val[3]);
- if (ret != 1 && ret != state->num_cpus)
+ if (ret != state->num_cpus)
return -EINVAL;
- i = 0;
spin_lock_irqsave(&state_lock, flags);
- list_for_each_entry(c, &state->lru, sib)
- c->not_preferred = val[i++];
+ for (i = 0; i < state->num_cpus; i++) {
+ c = &per_cpu(cpu_state, i + state->first_cpu);
+ c->not_preferred = val[i];
+ }
spin_unlock_irqrestore(&state_lock, flags);
return count;
@@ -340,11 +341,14 @@ static ssize_t show_not_preferred(const struct cluster_data *state, char *buf)
struct cpu_data *c;
ssize_t count = 0;
unsigned long flags;
+ int i;
spin_lock_irqsave(&state_lock, flags);
- list_for_each_entry(c, &state->lru, sib)
- count += snprintf(buf + count, PAGE_SIZE - count,
- "\tCPU:%d %u\n", c->cpu, c->not_preferred);
+ for (i = 0; i < state->num_cpus; i++) {
+ c = &per_cpu(cpu_state, i + state->first_cpu);
+ count += scnprintf(buf + count, PAGE_SIZE - count,
+ "CPU#%d: %u\n", c->cpu, c->not_preferred);
+ }
spin_unlock_irqrestore(&state_lock, flags);
return count;
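
With this change, not_preferred takes exactly one value per CPU in the cluster (e.g. "0 0 1 1" for a four-CPU cluster); values are applied in CPU-index order starting at first_cpu rather than in LRU order, and reads mirror the same per-CPU layout.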
diff --git a/mm/balloon_compaction.c b/mm/balloon_compaction.c
index 300117f1a08f..6c563a4846c4 100644
--- a/mm/balloon_compaction.c
+++ b/mm/balloon_compaction.c
@@ -70,7 +70,7 @@ struct page *balloon_page_dequeue(struct balloon_dev_info *b_dev_info)
*/
if (trylock_page(page)) {
#ifdef CONFIG_BALLOON_COMPACTION
- if (!PagePrivate(page)) {
+ if (PageIsolated(page)) {
/* raced with isolation */
unlock_page(page);
continue;
@@ -106,110 +106,50 @@ EXPORT_SYMBOL_GPL(balloon_page_dequeue);
#ifdef CONFIG_BALLOON_COMPACTION
-static inline void __isolate_balloon_page(struct page *page)
+bool balloon_page_isolate(struct page *page, isolate_mode_t mode)
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
- ClearPagePrivate(page);
list_del(&page->lru);
b_dev_info->isolated_pages++;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
+
+ return true;
}
-static inline void __putback_balloon_page(struct page *page)
+void balloon_page_putback(struct page *page)
{
struct balloon_dev_info *b_dev_info = balloon_page_device(page);
unsigned long flags;
spin_lock_irqsave(&b_dev_info->pages_lock, flags);
- SetPagePrivate(page);
list_add(&page->lru, &b_dev_info->pages);
b_dev_info->isolated_pages--;
spin_unlock_irqrestore(&b_dev_info->pages_lock, flags);
}
-/* __isolate_lru_page() counterpart for a ballooned page */
-bool balloon_page_isolate(struct page *page)
-{
- /*
- * Avoid burning cycles with pages that are yet under __free_pages(),
- * or just got freed under us.
- *
- * In case we 'win' a race for a balloon page being freed under us and
- * raise its refcount preventing __free_pages() from doing its job
- * the put_page() at the end of this block will take care of
- * release this page, thus avoiding a nasty leakage.
- */
- if (likely(get_page_unless_zero(page))) {
- /*
- * As balloon pages are not isolated from LRU lists, concurrent
- * compaction threads can race against page migration functions
- * as well as race against the balloon driver releasing a page.
- *
- * In order to avoid having an already isolated balloon page
- * being (wrongly) re-isolated while it is under migration,
- * or to avoid attempting to isolate pages being released by
- * the balloon driver, lets be sure we have the page lock
- * before proceeding with the balloon page isolation steps.
- */
- if (likely(trylock_page(page))) {
- /*
- * A ballooned page, by default, has PagePrivate set.
- * Prevent concurrent compaction threads from isolating
- * an already isolated balloon page by clearing it.
- */
- if (balloon_page_movable(page)) {
- __isolate_balloon_page(page);
- unlock_page(page);
- return true;
- }
- unlock_page(page);
- }
- put_page(page);
- }
- return false;
-}
-
-/* putback_lru_page() counterpart for a ballooned page */
-void balloon_page_putback(struct page *page)
-{
- /*
- * 'lock_page()' stabilizes the page and prevents races against
- * concurrent isolation threads attempting to re-isolate it.
- */
- lock_page(page);
-
- if (__is_movable_balloon_page(page)) {
- __putback_balloon_page(page);
- /* drop the extra ref count taken for page isolation */
- put_page(page);
- } else {
- WARN_ON(1);
- dump_page(page, "not movable balloon page");
- }
- unlock_page(page);
-}
/* move_to_new_page() counterpart for a ballooned page */
-int balloon_page_migrate(struct page *newpage,
- struct page *page, enum migrate_mode mode)
+int balloon_page_migrate(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode)
{
struct balloon_dev_info *balloon = balloon_page_device(page);
- int rc = -EAGAIN;
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
- if (WARN_ON(!__is_movable_balloon_page(page))) {
- dump_page(page, "not movable balloon page");
- return rc;
- }
+ return balloon->migratepage(balloon, newpage, page, mode);
+}
- if (balloon && balloon->migratepage)
- rc = balloon->migratepage(balloon, newpage, page, mode);
+const struct address_space_operations balloon_aops = {
+ .migratepage = balloon_page_migrate,
+ .isolate_page = balloon_page_isolate,
+ .putback_page = balloon_page_putback,
+};
+EXPORT_SYMBOL_GPL(balloon_aops);
- return rc;
-}
#endif /* CONFIG_BALLOON_COMPACTION */
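
A minimal driver-side sketch, not part of this patch: a balloon driver publishes these callbacks by pointing an inode mapping at the exported balloon_aops (the inode argument and the b->inode field are assumptions about the driver's setup, not confirmed by the hunks shown here):

	static void example_balloon_enable_migration(struct balloon_dev_info *b,
						     struct inode *inode)
	{
		/* Pages enqueued on b->pages can now be isolated/migrated. */
		inode->i_mapping->a_ops = &balloon_aops;
		b->inode = inode;	/* assumed field on balloon_dev_info */
	}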
diff --git a/mm/compaction.c b/mm/compaction.c
index b4c33357f96e..87ed50835881 100644
--- a/mm/compaction.c
+++ b/mm/compaction.c
@@ -7,6 +7,7 @@
*
* Copyright IBM Corp. 2007-2010 Mel Gorman <mel@csn.ul.ie>
*/
+#include <linux/cpu.h>
#include <linux/swap.h>
#include <linux/migrate.h>
#include <linux/compaction.h>
@@ -14,9 +15,10 @@
#include <linux/backing-dev.h>
#include <linux/sysctl.h>
#include <linux/sysfs.h>
-#include <linux/balloon_compaction.h>
#include <linux/page-isolation.h>
#include <linux/kasan.h>
+#include <linux/kthread.h>
+#include <linux/freezer.h>
#include "internal.h"
#ifdef CONFIG_COMPACTION
@@ -116,6 +118,44 @@ static struct page *pageblock_pfn_to_page(unsigned long start_pfn,
#ifdef CONFIG_COMPACTION
+int PageMovable(struct page *page)
+{
+ struct address_space *mapping;
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ if (!__PageMovable(page))
+ return 0;
+
+ mapping = page_mapping(page);
+ if (mapping && mapping->a_ops && mapping->a_ops->isolate_page)
+ return 1;
+
+ return 0;
+}
+EXPORT_SYMBOL(PageMovable);
+
+void __SetPageMovable(struct page *page, struct address_space *mapping)
+{
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE((unsigned long)mapping & PAGE_MAPPING_MOVABLE, page);
+ page->mapping = (void *)((unsigned long)mapping | PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__SetPageMovable);
+
+void __ClearPageMovable(struct page *page)
+{
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ /*
+	 * Clear the registered address_space pointer while keeping the
+	 * PAGE_MAPPING_MOVABLE flag, so that the VM can detect a page the
+	 * driver released after isolation. With the flag still set, VM
+	 * migration doesn't try to put the page back.
+ */
+ page->mapping = (void *)((unsigned long)page->mapping &
+ PAGE_MAPPING_MOVABLE);
+}
+EXPORT_SYMBOL(__ClearPageMovable);
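
A hedged usage sketch for a driver-owned page (the mapping argument is the driver's own address_space; this helper is illustrative and not part of the patch):

	static void example_make_movable(struct page *page,
					 struct address_space *mapping)
	{
		lock_page(page);	/* the helpers assert PageLocked */
		__SetPageMovable(page, mapping);
		unlock_page(page);
	}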
+
/* Do not skip compaction more than 64 times */
#define COMPACT_MAX_DEFER_SHIFT 6
@@ -717,7 +757,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
/* Time to isolate some pages for migration */
for (; low_pfn < end_pfn; low_pfn++) {
- bool is_lru;
/*
* Periodically drop the lock (if held) regardless of its
@@ -758,21 +797,6 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
}
/*
- * Check may be lockless but that's ok as we recheck later.
- * It's possible to migrate LRU pages and balloon pages
- * Skip any other type of page
- */
- is_lru = PageLRU(page);
- if (!is_lru) {
- if (unlikely(balloon_page_movable(page))) {
- if (balloon_page_isolate(page)) {
- /* Successfully isolated */
- goto isolate_success;
- }
- }
- }
-
- /*
* Regardless of being on LRU, compound pages such as THP and
* hugetlbfs are not to be compacted. We can potentially save
* a lot of iterations if we skip them at once. The check is
@@ -788,8 +812,30 @@ isolate_migratepages_block(struct compact_control *cc, unsigned long low_pfn,
continue;
}
- if (!is_lru)
+ /*
+ * Check may be lockless but that's ok as we recheck later.
+ * It's possible to migrate LRU and non-lru movable pages.
+ * Skip any other type of page
+ */
+ if (!PageLRU(page)) {
+ /*
+ * __PageMovable can return false positive so we need
+ * to verify it under page_lock.
+ */
+ if (unlikely(__PageMovable(page)) &&
+ !PageIsolated(page)) {
+ if (locked) {
+ spin_unlock_irqrestore(&zone->lru_lock,
+ flags);
+ locked = false;
+ }
+
+ if (isolate_movable_page(page, isolate_mode))
+ goto isolate_success;
+ }
+
continue;
+ }
/*
* Migration will fail if an anonymous page is pinned in memory,
@@ -1223,11 +1269,11 @@ static int __compact_finished(struct zone *zone, struct compact_control *cc,
/*
* Mark that the PG_migrate_skip information should be cleared
- * by kswapd when it goes to sleep. kswapd does not set the
+ * by kswapd when it goes to sleep. kcompactd does not set the
* flag itself as the decision to be clear should be directly
* based on an allocation request.
*/
- if (!current_is_kswapd())
+ if (cc->direct_compaction)
zone->compact_blockskip_flush = true;
return COMPACT_COMPLETE;
@@ -1370,10 +1416,9 @@ static int compact_zone(struct zone *zone, struct compact_control *cc)
/*
* Clear pageblock skip if there were failures recently and compaction
- * is about to be retried after being deferred. kswapd does not do
- * this reset as it'll reset the cached information when going to sleep.
+ * is about to be retried after being deferred.
*/
- if (compaction_restarting(zone, cc->order) && !current_is_kswapd())
+ if (compaction_restarting(zone, cc->order))
__reset_isolation_suitable(zone);
/*
@@ -1509,6 +1554,7 @@ static unsigned long compact_zone_order(struct zone *zone, int order,
.mode = mode,
.alloc_flags = alloc_flags,
.classzone_idx = classzone_idx,
+ .direct_compaction = true,
};
INIT_LIST_HEAD(&cc.freepages);
INIT_LIST_HEAD(&cc.migratepages);
@@ -1767,4 +1813,225 @@ void compaction_unregister_node(struct node *node)
}
#endif /* CONFIG_SYSFS && CONFIG_NUMA */
+static inline bool kcompactd_work_requested(pg_data_t *pgdat)
+{
+ return pgdat->kcompactd_max_order > 0 || kthread_should_stop();
+}
+
+static bool kcompactd_node_suitable(pg_data_t *pgdat)
+{
+ int zoneid;
+ struct zone *zone;
+ enum zone_type classzone_idx = pgdat->kcompactd_classzone_idx;
+
+ for (zoneid = 0; zoneid <= classzone_idx; zoneid++) {
+ zone = &pgdat->node_zones[zoneid];
+
+ if (!populated_zone(zone))
+ continue;
+
+ if (compaction_suitable(zone, pgdat->kcompactd_max_order, 0,
+ classzone_idx) == COMPACT_CONTINUE)
+ return true;
+ }
+
+ return false;
+}
+
+static void kcompactd_do_work(pg_data_t *pgdat)
+{
+ /*
+	 * With no special task, compact all zones so that a page of the
+	 * requested order is allocatable.
+ */
+ int zoneid;
+ struct zone *zone;
+ struct compact_control cc = {
+ .order = pgdat->kcompactd_max_order,
+ .classzone_idx = pgdat->kcompactd_classzone_idx,
+ .mode = MIGRATE_SYNC_LIGHT,
+		.ignore_skip_hint = true,
+	};
+ bool success = false;
+
+ trace_mm_compaction_kcompactd_wake(pgdat->node_id, cc.order,
+ cc.classzone_idx);
+ count_vm_event(KCOMPACTD_WAKE);
+
+ for (zoneid = 0; zoneid <= cc.classzone_idx; zoneid++) {
+ int status;
+
+ zone = &pgdat->node_zones[zoneid];
+ if (!populated_zone(zone))
+ continue;
+
+ if (compaction_deferred(zone, cc.order))
+ continue;
+
+ if (compaction_suitable(zone, cc.order, 0, zoneid) !=
+ COMPACT_CONTINUE)
+ continue;
+
+ cc.nr_freepages = 0;
+ cc.nr_migratepages = 0;
+ cc.zone = zone;
+ INIT_LIST_HEAD(&cc.freepages);
+ INIT_LIST_HEAD(&cc.migratepages);
+
+ if (kthread_should_stop())
+ return;
+ status = compact_zone(zone, &cc);
+
+ if (zone_watermark_ok(zone, cc.order, low_wmark_pages(zone),
+ cc.classzone_idx, 0)) {
+ success = true;
+ compaction_defer_reset(zone, cc.order, false);
+ } else if (status == COMPACT_COMPLETE) {
+ /*
+ * We use sync migration mode here, so we defer like
+ * sync direct compaction does.
+ */
+ defer_compaction(zone, cc.order);
+ }
+
+ VM_BUG_ON(!list_empty(&cc.freepages));
+ VM_BUG_ON(!list_empty(&cc.migratepages));
+ }
+
+ /*
+ * Regardless of success, we are done until woken up next. But remember
+ * the requested order/classzone_idx in case it was higher/tighter than
+	 * our current ones.
+ */
+ if (pgdat->kcompactd_max_order <= cc.order)
+ pgdat->kcompactd_max_order = 0;
+ if (pgdat->kcompactd_classzone_idx >= cc.classzone_idx)
+ pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
+}
+
+void wakeup_kcompactd(pg_data_t *pgdat, int order, int classzone_idx)
+{
+ if (!order)
+ return;
+
+ if (pgdat->kcompactd_max_order < order)
+ pgdat->kcompactd_max_order = order;
+
+ if (pgdat->kcompactd_classzone_idx > classzone_idx)
+ pgdat->kcompactd_classzone_idx = classzone_idx;
+
+ if (!waitqueue_active(&pgdat->kcompactd_wait))
+ return;
+
+ if (!kcompactd_node_suitable(pgdat))
+ return;
+
+ trace_mm_compaction_wakeup_kcompactd(pgdat->node_id, order,
+ classzone_idx);
+ wake_up_interruptible(&pgdat->kcompactd_wait);
+}
+
+/*
+ * The background compaction daemon, started as a kernel thread
+ * from the init process.
+ */
+static int kcompactd(void *p)
+{
+ pg_data_t *pgdat = (pg_data_t*)p;
+ struct task_struct *tsk = current;
+
+ const struct cpumask *cpumask = cpumask_of_node(pgdat->node_id);
+
+ if (!cpumask_empty(cpumask))
+ set_cpus_allowed_ptr(tsk, cpumask);
+
+ set_freezable();
+
+ pgdat->kcompactd_max_order = 0;
+ pgdat->kcompactd_classzone_idx = pgdat->nr_zones - 1;
+
+ while (!kthread_should_stop()) {
+ trace_mm_compaction_kcompactd_sleep(pgdat->node_id);
+ wait_event_freezable(pgdat->kcompactd_wait,
+ kcompactd_work_requested(pgdat));
+
+ kcompactd_do_work(pgdat);
+ }
+
+ return 0;
+}
+
+/*
+ * This kcompactd start function will be called by init and node-hot-add.
+ * On node-hot-add, kcompactd will be moved to the proper cpus if cpus are hot-added.
+ */
+int kcompactd_run(int nid)
+{
+ pg_data_t *pgdat = NODE_DATA(nid);
+ int ret = 0;
+
+ if (pgdat->kcompactd)
+ return 0;
+
+ pgdat->kcompactd = kthread_run(kcompactd, pgdat, "kcompactd%d", nid);
+ if (IS_ERR(pgdat->kcompactd)) {
+ pr_err("Failed to start kcompactd on node %d\n", nid);
+ ret = PTR_ERR(pgdat->kcompactd);
+ pgdat->kcompactd = NULL;
+ }
+ return ret;
+}
+
+/*
+ * Called by memory hotplug when all memory in a node is offlined. Caller must
+ * hold mem_hotplug_begin/end().
+ */
+void kcompactd_stop(int nid)
+{
+ struct task_struct *kcompactd = NODE_DATA(nid)->kcompactd;
+
+ if (kcompactd) {
+ kthread_stop(kcompactd);
+ NODE_DATA(nid)->kcompactd = NULL;
+ }
+}
+
+/*
+ * It's optimal to keep each kcompactd on the same CPUs as its node's
+ * memory, but not required for correctness. So if the last cpu in a node
+ * goes away, the thread may run anywhere; as the first one comes back,
+ * restore its cpu binding.
+ */
+static int cpu_callback(struct notifier_block *nfb, unsigned long action,
+ void *hcpu)
+{
+ int nid;
+
+ if (action == CPU_ONLINE || action == CPU_ONLINE_FROZEN) {
+ for_each_node_state(nid, N_MEMORY) {
+ pg_data_t *pgdat = NODE_DATA(nid);
+ const struct cpumask *mask;
+
+ mask = cpumask_of_node(pgdat->node_id);
+
+ if (cpumask_any_and(cpu_online_mask, mask) < nr_cpu_ids)
+ /* One of our CPUs online: restore mask */
+ set_cpus_allowed_ptr(pgdat->kcompactd, mask);
+ }
+ }
+ return NOTIFY_OK;
+}
+
+static int __init kcompactd_init(void)
+{
+ int nid;
+
+ for_each_node_state(nid, N_MEMORY)
+ kcompactd_run(nid);
+ hotcpu_notifier(cpu_callback, 0);
+ return 0;
+}
+subsys_initcall(kcompactd_init)
+
#endif /* CONFIG_COMPACTION */
diff --git a/mm/internal.h b/mm/internal.h
index 55d4fa99b486..7b9e313d9dea 100644
--- a/mm/internal.h
+++ b/mm/internal.h
@@ -206,6 +206,7 @@ struct compact_control {
unsigned long last_migrated_pfn;/* Not yet flushed page being freed */
enum migrate_mode mode; /* Async or sync migration mode */
bool ignore_skip_hint; /* Scan blocks even if marked skip */
+ bool direct_compaction; /* False from kcompactd or /proc/... */
int order; /* order a direct compactor needs */
const gfp_t gfp_mask; /* gfp mask of a direct compactor */
const int alloc_flags; /* alloc flags of a direct compactor */
diff --git a/mm/ksm.c b/mm/ksm.c
index 2bdad62ff809..7b59d9723adb 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -558,8 +558,8 @@ static struct page *get_ksm_page(struct stable_node *stable_node, bool lock_it)
void *expected_mapping;
unsigned long kpfn;
- expected_mapping = (void *)stable_node +
- (PAGE_MAPPING_ANON | PAGE_MAPPING_KSM);
+ expected_mapping = (void *)((unsigned long)stable_node |
+ PAGE_MAPPING_KSM);
again:
kpfn = READ_ONCE(stable_node->kpfn);
page = pfn_to_page(kpfn);
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index a042a9d537bb..91f7c074e385 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -32,6 +32,7 @@
#include <linux/hugetlb.h>
#include <linux/memblock.h>
#include <linux/bootmem.h>
+#include <linux/compaction.h>
#include <asm/tlbflush.h>
@@ -1012,7 +1013,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
arg.nr_pages = nr_pages;
node_states_check_changes_online(nr_pages, zone, &arg);
- nid = pfn_to_nid(pfn);
+ nid = zone_to_nid(zone);
ret = memory_notify(MEM_GOING_ONLINE, &arg);
ret = notifier_to_errno(ret);
@@ -1052,7 +1053,7 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
pgdat_resize_unlock(zone->zone_pgdat, &flags);
if (onlined_pages) {
- node_states_set_node(zone_to_nid(zone), &arg);
+ node_states_set_node(nid, &arg);
if (need_zonelists_rebuild)
build_all_zonelists(NULL, NULL);
else
@@ -1063,8 +1064,10 @@ int __ref online_pages(unsigned long pfn, unsigned long nr_pages, int online_typ
init_per_zone_wmark_min();
- if (onlined_pages)
- kswapd_run(zone_to_nid(zone));
+ if (onlined_pages) {
+ kswapd_run(nid);
+ kcompactd_run(nid);
+ }
vm_total_pages = nr_free_pagecache_pages();
@@ -1828,8 +1831,10 @@ repeat:
zone_pcp_update(zone);
node_states_clear_node(node, &arg);
- if (arg.status_change_nid >= 0)
+ if (arg.status_change_nid >= 0) {
kswapd_stop(node);
+ kcompactd_stop(node);
+ }
vm_total_pages = nr_free_pagecache_pages();
writeback_set_ratelimit();
diff --git a/mm/migrate.c b/mm/migrate.c
index a9ce3783361a..9a5ccfc71afc 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -31,6 +31,7 @@
#include <linux/vmalloc.h>
#include <linux/security.h>
#include <linux/backing-dev.h>
+#include <linux/compaction.h>
#include <linux/syscalls.h>
#include <linux/hugetlb.h>
#include <linux/hugetlb_cgroup.h>
@@ -72,6 +73,81 @@ int migrate_prep_local(void)
return 0;
}
+bool isolate_movable_page(struct page *page, isolate_mode_t mode)
+{
+ struct address_space *mapping;
+
+ /*
+	 * Avoid burning cycles with pages that are still under __free_pages(),
+	 * or that just got freed under us.
+	 *
+	 * In case we 'win' a race for a movable page being freed under us and
+	 * raise its refcount, preventing __free_pages() from doing its job,
+	 * the put_page() at the end of this block will take care of
+	 * releasing this page, thus avoiding a nasty leak.
+ */
+ if (unlikely(!get_page_unless_zero(page)))
+ goto out;
+
+ /*
+	 * Check PageMovable before taking the PG_lock because the page's
+	 * owner assumes nobody touches the PG_lock of a newly allocated
+	 * page, so unconditionally grabbing the lock would disrupt the
+	 * owner's use of the page.
+ */
+ if (unlikely(!__PageMovable(page)))
+ goto out_putpage;
+ /*
+ * As movable pages are not isolated from LRU lists, concurrent
+ * compaction threads can race against page migration functions
+	 * as well as race against a page being released.
+ *
+ * In order to avoid having an already isolated movable page
+ * being (wrongly) re-isolated while it is under migration,
+ * or to avoid attempting to isolate pages being released,
+	 * let's be sure we have the page lock
+ * before proceeding with the movable page isolation steps.
+ */
+ if (unlikely(!trylock_page(page)))
+ goto out_putpage;
+
+ if (!PageMovable(page) || PageIsolated(page))
+ goto out_no_isolated;
+
+ mapping = page_mapping(page);
+ VM_BUG_ON_PAGE(!mapping, page);
+
+ if (!mapping->a_ops->isolate_page(page, mode))
+ goto out_no_isolated;
+
+	/* Drivers shouldn't use the PG_isolated bit of page->flags */
+ WARN_ON_ONCE(PageIsolated(page));
+ __SetPageIsolated(page);
+ unlock_page(page);
+
+ return true;
+
+out_no_isolated:
+ unlock_page(page);
+out_putpage:
+ put_page(page);
+out:
+ return false;
+}
+
+/* It should be called on a page that is PG_movable */
+void putback_movable_page(struct page *page)
+{
+ struct address_space *mapping;
+
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ mapping = page_mapping(page);
+ mapping->a_ops->putback_page(page);
+ __ClearPageIsolated(page);
+}
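
Note the unconditional a_ops->putback_page() call here: PageMovable() above only verifies that isolate_page exists, so any mapping whose pages can pass isolation is implicitly required to provide putback_page (and migratepage) as well.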
+
/*
* Put previously isolated pages back onto the appropriate lists
* from where they were once taken off for compaction/migration.
@@ -93,10 +169,23 @@ void putback_movable_pages(struct list_head *l)
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- if (unlikely(isolated_balloon_page(page)))
- balloon_page_putback(page);
- else
+ /*
+		 * We isolated a non-lru movable page, so here we can use
+		 * __PageMovable: an LRU page's mapping can never have
+		 * PAGE_MAPPING_MOVABLE set.
+ */
+ if (unlikely(__PageMovable(page))) {
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ lock_page(page);
+ if (PageMovable(page))
+ putback_movable_page(page);
+ else
+ __ClearPageIsolated(page);
+ unlock_page(page);
+ put_page(page);
+ } else {
putback_lru_page(page);
+ }
}
}
@@ -587,7 +676,7 @@ EXPORT_SYMBOL(migrate_page_copy);
***********************************************************/
/*
- * Common logic to directly migrate a single page suitable for
+ * Common logic to directly migrate a single LRU page suitable for
* pages that do not use PagePrivate/PagePrivate2.
*
* Pages are locked upon entry and exit.
@@ -750,24 +839,47 @@ static int move_to_new_page(struct page *newpage, struct page *page,
enum migrate_mode mode)
{
struct address_space *mapping;
- int rc;
+ int rc = -EAGAIN;
+ bool is_lru = !__PageMovable(page);
VM_BUG_ON_PAGE(!PageLocked(page), page);
VM_BUG_ON_PAGE(!PageLocked(newpage), newpage);
mapping = page_mapping(page);
- if (!mapping)
- rc = migrate_page(mapping, newpage, page, mode);
- else if (mapping->a_ops->migratepage)
+
+ if (likely(is_lru)) {
+ if (!mapping)
+ rc = migrate_page(mapping, newpage, page, mode);
+ else if (mapping->a_ops->migratepage)
+ /*
+ * Most pages have a mapping and most filesystems
+ * provide a migratepage callback. Anonymous pages
+ * are part of swap space which also has its own
+ * migratepage callback. This is the most common path
+ * for page migration.
+ */
+ rc = mapping->a_ops->migratepage(mapping, newpage,
+ page, mode);
+ else
+ rc = fallback_migrate_page(mapping, newpage,
+ page, mode);
+ } else {
/*
- * Most pages have a mapping and most filesystems provide a
- * migratepage callback. Anonymous pages are part of swap
- * space which also has its own migratepage callback. This
- * is the most common path for page migration.
+		 * A non-lru page could have been released after the
+		 * isolation step. In that case, we shouldn't try migration.
*/
- rc = mapping->a_ops->migratepage(mapping, newpage, page, mode);
- else
- rc = fallback_migrate_page(mapping, newpage, page, mode);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+ if (!PageMovable(page)) {
+ rc = MIGRATEPAGE_SUCCESS;
+ __ClearPageIsolated(page);
+ goto out;
+ }
+
+ rc = mapping->a_ops->migratepage(mapping, newpage,
+ page, mode);
+ WARN_ON_ONCE(rc == MIGRATEPAGE_SUCCESS &&
+ !PageIsolated(page));
+ }
/*
* When successful, old pagecache page->mapping must be cleared before
@@ -775,9 +887,25 @@ static int move_to_new_page(struct page *newpage, struct page *page,
*/
if (rc == MIGRATEPAGE_SUCCESS) {
set_page_memcg(page, NULL);
- if (!PageAnon(page))
+ if (__PageMovable(page)) {
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ /*
+			 * We clear PG_movable under page_lock so that no
+			 * compactor can try to migrate this page.
+ */
+ __ClearPageIsolated(page);
+ }
+
+ /*
+		 * Anonymous and movable page->mapping will be cleared by
+		 * free_pages_prepare, so don't reset it here; keeping it
+		 * lets type checks such as PageAnon() keep working.
+ */
+ if (!PageMappingFlags(page))
page->mapping = NULL;
}
+out:
return rc;
}
@@ -787,6 +915,7 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
int rc = -EAGAIN;
int page_was_mapped = 0;
struct anon_vma *anon_vma = NULL;
+ bool is_lru = !__PageMovable(page);
if (!trylock_page(page)) {
if (!force || mode == MIGRATE_ASYNC)
@@ -855,15 +984,8 @@ static int __unmap_and_move(struct page *page, struct page *newpage,
if (unlikely(!trylock_page(newpage)))
goto out_unlock;
- if (unlikely(isolated_balloon_page(page))) {
- /*
- * A ballooned page does not need any special attention from
- * physical to virtual reverse mapping procedures.
- * Skip any attempt to unmap PTEs or to remap swap cache,
- * in order to avoid burning cycles at rmap level, and perform
- * the page migration right away (proteced by page lock).
- */
- rc = balloon_page_migrate(newpage, page, mode);
+ if (unlikely(!is_lru)) {
+ rc = move_to_new_page(newpage, page, mode);
goto out_unlock_both;
}
@@ -909,6 +1031,19 @@ out_unlock:
put_anon_vma(anon_vma);
unlock_page(page);
out:
+ /*
+	 * If migration is successful, drop our reference to the newpage;
+	 * it will not be freed because the new page owner holds its own
+	 * reference. If it is an LRU page, also put it back on the LRU
+	 * list here.
+ */
+ if (rc == MIGRATEPAGE_SUCCESS) {
+ if (unlikely(__PageMovable(newpage)))
+ put_page(newpage);
+ else
+ putback_lru_page(newpage);
+ }
+
return rc;
}
@@ -942,6 +1077,18 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
if (page_count(page) == 1) {
/* page was freed from under us. So we are done. */
+ ClearPageActive(page);
+ ClearPageUnevictable(page);
+ if (unlikely(__PageMovable(page))) {
+ lock_page(page);
+ if (!PageMovable(page))
+ __ClearPageIsolated(page);
+ unlock_page(page);
+ }
+ if (put_new_page)
+ put_new_page(newpage, private);
+ else
+ put_page(newpage);
goto out;
}
@@ -950,8 +1097,6 @@ static ICE_noinline int unmap_and_move(new_page_t get_new_page,
goto out;
rc = __unmap_and_move(page, newpage, force, mode);
- if (rc == MIGRATEPAGE_SUCCESS)
- put_new_page = NULL;
out:
if (rc != -EAGAIN) {
@@ -964,33 +1109,45 @@ out:
list_del(&page->lru);
dec_zone_page_state(page, NR_ISOLATED_ANON +
page_is_file_cache(page));
- /* Soft-offlined page shouldn't go through lru cache list */
- if (reason == MR_MEMORY_FAILURE && rc == MIGRATEPAGE_SUCCESS) {
+ }
+
+ /*
+	 * If migration is successful, release the reference grabbed during
+	 * isolation. Otherwise, restore the page to the right list unless
+	 * we want to retry.
+ */
+ if (rc == MIGRATEPAGE_SUCCESS) {
+ put_page(page);
+ if (reason == MR_MEMORY_FAILURE) {
/*
- * With this release, we free successfully migrated
- * page and set PG_HWPoison on just freed page
- * intentionally. Although it's rather weird, it's how
- * HWPoison flag works at the moment.
+ * Set PG_HWPoison on just freed page
+ * intentionally. Although it's rather weird,
+ * it's how HWPoison flag works at the moment.
*/
- put_page(page);
if (!test_set_page_hwpoison(page))
num_poisoned_pages_inc();
- } else
- putback_lru_page(page);
- }
+ }
+ } else {
+ if (rc != -EAGAIN) {
+ if (likely(!__PageMovable(page))) {
+ putback_lru_page(page);
+ goto put_new;
+ }
- /*
- * If migration was not successful and there's a freeing callback, use
- * it. Otherwise, putback_lru_page() will drop the reference grabbed
- * during isolation.
- */
- if (put_new_page)
- put_new_page(newpage, private);
- else if (unlikely(__is_movable_balloon_page(newpage))) {
- /* drop our reference, page already in the balloon */
- put_page(newpage);
- } else
- putback_lru_page(newpage);
+ lock_page(page);
+ if (PageMovable(page))
+ putback_movable_page(page);
+ else
+ __ClearPageIsolated(page);
+ unlock_page(page);
+ put_page(page);
+ }
+put_new:
+ if (put_new_page)
+ put_new_page(newpage, private);
+ else
+ put_page(newpage);
+ }
if (result) {
if (rc)
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c42587d02190..af5901c65ba8 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -999,7 +999,7 @@ static bool free_pages_prepare(struct page *page, unsigned int order)
trace_mm_page_free(page, order);
kmemcheck_free_shadow(page, order);
- if (PageAnon(page))
+ if (PageMappingFlags(page))
page->mapping = NULL;
bad += free_pages_check(page);
for (i = 1; i < (1 << order); i++) {
@@ -5319,6 +5319,9 @@ static void __paginginit free_area_init_core(struct pglist_data *pgdat)
#endif
init_waitqueue_head(&pgdat->kswapd_wait);
init_waitqueue_head(&pgdat->pfmemalloc_wait);
+#ifdef CONFIG_COMPACTION
+ init_waitqueue_head(&pgdat->kcompactd_wait);
+#endif
pgdat_page_ext_init(pgdat);
for (j = 0; j < MAX_NR_ZONES; j++) {
diff --git a/mm/util.c b/mm/util.c
index d5259b62f8d7..d41da54e8d83 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -344,10 +344,12 @@ struct address_space *page_mapping(struct page *page)
}
mapping = (unsigned long)page->mapping;
- if (mapping & PAGE_MAPPING_FLAGS)
+ if ((unsigned long)mapping & PAGE_MAPPING_ANON)
return NULL;
- return page->mapping;
+
+ return (void *)((unsigned long)mapping & ~PAGE_MAPPING_FLAGS);
}
+EXPORT_SYMBOL(page_mapping);
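
An illustrative decode of the low mapping bits, assuming the flag values this series relies on elsewhere (PAGE_MAPPING_ANON = 1, PAGE_MAPPING_MOVABLE = 2, PAGE_MAPPING_KSM = ANON | MOVABLE; the helper is hypothetical):

	static bool example_page_is_ksm(struct page *page)
	{
		unsigned long m = (unsigned long)page->mapping;

		return (m & PAGE_MAPPING_FLAGS) == PAGE_MAPPING_KSM;
	}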
int overcommit_ratio_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
diff --git a/mm/vmscan.c b/mm/vmscan.c
index 03f91ee3c71a..6801adacadb0 100644
--- a/mm/vmscan.c
+++ b/mm/vmscan.c
@@ -1274,7 +1274,7 @@ unsigned long reclaim_clean_pages_from_list(struct zone *zone,
list_for_each_entry_safe(page, next, page_list, lru) {
if (page_is_file_cache(page) && !PageDirty(page) &&
- !isolated_balloon_page(page)) {
+ !__PageMovable(page)) {
ClearPageActive(page);
list_move(&page->lru, &clean_pages);
}
@@ -3070,18 +3070,23 @@ static void age_active_anon(struct zone *zone, struct scan_control *sc)
} while (memcg);
}
-static bool zone_balanced(struct zone *zone, int order,
- unsigned long balance_gap, int classzone_idx)
+static bool zone_balanced(struct zone *zone, int order, bool highorder,
+ unsigned long balance_gap, int classzone_idx)
{
- if (!zone_watermark_ok_safe(zone, order, high_wmark_pages(zone) +
- balance_gap, classzone_idx))
- return false;
+ unsigned long mark = high_wmark_pages(zone) + balance_gap;
- if (IS_ENABLED(CONFIG_COMPACTION) && order && compaction_suitable(zone,
- order, 0, classzone_idx) == COMPACT_SKIPPED)
- return false;
+ /*
+ * When checking from pgdat_balanced(), kswapd should stop and sleep
+ * when it reaches the high order-0 watermark and let kcompactd take
+ * over. Other callers such as wakeup_kswapd() want to determine the
+ * true high-order watermark.
+ */
+ if (IS_ENABLED(CONFIG_COMPACTION) && !highorder) {
+ mark += (1UL << order);
+ order = 0;
+ }
- return true;
+ return zone_watermark_ok_safe(zone, order, mark, classzone_idx);
}
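
A worked instance of the new semantics: for a wakeup with order = 3 and highorder = false, the test degenerates to zone_watermark_ok_safe(zone, 0, high_wmark + balance_gap + 8, classzone_idx), i.e. kswapd reclaims order-0 pages until the high watermark is exceeded by 2^3 = 8 extra pages, then leaves assembling the contiguous block to kcompactd.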
/*
@@ -3131,7 +3136,7 @@ static bool pgdat_balanced(pg_data_t *pgdat, int order, int classzone_idx)
continue;
}
- if (zone_balanced(zone, order, 0, i))
+ if (zone_balanced(zone, order, false, 0, i))
balanced_pages += zone->managed_pages;
else if (!order)
return false;
@@ -3185,10 +3190,8 @@ static bool prepare_kswapd_sleep(pg_data_t *pgdat, int order, long remaining,
*/
static bool kswapd_shrink_zone(struct zone *zone,
int classzone_idx,
- struct scan_control *sc,
- unsigned long *nr_attempted)
+ struct scan_control *sc)
{
- int testorder = sc->order;
unsigned long balance_gap;
bool lowmem_pressure;
@@ -3196,17 +3199,6 @@ static bool kswapd_shrink_zone(struct zone *zone,
sc->nr_to_reclaim = max(SWAP_CLUSTER_MAX, high_wmark_pages(zone));
/*
- * Kswapd reclaims only single pages with compaction enabled. Trying
- * too hard to reclaim until contiguous free pages have become
- * available can hurt performance by evicting too much useful data
- * from memory. Do not reclaim more than needed for compaction.
- */
- if (IS_ENABLED(CONFIG_COMPACTION) && sc->order &&
- compaction_suitable(zone, sc->order, 0, classzone_idx)
- != COMPACT_SKIPPED)
- testorder = 0;
-
- /*
* We put equal pressure on every zone, unless one zone has way too
* many pages free already. The "too many pages" is defined as the
* high wmark plus a "gap" where the gap is either the low
@@ -3220,15 +3212,12 @@ static bool kswapd_shrink_zone(struct zone *zone,
* reclaim is necessary
*/
lowmem_pressure = (buffer_heads_over_limit && is_highmem(zone));
- if (!lowmem_pressure && zone_balanced(zone, testorder,
+ if (!lowmem_pressure && zone_balanced(zone, sc->order, false,
balance_gap, classzone_idx))
return true;
shrink_zone(zone, sc, zone_idx(zone) == classzone_idx);
- /* Account for the number of pages attempted to reclaim */
- *nr_attempted += sc->nr_to_reclaim;
-
clear_bit(ZONE_WRITEBACK, &zone->flags);
/*
@@ -3238,7 +3227,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
* waits.
*/
if (zone_reclaimable(zone) &&
- zone_balanced(zone, testorder, 0, classzone_idx)) {
+ zone_balanced(zone, sc->order, false, 0, classzone_idx)) {
clear_bit(ZONE_CONGESTED, &zone->flags);
clear_bit(ZONE_DIRTY, &zone->flags);
}
@@ -3250,7 +3239,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
* For kswapd, balance_pgdat() will work across all this node's zones until
* they are all at high_wmark_pages(zone).
*
- * Returns the final order kswapd was reclaiming at
+ * Returns the highest zone idx kswapd was reclaiming at
*
* There is special handling here for zones which are full of pinned pages.
* This can happen if the pages are all mlocked, or if they are all used by
@@ -3267,8 +3256,7 @@ static bool kswapd_shrink_zone(struct zone *zone,
* interoperates with the page allocator fallback scheme to ensure that aging
* of pages is balanced across the zones.
*/
-static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
- int *classzone_idx)
+static int balance_pgdat(pg_data_t *pgdat, int order, int classzone_idx)
{
int i;
int end_zone = 0; /* Inclusive. 0 = ZONE_DMA */
@@ -3285,9 +3273,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
count_vm_event(PAGEOUTRUN);
do {
- unsigned long nr_attempted = 0;
bool raise_priority = true;
- bool pgdat_needs_compaction = (order > 0);
sc.nr_reclaimed = 0;
@@ -3322,7 +3308,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
break;
}
- if (!zone_balanced(zone, order, 0, 0)) {
+ if (!zone_balanced(zone, order, false, 0, 0)) {
end_zone = i;
break;
} else {
@@ -3338,24 +3324,6 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
if (i < 0)
goto out;
- for (i = 0; i <= end_zone; i++) {
- struct zone *zone = pgdat->node_zones + i;
-
- if (!populated_zone(zone))
- continue;
-
- /*
- * If any zone is currently balanced then kswapd will
- * not call compaction as it is expected that the
- * necessary pages are already available.
- */
- if (pgdat_needs_compaction &&
- zone_watermark_ok(zone, order,
- low_wmark_pages(zone),
- *classzone_idx, 0))
- pgdat_needs_compaction = false;
- }
-
/*
* If we're getting trouble reclaiming, start doing writepage
* even in laptop mode.
@@ -3399,8 +3367,7 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
* that that high watermark would be met at 100%
* efficiency.
*/
- if (kswapd_shrink_zone(zone, end_zone,
- &sc, &nr_attempted))
+ if (kswapd_shrink_zone(zone, end_zone, &sc))
raise_priority = false;
}
@@ -3413,49 +3380,29 @@ static unsigned long balance_pgdat(pg_data_t *pgdat, int order,
pfmemalloc_watermark_ok(pgdat))
wake_up_all(&pgdat->pfmemalloc_wait);
- /*
- * Fragmentation may mean that the system cannot be rebalanced
- * for high-order allocations in all zones. If twice the
- * allocation size has been reclaimed and the zones are still
- * not balanced then recheck the watermarks at order-0 to
- * prevent kswapd reclaiming excessively. Assume that a
- * process requested a high-order can direct reclaim/compact.
- */
- if (order && sc.nr_reclaimed >= 2UL << order)
- order = sc.order = 0;
-
/* Check if kswapd should be suspending */
if (try_to_freeze() || kthread_should_stop())
break;
/*
- * Compact if necessary and kswapd is reclaiming at least the
- * high watermark number of pages as requsted
- */
- if (pgdat_needs_compaction && sc.nr_reclaimed > nr_attempted)
- compact_pgdat(pgdat, order);
-
- /*
* Raise priority if scanning rate is too low or there was no
* progress in reclaiming pages
*/
if (raise_priority || !sc.nr_reclaimed)
sc.priority--;
} while (sc.priority >= 1 &&
- !pgdat_balanced(pgdat, order, *classzone_idx));
+ !pgdat_balanced(pgdat, order, classzone_idx));
out:
/*
- * Return the order we were reclaiming at so prepare_kswapd_sleep()
- * makes a decision on the order we were last reclaiming at. However,
- * if another caller entered the allocator slow path while kswapd
- * was awake, order will remain at the higher level
+ * Return the highest zone idx we were reclaiming at so
+ * prepare_kswapd_sleep() makes the same decisions as here.
*/
- *classzone_idx = end_zone;
- return order;
+ return end_zone;
}
-static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
+static void kswapd_try_to_sleep(pg_data_t *pgdat, int order,
+ int classzone_idx, int balanced_classzone_idx)
{
long remaining = 0;
DEFINE_WAIT(wait);
@@ -3466,7 +3413,22 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
/* Try to sleep for a short interval */
- if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
+ if (prepare_kswapd_sleep(pgdat, order, remaining,
+ balanced_classzone_idx)) {
+ /*
+ * Compaction records what page blocks it recently failed to
+ * isolate pages from and skips them in the future scanning.
+ * When kswapd is going to sleep, it is reasonable to assume
+ * that pages and compaction may succeed so reset the cache.
+ */
+ reset_isolation_suitable(pgdat);
+
+ /*
+ * We have freed the memory, now we should compact it to make
+ * allocation of the requested order possible.
+ */
+ wakeup_kcompactd(pgdat, order, classzone_idx);
+
remaining = schedule_timeout(HZ/10);
finish_wait(&pgdat->kswapd_wait, &wait);
prepare_to_wait(&pgdat->kswapd_wait, &wait, TASK_INTERRUPTIBLE);
@@ -3476,7 +3438,8 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
* After a short sleep, check if it was a premature sleep. If not, then
* go fully to sleep until explicitly woken up.
*/
- if (prepare_kswapd_sleep(pgdat, order, remaining, classzone_idx)) {
+ if (prepare_kswapd_sleep(pgdat, order, remaining,
+ balanced_classzone_idx)) {
trace_mm_vmscan_kswapd_sleep(pgdat->node_id);
/*
@@ -3489,14 +3452,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
*/
set_pgdat_percpu_threshold(pgdat, calculate_normal_threshold);
- /*
- * Compaction records what page blocks it recently failed to
- * isolate pages from and skips them in the future scanning.
- * When kswapd is going to sleep, it is reasonable to assume
- * that pages and compaction may succeed so reset the cache.
- */
- reset_isolation_suitable(pgdat);
-
if (!kthread_should_stop())
schedule();
@@ -3526,7 +3481,6 @@ static void kswapd_try_to_sleep(pg_data_t *pgdat, int order, int classzone_idx)
static int kswapd(void *p)
{
unsigned long order, new_order;
- unsigned balanced_order;
int classzone_idx, new_classzone_idx;
int balanced_classzone_idx;
pg_data_t *pgdat = (pg_data_t*)p;
@@ -3559,23 +3513,19 @@ static int kswapd(void *p)
set_freezable();
order = new_order = 0;
- balanced_order = 0;
classzone_idx = new_classzone_idx = pgdat->nr_zones - 1;
balanced_classzone_idx = classzone_idx;
for ( ; ; ) {
bool ret;
/*
- * If the last balance_pgdat was unsuccessful it's unlikely a
- * new request of a similar or harder type will succeed soon
- * so consider going to sleep on the basis we reclaimed at
+ * While we were reclaiming, there might have been another
+ * wakeup, so check the values.
*/
- if (balanced_order == new_order) {
- new_order = pgdat->kswapd_max_order;
- new_classzone_idx = pgdat->classzone_idx;
- pgdat->kswapd_max_order = 0;
- pgdat->classzone_idx = pgdat->nr_zones - 1;
- }
+ new_order = pgdat->kswapd_max_order;
+ new_classzone_idx = pgdat->classzone_idx;
+ pgdat->kswapd_max_order = 0;
+ pgdat->classzone_idx = pgdat->nr_zones - 1;
if (order < new_order || classzone_idx > new_classzone_idx) {
/*
@@ -3585,7 +3535,7 @@ static int kswapd(void *p)
order = new_order;
classzone_idx = new_classzone_idx;
} else {
- kswapd_try_to_sleep(pgdat, balanced_order,
+ kswapd_try_to_sleep(pgdat, order, classzone_idx,
balanced_classzone_idx);
order = pgdat->kswapd_max_order;
classzone_idx = pgdat->classzone_idx;
@@ -3605,9 +3555,8 @@ static int kswapd(void *p)
*/
if (!ret) {
trace_mm_vmscan_kswapd_wake(pgdat->node_id, order);
- balanced_classzone_idx = classzone_idx;
- balanced_order = balance_pgdat(pgdat, order,
- &balanced_classzone_idx);
+ balanced_classzone_idx = balance_pgdat(pgdat, order,
+ classzone_idx);
}
}
@@ -3637,7 +3586,7 @@ void wakeup_kswapd(struct zone *zone, int order, enum zone_type classzone_idx)
}
if (!waitqueue_active(&pgdat->kswapd_wait))
return;
- if (zone_balanced(zone, order, 0, 0))
+ if (zone_balanced(zone, order, true, 0, 0))
return;
trace_mm_vmscan_wakeup_kswapd(pgdat->node_id, zone_idx(zone), order);
diff --git a/mm/vmstat.c b/mm/vmstat.c
index 77b8eabd5446..b8f2eda3381d 100644
--- a/mm/vmstat.c
+++ b/mm/vmstat.c
@@ -827,6 +827,7 @@ const char * const vmstat_text[] = {
"compact_stall",
"compact_fail",
"compact_success",
+ "compact_daemon_wake",
#endif
#ifdef CONFIG_HUGETLB_PAGE
diff --git a/mm/zsmalloc.c b/mm/zsmalloc.c
index c1ea19478119..1eb00e343523 100644
--- a/mm/zsmalloc.c
+++ b/mm/zsmalloc.c
@@ -16,32 +16,15 @@
* struct page(s) to form a zspage.
*
* Usage of struct page fields:
- * page->private: points to the first component (0-order) page
- * page->index (union with page->freelist): offset of the first object
- * starting in this page. For the first page, this is
- * always 0, so we use this field (aka freelist) to point
- * to the first free object in zspage.
- * page->lru: links together all component pages (except the first page)
- * of a zspage
- *
- * For _first_ page only:
- *
- * page->private: refers to the component page after the first page
- * If the page is first_page for huge object, it stores handle.
- * Look at size_class->huge.
- * page->freelist: points to the first free object in zspage.
- * Free objects are linked together using in-place
- * metadata.
- * page->objects: maximum number of objects we can store in this
- * zspage (class->zspage_order * PAGE_SIZE / class->size)
- * page->lru: links together first pages of various zspages.
- * Basically forming list of zspages in a fullness group.
- * page->mapping: class index and fullness group of the zspage
- * page->inuse: the number of objects that are used in this zspage
+ * page->private: points to zspage
+ * page->freelist(index): links together all component pages of a zspage.
+ * For a huge-object page, this is always 0, so we use this field
+ * to store the handle.
*
* Usage of struct page flags:
* PG_private: identifies the first component page
* PG_private2: identifies the last component page
+ * PG_owner_priv_1: identifies the huge component page
*
*/
@@ -64,6 +47,11 @@
#include <linux/debugfs.h>
#include <linux/zsmalloc.h>
#include <linux/zpool.h>
+#include <linux/mount.h>
+#include <linux/migrate.h>
+#include <linux/pagemap.h>
+
+#define ZSPAGE_MAGIC 0x58
/*
 * This must be a power of 2 and greater than or equal to sizeof(link_free).
@@ -86,9 +74,7 @@
* Object location (<PFN>, <obj_idx>) is encoded as
* as single (unsigned long) handle value.
*
- * Note that object index <obj_idx> is relative to system
- * page <PFN> it is stored in, so for each sub-page belonging
- * to a zspage, obj_idx starts with 0.
+ * Note that object index <obj_idx> starts from 0.
*
* This is made more complicated by various memory models and PAE.
*/
@@ -147,33 +133,29 @@
* ZS_MIN_ALLOC_SIZE and ZS_SIZE_CLASS_DELTA must be multiple of ZS_ALIGN
* (reason above)
*/
-#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> 8)
+#define ZS_SIZE_CLASS_DELTA (PAGE_SIZE >> CLASS_BITS)
/*
* We do not maintain any list for completely empty or full pages
*/
enum fullness_group {
- ZS_ALMOST_FULL,
- ZS_ALMOST_EMPTY,
- _ZS_NR_FULLNESS_GROUPS,
-
ZS_EMPTY,
- ZS_FULL
+ ZS_ALMOST_EMPTY,
+ ZS_ALMOST_FULL,
+ ZS_FULL,
+ NR_ZS_FULLNESS,
};
enum zs_stat_type {
+ CLASS_EMPTY,
+ CLASS_ALMOST_EMPTY,
+ CLASS_ALMOST_FULL,
+ CLASS_FULL,
OBJ_ALLOCATED,
OBJ_USED,
- CLASS_ALMOST_FULL,
- CLASS_ALMOST_EMPTY,
+ NR_ZS_STAT_TYPE,
};
-#ifdef CONFIG_ZSMALLOC_STAT
-#define NR_ZS_STAT_TYPE (CLASS_ALMOST_EMPTY + 1)
-#else
-#define NR_ZS_STAT_TYPE (OBJ_USED + 1)
-#endif
-
struct zs_size_stat {
unsigned long objs[NR_ZS_STAT_TYPE];
};
@@ -182,6 +164,10 @@ struct zs_size_stat {
static struct dentry *zs_stat_root;
#endif
+#ifdef CONFIG_COMPACTION
+static struct vfsmount *zsmalloc_mnt;
+#endif
+
/*
* number of size_classes
*/
@@ -205,35 +191,49 @@ static const int fullness_threshold_frac = 4;
struct size_class {
spinlock_t lock;
- struct page *fullness_list[_ZS_NR_FULLNESS_GROUPS];
+ struct list_head fullness_list[NR_ZS_FULLNESS];
/*
* Size of objects stored in this class. Must be multiple
* of ZS_ALIGN.
*/
int size;
- unsigned int index;
-
+ int objs_per_zspage;
/* Number of PAGE_SIZE sized pages to combine to form a 'zspage' */
int pages_per_zspage;
- struct zs_size_stat stats;
- /* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
- bool huge;
+ unsigned int index;
+ struct zs_size_stat stats;
};
+/* huge object: pages_per_zspage == 1 && maxobj_per_zspage == 1 */
+static void SetPageHugeObject(struct page *page)
+{
+ SetPageOwnerPriv1(page);
+}
+
+static void ClearPageHugeObject(struct page *page)
+{
+ ClearPageOwnerPriv1(page);
+}
+
+static int PageHugeObject(struct page *page)
+{
+ return PageOwnerPriv1(page);
+}
+
/*
* Placed within free objects to form a singly linked list.
- * For every zspage, first_page->freelist gives head of this list.
+ * For every zspage, zspage->freeobj gives the head of this list.
*
* This must be power of 2 and less than or equal to ZS_ALIGN
*/
struct link_free {
union {
/*
- * Position of next free chunk (encodes <PFN, obj_idx>)
+ * Free object index;
* It's valid for non-allocated object
*/
- void *next;
+ unsigned long next;
/*
* Handle of allocated object.
*/
@@ -246,8 +246,8 @@ struct zs_pool {
struct size_class **size_class;
struct kmem_cache *handle_cachep;
+ struct kmem_cache *zspage_cachep;
- gfp_t flags; /* allocation flags used when growing pool */
atomic_long_t pages_allocated;
struct zs_pool_stats stats;
@@ -262,16 +262,36 @@ struct zs_pool {
#ifdef CONFIG_ZSMALLOC_STAT
struct dentry *stat_dentry;
#endif
+#ifdef CONFIG_COMPACTION
+ struct inode *inode;
+ struct work_struct free_work;
+#endif
};
/*
* A zspage's class index and fullness group
* are encoded in its (first)page->mapping
*/
-#define CLASS_IDX_BITS 28
-#define FULLNESS_BITS 4
-#define CLASS_IDX_MASK ((1 << CLASS_IDX_BITS) - 1)
-#define FULLNESS_MASK ((1 << FULLNESS_BITS) - 1)
+#define FULLNESS_BITS 2
+#define CLASS_BITS 8
+#define ISOLATED_BITS 3
+#define MAGIC_VAL_BITS 8
+
+struct zspage {
+ struct {
+ unsigned int fullness:FULLNESS_BITS;
+ unsigned int class:CLASS_BITS;
+ unsigned int isolated:ISOLATED_BITS;
+ unsigned int magic:MAGIC_VAL_BITS;
+ };
+ unsigned int inuse;
+ unsigned int freeobj;
+ struct page *first_page;
+ struct list_head list; /* fullness list */
+#ifdef CONFIG_COMPACTION
+ rwlock_t lock;
+#endif
+};
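
A sketch of the compile-time checks the bit budget above implies (these asserts are not in the patch; BUILD_BUG_ON is the stock kernel helper):

	static inline void zspage_layout_checks(void)
	{
		BUILD_BUG_ON(NR_ZS_FULLNESS > (1 << FULLNESS_BITS));	/* 4 <= 4 */
		BUILD_BUG_ON(ZSPAGE_MAGIC >= (1 << MAGIC_VAL_BITS));	/* 0x58 < 256 */
	}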
struct mapping_area {
#ifdef CONFIG_PGTABLE_MAPPING
@@ -281,32 +301,76 @@ struct mapping_area {
#endif
char *vm_addr; /* address of kmap_atomic()'ed pages */
enum zs_mapmode vm_mm; /* mapping mode */
- bool huge;
};
-static int create_handle_cache(struct zs_pool *pool)
+#ifdef CONFIG_COMPACTION
+static int zs_register_migration(struct zs_pool *pool);
+static void zs_unregister_migration(struct zs_pool *pool);
+static void migrate_lock_init(struct zspage *zspage);
+static void migrate_read_lock(struct zspage *zspage);
+static void migrate_read_unlock(struct zspage *zspage);
+static void kick_deferred_free(struct zs_pool *pool);
+static void init_deferred_free(struct zs_pool *pool);
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage);
+#else
+static int zsmalloc_mount(void) { return 0; }
+static void zsmalloc_unmount(void) {}
+static int zs_register_migration(struct zs_pool *pool) { return 0; }
+static void zs_unregister_migration(struct zs_pool *pool) {}
+static void migrate_lock_init(struct zspage *zspage) {}
+static void migrate_read_lock(struct zspage *zspage) {}
+static void migrate_read_unlock(struct zspage *zspage) {}
+static void kick_deferred_free(struct zs_pool *pool) {}
+static void init_deferred_free(struct zs_pool *pool) {}
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage) {}
+#endif
+
+static int create_cache(struct zs_pool *pool)
{
pool->handle_cachep = kmem_cache_create("zs_handle", ZS_HANDLE_SIZE,
0, 0, NULL);
- return pool->handle_cachep ? 0 : 1;
+ if (!pool->handle_cachep)
+ return 1;
+
+ pool->zspage_cachep = kmem_cache_create("zspage", sizeof(struct zspage),
+ 0, 0, NULL);
+ if (!pool->zspage_cachep) {
+ kmem_cache_destroy(pool->handle_cachep);
+ pool->handle_cachep = NULL;
+ return 1;
+ }
+
+ return 0;
}
-static void destroy_handle_cache(struct zs_pool *pool)
+static void destroy_cache(struct zs_pool *pool)
{
kmem_cache_destroy(pool->handle_cachep);
+ kmem_cache_destroy(pool->zspage_cachep);
}
-static unsigned long alloc_handle(struct zs_pool *pool)
+static unsigned long cache_alloc_handle(struct zs_pool *pool, gfp_t gfp)
{
return (unsigned long)kmem_cache_alloc(pool->handle_cachep,
- pool->flags & ~__GFP_HIGHMEM);
+ gfp & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
}
-static void free_handle(struct zs_pool *pool, unsigned long handle)
+static void cache_free_handle(struct zs_pool *pool, unsigned long handle)
{
kmem_cache_free(pool->handle_cachep, (void *)handle);
}
+static struct zspage *cache_alloc_zspage(struct zs_pool *pool, gfp_t flags)
+{
+ return kmem_cache_alloc(pool->zspage_cachep,
+ flags & ~(__GFP_HIGHMEM|__GFP_MOVABLE));
+}
+
+static void cache_free_zspage(struct zs_pool *pool, struct zspage *zspage)
+{
+ kmem_cache_free(pool->zspage_cachep, zspage);
+}
+
static void record_obj(unsigned long handle, unsigned long obj)
{
/*
@@ -325,7 +389,12 @@ static void *zs_zpool_create(const char *name, gfp_t gfp,
const struct zpool_ops *zpool_ops,
struct zpool *zpool)
{
- return zs_create_pool(name, gfp);
+ /*
+ * Ignore global gfp flags: zs_malloc() may be invoked from
+ * different contexts and its caller must provide a valid
+ * gfp mask.
+ */
+ return zs_create_pool(name);
}
static void zs_zpool_destroy(void *pool)
@@ -336,7 +405,7 @@ static void zs_zpool_destroy(void *pool)
static int zs_zpool_malloc(void *pool, size_t size, gfp_t gfp,
unsigned long *handle)
{
- *handle = zs_malloc(pool, size);
+ *handle = zs_malloc(pool, size, gfp);
return *handle ? 0 : -1;
}
static void zs_zpool_free(void *pool, unsigned long handle)
@@ -404,36 +473,76 @@ static unsigned int get_maxobj_per_zspage(int size, int pages_per_zspage)
/* per-cpu VM mapping areas for zspage accesses that cross page boundaries */
static DEFINE_PER_CPU(struct mapping_area, zs_map_area);
+static bool is_zspage_isolated(struct zspage *zspage)
+{
+ return zspage->isolated;
+}
+
static int is_first_page(struct page *page)
{
return PagePrivate(page);
}
-static int is_last_page(struct page *page)
+/* Protected by class->lock */
+static inline int get_zspage_inuse(struct zspage *zspage)
+{
+ return zspage->inuse;
+}
+
+static inline void set_zspage_inuse(struct zspage *zspage, int val)
+{
+ zspage->inuse = val;
+}
+
+static inline void mod_zspage_inuse(struct zspage *zspage, int val)
+{
+ zspage->inuse += val;
+}
+
+static inline struct page *get_first_page(struct zspage *zspage)
{
- return PagePrivate2(page);
+ struct page *first_page = zspage->first_page;
+
+ VM_BUG_ON_PAGE(!is_first_page(first_page), first_page);
+ return first_page;
}
-static void get_zspage_mapping(struct page *page, unsigned int *class_idx,
+static inline int get_first_obj_offset(struct page *page)
+{
+ return page->units;
+}
+
+static inline void set_first_obj_offset(struct page *page, int offset)
+{
+ page->units = offset;
+}
+
+static inline unsigned int get_freeobj(struct zspage *zspage)
+{
+ return zspage->freeobj;
+}
+
+static inline void set_freeobj(struct zspage *zspage, unsigned int obj)
+{
+ zspage->freeobj = obj;
+}
+
+static void get_zspage_mapping(struct zspage *zspage,
+ unsigned int *class_idx,
enum fullness_group *fullness)
{
- unsigned long m;
- BUG_ON(!is_first_page(page));
+ BUG_ON(zspage->magic != ZSPAGE_MAGIC);
- m = (unsigned long)page->mapping;
- *fullness = m & FULLNESS_MASK;
- *class_idx = (m >> FULLNESS_BITS) & CLASS_IDX_MASK;
+ *fullness = zspage->fullness;
+ *class_idx = zspage->class;
}
-static void set_zspage_mapping(struct page *page, unsigned int class_idx,
+static void set_zspage_mapping(struct zspage *zspage,
+ unsigned int class_idx,
enum fullness_group fullness)
{
- unsigned long m;
- BUG_ON(!is_first_page(page));
-
- m = ((class_idx & CLASS_IDX_MASK) << FULLNESS_BITS) |
- (fullness & FULLNESS_MASK);
- page->mapping = (struct address_space *)m;
+ zspage->class = class_idx;
+ zspage->fullness = fullness;
}
/*
@@ -457,23 +566,19 @@ static int get_size_class_index(int size)
static inline void zs_stat_inc(struct size_class *class,
enum zs_stat_type type, unsigned long cnt)
{
- if (type < NR_ZS_STAT_TYPE)
- class->stats.objs[type] += cnt;
+ class->stats.objs[type] += cnt;
}
static inline void zs_stat_dec(struct size_class *class,
enum zs_stat_type type, unsigned long cnt)
{
- if (type < NR_ZS_STAT_TYPE)
- class->stats.objs[type] -= cnt;
+ class->stats.objs[type] -= cnt;
}
static inline unsigned long zs_stat_get(struct size_class *class,
enum zs_stat_type type)
{
- if (type < NR_ZS_STAT_TYPE)
- return class->stats.objs[type];
- return 0;
+ return class->stats.objs[type];
}
#ifdef CONFIG_ZSMALLOC_STAT
@@ -495,6 +600,8 @@ static void __exit zs_stat_exit(void)
debugfs_remove_recursive(zs_stat_root);
}
+static unsigned long zs_can_compact(struct size_class *class);
+
static int zs_stats_size_show(struct seq_file *s, void *v)
{
int i;
@@ -502,14 +609,15 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
struct size_class *class;
int objs_per_zspage;
unsigned long class_almost_full, class_almost_empty;
- unsigned long obj_allocated, obj_used, pages_used;
+ unsigned long obj_allocated, obj_used, pages_used, freeable;
unsigned long total_class_almost_full = 0, total_class_almost_empty = 0;
unsigned long total_objs = 0, total_used_objs = 0, total_pages = 0;
+ unsigned long total_freeable = 0;
- seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s\n",
+ seq_printf(s, " %5s %5s %11s %12s %13s %10s %10s %16s %8s\n",
"class", "size", "almost_full", "almost_empty",
"obj_allocated", "obj_used", "pages_used",
- "pages_per_zspage");
+ "pages_per_zspage", "freeable");
for (i = 0; i < zs_size_classes; i++) {
class = pool->size_class[i];
@@ -522,6 +630,7 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
class_almost_empty = zs_stat_get(class, CLASS_ALMOST_EMPTY);
obj_allocated = zs_stat_get(class, OBJ_ALLOCATED);
obj_used = zs_stat_get(class, OBJ_USED);
+ freeable = zs_can_compact(class);
spin_unlock(&class->lock);
objs_per_zspage = get_maxobj_per_zspage(class->size,
@@ -529,23 +638,25 @@ static int zs_stats_size_show(struct seq_file *s, void *v)
pages_used = obj_allocated / objs_per_zspage *
class->pages_per_zspage;
- seq_printf(s, " %5u %5u %11lu %12lu %13lu %10lu %10lu %16d\n",
+ seq_printf(s, " %5u %5u %11lu %12lu %13lu"
+ " %10lu %10lu %16d %8lu\n",
i, class->size, class_almost_full, class_almost_empty,
obj_allocated, obj_used, pages_used,
- class->pages_per_zspage);
+ class->pages_per_zspage, freeable);
total_class_almost_full += class_almost_full;
total_class_almost_empty += class_almost_empty;
total_objs += obj_allocated;
total_used_objs += obj_used;
total_pages += pages_used;
+ total_freeable += freeable;
}
seq_puts(s, "\n");
- seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu\n",
+ seq_printf(s, " %5s %5s %11lu %12lu %13lu %10lu %10lu %16s %8lu\n",
"Total", "", total_class_almost_full,
total_class_almost_empty, total_objs,
- total_used_objs, total_pages);
+ total_used_objs, total_pages, "", total_freeable);
return 0;
}
@@ -562,7 +673,7 @@ static const struct file_operations zs_stat_size_ops = {
.release = single_release,
};
-static int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static int zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
struct dentry *entry;
@@ -602,7 +713,7 @@ static void __exit zs_stat_exit(void)
{
}
-static inline int zs_pool_stat_create(const char *name, struct zs_pool *pool)
+static inline int zs_pool_stat_create(struct zs_pool *pool, const char *name)
{
return 0;
}
@@ -620,20 +731,20 @@ static inline void zs_pool_stat_destroy(struct zs_pool *pool)
* the pool (not yet implemented). This function returns fullness
* status of the given page.
*/
-static enum fullness_group get_fullness_group(struct page *page)
+static enum fullness_group get_fullness_group(struct size_class *class,
+ struct zspage *zspage)
{
- int inuse, max_objects;
+ int inuse, objs_per_zspage;
enum fullness_group fg;
- BUG_ON(!is_first_page(page));
- inuse = page->inuse;
- max_objects = page->objects;
+ inuse = get_zspage_inuse(zspage);
+ objs_per_zspage = class->objs_per_zspage;
if (inuse == 0)
fg = ZS_EMPTY;
- else if (inuse == max_objects)
+ else if (inuse == objs_per_zspage)
fg = ZS_FULL;
- else if (inuse <= 3 * max_objects / fullness_threshold_frac)
+ else if (inuse <= 3 * objs_per_zspage / fullness_threshold_frac)
fg = ZS_ALMOST_EMPTY;
else
fg = ZS_ALMOST_FULL;
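
/*
 * Illustrative only: the classification above reduces to a pure function
 * of the in-use count and the zspage capacity. A minimal sketch, assuming
 * fullness_threshold_frac == 4 as defined in this file; the demo_* names
 * are stand-ins.
 */
enum demo_fullness { DEMO_EMPTY, DEMO_ALMOST_EMPTY, DEMO_ALMOST_FULL, DEMO_FULL };

static enum demo_fullness demo_classify(int inuse, int objs_per_zspage)
{
	if (inuse == 0)
		return DEMO_EMPTY;
	if (inuse == objs_per_zspage)
		return DEMO_FULL;
	if (inuse <= 3 * objs_per_zspage / 4)	/* fullness_threshold_frac */
		return DEMO_ALMOST_EMPTY;
	return DEMO_ALMOST_FULL;
}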
@@ -647,59 +758,41 @@ static enum fullness_group get_fullness_group(struct page *page)
* have. This function inserts the given zspage into the freelist
* identified by <class, fullness_group>.
*/
-static void insert_zspage(struct page *page, struct size_class *class,
+static void insert_zspage(struct size_class *class,
+ struct zspage *zspage,
enum fullness_group fullness)
{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- zs_stat_inc(class, fullness == ZS_ALMOST_EMPTY ?
- CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
-
- head = &class->fullness_list[fullness];
- if (!*head) {
- *head = page;
- return;
- }
+ struct zspage *head;
+ zs_stat_inc(class, fullness, 1);
+ head = list_first_entry_or_null(&class->fullness_list[fullness],
+ struct zspage, list);
/*
- * We want to see more ZS_FULL pages and less almost
- * empty/full. Put pages with higher ->inuse first.
+ * We want to see more ZS_FULL pages and fewer almost-empty/full ones.
+ * Put pages with higher ->inuse first.
*/
- list_add_tail(&page->lru, &(*head)->lru);
- if (page->inuse >= (*head)->inuse)
- *head = page;
+ if (head) {
+ if (get_zspage_inuse(zspage) < get_zspage_inuse(head)) {
+ list_add(&zspage->list, &head->list);
+ return;
+ }
+ }
+ list_add(&zspage->list, &class->fullness_list[fullness]);
}
/*
* This function removes the given zspage from the freelist identified
* by <class, fullness_group>.
*/
-static void remove_zspage(struct page *page, struct size_class *class,
+static void remove_zspage(struct size_class *class,
+ struct zspage *zspage,
enum fullness_group fullness)
{
- struct page **head;
-
- BUG_ON(!is_first_page(page));
-
- if (fullness >= _ZS_NR_FULLNESS_GROUPS)
- return;
-
- head = &class->fullness_list[fullness];
- BUG_ON(!*head);
- if (list_empty(&(*head)->lru))
- *head = NULL;
- else if (*head == page)
- *head = (struct page *)list_entry((*head)->lru.next,
- struct page, lru);
+ VM_BUG_ON(list_empty(&class->fullness_list[fullness]));
+ VM_BUG_ON(is_zspage_isolated(zspage));
- list_del_init(&page->lru);
- zs_stat_dec(class, fullness == ZS_ALMOST_EMPTY ?
- CLASS_ALMOST_EMPTY : CLASS_ALMOST_FULL, 1);
+ list_del_init(&zspage->list);
+ zs_stat_dec(class, fullness, 1);
}
/*
@@ -712,21 +805,22 @@ static void remove_zspage(struct page *page, struct size_class *class,
* fullness group.
*/
static enum fullness_group fix_fullness_group(struct size_class *class,
- struct page *page)
+ struct zspage *zspage)
{
int class_idx;
enum fullness_group currfg, newfg;
- BUG_ON(!is_first_page(page));
-
- get_zspage_mapping(page, &class_idx, &currfg);
- newfg = get_fullness_group(page);
+ get_zspage_mapping(zspage, &class_idx, &currfg);
+ newfg = get_fullness_group(class, zspage);
if (newfg == currfg)
goto out;
- remove_zspage(page, class, currfg);
- insert_zspage(page, class, newfg);
- set_zspage_mapping(page, class_idx, newfg);
+ if (!is_zspage_isolated(zspage)) {
+ remove_zspage(class, zspage, currfg);
+ insert_zspage(class, zspage, newfg);
+ }
+
+ set_zspage_mapping(zspage, class_idx, newfg);
out:
return newfg;
@@ -768,64 +862,49 @@ static int get_pages_per_zspage(int class_size)
return max_usedpc_order;
}
-/*
- * A single 'zspage' is composed of many system pages which are
- * linked together using fields in struct page. This function finds
- * the first/head page, given any component page of a zspage.
- */
-static struct page *get_first_page(struct page *page)
+static struct zspage *get_zspage(struct page *page)
{
- if (is_first_page(page))
- return page;
- else
- return (struct page *)page_private(page);
+ struct zspage *zspage = (struct zspage *)page->private;
+
+ BUG_ON(zspage->magic != ZSPAGE_MAGIC);
+ return zspage;
}
static struct page *get_next_page(struct page *page)
{
- struct page *next;
+ if (unlikely(PageHugeObject(page)))
+ return NULL;
- if (is_last_page(page))
- next = NULL;
- else if (is_first_page(page))
- next = (struct page *)page_private(page);
- else
- next = list_entry(page->lru.next, struct page, lru);
+ return page->freelist;
+}
- return next;
+/**
+ * obj_to_location - get (<page>, <obj_idx>) from encoded object value
+ * @page: page in which the object resides
+ * @obj_idx: object index
+ */
+static void obj_to_location(unsigned long obj, struct page **page,
+ unsigned int *obj_idx)
+{
+ obj >>= OBJ_TAG_BITS;
+ *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
+ *obj_idx = (obj & OBJ_INDEX_MASK);
}
-/*
- * Encode <page, obj_idx> as a single handle value.
- * We use the least bit of handle for tagging.
+/**
+ * location_to_obj - get obj value encoded from (<page>, <obj_idx>)
+ * @page: page in which the object resides
+ * @obj_idx: object index
*/
-static void *location_to_obj(struct page *page, unsigned long obj_idx)
+static unsigned long location_to_obj(struct page *page, unsigned int obj_idx)
{
unsigned long obj;
- if (!page) {
- BUG_ON(obj_idx);
- return NULL;
- }
-
obj = page_to_pfn(page) << OBJ_INDEX_BITS;
- obj |= ((obj_idx) & OBJ_INDEX_MASK);
+ obj |= obj_idx & OBJ_INDEX_MASK;
obj <<= OBJ_TAG_BITS;
- return (void *)obj;
-}
-
-/*
- * Decode <page, obj_idx> pair from the given object handle. We adjust the
- * decoded obj_idx back to its original value since it was adjusted in
- * location_to_obj().
- */
-static void obj_to_location(unsigned long obj, struct page **page,
- unsigned long *obj_idx)
-{
- obj >>= OBJ_TAG_BITS;
- *page = pfn_to_page(obj >> OBJ_INDEX_BITS);
- *obj_idx = (obj & OBJ_INDEX_MASK);
+ return obj;
}
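
/*
 * Illustrative only: obj_to_location() and location_to_obj() form a
 * lossless round trip while obj_idx fits in OBJ_INDEX_BITS. The DEMO_*
 * constants below are assumptions standing in for the real OBJ_* macros.
 */
#define DEMO_OBJ_TAG_BITS	1
#define DEMO_OBJ_INDEX_BITS	10
#define DEMO_OBJ_INDEX_MASK	((1UL << DEMO_OBJ_INDEX_BITS) - 1)

static unsigned long demo_encode(unsigned long pfn, unsigned int obj_idx)
{
	/* pack pfn above the index, then shift to leave room for the tag */
	return ((pfn << DEMO_OBJ_INDEX_BITS) |
		(obj_idx & DEMO_OBJ_INDEX_MASK)) << DEMO_OBJ_TAG_BITS;
}

static void demo_decode(unsigned long obj, unsigned long *pfn,
			unsigned int *obj_idx)
{
	obj >>= DEMO_OBJ_TAG_BITS;
	*pfn = obj >> DEMO_OBJ_INDEX_BITS;
	*obj_idx = obj & DEMO_OBJ_INDEX_MASK;
}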
static unsigned long handle_to_obj(unsigned long handle)
@@ -833,108 +912,146 @@ static unsigned long handle_to_obj(unsigned long handle)
return *(unsigned long *)handle;
}
-static unsigned long obj_to_head(struct size_class *class, struct page *page,
- void *obj)
+static unsigned long obj_to_head(struct page *page, void *obj)
{
- if (class->huge) {
- VM_BUG_ON(!is_first_page(page));
- return page_private(page);
+ if (unlikely(PageHugeObject(page))) {
+ VM_BUG_ON_PAGE(!is_first_page(page), page);
+ return page->index;
} else
return *(unsigned long *)obj;
}
-static unsigned long obj_idx_to_offset(struct page *page,
- unsigned long obj_idx, int class_size)
+static inline int testpin_tag(unsigned long handle)
{
- unsigned long off = 0;
-
- if (!is_first_page(page))
- off = page->index;
-
- return off + obj_idx * class_size;
+ return bit_spin_is_locked(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static inline int trypin_tag(unsigned long handle)
{
- unsigned long *ptr = (unsigned long *)handle;
-
- return !test_and_set_bit_lock(HANDLE_PIN_BIT, ptr);
+ return bit_spin_trylock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static void pin_tag(unsigned long handle)
{
- while (!trypin_tag(handle));
+ bit_spin_lock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
static void unpin_tag(unsigned long handle)
{
- unsigned long *ptr = (unsigned long *)handle;
-
- clear_bit_unlock(HANDLE_PIN_BIT, ptr);
+ bit_spin_unlock(HANDLE_PIN_BIT, (unsigned long *)handle);
}
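
/*
 * Illustrative only: the handle word itself carries the lock. Because
 * HANDLE_PIN_BIT is a bit spinlock inside the handle, pinning needs no
 * extra storage. A sketch of the expected usage pattern:
 */
static void demo_pin_cycle(unsigned long handle)
{
	if (!trypin_tag(handle))	/* non-blocking attempt first */
		pin_tag(handle);	/* otherwise spin until the bit frees */

	/* ... the object cannot be migrated while pinned ... */

	unpin_tag(handle);
}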
static void reset_page(struct page *page)
{
+ __ClearPageMovable(page);
clear_bit(PG_private, &page->flags);
clear_bit(PG_private_2, &page->flags);
set_page_private(page, 0);
- page->mapping = NULL;
- page->freelist = NULL;
page_mapcount_reset(page);
+ ClearPageHugeObject(page);
+ page->freelist = NULL;
}
-static void free_zspage(struct page *first_page)
+/*
+ * To prevent zspage destroy during migration, zspage freeing should
+ * hold locks of all pages in the zspage.
+ */
+void lock_zspage(struct zspage *zspage)
{
- struct page *nextp, *tmp, *head_extra;
+ struct page *page = get_first_page(zspage);
- BUG_ON(!is_first_page(first_page));
- BUG_ON(first_page->inuse);
+ do {
+ lock_page(page);
+ } while ((page = get_next_page(page)) != NULL);
+}
- head_extra = (struct page *)page_private(first_page);
+int trylock_zspage(struct zspage *zspage)
+{
+ struct page *cursor, *fail;
- reset_page(first_page);
- __free_page(first_page);
+ for (cursor = get_first_page(zspage); cursor != NULL; cursor =
+ get_next_page(cursor)) {
+ if (!trylock_page(cursor)) {
+ fail = cursor;
+ goto unlock;
+ }
+ }
- /* zspage with only 1 system page */
- if (!head_extra)
- return;
+ return 1;
+unlock:
+ for (cursor = get_first_page(zspage); cursor != fail; cursor =
+ get_next_page(cursor))
+ unlock_page(cursor);
+
+ return 0;
+}
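
/*
 * Illustrative only: trylock_zspage() is all-or-nothing -- on return,
 * either every subpage lock is held or none is. A sketch of the caller's
 * expected reaction, mirroring free_zspage() below:
 */
static void demo_try_free(struct zs_pool *pool, struct zspage *zspage)
{
	if (!trylock_zspage(zspage)) {
		/* a subpage is locked (e.g. by migration): defer the free */
		kick_deferred_free(pool);
		return;
	}
	/* all subpage locks held: safe to tear the zspage down */
}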
+
+static void __free_zspage(struct zs_pool *pool, struct size_class *class,
+ struct zspage *zspage)
+{
+ struct page *page, *next;
+ enum fullness_group fg;
+ unsigned int class_idx;
+
+ get_zspage_mapping(zspage, &class_idx, &fg);
+
+ assert_spin_locked(&class->lock);
+
+ VM_BUG_ON(get_zspage_inuse(zspage));
+ VM_BUG_ON(fg != ZS_EMPTY);
+
+ next = page = get_first_page(zspage);
+ do {
+ VM_BUG_ON_PAGE(!PageLocked(page), page);
+ next = get_next_page(page);
+ reset_page(page);
+ unlock_page(page);
+ put_page(page);
+ page = next;
+ } while (page != NULL);
- list_for_each_entry_safe(nextp, tmp, &head_extra->lru, lru) {
- list_del(&nextp->lru);
- reset_page(nextp);
- __free_page(nextp);
+ cache_free_zspage(pool, zspage);
+
+ zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
+ atomic_long_sub(class->pages_per_zspage,
+ &pool->pages_allocated);
+}
+
+static void free_zspage(struct zs_pool *pool, struct size_class *class,
+ struct zspage *zspage)
+{
+ VM_BUG_ON(get_zspage_inuse(zspage));
+ VM_BUG_ON(list_empty(&zspage->list));
+
+ if (!trylock_zspage(zspage)) {
+ kick_deferred_free(pool);
+ return;
}
- reset_page(head_extra);
- __free_page(head_extra);
+
+ remove_zspage(class, zspage, ZS_EMPTY);
+ __free_zspage(pool, class, zspage);
}
/* Initialize a newly allocated zspage */
-static void init_zspage(struct page *first_page, struct size_class *class)
+static void init_zspage(struct size_class *class, struct zspage *zspage)
{
+ unsigned int freeobj = 1;
unsigned long off = 0;
- struct page *page = first_page;
+ struct page *page = get_first_page(zspage);
- BUG_ON(!is_first_page(first_page));
while (page) {
struct page *next_page;
struct link_free *link;
- unsigned int i = 1;
void *vaddr;
- /*
- * page->index stores offset of first object starting
- * in the page. For the first page, this is always 0,
- * so we use first_page->index (aka ->freelist) to store
- * head of corresponding zspage's freelist.
- */
- if (page != first_page)
- page->index = off;
+ set_first_obj_offset(page, off);
vaddr = kmap_atomic(page);
link = (struct link_free *)vaddr + off / sizeof(*link);
while ((off += class->size) < PAGE_SIZE) {
- link->next = location_to_obj(page, i++);
+ link->next = freeobj++ << OBJ_ALLOCATED_TAG;
link += class->size / sizeof(*link);
}
@@ -944,87 +1061,108 @@ static void init_zspage(struct page *first_page, struct size_class *class)
* page (if present)
*/
next_page = get_next_page(page);
- link->next = location_to_obj(next_page, 0);
+ if (next_page) {
+ link->next = freeobj++ << OBJ_ALLOCATED_TAG;
+ } else {
+ /*
+ * Clear the OBJ_ALLOCATED_TAG bit in the last link so it can
+ * be distinguished from an allocated object.
+ */
+ link->next = -(1UL << OBJ_ALLOCATED_TAG);
+ }
kunmap_atomic(vaddr);
page = next_page;
off %= PAGE_SIZE;
}
+
+ set_freeobj(zspage, 0);
}
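
/*
 * Illustrative only: the freelist is index-based rather than pointer-based.
 * Free slot i stores the index of the next free slot, shifted so the
 * OBJ_ALLOCATED_TAG bit stays clear and the word cannot be mistaken for an
 * allocated object's handle.
 */
static unsigned long demo_slot_value(unsigned int next_free_idx)
{
	return (unsigned long)next_free_idx << OBJ_ALLOCATED_TAG;
}

static unsigned int demo_slot_index(unsigned long slot)
{
	return slot >> OBJ_ALLOCATED_TAG;	/* inverse, as obj_malloc() does */
}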
-/*
- * Allocate a zspage for the given size class
- */
-static struct page *alloc_zspage(struct size_class *class, gfp_t flags)
+static void create_page_chain(struct size_class *class, struct zspage *zspage,
+ struct page *pages[])
{
- int i, error;
- struct page *first_page = NULL, *uninitialized_var(prev_page);
+ int i;
+ struct page *page;
+ struct page *prev_page = NULL;
+ int nr_pages = class->pages_per_zspage;
/*
* Allocate individual pages and link them together as:
- * 1. first page->private = first sub-page
- * 2. all sub-pages are linked together using page->lru
- * 3. each sub-page is linked to the first page using page->private
+ * 1. all pages are linked together using page->freelist
+ * 2. each sub-page points to the zspage using page->private
*
- * For each size class, First/Head pages are linked together using
- * page->lru. Also, we set PG_private to identify the first page
- * (i.e. no other sub-page has this flag set) and PG_private_2 to
- * identify the last page.
+ * We set PG_private to identify the first page (i.e. no other sub-page
+ * has this flag set) and PG_private_2 to identify the last page.
*/
- error = -ENOMEM;
- for (i = 0; i < class->pages_per_zspage; i++) {
- struct page *page;
-
- page = alloc_page(flags);
- if (!page)
- goto cleanup;
-
- INIT_LIST_HEAD(&page->lru);
- if (i == 0) { /* first page */
+ for (i = 0; i < nr_pages; i++) {
+ page = pages[i];
+ set_page_private(page, (unsigned long)zspage);
+ page->freelist = NULL;
+ if (i == 0) {
+ zspage->first_page = page;
SetPagePrivate(page);
- set_page_private(page, 0);
- first_page = page;
- first_page->inuse = 0;
+ if (unlikely(class->objs_per_zspage == 1 &&
+ class->pages_per_zspage == 1))
+ SetPageHugeObject(page);
+ } else {
+ prev_page->freelist = page;
}
- if (i == 1)
- set_page_private(first_page, (unsigned long)page);
- if (i >= 1)
- set_page_private(page, (unsigned long)first_page);
- if (i >= 2)
- list_add(&page->lru, &prev_page->lru);
- if (i == class->pages_per_zspage - 1) /* last page */
+ if (i == nr_pages - 1)
SetPagePrivate2(page);
prev_page = page;
}
+}
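
/*
 * Illustrative only: after create_page_chain(), the subpages form a
 * NULL-terminated singly linked list through page->freelist (cut short
 * by PageHugeObject in get_next_page()). A sketch of the resulting walk:
 */
static int demo_count_pages(struct zspage *zspage)
{
	int nr = 0;
	struct page *page = get_first_page(zspage);

	do {
		nr++;
	} while ((page = get_next_page(page)) != NULL);

	return nr;
}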
- init_zspage(first_page, class);
+/*
+ * Allocate a zspage for the given size class
+ */
+static struct zspage *alloc_zspage(struct zs_pool *pool,
+ struct size_class *class,
+ gfp_t gfp)
+{
+ int i;
+ struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE];
+ struct zspage *zspage = cache_alloc_zspage(pool, gfp);
- first_page->freelist = location_to_obj(first_page, 0);
- /* Maximum number of objects we can store in this zspage */
- first_page->objects = class->pages_per_zspage * PAGE_SIZE / class->size;
+ if (!zspage)
+ return NULL;
- error = 0; /* Success */
+ memset(zspage, 0, sizeof(struct zspage));
+ zspage->magic = ZSPAGE_MAGIC;
+ migrate_lock_init(zspage);
-cleanup:
- if (unlikely(error) && first_page) {
- free_zspage(first_page);
- first_page = NULL;
+ for (i = 0; i < class->pages_per_zspage; i++) {
+ struct page *page;
+
+ page = alloc_page(gfp);
+ if (!page) {
+ while (--i >= 0)
+ __free_page(pages[i]);
+ cache_free_zspage(pool, zspage);
+ return NULL;
+ }
+ pages[i] = page;
}
- return first_page;
+ create_page_chain(class, zspage, pages);
+ init_zspage(class, zspage);
+
+ return zspage;
}
-static struct page *find_get_zspage(struct size_class *class)
+static struct zspage *find_get_zspage(struct size_class *class)
{
int i;
- struct page *page;
+ struct zspage *zspage;
- for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
- page = class->fullness_list[i];
- if (page)
+ for (i = ZS_ALMOST_FULL; i >= ZS_EMPTY; i--) {
+ zspage = list_first_entry_or_null(&class->fullness_list[i],
+ struct zspage, list);
+ if (zspage)
break;
}
- return page;
+ return zspage;
}
#ifdef CONFIG_PGTABLE_MAPPING
@@ -1127,11 +1265,9 @@ static void __zs_unmap_object(struct mapping_area *area,
goto out;
buf = area->vm_buf;
- if (!area->huge) {
- buf = buf + ZS_HANDLE_SIZE;
- size -= ZS_HANDLE_SIZE;
- off += ZS_HANDLE_SIZE;
- }
+ buf = buf + ZS_HANDLE_SIZE;
+ size -= ZS_HANDLE_SIZE;
+ off += ZS_HANDLE_SIZE;
sizes[0] = PAGE_SIZE - off;
sizes[1] = size - sizes[0];
@@ -1231,11 +1367,9 @@ static bool can_merge(struct size_class *prev, int size, int pages_per_zspage)
return true;
}
-static bool zspage_full(struct page *page)
+static bool zspage_full(struct size_class *class, struct zspage *zspage)
{
- BUG_ON(!is_first_page(page));
-
- return page->inuse == page->objects;
+ return get_zspage_inuse(zspage) == class->objs_per_zspage;
}
unsigned long zs_get_total_pages(struct zs_pool *pool)
@@ -1261,8 +1395,10 @@ EXPORT_SYMBOL_GPL(zs_get_total_pages);
void *zs_map_object(struct zs_pool *pool, unsigned long handle,
enum zs_mapmode mm)
{
+ struct zspage *zspage;
struct page *page;
- unsigned long obj, obj_idx, off;
+ unsigned long obj, off;
+ unsigned int obj_idx;
unsigned int class_idx;
enum fullness_group fg;
@@ -1271,23 +1407,26 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
struct page *pages[2];
void *ret;
- BUG_ON(!handle);
-
/*
* Because we use per-cpu mapping areas shared among the
* pools/users, we can't allow mapping in interrupt context
* because it can corrupt another user's mappings.
*/
- BUG_ON(in_interrupt());
+ WARN_ON_ONCE(in_interrupt());
/* From now on, migration cannot move the object */
pin_tag(handle);
obj = handle_to_obj(handle);
obj_to_location(obj, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ zspage = get_zspage(page);
+
+ /* migration cannot move any subpage in this zspage */
+ migrate_read_lock(zspage);
+
+ get_zspage_mapping(zspage, &class_idx, &fg);
class = pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ off = (class->size * obj_idx) & ~PAGE_MASK;
area = &get_cpu_var(zs_map_area);
area->vm_mm = mm;
@@ -1305,7 +1444,7 @@ void *zs_map_object(struct zs_pool *pool, unsigned long handle,
ret = __zs_map_object(area, pages, off, class->size);
out:
- if (!class->huge)
+ if (likely(!PageHugeObject(page)))
ret += ZS_HANDLE_SIZE;
return ret;
@@ -1314,21 +1453,22 @@ EXPORT_SYMBOL_GPL(zs_map_object);
void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
{
+ struct zspage *zspage;
struct page *page;
- unsigned long obj, obj_idx, off;
+ unsigned long obj, off;
+ unsigned int obj_idx;
unsigned int class_idx;
enum fullness_group fg;
struct size_class *class;
struct mapping_area *area;
- BUG_ON(!handle);
-
obj = handle_to_obj(handle);
obj_to_location(obj, &page, &obj_idx);
- get_zspage_mapping(get_first_page(page), &class_idx, &fg);
+ zspage = get_zspage(page);
+ get_zspage_mapping(zspage, &class_idx, &fg);
class = pool->size_class[class_idx];
- off = obj_idx_to_offset(page, obj_idx, class->size);
+ off = (class->size * obj_idx) & ~PAGE_MASK;
area = this_cpu_ptr(&zs_map_area);
if (off + class->size <= PAGE_SIZE)
@@ -1343,38 +1483,50 @@ void zs_unmap_object(struct zs_pool *pool, unsigned long handle)
__zs_unmap_object(area, pages, off, class->size);
}
put_cpu_var(zs_map_area);
+
+ migrate_read_unlock(zspage);
unpin_tag(handle);
}
EXPORT_SYMBOL_GPL(zs_unmap_object);
-static unsigned long obj_malloc(struct page *first_page,
- struct size_class *class, unsigned long handle)
+static unsigned long obj_malloc(struct size_class *class,
+ struct zspage *zspage, unsigned long handle)
{
+ int i, nr_page, offset;
unsigned long obj;
struct link_free *link;
struct page *m_page;
- unsigned long m_objidx, m_offset;
+ unsigned long m_offset;
void *vaddr;
handle |= OBJ_ALLOCATED_TAG;
- obj = (unsigned long)first_page->freelist;
- obj_to_location(obj, &m_page, &m_objidx);
- m_offset = obj_idx_to_offset(m_page, m_objidx, class->size);
+ obj = get_freeobj(zspage);
+
+ offset = obj * class->size;
+ nr_page = offset >> PAGE_SHIFT;
+ m_offset = offset & ~PAGE_MASK;
+ m_page = get_first_page(zspage);
+
+ for (i = 0; i < nr_page; i++)
+ m_page = get_next_page(m_page);
vaddr = kmap_atomic(m_page);
link = (struct link_free *)vaddr + m_offset / sizeof(*link);
- first_page->freelist = link->next;
- if (!class->huge)
+ set_freeobj(zspage, link->next >> OBJ_ALLOCATED_TAG);
+ if (likely(!PageHugeObject(m_page)))
/* record handle in the header of allocated chunk */
link->handle = handle;
else
- /* record handle in first_page->private */
- set_page_private(first_page, handle);
+ /* record the handle in page->index */
+ zspage->first_page->index = handle;
+
kunmap_atomic(vaddr);
- first_page->inuse++;
+ mod_zspage_inuse(zspage, 1);
zs_stat_inc(class, OBJ_USED, 1);
+ obj = location_to_obj(m_page, obj);
+
return obj;
}
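
/*
 * Illustrative only: the index arithmetic in obj_malloc() maps a free
 * object index to a subpage number and a byte offset. For example, with
 * an assumed class->size of 3264 and PAGE_SIZE of 4096, index 5 lands at
 * byte 16320, i.e. subpage 3 at offset 4032.
 */
static void demo_locate(unsigned int obj_idx, int class_size,
			int *nr_page, int *offset_in_page)
{
	int offset = obj_idx * class_size;

	*nr_page = offset >> PAGE_SHIFT;	/* which subpage */
	*offset_in_page = offset & ~PAGE_MASK;	/* offset inside it */
}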
@@ -1388,16 +1540,17 @@ static unsigned long obj_malloc(struct page *first_page,
* otherwise 0.
* Allocation requests with size > ZS_MAX_ALLOC_SIZE will fail.
*/
-unsigned long zs_malloc(struct zs_pool *pool, size_t size)
+unsigned long zs_malloc(struct zs_pool *pool, size_t size, gfp_t gfp)
{
unsigned long handle, obj;
struct size_class *class;
- struct page *first_page;
+ enum fullness_group newfg;
+ struct zspage *zspage;
if (unlikely(!size || size > ZS_MAX_ALLOC_SIZE))
return 0;
- handle = alloc_handle(pool);
+ handle = cache_alloc_handle(pool, gfp);
if (!handle)
return 0;
@@ -1406,71 +1559,79 @@ unsigned long zs_malloc(struct zs_pool *pool, size_t size)
class = pool->size_class[get_size_class_index(size)];
spin_lock(&class->lock);
- first_page = find_get_zspage(class);
-
- if (!first_page) {
+ zspage = find_get_zspage(class);
+ if (likely(zspage)) {
+ obj = obj_malloc(class, zspage, handle);
+ /* Now move the zspage to another fullness group, if required */
+ fix_fullness_group(class, zspage);
+ record_obj(handle, obj);
spin_unlock(&class->lock);
- first_page = alloc_zspage(class, pool->flags);
- if (unlikely(!first_page)) {
- free_handle(pool, handle);
- return 0;
- }
- set_zspage_mapping(first_page, class->index, ZS_EMPTY);
- atomic_long_add(class->pages_per_zspage,
- &pool->pages_allocated);
+ return handle;
+ }
- spin_lock(&class->lock);
- zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
- class->size, class->pages_per_zspage));
+ spin_unlock(&class->lock);
+
+ zspage = alloc_zspage(pool, class, gfp);
+ if (!zspage) {
+ cache_free_handle(pool, handle);
+ return 0;
}
- obj = obj_malloc(first_page, class, handle);
- /* Now move the zspage to another fullness group, if required */
- fix_fullness_group(class, first_page);
+ spin_lock(&class->lock);
+ obj = obj_malloc(class, zspage, handle);
+ newfg = get_fullness_group(class, zspage);
+ insert_zspage(class, zspage, newfg);
+ set_zspage_mapping(zspage, class->index, newfg);
record_obj(handle, obj);
+ atomic_long_add(class->pages_per_zspage,
+ &pool->pages_allocated);
+ zs_stat_inc(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
+ class->size, class->pages_per_zspage));
+
+ /* We have fully set up the zspage, so mark its pages movable */
+ SetZsPageMovable(pool, zspage);
spin_unlock(&class->lock);
return handle;
}
EXPORT_SYMBOL_GPL(zs_malloc);
-static void obj_free(struct zs_pool *pool, struct size_class *class,
- unsigned long obj)
+static void obj_free(struct size_class *class, unsigned long obj)
{
struct link_free *link;
- struct page *first_page, *f_page;
- unsigned long f_objidx, f_offset;
+ struct zspage *zspage;
+ struct page *f_page;
+ unsigned long f_offset;
+ unsigned int f_objidx;
void *vaddr;
- BUG_ON(!obj);
-
obj &= ~OBJ_ALLOCATED_TAG;
obj_to_location(obj, &f_page, &f_objidx);
- first_page = get_first_page(f_page);
-
- f_offset = obj_idx_to_offset(f_page, f_objidx, class->size);
+ f_offset = (class->size * f_objidx) & ~PAGE_MASK;
+ zspage = get_zspage(f_page);
vaddr = kmap_atomic(f_page);
/* Insert this object in containing zspage's freelist */
link = (struct link_free *)(vaddr + f_offset);
- link->next = first_page->freelist;
- if (class->huge)
- set_page_private(first_page, 0);
+ link->next = get_freeobj(zspage) << OBJ_ALLOCATED_TAG;
kunmap_atomic(vaddr);
- first_page->freelist = (void *)obj;
- first_page->inuse--;
+ set_freeobj(zspage, f_objidx);
+ mod_zspage_inuse(zspage, -1);
zs_stat_dec(class, OBJ_USED, 1);
}
void zs_free(struct zs_pool *pool, unsigned long handle)
{
- struct page *first_page, *f_page;
- unsigned long obj, f_objidx;
+ struct zspage *zspage;
+ struct page *f_page;
+ unsigned long obj;
+ unsigned int f_objidx;
int class_idx;
struct size_class *class;
enum fullness_group fullness;
+ bool isolated;
if (unlikely(!handle))
return;
@@ -1478,33 +1639,39 @@ void zs_free(struct zs_pool *pool, unsigned long handle)
pin_tag(handle);
obj = handle_to_obj(handle);
obj_to_location(obj, &f_page, &f_objidx);
- first_page = get_first_page(f_page);
+ zspage = get_zspage(f_page);
+
+ migrate_read_lock(zspage);
- get_zspage_mapping(first_page, &class_idx, &fullness);
+ get_zspage_mapping(zspage, &class_idx, &fullness);
class = pool->size_class[class_idx];
spin_lock(&class->lock);
- obj_free(pool, class, obj);
- fullness = fix_fullness_group(class, first_page);
- if (fullness == ZS_EMPTY) {
- zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
- class->size, class->pages_per_zspage));
- atomic_long_sub(class->pages_per_zspage,
- &pool->pages_allocated);
- free_zspage(first_page);
+ obj_free(class, obj);
+ fullness = fix_fullness_group(class, zspage);
+ if (fullness != ZS_EMPTY) {
+ migrate_read_unlock(zspage);
+ goto out;
}
+
+ isolated = is_zspage_isolated(zspage);
+ migrate_read_unlock(zspage);
+ /* If zspage is isolated, zs_page_putback will free the zspage */
+ if (likely(!isolated))
+ free_zspage(pool, class, zspage);
+out:
+
spin_unlock(&class->lock);
unpin_tag(handle);
-
- free_handle(pool, handle);
+ cache_free_handle(pool, handle);
}
EXPORT_SYMBOL_GPL(zs_free);
-static void zs_object_copy(unsigned long dst, unsigned long src,
- struct size_class *class)
+static void zs_object_copy(struct size_class *class, unsigned long dst,
+ unsigned long src)
{
struct page *s_page, *d_page;
- unsigned long s_objidx, d_objidx;
+ unsigned int s_objidx, d_objidx;
unsigned long s_off, d_off;
void *s_addr, *d_addr;
int s_size, d_size, size;
@@ -1515,8 +1682,8 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
obj_to_location(src, &s_page, &s_objidx);
obj_to_location(dst, &d_page, &d_objidx);
- s_off = obj_idx_to_offset(s_page, s_objidx, class->size);
- d_off = obj_idx_to_offset(d_page, d_objidx, class->size);
+ s_off = (class->size * s_objidx) & ~PAGE_MASK;
+ d_off = (class->size * d_objidx) & ~PAGE_MASK;
if (s_off + class->size > PAGE_SIZE)
s_size = PAGE_SIZE - s_off;
@@ -1544,7 +1711,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
kunmap_atomic(d_addr);
kunmap_atomic(s_addr);
s_page = get_next_page(s_page);
- BUG_ON(!s_page);
s_addr = kmap_atomic(s_page);
d_addr = kmap_atomic(d_page);
s_size = class->size - written;
@@ -1554,7 +1720,6 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
if (d_off >= PAGE_SIZE) {
kunmap_atomic(d_addr);
d_page = get_next_page(d_page);
- BUG_ON(!d_page);
d_addr = kmap_atomic(d_page);
d_size = class->size - written;
d_off = 0;
@@ -1569,20 +1734,19 @@ static void zs_object_copy(unsigned long dst, unsigned long src,
* Find an allocated object in the zspage, starting from the given index,
* and return its handle.
*/
-static unsigned long find_alloced_obj(struct page *page, int index,
- struct size_class *class)
+static unsigned long find_alloced_obj(struct size_class *class,
+ struct page *page, int index)
{
unsigned long head;
int offset = 0;
unsigned long handle = 0;
void *addr = kmap_atomic(page);
- if (!is_first_page(page))
- offset = page->index;
+ offset = get_first_obj_offset(page);
offset += class->size * index;
while (offset < PAGE_SIZE) {
- head = obj_to_head(class, page, addr + offset);
+ head = obj_to_head(page, addr + offset);
if (head & OBJ_ALLOCATED_TAG) {
handle = head & ~OBJ_ALLOCATED_TAG;
if (trypin_tag(handle))
@@ -1599,7 +1763,7 @@ static unsigned long find_alloced_obj(struct page *page, int index,
}
struct zs_compact_control {
- /* Source page for migration which could be a subpage of zspage. */
+ /* Source page for migration which could be a subpage of zspage */
struct page *s_page;
/* Destination page for migration which should be the first page
* of zspage. */
@@ -1620,7 +1784,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
int ret = 0;
while (1) {
- handle = find_alloced_obj(s_page, index, class);
+ handle = find_alloced_obj(class, s_page, index);
if (!handle) {
s_page = get_next_page(s_page);
if (!s_page)
@@ -1630,15 +1794,15 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
}
/* Stop if there is no more space */
- if (zspage_full(d_page)) {
+ if (zspage_full(class, get_zspage(d_page))) {
unpin_tag(handle);
ret = -ENOMEM;
break;
}
used_obj = handle_to_obj(handle);
- free_obj = obj_malloc(d_page, class, handle);
- zs_object_copy(free_obj, used_obj, class);
+ free_obj = obj_malloc(class, get_zspage(d_page), handle);
+ zs_object_copy(class, free_obj, used_obj);
index++;
/*
* record_obj updates handle's value to free_obj and it will
@@ -1649,7 +1813,7 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
free_obj |= BIT(HANDLE_PIN_BIT);
record_obj(handle, free_obj);
unpin_tag(handle);
- obj_free(pool, class, used_obj);
+ obj_free(class, used_obj);
}
/* Remember last position in this iteration */
@@ -1659,71 +1823,423 @@ static int migrate_zspage(struct zs_pool *pool, struct size_class *class,
return ret;
}
-static struct page *isolate_target_page(struct size_class *class)
+static struct zspage *isolate_zspage(struct size_class *class, bool source)
{
int i;
- struct page *page;
+ struct zspage *zspage;
+ enum fullness_group fg[2] = {ZS_ALMOST_EMPTY, ZS_ALMOST_FULL};
- for (i = 0; i < _ZS_NR_FULLNESS_GROUPS; i++) {
- page = class->fullness_list[i];
- if (page) {
- remove_zspage(page, class, i);
- break;
+ if (!source) {
+ fg[0] = ZS_ALMOST_FULL;
+ fg[1] = ZS_ALMOST_EMPTY;
+ }
+
+ for (i = 0; i < 2; i++) {
+ zspage = list_first_entry_or_null(&class->fullness_list[fg[i]],
+ struct zspage, list);
+ if (zspage) {
+ VM_BUG_ON(is_zspage_isolated(zspage));
+ remove_zspage(class, zspage, fg[i]);
+ return zspage;
}
}
- return page;
+ return zspage;
}
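
/*
 * Illustrative only: the source/destination flag merely flips the scan
 * order -- compaction drains the emptiest zspages into the fullest ones.
 */
static const enum fullness_group demo_src_order[2] = {
	ZS_ALMOST_EMPTY, ZS_ALMOST_FULL		/* emptier sources first */
};
static const enum fullness_group demo_dst_order[2] = {
	ZS_ALMOST_FULL, ZS_ALMOST_EMPTY		/* fuller destinations first */
};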
/*
- * putback_zspage - add @first_page into right class's fullness list
- * @pool: target pool
+ * putback_zspage - add @zspage into right class's fullness list
* @class: destination class
- * @first_page: target page
+ * @zspage: target page
*
- * Return @fist_page's fullness_group
+ * Return @zspage's fullness_group
*/
-static enum fullness_group putback_zspage(struct zs_pool *pool,
- struct size_class *class,
- struct page *first_page)
+static enum fullness_group putback_zspage(struct size_class *class,
+ struct zspage *zspage)
{
enum fullness_group fullness;
- BUG_ON(!is_first_page(first_page));
+ VM_BUG_ON(is_zspage_isolated(zspage));
- fullness = get_fullness_group(first_page);
- insert_zspage(first_page, class, fullness);
- set_zspage_mapping(first_page, class->index, fullness);
+ fullness = get_fullness_group(class, zspage);
+ insert_zspage(class, zspage, fullness);
+ set_zspage_mapping(zspage, class->index, fullness);
- if (fullness == ZS_EMPTY) {
- zs_stat_dec(class, OBJ_ALLOCATED, get_maxobj_per_zspage(
- class->size, class->pages_per_zspage));
- atomic_long_sub(class->pages_per_zspage,
- &pool->pages_allocated);
+ return fullness;
+}
+
+#ifdef CONFIG_COMPACTION
+static struct dentry *zs_mount(struct file_system_type *fs_type,
+ int flags, const char *dev_name, void *data)
+{
+ static const struct dentry_operations ops = {
+ .d_dname = simple_dname,
+ };
+
+ return mount_pseudo(fs_type, "zsmalloc:", NULL, &ops, ZSMALLOC_MAGIC);
+}
+
+static struct file_system_type zsmalloc_fs = {
+ .name = "zsmalloc",
+ .mount = zs_mount,
+ .kill_sb = kill_anon_super,
+};
+
+static int zsmalloc_mount(void)
+{
+ int ret = 0;
+
+ zsmalloc_mnt = kern_mount(&zsmalloc_fs);
+ if (IS_ERR(zsmalloc_mnt))
+ ret = PTR_ERR(zsmalloc_mnt);
+
+ return ret;
+}
+
+static void zsmalloc_unmount(void)
+{
+ kern_unmount(zsmalloc_mnt);
+}
+
+static void migrate_lock_init(struct zspage *zspage)
+{
+ rwlock_init(&zspage->lock);
+}
+
+static void migrate_read_lock(struct zspage *zspage)
+{
+ read_lock(&zspage->lock);
+}
- free_zspage(first_page);
+static void migrate_read_unlock(struct zspage *zspage)
+{
+ read_unlock(&zspage->lock);
+}
+
+static void migrate_write_lock(struct zspage *zspage)
+{
+ write_lock(&zspage->lock);
+}
+
+static void migrate_write_unlock(struct zspage *zspage)
+{
+ write_unlock(&zspage->lock);
+}
+
+/* Number of isolated subpages for *page migration* in this zspage */
+static void inc_zspage_isolation(struct zspage *zspage)
+{
+ zspage->isolated++;
+}
+
+static void dec_zspage_isolation(struct zspage *zspage)
+{
+ zspage->isolated--;
+}
+
+static void replace_sub_page(struct size_class *class, struct zspage *zspage,
+ struct page *newpage, struct page *oldpage)
+{
+ struct page *page;
+ struct page *pages[ZS_MAX_PAGES_PER_ZSPAGE] = {NULL, };
+ int idx = 0;
+
+ page = get_first_page(zspage);
+ do {
+ if (page == oldpage)
+ pages[idx] = newpage;
+ else
+ pages[idx] = page;
+ idx++;
+ } while ((page = get_next_page(page)) != NULL);
+
+ create_page_chain(class, zspage, pages);
+ set_first_obj_offset(newpage, get_first_obj_offset(oldpage));
+ if (unlikely(PageHugeObject(oldpage)))
+ newpage->index = oldpage->index;
+ __SetPageMovable(newpage, page_mapping(oldpage));
+}
+
+bool zs_page_isolate(struct page *page, isolate_mode_t mode)
+{
+ struct zs_pool *pool;
+ struct size_class *class;
+ int class_idx;
+ enum fullness_group fullness;
+ struct zspage *zspage;
+ struct address_space *mapping;
+
+ /*
+ * The page is locked, so the zspage cannot be destroyed. For details,
+ * see lock_zspage in free_zspage.
+ */
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+
+ /*
+ * Without the class lock, fullness could be stale, while class_idx is
+ * okay because it is constant unless the page is freed. So reread
+ * fullness under the class lock.
+ */
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ mapping = page_mapping(page);
+ pool = mapping->private_data;
+ class = pool->size_class[class_idx];
+
+ spin_lock(&class->lock);
+ if (get_zspage_inuse(zspage) == 0) {
+ spin_unlock(&class->lock);
+ return false;
}
- return fullness;
+ /* zspage is isolated for object migration */
+ if (list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+ spin_unlock(&class->lock);
+ return false;
+ }
+
+ /*
+ * If this is the first isolation of the zspage, remove it from its
+ * size_class to prevent further object allocations from it.
+ */
+ if (!list_empty(&zspage->list) && !is_zspage_isolated(zspage)) {
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ remove_zspage(class, zspage, fullness);
+ }
+
+ inc_zspage_isolation(zspage);
+ spin_unlock(&class->lock);
+
+ return true;
}
-static struct page *isolate_source_page(struct size_class *class)
+int zs_page_migrate(struct address_space *mapping, struct page *newpage,
+ struct page *page, enum migrate_mode mode)
+{
+ struct zs_pool *pool;
+ struct size_class *class;
+ int class_idx;
+ enum fullness_group fullness;
+ struct zspage *zspage;
+ struct page *dummy;
+ void *s_addr, *d_addr, *addr;
+ int offset, pos;
+ unsigned long handle, head;
+ unsigned long old_obj, new_obj;
+ unsigned int obj_idx;
+ int ret = -EAGAIN;
+
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+
+ /* A concurrent compactor cannot migrate any subpage in this zspage */
+ migrate_write_lock(zspage);
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ pool = mapping->private_data;
+ class = pool->size_class[class_idx];
+ offset = get_first_obj_offset(page);
+
+ spin_lock(&class->lock);
+ if (!get_zspage_inuse(zspage)) {
+ ret = -EBUSY;
+ goto unlock_class;
+ }
+
+ pos = offset;
+ s_addr = kmap_atomic(page);
+ while (pos < PAGE_SIZE) {
+ head = obj_to_head(page, s_addr + pos);
+ if (head & OBJ_ALLOCATED_TAG) {
+ handle = head & ~OBJ_ALLOCATED_TAG;
+ if (!trypin_tag(handle))
+ goto unpin_objects;
+ }
+ pos += class->size;
+ }
+
+ /*
+ * Here, no user can access any object in the zspage, so it is safe to move.
+ */
+ d_addr = kmap_atomic(newpage);
+ memcpy(d_addr, s_addr, PAGE_SIZE);
+ kunmap_atomic(d_addr);
+
+ for (addr = s_addr + offset; addr < s_addr + pos;
+ addr += class->size) {
+ head = obj_to_head(page, addr);
+ if (head & OBJ_ALLOCATED_TAG) {
+ handle = head & ~OBJ_ALLOCATED_TAG;
+ if (!testpin_tag(handle))
+ BUG();
+
+ old_obj = handle_to_obj(handle);
+ obj_to_location(old_obj, &dummy, &obj_idx);
+ new_obj = (unsigned long)location_to_obj(newpage,
+ obj_idx);
+ new_obj |= BIT(HANDLE_PIN_BIT);
+ record_obj(handle, new_obj);
+ }
+ }
+
+ replace_sub_page(class, zspage, newpage, page);
+ get_page(newpage);
+
+ dec_zspage_isolation(zspage);
+
+ /*
+ * Page migration is done, so put the isolated zspage back on
+ * the list if @page is the final isolated subpage in the zspage.
+ */
+ if (!is_zspage_isolated(zspage))
+ putback_zspage(class, zspage);
+
+ reset_page(page);
+ put_page(page);
+ page = newpage;
+
+ ret = MIGRATEPAGE_SUCCESS;
+unpin_objects:
+ for (addr = s_addr + offset; addr < s_addr + pos;
+ addr += class->size) {
+ head = obj_to_head(page, addr);
+ if (head & OBJ_ALLOCATED_TAG) {
+ handle = head & ~OBJ_ALLOCATED_TAG;
+ if (!testpin_tag(handle))
+ BUG();
+ unpin_tag(handle);
+ }
+ }
+ kunmap_atomic(s_addr);
+unlock_class:
+ spin_unlock(&class->lock);
+ migrate_write_unlock(zspage);
+
+ return ret;
+}
+
+void zs_page_putback(struct page *page)
+{
+ struct zs_pool *pool;
+ struct size_class *class;
+ int class_idx;
+ enum fullness_group fg;
+ struct address_space *mapping;
+ struct zspage *zspage;
+
+ VM_BUG_ON_PAGE(!PageMovable(page), page);
+ VM_BUG_ON_PAGE(!PageIsolated(page), page);
+
+ zspage = get_zspage(page);
+ get_zspage_mapping(zspage, &class_idx, &fg);
+ mapping = page_mapping(page);
+ pool = mapping->private_data;
+ class = pool->size_class[class_idx];
+
+ spin_lock(&class->lock);
+ dec_zspage_isolation(zspage);
+ if (!is_zspage_isolated(zspage)) {
+ fg = putback_zspage(class, zspage);
+ /*
+ * Due to page_lock, we cannot free the zspage immediately,
+ * so defer the free.
+ */
+ if (fg == ZS_EMPTY)
+ schedule_work(&pool->free_work);
+ }
+ spin_unlock(&class->lock);
+}
+
+const struct address_space_operations zsmalloc_aops = {
+ .isolate_page = zs_page_isolate,
+ .migratepage = zs_page_migrate,
+ .putback_page = zs_page_putback,
+};
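
/*
 * Illustrative only: these three callbacks are the whole contract with
 * the movable-page machinery. The migration core drives them roughly as
 * below; error handling is elided and the zero mode arguments are
 * placeholders.
 */
static int demo_migrate_one(struct address_space *mapping,
			    struct page *old, struct page *new)
{
	if (!zs_page_isolate(old, 0))
		return -EAGAIN;

	if (zs_page_migrate(mapping, new, old, 0) != MIGRATEPAGE_SUCCESS) {
		zs_page_putback(old);		/* undo the isolation */
		return -EAGAIN;
	}
	return 0;
}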
+
+static int zs_register_migration(struct zs_pool *pool)
+{
+ pool->inode = alloc_anon_inode(zsmalloc_mnt->mnt_sb);
+ if (IS_ERR(pool->inode)) {
+ pool->inode = NULL;
+ return 1;
+ }
+
+ pool->inode->i_mapping->private_data = pool;
+ pool->inode->i_mapping->a_ops = &zsmalloc_aops;
+ return 0;
+}
+
+static void zs_unregister_migration(struct zs_pool *pool)
+{
+ flush_work(&pool->free_work);
+ if (pool->inode)
+ iput(pool->inode);
+}
+
+/*
+ * Caller should hold the page lock of all pages in the zspage.
+ * Here we cannot use the zspage metadata.
+ */
+static void async_free_zspage(struct work_struct *work)
{
int i;
- struct page *page = NULL;
+ struct size_class *class;
+ unsigned int class_idx;
+ enum fullness_group fullness;
+ struct zspage *zspage, *tmp;
+ LIST_HEAD(free_pages);
+ struct zs_pool *pool = container_of(work, struct zs_pool,
+ free_work);
- for (i = ZS_ALMOST_EMPTY; i >= ZS_ALMOST_FULL; i--) {
- page = class->fullness_list[i];
- if (!page)
+ for (i = 0; i < zs_size_classes; i++) {
+ class = pool->size_class[i];
+ if (class->index != i)
continue;
- remove_zspage(page, class, i);
- break;
+ spin_lock(&class->lock);
+ list_splice_init(&class->fullness_list[ZS_EMPTY], &free_pages);
+ spin_unlock(&class->lock);
}
- return page;
+
+ list_for_each_entry_safe(zspage, tmp, &free_pages, list) {
+ list_del(&zspage->list);
+ lock_zspage(zspage);
+
+ get_zspage_mapping(zspage, &class_idx, &fullness);
+ VM_BUG_ON(fullness != ZS_EMPTY);
+ class = pool->size_class[class_idx];
+ spin_lock(&class->lock);
+ __free_zspage(pool, pool->size_class[class_idx], zspage);
+ spin_unlock(&class->lock);
+ }
+}
+
+static void kick_deferred_free(struct zs_pool *pool)
+{
+ schedule_work(&pool->free_work);
+}
+
+static void init_deferred_free(struct zs_pool *pool)
+{
+ INIT_WORK(&pool->free_work, async_free_zspage);
}
+static void SetZsPageMovable(struct zs_pool *pool, struct zspage *zspage)
+{
+ struct page *page = get_first_page(zspage);
+
+ do {
+ WARN_ON(!trylock_page(page));
+ __SetPageMovable(page, pool->inode->i_mapping);
+ unlock_page(page);
+ } while ((page = get_next_page(page)) != NULL);
+}
+#endif
+
/*
*
* Based on the number of unused allocated objects calculate
@@ -1748,22 +2264,20 @@ static unsigned long zs_can_compact(struct size_class *class)
static void __zs_compact(struct zs_pool *pool, struct size_class *class)
{
struct zs_compact_control cc;
- struct page *src_page;
- struct page *dst_page = NULL;
+ struct zspage *src_zspage;
+ struct zspage *dst_zspage = NULL;
spin_lock(&class->lock);
- while ((src_page = isolate_source_page(class))) {
-
- BUG_ON(!is_first_page(src_page));
+ while ((src_zspage = isolate_zspage(class, true))) {
if (!zs_can_compact(class))
break;
cc.index = 0;
- cc.s_page = src_page;
+ cc.s_page = get_first_page(src_zspage);
- while ((dst_page = isolate_target_page(class))) {
- cc.d_page = dst_page;
+ while ((dst_zspage = isolate_zspage(class, false))) {
+ cc.d_page = get_first_page(dst_zspage);
/*
* If there is no more space in dst_page, resched
* and see if anyone had allocated another zspage.
@@ -1771,23 +2285,25 @@ static void __zs_compact(struct zs_pool *pool, struct size_class *class)
if (!migrate_zspage(pool, class, &cc))
break;
- putback_zspage(pool, class, dst_page);
+ putback_zspage(class, dst_zspage);
}
/* Stop if we couldn't find slot */
- if (dst_page == NULL)
+ if (dst_zspage == NULL)
break;
- putback_zspage(pool, class, dst_page);
- if (putback_zspage(pool, class, src_page) == ZS_EMPTY)
+ putback_zspage(class, dst_zspage);
+ if (putback_zspage(class, src_zspage) == ZS_EMPTY) {
+ free_zspage(pool, class, src_zspage);
pool->stats.pages_compacted += class->pages_per_zspage;
+ }
spin_unlock(&class->lock);
cond_resched();
spin_lock(&class->lock);
}
- if (src_page)
- putback_zspage(pool, class, src_page);
+ if (src_zspage)
+ putback_zspage(class, src_zspage);
spin_unlock(&class->lock);
}
@@ -1884,7 +2400,7 @@ static int zs_register_shrinker(struct zs_pool *pool)
* On success, a pointer to the newly created pool is returned,
* otherwise NULL.
*/
-struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
+struct zs_pool *zs_create_pool(const char *name)
{
int i;
struct zs_pool *pool;
@@ -1894,6 +2410,7 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
if (!pool)
return NULL;
+ init_deferred_free(pool);
pool->size_class = kcalloc(zs_size_classes, sizeof(struct size_class *),
GFP_KERNEL);
if (!pool->size_class) {
@@ -1905,7 +2422,7 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
if (!pool->name)
goto err;
- if (create_handle_cache(pool))
+ if (create_cache(pool))
goto err;
/*
@@ -1916,6 +2433,7 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
int size;
int pages_per_zspage;
struct size_class *class;
+ int fullness = 0;
size = ZS_MIN_ALLOC_SIZE + i * ZS_SIZE_CLASS_DELTA;
if (size > ZS_MAX_ALLOC_SIZE)
@@ -1945,18 +2463,21 @@ struct zs_pool *zs_create_pool(const char *name, gfp_t flags)
class->size = size;
class->index = i;
class->pages_per_zspage = pages_per_zspage;
- if (pages_per_zspage == 1 &&
- get_maxobj_per_zspage(size, pages_per_zspage) == 1)
- class->huge = true;
+ class->objs_per_zspage = class->pages_per_zspage *
+ PAGE_SIZE / class->size;
spin_lock_init(&class->lock);
pool->size_class[i] = class;
+ for (fullness = ZS_EMPTY; fullness < NR_ZS_FULLNESS;
+ fullness++)
+ INIT_LIST_HEAD(&class->fullness_list[fullness]);
prev_class = class;
}
- pool->flags = flags;
+ if (zs_pool_stat_create(pool, name))
+ goto err;
- if (zs_pool_stat_create(name, pool))
+ if (zs_register_migration(pool))
goto err;
/*
@@ -1978,6 +2499,7 @@ void zs_destroy_pool(struct zs_pool *pool)
int i;
zs_unregister_shrinker(pool);
+ zs_unregister_migration(pool);
zs_pool_stat_destroy(pool);
for (i = 0; i < zs_size_classes; i++) {
@@ -1990,8 +2512,8 @@ void zs_destroy_pool(struct zs_pool *pool)
if (class->index != i)
continue;
- for (fg = 0; fg < _ZS_NR_FULLNESS_GROUPS; fg++) {
- if (class->fullness_list[fg]) {
+ for (fg = ZS_EMPTY; fg < NR_ZS_FULLNESS; fg++) {
+ if (!list_empty(&class->fullness_list[fg])) {
pr_info("Freeing non-empty class with size %db, fullness group %d\n",
class->size, fg);
}
@@ -1999,7 +2521,7 @@ void zs_destroy_pool(struct zs_pool *pool)
kfree(class);
}
- destroy_handle_cache(pool);
+ destroy_cache(pool);
kfree(pool->size_class);
kfree(pool->name);
kfree(pool);
@@ -2008,7 +2530,13 @@ EXPORT_SYMBOL_GPL(zs_destroy_pool);
static int __init zs_init(void)
{
- int ret = zs_register_cpu_notifier();
+ int ret;
+
+ ret = zsmalloc_mount();
+ if (ret)
+ goto out;
+
+ ret = zs_register_cpu_notifier();
if (ret)
goto notifier_fail;
@@ -2032,7 +2560,8 @@ stat_fail:
#endif
notifier_fail:
zs_unregister_cpu_notifier();
-
+ zsmalloc_unmount();
+out:
return ret;
}
@@ -2041,6 +2570,7 @@ static void __exit zs_exit(void)
#ifdef CONFIG_ZPOOL
zpool_unregister_driver(&zs_zpool_driver);
#endif
+ zsmalloc_unmount();
zs_unregister_cpu_notifier();
zs_stat_exit();
diff --git a/scripts/checkpatch.pl b/scripts/checkpatch.pl
index 17a3a6d1785c..1dcd4ed37103 100755
--- a/scripts/checkpatch.pl
+++ b/scripts/checkpatch.pl
@@ -60,6 +60,8 @@ my $spelling_file = "$D/spelling.txt";
my $codespell = 0;
my $codespellfile = "/usr/share/codespell/dictionary.txt";
my $color = 1;
+my $qca_sign_off = 0;
+my $codeaurora_sign_off = 0;
sub help {
my ($exitcode) = @_;
@@ -2429,10 +2431,16 @@ sub process {
"email address '$email' might be better as '$suggested_email$comment'\n" . $herecurr);
}
}
- if ($chk_author && $line =~ /^\s*signed-off-by:.*(quicinc|qualcomm)\.com/i) {
- WARN("BAD_SIGN_OFF",
- "invalid Signed-off-by identity\n" . $line );
- }
+ if ($chk_author) {
+ if ($line =~ /^\s*signed-off-by:.*qca\.qualcomm\.com/i) {
+ $qca_sign_off = 1;
+ } elsif ($line =~ /^\s*signed-off-by:.*codeaurora\.org/i) {
+ $codeaurora_sign_off = 1;
+ } elsif ($line =~ /^\s*signed-off-by:.*(quicinc|qualcomm)\.com/i) {
+ WARN("BAD_SIGN_OFF",
+ "invalid Signed-off-by identity\n" . $line );
+ }
+ }
# Check for duplicate signatures
my $sig_nospace = $line;
@@ -2558,7 +2566,8 @@ sub process {
}
#check the patch for invalid author credentials
- if ($chk_author && $line =~ /^From:.*(quicinc|qualcomm)\.com/) {
+ if ($chk_author && !($line =~ /^From:.*qca\.qualcomm\.com/) &&
+ $line =~ /^From:.*(quicinc|qualcomm)\.com/) {
WARN("BAD_AUTHOR", "invalid author identity\n" . $line );
}
@@ -6042,6 +6051,11 @@ sub process {
}
}
+ if ($chk_author && $qca_sign_off && !$codeaurora_sign_off) {
+ WARN("BAD_SIGN_OFF",
+ "QCA Signed-off-by requires CODEAURORA Signed-off-by\n" . $line );
+ }
+
# If we have no input at all, then there is nothing to report on
# so just keep quiet.
if ($#rawlines == -1) {
diff --git a/sound/soc/codecs/Kconfig b/sound/soc/codecs/Kconfig
index 8d623d01425b..566bcb04ea51 100644
--- a/sound/soc/codecs/Kconfig
+++ b/sound/soc/codecs/Kconfig
@@ -989,7 +989,7 @@ config SND_SOC_MSM_STUB
config SND_SOC_MSM_HDMI_CODEC_RX
bool "HDMI Audio Playback"
- depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998)
+ depends on FB_MSM_MDSS_HDMI_PANEL && (SND_SOC_APQ8084 || SND_SOC_MSM8994 || SND_SOC_MSM8996 || SND_SOC_MSM8998 || SND_SOC_SDM660_COMMON)
help
HDMI audio drivers should be built only if the platform
supports hdmi panel.
diff --git a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
index 850238764d87..6b267122dc08 100644
--- a/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
+++ b/sound/soc/codecs/msm_sdw/msm_sdw_cdc.c
@@ -48,10 +48,14 @@
#define MSM_SDW_VERSION_1_0 0x0001
#define MSM_SDW_VERSION_ENTRY_SIZE 32
+/*
+ * 200 milliseconds is sufficient for DSP bring-up in the modem
+ * after Subsystem Restart (SSR).
+ */
+#define ADSP_STATE_READY_TIMEOUT_MS 200
+
static const DECLARE_TLV_DB_SCALE(digital_gain, 0, 1, 0);
static struct snd_soc_dai_driver msm_sdw_dai[];
-static bool initial_boot = true;
-static bool is_ssr_en;
static bool skip_irq = true;
static int msm_sdw_config_ear_spkr_gain(struct snd_soc_codec *codec,
@@ -467,10 +471,9 @@ static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x20,
0x20);
snd_soc_update_bits(codec,
- MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x00);
+ MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
snd_soc_update_bits(codec,
- MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ MSM_SDW_TX10_SPKR_PROT_PATH_CTL, 0x0F, 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX9_SPKR_PROT_PATH_CTL, 0x10, 0x10);
snd_soc_update_bits(codec,
@@ -493,10 +496,10 @@ static int msm_sdw_codec_enable_vi_feedback(struct snd_soc_dapm_widget *w,
0x20);
snd_soc_update_bits(codec,
MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX12_SPKR_PROT_PATH_CTL, 0x0F,
- 0x00);
+ 0x04);
snd_soc_update_bits(codec,
MSM_SDW_TX11_SPKR_PROT_PATH_CTL, 0x10,
0x10);
@@ -1036,6 +1039,13 @@ static int msm_sdw_swrm_read(void *handle, int reg)
__func__, reg);
sdw_rd_addr_base = MSM_SDW_AHB_BRIDGE_RD_ADDR_0;
sdw_rd_data_base = MSM_SDW_AHB_BRIDGE_RD_DATA_0;
+
+ /*
+ * Sleep because an SWR slave read access takes time; allow
+ * RD_DONE to complete for the previous register, if any.
+ */
+ usleep_range(50, 55);
+
/* read_lock */
mutex_lock(&msm_sdw->sdw_read_lock);
ret = regmap_bulk_write(msm_sdw->regmap, sdw_rd_addr_base,
@@ -1225,7 +1235,7 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
struct snd_pcm_hw_params *params,
struct snd_soc_dai *dai)
{
- u8 rx_clk_fs_rate, rx_fs_rate;
+ u8 clk_fs_rate, fs_rate;
dev_dbg(dai->codec->dev,
"%s: dai_name = %s DAI-ID %x rate %d num_ch %d format %d\n",
@@ -1234,28 +1244,28 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
switch (params_rate(params)) {
case 8000:
- rx_clk_fs_rate = 0x00;
- rx_fs_rate = 0x00;
+ clk_fs_rate = 0x00;
+ fs_rate = 0x00;
break;
case 16000:
- rx_clk_fs_rate = 0x01;
- rx_fs_rate = 0x01;
+ clk_fs_rate = 0x01;
+ fs_rate = 0x01;
break;
case 32000:
- rx_clk_fs_rate = 0x02;
- rx_fs_rate = 0x03;
+ clk_fs_rate = 0x02;
+ fs_rate = 0x03;
break;
case 48000:
- rx_clk_fs_rate = 0x03;
- rx_fs_rate = 0x04;
+ clk_fs_rate = 0x03;
+ fs_rate = 0x04;
break;
case 96000:
- rx_clk_fs_rate = 0x04;
- rx_fs_rate = 0x05;
+ clk_fs_rate = 0x04;
+ fs_rate = 0x05;
break;
case 192000:
- rx_clk_fs_rate = 0x05;
- rx_fs_rate = 0x06;
+ clk_fs_rate = 0x05;
+ fs_rate = 0x06;
break;
default:
dev_err(dai->codec->dev,
@@ -1264,30 +1274,45 @@ static int msm_sdw_hw_params(struct snd_pcm_substream *substream,
return -EINVAL;
}
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x1C, (rx_clk_fs_rate << 2));
- snd_soc_update_bits(dai->codec,
- MSM_SDW_RX7_RX_PATH_CTL, 0x0F, rx_fs_rate);
- snd_soc_update_bits(dai->codec,
- MSM_SDW_RX8_RX_PATH_CTL, 0x0F, rx_fs_rate);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE) {
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x1C,
+ (clk_fs_rate << 2));
+ } else {
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x1C,
+ (clk_fs_rate << 2));
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_RX7_RX_PATH_CTL, 0x0F,
+ fs_rate);
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_RX8_RX_PATH_CTL, 0x0F,
+ fs_rate);
+ }
switch (params_format(params)) {
case SNDRV_PCM_FORMAT_S16_LE:
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
+ else
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x20);
break;
case SNDRV_PCM_FORMAT_S24_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
+ if (substream->stream == SNDRV_PCM_STREAM_CAPTURE)
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x00);
+ else
+ snd_soc_update_bits(dai->codec,
+ MSM_SDW_TOP_RX_I2S_CTL, 0x20, 0x00);
break;
default:
dev_err(dai->codec->dev, "%s: wrong format selected\n",
__func__);
return -EINVAL;
}
- snd_soc_update_bits(dai->codec,
- MSM_SDW_TOP_TX_I2S_CTL, 0x20, 0x20);
return 0;
}
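
/*
 * Illustrative only: the sample-rate switch above reduces to a lookup
 * table. The register codes are copied from the cases; the table form
 * itself is an editorial sketch.
 */
static const struct {
	unsigned int rate;
	u8 clk_fs_rate;
	u8 fs_rate;
} demo_sdw_rates[] = {
	{  8000, 0x00, 0x00 }, { 16000, 0x01, 0x01 },
	{ 32000, 0x02, 0x03 }, { 48000, 0x03, 0x04 },
	{ 96000, 0x04, 0x05 }, { 192000, 0x05, 0x06 },
};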
@@ -1403,7 +1428,7 @@ static struct snd_soc_dai_driver msm_sdw_dai[] = {
.rate_max = 192000,
.rate_min = 8000,
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 4,
},
.ops = &msm_sdw_dai_ops,
},
@@ -1412,9 +1437,9 @@ static struct snd_soc_dai_driver msm_sdw_dai[] = {
.id = AIF1_SDW_VIFEED,
.capture = {
.stream_name = "VIfeed_SDW",
- .rates = SNDRV_PCM_RATE_8000,
+ .rates = MSM_SDW_RATES,
.formats = SNDRV_PCM_FMTBIT_S16_LE,
- .rate_max = 8000,
+ .rate_max = 48000,
.rate_min = 8000,
.channels_min = 2,
.channels_max = 4,
@@ -1629,6 +1654,8 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
struct msm_sdw_priv *msm_sdw = container_of(nb,
struct msm_sdw_priv,
service_nb);
+ bool adsp_ready = false;
+ unsigned long timeout;
pr_debug("%s: Service opcode 0x%lx\n", __func__, opcode);
@@ -1641,15 +1668,34 @@ static int msm_sdw_notifier_service_cb(struct notifier_block *nb,
SWR_DEVICE_DOWN, NULL);
break;
case AUDIO_NOTIFIER_SERVICE_UP:
- if (initial_boot) {
- initial_boot = false;
- break;
+ if (!q6core_is_adsp_ready()) {
+ dev_dbg(msm_sdw->dev, "ADSP isn't ready\n");
+ timeout = jiffies +
+ msecs_to_jiffies(ADSP_STATE_READY_TIMEOUT_MS);
+ while (!time_after(jiffies, timeout)) {
+ if (!q6core_is_adsp_ready()) {
+ dev_dbg(msm_sdw->dev,
+ "ADSP isn't ready\n");
+ } else {
+ dev_dbg(msm_sdw->dev,
+ "ADSP is ready\n");
+ adsp_ready = true;
+ goto powerup;
+ }
+ }
+ } else {
+ adsp_ready = true;
+ dev_dbg(msm_sdw->dev, "%s: DSP is ready\n", __func__);
+ }
+powerup:
+ if (adsp_ready) {
+ msm_sdw->dev_up = true;
+ msm_sdw_init_reg(msm_sdw->codec);
+ regcache_mark_dirty(msm_sdw->regmap);
+ regcache_sync(msm_sdw->regmap);
+ msm_sdw_set_spkr_mode(msm_sdw->codec,
+ msm_sdw->spkr_mode);
}
- msm_sdw->dev_up = true;
- msm_sdw_init_reg(msm_sdw->codec);
- regcache_mark_dirty(msm_sdw->regmap);
- regcache_sync(msm_sdw->regmap);
- msm_sdw_set_spkr_mode(msm_sdw->codec, msm_sdw->spkr_mode);
break;
default:
break;
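
/*
 * Illustrative only: the readiness loop above re-probes
 * q6core_is_adsp_ready() back to back until the timeout. A bounded poll
 * that sleeps between probes would be gentler on the CPU; the 50 ms
 * interval below is an assumption.
 */
static bool demo_wait_adsp_ready(unsigned int timeout_ms)
{
	unsigned long timeout = jiffies + msecs_to_jiffies(timeout_ms);

	do {
		if (q6core_is_adsp_ready())
			return true;
		msleep(50);
	} while (time_before(jiffies, timeout));

	return q6core_is_adsp_ready();	/* one last probe at the deadline */
}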
@@ -1676,17 +1722,14 @@ static int msm_sdw_codec_probe(struct snd_soc_codec *codec)
msm_sdw_init_reg(codec);
msm_sdw->version = MSM_SDW_VERSION_1_0;
- if (is_ssr_en) {
- msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
- ret = audio_notifier_register("msm_sdw",
- AUDIO_NOTIFIER_ADSP_DOMAIN,
- &msm_sdw->service_nb);
- if (ret < 0)
- dev_err(msm_sdw->dev,
- "%s: Audio notifier register failed ret = %d\n",
- __func__, ret);
- }
-
+ msm_sdw->service_nb.notifier_call = msm_sdw_notifier_service_cb;
+ ret = audio_notifier_register("msm_sdw",
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &msm_sdw->service_nb);
+ if (ret < 0)
+ dev_err(msm_sdw->dev,
+ "%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
return 0;
}
diff --git a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
index 8f7db4d13378..5ecd7870bb3b 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-analog-cdc.c
@@ -93,8 +93,6 @@ static const DECLARE_TLV_DB_SCALE(analog_gain, 0, 25, 1);
static struct snd_soc_dai_driver msm_anlg_cdc_i2s_dai[];
/* By default enable the internal speaker boost */
static bool spkr_boost_en = true;
-static bool initial_boot = true;
-static bool is_ssr_en;
static char on_demand_supply_name[][MAX_ON_DEMAND_SUPPLY_NAME_LENGTH] = {
"cdc-vdd-mic-bias",
@@ -1453,7 +1451,6 @@ static int msm_anlg_cdc_codec_enable_clock_block(struct snd_soc_codec *codec,
} else {
snd_soc_update_bits(codec,
MSM89XX_PMIC_DIGITAL_CDC_TOP_CLK_CTL, 0x0C, 0x00);
- msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_CLK_OFF);
}
return 0;
}
@@ -3500,18 +3497,24 @@ static const struct snd_soc_dapm_widget msm_anlg_cdc_dapm_widgets[] = {
SND_SOC_DAPM_INPUT("AMIC1"),
SND_SOC_DAPM_INPUT("AMIC2"),
SND_SOC_DAPM_INPUT("AMIC3"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX1"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX2"),
- SND_SOC_DAPM_INPUT("PDM_IN_RX3"),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX1", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX2", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_IN("PDM_IN_RX3", "PDM Playback",
+ 0, SND_SOC_NOPM, 0, 0),
SND_SOC_DAPM_OUTPUT("EAR"),
SND_SOC_DAPM_OUTPUT("WSA_SPK OUT"),
SND_SOC_DAPM_OUTPUT("HEADPHONE"),
SND_SOC_DAPM_OUTPUT("SPK_OUT"),
SND_SOC_DAPM_OUTPUT("LINEOUT"),
- SND_SOC_DAPM_OUTPUT("ADC1_OUT"),
- SND_SOC_DAPM_OUTPUT("ADC2_OUT"),
- SND_SOC_DAPM_OUTPUT("ADC3_OUT"),
+ SND_SOC_DAPM_AIF_OUT("ADC1_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ADC2_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
+ SND_SOC_DAPM_AIF_OUT("ADC3_OUT", "PDM Capture",
+ 0, SND_SOC_NOPM, 0, 0),
};
static const struct sdm660_cdc_reg_mask_val msm_anlg_cdc_reg_defaults[] = {
@@ -3772,7 +3775,6 @@ static int msm_anlg_cdc_device_down(struct snd_soc_codec *codec)
snd_soc_write(codec,
MSM89XX_PMIC_ANALOG_SPKR_DAC_CTL, 0x93);
- msm_anlg_cdc_bringup(codec);
atomic_set(&pdata->int_mclk0_enabled, false);
msm_anlg_cdc_dig_notifier_call(codec, DIG_CDC_EVENT_SSR_DOWN);
set_bit(BUS_DOWN, &sdm660_cdc_priv->status_mask);
@@ -3794,14 +3796,6 @@ static int msm_anlg_cdc_device_up(struct snd_soc_codec *codec)
/* delay is required to make sure sound card state updated */
usleep_range(5000, 5100);
- msm_anlg_cdc_codec_init_reg(codec);
- msm_anlg_cdc_update_reg_defaults(codec);
-
- regcache_mark_dirty(codec->component.regmap);
- regcache_sync_region(codec->component.regmap,
- MSM89XX_PMIC_DIGITAL_REVISION1,
- MSM89XX_PMIC_CDC_MAX_REGISTER);
-
snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_SET,
MSM89XX_PMIC_DIGITAL_INT_EN_SET__POR);
snd_soc_write(codec, MSM89XX_PMIC_DIGITAL_INT_EN_CLR,
@@ -3850,10 +3844,6 @@ static int sdm660_cdc_notifier_service_cb(struct notifier_block *nb,
msm_anlg_cdc_device_down(codec);
break;
case AUDIO_NOTIFIER_SERVICE_UP:
- if (initial_boot) {
- initial_boot = false;
- break;
- }
dev_dbg(codec->dev,
"ADSP is about to power up. bring up codec\n");
@@ -4053,17 +4043,16 @@ int msm_anlg_codec_info_create_codec_entry(struct snd_info_entry *codec_root,
return -ENOMEM;
}
sdm660_cdc_priv->version_entry = version_entry;
- if (is_ssr_en) {
- sdm660_cdc_priv->audio_ssr_nb.notifier_call =
- sdm660_cdc_notifier_service_cb;
- ret = audio_notifier_register("pmic_analog_cdc",
- AUDIO_NOTIFIER_ADSP_DOMAIN,
- &sdm660_cdc_priv->audio_ssr_nb);
- if (ret < 0) {
- pr_err("%s: Audio notifier register failed ret = %d\n",
- __func__, ret);
- return ret;
- }
+
+ sdm660_cdc_priv->audio_ssr_nb.notifier_call =
+ sdm660_cdc_notifier_service_cb;
+ ret = audio_notifier_register("pmic_analog_cdc",
+ AUDIO_NOTIFIER_ADSP_DOMAIN,
+ &sdm660_cdc_priv->audio_ssr_nb);
+ if (ret < 0) {
+ pr_err("%s: Audio notifier register failed ret = %d\n",
+ __func__, ret);
+ return ret;
}
return 0;
}
diff --git a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
index 5e078dba0448..f1c3b4050323 100644
--- a/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
+++ b/sound/soc/codecs/sdm660_cdc/msm-digital-cdc.c
@@ -74,6 +74,7 @@ static int msm_digcdc_clock_control(bool flag)
pdata = snd_soc_card_get_drvdata(registered_digcodec->component.card);
+ mutex_lock(&pdata->cdc_int_mclk0_mutex);
if (flag) {
if (atomic_read(&pdata->int_mclk0_enabled) == false) {
pdata->digital_cdc_core_clk.enable = 1;
@@ -83,6 +84,7 @@ static int msm_digcdc_clock_control(bool flag)
if (ret < 0) {
pr_err("%s:failed to enable the MCLK\n",
__func__);
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
return ret;
}
pr_debug("enabled digital codec core clk\n");
@@ -94,6 +96,7 @@ static int msm_digcdc_clock_control(bool flag)
dev_dbg(registered_digcodec->dev,
"disable MCLK, workq to disable set already\n");
}
+ mutex_unlock(&pdata->cdc_int_mclk0_mutex);
return 0;
}
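
The digital-codec hunks wrap the MCLK control path in cdc_int_mclk0_mutex; note that the early error return must also drop the lock. A condensed sketch of that locking shape, with example_ placeholders for the pdata type and clock helpers (only the mutex field name comes from the patch):

    #include <linux/mutex.h>
    #include <linux/types.h>

    struct example_pdata {
            struct mutex cdc_int_mclk0_mutex;
    };

    extern int example_enable_mclk(struct example_pdata *pdata);
    extern void example_schedule_mclk_off(struct example_pdata *pdata);

    static int example_clock_control(struct example_pdata *pdata, bool on)
    {
            int ret = 0;

            mutex_lock(&pdata->cdc_int_mclk0_mutex);
            if (on) {
                    ret = example_enable_mclk(pdata);
                    if (ret < 0) {
                            /* the early error path must drop the lock too */
                            mutex_unlock(&pdata->cdc_int_mclk0_mutex);
                            return ret;
                    }
            } else {
                    example_schedule_mclk_off(pdata);
            }
            mutex_unlock(&pdata->cdc_int_mclk0_mutex);
            return ret;
    }
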
diff --git a/sound/soc/codecs/wsa881x-regmap.c b/sound/soc/codecs/wsa881x-regmap.c
index faa44301286c..20dc3508a5af 100644
--- a/sound/soc/codecs/wsa881x-regmap.c
+++ b/sound/soc/codecs/wsa881x-regmap.c
@@ -216,9 +216,6 @@ static bool wsa881x_readable_register(struct device *dev, unsigned int reg)
static bool wsa881x_volatile_register(struct device *dev, unsigned int reg)
{
- if (cache_always)
- return false;
-
switch (reg) {
case WSA881X_CHIP_ID0:
case WSA881X_CHIP_ID1:
diff --git a/sound/soc/codecs/wsa881x-temp-sensor.c b/sound/soc/codecs/wsa881x-temp-sensor.c
index 0079d0f6cd52..5ab0ecfdc022 100644
--- a/sound/soc/codecs/wsa881x-temp-sensor.c
+++ b/sound/soc/codecs/wsa881x-temp-sensor.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2015, 2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -23,7 +23,7 @@
#define LOW_TEMP_THRESHOLD 5
#define HIGH_TEMP_THRESHOLD 45
#define TEMP_INVALID 0xFFFF
-
+#define WSA881X_TEMP_RETRY 3
/*
* wsa881x_get_temp - get wsa temperature
* @thermal: thermal zone device
@@ -44,6 +44,7 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
int temp_val;
int t1 = T1_TEMP;
int t2 = T2_TEMP;
+ u8 retry = WSA881X_TEMP_RETRY;
if (!thermal)
return -EINVAL;
@@ -60,6 +61,7 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
pr_err("%s: pdata is NULL\n", __func__);
return -EINVAL;
}
+temp_retry:
if (pdata->wsa_temp_reg_read) {
ret = pdata->wsa_temp_reg_read(codec, &reg);
if (ret) {
@@ -101,6 +103,10 @@ int wsa881x_get_temp(struct thermal_zone_device *thermal,
printk_ratelimited("%s: T0: %d is out of range[%d, %d]\n",
__func__, temp_val, LOW_TEMP_THRESHOLD,
HIGH_TEMP_THRESHOLD);
+ if (retry--) {
+ msleep(20);
+ goto temp_retry;
+ }
}
if (temp)
*temp = temp_val;
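
The temp-sensor hunks add a bounded re-read when the computed temperature falls outside the valid window. A compact sketch of the same goto-based retry, reusing the threshold and retry macros from this file; example_temp_reg_read and struct example_dev are placeholders:

    #include <linux/delay.h>
    #include <linux/types.h>

    struct example_dev;
    extern int example_temp_reg_read(struct example_dev *dev);

    static int example_read_temp(struct example_dev *dev, int *temp)
    {
            u8 retry = WSA881X_TEMP_RETRY;
            int val;

    temp_retry:
            val = example_temp_reg_read(dev);       /* placeholder read */
            if ((val < LOW_TEMP_THRESHOLD || val > HIGH_TEMP_THRESHOLD) &&
                retry--) {
                    msleep(20);     /* let the sensor settle, then re-read */
                    goto temp_retry;
            }
            *temp = val;
            return 0;
    }
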
diff --git a/sound/soc/codecs/wsa881x.c b/sound/soc/codecs/wsa881x.c
index ba74175dbe10..6addbde34545 100644
--- a/sound/soc/codecs/wsa881x.c
+++ b/sound/soc/codecs/wsa881x.c
@@ -78,7 +78,6 @@ enum {
WSA881X_DEV_UP,
};
-bool cache_always;
/*
* Private data Structure for wsa881x. All parameters related to
* WSA881X codec needs to be defined here.
@@ -987,6 +986,7 @@ static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
{
struct wsa881x_priv *wsa881x = snd_soc_codec_get_drvdata(codec);
struct swr_device *dev;
+ u8 retry = WSA881X_NUM_RETRY;
u8 devnum = 0;
if (!wsa881x) {
@@ -995,7 +995,12 @@ static int32_t wsa881x_temp_reg_read(struct snd_soc_codec *codec,
}
dev = wsa881x->swr_slave;
if (dev && (wsa881x->state == WSA881X_DEV_DOWN)) {
- if (swr_get_logical_dev_num(dev, dev->addr, &devnum)) {
+ while (swr_get_logical_dev_num(dev, dev->addr, &devnum) &&
+ retry--) {
+ /* Retry after 1 msec delay */
+ usleep_range(1000, 1100);
+ }
+ if (retry == 0) {
dev_err(codec->dev,
"%s get devnum %d for dev addr %lx failed\n",
__func__, devnum, dev->addr);
@@ -1089,6 +1094,7 @@ static int wsa881x_swr_startup(struct swr_device *swr_dev)
{
int ret = 0;
u8 devnum = 0;
+ u8 retry = WSA881X_NUM_RETRY;
struct wsa881x_priv *wsa881x;
wsa881x = swr_get_dev_data(swr_dev);
@@ -1103,11 +1109,16 @@ static int wsa881x_swr_startup(struct swr_device *swr_dev)
* as per HW requirement.
*/
usleep_range(5000, 5010);
- ret = swr_get_logical_dev_num(swr_dev, swr_dev->addr, &devnum);
- if (ret) {
- dev_dbg(&swr_dev->dev, "%s failed to get devnum, err:%d\n",
- __func__, ret);
- goto err;
+ while (swr_get_logical_dev_num(swr_dev, swr_dev->addr, &devnum) &&
+ retry--) {
+ /* Retry after 1 msec delay */
+ usleep_range(1000, 1100);
+ }
+ if (retry == 0) {
+ dev_err(&swr_dev->dev,
+ "%s get devnum %d for dev addr %lx failed\n",
+ __func__, devnum, swr_dev->addr);
+ return -EINVAL;
}
swr_dev->dev_num = devnum;
@@ -1226,9 +1237,6 @@ static int wsa881x_swr_probe(struct swr_device *pdev)
if (ret)
goto err;
}
-
- cache_always = of_property_read_bool(pdev->dev.of_node,
- "qcom,cache-always");
wsa881x_gpio_ctrl(wsa881x, true);
wsa881x->state = WSA881X_DEV_UP;
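
Both wsa881x hunks poll swr_get_logical_dev_num() with a bounded retry. The patch's `while (... && retry--)` followed by `if (retry == 0)` is fragile at the boundary with a u8 post-decrement: retry wraps to 255 when every attempt fails, and a success on the final attempt leaves retry at 0. The counting loop below expresses the same intent unambiguously; it is an illustrative alternative, not the code the patch applies.

    #include <linux/delay.h>
    #include <linux/errno.h>

    static int example_get_devnum(struct swr_device *swr_dev, u8 *devnum)
    {
            int i;

            for (i = 0; i < WSA881X_NUM_RETRY; i++) {
                    if (!swr_get_logical_dev_num(swr_dev, swr_dev->addr,
                                                 devnum))
                            return 0;               /* enumerated */
                    usleep_range(1000, 1100);       /* retry after ~1 ms */
            }
            return -EINVAL;
    }
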
diff --git a/sound/soc/codecs/wsa881x.h b/sound/soc/codecs/wsa881x.h
index 178237555c54..9bd9f9587a2e 100644
--- a/sound/soc/codecs/wsa881x.h
+++ b/sound/soc/codecs/wsa881x.h
@@ -20,7 +20,6 @@
#define WSA881X_MAX_SWR_PORTS 4
-extern bool cache_always;
extern int wsa881x_set_channel_map(struct snd_soc_codec *codec, u8 *port,
u8 num_port, unsigned int *ch_mask,
unsigned int *ch_rate);
diff --git a/sound/soc/msm/Kconfig b/sound/soc/msm/Kconfig
index 337fc15aa28e..eb650d19a97c 100644
--- a/sound/soc/msm/Kconfig
+++ b/sound/soc/msm/Kconfig
@@ -112,6 +112,7 @@ config SND_SOC_INT_CODEC
select MSM_CDC_PINCTRL
select SND_SOC_MSM_SDW
select SND_SOC_SDM660_CDC
+ select SND_SOC_MSM_HDMI_CODEC_RX
select QTI_PP
select DTS_SRS_TM
select DOLBY_LICENSE
@@ -143,6 +144,7 @@ config SND_SOC_EXT_CODEC
select SND_SOC_WCD9335
select SND_SOC_WCD934X
select SND_SOC_WSA881X
+ select SND_SOC_MSM_HDMI_CODEC_RX
select MFD_CORE
select QTI_PP
select DTS_SRS_TM
diff --git a/sound/soc/msm/msm-cpe-lsm.c b/sound/soc/msm/msm-cpe-lsm.c
index b2008d6da2a1..48f8e22e7faa 100644
--- a/sound/soc/msm/msm-cpe-lsm.c
+++ b/sound/soc/msm/msm-cpe-lsm.c
@@ -2152,7 +2152,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "LSM_REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_model, (void *)arg,
@@ -2285,7 +2286,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SNDRV_LSM_SET_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&det_params, (void *) arg,
@@ -2312,14 +2314,16 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (!arg) {
dev_err(rtd->dev,
"%s: %s: No Param data to set\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data, arg,
@@ -2327,7 +2331,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "p_data", sizeof(p_data));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -2335,7 +2340,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = p_data.num_params *
@@ -2346,12 +2352,15 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %zd\n",
__func__, "SET_MODULE_PARAMS", p_size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
params = kzalloc(p_size, GFP_KERNEL);
- if (!params)
- return -ENOMEM;
+ if (!params) {
+ err = -ENOMEM;
+ goto done;
+ }
if (copy_from_user(params, p_data.params,
p_data.data_size)) {
@@ -2359,7 +2368,8 @@ static int msm_cpe_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %d\n",
__func__, "params", p_data.data_size);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
err = msm_cpe_lsm_process_params(substream, &p_data, params);
@@ -2470,7 +2480,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "LSM_REG_SND_MODEL_V2_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
dev_dbg(rtd->dev,
@@ -2690,7 +2701,9 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SNDRV_LSM_SET_PARAMS32");
- return -EINVAL;
+
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&det_params32, arg,
@@ -2734,7 +2747,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data_32, arg,
@@ -2743,7 +2757,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "SET_MODULE_PARAMS_32",
sizeof(p_data_32));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_data.params = compat_ptr(p_data_32.params);
@@ -2755,7 +2770,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (p_data.data_size !=
@@ -2764,21 +2780,25 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.data_size);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = sizeof(struct lsm_params_info_32) *
p_data.num_params;
params32 = kzalloc(p_size, GFP_KERNEL);
- if (!params32)
- return -ENOMEM;
+ if (!params32) {
+ err = -ENOMEM;
+ goto done;
+ }
p_size = sizeof(struct lsm_params_info) * p_data.num_params;
params = kzalloc(p_size, GFP_KERNEL);
if (!params) {
kfree(params32);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params32, p_data.params,
@@ -2788,7 +2808,8 @@ static int msm_cpe_lsm_ioctl_compat(struct snd_pcm_substream *substream,
__func__, "params32", p_data.data_size);
kfree(params32);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_info_32 = (struct lsm_params_info_32 *) params32;
diff --git a/sound/soc/msm/msm-dai-fe.c b/sound/soc/msm/msm-dai-fe.c
index 44a6a245c7a2..a7a25ea070da 100644
--- a/sound/soc/msm/msm-dai-fe.c
+++ b/sound/soc/msm/msm-dai-fe.c
@@ -964,7 +964,7 @@ static struct snd_soc_dai_driver msm_fe_dais[] = {
.formats = (SNDRV_PCM_FMTBIT_S16_LE |
SNDRV_PCM_FMTBIT_S24_LE),
.channels_min = 1,
- .channels_max = 2,
+ .channels_max = 4,
.rate_min = 8000,
.rate_max = 192000,
},
diff --git a/sound/soc/msm/qdsp6v2/msm-lsm-client.c b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
index 3fa14d0113ef..ec4380036047 100644
--- a/sound/soc/msm/qdsp6v2/msm-lsm-client.c
+++ b/sound/soc/msm/qdsp6v2/msm-lsm-client.c
@@ -86,6 +86,7 @@ struct lsm_priv {
atomic_t buf_count;
atomic_t read_abort;
wait_queue_head_t period_wait;
+ struct mutex lsm_api_lock;
int appl_cnt;
int dma_write;
};
@@ -954,10 +955,18 @@ static int msm_lsm_ioctl_shared(struct snd_pcm_substream *substream,
dev_dbg(rtd->dev, "%s: Get event status\n", __func__);
atomic_set(&prtd->event_wait_stop, 0);
+
+ /*
+ * Release the api lock before wait to allow
+ * other IOCTLs to be invoked while waiting
+ * for event
+ */
+ mutex_unlock(&prtd->lsm_api_lock);
rc = wait_event_freezable(prtd->event_wait,
(cmpxchg(&prtd->event_avail, 1, 0) ||
(xchg = atomic_cmpxchg(&prtd->event_wait_stop,
1, 0))));
+ mutex_lock(&prtd->lsm_api_lock);
dev_dbg(rtd->dev, "%s: wait_event_freezable %d event_wait_stop %d\n",
__func__, rc, xchg);
if (!rc && !xchg) {
@@ -1281,6 +1290,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
rtd = substream->private_data;
prtd = runtime->private_data;
+ mutex_lock(&prtd->lsm_api_lock);
+
switch (cmd) {
case SNDRV_LSM_EVENT_STATUS: {
struct snd_lsm_event_status *user = NULL, userarg32;
@@ -1288,7 +1299,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
if (copy_from_user(&userarg32, arg, sizeof(userarg32))) {
dev_err(rtd->dev, "%s: err copyuser ioctl %s\n",
__func__, "SNDRV_LSM_EVENT_STATUS");
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg32.payload_size >
@@ -1296,7 +1308,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg32.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(*user) + userarg32.payload_size;
@@ -1305,7 +1318,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
} else {
cmd = SNDRV_LSM_EVENT_STATUS;
user->payload_size = userarg32.payload_size;
@@ -1421,7 +1435,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_modelv232, arg,
@@ -1462,7 +1477,7 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SET_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
}
if (copy_from_user(&det_params32, arg,
@@ -1505,7 +1520,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS_32");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data_32, arg,
@@ -1514,7 +1530,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "SET_MODULE_PARAMS_32",
sizeof(p_data_32));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_data.params = compat_ptr(p_data_32.params);
@@ -1526,7 +1543,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (p_data.data_size !=
@@ -1535,7 +1553,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %d\n",
__func__, "SET_MODULE_PARAMS_32",
p_data.data_size);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = sizeof(struct lsm_params_info_32) *
@@ -1546,7 +1565,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: no memory for params32, size = %zd\n",
__func__, p_size);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
p_size = sizeof(struct lsm_params_info) * p_data.num_params;
@@ -1556,7 +1576,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
"%s: no memory for params, size = %zd\n",
__func__, p_size);
kfree(params32);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params32, p_data.params,
@@ -1566,7 +1587,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
__func__, "params32", p_data.data_size);
kfree(params32);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
p_info_32 = (struct lsm_params_info_32 *) params32;
@@ -1609,6 +1631,8 @@ static int msm_lsm_ioctl_compat(struct snd_pcm_substream *substream,
err = msm_lsm_ioctl_shared(substream, cmd, arg);
break;
}
+done:
+ mutex_unlock(&prtd->lsm_api_lock);
return err;
}
#else
@@ -1633,6 +1657,7 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
prtd = runtime->private_data;
rtd = substream->private_data;
+ mutex_lock(&prtd->lsm_api_lock);
switch (cmd) {
case SNDRV_LSM_REG_SND_MODEL_V2: {
struct snd_lsm_sound_model_v2 snd_model_v2;
@@ -1641,7 +1666,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "REG_SND_MODEL_V2");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&snd_model_v2, arg, sizeof(snd_model_v2))) {
@@ -1668,7 +1694,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if using topology\n",
__func__, "SET_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
pr_debug("%s: SNDRV_LSM_SET_PARAMS\n", __func__);
@@ -1689,7 +1716,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: LSM_SET_PARAMS failed, err %d\n",
__func__, err);
- return err;
+
+ goto done;
}
case SNDRV_LSM_SET_MODULE_PARAMS: {
@@ -1701,7 +1729,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: not supported if not using topology\n",
__func__, "SET_MODULE_PARAMS");
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
if (copy_from_user(&p_data, arg,
@@ -1709,7 +1738,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: %s: copy_from_user failed, size = %zd\n",
__func__, "p_data", sizeof(p_data));
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (p_data.num_params > LSM_PARAMS_MAX) {
@@ -1717,7 +1747,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid num_params %d\n",
__func__, "SET_MODULE_PARAMS",
p_data.num_params);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
p_size = p_data.num_params *
@@ -1728,7 +1759,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: Invalid size %zd\n",
__func__, "SET_MODULE_PARAMS", p_size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
params = kzalloc(p_size, GFP_KERNEL);
@@ -1736,7 +1768,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: no memory for params\n",
__func__);
- return -ENOMEM;
+ err = -ENOMEM;
+ goto done;
}
if (copy_from_user(params, p_data.params,
@@ -1745,7 +1778,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
"%s: %s: copy_from_user failed, size = %d\n",
__func__, "params", p_data.data_size);
kfree(params);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
err = msm_lsm_process_params(substream, &p_data, params);
@@ -1765,7 +1799,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: err copyuser event_status\n",
__func__);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
}
if (userarg.payload_size >
@@ -1773,7 +1808,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
pr_err("%s: payload_size %d is invalid, max allowed = %d\n",
__func__, userarg.payload_size,
LISTEN_MAX_STATUS_PAYLOAD_SIZE);
- return -EINVAL;
+ err = -EINVAL;
+ goto done;
}
size = sizeof(struct snd_lsm_event_status) +
@@ -1783,7 +1819,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
dev_err(rtd->dev,
"%s: Allocation failed event status size %d\n",
__func__, size);
- return -EFAULT;
+ err = -EFAULT;
+ goto done;
} else {
user->payload_size = userarg.payload_size;
err = msm_lsm_ioctl_shared(substream, cmd, user);
@@ -1806,7 +1843,7 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
if (err)
dev_err(rtd->dev,
"%s: lsmevent failed %d", __func__, err);
- return err;
+ goto done;
}
case SNDRV_LSM_EVENT_STATUS_V3: {
@@ -1873,6 +1910,8 @@ static int msm_lsm_ioctl(struct snd_pcm_substream *substream,
err = msm_lsm_ioctl_shared(substream, cmd, arg);
break;
}
+done:
+ mutex_unlock(&prtd->lsm_api_lock);
return err;
}
@@ -1889,6 +1928,7 @@ static int msm_lsm_open(struct snd_pcm_substream *substream)
__func__);
return -ENOMEM;
}
+ mutex_init(&prtd->lsm_api_lock);
spin_lock_init(&prtd->event_lock);
init_waitqueue_head(&prtd->event_wait);
init_waitqueue_head(&prtd->period_wait);
@@ -2048,6 +2088,7 @@ static int msm_lsm_close(struct snd_pcm_substream *substream)
kfree(prtd->event_status);
prtd->event_status = NULL;
spin_unlock_irqrestore(&prtd->event_lock, flags);
+ mutex_destroy(&prtd->lsm_api_lock);
kfree(prtd);
runtime->private_data = NULL;
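
The msm-lsm-client hunks serialize the ioctl paths with a per-stream lsm_api_lock, and deliberately release it around the blocking event wait so that a concurrent ioctl (for example the one that stops the wait) can still make progress. A minimal sketch of that shape; the example_ types and the placeholder command value are assumptions, only the lock and wait-queue names come from the patch:

    #include <linux/errno.h>
    #include <linux/freezer.h>
    #include <linux/mutex.h>
    #include <linux/wait.h>

    struct example_prtd {
            struct mutex lsm_api_lock;
            wait_queue_head_t event_wait;
            int event_avail;
    };

    static int example_ioctl(struct example_prtd *prtd, unsigned int cmd)
    {
            int err = 0;

            mutex_lock(&prtd->lsm_api_lock);
            switch (cmd) {
            case 0: /* placeholder for the event-status command */
                    /* drop the lock so other ioctls run during the wait */
                    mutex_unlock(&prtd->lsm_api_lock);
                    err = wait_event_freezable(prtd->event_wait,
                                               prtd->event_avail);
                    mutex_lock(&prtd->lsm_api_lock);
                    break;
            default:
                    err = -ENOTTY;
                    break;
            }
            mutex_unlock(&prtd->lsm_api_lock);
            return err;
    }
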
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
index cfade420c509..3677a06c65ae 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-routing-v2.c
@@ -10016,6 +10016,9 @@ static int msm_audio_sound_focus_derive_port_id(struct snd_kcontrol *kcontrol,
} else if (!strcmp(kcontrol->id.name + strlen(prefix),
"TERT_MI2S")) {
*port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
+ } else if (!strcmp(kcontrol->id.name + strlen(prefix),
+ "INT3_MI2S")) {
+ *port_id = AFE_PORT_ID_INT3_MI2S_TX;
} else {
pr_err("%s: mixer ctl name=%s, could not derive valid port id\n",
__func__, kcontrol->id.name);
@@ -10220,6 +10223,36 @@ static const struct snd_kcontrol_new msm_source_tracking_controls[] = {
.info = msm_source_tracking_info,
.get = msm_audio_source_tracking_get,
},
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sound Focus Voice Tx INT3_MI2S",
+ .info = msm_sound_focus_info,
+ .get = msm_voice_sound_focus_get,
+ .put = msm_voice_sound_focus_put,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Source Tracking Voice Tx INT3_MI2S",
+ .info = msm_source_tracking_info,
+ .get = msm_voice_source_tracking_get,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READWRITE,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Sound Focus Audio Tx INT3_MI2S",
+ .info = msm_sound_focus_info,
+ .get = msm_audio_sound_focus_get,
+ .put = msm_audio_sound_focus_put,
+ },
+ {
+ .access = SNDRV_CTL_ELEM_ACCESS_READ,
+ .iface = SNDRV_CTL_ELEM_IFACE_MIXER,
+ .name = "Source Tracking Audio Tx INT3_MI2S",
+ .info = msm_source_tracking_info,
+ .get = msm_audio_source_tracking_get,
+ },
};
static int spkr_prot_put_vi_lch_port(struct snd_kcontrol *kcontrol,
@@ -14668,6 +14701,7 @@ static void __exit msm_soc_routing_platform_exit(void)
{
msm_routing_delete_cal_data();
memset(&be_dai_name_table, 0, sizeof(be_dai_name_table));
+ mutex_destroy(&routing_lock);
platform_driver_unregister(&msm_routing_pcm_driver);
}
module_exit(msm_soc_routing_platform_exit);
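
The routing hunk above extends the mixer-control-name parsing that maps a control-name suffix to an AFE Tx port. A sketch of that derivation, assuming the AFE_PORT_ID_* constants from this driver's headers; the function name and prefix-length parameter are placeholders:

    #include <linux/errno.h>
    #include <linux/string.h>

    static int example_derive_port_id(const char *ctl_name, size_t prefix_len,
                                      int *port_id)
    {
            const char *suffix = ctl_name + prefix_len;

            if (!strcmp(suffix, "TERT_MI2S"))
                    *port_id = AFE_PORT_ID_TERTIARY_MI2S_TX;
            else if (!strcmp(suffix, "INT3_MI2S"))
                    *port_id = AFE_PORT_ID_INT3_MI2S_TX; /* added here */
            else
                    return -EINVAL;
            return 0;
    }
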
diff --git a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
index c444a27c06e6..b2387a746f61 100644
--- a/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
+++ b/sound/soc/msm/qdsp6v2/msm-pcm-voip-v2.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2012-2016, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2012-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -814,20 +814,25 @@ static int msm_pcm_playback_copy(struct snd_pcm_substream *substream, int a,
if (prtd->mode == MODE_PCM) {
ret = copy_from_user(&buf_node->frame.voc_pkt,
buf, count);
+ if (ret) {
+ pr_err("%s: copy from user failed %d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
buf_node->frame.pktlen = count;
} else {
ret = copy_from_user(&buf_node->frame,
buf, count);
+ if (ret) {
+ pr_err("%s: copy from user failed %d\n",
+ __func__, ret);
+ return -EFAULT;
+ }
if (buf_node->frame.pktlen >= count)
buf_node->frame.pktlen = count -
(sizeof(buf_node->frame.frm_hdr) +
sizeof(buf_node->frame.pktlen));
}
- if (ret) {
- pr_err("%s: copy from user failed %d\n",
- __func__, ret);
- return -EFAULT;
- }
spin_lock_irqsave(&prtd->dsp_lock, dsp_flags);
list_add_tail(&buf_node->list, &prtd->in_queue);
spin_unlock_irqrestore(&prtd->dsp_lock, dsp_flags);
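
The VoIP hunk moves the copy_from_user() check ahead of any use of the copied data, since frame.pktlen is only populated by the copy itself. A sketch of the corrected ordering, with a hypothetical frame layout and an added bounds check for self-containment:

    #include <linux/errno.h>
    #include <linux/types.h>
    #include <linux/uaccess.h>

    struct example_frame {          /* hypothetical layout */
            u32 frm_hdr;
            u32 pktlen;
            u8  voc_pkt[320];
    };

    static int example_playback_copy(struct example_frame *frame,
                                     const void __user *buf, size_t count)
    {
            if (count > sizeof(*frame))
                    return -EINVAL;
            if (copy_from_user(frame, buf, count))
                    return -EFAULT; /* fail before frame is ever read */
            /* pktlen is meaningful only after a successful copy */
            if (frame->pktlen >= count)
                    frame->pktlen = count - (sizeof(frame->frm_hdr) +
                                             sizeof(frame->pktlen));
            return 0;
    }
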
diff --git a/sound/soc/msm/sdm660-common.c b/sound/soc/msm/sdm660-common.c
index 3a8fdf8f1256..edf5719fbf3e 100644
--- a/sound/soc/msm/sdm660-common.c
+++ b/sound/soc/msm/sdm660-common.c
@@ -36,6 +36,11 @@ struct dev_config {
u32 channels;
};
+enum {
+ DP_RX_IDX,
+ EXT_DISP_RX_IDX_MAX,
+};
+
/* TDM default config */
static struct dev_config tdm_rx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
{ /* PRI TDM */
@@ -124,6 +129,10 @@ static struct dev_config tdm_tx_cfg[TDM_INTERFACE_MAX][TDM_PORT_MAX] = {
}
};
+/* Default configuration of external display BE */
+static struct dev_config ext_disp_rx_cfg[] = {
+ [DP_RX_IDX] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+};
static struct dev_config usb_rx_cfg = {
.sample_rate = SAMPLING_RATE_48KHZ,
.bit_format = SNDRV_PCM_FORMAT_S16_LE,
@@ -251,6 +260,8 @@ static const char *const mi2s_ch_text[] = {"One", "Two", "Three", "Four",
"Eight"};
static char const *bit_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
"S32_LE"};
+static char const *mi2s_format_text[] = {"S16_LE", "S24_LE", "S24_3LE",
+ "S32_LE"};
static char const *tdm_ch_text[] = {"One", "Two", "Three", "Four",
"Five", "Six", "Seven", "Eight"};
static char const *tdm_bit_format_text[] = {"S16_LE", "S24_LE", "S32_LE"};
@@ -264,7 +275,11 @@ static char const *usb_sample_rate_text[] = {"KHZ_8", "KHZ_11P025",
"KHZ_16", "KHZ_22P05",
"KHZ_32", "KHZ_44P1", "KHZ_48",
"KHZ_96", "KHZ_192", "KHZ_384"};
+static char const *ext_disp_bit_format_text[] = {"S16_LE", "S24_LE"};
+static char const *ext_disp_sample_rate_text[] = {"KHZ_48", "KHZ_96",
+ "KHZ_192"};
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_chs, ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(proxy_rx_chs, ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_aux_pcm_rx_sample_rate, auxpcm_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_aux_pcm_rx_sample_rate, auxpcm_rate_text);
@@ -282,6 +297,14 @@ static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_sample_rate, mi2s_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_sample_rate, mi2s_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_rx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(tert_mi2s_tx_format, mi2s_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(quat_mi2s_tx_format, mi2s_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_rx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(prim_mi2s_tx_chs, mi2s_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(sec_mi2s_rx_chs, mi2s_ch_text);
@@ -294,8 +317,11 @@ static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_chs, usb_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_chs, usb_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_format, bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_format, bit_format_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_format, ext_disp_bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_rx_sample_rate, usb_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(usb_tx_sample_rate, usb_sample_rate_text);
+static SOC_ENUM_SINGLE_EXT_DECL(ext_disp_rx_sample_rate,
+ ext_disp_sample_rate_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_chs, tdm_ch_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_format, tdm_bit_format_text);
static SOC_ENUM_SINGLE_EXT_DECL(tdm_tx_sample_rate, tdm_sample_rate_text);
@@ -667,6 +693,54 @@ static int tdm_get_format_val(int format)
return value;
}
+static int mi2s_get_format(int value)
+{
+ int format = 0;
+
+ switch (value) {
+ case 0:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ case 1:
+ format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 2:
+ format = SNDRV_PCM_FORMAT_S24_3LE;
+ break;
+ case 3:
+ format = SNDRV_PCM_FORMAT_S32_LE;
+ break;
+ default:
+ format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ return format;
+}
+
+static int mi2s_get_format_value(int format)
+{
+ int value = 0;
+
+ switch (format) {
+ case SNDRV_PCM_FORMAT_S16_LE:
+ value = 0;
+ break;
+ case SNDRV_PCM_FORMAT_S24_LE:
+ value = 1;
+ break;
+ case SNDRV_PCM_FORMAT_S24_3LE:
+ value = 2;
+ break;
+ case SNDRV_PCM_FORMAT_S32_LE:
+ value = 3;
+ break;
+ default:
+ value = 0;
+ break;
+ }
+ return value;
+}
+
static int tdm_rx_format_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1132,6 +1206,78 @@ static int mi2s_tx_sample_rate_get(struct snd_kcontrol *kcontrol,
return 0;
}
+static int mi2s_tx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_tx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d] _tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_tx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_tx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_tx_format = %d, item = %d\n", __func__,
+ idx, mi2s_tx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ mi2s_rx_cfg[idx].bit_format =
+ mi2s_get_format(ucontrol->value.enumerated.item[0]);
+
+ pr_debug("%s: idx[%d] _rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
+static int mi2s_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = mi2s_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.enumerated.item[0] =
+ mi2s_get_format_value(mi2s_rx_cfg[idx].bit_format);
+
+ pr_debug("%s: idx[%d]_rx_format = %d, item = %d\n", __func__,
+ idx, mi2s_rx_cfg[idx].bit_format,
+ ucontrol->value.enumerated.item[0]);
+
+ return 0;
+}
+
static int msm_mi2s_rx_ch_get(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
@@ -1514,6 +1660,162 @@ static int usb_audio_tx_format_put(struct snd_kcontrol *kcontrol,
return rc;
}
+static int ext_disp_get_port_idx(struct snd_kcontrol *kcontrol)
+{
+ int idx;
+
+ if (strnstr(kcontrol->id.name, "Display Port RX",
+ sizeof("Display Port RX")))
+ idx = DP_RX_IDX;
+ else {
+ pr_err("%s: unsupported BE: %s",
+ __func__, kcontrol->id.name);
+ idx = -EINVAL;
+ }
+
+ return idx;
+}
+
+static int ext_disp_rx_format_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ext_disp_rx_cfg[idx].bit_format) {
+ case SNDRV_PCM_FORMAT_S24_LE:
+ ucontrol->value.integer.value[0] = 1;
+ break;
+
+ case SNDRV_PCM_FORMAT_S16_LE:
+ default:
+ ucontrol->value.integer.value[0] = 0;
+ break;
+ }
+
+ pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+ __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+ ucontrol->value.integer.value[0]);
+ return 0;
+}
+
+static int ext_disp_rx_format_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 1:
+ ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S24_LE;
+ break;
+ case 0:
+ default:
+ ext_disp_rx_cfg[idx].bit_format = SNDRV_PCM_FORMAT_S16_LE;
+ break;
+ }
+ pr_debug("%s: ext_disp_rx[%d].format = %d, ucontrol value = %ld\n",
+ __func__, idx, ext_disp_rx_cfg[idx].bit_format,
+ ucontrol->value.integer.value[0]);
+
+ return 0;
+}
+
+static int ext_disp_rx_ch_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ucontrol->value.integer.value[0] =
+ ext_disp_rx_cfg[idx].channels - 2;
+
+ pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].channels);
+
+ return 0;
+}
+
+static int ext_disp_rx_ch_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ ext_disp_rx_cfg[idx].channels =
+ ucontrol->value.integer.value[0] + 2;
+
+ pr_debug("%s: ext_disp_rx[%d].ch = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].channels);
+ return 1;
+}
+
+static int ext_disp_rx_sample_rate_get(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int sample_rate_val;
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ext_disp_rx_cfg[idx].sample_rate) {
+ case SAMPLING_RATE_192KHZ:
+ sample_rate_val = 2;
+ break;
+
+ case SAMPLING_RATE_96KHZ:
+ sample_rate_val = 1;
+ break;
+
+ case SAMPLING_RATE_48KHZ:
+ default:
+ sample_rate_val = 0;
+ break;
+ }
+
+ ucontrol->value.integer.value[0] = sample_rate_val;
+ pr_debug("%s: ext_disp_rx[%d].sample_rate = %d\n", __func__,
+ idx, ext_disp_rx_cfg[idx].sample_rate);
+
+ return 0;
+}
+
+static int ext_disp_rx_sample_rate_put(struct snd_kcontrol *kcontrol,
+ struct snd_ctl_elem_value *ucontrol)
+{
+ int idx = ext_disp_get_port_idx(kcontrol);
+
+ if (idx < 0)
+ return idx;
+
+ switch (ucontrol->value.integer.value[0]) {
+ case 2:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_192KHZ;
+ break;
+ case 1:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_96KHZ;
+ break;
+ case 0:
+ default:
+ ext_disp_rx_cfg[idx].sample_rate = SAMPLING_RATE_48KHZ;
+ break;
+ }
+
+ pr_debug("%s: control value = %ld, ext_disp_rx[%d].sample_rate = %d\n",
+ __func__, ucontrol->value.integer.value[0], idx,
+ ext_disp_rx_cfg[idx].sample_rate);
+ return 0;
+}
+
const struct snd_kcontrol_new msm_common_snd_controls[] = {
SOC_ENUM_EXT("PROXY_RX Channels", proxy_rx_chs,
proxy_rx_ch_get, proxy_rx_ch_put),
@@ -1565,6 +1867,30 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
SOC_ENUM_EXT("QUAT_MI2S_TX SampleRate", quat_mi2s_tx_sample_rate,
mi2s_tx_sample_rate_get,
mi2s_tx_sample_rate_put),
+ SOC_ENUM_EXT("PRIM_MI2S_RX Format", prim_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_RX Format", sec_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_RX Format", tert_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_RX Format", quat_mi2s_rx_format,
+ mi2s_rx_format_get,
+ mi2s_rx_format_put),
+ SOC_ENUM_EXT("PRIM_MI2S_TX Format", prim_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("SEC_MI2S_TX Format", sec_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("TERT_MI2S_TX Format", tert_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
+ SOC_ENUM_EXT("QUAT_MI2S_TX Format", quat_mi2s_tx_format,
+ mi2s_tx_format_get,
+ mi2s_tx_format_put),
SOC_ENUM_EXT("PRIM_MI2S_RX Channels", prim_mi2s_rx_chs,
msm_mi2s_rx_ch_get, msm_mi2s_rx_ch_put),
SOC_ENUM_EXT("PRIM_MI2S_TX Channels", prim_mi2s_tx_chs,
@@ -1585,16 +1911,23 @@ const struct snd_kcontrol_new msm_common_snd_controls[] = {
usb_audio_rx_ch_get, usb_audio_rx_ch_put),
SOC_ENUM_EXT("USB_AUDIO_TX Channels", usb_tx_chs,
usb_audio_tx_ch_get, usb_audio_tx_ch_put),
+ SOC_ENUM_EXT("Display Port RX Channels", ext_disp_rx_chs,
+ ext_disp_rx_ch_get, ext_disp_rx_ch_put),
SOC_ENUM_EXT("USB_AUDIO_RX Format", usb_rx_format,
usb_audio_rx_format_get, usb_audio_rx_format_put),
SOC_ENUM_EXT("USB_AUDIO_TX Format", usb_tx_format,
usb_audio_tx_format_get, usb_audio_tx_format_put),
+ SOC_ENUM_EXT("Display Port RX Bit Format", ext_disp_rx_format,
+ ext_disp_rx_format_get, ext_disp_rx_format_put),
SOC_ENUM_EXT("USB_AUDIO_RX SampleRate", usb_rx_sample_rate,
usb_audio_rx_sample_rate_get,
usb_audio_rx_sample_rate_put),
SOC_ENUM_EXT("USB_AUDIO_TX SampleRate", usb_tx_sample_rate,
usb_audio_tx_sample_rate_get,
usb_audio_tx_sample_rate_put),
+ SOC_ENUM_EXT("Display Port RX SampleRate", ext_disp_rx_sample_rate,
+ ext_disp_rx_sample_rate_get,
+ ext_disp_rx_sample_rate_put),
SOC_ENUM_EXT("PRI_TDM_RX_0 SampleRate", tdm_rx_sample_rate,
tdm_rx_sample_rate_get,
tdm_rx_sample_rate_put),
@@ -1705,6 +2038,23 @@ static void param_set_mask(struct snd_pcm_hw_params *p, int n, unsigned bit)
}
}
+static int msm_ext_disp_get_idx_from_beid(int32_t be_id)
+{
+ int idx;
+
+ switch (be_id) {
+ case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+ idx = DP_RX_IDX;
+ break;
+ default:
+ pr_err("%s: Incorrect ext_disp be_id %d\n", __func__, be_id);
+ idx = -EINVAL;
+ break;
+ }
+
+ return idx;
+}
+
/**
* msm_common_be_hw_params_fixup - updates settings of ALSA BE hw params.
*
@@ -1722,6 +2072,7 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
struct snd_interval *channels = hw_param_interval(params,
SNDRV_PCM_HW_PARAM_CHANNELS);
int rc = 0;
+ int idx;
pr_debug("%s: format = %d, rate = %d\n",
__func__, params_format(params), params_rate(params));
@@ -1741,6 +2092,21 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
channels->min = channels->max = usb_tx_cfg.channels;
break;
+ case MSM_BACKEND_DAI_DISPLAY_PORT_RX:
+ idx = msm_ext_disp_get_idx_from_beid(dai_link->be_id);
+ if (IS_ERR_VALUE(idx)) {
+ pr_err("%s: Incorrect ext disp idx %d\n",
+ __func__, idx);
+ rc = idx;
+ break;
+ }
+
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ ext_disp_rx_cfg[idx].bit_format);
+ rate->min = rate->max = ext_disp_rx_cfg[idx].sample_rate;
+ channels->min = channels->max = ext_disp_rx_cfg[idx].channels;
+ break;
+
case MSM_BACKEND_DAI_AFE_PCM_RX:
channels->min = channels->max = proxy_rx_cfg.channels;
rate->min = rate->max = SAMPLING_RATE_48KHZ;
@@ -1870,48 +2236,64 @@ int msm_common_be_hw_params_fixup(struct snd_soc_pcm_runtime *rtd,
rate->min = rate->max = mi2s_rx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[PRIM_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[PRIM_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_PRI_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[PRIM_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[PRIM_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[PRIM_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[SEC_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[SEC_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_SECONDARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[SEC_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[SEC_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[SEC_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[TERT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[TERT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_TERTIARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[TERT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[TERT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[TERT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_RX:
rate->min = rate->max = mi2s_rx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_rx_cfg[QUAT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_rx_cfg[QUAT_MI2S].bit_format);
break;
case MSM_BACKEND_DAI_QUATERNARY_MI2S_TX:
rate->min = rate->max = mi2s_tx_cfg[QUAT_MI2S].sample_rate;
channels->min = channels->max =
mi2s_tx_cfg[QUAT_MI2S].channels;
+ param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
+ mi2s_tx_cfg[QUAT_MI2S].bit_format);
break;
default:
@@ -2001,6 +2383,7 @@ static u32 get_mi2s_bits_per_sample(u32 bit_format)
u32 bit_per_sample;
switch (bit_format) {
+ case SNDRV_PCM_FORMAT_S32_LE:
case SNDRV_PCM_FORMAT_S24_3LE:
case SNDRV_PCM_FORMAT_S24_LE:
bit_per_sample = 32;
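
The sdm660-common hunks follow one pattern per MI2S back end: the enum index chosen from userspace is mapped to a PCM format, cached in the port's dev_config, and applied when ASoC asks the machine driver to fix up back-end hw params. A condensed sketch of the fixup step, assuming the file's local param_set_mask() helper and struct dev_config as seen in the hunks above:

    #include <sound/pcm_params.h>

    /* dev_config and param_set_mask() are local to sdm660-common.c */
    static void example_fixup(struct snd_pcm_hw_params *params,
                              const struct dev_config *cfg)
    {
            struct snd_interval *rate = hw_param_interval(params,
                                            SNDRV_PCM_HW_PARAM_RATE);
            struct snd_interval *channels = hw_param_interval(params,
                                            SNDRV_PCM_HW_PARAM_CHANNELS);

            param_set_mask(params, SNDRV_PCM_HW_PARAM_FORMAT,
                           cfg->bit_format);
            rate->min = rate->max = cfg->sample_rate;
            channels->min = channels->max = cfg->channels;
    }
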
diff --git a/sound/soc/msm/sdm660-ext-dai-links.c b/sound/soc/msm/sdm660-ext-dai-links.c
index 4cf5aefb0e0f..f64074d442dc 100644
--- a/sound/soc/msm/sdm660-ext-dai-links.c
+++ b/sound/soc/msm/sdm660-ext-dai-links.c
@@ -1861,6 +1861,24 @@ static struct snd_soc_dai_link msm_wcn_be_dai_links[] = {
},
};
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+ /* DISP PORT BACK END DAI Link */
+ {
+ .name = LPASS_BE_DISPLAY_PORT,
+ .stream_name = "Display Port Playback",
+ .cpu_dai_name = "msm-dai-q6-dp.24608",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-ext-disp-audio-codec-rx",
+ .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+};
+
static struct snd_soc_dai_link msm_ext_tasha_dai_links[
ARRAY_SIZE(msm_ext_common_fe_dai) +
ARRAY_SIZE(msm_ext_tasha_fe_dai) +
@@ -1868,7 +1886,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
ARRAY_SIZE(msm_ext_tasha_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
static struct snd_soc_dai_link msm_ext_tavil_dai_links[
ARRAY_SIZE(msm_ext_common_fe_dai) +
@@ -1877,7 +1896,8 @@ ARRAY_SIZE(msm_ext_common_be_dai) +
ARRAY_SIZE(msm_ext_tavil_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links) +
-ARRAY_SIZE(msm_wcn_be_dai_links)];
+ARRAY_SIZE(msm_wcn_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
/**
* populate_snd_card_dailinks - prepares dailink array and initializes card.
@@ -1951,6 +1971,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
sizeof(msm_wcn_be_dai_links));
len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(msm_ext_tasha_dai_links + len4,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
msm_ext_dai_links = msm_ext_tasha_dai_links;
} else if (strnstr(card->name, "tavil", strlen(card->name))) {
len1 = ARRAY_SIZE(msm_ext_common_fe_dai);
@@ -1987,6 +2016,15 @@ struct snd_soc_card *populate_snd_card_dailinks(struct device *dev,
sizeof(msm_wcn_be_dai_links));
len4 += ARRAY_SIZE(msm_wcn_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node,
+ "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(msm_ext_tavil_dai_links + len4,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len4 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
msm_ext_dai_links = msm_ext_tavil_dai_links;
} else {
dev_err(dev, "%s: failing as no matching card name\n",
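
Both machine drivers append the Display Port back end the same way: probe the DT property, memcpy the extra links onto the tail of the preallocated link array, and advance the running length. A sketch of that append step, reusing ext_disp_be_dai_link and the property name from the patch; the function wrapper is a placeholder:

    #include <linux/of.h>
    #include <linux/string.h>
    #include <sound/soc.h>

    static int example_append_ext_disp_links(struct device *dev,
                                             struct snd_soc_dai_link *links,
                                             int len)
    {
            if (of_property_read_bool(dev->of_node,
                                      "qcom,ext-disp-audio-rx")) {
                    /* copy the BE definitions onto the tail of the array */
                    memcpy(links + len, ext_disp_be_dai_link,
                           sizeof(ext_disp_be_dai_link));
                    len += ARRAY_SIZE(ext_disp_be_dai_link);
            }
            return len;     /* caller uses this as the card's link count */
    }
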
diff --git a/sound/soc/msm/sdm660-external.c b/sound/soc/msm/sdm660-external.c
index 3fe327ad0442..191db6c2fa9d 100644
--- a/sound/soc/msm/sdm660-external.c
+++ b/sound/soc/msm/sdm660-external.c
@@ -676,7 +676,7 @@ static int msm_ext_get_spk(struct snd_kcontrol *kcontrol,
static int msm_ext_set_spk(struct snd_kcontrol *kcontrol,
struct snd_ctl_elem_value *ucontrol)
{
- struct snd_soc_codec *codec = snd_kcontrol_chip(kcontrol);
+ struct snd_soc_codec *codec = snd_soc_kcontrol_codec(kcontrol);
pr_debug("%s()\n", __func__);
if (msm_ext_spk_control == ucontrol->value.integer.value[0])
diff --git a/sound/soc/msm/sdm660-internal.c b/sound/soc/msm/sdm660-internal.c
index 2546380a5a4e..37b34b231b72 100644
--- a/sound/soc/msm/sdm660-internal.c
+++ b/sound/soc/msm/sdm660-internal.c
@@ -136,7 +136,7 @@ static struct dev_config int_mi2s_cfg[] = {
[INT2_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
[INT3_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
[INT4_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 1},
- [INT5_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
+ [INT5_MI2S] = {SAMPLING_RATE_48KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
[INT6_MI2S] = {SAMPLING_RATE_8KHZ, SNDRV_PCM_FORMAT_S16_LE, 2},
};
@@ -2896,6 +2896,24 @@ static struct snd_soc_dai_link msm_wsa_be_dai_links[] = {
},
};
+static struct snd_soc_dai_link ext_disp_be_dai_link[] = {
+ /* DISP PORT BACK END DAI Link */
+ {
+ .name = LPASS_BE_DISPLAY_PORT,
+ .stream_name = "Display Port Playback",
+ .cpu_dai_name = "msm-dai-q6-dp.24608",
+ .platform_name = "msm-pcm-routing",
+ .codec_name = "msm-ext-disp-audio-codec-rx",
+ .codec_dai_name = "msm_dp_audio_codec_rx_dai",
+ .no_pcm = 1,
+ .dpcm_playback = 1,
+ .be_id = MSM_BACKEND_DAI_DISPLAY_PORT_RX,
+ .be_hw_params_fixup = msm_common_be_hw_params_fixup,
+ .ignore_pmdown_time = 1,
+ .ignore_suspend = 1,
+ },
+};
+
static struct snd_soc_dai_link msm_int_dai_links[
ARRAY_SIZE(msm_int_dai) +
ARRAY_SIZE(msm_int_wsa_dai) +
@@ -2903,7 +2921,8 @@ ARRAY_SIZE(msm_int_be_dai) +
ARRAY_SIZE(msm_mi2s_be_dai_links) +
ARRAY_SIZE(msm_auxpcm_be_dai_links)+
ARRAY_SIZE(msm_wcn_be_dai_links) +
-ARRAY_SIZE(msm_wsa_be_dai_links)];
+ARRAY_SIZE(msm_wsa_be_dai_links) +
+ARRAY_SIZE(ext_disp_be_dai_link)];
static struct snd_soc_card sdm660_card = {
/* snd_soc_card_sdm660 */
@@ -3004,6 +3023,14 @@ static struct snd_soc_card *msm_int_populate_sndcard_dailinks(
sizeof(msm_wsa_be_dai_links));
len1 += ARRAY_SIZE(msm_wsa_be_dai_links);
}
+ if (of_property_read_bool(dev->of_node, "qcom,ext-disp-audio-rx")) {
+ dev_dbg(dev, "%s(): ext disp audio support present\n",
+ __func__);
+ memcpy(dailink + len1,
+ ext_disp_be_dai_link,
+ sizeof(ext_disp_be_dai_link));
+ len1 += ARRAY_SIZE(ext_disp_be_dai_link);
+ }
card->dai_link = dailink;
card->num_links = len1;
return card;