summaryrefslogtreecommitdiff
path: root/drivers/gpu/drm/msm
diff options
context:
space:
mode:
Diffstat (limited to 'drivers/gpu/drm/msm')
-rw-r--r--drivers/gpu/drm/msm/Kconfig77
-rw-r--r--drivers/gpu/drm/msm/Makefile165
-rw-r--r--drivers/gpu/drm/msm/adreno/a2xx.xml.h22
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx.xml.h523
-rw-r--r--drivers/gpu/drm/msm/adreno/a3xx_gpu.c125
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx.xml.h1365
-rw-r--r--drivers/gpu/drm/msm/adreno/a4xx_gpu.c213
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx.xml.h3493
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_counters.c825
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.c1426
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_gpu.h200
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_power.c499
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_preempt.c359
-rw-r--r--drivers/gpu/drm/msm/adreno/a5xx_snapshot.c815
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_common.xml.h49
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_device.c150
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.c555
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_gpu.h249
-rw-r--r--drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h160
-rw-r--r--drivers/gpu/drm/msm/dba_bridge.c458
-rw-r--r--drivers/gpu/drm/msm/dba_bridge.h68
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c169
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h133
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c727
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h214
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c2312
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h490
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h578
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c1533
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h192
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_defs.h374
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.c2920
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display.h359
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c114
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h31
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_drm.c567
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_drm.h93
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_hw.h39
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_panel.c2039
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_panel.h217
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.c862
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy.h197
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h164
-rw-r--r--drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c858
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.c25
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.h6
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi.xml.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_host.c22
-rw-r--r--drivers/gpu/drm/msm/dsi/dsi_manager.c18
-rw-r--r--drivers/gpu/drm/msm/dsi/mmss_cc.xml.h5
-rw-r--r--drivers/gpu/drm/msm/dsi/sfpb.xml.h5
-rw-r--r--drivers/gpu/drm/msm/edp/edp.c4
-rw-r--r--drivers/gpu/drm/msm/edp/edp.xml.h5
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_connector.c127
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_connector.h28
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_crtc.c270
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_crtc.h62
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_drv.c439
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_encoder.c112
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_encoder.h50
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_kms.c717
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_kms.h58
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_plane.c912
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_plane.h41
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_splash.c77
-rw-r--r--drivers/gpu/drm/msm/ekms/edrm_splash.h44
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c3426
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h699
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c357
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c1088
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c1053
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h300
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c1142
-rw-r--r--drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h206
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.c88
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.h56
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi.xml.h42
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_bridge.c4
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c114
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_i2c.c81
-rw-r--r--drivers/gpu/drm/msm/hdmi/hdmi_util.c149
-rw-r--r--drivers/gpu/drm/msm/hdmi/qfprom.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c19
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c56
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c12
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c11
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c49
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h3
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c30
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_common.xml.h5
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_format.c8
-rw-r--r--drivers/gpu/drm/msm/mdp/mdp_kms.h4
-rw-r--r--drivers/gpu/drm/msm/msm_atomic.c527
-rw-r--r--drivers/gpu/drm/msm/msm_drv.c1557
-rw-r--r--drivers/gpu/drm/msm/msm_drv.h399
-rw-r--r--drivers/gpu/drm/msm/msm_fb.c167
-rw-r--r--drivers/gpu/drm/msm/msm_fbdev.c17
-rw-r--r--drivers/gpu/drm/msm/msm_gem.c905
-rw-r--r--drivers/gpu/drm/msm/msm_gem.h75
-rw-r--r--drivers/gpu/drm/msm/msm_gem_prime.c12
-rw-r--r--drivers/gpu/drm/msm/msm_gem_submit.c244
-rw-r--r--drivers/gpu/drm/msm/msm_gem_vma.c217
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.c706
-rw-r--r--drivers/gpu/drm/msm/msm_gpu.h169
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.c354
-rw-r--r--drivers/gpu/drm/msm/msm_iommu.h46
-rw-r--r--drivers/gpu/drm/msm/msm_kms.h79
-rw-r--r--drivers/gpu/drm/msm/msm_mmu.h66
-rw-r--r--drivers/gpu/drm/msm/msm_prop.c734
-rw-r--r--drivers/gpu/drm/msm/msm_prop.h432
-rw-r--r--drivers/gpu/drm/msm/msm_rd.c65
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.c28
-rw-r--r--drivers/gpu/drm/msm/msm_ringbuffer.h48
-rw-r--r--drivers/gpu/drm/msm/msm_smmu.c496
-rw-r--r--drivers/gpu/drm/msm/msm_snapshot.c105
-rw-r--r--drivers/gpu/drm/msm/msm_snapshot.h85
-rw-r--r--drivers/gpu/drm/msm/msm_snapshot_api.h134
-rw-r--r--drivers/gpu/drm/msm/msm_submitqueue.c151
-rw-r--r--drivers/gpu/drm/msm/msm_trace.h98
-rw-r--r--drivers/gpu/drm/msm/msm_trace_points.c18
-rw-r--r--drivers/gpu/drm/msm/sde/sde_backlight.c103
-rw-r--r--drivers/gpu/drm/msm/sde/sde_backlight.h18
-rw-r--r--drivers/gpu/drm/msm/sde/sde_color_processing.c986
-rw-r--r--drivers/gpu/drm/msm/sde/sde_color_processing.h95
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.c1032
-rw-r--r--drivers/gpu/drm/msm/sde/sde_connector.h429
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_irq.c583
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_irq.h152
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_perf.c634
-rw-r--r--drivers/gpu/drm/msm/sde/sde_core_perf.h124
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.c2167
-rw-r--r--drivers/gpu/drm/msm/sde/sde_crtc.h308
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.c1740
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder.h133
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys.h449
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c726
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c1041
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c1006
-rw-r--r--drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c1034
-rw-r--r--drivers/gpu/drm/msm/sde/sde_fence.c232
-rw-r--r--drivers/gpu/drm/msm/sde/sde_fence.h177
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.c1294
-rw-r--r--drivers/gpu/drm/msm/sde/sde_formats.h123
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog.c2428
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog.h775
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h177
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_cdm.c309
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_cdm.h128
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_color_processing.h18
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c494
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h85
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.c586
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_ctl.h234
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_dspp.c126
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_dspp.h162
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_hwio.h0
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_interrupts.c986
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_interrupts.h257
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_intf.c339
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_intf.h133
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_lm.c209
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_lm.h102
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_mdss.h470
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_pingpong.c173
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_pingpong.h123
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_sspp.c959
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_sspp.h479
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_top.c275
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_top.h170
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_util.c93
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_util.h57
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_vbif.c169
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_vbif.h90
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_wb.c229
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hw_wb.h105
-rw-r--r--drivers/gpu/drm/msm/sde/sde_hwio.h59
-rw-r--r--drivers/gpu/drm/msm/sde/sde_irq.c112
-rw-r--r--drivers/gpu/drm/msm/sde/sde_irq.h59
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.c1677
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms.h434
-rw-r--r--drivers/gpu/drm/msm/sde/sde_kms_utils.c235
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.c2929
-rw-r--r--drivers/gpu/drm/msm/sde/sde_plane.h114
-rw-r--r--drivers/gpu/drm/msm/sde/sde_recovery_manager.c399
-rw-r--r--drivers/gpu/drm/msm/sde/sde_recovery_manager.h124
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.c1763
-rw-r--r--drivers/gpu/drm/msm/sde/sde_rm.h253
-rw-r--r--drivers/gpu/drm/msm/sde/sde_shd.c1101
-rw-r--r--drivers/gpu/drm/msm/sde/sde_shd.h231
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.c1204
-rw-r--r--drivers/gpu/drm/msm/sde/sde_splash.h233
-rw-r--r--drivers/gpu/drm/msm/sde/sde_trace.h211
-rw-r--r--drivers/gpu/drm/msm/sde/sde_vbif.c284
-rw-r--r--drivers/gpu/drm/msm/sde/sde_vbif.h51
-rw-r--r--drivers/gpu/drm/msm/sde/sde_wb.c745
-rw-r--r--drivers/gpu/drm/msm/sde/sde_wb.h321
-rw-r--r--drivers/gpu/drm/msm/sde_dbg.c2323
-rw-r--r--drivers/gpu/drm/msm/sde_dbg.h341
-rw-r--r--drivers/gpu/drm/msm/sde_dbg_evtlog.c198
-rw-r--r--drivers/gpu/drm/msm/sde_edid_parser.c633
-rw-r--r--drivers/gpu/drm/msm/sde_edid_parser.h152
-rw-r--r--drivers/gpu/drm/msm/sde_hdcp.h88
-rw-r--r--drivers/gpu/drm/msm/sde_hdcp_1x.c1910
-rw-r--r--drivers/gpu/drm/msm/sde_io_util.c502
-rw-r--r--drivers/gpu/drm/msm/sde_power_handle.c925
-rw-r--r--drivers/gpu/drm/msm/sde_power_handle.h229
209 files changed, 90465 insertions, 1867 deletions
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 84d3ec98e6b9..d2240c53edd3 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -3,13 +3,18 @@ config DRM_MSM
tristate "MSM DRM"
depends on DRM
depends on ARCH_QCOM || (ARM && COMPILE_TEST)
- depends on OF && COMMON_CLK
+ depends on OF
+ depends on !MSM_GVM
select REGULATOR
select DRM_KMS_HELPER
select DRM_PANEL
select SHMEM
select TMPFS
select QCOM_SCM
+ select BACKLIGHT_CLASS_DEVICE
+ select MSM_EXT_DISPLAY
+ select MMU_NOTIFIER
+ select INTERVAL_TREE
default y
help
DRM/KMS driver for MSM/snapdragon.
@@ -23,6 +28,16 @@ config DRM_MSM_REGISTER_LOGGING
that can be parsed by envytools demsm tool. If enabled, register
logging can be switched on via msm.reglog=y module param.
+config DRM_MSM_EARLY_CARD
+ bool "Enable Early DRM in MSM DRM driver"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option to enable the early DRM driver for
+ MSM/snapdragon. Early DRM will create one DRI card to
+ support early applications. One should also check the device
+ tree to assign proper display resources to early DRM.
+
config DRM_MSM_DSI
bool "Enable DSI support in MSM DRM driver"
depends on DRM_MSM
@@ -33,6 +48,18 @@ config DRM_MSM_DSI
Choose this option if you have a need for MIPI DSI connector
support.
+config DRM_MSM_DSI_STAGING
+ bool "Enable new DSI driver support in MSM DRM driver"
+ depends on DRM_MSM
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ default y
+ help
+ Choose this option if you need MIPI DSI connector support on MSM
+ which conforms to DRM. MIPI stands for Mobile Industry Processor
+ Interface and DSI stands for Display Serial Interface which powers
+ the primary display of your mobile device.
+
config DRM_MSM_DSI_PLL
bool "Enable DSI PLL driver in MSM DRM"
depends on DRM_MSM_DSI && COMMON_CLK
@@ -54,3 +81,51 @@ config DRM_MSM_DSI_20NM_PHY
default y
help
Choose this option if the 20nm DSI PHY is used on the platform.
+
+config DRM_MSM_MDP4
+ tristate "MSM MDP4 DRM driver"
+ depends on DRM_MSM
+ default n
+ help
+ Choose this option if MSM MDP4 revision support is needed in DRM/KMS.
+
+config DRM_MSM_HDCP
+ tristate "HDCP for MSM DRM"
+ depends on DRM_MSM
+ default n
+ help
+ Choose this option if HDCP support is needed in the DRM/KMS driver.
+
+config DRM_SDE_WB
+ bool "Enable Writeback support in SDE DRM"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option for writeback connector support.
+
+config DRM_SDE_SHD
+ bool "Enable Shared display support in SDE DRM"
+ depends on DRM_MSM
+ help
+ Choose this option for shared display support.
+ This option enables multiple logical displays
+ to share one base physical encoder/connector.
+ Each logical display will appear as different
+ connectors and report back to user.
+
+config DRM_SDE_HDMI
+ bool "Enable HDMI driver support in DRM SDE driver"
+ depends on DRM_MSM
+ default y
+ help
+ Choose this option if HDMI connector support is needed in SDE driver.
+
+config DRM_SDE_EVTLOG_DEBUG
+ bool "Enable event logging in MSM DRM"
+ depends on DRM_MSM
+ help
+ The SDE DRM debugging provides support to enable display debugging
+ features to: dump SDE registers during driver errors, panic
+ driver during fatal errors and enable some display-driver logging
+ into an internal buffer (this avoids logging overhead).
+
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index 1c90290be716..dd721cd8b0e6 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,17 +1,20 @@
-ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm
+ccflags-y := -Iinclude/drm -Idrivers/gpu/drm/msm -Idrivers/gpu/drm/msm/dsi-staging
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi
+ccflags-y += -Idrivers/gpu/drm/msm/hdmi-staging
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
+ccflags-$(CONFIG_SYNC) += -Idrivers/staging/android
+ccflags-$(CONFIG_DRM_MSM_DSI_PLL) += -Idrivers/gpu/drm/msm/dsi
+ccflags-y += -Idrivers/gpu/drm/msm/sde
+ccflags-$(CONFIG_DRM_MSM_EARLY_CARD) += -Idrivers/gpu/drm/msm/ekms
-msm-y := \
- adreno/adreno_device.o \
- adreno/adreno_gpu.o \
- adreno/a3xx_gpu.o \
- adreno/a4xx_gpu.o \
+msm_drm-y := \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
hdmi/hdmi_connector.o \
hdmi/hdmi_hdcp.o \
hdmi/hdmi_i2c.o \
+ hdmi/hdmi_util.o \
hdmi/hdmi_phy_8960.o \
hdmi/hdmi_phy_8x60.o \
hdmi/hdmi_phy_8x74.o \
@@ -23,13 +26,6 @@ msm-y := \
edp/edp_phy.o \
mdp/mdp_format.o \
mdp/mdp_kms.o \
- mdp/mdp4/mdp4_crtc.o \
- mdp/mdp4/mdp4_dtv_encoder.o \
- mdp/mdp4/mdp4_lcdc_encoder.o \
- mdp/mdp4/mdp4_lvds_connector.o \
- mdp/mdp4/mdp4_irq.o \
- mdp/mdp4/mdp4_kms.o \
- mdp/mdp4/mdp4_plane.o \
mdp/mdp5/mdp5_cfg.o \
mdp/mdp5/mdp5_ctl.o \
mdp/mdp5/mdp5_crtc.o \
@@ -38,34 +34,137 @@ msm-y := \
mdp/mdp5/mdp5_kms.o \
mdp/mdp5/mdp5_plane.o \
mdp/mdp5/mdp5_smp.o \
+ sde/sde_crtc.o \
+ sde/sde_encoder.o \
+ sde/sde_encoder_phys_vid.o \
+ sde/sde_encoder_phys_cmd.o \
+ sde/sde_irq.o \
+ sde/sde_core_irq.o \
+ sde/sde_core_perf.o \
+ sde/sde_rm.o \
+ sde/sde_kms_utils.o \
+ sde/sde_kms.o \
+ sde/sde_plane.o \
+ sde/sde_connector.o \
+ sde/sde_backlight.o \
+ sde/sde_color_processing.o \
+ sde/sde_vbif.o \
+ sde/sde_splash.o \
+ sde/sde_recovery_manager.o \
+ sde_dbg.o \
+ sde_dbg_evtlog.o \
+ sde_io_util.o \
+ dba_bridge.o \
+ sde_edid_parser.o \
+ sde_hdcp_1x.o
+
+msm_drm-$(CONFIG_DRM_MSM_EARLY_CARD) += ekms/edrm_kms.o \
+ ekms/edrm_plane.o \
+ ekms/edrm_encoder.o \
+ ekms/edrm_connector.o \
+ ekms/edrm_crtc.o \
+ ekms/edrm_drv.o \
+ ekms/edrm_splash.o
+
+# use drm gpu driver only if qcom_kgsl driver not available
+ifneq ($(CONFIG_QCOM_KGSL),y)
+msm_drm-y += adreno/adreno_device.o \
+ adreno/adreno_gpu.o \
+ adreno/a3xx_gpu.o \
+ adreno/a4xx_gpu.o \
+ adreno/a5xx_gpu.o \
+ adreno/a5xx_power.o \
+ adreno/a5xx_preempt.o \
+ adreno/a5xx_snapshot.o \
+ adreno/a5xx_counters.o
+endif
+
+msm_drm-$(CONFIG_DRM_MSM_MDP4) += mdp/mdp4/mdp4_crtc.o \
+ mdp/mdp4/mdp4_dtv_encoder.o \
+ mdp/mdp4/mdp4_lcdc_encoder.o \
+ mdp/mdp4/mdp4_lvds_connector.o \
+ mdp/mdp4/mdp4_irq.o \
+ mdp/mdp4/mdp4_kms.o \
+ mdp/mdp4/mdp4_plane.o
+
+msm_drm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
+msm_drm-$(CONFIG_SYNC) += sde/sde_fence.o
+msm_drm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
+ dsi/dsi_cfg.o \
+ dsi/dsi_host.o \
+ dsi/dsi_manager.o \
+ dsi/phy/dsi_phy.o \
+ mdp/mdp5/mdp5_cmd_encoder.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
+msm_drm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_STAGING) += dsi-staging/dsi_phy.o \
+ dsi-staging/dsi_clk_pwr.o \
+ dsi-staging/dsi_phy_hw_v4_0.o \
+ dsi-staging/dsi_ctrl_hw_1_4.o \
+ dsi-staging/dsi_ctrl.o \
+ dsi-staging/dsi_catalog.o \
+ dsi-staging/dsi_drm.o \
+ dsi-staging/dsi_display.o \
+ dsi-staging/dsi_panel.o \
+ dsi-staging/dsi_display_test.o
+
+msm_drm-$(CONFIG_DRM_SDE_HDMI) += \
+ hdmi-staging/sde_hdmi_util.o \
+ hdmi-staging/sde_hdmi.o \
+ hdmi-staging/sde_hdmi_bridge.o \
+ hdmi-staging/sde_hdmi_audio.o \
+ hdmi-staging/sde_hdmi_hdcp2p2.o
+
+msm_drm-$(CONFIG_DRM_MSM_DSI_PLL) += dsi/pll/dsi_pll.o \
+ dsi/pll/dsi_pll_28nm.o
+
+msm_drm-$(CONFIG_DRM_MSM) += \
+ sde/sde_hw_catalog.o \
+ sde/sde_hw_cdm.o \
+ sde/sde_hw_dspp.o \
+ sde/sde_hw_intf.o \
+ sde/sde_hw_lm.o \
+ sde/sde_hw_ctl.o \
+ sde/sde_hw_util.o \
+ sde/sde_hw_sspp.o \
+ sde/sde_hw_wb.o \
+ sde/sde_hw_pingpong.o \
+ sde/sde_hw_top.o \
+ sde/sde_hw_interrupts.o \
+ sde/sde_hw_vbif.o \
+ sde/sde_formats.o \
+ sde_power_handle.o \
+ sde/sde_hw_color_processing_v1_7.o
+
+msm_drm-$(CONFIG_DRM_SDE_WB) += sde/sde_wb.o \
+ sde/sde_encoder_phys_wb.o
+
+msm_drm-$(CONFIG_DRM_SDE_SHD) += sde/sde_shd.o \
+ sde/sde_encoder_phys_shd.o
+
+msm_drm-$(CONFIG_DRM_MSM) += \
msm_atomic.o \
msm_drv.o \
msm_fb.o \
msm_gem.o \
msm_gem_prime.o \
msm_gem_submit.o \
+ msm_gem_vma.o \
msm_gpu.o \
msm_iommu.o \
+ msm_smmu.o \
msm_perf.o \
msm_rd.o \
- msm_ringbuffer.o
-
-msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
-msm-$(CONFIG_COMMON_CLK) += mdp/mdp4/mdp4_lvds_pll.o
-
-msm-$(CONFIG_DRM_MSM_DSI) += dsi/dsi.o \
- dsi/dsi_cfg.o \
- dsi/dsi_host.o \
- dsi/dsi_manager.o \
- dsi/phy/dsi_phy.o \
- mdp/mdp5/mdp5_cmd_encoder.o
-
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/phy/dsi_phy_28nm.o
-msm-$(CONFIG_DRM_MSM_DSI_20NM_PHY) += dsi/phy/dsi_phy_20nm.o
-
-ifeq ($(CONFIG_DRM_MSM_DSI_PLL),y)
-msm-y += dsi/pll/dsi_pll.o
-msm-$(CONFIG_DRM_MSM_DSI_28NM_PHY) += dsi/pll/dsi_pll_28nm.o
-endif
+ msm_ringbuffer.o \
+ msm_prop.o \
+ msm_snapshot.o \
+ msm_submitqueue.o \
+ msm_trace_points.o
-obj-$(CONFIG_DRM_MSM) += msm.o
+obj-$(CONFIG_DRM_MSM) += msm_drm.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 9e2aceb4ffe6..8d16b21ef8a5 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 97dc1c6ec107..d521b13bdf9f 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -111,10 +113,14 @@ enum a3xx_vtx_fmt {
VFMT_8_8_SNORM = 53,
VFMT_8_8_8_SNORM = 54,
VFMT_8_8_8_8_SNORM = 55,
- VFMT_10_10_10_2_UINT = 60,
- VFMT_10_10_10_2_UNORM = 61,
- VFMT_10_10_10_2_SINT = 62,
- VFMT_10_10_10_2_SNORM = 63,
+ VFMT_10_10_10_2_UINT = 56,
+ VFMT_10_10_10_2_UNORM = 57,
+ VFMT_10_10_10_2_SINT = 58,
+ VFMT_10_10_10_2_SNORM = 59,
+ VFMT_2_10_10_10_UINT = 60,
+ VFMT_2_10_10_10_UNORM = 61,
+ VFMT_2_10_10_10_SINT = 62,
+ VFMT_2_10_10_10_SNORM = 63,
};
enum a3xx_tex_fmt {
@@ -124,10 +130,14 @@ enum a3xx_tex_fmt {
TFMT_Z16_UNORM = 9,
TFMT_X8Z24_UNORM = 10,
TFMT_Z32_FLOAT = 11,
- TFMT_NV12_UV_TILED = 17,
- TFMT_NV12_Y_TILED = 19,
- TFMT_NV12_UV = 21,
- TFMT_NV12_Y = 23,
+ TFMT_UV_64X32 = 16,
+ TFMT_VU_64X32 = 17,
+ TFMT_Y_64X32 = 18,
+ TFMT_NV12_64X32 = 19,
+ TFMT_UV_LINEAR = 20,
+ TFMT_VU_LINEAR = 21,
+ TFMT_Y_LINEAR = 22,
+ TFMT_NV12_LINEAR = 23,
TFMT_I420_Y = 24,
TFMT_I420_U = 26,
TFMT_I420_V = 27,
@@ -138,10 +148,12 @@ enum a3xx_tex_fmt {
TFMT_DXT1 = 36,
TFMT_DXT3 = 37,
TFMT_DXT5 = 38,
+ TFMT_2_10_10_10_UNORM = 40,
TFMT_10_10_10_2_UNORM = 41,
TFMT_9_9_9_E5_FLOAT = 42,
TFMT_11_11_10_FLOAT = 43,
TFMT_A8_UNORM = 44,
+ TFMT_L8_UNORM = 45,
TFMT_L8_A8_UNORM = 47,
TFMT_8_UNORM = 48,
TFMT_8_8_UNORM = 49,
@@ -183,6 +195,8 @@ enum a3xx_tex_fmt {
TFMT_32_SINT = 92,
TFMT_32_32_SINT = 93,
TFMT_32_32_32_32_SINT = 95,
+ TFMT_2_10_10_10_UINT = 96,
+ TFMT_10_10_10_2_UINT = 97,
TFMT_ETC2_RG11_SNORM = 112,
TFMT_ETC2_RG11_UNORM = 113,
TFMT_ETC2_R11_SNORM = 114,
@@ -215,6 +229,9 @@ enum a3xx_color_fmt {
RB_R8_UINT = 14,
RB_R8_SINT = 15,
RB_R10G10B10A2_UNORM = 16,
+ RB_A2R10G10B10_UNORM = 17,
+ RB_R10G10B10A2_UINT = 18,
+ RB_A2R10G10B10_UINT = 19,
RB_A8_UNORM = 20,
RB_R8_UNORM = 21,
RB_R16_FLOAT = 24,
@@ -244,38 +261,273 @@ enum a3xx_color_fmt {
RB_R32G32B32A32_UINT = 59,
};
+enum a3xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_AHB_PFPTRANS_WAIT = 3,
+ CP_AHB_NRTTRANS_WAIT = 6,
+ CP_CSF_NRT_READ_WAIT = 8,
+ CP_CSF_I1_FIFO_FULL = 9,
+ CP_CSF_I2_FIFO_FULL = 10,
+ CP_CSF_ST_FIFO_FULL = 11,
+ CP_RESERVED_12 = 12,
+ CP_CSF_RING_ROQ_FULL = 13,
+ CP_CSF_I1_ROQ_FULL = 14,
+ CP_CSF_I2_ROQ_FULL = 15,
+ CP_CSF_ST_ROQ_FULL = 16,
+ CP_RESERVED_17 = 17,
+ CP_MIU_TAG_MEM_FULL = 18,
+ CP_MIU_NRT_WRITE_STALLED = 22,
+ CP_MIU_NRT_READ_STALLED = 23,
+ CP_ME_REGS_RB_DONE_FIFO_FULL = 26,
+ CP_ME_REGS_VS_EVENT_FIFO_FULL = 27,
+ CP_ME_REGS_PS_EVENT_FIFO_FULL = 28,
+ CP_ME_REGS_CF_EVENT_FIFO_FULL = 29,
+ CP_ME_MICRO_RB_STARVED = 30,
+ CP_AHB_RBBM_DWORD_SENT = 40,
+ CP_ME_BUSY_CLOCKS = 41,
+ CP_ME_WAIT_CONTEXT_AVAIL = 42,
+ CP_PFP_TYPE0_PACKET = 43,
+ CP_PFP_TYPE3_PACKET = 44,
+ CP_CSF_RB_WPTR_NEQ_RPTR = 45,
+ CP_CSF_I1_SIZE_NEQ_ZERO = 46,
+ CP_CSF_I2_SIZE_NEQ_ZERO = 47,
+ CP_CSF_RBI1I2_FETCHING = 48,
+};
+
+enum a3xx_gras_tse_perfcounter_select {
+ GRAS_TSEPERF_INPUT_PRIM = 0,
+ GRAS_TSEPERF_INPUT_NULL_PRIM = 1,
+ GRAS_TSEPERF_TRIVAL_REJ_PRIM = 2,
+ GRAS_TSEPERF_CLIPPED_PRIM = 3,
+ GRAS_TSEPERF_NEW_PRIM = 4,
+ GRAS_TSEPERF_ZERO_AREA_PRIM = 5,
+ GRAS_TSEPERF_FACENESS_CULLED_PRIM = 6,
+ GRAS_TSEPERF_ZERO_PIXEL_PRIM = 7,
+ GRAS_TSEPERF_OUTPUT_NULL_PRIM = 8,
+ GRAS_TSEPERF_OUTPUT_VISIBLE_PRIM = 9,
+ GRAS_TSEPERF_PRE_CLIP_PRIM = 10,
+ GRAS_TSEPERF_POST_CLIP_PRIM = 11,
+ GRAS_TSEPERF_WORKING_CYCLES = 12,
+ GRAS_TSEPERF_PC_STARVE = 13,
+ GRAS_TSERASPERF_STALL = 14,
+};
+
+enum a3xx_gras_ras_perfcounter_select {
+ GRAS_RASPERF_16X16_TILES = 0,
+ GRAS_RASPERF_8X8_TILES = 1,
+ GRAS_RASPERF_4X4_TILES = 2,
+ GRAS_RASPERF_WORKING_CYCLES = 3,
+ GRAS_RASPERF_STALL_CYCLES_BY_RB = 4,
+ GRAS_RASPERF_STALL_CYCLES_BY_VSC = 5,
+ GRAS_RASPERF_STARVE_CYCLES_BY_TSE = 6,
+};
+
+enum a3xx_hlsq_perfcounter_select {
+ HLSQ_PERF_SP_VS_CONSTANT = 0,
+ HLSQ_PERF_SP_VS_INSTRUCTIONS = 1,
+ HLSQ_PERF_SP_FS_CONSTANT = 2,
+ HLSQ_PERF_SP_FS_INSTRUCTIONS = 3,
+ HLSQ_PERF_TP_STATE = 4,
+ HLSQ_PERF_QUADS = 5,
+ HLSQ_PERF_PIXELS = 6,
+ HLSQ_PERF_VERTICES = 7,
+ HLSQ_PERF_FS8_THREADS = 8,
+ HLSQ_PERF_FS16_THREADS = 9,
+ HLSQ_PERF_FS32_THREADS = 10,
+ HLSQ_PERF_VS8_THREADS = 11,
+ HLSQ_PERF_VS16_THREADS = 12,
+ HLSQ_PERF_SP_VS_DATA_BYTES = 13,
+ HLSQ_PERF_SP_FS_DATA_BYTES = 14,
+ HLSQ_PERF_ACTIVE_CYCLES = 15,
+ HLSQ_PERF_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_PERF_STALL_CYCLES_SP_VS = 17,
+ HLSQ_PERF_STALL_CYCLES_SP_FS = 18,
+ HLSQ_PERF_STALL_CYCLES_UCHE = 19,
+ HLSQ_PERF_RBBM_LOAD_CYCLES = 20,
+ HLSQ_PERF_DI_TO_VS_START_SP0 = 21,
+ HLSQ_PERF_DI_TO_FS_START_SP0 = 22,
+ HLSQ_PERF_VS_START_TO_DONE_SP0 = 23,
+ HLSQ_PERF_FS_START_TO_DONE_SP0 = 24,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_VS = 25,
+ HLSQ_PERF_SP_STATE_COPY_CYCLES_FS = 26,
+ HLSQ_PERF_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_PERF_UCHE_LATENCY_COUNT = 28,
+};
+
+enum a3xx_pc_perfcounter_select {
+ PC_PCPERF_VISIBILITY_STREAMS = 0,
+ PC_PCPERF_TOTAL_INSTANCES = 1,
+ PC_PCPERF_PRIMITIVES_PC_VPC = 2,
+ PC_PCPERF_PRIMITIVES_KILLED_BY_VS = 3,
+ PC_PCPERF_PRIMITIVES_VISIBLE_BY_VS = 4,
+ PC_PCPERF_DRAWCALLS_KILLED_BY_VS = 5,
+ PC_PCPERF_DRAWCALLS_VISIBLE_BY_VS = 6,
+ PC_PCPERF_VERTICES_TO_VFD = 7,
+ PC_PCPERF_REUSED_VERTICES = 8,
+ PC_PCPERF_CYCLES_STALLED_BY_VFD = 9,
+ PC_PCPERF_CYCLES_STALLED_BY_TSE = 10,
+ PC_PCPERF_CYCLES_STALLED_BY_VBIF = 11,
+ PC_PCPERF_CYCLES_IS_WORKING = 12,
+};
+
+enum a3xx_rb_perfcounter_select {
+ RB_RBPERF_ACTIVE_CYCLES_ANY = 0,
+ RB_RBPERF_ACTIVE_CYCLES_ALL = 1,
+ RB_RBPERF_STARVE_CYCLES_BY_SP = 2,
+ RB_RBPERF_STARVE_CYCLES_BY_RAS = 3,
+ RB_RBPERF_STARVE_CYCLES_BY_MARB = 4,
+ RB_RBPERF_STALL_CYCLES_BY_MARB = 5,
+ RB_RBPERF_STALL_CYCLES_BY_HLSQ = 6,
+ RB_RBPERF_RB_MARB_DATA = 7,
+ RB_RBPERF_SP_RB_QUAD = 8,
+ RB_RBPERF_RAS_EARLY_Z_QUADS = 9,
+ RB_RBPERF_GMEM_CH0_READ = 10,
+ RB_RBPERF_GMEM_CH1_READ = 11,
+ RB_RBPERF_GMEM_CH0_WRITE = 12,
+ RB_RBPERF_GMEM_CH1_WRITE = 13,
+ RB_RBPERF_CP_CONTEXT_DONE = 14,
+ RB_RBPERF_CP_CACHE_FLUSH = 15,
+ RB_RBPERF_CP_ZPASS_DONE = 16,
+};
+
+enum a3xx_rbbm_perfcounter_select {
+ RBBM_ALAWYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TEX_BUSY = 12,
+ RBBM_ANY_USP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_RBBM_STATUS_MASKED = 22,
+};
+
enum a3xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_UCHE_LOAD_INSTRUCTIONS = 3,
+ SP_UCHE_STORE_INSTRUCTIONS = 4,
+ SP_UCHE_ATOMICS = 5,
+ SP_VS_TEX_INSTRUCTIONS = 6,
+ SP_VS_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_EFU_INSTRUCTIONS = 8,
+ SP_VS_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_TEX_INSTRUCTIONS = 11,
SP_FS_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_EFU_INSTRUCTIONS = 13,
SP_FS_FULL_ALU_INSTRUCTIONS = 14,
- SP0_ICL1_MISSES = 26,
+ SP_FS_HALF_ALU_INSTRUCTIONS = 15,
+ SP_FS_BARY_INSTRUCTIONS = 16,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
SP_ALU_ACTIVE_CYCLES = 29,
+ SP_EFU_ACTIVE_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_ACTIVE_CYCLES_ANY = 35,
+ SP_ACTIVE_CYCLES_ALL = 36,
+};
+
+enum a3xx_tp_perfcounter_select {
+ TPL1_TPPERF_L1_REQUESTS = 0,
+ TPL1_TPPERF_TP0_L1_REQUESTS = 1,
+ TPL1_TPPERF_TP0_L1_MISSES = 2,
+ TPL1_TPPERF_TP1_L1_REQUESTS = 3,
+ TPL1_TPPERF_TP1_L1_MISSES = 4,
+ TPL1_TPPERF_TP2_L1_REQUESTS = 5,
+ TPL1_TPPERF_TP2_L1_MISSES = 6,
+ TPL1_TPPERF_TP3_L1_REQUESTS = 7,
+ TPL1_TPPERF_TP3_L1_MISSES = 8,
+ TPL1_TPPERF_OUTPUT_TEXELS_POINT = 9,
+ TPL1_TPPERF_OUTPUT_TEXELS_BILINEAR = 10,
+ TPL1_TPPERF_OUTPUT_TEXELS_MIP = 11,
+ TPL1_TPPERF_OUTPUT_TEXELS_ANISO = 12,
+ TPL1_TPPERF_BILINEAR_OPS = 13,
+ TPL1_TPPERF_QUADSQUADS_OFFSET = 14,
+ TPL1_TPPERF_QUADQUADS_SHADOW = 15,
+ TPL1_TPPERF_QUADS_ARRAY = 16,
+ TPL1_TPPERF_QUADS_PROJECTION = 17,
+ TPL1_TPPERF_QUADS_GRADIENT = 18,
+ TPL1_TPPERF_QUADS_1D2D = 19,
+ TPL1_TPPERF_QUADS_3DCUBE = 20,
+ TPL1_TPPERF_ZERO_LOD = 21,
+ TPL1_TPPERF_OUTPUT_TEXELS = 22,
+ TPL1_TPPERF_ACTIVE_CYCLES_ANY = 23,
+ TPL1_TPPERF_ACTIVE_CYCLES_ALL = 24,
+ TPL1_TPPERF_STALL_CYCLES_BY_ARB = 25,
+ TPL1_TPPERF_LATENCY = 26,
+ TPL1_TPPERF_LATENCY_TRANS = 27,
+};
+
+enum a3xx_vfd_perfcounter_select {
+ VFD_PERF_UCHE_BYTE_FETCHED = 0,
+ VFD_PERF_UCHE_TRANS = 1,
+ VFD_PERF_VPC_BYPASS_COMPONENTS = 2,
+ VFD_PERF_FETCH_INSTRUCTIONS = 3,
+ VFD_PERF_DECODE_INSTRUCTIONS = 4,
+ VFD_PERF_ACTIVE_CYCLES = 5,
+ VFD_PERF_STALL_CYCLES_UCHE = 6,
+ VFD_PERF_STALL_CYCLES_HLSQ = 7,
+ VFD_PERF_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_PERF_STALL_CYCLES_VPC_ALLOC = 9,
};
-enum a3xx_rop_code {
- ROP_CLEAR = 0,
- ROP_NOR = 1,
- ROP_AND_INVERTED = 2,
- ROP_COPY_INVERTED = 3,
- ROP_AND_REVERSE = 4,
- ROP_INVERT = 5,
- ROP_XOR = 6,
- ROP_NAND = 7,
- ROP_AND = 8,
- ROP_EQUIV = 9,
- ROP_NOOP = 10,
- ROP_OR_INVERTED = 11,
- ROP_COPY = 12,
- ROP_OR_REVERSE = 13,
- ROP_OR = 14,
- ROP_SET = 15,
+enum a3xx_vpc_perfcounter_select {
+ VPC_PERF_SP_LM_PRIMITIVES = 0,
+ VPC_PERF_COMPONENTS_FROM_SP = 1,
+ VPC_PERF_SP_LM_COMPONENTS = 2,
+ VPC_PERF_ACTIVE_CYCLES = 3,
+ VPC_PERF_STALL_CYCLES_LM = 4,
+ VPC_PERF_STALL_CYCLES_RAS = 5,
};
-enum a3xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
+enum a3xx_uche_perfcounter_select {
+ UCHE_UCHEPERF_VBIF_READ_BEATS_TP = 0,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_VFD = 1,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_MARB = 3,
+ UCHE_UCHEPERF_VBIF_READ_BEATS_SP = 4,
+ UCHE_UCHEPERF_READ_REQUESTS_TP = 8,
+ UCHE_UCHEPERF_READ_REQUESTS_VFD = 9,
+ UCHE_UCHEPERF_READ_REQUESTS_HLSQ = 10,
+ UCHE_UCHEPERF_READ_REQUESTS_MARB = 11,
+ UCHE_UCHEPERF_READ_REQUESTS_SP = 12,
+ UCHE_UCHEPERF_WRITE_REQUESTS_MARB = 13,
+ UCHE_UCHEPERF_WRITE_REQUESTS_SP = 14,
+ UCHE_UCHEPERF_TAG_CHECK_FAILS = 15,
+ UCHE_UCHEPERF_EVICTS = 16,
+ UCHE_UCHEPERF_FLUSHES = 17,
+ UCHE_UCHEPERF_VBIF_LATENCY_CYCLES = 18,
+ UCHE_UCHEPERF_VBIF_LATENCY_SAMPLES = 19,
+ UCHE_UCHEPERF_ACTIVE_CYCLES = 20,
};
enum a3xx_intp_mode {
@@ -1138,13 +1390,14 @@ static inline uint32_t A3XX_RB_COPY_CONTROL_MODE(enum adreno_rb_copy_control_mod
{
return ((val) << A3XX_RB_COPY_CONTROL_MODE__SHIFT) & A3XX_RB_COPY_CONTROL_MODE__MASK;
}
+#define A3XX_RB_COPY_CONTROL_MSAA_SRGB_DOWNSAMPLE 0x00000080
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK 0x00000f00
#define A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT 8
static inline uint32_t A3XX_RB_COPY_CONTROL_FASTCLEAR(uint32_t val)
{
return ((val) << A3XX_RB_COPY_CONTROL_FASTCLEAR__SHIFT) & A3XX_RB_COPY_CONTROL_FASTCLEAR__MASK;
}
-#define A3XX_RB_COPY_CONTROL_UNK12 0x00001000
+#define A3XX_RB_COPY_CONTROL_DEPTH32_RESOLVE 0x00001000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__MASK 0xffffc000
#define A3XX_RB_COPY_CONTROL_GMEM_BASE__SHIFT 14
static inline uint32_t A3XX_RB_COPY_CONTROL_GMEM_BASE(uint32_t val)
@@ -1217,7 +1470,7 @@ static inline uint32_t A3XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A3XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A3XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A3XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A3XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A3XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A3XX_RB_DEPTH_CLEAR 0x00002101
@@ -1429,15 +1682,23 @@ static inline uint32_t A3XX_PC_PRIM_VTX_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_
#define REG_A3XX_PC_RESTART_INDEX 0x000021ed
#define REG_A3XX_HLSQ_CONTROL_0_REG 0x00002200
-#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
+#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000030
#define A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_0_REG_FSSUPERTHREADENABLE 0x00000040
+#define A3XX_HLSQ_CONTROL_0_REG_COMPUTEMODE 0x00000100
#define A3XX_HLSQ_CONTROL_0_REG_SPSHADERRESTART 0x00000200
#define A3XX_HLSQ_CONTROL_0_REG_RESERVED2 0x00000400
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK 0x00fff000
+#define A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT 12
+static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__SHIFT) & A3XX_HLSQ_CONTROL_0_REG_CYCLETIMEOUTLIMITVPC__MASK;
+}
+#define A3XX_HLSQ_CONTROL_0_REG_FSONLYTEX 0x02000000
#define A3XX_HLSQ_CONTROL_0_REG_CHUNKDISABLE 0x04000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__MASK 0x08000000
#define A3XX_HLSQ_CONTROL_0_REG_CONSTMODE__SHIFT 27
@@ -1451,17 +1712,39 @@ static inline uint32_t A3XX_HLSQ_CONTROL_0_REG_CONSTMODE(uint32_t val)
#define A3XX_HLSQ_CONTROL_0_REG_SINGLECONTEXT 0x80000000
#define REG_A3XX_HLSQ_CONTROL_1_REG 0x00002201
-#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x00000040
+#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK 0x000000c0
#define A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT 6
static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE(enum a3xx_threadsize val)
{
return ((val) << A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_VSTHREADSIZE__MASK;
}
#define A3XX_HLSQ_CONTROL_1_REG_VSSUPERTHREADENABLE 0x00000100
-#define A3XX_HLSQ_CONTROL_1_REG_RESERVED1 0x00000200
-#define A3XX_HLSQ_CONTROL_1_REG_ZWCOORD 0x02000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK 0x00ff0000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT 16
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDXYREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK 0xff000000
+#define A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT 24
+static inline uint32_t A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__SHIFT) & A3XX_HLSQ_CONTROL_1_REG_FRAGCOORDZWREGID__MASK;
+}
#define REG_A3XX_HLSQ_CONTROL_2_REG 0x00002202
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK 0x000003fc
+#define A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT 2
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_FACENESSREGID__MASK;
+}
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK 0x03fc0000
+#define A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT 18
+static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID(uint32_t val)
+{
+ return ((val) << A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__SHIFT) & A3XX_HLSQ_CONTROL_2_REG_COVVALUEREGID__MASK;
+}
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__MASK 0xfc000000
#define A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD__SHIFT 26
static inline uint32_t A3XX_HLSQ_CONTROL_2_REG_PRIMALLOCTHRESHOLD(uint32_t val)
@@ -1478,13 +1761,13 @@ static inline uint32_t A3XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
}
#define REG_A3XX_HLSQ_VS_CONTROL_REG 0x00002204
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1498,13 +1781,13 @@ static inline uint32_t A3XX_HLSQ_VS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_FS_CONTROL_REG 0x00002205
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x00000fff
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK 0x000003ff
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT 0
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A3XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x00fff000
+#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__MASK 0x001ff000
#define A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET__SHIFT 12
static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_CONSTSTARTOFFSET(uint32_t val)
{
@@ -1518,13 +1801,13 @@ static inline uint32_t A3XX_HLSQ_FS_CONTROL_REG_INSTRLENGTH(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_VSPRESV_RANGE_REG 0x00002206
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1532,13 +1815,13 @@ static inline uint32_t A3XX_HLSQ_CONST_VSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
}
#define REG_A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x00002207
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x0000ffff
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK 0x000001ff
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT 0
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY(uint32_t val)
{
return ((val) << A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__SHIFT) & A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_STARTENTRY__MASK;
}
-#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0xffff0000
+#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__MASK 0x01ff0000
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY__SHIFT 16
static inline uint32_t A3XX_HLSQ_CONST_FSPRESV_RANGE_REG_ENDENTRY(uint32_t val)
{
@@ -1620,12 +1903,24 @@ static inline uint32_t A3XX_VFD_CONTROL_0_STRMFETCHINSTRCNT(uint32_t val)
}
#define REG_A3XX_VFD_CONTROL_1 0x00002241
-#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000ffff
+#define A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK 0x0000000f
#define A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT 0
static inline uint32_t A3XX_VFD_CONTROL_1_MAXSTORAGE(uint32_t val)
{
return ((val) << A3XX_VFD_CONTROL_1_MAXSTORAGE__SHIFT) & A3XX_VFD_CONTROL_1_MAXSTORAGE__MASK;
}
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK 0x000000f0
+#define A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT 4
+static inline uint32_t A3XX_VFD_CONTROL_1_MAXTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MAXTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MAXTHRESHOLD__MASK;
+}
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK 0x00000f00
+#define A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT 8
+static inline uint32_t A3XX_VFD_CONTROL_1_MINTHRESHOLD(uint32_t val)
+{
+ return ((val) << A3XX_VFD_CONTROL_1_MINTHRESHOLD__SHIFT) & A3XX_VFD_CONTROL_1_MINTHRESHOLD__MASK;
+}
#define A3XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
#define A3XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
static inline uint32_t A3XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
@@ -2008,24 +2303,19 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_VS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_VS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_VS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2033,8 +2323,6 @@ static inline uint32_t A3XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
return ((val) << A3XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A3XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
}
#define A3XX_SP_VS_CTRL_REG0_SUPERTHREADMODE 0x00200000
-#define A3XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00400000
-#define A3XX_SP_VS_CTRL_REG0_COMPUTEMODE 0x00800000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__MASK 0xff000000
#define A3XX_SP_VS_CTRL_REG0_LENGTH__SHIFT 24
static inline uint32_t A3XX_SP_VS_CTRL_REG0_LENGTH(uint32_t val)
@@ -2075,7 +2363,8 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_PSIZEREGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_PARAM_REG_PSIZEREGID__SHIFT) & A3XX_SP_VS_PARAM_REG_PSIZEREGID__MASK;
}
-#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0xfff00000
+#define A3XX_SP_VS_PARAM_REG_POS2DMODE 0x00010000
+#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__MASK 0x01f00000
#define A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR__SHIFT 20
static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
{
@@ -2085,24 +2374,26 @@ static inline uint32_t A3XX_SP_VS_PARAM_REG_TOTALVSOUTVAR(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_OUT(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_OUT_REG(uint32_t i0) { return 0x000022c7 + 0x1*i0; }
-#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000001ff
+#define A3XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
#define A3XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
static inline uint32_t A3XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_A_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_A_HALF 0x00000100
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00001e00
#define A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 9
static inline uint32_t A3XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A3XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
}
-#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x01ff0000
+#define A3XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
#define A3XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
static inline uint32_t A3XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
{
return ((val) << A3XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A3XX_SP_VS_OUT_REG_B_REGID__MASK;
}
+#define A3XX_SP_VS_OUT_REG_B_HALF 0x01000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x1e000000
#define A3XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 25
static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
@@ -2113,25 +2404,25 @@ static inline uint32_t A3XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
static inline uint32_t REG_A3XX_SP_VS_VPC_DST(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
static inline uint32_t REG_A3XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x000022d0 + 0x1*i0; }
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x0000007f
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x00007f00
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x007f0000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
{
return ((val) << A3XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A3XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
}
-#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0x7f000000
#define A3XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
{
@@ -2139,6 +2430,12 @@ static inline uint32_t A3XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
}
#define REG_A3XX_SP_VS_OBJ_OFFSET_REG 0x000022d4
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_VS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2155,8 +2452,38 @@ static inline uint32_t A3XX_SP_VS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_VS_OBJ_START_REG 0x000022d5
#define REG_A3XX_SP_VS_PVT_MEM_PARAM_REG 0x000022d6
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_VS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_ADDR_REG 0x000022d7
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_VS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_VS_PVT_MEM_SIZE_REG 0x000022d8
@@ -2182,24 +2509,22 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE(enum a3xx_instrbuffe
return ((val) << A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__SHIFT) & A3XX_SP_FS_CTRL_REG0_INSTRBUFFERMODE__MASK;
}
#define A3XX_SP_FS_CTRL_REG0_CACHEINVALID 0x00000004
+#define A3XX_SP_FS_CTRL_REG0_ALUSCHMODE 0x00000008
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
#define A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
static inline uint32_t A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A3XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
}
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK 0x000c0000
-#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT 18
-static inline uint32_t A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP(uint32_t val)
-{
- return ((val) << A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__SHIFT) & A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP__MASK;
-}
+#define A3XX_SP_FS_CTRL_REG0_FSBYPASSENABLE 0x00020000
+#define A3XX_SP_FS_CTRL_REG0_INOUTREGOVERLAP 0x00040000
+#define A3XX_SP_FS_CTRL_REG0_OUTORDERED 0x00080000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
#define A3XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
static inline uint32_t A3XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
@@ -2235,7 +2560,7 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING(uint32_t val)
{
return ((val) << A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__SHIFT) & A3XX_SP_FS_CTRL_REG1_INITIALOUTSTANDING__MASK;
}
-#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x3f000000
+#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__MASK 0x7f000000
#define A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET__SHIFT 24
static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
{
@@ -2243,6 +2568,12 @@ static inline uint32_t A3XX_SP_FS_CTRL_REG1_HALFPRECVAROFFSET(uint32_t val)
}
#define REG_A3XX_SP_FS_OBJ_OFFSET_REG 0x000022e2
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK 0x0000ffff
+#define A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT 0
+static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__SHIFT) & A3XX_SP_FS_OBJ_OFFSET_REG_FIRSTEXECINSTROFFSET__MASK;
+}
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__MASK 0x01ff0000
#define A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET__SHIFT 16
static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_CONSTOBJECTOFFSET(uint32_t val)
@@ -2259,8 +2590,38 @@ static inline uint32_t A3XX_SP_FS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A3XX_SP_FS_OBJ_START_REG 0x000022e3
#define REG_A3XX_SP_FS_PVT_MEM_PARAM_REG 0x000022e4
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK 0x000000ff
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_MEMSIZEPERITEM__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK 0x00ffff00
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT 8
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKOFFSET__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK 0xff000000
+#define A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT 24
+static inline uint32_t A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__SHIFT) & A3XX_SP_FS_PVT_MEM_PARAM_REG_HWSTACKSIZEPERTHREAD__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_ADDR_REG 0x000022e5
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK 0x0000001f
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT 0
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN(uint32_t val)
+{
+ return ((val) << A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_BURSTLEN__MASK;
+}
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK 0xffffffe0
+#define A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT 5
+static inline uint32_t A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS(uint32_t val)
+{
+ return ((val >> 5) << A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__SHIFT) & A3XX_SP_FS_PVT_MEM_ADDR_REG_SHADERSTARTADDRESS__MASK;
+}
#define REG_A3XX_SP_FS_PVT_MEM_SIZE_REG 0x000022e6
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 25a0e7d13340..3d09b084a844 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -40,10 +40,11 @@
extern bool hang_debug;
static void a3xx_dump(struct msm_gpu *gpu);
+static bool a3xx_idle(struct msm_gpu *gpu);
-static void a3xx_me_init(struct msm_gpu *gpu)
+static bool a3xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -64,8 +65,8 @@ static void a3xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a3xx_idle(gpu);
}
static int a3xx_hw_init(struct msm_gpu *gpu)
@@ -294,9 +295,7 @@ static int a3xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_AXXX_CP_ME_CNTL, 0);
- a3xx_me_init(gpu);
-
- return 0;
+ return a3xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a3xx_recover(struct msm_gpu *gpu)
@@ -330,17 +329,22 @@ static void a3xx_destroy(struct msm_gpu *gpu)
kfree(a3xx_gpu);
}
-static void a3xx_idle(struct msm_gpu *gpu)
+static bool a3xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A3XX_RBBM_STATUS) &
- A3XX_RBBM_STATUS_GPU_BUSY)))
+ A3XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
+
+ return true;
}
static irqreturn_t a3xx_irq(struct msm_gpu *gpu)
@@ -400,10 +404,8 @@ static const unsigned int a3xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
adreno_show(gpu, m);
}
#endif
@@ -417,91 +419,13 @@ static void a3xx_dump(struct msm_gpu *gpu)
}
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_AXXX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_AXXX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_AXXX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A3XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A3XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A3XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_AXXX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_AXXX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_AXXX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A3XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_AXXX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_AXXX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_AXXX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_AXXX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_AXXX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_AXXX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_AXXX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_AXXX_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_AXXX_SCRATCH_UMSK),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A3XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A3XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A3XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A3XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A3XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A3XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A3XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A3XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A3XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A3XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A3XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A3XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A3XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A3XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A3XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A3XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A3XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A3XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A3XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A3XX_VPC_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A3XX_VPC_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A3XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A3XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_INDEX_MAX, REG_A3XX_VFD_INDEX_MAX),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_VS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A3XX_SP_FS_PVT_MEM_ADDR_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A3XX_SP_VS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A3XX_SP_FS_OBJ_START_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_PA_SC_AA_CONFIG, REG_A3XX_PA_SC_AA_CONFIG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_A3XX_RBBM_PM_OVERRIDE2),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_REG2, REG_AXXX_CP_SCRATCH_REG2),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_A3XX_SQ_GPR_MANAGEMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_A3XX_SQ_INST_STORE_MANAGMENT),
- REG_ADRENO_DEFINE(REG_ADRENO_TP0_CHICKEN, REG_A3XX_TP0_CHICKEN),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A3XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A3XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A3XX_UCHE_CACHE_INVALIDATE0_REG),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A3XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static const struct adreno_gpu_funcs funcs = {
@@ -511,10 +435,10 @@ static const struct adreno_gpu_funcs funcs = {
.pm_suspend = msm_gpu_pm_suspend,
.pm_resume = msm_gpu_pm_resume,
.recover = a3xx_recover,
- .last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a3xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
#ifdef CONFIG_DEBUG_FS
@@ -537,6 +461,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu_config a3xx_config = { 0 };
int ret;
if (!pdev) {
@@ -562,7 +487,13 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a3xx_registers;
adreno_gpu->reg_offsets = a3xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ a3xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a3xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+ a3xx_config.nr_rings = 1;
+ a3xx_config.va_start = 0x300000;
+ a3xx_config.va_end = 0xffffffff;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a3xx_config);
if (ret)
goto fail;
@@ -581,7 +512,7 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 99de8271dba8..1004d4ecf8fe 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -45,13 +47,18 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
enum a4xx_color_fmt {
RB4_A8_UNORM = 1,
RB4_R8_UNORM = 2,
+ RB4_R8_SNORM = 3,
+ RB4_R8_UINT = 4,
+ RB4_R8_SINT = 5,
RB4_R4G4B4A4_UNORM = 8,
RB4_R5G5B5A1_UNORM = 10,
- RB4_R5G6R5_UNORM = 14,
+ RB4_R5G6B5_UNORM = 14,
RB4_R8G8_UNORM = 15,
RB4_R8G8_SNORM = 16,
RB4_R8G8_UINT = 17,
RB4_R8G8_SINT = 18,
+ RB4_R16_UNORM = 19,
+ RB4_R16_SNORM = 20,
RB4_R16_FLOAT = 21,
RB4_R16_UINT = 22,
RB4_R16_SINT = 23,
@@ -63,12 +70,16 @@ enum a4xx_color_fmt {
RB4_R10G10B10A2_UNORM = 31,
RB4_R10G10B10A2_UINT = 34,
RB4_R11G11B10_FLOAT = 39,
+ RB4_R16G16_UNORM = 40,
+ RB4_R16G16_SNORM = 41,
RB4_R16G16_FLOAT = 42,
RB4_R16G16_UINT = 43,
RB4_R16G16_SINT = 44,
RB4_R32_FLOAT = 45,
RB4_R32_UINT = 46,
RB4_R32_SINT = 47,
+ RB4_R16G16B16A16_UNORM = 52,
+ RB4_R16G16B16A16_SNORM = 53,
RB4_R16G16B16A16_FLOAT = 54,
RB4_R16G16B16A16_UINT = 55,
RB4_R16G16B16A16_SINT = 56,
@@ -82,17 +93,10 @@ enum a4xx_color_fmt {
enum a4xx_tile_mode {
TILE4_LINEAR = 0,
+ TILE4_2 = 2,
TILE4_3 = 3,
};
-enum a4xx_rb_blend_opcode {
- BLEND_DST_PLUS_SRC = 0,
- BLEND_SRC_MINUS_DST = 1,
- BLEND_DST_MINUS_SRC = 2,
- BLEND_MIN_DST_SRC = 3,
- BLEND_MAX_DST_SRC = 4,
-};
-
enum a4xx_vtx_fmt {
VFMT4_32_FLOAT = 1,
VFMT4_32_32_FLOAT = 2,
@@ -106,6 +110,7 @@ enum a4xx_vtx_fmt {
VFMT4_32_32_FIXED = 10,
VFMT4_32_32_32_FIXED = 11,
VFMT4_32_32_32_32_FIXED = 12,
+ VFMT4_11_11_10_FLOAT = 13,
VFMT4_16_SINT = 16,
VFMT4_16_16_SINT = 17,
VFMT4_16_16_16_SINT = 18,
@@ -146,52 +151,76 @@ enum a4xx_vtx_fmt {
VFMT4_8_8_SNORM = 53,
VFMT4_8_8_8_SNORM = 54,
VFMT4_8_8_8_8_SNORM = 55,
- VFMT4_10_10_10_2_UINT = 60,
- VFMT4_10_10_10_2_UNORM = 61,
- VFMT4_10_10_10_2_SINT = 62,
- VFMT4_10_10_10_2_SNORM = 63,
+ VFMT4_10_10_10_2_UINT = 56,
+ VFMT4_10_10_10_2_UNORM = 57,
+ VFMT4_10_10_10_2_SINT = 58,
+ VFMT4_10_10_10_2_SNORM = 59,
+ VFMT4_2_10_10_10_UINT = 60,
+ VFMT4_2_10_10_10_UNORM = 61,
+ VFMT4_2_10_10_10_SINT = 62,
+ VFMT4_2_10_10_10_SNORM = 63,
};
enum a4xx_tex_fmt {
- TFMT4_5_6_5_UNORM = 11,
- TFMT4_5_5_5_1_UNORM = 10,
- TFMT4_4_4_4_4_UNORM = 8,
- TFMT4_X8Z24_UNORM = 71,
- TFMT4_10_10_10_2_UNORM = 33,
TFMT4_A8_UNORM = 3,
- TFMT4_L8_A8_UNORM = 13,
TFMT4_8_UNORM = 4,
- TFMT4_8_8_UNORM = 14,
- TFMT4_8_8_8_8_UNORM = 28,
TFMT4_8_SNORM = 5,
- TFMT4_8_8_SNORM = 15,
- TFMT4_8_8_8_8_SNORM = 29,
TFMT4_8_UINT = 6,
- TFMT4_8_8_UINT = 16,
- TFMT4_8_8_8_8_UINT = 30,
TFMT4_8_SINT = 7,
+ TFMT4_4_4_4_4_UNORM = 8,
+ TFMT4_5_5_5_1_UNORM = 9,
+ TFMT4_5_6_5_UNORM = 11,
+ TFMT4_L8_A8_UNORM = 13,
+ TFMT4_8_8_UNORM = 14,
+ TFMT4_8_8_SNORM = 15,
+ TFMT4_8_8_UINT = 16,
TFMT4_8_8_SINT = 17,
- TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_16_UNORM = 18,
+ TFMT4_16_SNORM = 19,
+ TFMT4_16_FLOAT = 20,
TFMT4_16_UINT = 21,
- TFMT4_16_16_UINT = 41,
- TFMT4_16_16_16_16_UINT = 54,
TFMT4_16_SINT = 22,
+ TFMT4_8_8_8_8_UNORM = 28,
+ TFMT4_8_8_8_8_SNORM = 29,
+ TFMT4_8_8_8_8_UINT = 30,
+ TFMT4_8_8_8_8_SINT = 31,
+ TFMT4_9_9_9_E5_FLOAT = 32,
+ TFMT4_10_10_10_2_UNORM = 33,
+ TFMT4_10_10_10_2_UINT = 34,
+ TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_16_16_UNORM = 38,
+ TFMT4_16_16_SNORM = 39,
+ TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_UINT = 41,
TFMT4_16_16_SINT = 42,
- TFMT4_16_16_16_16_SINT = 55,
+ TFMT4_32_FLOAT = 43,
TFMT4_32_UINT = 44,
- TFMT4_32_32_UINT = 57,
- TFMT4_32_32_32_32_UINT = 64,
TFMT4_32_SINT = 45,
- TFMT4_32_32_SINT = 58,
- TFMT4_32_32_32_32_SINT = 65,
- TFMT4_16_FLOAT = 20,
- TFMT4_16_16_FLOAT = 40,
+ TFMT4_16_16_16_16_UNORM = 51,
+ TFMT4_16_16_16_16_SNORM = 52,
TFMT4_16_16_16_16_FLOAT = 53,
- TFMT4_32_FLOAT = 43,
+ TFMT4_16_16_16_16_UINT = 54,
+ TFMT4_16_16_16_16_SINT = 55,
TFMT4_32_32_FLOAT = 56,
+ TFMT4_32_32_UINT = 57,
+ TFMT4_32_32_SINT = 58,
+ TFMT4_32_32_32_FLOAT = 59,
+ TFMT4_32_32_32_UINT = 60,
+ TFMT4_32_32_32_SINT = 61,
TFMT4_32_32_32_32_FLOAT = 63,
- TFMT4_9_9_9_E5_FLOAT = 32,
- TFMT4_11_11_10_FLOAT = 37,
+ TFMT4_32_32_32_32_UINT = 64,
+ TFMT4_32_32_32_32_SINT = 65,
+ TFMT4_X8Z24_UNORM = 71,
+ TFMT4_DXT1 = 86,
+ TFMT4_DXT3 = 87,
+ TFMT4_DXT5 = 88,
+ TFMT4_RGTC1_UNORM = 90,
+ TFMT4_RGTC1_SNORM = 91,
+ TFMT4_RGTC2_UNORM = 94,
+ TFMT4_RGTC2_SNORM = 95,
+ TFMT4_BPTC_UFLOAT = 97,
+ TFMT4_BPTC_FLOAT = 98,
+ TFMT4_BPTC = 99,
TFMT4_ATC_RGB = 100,
TFMT4_ATC_RGBA_EXPLICIT = 101,
TFMT4_ATC_RGBA_INTERPOLATED = 102,
@@ -240,6 +269,545 @@ enum a4xx_tess_spacing {
EVEN_SPACING = 3,
};
+enum a4xx_ccu_perfcounter_select {
+ CCU_BUSY_CYCLES = 0,
+ CCU_RB_DEPTH_RETURN_STALL = 2,
+ CCU_RB_COLOR_RETURN_STALL = 3,
+ CCU_DEPTH_BLOCKS = 6,
+ CCU_COLOR_BLOCKS = 7,
+ CCU_DEPTH_BLOCK_HIT = 8,
+ CCU_COLOR_BLOCK_HIT = 9,
+ CCU_DEPTH_FLAG1_COUNT = 10,
+ CCU_DEPTH_FLAG2_COUNT = 11,
+ CCU_DEPTH_FLAG3_COUNT = 12,
+ CCU_DEPTH_FLAG4_COUNT = 13,
+ CCU_COLOR_FLAG1_COUNT = 14,
+ CCU_COLOR_FLAG2_COUNT = 15,
+ CCU_COLOR_FLAG3_COUNT = 16,
+ CCU_COLOR_FLAG4_COUNT = 17,
+ CCU_PARTIAL_BLOCK_READ = 18,
+};
+
+enum a4xx_cp_perfcounter_select {
+ CP_ALWAYS_COUNT = 0,
+ CP_BUSY = 1,
+ CP_PFP_IDLE = 2,
+ CP_PFP_BUSY_WORKING = 3,
+ CP_PFP_STALL_CYCLES_ANY = 4,
+ CP_PFP_STARVE_CYCLES_ANY = 5,
+ CP_PFP_STARVED_PER_LOAD_ADDR = 6,
+ CP_PFP_STALLED_PER_STORE_ADDR = 7,
+ CP_PFP_PC_PROFILE = 8,
+ CP_PFP_MATCH_PM4_PKT_PROFILE = 9,
+ CP_PFP_COND_INDIRECT_DISCARDED = 10,
+ CP_LONG_RESUMPTIONS = 11,
+ CP_RESUME_CYCLES = 12,
+ CP_RESUME_TO_BOUNDARY_CYCLES = 13,
+ CP_LONG_PREEMPTIONS = 14,
+ CP_PREEMPT_CYCLES = 15,
+ CP_PREEMPT_TO_BOUNDARY_CYCLES = 16,
+ CP_ME_FIFO_EMPTY_PFP_IDLE = 17,
+ CP_ME_FIFO_EMPTY_PFP_BUSY = 18,
+ CP_ME_FIFO_NOT_EMPTY_NOT_FULL = 19,
+ CP_ME_FIFO_FULL_ME_BUSY = 20,
+ CP_ME_FIFO_FULL_ME_NON_WORKING = 21,
+ CP_ME_WAITING_FOR_PACKETS = 22,
+ CP_ME_BUSY_WORKING = 23,
+ CP_ME_STARVE_CYCLES_ANY = 24,
+ CP_ME_STARVE_CYCLES_PER_PROFILE = 25,
+ CP_ME_STALL_CYCLES_PER_PROFILE = 26,
+ CP_ME_PC_PROFILE = 27,
+ CP_RCIU_FIFO_EMPTY = 28,
+ CP_RCIU_FIFO_NOT_EMPTY_NOT_FULL = 29,
+ CP_RCIU_FIFO_FULL = 30,
+ CP_RCIU_FIFO_FULL_NO_CONTEXT = 31,
+ CP_RCIU_FIFO_FULL_AHB_MASTER = 32,
+ CP_RCIU_FIFO_FULL_OTHER = 33,
+ CP_AHB_IDLE = 34,
+ CP_AHB_STALL_ON_GRANT_NO_SPLIT = 35,
+ CP_AHB_STALL_ON_GRANT_SPLIT = 36,
+ CP_AHB_STALL_ON_GRANT_SPLIT_PROFILE = 37,
+ CP_AHB_BUSY_WORKING = 38,
+ CP_AHB_BUSY_STALL_ON_HRDY = 39,
+ CP_AHB_BUSY_STALL_ON_HRDY_PROFILE = 40,
+};
+
+enum a4xx_gras_ras_perfcounter_select {
+ RAS_SUPER_TILES = 0,
+ RAS_8X8_TILES = 1,
+ RAS_4X4_TILES = 2,
+ RAS_BUSY_CYCLES = 3,
+ RAS_STALL_CYCLES_BY_RB = 4,
+ RAS_STALL_CYCLES_BY_VSC = 5,
+ RAS_STARVE_CYCLES_BY_TSE = 6,
+ RAS_SUPERTILE_CYCLES = 7,
+ RAS_TILE_CYCLES = 8,
+ RAS_FULLY_COVERED_SUPER_TILES = 9,
+ RAS_FULLY_COVERED_8X8_TILES = 10,
+ RAS_4X4_PRIM = 11,
+ RAS_8X4_4X8_PRIM = 12,
+ RAS_8X8_PRIM = 13,
+};
+
+enum a4xx_gras_tse_perfcounter_select {
+ TSE_INPUT_PRIM = 0,
+ TSE_INPUT_NULL_PRIM = 1,
+ TSE_TRIVAL_REJ_PRIM = 2,
+ TSE_CLIPPED_PRIM = 3,
+ TSE_NEW_PRIM = 4,
+ TSE_ZERO_AREA_PRIM = 5,
+ TSE_FACENESS_CULLED_PRIM = 6,
+ TSE_ZERO_PIXEL_PRIM = 7,
+ TSE_OUTPUT_NULL_PRIM = 8,
+ TSE_OUTPUT_VISIBLE_PRIM = 9,
+ TSE_PRE_CLIP_PRIM = 10,
+ TSE_POST_CLIP_PRIM = 11,
+ TSE_BUSY_CYCLES = 12,
+ TSE_PC_STARVE = 13,
+ TSE_RAS_STALL = 14,
+ TSE_STALL_BARYPLANE_FIFO_FULL = 15,
+ TSE_STALL_ZPLANE_FIFO_FULL = 16,
+};
+
+enum a4xx_hlsq_perfcounter_select {
+ HLSQ_SP_VS_STAGE_CONSTANT = 0,
+ HLSQ_SP_VS_STAGE_INSTRUCTIONS = 1,
+ HLSQ_SP_FS_STAGE_CONSTANT = 2,
+ HLSQ_SP_FS_STAGE_INSTRUCTIONS = 3,
+ HLSQ_TP_STATE = 4,
+ HLSQ_QUADS = 5,
+ HLSQ_PIXELS = 6,
+ HLSQ_VERTICES = 7,
+ HLSQ_SP_VS_STAGE_DATA_BYTES = 13,
+ HLSQ_SP_FS_STAGE_DATA_BYTES = 14,
+ HLSQ_BUSY_CYCLES = 15,
+ HLSQ_STALL_CYCLES_SP_STATE = 16,
+ HLSQ_STALL_CYCLES_SP_VS_STAGE = 17,
+ HLSQ_STALL_CYCLES_SP_FS_STAGE = 18,
+ HLSQ_STALL_CYCLES_UCHE = 19,
+ HLSQ_RBBM_LOAD_CYCLES = 20,
+ HLSQ_DI_TO_VS_START_SP = 21,
+ HLSQ_DI_TO_FS_START_SP = 22,
+ HLSQ_VS_STAGE_START_TO_DONE_SP = 23,
+ HLSQ_FS_STAGE_START_TO_DONE_SP = 24,
+ HLSQ_SP_STATE_COPY_CYCLES_VS_STAGE = 25,
+ HLSQ_SP_STATE_COPY_CYCLES_FS_STAGE = 26,
+ HLSQ_UCHE_LATENCY_CYCLES = 27,
+ HLSQ_UCHE_LATENCY_COUNT = 28,
+ HLSQ_STARVE_CYCLES_VFD = 29,
+};
+
+enum a4xx_pc_perfcounter_select {
+ PC_VIS_STREAMS_LOADED = 0,
+ PC_VPC_PRIMITIVES = 2,
+ PC_DEAD_PRIM = 3,
+ PC_LIVE_PRIM = 4,
+ PC_DEAD_DRAWCALLS = 5,
+ PC_LIVE_DRAWCALLS = 6,
+ PC_VERTEX_MISSES = 7,
+ PC_STALL_CYCLES_VFD = 9,
+ PC_STALL_CYCLES_TSE = 10,
+ PC_STALL_CYCLES_UCHE = 11,
+ PC_WORKING_CYCLES = 12,
+ PC_IA_VERTICES = 13,
+ PC_GS_PRIMITIVES = 14,
+ PC_HS_INVOCATIONS = 15,
+ PC_DS_INVOCATIONS = 16,
+ PC_DS_PRIMITIVES = 17,
+ PC_STARVE_CYCLES_FOR_INDEX = 20,
+ PC_STARVE_CYCLES_FOR_TESS_FACTOR = 21,
+ PC_STARVE_CYCLES_FOR_VIZ_STREAM = 22,
+ PC_STALL_CYCLES_TESS = 23,
+ PC_STARVE_CYCLES_FOR_POSITION = 24,
+ PC_MODE0_DRAWCALL = 25,
+ PC_MODE1_DRAWCALL = 26,
+ PC_MODE2_DRAWCALL = 27,
+ PC_MODE3_DRAWCALL = 28,
+ PC_MODE4_DRAWCALL = 29,
+ PC_PREDICATED_DEAD_DRAWCALL = 30,
+ PC_STALL_CYCLES_BY_TSE_ONLY = 31,
+ PC_STALL_CYCLES_BY_VPC_ONLY = 32,
+ PC_VPC_POS_DATA_TRANSACTION = 33,
+ PC_BUSY_CYCLES = 34,
+ PC_STARVE_CYCLES_DI = 35,
+ PC_STALL_CYCLES_VPC = 36,
+ TESS_WORKING_CYCLES = 37,
+ TESS_NUM_CYCLES_SETUP_WORKING = 38,
+ TESS_NUM_CYCLES_PTGEN_WORKING = 39,
+ TESS_NUM_CYCLES_CONNGEN_WORKING = 40,
+ TESS_BUSY_CYCLES = 41,
+ TESS_STARVE_CYCLES_PC = 42,
+ TESS_STALL_CYCLES_PC = 43,
+};
+
+enum a4xx_pwr_perfcounter_select {
+ PWR_CORE_CLOCK_CYCLES = 0,
+ PWR_BUSY_CLOCK_CYCLES = 1,
+};
+
+enum a4xx_rb_perfcounter_select {
+ RB_BUSY_CYCLES = 0,
+ RB_BUSY_CYCLES_BINNING = 1,
+ RB_BUSY_CYCLES_RENDERING = 2,
+ RB_BUSY_CYCLES_RESOLVE = 3,
+ RB_STARVE_CYCLES_BY_SP = 4,
+ RB_STARVE_CYCLES_BY_RAS = 5,
+ RB_STARVE_CYCLES_BY_MARB = 6,
+ RB_STALL_CYCLES_BY_MARB = 7,
+ RB_STALL_CYCLES_BY_HLSQ = 8,
+ RB_RB_RB_MARB_DATA = 9,
+ RB_SP_RB_QUAD = 10,
+ RB_RAS_RB_Z_QUADS = 11,
+ RB_GMEM_CH0_READ = 12,
+ RB_GMEM_CH1_READ = 13,
+ RB_GMEM_CH0_WRITE = 14,
+ RB_GMEM_CH1_WRITE = 15,
+ RB_CP_CONTEXT_DONE = 16,
+ RB_CP_CACHE_FLUSH = 17,
+ RB_CP_ZPASS_DONE = 18,
+ RB_STALL_FIFO0_FULL = 19,
+ RB_STALL_FIFO1_FULL = 20,
+ RB_STALL_FIFO2_FULL = 21,
+ RB_STALL_FIFO3_FULL = 22,
+ RB_RB_HLSQ_TRANSACTIONS = 23,
+ RB_Z_READ = 24,
+ RB_Z_WRITE = 25,
+ RB_C_READ = 26,
+ RB_C_WRITE = 27,
+ RB_C_READ_LATENCY = 28,
+ RB_Z_READ_LATENCY = 29,
+ RB_STALL_BY_UCHE = 30,
+ RB_MARB_UCHE_TRANSACTIONS = 31,
+ RB_CACHE_STALL_MISS = 32,
+ RB_CACHE_STALL_FIFO_FULL = 33,
+ RB_8BIT_BLENDER_UNITS_ACTIVE = 34,
+ RB_16BIT_BLENDER_UNITS_ACTIVE = 35,
+ RB_SAMPLER_UNITS_ACTIVE = 36,
+ RB_TOTAL_PASS = 38,
+ RB_Z_PASS = 39,
+ RB_Z_FAIL = 40,
+ RB_S_FAIL = 41,
+ RB_POWER0 = 42,
+ RB_POWER1 = 43,
+ RB_POWER2 = 44,
+ RB_POWER3 = 45,
+ RB_POWER4 = 46,
+ RB_POWER5 = 47,
+ RB_POWER6 = 48,
+ RB_POWER7 = 49,
+};
+
+enum a4xx_rbbm_perfcounter_select {
+ RBBM_ALWAYS_ON = 0,
+ RBBM_VBIF_BUSY = 1,
+ RBBM_TSE_BUSY = 2,
+ RBBM_RAS_BUSY = 3,
+ RBBM_PC_DCALL_BUSY = 4,
+ RBBM_PC_VSD_BUSY = 5,
+ RBBM_VFD_BUSY = 6,
+ RBBM_VPC_BUSY = 7,
+ RBBM_UCHE_BUSY = 8,
+ RBBM_VSC_BUSY = 9,
+ RBBM_HLSQ_BUSY = 10,
+ RBBM_ANY_RB_BUSY = 11,
+ RBBM_ANY_TPL1_BUSY = 12,
+ RBBM_ANY_SP_BUSY = 13,
+ RBBM_ANY_MARB_BUSY = 14,
+ RBBM_ANY_ARB_BUSY = 15,
+ RBBM_AHB_STATUS_BUSY = 16,
+ RBBM_AHB_STATUS_STALLED = 17,
+ RBBM_AHB_STATUS_TXFR = 18,
+ RBBM_AHB_STATUS_TXFR_SPLIT = 19,
+ RBBM_AHB_STATUS_TXFR_ERROR = 20,
+ RBBM_AHB_STATUS_LONG_STALL = 21,
+ RBBM_STATUS_MASKED = 22,
+ RBBM_CP_BUSY_GFX_CORE_IDLE = 23,
+ RBBM_TESS_BUSY = 24,
+ RBBM_COM_BUSY = 25,
+ RBBM_DCOM_BUSY = 32,
+ RBBM_ANY_CCU_BUSY = 33,
+ RBBM_DPM_BUSY = 34,
+};
+
+enum a4xx_sp_perfcounter_select {
+ SP_LM_LOAD_INSTRUCTIONS = 0,
+ SP_LM_STORE_INSTRUCTIONS = 1,
+ SP_LM_ATOMICS = 2,
+ SP_GM_LOAD_INSTRUCTIONS = 3,
+ SP_GM_STORE_INSTRUCTIONS = 4,
+ SP_GM_ATOMICS = 5,
+ SP_VS_STAGE_TEX_INSTRUCTIONS = 6,
+ SP_VS_STAGE_CFLOW_INSTRUCTIONS = 7,
+ SP_VS_STAGE_EFU_INSTRUCTIONS = 8,
+ SP_VS_STAGE_FULL_ALU_INSTRUCTIONS = 9,
+ SP_VS_STAGE_HALF_ALU_INSTRUCTIONS = 10,
+ SP_FS_STAGE_TEX_INSTRUCTIONS = 11,
+ SP_FS_STAGE_CFLOW_INSTRUCTIONS = 12,
+ SP_FS_STAGE_EFU_INSTRUCTIONS = 13,
+ SP_FS_STAGE_FULL_ALU_INSTRUCTIONS = 14,
+ SP_FS_STAGE_HALF_ALU_INSTRUCTIONS = 15,
+ SP_VS_INSTRUCTIONS = 17,
+ SP_FS_INSTRUCTIONS = 18,
+ SP_ADDR_LOCK_COUNT = 19,
+ SP_UCHE_READ_TRANS = 20,
+ SP_UCHE_WRITE_TRANS = 21,
+ SP_EXPORT_VPC_TRANS = 22,
+ SP_EXPORT_RB_TRANS = 23,
+ SP_PIXELS_KILLED = 24,
+ SP_ICL1_REQUESTS = 25,
+ SP_ICL1_MISSES = 26,
+ SP_ICL0_REQUESTS = 27,
+ SP_ICL0_MISSES = 28,
+ SP_ALU_WORKING_CYCLES = 29,
+ SP_EFU_WORKING_CYCLES = 30,
+ SP_STALL_CYCLES_BY_VPC = 31,
+ SP_STALL_CYCLES_BY_TP = 32,
+ SP_STALL_CYCLES_BY_UCHE = 33,
+ SP_STALL_CYCLES_BY_RB = 34,
+ SP_BUSY_CYCLES = 35,
+ SP_HS_INSTRUCTIONS = 36,
+ SP_DS_INSTRUCTIONS = 37,
+ SP_GS_INSTRUCTIONS = 38,
+ SP_CS_INSTRUCTIONS = 39,
+ SP_SCHEDULER_NON_WORKING = 40,
+ SP_WAVE_CONTEXTS = 41,
+ SP_WAVE_CONTEXT_CYCLES = 42,
+ SP_POWER0 = 43,
+ SP_POWER1 = 44,
+ SP_POWER2 = 45,
+ SP_POWER3 = 46,
+ SP_POWER4 = 47,
+ SP_POWER5 = 48,
+ SP_POWER6 = 49,
+ SP_POWER7 = 50,
+ SP_POWER8 = 51,
+ SP_POWER9 = 52,
+ SP_POWER10 = 53,
+ SP_POWER11 = 54,
+ SP_POWER12 = 55,
+ SP_POWER13 = 56,
+ SP_POWER14 = 57,
+ SP_POWER15 = 58,
+};
+
+enum a4xx_tp_perfcounter_select {
+ TP_L1_REQUESTS = 0,
+ TP_L1_MISSES = 1,
+ TP_QUADS_OFFSET = 8,
+ TP_QUAD_SHADOW = 9,
+ TP_QUADS_ARRAY = 10,
+ TP_QUADS_GRADIENT = 11,
+ TP_QUADS_1D2D = 12,
+ TP_QUADS_3DCUBE = 13,
+ TP_BUSY_CYCLES = 16,
+ TP_STALL_CYCLES_BY_ARB = 17,
+ TP_STATE_CACHE_REQUESTS = 20,
+ TP_STATE_CACHE_MISSES = 21,
+ TP_POWER0 = 22,
+ TP_POWER1 = 23,
+ TP_POWER2 = 24,
+ TP_POWER3 = 25,
+ TP_POWER4 = 26,
+ TP_POWER5 = 27,
+ TP_POWER6 = 28,
+ TP_POWER7 = 29,
+};
+
+enum a4xx_uche_perfcounter_select {
+ UCHE_VBIF_READ_BEATS_TP = 0,
+ UCHE_VBIF_READ_BEATS_VFD = 1,
+ UCHE_VBIF_READ_BEATS_HLSQ = 2,
+ UCHE_VBIF_READ_BEATS_MARB = 3,
+ UCHE_VBIF_READ_BEATS_SP = 4,
+ UCHE_READ_REQUESTS_TP = 5,
+ UCHE_READ_REQUESTS_VFD = 6,
+ UCHE_READ_REQUESTS_HLSQ = 7,
+ UCHE_READ_REQUESTS_MARB = 8,
+ UCHE_READ_REQUESTS_SP = 9,
+ UCHE_WRITE_REQUESTS_MARB = 10,
+ UCHE_WRITE_REQUESTS_SP = 11,
+ UCHE_TAG_CHECK_FAILS = 12,
+ UCHE_EVICTS = 13,
+ UCHE_FLUSHES = 14,
+ UCHE_VBIF_LATENCY_CYCLES = 15,
+ UCHE_VBIF_LATENCY_SAMPLES = 16,
+ UCHE_BUSY_CYCLES = 17,
+ UCHE_VBIF_READ_BEATS_PC = 18,
+ UCHE_READ_REQUESTS_PC = 19,
+ UCHE_WRITE_REQUESTS_VPC = 20,
+ UCHE_STALL_BY_VBIF = 21,
+ UCHE_WRITE_REQUESTS_VSC = 22,
+ UCHE_POWER0 = 23,
+ UCHE_POWER1 = 24,
+ UCHE_POWER2 = 25,
+ UCHE_POWER3 = 26,
+ UCHE_POWER4 = 27,
+ UCHE_POWER5 = 28,
+ UCHE_POWER6 = 29,
+ UCHE_POWER7 = 30,
+};
+
+enum a4xx_vbif_perfcounter_select {
+ AXI_READ_REQUESTS_ID_0 = 0,
+ AXI_READ_REQUESTS_ID_1 = 1,
+ AXI_READ_REQUESTS_ID_2 = 2,
+ AXI_READ_REQUESTS_ID_3 = 3,
+ AXI_READ_REQUESTS_ID_4 = 4,
+ AXI_READ_REQUESTS_ID_5 = 5,
+ AXI_READ_REQUESTS_ID_6 = 6,
+ AXI_READ_REQUESTS_ID_7 = 7,
+ AXI_READ_REQUESTS_ID_8 = 8,
+ AXI_READ_REQUESTS_ID_9 = 9,
+ AXI_READ_REQUESTS_ID_10 = 10,
+ AXI_READ_REQUESTS_ID_11 = 11,
+ AXI_READ_REQUESTS_ID_12 = 12,
+ AXI_READ_REQUESTS_ID_13 = 13,
+ AXI_READ_REQUESTS_ID_14 = 14,
+ AXI_READ_REQUESTS_ID_15 = 15,
+ AXI0_READ_REQUESTS_TOTAL = 16,
+ AXI1_READ_REQUESTS_TOTAL = 17,
+ AXI2_READ_REQUESTS_TOTAL = 18,
+ AXI3_READ_REQUESTS_TOTAL = 19,
+ AXI_READ_REQUESTS_TOTAL = 20,
+ AXI_WRITE_REQUESTS_ID_0 = 21,
+ AXI_WRITE_REQUESTS_ID_1 = 22,
+ AXI_WRITE_REQUESTS_ID_2 = 23,
+ AXI_WRITE_REQUESTS_ID_3 = 24,
+ AXI_WRITE_REQUESTS_ID_4 = 25,
+ AXI_WRITE_REQUESTS_ID_5 = 26,
+ AXI_WRITE_REQUESTS_ID_6 = 27,
+ AXI_WRITE_REQUESTS_ID_7 = 28,
+ AXI_WRITE_REQUESTS_ID_8 = 29,
+ AXI_WRITE_REQUESTS_ID_9 = 30,
+ AXI_WRITE_REQUESTS_ID_10 = 31,
+ AXI_WRITE_REQUESTS_ID_11 = 32,
+ AXI_WRITE_REQUESTS_ID_12 = 33,
+ AXI_WRITE_REQUESTS_ID_13 = 34,
+ AXI_WRITE_REQUESTS_ID_14 = 35,
+ AXI_WRITE_REQUESTS_ID_15 = 36,
+ AXI0_WRITE_REQUESTS_TOTAL = 37,
+ AXI1_WRITE_REQUESTS_TOTAL = 38,
+ AXI2_WRITE_REQUESTS_TOTAL = 39,
+ AXI3_WRITE_REQUESTS_TOTAL = 40,
+ AXI_WRITE_REQUESTS_TOTAL = 41,
+ AXI_TOTAL_REQUESTS = 42,
+ AXI_READ_DATA_BEATS_ID_0 = 43,
+ AXI_READ_DATA_BEATS_ID_1 = 44,
+ AXI_READ_DATA_BEATS_ID_2 = 45,
+ AXI_READ_DATA_BEATS_ID_3 = 46,
+ AXI_READ_DATA_BEATS_ID_4 = 47,
+ AXI_READ_DATA_BEATS_ID_5 = 48,
+ AXI_READ_DATA_BEATS_ID_6 = 49,
+ AXI_READ_DATA_BEATS_ID_7 = 50,
+ AXI_READ_DATA_BEATS_ID_8 = 51,
+ AXI_READ_DATA_BEATS_ID_9 = 52,
+ AXI_READ_DATA_BEATS_ID_10 = 53,
+ AXI_READ_DATA_BEATS_ID_11 = 54,
+ AXI_READ_DATA_BEATS_ID_12 = 55,
+ AXI_READ_DATA_BEATS_ID_13 = 56,
+ AXI_READ_DATA_BEATS_ID_14 = 57,
+ AXI_READ_DATA_BEATS_ID_15 = 58,
+ AXI0_READ_DATA_BEATS_TOTAL = 59,
+ AXI1_READ_DATA_BEATS_TOTAL = 60,
+ AXI2_READ_DATA_BEATS_TOTAL = 61,
+ AXI3_READ_DATA_BEATS_TOTAL = 62,
+ AXI_READ_DATA_BEATS_TOTAL = 63,
+ AXI_WRITE_DATA_BEATS_ID_0 = 64,
+ AXI_WRITE_DATA_BEATS_ID_1 = 65,
+ AXI_WRITE_DATA_BEATS_ID_2 = 66,
+ AXI_WRITE_DATA_BEATS_ID_3 = 67,
+ AXI_WRITE_DATA_BEATS_ID_4 = 68,
+ AXI_WRITE_DATA_BEATS_ID_5 = 69,
+ AXI_WRITE_DATA_BEATS_ID_6 = 70,
+ AXI_WRITE_DATA_BEATS_ID_7 = 71,
+ AXI_WRITE_DATA_BEATS_ID_8 = 72,
+ AXI_WRITE_DATA_BEATS_ID_9 = 73,
+ AXI_WRITE_DATA_BEATS_ID_10 = 74,
+ AXI_WRITE_DATA_BEATS_ID_11 = 75,
+ AXI_WRITE_DATA_BEATS_ID_12 = 76,
+ AXI_WRITE_DATA_BEATS_ID_13 = 77,
+ AXI_WRITE_DATA_BEATS_ID_14 = 78,
+ AXI_WRITE_DATA_BEATS_ID_15 = 79,
+ AXI0_WRITE_DATA_BEATS_TOTAL = 80,
+ AXI1_WRITE_DATA_BEATS_TOTAL = 81,
+ AXI2_WRITE_DATA_BEATS_TOTAL = 82,
+ AXI3_WRITE_DATA_BEATS_TOTAL = 83,
+ AXI_WRITE_DATA_BEATS_TOTAL = 84,
+ AXI_DATA_BEATS_TOTAL = 85,
+ CYCLES_HELD_OFF_ID_0 = 86,
+ CYCLES_HELD_OFF_ID_1 = 87,
+ CYCLES_HELD_OFF_ID_2 = 88,
+ CYCLES_HELD_OFF_ID_3 = 89,
+ CYCLES_HELD_OFF_ID_4 = 90,
+ CYCLES_HELD_OFF_ID_5 = 91,
+ CYCLES_HELD_OFF_ID_6 = 92,
+ CYCLES_HELD_OFF_ID_7 = 93,
+ CYCLES_HELD_OFF_ID_8 = 94,
+ CYCLES_HELD_OFF_ID_9 = 95,
+ CYCLES_HELD_OFF_ID_10 = 96,
+ CYCLES_HELD_OFF_ID_11 = 97,
+ CYCLES_HELD_OFF_ID_12 = 98,
+ CYCLES_HELD_OFF_ID_13 = 99,
+ CYCLES_HELD_OFF_ID_14 = 100,
+ CYCLES_HELD_OFF_ID_15 = 101,
+ AXI_READ_REQUEST_HELD_OFF = 102,
+ AXI_WRITE_REQUEST_HELD_OFF = 103,
+ AXI_REQUEST_HELD_OFF = 104,
+ AXI_WRITE_DATA_HELD_OFF = 105,
+ OCMEM_AXI_READ_REQUEST_HELD_OFF = 106,
+ OCMEM_AXI_WRITE_REQUEST_HELD_OFF = 107,
+ OCMEM_AXI_REQUEST_HELD_OFF = 108,
+ OCMEM_AXI_WRITE_DATA_HELD_OFF = 109,
+ ELAPSED_CYCLES_DDR = 110,
+ ELAPSED_CYCLES_OCMEM = 111,
+};
+
+enum a4xx_vfd_perfcounter_select {
+ VFD_UCHE_BYTE_FETCHED = 0,
+ VFD_UCHE_TRANS = 1,
+ VFD_FETCH_INSTRUCTIONS = 3,
+ VFD_BUSY_CYCLES = 5,
+ VFD_STALL_CYCLES_UCHE = 6,
+ VFD_STALL_CYCLES_HLSQ = 7,
+ VFD_STALL_CYCLES_VPC_BYPASS = 8,
+ VFD_STALL_CYCLES_VPC_ALLOC = 9,
+ VFD_MODE_0_FIBERS = 13,
+ VFD_MODE_1_FIBERS = 14,
+ VFD_MODE_2_FIBERS = 15,
+ VFD_MODE_3_FIBERS = 16,
+ VFD_MODE_4_FIBERS = 17,
+ VFD_BFIFO_STALL = 18,
+ VFD_NUM_VERTICES_TOTAL = 19,
+ VFD_PACKER_FULL = 20,
+ VFD_UCHE_REQUEST_FIFO_FULL = 21,
+ VFD_STARVE_CYCLES_PC = 22,
+ VFD_STARVE_CYCLES_UCHE = 23,
+};
+
+enum a4xx_vpc_perfcounter_select {
+ VPC_SP_LM_COMPONENTS = 2,
+ VPC_SP0_LM_BYTES = 3,
+ VPC_SP1_LM_BYTES = 4,
+ VPC_SP2_LM_BYTES = 5,
+ VPC_SP3_LM_BYTES = 6,
+ VPC_WORKING_CYCLES = 7,
+ VPC_STALL_CYCLES_LM = 8,
+ VPC_STARVE_CYCLES_RAS = 9,
+ VPC_STREAMOUT_CYCLES = 10,
+ VPC_UCHE_TRANSACTIONS = 12,
+ VPC_STALL_CYCLES_UCHE = 13,
+ VPC_BUSY_CYCLES = 14,
+ VPC_STARVE_CYCLES_SP = 15,
+};
+
+enum a4xx_vsc_perfcounter_select {
+ VSC_BUSY_CYCLES = 0,
+ VSC_WORKING_CYCLES = 1,
+ VSC_STALL_CYCLES_UCHE = 2,
+ VSC_STARVE_CYCLES_RAS = 3,
+ VSC_EOT_NUM = 4,
+};
+
enum a4xx_tex_filter {
A4XX_TEX_NEAREST = 0,
A4XX_TEX_LINEAR = 1,
@@ -326,6 +894,12 @@ static inline uint32_t A4XX_CGC_HLSQ_EARLY_CYC(uint32_t val)
#define REG_A4XX_RB_PERFCTR_RB_SEL_7 0x00000cce
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_0 0x00000ccf
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_1 0x00000cd0
+
+#define REG_A4XX_RB_PERFCTR_CCU_SEL_2 0x00000cd1
+
#define REG_A4XX_RB_PERFCTR_CCU_SEL_3 0x00000cd2
#define REG_A4XX_RB_FRAME_BUFFER_DIMENSION 0x00000ce0
@@ -363,6 +937,7 @@ static inline uint32_t A4XX_RB_MODE_CONTROL_HEIGHT(uint32_t val)
{
return ((val >> 5) << A4XX_RB_MODE_CONTROL_HEIGHT__SHIFT) & A4XX_RB_MODE_CONTROL_HEIGHT__MASK;
}
+#define A4XX_RB_MODE_CONTROL_ENABLE_GMEM 0x00010000
#define REG_A4XX_RB_RENDER_CONTROL 0x000020a1
#define A4XX_RB_RENDER_CONTROL_BINNING_PASS 0x00000001
@@ -400,8 +975,13 @@ static inline uint32_t REG_A4XX_RB_MRT_CONTROL(uint32_t i0) { return 0x000020a4
#define A4XX_RB_MRT_CONTROL_READ_DEST_ENABLE 0x00000008
#define A4XX_RB_MRT_CONTROL_BLEND 0x00000010
#define A4XX_RB_MRT_CONTROL_BLEND2 0x00000020
-#define A4XX_RB_MRT_CONTROL_FASTCLEAR 0x00000400
-#define A4XX_RB_MRT_CONTROL_B11 0x00000800
+#define A4XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000040
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000f00
+#define A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 8
+static inline uint32_t A4XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A4XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A4XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x0f000000
#define A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 24
static inline uint32_t A4XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -461,7 +1041,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_b
}
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
#define A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
}
@@ -479,7 +1059,7 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb
}
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
#define A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
-static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a4xx_rb_blend_opcode val)
+static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
{
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
}
@@ -490,13 +1070,19 @@ static inline uint32_t A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_r
return ((val) << A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A4XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
}
-#define REG_A4XX_RB_BLEND_RED 0x000020f3
-#define A4XX_RB_BLEND_RED_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED 0x000020f0
+#define A4XX_RB_BLEND_RED_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_RED_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_RED_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_RED_UINT__SHIFT) & A4XX_RB_BLEND_RED_UINT__MASK;
}
+#define A4XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_RED_SINT__SHIFT) & A4XX_RB_BLEND_RED_SINT__MASK;
+}
#define A4XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_RED_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
@@ -504,13 +1090,27 @@ static inline uint32_t A4XX_RB_BLEND_RED_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_RED_FLOAT__SHIFT) & A4XX_RB_BLEND_RED_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_GREEN 0x000020f4
-#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_RED_F32 0x000020f1
+#define A4XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_RED_F32__SHIFT) & A4XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_GREEN 0x000020f2
+#define A4XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_GREEN_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_GREEN_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_GREEN_UINT__SHIFT) & A4XX_RB_BLEND_GREEN_UINT__MASK;
}
+#define A4XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_GREEN_SINT__SHIFT) & A4XX_RB_BLEND_GREEN_SINT__MASK;
+}
#define A4XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
@@ -518,13 +1118,27 @@ static inline uint32_t A4XX_RB_BLEND_GREEN_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A4XX_RB_BLEND_GREEN_FLOAT__MASK;
}
-#define REG_A4XX_RB_BLEND_BLUE 0x000020f5
-#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x00007fff
+#define REG_A4XX_RB_BLEND_GREEN_F32 0x000020f3
+#define A4XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_GREEN_F32__SHIFT) & A4XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A4XX_RB_BLEND_BLUE 0x000020f4
+#define A4XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_BLUE_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_BLUE_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_BLUE_UINT__SHIFT) & A4XX_RB_BLEND_BLUE_UINT__MASK;
}
+#define A4XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_BLUE_SINT__SHIFT) & A4XX_RB_BLEND_BLUE_SINT__MASK;
+}
#define A4XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
@@ -532,13 +1146,27 @@ static inline uint32_t A4XX_RB_BLEND_BLUE_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A4XX_RB_BLEND_BLUE_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_BLUE_F32 0x000020f5
+#define A4XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_BLUE_F32__SHIFT) & A4XX_RB_BLEND_BLUE_F32__MASK;
+}
+
#define REG_A4XX_RB_BLEND_ALPHA 0x000020f6
-#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x00007fff
+#define A4XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
#define A4XX_RB_BLEND_ALPHA_UINT__SHIFT 0
static inline uint32_t A4XX_RB_BLEND_ALPHA_UINT(uint32_t val)
{
return ((val) << A4XX_RB_BLEND_ALPHA_UINT__SHIFT) & A4XX_RB_BLEND_ALPHA_UINT__MASK;
}
+#define A4XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A4XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A4XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A4XX_RB_BLEND_ALPHA_SINT__SHIFT) & A4XX_RB_BLEND_ALPHA_SINT__MASK;
+}
#define A4XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
#define A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
@@ -546,6 +1174,14 @@ static inline uint32_t A4XX_RB_BLEND_ALPHA_FLOAT(float val)
return ((util_float_to_half(val)) << A4XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A4XX_RB_BLEND_ALPHA_FLOAT__MASK;
}
+#define REG_A4XX_RB_BLEND_ALPHA_F32 0x000020f7
+#define A4XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A4XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A4XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A4XX_RB_BLEND_ALPHA_F32__SHIFT) & A4XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
#define REG_A4XX_RB_ALPHA_CONTROL 0x000020f8
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
#define A4XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
@@ -568,7 +1204,7 @@ static inline uint32_t A4XX_RB_FS_OUTPUT_ENABLE_BLEND(uint32_t val)
{
return ((val) << A4XX_RB_FS_OUTPUT_ENABLE_BLEND__SHIFT) & A4XX_RB_FS_OUTPUT_ENABLE_BLEND__MASK;
}
-#define A4XX_RB_FS_OUTPUT_FAST_CLEAR 0x00000100
+#define A4XX_RB_FS_OUTPUT_INDEPENDENT_BLEND 0x00000100
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__MASK 0xffff0000
#define A4XX_RB_FS_OUTPUT_SAMPLE_MASK__SHIFT 16
static inline uint32_t A4XX_RB_FS_OUTPUT_SAMPLE_MASK(uint32_t val)
@@ -734,8 +1370,9 @@ static inline uint32_t A4XX_RB_DEPTH_CONTROL_ZFUNC(enum adreno_compare_func val)
{
return ((val) << A4XX_RB_DEPTH_CONTROL_ZFUNC__SHIFT) & A4XX_RB_DEPTH_CONTROL_ZFUNC__MASK;
}
-#define A4XX_RB_DEPTH_CONTROL_BF_ENABLE 0x00000080
+#define A4XX_RB_DEPTH_CONTROL_Z_CLAMP_ENABLE 0x00000080
#define A4XX_RB_DEPTH_CONTROL_EARLY_Z_DISABLE 0x00010000
+#define A4XX_RB_DEPTH_CONTROL_FORCE_FRAGZ_TO_FS 0x00020000
#define A4XX_RB_DEPTH_CONTROL_Z_TEST_ENABLE 0x80000000
#define REG_A4XX_RB_DEPTH_CLEAR 0x00002102
@@ -996,8 +1633,386 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_TP_REG(uint32_t i0) { return 0x
#define REG_A4XX_RBBM_CFG_DEBBUS_SEL_D 0x0000004d
+#define REG_A4XX_RBBM_POWER_CNTL_IP 0x00000098
+#define A4XX_RBBM_POWER_CNTL_IP_SW_COLLAPSE 0x00000001
+#define A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON 0x00100000
+
#define REG_A4XX_RBBM_PERFCTR_CP_0_LO 0x0000009c
+#define REG_A4XX_RBBM_PERFCTR_CP_0_HI 0x0000009d
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_LO 0x0000009e
+
+#define REG_A4XX_RBBM_PERFCTR_CP_1_HI 0x0000009f
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_LO 0x000000a0
+
+#define REG_A4XX_RBBM_PERFCTR_CP_2_HI 0x000000a1
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_LO 0x000000a2
+
+#define REG_A4XX_RBBM_PERFCTR_CP_3_HI 0x000000a3
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_LO 0x000000a4
+
+#define REG_A4XX_RBBM_PERFCTR_CP_4_HI 0x000000a5
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_LO 0x000000a6
+
+#define REG_A4XX_RBBM_PERFCTR_CP_5_HI 0x000000a7
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_LO 0x000000a8
+
+#define REG_A4XX_RBBM_PERFCTR_CP_6_HI 0x000000a9
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_LO 0x000000aa
+
+#define REG_A4XX_RBBM_PERFCTR_CP_7_HI 0x000000ab
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_LO 0x000000ac
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_0_HI 0x000000ad
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_LO 0x000000ae
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_1_HI 0x000000af
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_LO 0x000000b0
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_2_HI 0x000000b1
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_LO 0x000000b2
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_3_HI 0x000000b3
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_LO 0x000000b4
+
+#define REG_A4XX_RBBM_PERFCTR_PC_0_HI 0x000000b5
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_LO 0x000000b6
+
+#define REG_A4XX_RBBM_PERFCTR_PC_1_HI 0x000000b7
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_LO 0x000000b8
+
+#define REG_A4XX_RBBM_PERFCTR_PC_2_HI 0x000000b9
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_LO 0x000000ba
+
+#define REG_A4XX_RBBM_PERFCTR_PC_3_HI 0x000000bb
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_LO 0x000000bc
+
+#define REG_A4XX_RBBM_PERFCTR_PC_4_HI 0x000000bd
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_LO 0x000000be
+
+#define REG_A4XX_RBBM_PERFCTR_PC_5_HI 0x000000bf
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_LO 0x000000c0
+
+#define REG_A4XX_RBBM_PERFCTR_PC_6_HI 0x000000c1
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_LO 0x000000c2
+
+#define REG_A4XX_RBBM_PERFCTR_PC_7_HI 0x000000c3
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_LO 0x000000c4
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_0_HI 0x000000c5
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_LO 0x000000c6
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_1_HI 0x000000c7
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_LO 0x000000c8
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_2_HI 0x000000c9
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_LO 0x000000ca
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_3_HI 0x000000cb
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_LO 0x000000cc
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_4_HI 0x000000cd
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_LO 0x000000ce
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_5_HI 0x000000cf
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_LO 0x000000d0
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_6_HI 0x000000d1
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_LO 0x000000d2
+
+#define REG_A4XX_RBBM_PERFCTR_VFD_7_HI 0x000000d3
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_LO 0x000000d4
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_0_HI 0x000000d5
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_LO 0x000000d6
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_1_HI 0x000000d7
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_LO 0x000000d8
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_2_HI 0x000000d9
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_LO 0x000000da
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_3_HI 0x000000db
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_LO 0x000000dc
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_4_HI 0x000000dd
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_LO 0x000000de
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_5_HI 0x000000df
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_LO 0x000000e0
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_6_HI 0x000000e1
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_LO 0x000000e2
+
+#define REG_A4XX_RBBM_PERFCTR_HLSQ_7_HI 0x000000e3
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_LO 0x000000e4
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_0_HI 0x000000e5
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_LO 0x000000e6
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_1_HI 0x000000e7
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_LO 0x000000e8
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_2_HI 0x000000e9
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_LO 0x000000ea
+
+#define REG_A4XX_RBBM_PERFCTR_VPC_3_HI 0x000000eb
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_LO 0x000000ec
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_0_HI 0x000000ed
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_LO 0x000000ee
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_1_HI 0x000000ef
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_LO 0x000000f0
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_2_HI 0x000000f1
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_LO 0x000000f2
+
+#define REG_A4XX_RBBM_PERFCTR_CCU_3_HI 0x000000f3
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_LO 0x000000f4
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_0_HI 0x000000f5
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_LO 0x000000f6
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_1_HI 0x000000f7
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_LO 0x000000f8
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_2_HI 0x000000f9
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_LO 0x000000fa
+
+#define REG_A4XX_RBBM_PERFCTR_TSE_3_HI 0x000000fb
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_LO 0x000000fc
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_0_HI 0x000000fd
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_LO 0x000000fe
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_1_HI 0x000000ff
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_LO 0x00000100
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_2_HI 0x00000101
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_LO 0x00000102
+
+#define REG_A4XX_RBBM_PERFCTR_RAS_3_HI 0x00000103
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_LO 0x00000104
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_0_HI 0x00000105
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_LO 0x00000106
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_1_HI 0x00000107
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_LO 0x00000108
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_2_HI 0x00000109
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_LO 0x0000010a
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_3_HI 0x0000010b
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_LO 0x0000010c
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_4_HI 0x0000010d
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_LO 0x0000010e
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_5_HI 0x0000010f
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_LO 0x00000110
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_6_HI 0x00000111
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_LO 0x00000112
+
+#define REG_A4XX_RBBM_PERFCTR_UCHE_7_HI 0x00000113
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_LO 0x00000114
+
+#define REG_A4XX_RBBM_PERFCTR_TP_0_HI 0x00000115
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_LO 0x00000116
+
+#define REG_A4XX_RBBM_PERFCTR_TP_1_HI 0x00000117
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_LO 0x00000118
+
+#define REG_A4XX_RBBM_PERFCTR_TP_2_HI 0x00000119
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_LO 0x0000011a
+
+#define REG_A4XX_RBBM_PERFCTR_TP_3_HI 0x0000011b
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_LO 0x0000011c
+
+#define REG_A4XX_RBBM_PERFCTR_TP_4_HI 0x0000011d
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_LO 0x0000011e
+
+#define REG_A4XX_RBBM_PERFCTR_TP_5_HI 0x0000011f
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_LO 0x00000120
+
+#define REG_A4XX_RBBM_PERFCTR_TP_6_HI 0x00000121
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_LO 0x00000122
+
+#define REG_A4XX_RBBM_PERFCTR_TP_7_HI 0x00000123
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_LO 0x00000124
+
+#define REG_A4XX_RBBM_PERFCTR_SP_0_HI 0x00000125
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_LO 0x00000126
+
+#define REG_A4XX_RBBM_PERFCTR_SP_1_HI 0x00000127
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_LO 0x00000128
+
+#define REG_A4XX_RBBM_PERFCTR_SP_2_HI 0x00000129
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_LO 0x0000012a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_3_HI 0x0000012b
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_LO 0x0000012c
+
+#define REG_A4XX_RBBM_PERFCTR_SP_4_HI 0x0000012d
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_LO 0x0000012e
+
+#define REG_A4XX_RBBM_PERFCTR_SP_5_HI 0x0000012f
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_LO 0x00000130
+
+#define REG_A4XX_RBBM_PERFCTR_SP_6_HI 0x00000131
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_LO 0x00000132
+
+#define REG_A4XX_RBBM_PERFCTR_SP_7_HI 0x00000133
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_LO 0x00000134
+
+#define REG_A4XX_RBBM_PERFCTR_SP_8_HI 0x00000135
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_LO 0x00000136
+
+#define REG_A4XX_RBBM_PERFCTR_SP_9_HI 0x00000137
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_LO 0x00000138
+
+#define REG_A4XX_RBBM_PERFCTR_SP_10_HI 0x00000139
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_LO 0x0000013a
+
+#define REG_A4XX_RBBM_PERFCTR_SP_11_HI 0x0000013b
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_LO 0x0000013c
+
+#define REG_A4XX_RBBM_PERFCTR_RB_0_HI 0x0000013d
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_LO 0x0000013e
+
+#define REG_A4XX_RBBM_PERFCTR_RB_1_HI 0x0000013f
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_LO 0x00000140
+
+#define REG_A4XX_RBBM_PERFCTR_RB_2_HI 0x00000141
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_LO 0x00000142
+
+#define REG_A4XX_RBBM_PERFCTR_RB_3_HI 0x00000143
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_LO 0x00000144
+
+#define REG_A4XX_RBBM_PERFCTR_RB_4_HI 0x00000145
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_LO 0x00000146
+
+#define REG_A4XX_RBBM_PERFCTR_RB_5_HI 0x00000147
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_LO 0x00000148
+
+#define REG_A4XX_RBBM_PERFCTR_RB_6_HI 0x00000149
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_LO 0x0000014a
+
+#define REG_A4XX_RBBM_PERFCTR_RB_7_HI 0x0000014b
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_LO 0x0000014c
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_0_HI 0x0000014d
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_LO 0x0000014e
+
+#define REG_A4XX_RBBM_PERFCTR_VSC_1_HI 0x0000014f
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_LO 0x00000166
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_0_HI 0x00000167
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
+
+#define REG_A4XX_RBBM_PERFCTR_PWR_1_HI 0x00000169
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_LO 0x0000016e
+
+#define REG_A4XX_RBBM_ALWAYSON_COUNTER_HI 0x0000016f
+
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP(uint32_t i0) { return 0x00000068 + 0x1*i0; }
static inline uint32_t REG_A4XX_RBBM_CLOCK_CTL_SP_REG(uint32_t i0) { return 0x00000068 + 0x1*i0; }
@@ -1046,6 +2061,10 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1(uint32_t i0) { r
static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0) { return 0x0000008e + 0x1*i0; }
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0 0x00000099
+
+#define REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1 0x0000009a
+
#define REG_A4XX_RBBM_PERFCTR_PWR_1_LO 0x00000168
#define REG_A4XX_RBBM_PERFCTR_CTL 0x00000170
@@ -1060,6 +2079,14 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000175
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000176
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000177
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000178
+
+#define REG_A4XX_RBBM_PERFCTR_RBBM_SEL_3 0x00000179
+
#define REG_A4XX_RBBM_GPU_BUSY_MASKED 0x0000017a
#define REG_A4XX_RBBM_INT_0_STATUS 0x0000017d
@@ -1099,6 +2126,11 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_RBBM_INTERFACE_RRDY_STATUS5 0x0000019f
+#define REG_A4XX_RBBM_POWER_STATUS 0x000001b0
+#define A4XX_RBBM_POWER_STATUS_SP_TP_PWR_ON 0x00100000
+
+#define REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2 0x000001b8
+
#define REG_A4XX_CP_SCRATCH_UMASK 0x00000228
#define REG_A4XX_CP_SCRATCH_ADDR 0x00000229
@@ -1167,11 +2199,23 @@ static inline uint32_t REG_A4XX_RBBM_CLOCK_DELAY_RB_MARB_CCU_L1_REG(uint32_t i0)
#define REG_A4XX_CP_DRAW_STATE_ADDR 0x00000232
-#define REG_A4XX_CP_PROTECT_REG_0 0x00000240
-
static inline uint32_t REG_A4XX_CP_PROTECT(uint32_t i0) { return 0x00000240 + 0x1*i0; }
static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240 + 0x1*i0; }
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A4XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A4XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A4XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A4XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A4XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A4XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A4XX_CP_PROTECT_REG_TRAP_READ 0x40000000
#define REG_A4XX_CP_PROTECT_CTRL 0x00000250
@@ -1191,6 +2235,20 @@ static inline uint32_t REG_A4XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000240
#define REG_A4XX_CP_PERFCTR_CP_SEL_0 0x00000500
+#define REG_A4XX_CP_PERFCTR_CP_SEL_1 0x00000501
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_2 0x00000502
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_3 0x00000503
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_4 0x00000504
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_5 0x00000505
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_6 0x00000506
+
+#define REG_A4XX_CP_PERFCTR_CP_SEL_7 0x00000507
+
#define REG_A4XX_CP_PERFCOMBINER_SELECT 0x0000050b
static inline uint32_t REG_A4XX_CP_SCRATCH(uint32_t i0) { return 0x00000578 + 0x1*i0; }
@@ -1201,6 +2259,28 @@ static inline uint32_t REG_A4XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000578
#define REG_A4XX_SP_MODE_CONTROL 0x00000ec3
+#define REG_A4XX_SP_PERFCTR_SP_SEL_0 0x00000ec4
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_1 0x00000ec5
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_2 0x00000ec6
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_3 0x00000ec7
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_4 0x00000ec8
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_5 0x00000ec9
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_6 0x00000eca
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_7 0x00000ecb
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_8 0x00000ecc
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_9 0x00000ecd
+
+#define REG_A4XX_SP_PERFCTR_SP_SEL_10 0x00000ece
+
#define REG_A4XX_SP_PERFCTR_SP_SEL_11 0x00000ecf
#define REG_A4XX_SP_SP_CTRL_REG 0x000022c0
@@ -1226,7 +2306,7 @@ static inline uint32_t A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -1374,7 +2454,7 @@ static inline uint32_t A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
{
return ((val) << A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A4XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
}
-#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0003fc00
+#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
#define A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
static inline uint32_t A4XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
{
@@ -1699,6 +2779,12 @@ static inline uint32_t A4XX_SP_GS_OBJ_OFFSET_REG_SHADEROBJOFFSET(uint32_t val)
#define REG_A4XX_VPC_DEBUG_ECO_CONTROL 0x00000e64
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_0 0x00000e65
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_1 0x00000e66
+
+#define REG_A4XX_VPC_PERFCTR_VPC_SEL_2 0x00000e67
+
#define REG_A4XX_VPC_PERFCTR_VPC_SEL_3 0x00000e68
#define REG_A4XX_VPC_ATTR 0x00002140
@@ -1811,6 +2897,20 @@ static inline uint32_t REG_A4XX_VSC_PIPE_DATA_LENGTH_REG(uint32_t i0) { return 0
#define REG_A4XX_VFD_DEBUG_CONTROL 0x00000e40
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_0 0x00000e43
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_1 0x00000e44
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_2 0x00000e45
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_3 0x00000e46
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_4 0x00000e47
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_5 0x00000e48
+
+#define REG_A4XX_VFD_PERFCTR_VFD_SEL_6 0x00000e49
+
#define REG_A4XX_VFD_PERFCTR_VFD_SEL_7 0x00000e4a
#define REG_A4XX_VGT_CL_INITIATOR 0x000021d0
@@ -1967,6 +3067,20 @@ static inline uint32_t A4XX_VFD_DECODE_INSTR_SHIFTCNT(uint32_t val)
#define REG_A4XX_TPL1_TP_MODE_CONTROL 0x00000f03
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_0 0x00000f04
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_1 0x00000f05
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_2 0x00000f06
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_3 0x00000f07
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_4 0x00000f08
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_5 0x00000f09
+
+#define REG_A4XX_TPL1_PERFCTR_TP_SEL_6 0x00000f0a
+
#define REG_A4XX_TPL1_PERFCTR_TP_SEL_7 0x00000f0b
#define REG_A4XX_TPL1_TP_TEX_OFFSET 0x00002380
@@ -2021,9 +3135,25 @@ static inline uint32_t A4XX_TPL1_TP_TEX_COUNT_GS(uint32_t val)
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c88
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c89
+
+#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c8a
+
#define REG_A4XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c8b
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c8c
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c8d
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c8e
+
+#define REG_A4XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c8f
+
#define REG_A4XX_GRAS_CL_CLIP_CNTL 0x00002000
+#define A4XX_GRAS_CL_CLIP_CNTL_CLIP_DISABLE 0x00008000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZNEAR_CLIP_DISABLE 0x00010000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 0x00020000
+#define A4XX_GRAS_CL_CLIP_CNTL_ZERO_GB_SCALE_Z 0x00400000
#define REG_A4XX_GRAS_CLEAR_CNTL 0x00002003
#define A4XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
@@ -2114,6 +3244,7 @@ static inline uint32_t A4XX_GRAS_SU_POINT_SIZE(float val)
#define REG_A4XX_GRAS_ALPHA_CONTROL 0x00002073
#define A4XX_GRAS_ALPHA_CONTROL_ALPHA_TEST_ENABLE 0x00000004
+#define A4XX_GRAS_ALPHA_CONTROL_FORCE_FRAGZ_TO_FS 0x00000008
#define REG_A4XX_GRAS_SU_POLY_OFFSET_SCALE 0x00002074
#define A4XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
@@ -2285,6 +3416,20 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_UCHE_CACHE_WAYS_VFD 0x00000e8c
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e8e
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e8f
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e90
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e91
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e92
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e93
+
+#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e94
+
#define REG_A4XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e95
#define REG_A4XX_HLSQ_TIMEOUT_THRESHOLD 0x00000e00
@@ -2295,6 +3440,22 @@ static inline uint32_t A4XX_GRAS_SC_EXTENT_WINDOW_TL_Y(uint32_t val)
#define REG_A4XX_HLSQ_PERF_PIPE_MASK 0x00000e0e
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e06
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e07
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e08
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e09
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e0a
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e0b
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e0c
+
+#define REG_A4XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e0d
+
#define REG_A4XX_HLSQ_CONTROL_0_REG 0x000023c0
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__MASK 0x00000010
#define A4XX_HLSQ_CONTROL_0_REG_FSTHREADSIZE__SHIFT 4
@@ -2545,14 +3706,42 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
#define REG_A4XX_PC_BINNING_COMMAND 0x00000d00
#define A4XX_PC_BINNING_COMMAND_BINNING_ENABLE 0x00000001
+#define REG_A4XX_PC_TESSFACTOR_ADDR 0x00000d08
+
#define REG_A4XX_PC_DRAWCALL_SETUP_OVERRIDE 0x00000d0c
#define REG_A4XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+#define REG_A4XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A4XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
#define REG_A4XX_PC_PERFCTR_PC_SEL_7 0x00000d17
#define REG_A4XX_PC_BIN_BASE 0x000021c0
+#define REG_A4XX_PC_VSTREAM_CONTROL 0x000021c2
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__MASK 0x003f0000
+#define A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT 16
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_SIZE__SHIFT) & A4XX_PC_VSTREAM_CONTROL_SIZE__MASK;
+}
+#define A4XX_PC_VSTREAM_CONTROL_N__MASK 0x07c00000
+#define A4XX_PC_VSTREAM_CONTROL_N__SHIFT 22
+static inline uint32_t A4XX_PC_VSTREAM_CONTROL_N(uint32_t val)
+{
+ return ((val) << A4XX_PC_VSTREAM_CONTROL_N__SHIFT) & A4XX_PC_VSTREAM_CONTROL_N__MASK;
+}
+
#define REG_A4XX_PC_PRIM_VTX_CNTL 0x000021c4
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__MASK 0x0000000f
#define A4XX_PC_PRIM_VTX_CNTL_VAROUT__SHIFT 0
@@ -2564,7 +3753,20 @@ static inline uint32_t A4XX_PC_PRIM_VTX_CNTL_VAROUT(uint32_t val)
#define A4XX_PC_PRIM_VTX_CNTL_PROVOKING_VTX_LAST 0x02000000
#define A4XX_PC_PRIM_VTX_CNTL_PSIZE 0x04000000
-#define REG_A4XX_UNKNOWN_21C5 0x000021c5
+#define REG_A4XX_PC_PRIM_VTX_CNTL2 0x000021c5
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__SHIFT) & A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A4XX_PC_PRIM_VTX_CNTL2_POLYMODE_ENABLE 0x00000040
#define REG_A4XX_PC_RESTART_INDEX 0x000021c6
@@ -2602,12 +3804,8 @@ static inline uint32_t A4XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
{
return ((val) << A4XX_PC_HS_PARAM_SPACING__SHIFT) & A4XX_PC_HS_PARAM_SPACING__MASK;
}
-#define A4XX_PC_HS_PARAM_PRIMTYPE__MASK 0x01800000
-#define A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT 23
-static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
-{
- return ((val) << A4XX_PC_HS_PARAM_PRIMTYPE__SHIFT) & A4XX_PC_HS_PARAM_PRIMTYPE__MASK;
-}
+#define A4XX_PC_HS_PARAM_CW 0x00800000
+#define A4XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A4XX_VBIF_VERSION 0x00003000
@@ -2646,20 +3844,6 @@ static inline uint32_t A4XX_PC_HS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
#define REG_A4XX_UNKNOWN_20EF 0x000020ef
-#define REG_A4XX_UNKNOWN_20F0 0x000020f0
-
-#define REG_A4XX_UNKNOWN_20F1 0x000020f1
-
-#define REG_A4XX_UNKNOWN_20F2 0x000020f2
-
-#define REG_A4XX_UNKNOWN_20F7 0x000020f7
-#define A4XX_UNKNOWN_20F7__MASK 0xffffffff
-#define A4XX_UNKNOWN_20F7__SHIFT 0
-static inline uint32_t A4XX_UNKNOWN_20F7(float val)
-{
- return ((fui(val)) << A4XX_UNKNOWN_20F7__SHIFT) & A4XX_UNKNOWN_20F7__MASK;
-}
-
#define REG_A4XX_UNKNOWN_2152 0x00002152
#define REG_A4XX_UNKNOWN_2153 0x00002153
@@ -2720,6 +3904,12 @@ static inline uint32_t A4XX_TEX_SAMP_0_ANISO(enum a4xx_tex_aniso val)
{
return ((val) << A4XX_TEX_SAMP_0_ANISO__SHIFT) & A4XX_TEX_SAMP_0_ANISO__MASK;
}
+#define A4XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A4XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A4XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A4XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
#define REG_A4XX_TEX_SAMP_1 0x00000001
#define A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
@@ -2728,6 +3918,7 @@ static inline uint32_t A4XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val
{
return ((val) << A4XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A4XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
}
+#define A4XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
#define A4XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
#define A4XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
#define A4XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
@@ -2796,7 +3987,7 @@ static inline uint32_t A4XX_TEX_CONST_1_HEIGHT(uint32_t val)
{
return ((val) << A4XX_TEX_CONST_1_HEIGHT__SHIFT) & A4XX_TEX_CONST_1_HEIGHT__MASK;
}
-#define A4XX_TEX_CONST_1_WIDTH__MASK 0x1fff8000
+#define A4XX_TEX_CONST_1_WIDTH__MASK 0x3fff8000
#define A4XX_TEX_CONST_1_WIDTH__SHIFT 15
static inline uint32_t A4XX_TEX_CONST_1_WIDTH(uint32_t val)
{
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index a53f1be05f75..45c83fbe20e1 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -31,6 +31,7 @@
extern bool hang_debug;
static void a4xx_dump(struct msm_gpu *gpu);
+static bool a4xx_idle(struct msm_gpu *gpu);
/*
* a4xx_enable_hwcg() - Program the clock control registers
@@ -102,14 +103,20 @@ static void a4xx_enable_hwcg(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00000222);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL_HLSQ , 0x00000000);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_HYST_HLSQ, 0x00000000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00020000);
- gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, 0x00220000);
+ /* Early A430's have a timing issue with SP/TP power collapse;
+ disabling HW clock gating prevents it. */
+ if (adreno_is_a430(adreno_gpu) && adreno_gpu->rev.patchid < 2)
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0);
+ else
+ gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL, 0xAAAAAAAA);
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_CTL2, 0);
}
-static void a4xx_me_init(struct msm_gpu *gpu)
+
+static bool a4xx_me_init(struct msm_gpu *gpu)
{
- struct msm_ringbuffer *ring = gpu->rb;
+ struct msm_ringbuffer *ring = gpu->rb[0];
OUT_PKT3(ring, CP_ME_INIT, 17);
OUT_RING(ring, 0x000003f7);
@@ -130,8 +137,8 @@ static void a4xx_me_init(struct msm_gpu *gpu)
OUT_RING(ring, 0x00000000);
OUT_RING(ring, 0x00000000);
- gpu->funcs->flush(gpu);
- gpu->funcs->idle(gpu);
+ gpu->funcs->flush(gpu, ring);
+ return a4xx_idle(gpu);
}
static int a4xx_hw_init(struct msm_gpu *gpu)
@@ -141,7 +148,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
uint32_t *ptr, len;
int i, ret;
- if (adreno_is_a4xx(adreno_gpu)) {
+ if (adreno_is_a420(adreno_gpu)) {
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT, 0x0001001F);
gpu_write(gpu, REG_A4XX_VBIF_ABIT_SORT_CONF, 0x000000A4);
gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
@@ -150,6 +157,13 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+ } else if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000001);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_RD_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF0, 0x18181818);
+ gpu_write(gpu, REG_A4XX_VBIF_IN_WR_LIM_CONF1, 0x00000018);
+ gpu_write(gpu, REG_A4XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
} else {
BUG();
}
@@ -161,6 +175,10 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_SP_HYST_CNT, 0x10);
gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL, 0x10);
+ if (adreno_is_a430(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_WAIT_IDLE_CLOCKS_CTL2, 0x30);
+ }
+
/* Enable the RBBM error reporting bits */
gpu_write(gpu, REG_A4XX_RBBM_AHB_CTL0, 0x00000001);
@@ -183,6 +201,14 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* Turn on performance counters: */
gpu_write(gpu, REG_A4XX_RBBM_PERFCTR_CTL, 0x01);
+ /* use the first CP counter for timestamp queries.. userspace may set
+ * this as well but it selects the same counter/countable:
+ */
+ gpu_write(gpu, REG_A4XX_CP_PERFCTR_CP_SEL_0, CP_ALWAYS_COUNT);
+
+ if (adreno_is_a430(adreno_gpu))
+ gpu_write(gpu, REG_A4XX_UCHE_CACHE_WAYS_VFD, 0x07);
+
/* Disable L2 bypass to avoid UCHE out of bounds errors */
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_LO, 0xffff0000);
gpu_write(gpu, REG_A4XX_UCHE_TRAP_BASE_HI, 0xffff0000);
@@ -190,6 +216,15 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_CP_DEBUG, (1 << 25) |
(adreno_is_a420(adreno_gpu) ? (1 << 29) : 0));
+ /* On A430 enable SP regfile sleep for power savings */
+ /* TODO downstream does this for !420, so maybe applies for 405 too? */
+ if (!adreno_is_a420(adreno_gpu)) {
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_0,
+ 0x00000441);
+ gpu_write(gpu, REG_A4XX_RBBM_SP_REGFILE_SLEEP_CNTL_1,
+ 0x00000441);
+ }
+
a4xx_enable_hwcg(gpu);
/*
@@ -204,10 +239,6 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
gpu_write(gpu, REG_A4XX_RBBM_CLOCK_DELAY_HLSQ, val);
}
- ret = adreno_hw_init(gpu);
- if (ret)
- return ret;
-
/* setup access protection: */
gpu_write(gpu, REG_A4XX_CP_PROTECT_CTRL, 0x00000007);
@@ -262,8 +293,7 @@ static int a4xx_hw_init(struct msm_gpu *gpu)
/* clear ME_HALT to start micro engine */
gpu_write(gpu, REG_A4XX_CP_ME_CNTL, 0);
- a4xx_me_init(gpu);
- return 0;
+ return a4xx_me_init(gpu) ? 0 : -EINVAL;
}
static void a4xx_recover(struct msm_gpu *gpu)
@@ -297,17 +327,21 @@ static void a4xx_destroy(struct msm_gpu *gpu)
kfree(a4xx_gpu);
}
-static void a4xx_idle(struct msm_gpu *gpu)
+static bool a4xx_idle(struct msm_gpu *gpu)
{
/* wait for ringbuffer to drain: */
- adreno_idle(gpu);
+ if (!adreno_idle(gpu, gpu->rb[0]))
+ return false;
/* then wait for GPU to finish: */
if (spin_until(!(gpu_read(gpu, REG_A4XX_RBBM_STATUS) &
- A4XX_RBBM_STATUS_GPU_BUSY)))
+ A4XX_RBBM_STATUS_GPU_BUSY))) {
DRM_ERROR("%s: timeout waiting for GPU to idle!\n", gpu->name);
+ /* TODO maybe we need to reset GPU here to recover from hang? */
+ return false;
+ }
- /* TODO maybe we need to reset GPU here to recover from hang? */
+ return true;
}
static irqreturn_t a4xx_irq(struct msm_gpu *gpu)
@@ -409,12 +443,8 @@ static const unsigned int a4xx_registers[] = {
#ifdef CONFIG_DEBUG_FS
static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
{
- gpu->funcs->pm_resume(gpu);
-
seq_printf(m, "status: %08x\n",
gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- gpu->funcs->pm_suspend(gpu);
-
adreno_show(gpu, m);
}
@@ -422,87 +452,13 @@ static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
- REG_ADRENO_DEFINE(REG_ADRENO_CP_DEBUG, REG_A4XX_CP_DEBUG),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_WADDR, REG_A4XX_CP_ME_RAM_WADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_DATA, REG_A4XX_CP_ME_RAM_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_A4XX_CP_PFP_UCODE_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_A4XX_CP_PFP_UCODE_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_WFI_PEND_CTR, REG_A4XX_CP_WFI_PEND_CTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A4XX_CP_RB_BASE),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_BASE_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A4XX_CP_RB_RPTR_ADDR),
+ REG_ADRENO_SKIP(REG_ADRENO_CP_RB_RPTR_ADDR_HI),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A4XX_CP_RB_RPTR),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A4XX_CP_RB_WPTR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_CTRL, REG_A4XX_CP_PROTECT_CTRL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_CNTL, REG_A4XX_CP_ME_CNTL),
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A4XX_CP_RB_CNTL),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BASE, REG_A4XX_CP_IB1_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB1_BUFSZ, REG_A4XX_CP_IB1_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BASE, REG_A4XX_CP_IB2_BASE),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_IB2_BUFSZ, REG_A4XX_CP_IB2_BUFSZ),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_TIMESTAMP, REG_AXXX_CP_SCRATCH_REG0),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ME_RAM_RADDR, REG_A4XX_CP_ME_RAM_RADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_ADDR, REG_A4XX_CP_ROQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_ROQ_DATA, REG_A4XX_CP_ROQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_ADDR, REG_A4XX_CP_MERCIU_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA, REG_A4XX_CP_MERCIU_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MERCIU_DATA2, REG_A4XX_CP_MERCIU_DATA2),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_ADDR, REG_A4XX_CP_MEQ_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_MEQ_DATA, REG_A4XX_CP_MEQ_DATA),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_HW_FAULT, REG_A4XX_CP_HW_FAULT),
- REG_ADRENO_DEFINE(REG_ADRENO_CP_PROTECT_STATUS,
- REG_A4XX_CP_PROTECT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_ADDR, REG_A4XX_CP_SCRATCH_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SCRATCH_UMSK, REG_A4XX_CP_SCRATCH_UMASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_STATUS, REG_A4XX_RBBM_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_A4XX_RBBM_PERFCTR_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD1),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_A4XX_RBBM_PERFCTR_LOAD_CMD2),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_A4XX_RBBM_PERFCTR_PWR_1_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_MASK, REG_A4XX_RBBM_INT_0_MASK),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_0_STATUS,
- REG_A4XX_RBBM_INT_0_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_A4XX_RBBM_AHB_ERROR_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_CMD, REG_A4XX_RBBM_AHB_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_CLOCK_CTL, REG_A4XX_RBBM_CLOCK_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_ME_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_A4XX_RBBM_AHB_PFP_SPLIT_STATUS),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_A4XX_VPC_DEBUG_RAM_SEL),
- REG_ADRENO_DEFINE(REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_A4XX_VPC_DEBUG_RAM_READ),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_A4XX_RBBM_INT_CLEAR_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_A4XX_VSC_SIZE_ADDRESS),
- REG_ADRENO_DEFINE(REG_ADRENO_VFD_CONTROL_0, REG_A4XX_VFD_CONTROL_0),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_VS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_A4XX_SP_FS_PVT_MEM_ADDR),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_A4XX_SP_VS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_A4XX_SP_FS_OBJ_START),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_RBBM_CTL, REG_A4XX_RBBM_RBBM_CTL),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_A4XX_RBBM_SW_RESET_CMD),
- REG_ADRENO_DEFINE(REG_ADRENO_UCHE_INVALIDATE0,
- REG_A4XX_UCHE_INVALIDATE0),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_LO),
- REG_ADRENO_DEFINE(REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
- REG_A4XX_RBBM_PERFCTR_LOAD_VALUE_HI),
};
static void a4xx_dump(struct msm_gpu *gpu)
@@ -512,23 +468,67 @@ static void a4xx_dump(struct msm_gpu *gpu)
adreno_dump(gpu);
}
+static int a4xx_pm_resume(struct msm_gpu *gpu) {
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_resume(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ unsigned int reg;
+ /* Set the default register values; set SW_COLLAPSE to 0 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778000);
+ do {
+ udelay(5);
+ reg = gpu_read(gpu, REG_A4XX_RBBM_POWER_STATUS);
+ } while (!(reg & A4XX_RBBM_POWER_CNTL_IP_SP_TP_PWR_ON));
+ }
+ return 0;
+}
+
+static int a4xx_pm_suspend(struct msm_gpu *gpu) {
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int ret;
+
+ ret = msm_gpu_pm_suspend(gpu);
+ if (ret)
+ return ret;
+
+ if (adreno_is_a430(adreno_gpu)) {
+ /* Set the default register values; set SW_COLLAPSE to 1 */
+ gpu_write(gpu, REG_A4XX_RBBM_POWER_CNTL_IP, 0x778001);
+ }
+ return 0;
+}
+
+static int a4xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ *value = gpu_read64(gpu, REG_A4XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A4XX_RBBM_PERFCTR_CP_0_HI);
+
+ return 0;
+}
+
static const struct adreno_gpu_funcs funcs = {
.base = {
.get_param = adreno_get_param,
.hw_init = a4xx_hw_init,
- .pm_suspend = msm_gpu_pm_suspend,
- .pm_resume = msm_gpu_pm_resume,
+ .pm_suspend = a4xx_pm_suspend,
+ .pm_resume = a4xx_pm_resume,
.recover = a4xx_recover,
- .last_fence = adreno_last_fence,
+ .submitted_fence = adreno_submitted_fence,
.submit = adreno_submit,
.flush = adreno_flush,
- .idle = a4xx_idle,
+ .active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
#ifdef CONFIG_DEBUG_FS
.show = a4xx_show,
#endif
},
+ .get_timestamp = a4xx_get_timestamp,
};
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
@@ -538,6 +538,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
struct msm_gpu *gpu;
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_gpu_config a4xx_config = { 0 };
int ret;
if (!pdev) {
@@ -563,7 +564,13 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
adreno_gpu->registers = a4xx_registers;
adreno_gpu->reg_offsets = a4xx_register_offsets;
- ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs);
+ a4xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+ a4xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+ a4xx_config.nr_rings = 1;
+ a4xx_config.va_start = 0x300000;
+ a4xx_config.va_end = 0xffffffff;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a4xx_config);
if (ret)
goto fail;
@@ -582,7 +589,7 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev)
#endif
}
- if (!gpu->mmu) {
+ if (!gpu->aspace) {
/* TODO we think it is possible to configure the GPU to
* restrict access to VRAM carveout. But the required
* registers are unknown. For now just bail out and
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
new file mode 100644
index 000000000000..b73f4efb1b9d
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -0,0 +1,3493 @@
+#ifndef A5XX_XML
+#define A5XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- ./rnndb/adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./rnndb/adreno/a5xx.xml ( 86963 bytes, from 2017-03-03 16:01:09)
+- ./rnndb/adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2017 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a5xx_color_fmt {
+ RB5_R8_UNORM = 3,
+ RB5_R5G5B5A1_UNORM = 10,
+ RB5_R8G8B8A8_UNORM = 48,
+ RB5_R8G8B8_UNORM = 49,
+ RB5_R8G8B8A8_UINT = 51,
+ RB5_R10G10B10A2_UINT = 58,
+ RB5_R16G16B16A16_FLOAT = 98,
+};
+
+enum a5xx_tile_mode {
+ TILE5_LINEAR = 0,
+ TILE5_2 = 2,
+ TILE5_3 = 3,
+};
+
+enum a5xx_vtx_fmt {
+ VFMT5_8_UNORM = 3,
+ VFMT5_8_SNORM = 4,
+ VFMT5_8_UINT = 5,
+ VFMT5_8_SINT = 6,
+ VFMT5_8_8_UNORM = 15,
+ VFMT5_8_8_SNORM = 16,
+ VFMT5_8_8_UINT = 17,
+ VFMT5_8_8_SINT = 18,
+ VFMT5_16_UNORM = 21,
+ VFMT5_16_SNORM = 22,
+ VFMT5_16_FLOAT = 23,
+ VFMT5_16_UINT = 24,
+ VFMT5_16_SINT = 25,
+ VFMT5_8_8_8_UNORM = 33,
+ VFMT5_8_8_8_SNORM = 34,
+ VFMT5_8_8_8_UINT = 35,
+ VFMT5_8_8_8_SINT = 36,
+ VFMT5_8_8_8_8_UNORM = 48,
+ VFMT5_8_8_8_8_SNORM = 50,
+ VFMT5_8_8_8_8_UINT = 51,
+ VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_16_16_UNORM = 67,
+ VFMT5_16_16_SNORM = 68,
+ VFMT5_16_16_FLOAT = 69,
+ VFMT5_16_16_UINT = 70,
+ VFMT5_16_16_SINT = 71,
+ VFMT5_32_UNORM = 72,
+ VFMT5_32_SNORM = 73,
+ VFMT5_32_FLOAT = 74,
+ VFMT5_32_UINT = 75,
+ VFMT5_32_SINT = 76,
+ VFMT5_32_FIXED = 77,
+ VFMT5_16_16_16_UNORM = 88,
+ VFMT5_16_16_16_SNORM = 89,
+ VFMT5_16_16_16_FLOAT = 90,
+ VFMT5_16_16_16_UINT = 91,
+ VFMT5_16_16_16_SINT = 92,
+ VFMT5_16_16_16_16_UNORM = 96,
+ VFMT5_16_16_16_16_SNORM = 97,
+ VFMT5_16_16_16_16_FLOAT = 98,
+ VFMT5_16_16_16_16_UINT = 99,
+ VFMT5_16_16_16_16_SINT = 100,
+ VFMT5_32_32_UNORM = 101,
+ VFMT5_32_32_SNORM = 102,
+ VFMT5_32_32_FLOAT = 103,
+ VFMT5_32_32_UINT = 104,
+ VFMT5_32_32_SINT = 105,
+ VFMT5_32_32_FIXED = 106,
+ VFMT5_32_32_32_UNORM = 112,
+ VFMT5_32_32_32_SNORM = 113,
+ VFMT5_32_32_32_UINT = 114,
+ VFMT5_32_32_32_SINT = 115,
+ VFMT5_32_32_32_FLOAT = 116,
+ VFMT5_32_32_32_FIXED = 117,
+ VFMT5_32_32_32_32_UNORM = 128,
+ VFMT5_32_32_32_32_SNORM = 129,
+ VFMT5_32_32_32_32_FLOAT = 130,
+ VFMT5_32_32_32_32_UINT = 131,
+ VFMT5_32_32_32_32_SINT = 132,
+ VFMT5_32_32_32_32_FIXED = 133,
+};
+
+enum a5xx_tex_fmt {
+ TFMT5_A8_UNORM = 2,
+ TFMT5_8_UNORM = 3,
+ TFMT5_4_4_4_4_UNORM = 8,
+ TFMT5_5_6_5_UNORM = 14,
+ TFMT5_L8_A8_UNORM = 19,
+ TFMT5_16_FLOAT = 23,
+ TFMT5_8_8_8_8_UNORM = 48,
+ TFMT5_10_10_10_2_UNORM = 54,
+ TFMT5_16_16_FLOAT = 69,
+ TFMT5_32_FLOAT = 74,
+ TFMT5_16_16_16_16_FLOAT = 98,
+ TFMT5_32_32_FLOAT = 103,
+ TFMT5_32_32_32_32_FLOAT = 130,
+ TFMT5_X8Z24_UNORM = 160,
+};
+
+enum a5xx_tex_fetchsize {
+ TFETCH5_1_BYTE = 0,
+ TFETCH5_2_BYTE = 1,
+ TFETCH5_4_BYTE = 2,
+ TFETCH5_8_BYTE = 3,
+ TFETCH5_16_BYTE = 4,
+};
+
+enum a5xx_depth_format {
+ DEPTH5_NONE = 0,
+ DEPTH5_16 = 1,
+ DEPTH5_24_8 = 2,
+ DEPTH5_32 = 4,
+};
+
+enum a5xx_debugbus {
+ A5XX_RBBM_DBGBUS_CP = 1,
+ A5XX_RBBM_DBGBUS_RBBM = 2,
+ A5XX_RBBM_DBGBUS_VBIF = 3,
+ A5XX_RBBM_DBGBUS_HLSQ = 4,
+ A5XX_RBBM_DBGBUS_UCHE = 5,
+ A5XX_RBBM_DBGBUS_DPM = 6,
+ A5XX_RBBM_DBGBUS_TESS = 7,
+ A5XX_RBBM_DBGBUS_PC = 8,
+ A5XX_RBBM_DBGBUS_VFDP = 9,
+ A5XX_RBBM_DBGBUS_VPC = 10,
+ A5XX_RBBM_DBGBUS_TSE = 11,
+ A5XX_RBBM_DBGBUS_RAS = 12,
+ A5XX_RBBM_DBGBUS_VSC = 13,
+ A5XX_RBBM_DBGBUS_COM = 14,
+ A5XX_RBBM_DBGBUS_DCOM = 15,
+ A5XX_RBBM_DBGBUS_LRZ = 16,
+ A5XX_RBBM_DBGBUS_A2D_DSP = 17,
+ A5XX_RBBM_DBGBUS_CCUFCHE = 18,
+ A5XX_RBBM_DBGBUS_GPMU = 19,
+ A5XX_RBBM_DBGBUS_RBP = 20,
+ A5XX_RBBM_DBGBUS_HM = 21,
+ A5XX_RBBM_DBGBUS_RBBM_CFG = 22,
+ A5XX_RBBM_DBGBUS_VBIF_CX = 23,
+ A5XX_RBBM_DBGBUS_GPC = 29,
+ A5XX_RBBM_DBGBUS_LARC = 30,
+ A5XX_RBBM_DBGBUS_HLSQ_SPTP = 31,
+ A5XX_RBBM_DBGBUS_RB_0 = 32,
+ A5XX_RBBM_DBGBUS_RB_1 = 33,
+ A5XX_RBBM_DBGBUS_RB_2 = 34,
+ A5XX_RBBM_DBGBUS_RB_3 = 35,
+ A5XX_RBBM_DBGBUS_CCU_0 = 40,
+ A5XX_RBBM_DBGBUS_CCU_1 = 41,
+ A5XX_RBBM_DBGBUS_CCU_2 = 42,
+ A5XX_RBBM_DBGBUS_CCU_3 = 43,
+ A5XX_RBBM_DBGBUS_A2D_RAS_0 = 48,
+ A5XX_RBBM_DBGBUS_A2D_RAS_1 = 49,
+ A5XX_RBBM_DBGBUS_A2D_RAS_2 = 50,
+ A5XX_RBBM_DBGBUS_A2D_RAS_3 = 51,
+ A5XX_RBBM_DBGBUS_VFD_0 = 56,
+ A5XX_RBBM_DBGBUS_VFD_1 = 57,
+ A5XX_RBBM_DBGBUS_VFD_2 = 58,
+ A5XX_RBBM_DBGBUS_VFD_3 = 59,
+ A5XX_RBBM_DBGBUS_SP_0 = 64,
+ A5XX_RBBM_DBGBUS_SP_1 = 65,
+ A5XX_RBBM_DBGBUS_SP_2 = 66,
+ A5XX_RBBM_DBGBUS_SP_3 = 67,
+ A5XX_RBBM_DBGBUS_TPL1_0 = 72,
+ A5XX_RBBM_DBGBUS_TPL1_1 = 73,
+ A5XX_RBBM_DBGBUS_TPL1_2 = 74,
+ A5XX_RBBM_DBGBUS_TPL1_3 = 75,
+};
+
+enum a5xx_shader_blocks {
+ A5XX_TP_W_MEMOBJ = 1,
+ A5XX_TP_W_SAMPLER = 2,
+ A5XX_TP_W_MIPMAP_BASE = 3,
+ A5XX_TP_W_MEMOBJ_TAG = 4,
+ A5XX_TP_W_SAMPLER_TAG = 5,
+ A5XX_TP_S_3D_MEMOBJ = 6,
+ A5XX_TP_S_3D_SAMPLER = 7,
+ A5XX_TP_S_3D_MEMOBJ_TAG = 8,
+ A5XX_TP_S_3D_SAMPLER_TAG = 9,
+ A5XX_TP_S_CS_MEMOBJ = 10,
+ A5XX_TP_S_CS_SAMPLER = 11,
+ A5XX_TP_S_CS_MEMOBJ_TAG = 12,
+ A5XX_TP_S_CS_SAMPLER_TAG = 13,
+ A5XX_SP_W_INSTR = 14,
+ A5XX_SP_W_CONST = 15,
+ A5XX_SP_W_UAV_SIZE = 16,
+ A5XX_SP_W_CB_SIZE = 17,
+ A5XX_SP_W_UAV_BASE = 18,
+ A5XX_SP_W_CB_BASE = 19,
+ A5XX_SP_W_INST_TAG = 20,
+ A5XX_SP_W_STATE = 21,
+ A5XX_SP_S_3D_INSTR = 22,
+ A5XX_SP_S_3D_CONST = 23,
+ A5XX_SP_S_3D_CB_BASE = 24,
+ A5XX_SP_S_3D_CB_SIZE = 25,
+ A5XX_SP_S_3D_UAV_BASE = 26,
+ A5XX_SP_S_3D_UAV_SIZE = 27,
+ A5XX_SP_S_CS_INSTR = 28,
+ A5XX_SP_S_CS_CONST = 29,
+ A5XX_SP_S_CS_CB_BASE = 30,
+ A5XX_SP_S_CS_CB_SIZE = 31,
+ A5XX_SP_S_CS_UAV_BASE = 32,
+ A5XX_SP_S_CS_UAV_SIZE = 33,
+ A5XX_SP_S_3D_INSTR_DIRTY = 34,
+ A5XX_SP_S_3D_CONST_DIRTY = 35,
+ A5XX_SP_S_3D_CB_BASE_DIRTY = 36,
+ A5XX_SP_S_3D_CB_SIZE_DIRTY = 37,
+ A5XX_SP_S_3D_UAV_BASE_DIRTY = 38,
+ A5XX_SP_S_3D_UAV_SIZE_DIRTY = 39,
+ A5XX_SP_S_CS_INSTR_DIRTY = 40,
+ A5XX_SP_S_CS_CONST_DIRTY = 41,
+ A5XX_SP_S_CS_CB_BASE_DIRTY = 42,
+ A5XX_SP_S_CS_CB_SIZE_DIRTY = 43,
+ A5XX_SP_S_CS_UAV_BASE_DIRTY = 44,
+ A5XX_SP_S_CS_UAV_SIZE_DIRTY = 45,
+ A5XX_HLSQ_ICB = 46,
+ A5XX_HLSQ_ICB_DIRTY = 47,
+ A5XX_HLSQ_ICB_CB_BASE_DIRTY = 48,
+ A5XX_SP_POWER_RESTORE_RAM = 64,
+ A5XX_SP_POWER_RESTORE_RAM_TAG = 65,
+ A5XX_TP_POWER_RESTORE_RAM = 66,
+ A5XX_TP_POWER_RESTORE_RAM_TAG = 67,
+};
+
+enum a5xx_tex_filter {
+ A5XX_TEX_NEAREST = 0,
+ A5XX_TEX_LINEAR = 1,
+ A5XX_TEX_ANISO = 2,
+};
+
+enum a5xx_tex_clamp {
+ A5XX_TEX_REPEAT = 0,
+ A5XX_TEX_CLAMP_TO_EDGE = 1,
+ A5XX_TEX_MIRROR_REPEAT = 2,
+ A5XX_TEX_CLAMP_TO_BORDER = 3,
+ A5XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a5xx_tex_aniso {
+ A5XX_TEX_ANISO_1 = 0,
+ A5XX_TEX_ANISO_2 = 1,
+ A5XX_TEX_ANISO_4 = 2,
+ A5XX_TEX_ANISO_8 = 3,
+ A5XX_TEX_ANISO_16 = 4,
+};
+
+enum a5xx_tex_swiz {
+ A5XX_TEX_X = 0,
+ A5XX_TEX_Y = 1,
+ A5XX_TEX_Z = 2,
+ A5XX_TEX_W = 3,
+ A5XX_TEX_ZERO = 4,
+ A5XX_TEX_ONE = 5,
+};
+
+enum a5xx_tex_type {
+ A5XX_TEX_1D = 0,
+ A5XX_TEX_2D = 1,
+ A5XX_TEX_CUBE = 2,
+ A5XX_TEX_3D = 3,
+};
+
+#define A5XX_INT0_RBBM_GPU_IDLE 0x00000001
+#define A5XX_INT0_RBBM_AHB_ERROR 0x00000002
+#define A5XX_INT0_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_INT0_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_INT0_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_INT0_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_INT0_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_INT0_RBBM_GPC_ERROR 0x00000080
+#define A5XX_INT0_CP_SW 0x00000100
+#define A5XX_INT0_CP_HW_ERROR 0x00000200
+#define A5XX_INT0_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_INT0_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_INT0_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_INT0_CP_IB2 0x00002000
+#define A5XX_INT0_CP_IB1 0x00004000
+#define A5XX_INT0_CP_RB 0x00008000
+#define A5XX_INT0_CP_UNUSED_1 0x00010000
+#define A5XX_INT0_CP_RB_DONE_TS 0x00020000
+#define A5XX_INT0_CP_WT_DONE_TS 0x00040000
+#define A5XX_INT0_UNKNOWN_1 0x00080000
+#define A5XX_INT0_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_INT0_UNUSED_2 0x00200000
+#define A5XX_INT0_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_INT0_MISC_HANG_DETECT 0x00800000
+#define A5XX_INT0_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_INT0_UCHE_TRAP_INTR 0x02000000
+#define A5XX_INT0_DEBBUS_INTR_0 0x04000000
+#define A5XX_INT0_DEBBUS_INTR_1 0x08000000
+#define A5XX_INT0_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_INT0_GPMU_FIRMWARE 0x20000000
+#define A5XX_INT0_ISDB_CPU_IRQ 0x40000000
+#define A5XX_INT0_ISDB_UNDER_DEBUG 0x80000000
+#define A5XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A5XX_CP_INT_CP_RESERVED_BIT_ERROR 0x00000002
+#define A5XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A5XX_CP_INT_CP_DMA_ERROR 0x00000008
+#define A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A5XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define REG_A5XX_CP_RB_BASE 0x00000800
+
+#define REG_A5XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A5XX_CP_RB_CNTL 0x00000802
+
+#define REG_A5XX_CP_RB_RPTR_ADDR 0x00000804
+
+#define REG_A5XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A5XX_CP_RB_RPTR 0x00000806
+
+#define REG_A5XX_CP_RB_WPTR 0x00000807
+
+#define REG_A5XX_CP_PFP_STAT_ADDR 0x00000808
+
+#define REG_A5XX_CP_PFP_STAT_DATA 0x00000809
+
+#define REG_A5XX_CP_DRAW_STATE_ADDR 0x0000080b
+
+#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
+
+#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
+
+#define REG_A5XX_CP_CRASH_DUMP_CNTL 0x00000819
+
+#define REG_A5XX_CP_ME_STAT_ADDR 0x0000081a
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_1 0x0000081f
+
+#define REG_A5XX_CP_ROQ_THRESHOLDS_2 0x00000820
+
+#define REG_A5XX_CP_ROQ_DBG_ADDR 0x00000821
+
+#define REG_A5XX_CP_ROQ_DBG_DATA 0x00000822
+
+#define REG_A5XX_CP_MEQ_DBG_ADDR 0x00000823
+
+#define REG_A5XX_CP_MEQ_DBG_DATA 0x00000824
+
+#define REG_A5XX_CP_MEQ_THRESHOLDS 0x00000825
+
+#define REG_A5XX_CP_MERCIU_SIZE 0x00000826
+
+#define REG_A5XX_CP_MERCIU_DBG_ADDR 0x00000827
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_1 0x00000828
+
+#define REG_A5XX_CP_MERCIU_DBG_DATA_2 0x00000829
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_ADDR 0x0000082a
+
+#define REG_A5XX_CP_PFP_UCODE_DBG_DATA 0x0000082b
+
+#define REG_A5XX_CP_ME_UCODE_DBG_ADDR 0x0000082f
+
+#define REG_A5XX_CP_ME_UCODE_DBG_DATA 0x00000830
+
+#define REG_A5XX_CP_CNTL 0x00000831
+
+#define REG_A5XX_CP_PFP_ME_CNTL 0x00000832
+
+#define REG_A5XX_CP_CHICKEN_DBG 0x00000833
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_LO 0x00000835
+
+#define REG_A5XX_CP_PFP_INSTR_BASE_HI 0x00000836
+
+#define REG_A5XX_CP_ME_INSTR_BASE_LO 0x00000838
+
+#define REG_A5XX_CP_ME_INSTR_BASE_HI 0x00000839
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_CNTL 0x0000083b
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO 0x0000083c
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI 0x0000083d
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO 0x0000083e
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_HI 0x0000083f
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x00000840
+
+#define REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x00000841
+
+#define REG_A5XX_CP_ADDR_MODE_CNTL 0x00000860
+
+#define REG_A5XX_CP_ME_STAT_DATA 0x00000b14
+
+#define REG_A5XX_CP_WFI_PEND_CTR 0x00000b15
+
+#define REG_A5XX_CP_INTERRUPT_STATUS 0x00000b18
+
+#define REG_A5XX_CP_HW_FAULT 0x00000b1a
+
+#define REG_A5XX_CP_PROTECT_STATUS 0x00000b1c
+
+#define REG_A5XX_CP_IB1_BASE 0x00000b1f
+
+#define REG_A5XX_CP_IB1_BASE_HI 0x00000b20
+
+#define REG_A5XX_CP_IB1_BUFSZ 0x00000b21
+
+#define REG_A5XX_CP_IB2_BASE 0x00000b22
+
+#define REG_A5XX_CP_IB2_BASE_HI 0x00000b23
+
+#define REG_A5XX_CP_IB2_BUFSZ 0x00000b24
+
+static inline uint32_t REG_A5XX_CP_SCRATCH(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000b78 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000880 + 0x1*i0; }
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0001ffff
+#define A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A5XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A5XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A5XX_CP_PROTECT_REG_MASK_LEN__MASK 0x1f000000
+#define A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT 24
+static inline uint32_t A5XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A5XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A5XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A5XX_CP_PROTECT_REG_TRAP_WRITE 0x20000000
+#define A5XX_CP_PROTECT_REG_TRAP_READ 0x40000000
+
+#define REG_A5XX_CP_PROTECT_CNTL 0x000008a0
+
+#define REG_A5XX_CP_AHB_FAULT 0x00000b1b
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_0 0x00000bb0
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_1 0x00000bb1
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_2 0x00000bb2
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_3 0x00000bb3
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_4 0x00000bb4
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_5 0x00000bb5
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_6 0x00000bb6
+
+#define REG_A5XX_CP_PERFCTR_CP_SEL_7 0x00000bb7
+
+#define REG_A5XX_VSC_ADDR_MODE_CNTL 0x00000bc1
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_0 0x00000bba
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_1 0x00000bbb
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_2 0x00000bbc
+
+#define REG_A5XX_CP_POWERCTR_CP_SEL_3 0x00000bbd
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_A 0x00000004
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK 0x000000ff
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT 0
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX__MASK;
+}
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK 0x0000ff00
+#define A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__SHIFT) & A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_B 0x00000005
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_C 0x00000006
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_SEL_D 0x00000007
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLT 0x00000008
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CNTLM 0x00000009
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A5XX_RBBM_CFG_DEBBUS_CTLTM_ENABLE_SHIFT 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPL 0x0000000a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OPE 0x0000000b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_0 0x0000000c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_1 0x0000000d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_2 0x0000000e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTL_3 0x0000000f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_0 0x00000010
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_1 0x00000011
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_2 0x00000012
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKL_3 0x00000013
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_0 0x00000014
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_BYTEL_1 0x00000015
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_0 0x00000016
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_1 0x00000017
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_2 0x00000018
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IVTE_3 0x00000019
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_0 0x0000001a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_1 0x0000001b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_2 0x0000001c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MASKE_3 0x0000001d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_NIBBLEE 0x0000001e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC0 0x0000001f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_PTRC1 0x00000020
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADREG 0x00000021
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_IDX 0x00000022
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_CLRC 0x00000023
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_LOADIVT 0x00000024
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000002f
+
+#define REG_A5XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A5XX_RBBM_INT_0_MASK 0x00000038
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR 0x00000002
+#define A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT 0x00000004
+#define A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT 0x00000008
+#define A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT 0x00000010
+#define A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT 0x00000020
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW 0x00000040
+#define A5XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A5XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A5XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A5XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A5XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A5XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A5XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A5XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A5XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT 0x00800000
+#define A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A5XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A5XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP 0x10000000
+#define A5XX_RBBM_INT_0_MASK_GPMU_FIRMWARE 0x20000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A5XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+
+#define REG_A5XX_RBBM_AHB_DBG_CNTL 0x0000003f
+
+#define REG_A5XX_RBBM_EXT_VBIF_DBG_CNTL 0x00000041
+
+#define REG_A5XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A5XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A5XX_RBBM_DBG_LO_HI_GPIO 0x00000048
+
+#define REG_A5XX_RBBM_EXT_TRACE_BUS_CNTL 0x00000049
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP0 0x0000004a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP1 0x0000004b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP2 0x0000004c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TP3 0x0000004d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP0 0x0000004e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP1 0x0000004f
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP2 0x00000050
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_TP3 0x00000051
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP0 0x00000052
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP1 0x00000053
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP2 0x00000054
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_TP3 0x00000055
+
+#define REG_A5XX_RBBM_READ_AHB_THROUGH_DBG 0x00000059
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_UCHE 0x0000005a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_UCHE 0x0000005b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL3_UCHE 0x0000005c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL4_UCHE 0x0000005d
+
+#define REG_A5XX_RBBM_CLOCK_HYST_UCHE 0x0000005e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_UCHE 0x0000005f
+
+#define REG_A5XX_RBBM_CLOCK_MODE_GPC 0x00000060
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPC 0x00000061
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPC 0x00000062
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000063
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x00000064
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000065
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_HLSQ 0x00000066
+
+#define REG_A5XX_RBBM_CLOCK_CNTL 0x00000067
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP0 0x00000068
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP1 0x00000069
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP2 0x0000006a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_SP3 0x0000006b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP0 0x0000006c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP1 0x0000006d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP2 0x0000006e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_SP3 0x0000006f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP0 0x00000070
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP1 0x00000071
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP2 0x00000072
+
+#define REG_A5XX_RBBM_CLOCK_HYST_SP3 0x00000073
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP0 0x00000074
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP1 0x00000075
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP2 0x00000076
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_SP3 0x00000077
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB0 0x00000078
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB1 0x00000079
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB2 0x0000007a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RB3 0x0000007b
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB0 0x0000007c
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB1 0x0000007d
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB2 0x0000007e
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RB3 0x0000007f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RAC 0x00000080
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RAC 0x00000081
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU0 0x00000082
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU1 0x00000083
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU2 0x00000084
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_CCU3 0x00000085
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000086
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000087
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000088
+
+#define REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000089
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_RAC 0x0000008a
+
+#define REG_A5XX_RBBM_CLOCK_CNTL2_RAC 0x0000008b
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0 0x0000008c
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1 0x0000008d
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2 0x0000008e
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3 0x0000008f
+
+#define REG_A5XX_RBBM_CLOCK_HYST_VFD 0x00000090
+
+#define REG_A5XX_RBBM_CLOCK_MODE_VFD 0x00000091
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_VFD 0x00000092
+
+#define REG_A5XX_RBBM_AHB_CNTL0 0x00000093
+
+#define REG_A5XX_RBBM_AHB_CNTL1 0x00000094
+
+#define REG_A5XX_RBBM_AHB_CNTL2 0x00000095
+
+#define REG_A5XX_RBBM_AHB_CMD 0x00000096
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11 0x0000009c
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12 0x0000009d
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13 0x0000009e
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14 0x0000009f
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15 0x000000a0
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16 0x000000a1
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17 0x000000a2
+
+#define REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18 0x000000a3
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP0 0x000000a4
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP1 0x000000a5
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP2 0x000000a6
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_TP3 0x000000a7
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP0 0x000000a8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP1 0x000000a9
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP2 0x000000aa
+
+#define REG_A5XX_RBBM_CLOCK_DELAY2_TP3 0x000000ab
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP0 0x000000ac
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP1 0x000000ad
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP2 0x000000ae
+
+#define REG_A5XX_RBBM_CLOCK_DELAY3_TP3 0x000000af
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP0 0x000000b0
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP1 0x000000b1
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP2 0x000000b2
+
+#define REG_A5XX_RBBM_CLOCK_HYST_TP3 0x000000b3
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP0 0x000000b4
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP1 0x000000b5
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP2 0x000000b6
+
+#define REG_A5XX_RBBM_CLOCK_HYST2_TP3 0x000000b7
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP0 0x000000b8
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP1 0x000000b9
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP2 0x000000ba
+
+#define REG_A5XX_RBBM_CLOCK_HYST3_TP3 0x000000bb
+
+#define REG_A5XX_RBBM_CLOCK_CNTL_GPMU 0x000000c8
+
+#define REG_A5XX_RBBM_CLOCK_DELAY_GPMU 0x000000c9
+
+#define REG_A5XX_RBBM_CLOCK_HYST_GPMU 0x000000ca
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_LO 0x000003a0
+
+#define REG_A5XX_RBBM_PERFCTR_CP_0_HI 0x000003a1
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_LO 0x000003a2
+
+#define REG_A5XX_RBBM_PERFCTR_CP_1_HI 0x000003a3
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_LO 0x000003a4
+
+#define REG_A5XX_RBBM_PERFCTR_CP_2_HI 0x000003a5
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_LO 0x000003a6
+
+#define REG_A5XX_RBBM_PERFCTR_CP_3_HI 0x000003a7
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_LO 0x000003a8
+
+#define REG_A5XX_RBBM_PERFCTR_CP_4_HI 0x000003a9
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_LO 0x000003aa
+
+#define REG_A5XX_RBBM_PERFCTR_CP_5_HI 0x000003ab
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_LO 0x000003ac
+
+#define REG_A5XX_RBBM_PERFCTR_CP_6_HI 0x000003ad
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_LO 0x000003ae
+
+#define REG_A5XX_RBBM_PERFCTR_CP_7_HI 0x000003af
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_LO 0x000003b0
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_0_HI 0x000003b1
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_LO 0x000003b2
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_1_HI 0x000003b3
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_LO 0x000003b4
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_2_HI 0x000003b5
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_LO 0x000003b6
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_3_HI 0x000003b7
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_LO 0x000003b8
+
+#define REG_A5XX_RBBM_PERFCTR_PC_0_HI 0x000003b9
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_LO 0x000003ba
+
+#define REG_A5XX_RBBM_PERFCTR_PC_1_HI 0x000003bb
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_LO 0x000003bc
+
+#define REG_A5XX_RBBM_PERFCTR_PC_2_HI 0x000003bd
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_LO 0x000003be
+
+#define REG_A5XX_RBBM_PERFCTR_PC_3_HI 0x000003bf
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_LO 0x000003c0
+
+#define REG_A5XX_RBBM_PERFCTR_PC_4_HI 0x000003c1
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_LO 0x000003c2
+
+#define REG_A5XX_RBBM_PERFCTR_PC_5_HI 0x000003c3
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_LO 0x000003c4
+
+#define REG_A5XX_RBBM_PERFCTR_PC_6_HI 0x000003c5
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_LO 0x000003c6
+
+#define REG_A5XX_RBBM_PERFCTR_PC_7_HI 0x000003c7
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_LO 0x000003c8
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_0_HI 0x000003c9
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_LO 0x000003ca
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_1_HI 0x000003cb
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_LO 0x000003cc
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_2_HI 0x000003cd
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_LO 0x000003ce
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_3_HI 0x000003cf
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_LO 0x000003d0
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_4_HI 0x000003d1
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_LO 0x000003d2
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_5_HI 0x000003d3
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_LO 0x000003d4
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_6_HI 0x000003d5
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_LO 0x000003d6
+
+#define REG_A5XX_RBBM_PERFCTR_VFD_7_HI 0x000003d7
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO 0x000003d8
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI 0x000003d9
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO 0x000003da
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI 0x000003db
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO 0x000003dc
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI 0x000003dd
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO 0x000003de
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI 0x000003df
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO 0x000003e0
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI 0x000003e1
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO 0x000003e2
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI 0x000003e3
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO 0x000003e4
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI 0x000003e5
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO 0x000003e6
+
+#define REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI 0x000003e7
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_LO 0x000003e8
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_0_HI 0x000003e9
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_LO 0x000003ea
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_1_HI 0x000003eb
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_LO 0x000003ec
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_2_HI 0x000003ed
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_LO 0x000003ee
+
+#define REG_A5XX_RBBM_PERFCTR_VPC_3_HI 0x000003ef
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_LO 0x000003f0
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_0_HI 0x000003f1
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_LO 0x000003f2
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_1_HI 0x000003f3
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_LO 0x000003f4
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_2_HI 0x000003f5
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_LO 0x000003f6
+
+#define REG_A5XX_RBBM_PERFCTR_CCU_3_HI 0x000003f7
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_LO 0x000003f8
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_0_HI 0x000003f9
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_LO 0x000003fa
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_1_HI 0x000003fb
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_LO 0x000003fc
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_2_HI 0x000003fd
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_LO 0x000003fe
+
+#define REG_A5XX_RBBM_PERFCTR_TSE_3_HI 0x000003ff
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_LO 0x00000400
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_0_HI 0x00000401
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_LO 0x00000402
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_1_HI 0x00000403
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_LO 0x00000404
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_2_HI 0x00000405
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_LO 0x00000406
+
+#define REG_A5XX_RBBM_PERFCTR_RAS_3_HI 0x00000407
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_LO 0x00000408
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_0_HI 0x00000409
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_LO 0x0000040a
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_1_HI 0x0000040b
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_LO 0x0000040c
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_2_HI 0x0000040d
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_LO 0x0000040e
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_3_HI 0x0000040f
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_LO 0x00000410
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_4_HI 0x00000411
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_LO 0x00000412
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_5_HI 0x00000413
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_LO 0x00000414
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_6_HI 0x00000415
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_LO 0x00000416
+
+#define REG_A5XX_RBBM_PERFCTR_UCHE_7_HI 0x00000417
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_LO 0x00000418
+
+#define REG_A5XX_RBBM_PERFCTR_TP_0_HI 0x00000419
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_LO 0x0000041a
+
+#define REG_A5XX_RBBM_PERFCTR_TP_1_HI 0x0000041b
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_LO 0x0000041c
+
+#define REG_A5XX_RBBM_PERFCTR_TP_2_HI 0x0000041d
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_LO 0x0000041e
+
+#define REG_A5XX_RBBM_PERFCTR_TP_3_HI 0x0000041f
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_LO 0x00000420
+
+#define REG_A5XX_RBBM_PERFCTR_TP_4_HI 0x00000421
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_LO 0x00000422
+
+#define REG_A5XX_RBBM_PERFCTR_TP_5_HI 0x00000423
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_LO 0x00000424
+
+#define REG_A5XX_RBBM_PERFCTR_TP_6_HI 0x00000425
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_LO 0x00000426
+
+#define REG_A5XX_RBBM_PERFCTR_TP_7_HI 0x00000427
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_LO 0x00000428
+
+#define REG_A5XX_RBBM_PERFCTR_SP_0_HI 0x00000429
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_LO 0x0000042a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_1_HI 0x0000042b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_LO 0x0000042c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_2_HI 0x0000042d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_LO 0x0000042e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_3_HI 0x0000042f
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_LO 0x00000430
+
+#define REG_A5XX_RBBM_PERFCTR_SP_4_HI 0x00000431
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_LO 0x00000432
+
+#define REG_A5XX_RBBM_PERFCTR_SP_5_HI 0x00000433
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_LO 0x00000434
+
+#define REG_A5XX_RBBM_PERFCTR_SP_6_HI 0x00000435
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_LO 0x00000436
+
+#define REG_A5XX_RBBM_PERFCTR_SP_7_HI 0x00000437
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_LO 0x00000438
+
+#define REG_A5XX_RBBM_PERFCTR_SP_8_HI 0x00000439
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_LO 0x0000043a
+
+#define REG_A5XX_RBBM_PERFCTR_SP_9_HI 0x0000043b
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_LO 0x0000043c
+
+#define REG_A5XX_RBBM_PERFCTR_SP_10_HI 0x0000043d
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_LO 0x0000043e
+
+#define REG_A5XX_RBBM_PERFCTR_SP_11_HI 0x0000043f
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_LO 0x00000440
+
+#define REG_A5XX_RBBM_PERFCTR_RB_0_HI 0x00000441
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_LO 0x00000442
+
+#define REG_A5XX_RBBM_PERFCTR_RB_1_HI 0x00000443
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_LO 0x00000444
+
+#define REG_A5XX_RBBM_PERFCTR_RB_2_HI 0x00000445
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_LO 0x00000446
+
+#define REG_A5XX_RBBM_PERFCTR_RB_3_HI 0x00000447
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_LO 0x00000448
+
+#define REG_A5XX_RBBM_PERFCTR_RB_4_HI 0x00000449
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_LO 0x0000044a
+
+#define REG_A5XX_RBBM_PERFCTR_RB_5_HI 0x0000044b
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_LO 0x0000044c
+
+#define REG_A5XX_RBBM_PERFCTR_RB_6_HI 0x0000044d
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_LO 0x0000044e
+
+#define REG_A5XX_RBBM_PERFCTR_RB_7_HI 0x0000044f
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_LO 0x00000450
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_0_HI 0x00000451
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_LO 0x00000452
+
+#define REG_A5XX_RBBM_PERFCTR_VSC_1_HI 0x00000453
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_LO 0x00000454
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_0_HI 0x00000455
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_LO 0x00000456
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_1_HI 0x00000457
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_LO 0x00000458
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_2_HI 0x00000459
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_LO 0x0000045a
+
+#define REG_A5XX_RBBM_PERFCTR_LRZ_3_HI 0x0000045b
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_LO 0x0000045c
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_0_HI 0x0000045d
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_LO 0x0000045e
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_1_HI 0x0000045f
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_LO 0x00000460
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_2_HI 0x00000461
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_LO 0x00000462
+
+#define REG_A5XX_RBBM_PERFCTR_CMP_3_HI 0x00000463
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_LO 0x000004d2
+
+#define REG_A5XX_RBBM_ALWAYSON_COUNTER_HI 0x000004d3
+
+/* RBBM top-level busy/status register: one read-only busy bit per GPU
+ * sub-block.  Note "LRZ_BUZY" (sic) below is the name emitted by the
+ * rnndb generator; it must stay misspelled to match existing users —
+ * fix it in the XML database, not here. */
+#define REG_A5XX_RBBM_STATUS 0x000004f5
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x80000000
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x40000000
+#define A5XX_RBBM_STATUS_HLSQ_BUSY 0x20000000
+#define A5XX_RBBM_STATUS_VSC_BUSY 0x10000000
+#define A5XX_RBBM_STATUS_TPL1_BUSY 0x08000000
+#define A5XX_RBBM_STATUS_SP_BUSY 0x04000000
+#define A5XX_RBBM_STATUS_UCHE_BUSY 0x02000000
+#define A5XX_RBBM_STATUS_VPC_BUSY 0x01000000
+#define A5XX_RBBM_STATUS_VFDP_BUSY 0x00800000
+#define A5XX_RBBM_STATUS_VFD_BUSY 0x00400000
+#define A5XX_RBBM_STATUS_TESS_BUSY 0x00200000
+#define A5XX_RBBM_STATUS_PC_VSD_BUSY 0x00100000
+#define A5XX_RBBM_STATUS_PC_DCALL_BUSY 0x00080000
+#define A5XX_RBBM_STATUS_GPMU_SLAVE_BUSY 0x00040000
+#define A5XX_RBBM_STATUS_DCOM_BUSY 0x00020000
+#define A5XX_RBBM_STATUS_COM_BUSY 0x00010000
+#define A5XX_RBBM_STATUS_LRZ_BUZY 0x00008000
+#define A5XX_RBBM_STATUS_A2D_DSP_BUSY 0x00004000
+#define A5XX_RBBM_STATUS_CCUFCHE_BUSY 0x00002000
+#define A5XX_RBBM_STATUS_RB_BUSY 0x00001000
+#define A5XX_RBBM_STATUS_RAS_BUSY 0x00000800
+#define A5XX_RBBM_STATUS_TSE_BUSY 0x00000400
+#define A5XX_RBBM_STATUS_VBIF_BUSY 0x00000200
+#define A5XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_HYST 0x00000100
+#define A5XX_RBBM_STATUS_CP_BUSY_IGN_HYST 0x00000080
+#define A5XX_RBBM_STATUS_CP_BUSY 0x00000040
+#define A5XX_RBBM_STATUS_GPMU_MASTER_BUSY 0x00000020
+#define A5XX_RBBM_STATUS_CP_CRASH_BUSY 0x00000010
+#define A5XX_RBBM_STATUS_CP_ETS_BUSY 0x00000008
+#define A5XX_RBBM_STATUS_CP_PFP_BUSY 0x00000004
+#define A5XX_RBBM_STATUS_CP_ME_BUSY 0x00000002
+#define A5XX_RBBM_STATUS_HI_BUSY 0x00000001
+
+#define REG_A5XX_RBBM_STATUS3 0x00000530
+
+#define REG_A5XX_RBBM_INT_0_STATUS 0x000004e1
+
+#define REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS 0x000004f0
+
+#define REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS 0x000004f1
+
+#define REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS 0x000004f3
+
+#define REG_A5XX_RBBM_AHB_ERROR_STATUS 0x000004f4
+
+#define REG_A5XX_RBBM_PERFCTR_CNTL 0x00000464
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 0x00000465
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD1 0x00000466
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD2 0x00000467
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_CMD3 0x00000468
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000469
+
+#define REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x0000046a
+
+/* NOTE(review): REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0..3 are already defined
+ * earlier in this header with identical values (0x46b-0x46e).  Identical
+ * redefinition is legal C and harmless at compile time, but this file is
+ * generated — the duplicate entries should be removed from the rnndb XML
+ * it is generated from rather than edited here. */
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0 0x0000046b
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1 0x0000046c
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2 0x0000046d
+
+#define REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000046e
+
+#define REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000046f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_EVENT_LOGIC 0x00000504
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_OVER 0x00000505
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT0 0x00000506
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT1 0x00000507
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT2 0x00000508
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT3 0x00000509
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT4 0x0000050a
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_COUNT5 0x0000050b
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_ADDR 0x0000050c
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF0 0x0000050d
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1 0x0000050e
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2 0x0000050f
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF3 0x00000510
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF4 0x00000511
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR0 0x00000512
+
+#define REG_A5XX_RBBM_CFG_DBGBUS_MISR1 0x00000513
+
+#define REG_A5XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CONFIG 0x0000f000
+
+#define REG_A5XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A5XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_LO 0x0000f804
+
+#define REG_A5XX_RBBM_SECVID_TSB_COMP_STATUS_HI 0x0000f805
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_LO 0x0000f806
+
+#define REG_A5XX_RBBM_SECVID_TSB_UCHE_STATUS_HI 0x0000f807
+
+#define REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A5XX_VSC_PIPE_DATA_LENGTH_0 0x00000c00
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_0 0x00000c60
+
+#define REG_A5XX_VSC_PERFCTR_VSC_SEL_1 0x00000c61
+
+/* VSC_BIN_SIZE: packs the visibility-stream bin dimensions into one
+ * 32-bit register — X in bits [14:0], Y in bits [30:16].  The inline
+ * helpers below are generated pack functions: shift the value into the
+ * field position and mask off anything that overflows the field. */
+#define REG_A5XX_VSC_BIN_SIZE 0x00000cdd
+#define A5XX_VSC_BIN_SIZE_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_VSC_BIN_SIZE_X__MASK 0x00007fff
+#define A5XX_VSC_BIN_SIZE_X__SHIFT 0
+static inline uint32_t A5XX_VSC_BIN_SIZE_X(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_X__SHIFT) & A5XX_VSC_BIN_SIZE_X__MASK;
+}
+#define A5XX_VSC_BIN_SIZE_Y__MASK 0x7fff0000
+#define A5XX_VSC_BIN_SIZE_Y__SHIFT 16
+static inline uint32_t A5XX_VSC_BIN_SIZE_Y(uint32_t val)
+{
+ return ((val) << A5XX_VSC_BIN_SIZE_Y__SHIFT) & A5XX_VSC_BIN_SIZE_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_ADDR_MODE_CNTL 0x00000c81
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_0 0x00000c90
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_1 0x00000c91
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_2 0x00000c92
+
+#define REG_A5XX_GRAS_PERFCTR_TSE_SEL_3 0x00000c93
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_0 0x00000c94
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_1 0x00000c95
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_2 0x00000c96
+
+#define REG_A5XX_GRAS_PERFCTR_RAS_SEL_3 0x00000c97
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0 0x00000c98
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1 0x00000c99
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2 0x00000c9a
+
+#define REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3 0x00000c9b
+
+#define REG_A5XX_RB_DBG_ECO_CNTL 0x00000cc4
+
+#define REG_A5XX_RB_ADDR_MODE_CNTL 0x00000cc5
+
+#define REG_A5XX_RB_MODE_CNTL 0x00000cc6
+
+#define REG_A5XX_RB_CCU_CNTL 0x00000cc7
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_0 0x00000cd0
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_1 0x00000cd1
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_2 0x00000cd2
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_3 0x00000cd3
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_4 0x00000cd4
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_5 0x00000cd5
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_6 0x00000cd6
+
+#define REG_A5XX_RB_PERFCTR_RB_SEL_7 0x00000cd7
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_0 0x00000cd8
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_1 0x00000cd9
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_2 0x00000cda
+
+#define REG_A5XX_RB_PERFCTR_CCU_SEL_3 0x00000cdb
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_0 0x00000ce0
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_1 0x00000ce1
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_2 0x00000ce2
+
+#define REG_A5XX_RB_POWERCTR_RB_SEL_3 0x00000ce3
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_0 0x00000ce4
+
+#define REG_A5XX_RB_POWERCTR_CCU_SEL_1 0x00000ce5
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_0 0x00000cec
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_1 0x00000ced
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_2 0x00000cee
+
+#define REG_A5XX_RB_PERFCTR_CMP_SEL_3 0x00000cef
+
+#define REG_A5XX_PC_DBG_ECO_CNTL 0x00000d00
+#define A5XX_PC_DBG_ECO_CNTL_TWOPASSUSEWFI 0x00000100
+
+#define REG_A5XX_PC_ADDR_MODE_CNTL 0x00000d01
+
+#define REG_A5XX_PC_MODE_CNTL 0x00000d02
+
+#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+
+#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_1 0x00000d11
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_2 0x00000d12
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_3 0x00000d13
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_4 0x00000d14
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_5 0x00000d15
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_6 0x00000d16
+
+#define REG_A5XX_PC_PERFCTR_PC_SEL_7 0x00000d17
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_0 0x00000e00
+
+#define REG_A5XX_HLSQ_TIMEOUT_THRESHOLD_1 0x00000e01
+
+#define REG_A5XX_HLSQ_ADDR_MODE_CNTL 0x00000e05
+
+#define REG_A5XX_HLSQ_MODE_CNTL 0x00000e06
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x00000e10
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x00000e11
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x00000e12
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x00000e13
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x00000e14
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x00000e15
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6 0x00000e16
+
+#define REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7 0x00000e17
+
+#define REG_A5XX_HLSQ_SPTP_RDSEL 0x00000f08
+
+/* HLSQ debug read select: STATETYPE occupies bits [15:8].  Generated
+ * pack helper — shifts the value into place and masks to the field. */
+#define REG_A5XX_HLSQ_DBG_READ_SEL 0x0000bc00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK 0x0000ff00
+#define A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DBG_READ_SEL_STATETYPE(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DBG_READ_SEL_STATETYPE__SHIFT) & A5XX_HLSQ_DBG_READ_SEL_STATETYPE__MASK;
+}
+
+#define REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000a000
+
+#define REG_A5XX_VFD_ADDR_MODE_CNTL 0x00000e41
+
+#define REG_A5XX_VFD_MODE_CNTL 0x00000e42
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_0 0x00000e50
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_1 0x00000e51
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_2 0x00000e52
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_3 0x00000e53
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_4 0x00000e54
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_5 0x00000e55
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_6 0x00000e56
+
+#define REG_A5XX_VFD_PERFCTR_VFD_SEL_7 0x00000e57
+
+#define REG_A5XX_VPC_DBG_ECO_CNTL 0x00000e60
+
+#define REG_A5XX_VPC_ADDR_MODE_CNTL 0x00000e61
+
+#define REG_A5XX_VPC_MODE_CNTL 0x00000e62
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_0 0x00000e64
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_1 0x00000e65
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_2 0x00000e66
+
+#define REG_A5XX_VPC_PERFCTR_VPC_SEL_3 0x00000e67
+
+#define REG_A5XX_UCHE_ADDR_MODE_CNTL 0x00000e80
+
+#define REG_A5XX_UCHE_SVM_CNTL 0x00000e82
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_LO 0x00000e87
+
+#define REG_A5XX_UCHE_WRITE_THRU_BASE_HI 0x00000e88
+
+#define REG_A5XX_UCHE_TRAP_BASE_LO 0x00000e89
+
+#define REG_A5XX_UCHE_TRAP_BASE_HI 0x00000e8a
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e8b
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e8c
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e8d
+
+#define REG_A5XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e8e
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL_2 0x00000e8f
+
+#define REG_A5XX_UCHE_DBG_ECO_CNTL 0x00000e90
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_LO 0x00000e91
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MIN_HI 0x00000e92
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_LO 0x00000e93
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE_MAX_HI 0x00000e94
+
+#define REG_A5XX_UCHE_CACHE_INVALIDATE 0x00000e95
+
+#define REG_A5XX_UCHE_CACHE_WAYS 0x00000e96
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000ea0
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000ea1
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000ea2
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000ea3
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000ea4
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000ea5
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000ea6
+
+#define REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000ea7
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 0x00000ea8
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 0x00000ea9
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 0x00000eaa
+
+#define REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 0x00000eab
+
+#define REG_A5XX_UCHE_TRAP_LOG_LO 0x00000eb1
+
+#define REG_A5XX_UCHE_TRAP_LOG_HI 0x00000eb2
+
+#define REG_A5XX_SP_DBG_ECO_CNTL 0x00000ec0
+
+#define REG_A5XX_SP_ADDR_MODE_CNTL 0x00000ec1
+
+#define REG_A5XX_SP_MODE_CNTL 0x00000ec2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_0 0x00000ed0
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_1 0x00000ed1
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_2 0x00000ed2
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_3 0x00000ed3
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_4 0x00000ed4
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_5 0x00000ed5
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_6 0x00000ed6
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_7 0x00000ed7
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_8 0x00000ed8
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_9 0x00000ed9
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_10 0x00000eda
+
+#define REG_A5XX_SP_PERFCTR_SP_SEL_11 0x00000edb
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_0 0x00000edc
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_1 0x00000edd
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_2 0x00000ede
+
+#define REG_A5XX_SP_POWERCTR_SP_SEL_3 0x00000edf
+
+#define REG_A5XX_TPL1_ADDR_MODE_CNTL 0x00000f01
+
+#define REG_A5XX_TPL1_MODE_CNTL 0x00000f02
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_0 0x00000f10
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_1 0x00000f11
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_2 0x00000f12
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_3 0x00000f13
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_4 0x00000f14
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_5 0x00000f15
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_6 0x00000f16
+
+#define REG_A5XX_TPL1_PERFCTR_TP_SEL_7 0x00000f17
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_0 0x00000f18
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_1 0x00000f19
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_2 0x00000f1a
+
+#define REG_A5XX_TPL1_POWERCTR_TP_SEL_3 0x00000f1b
+
+#define REG_A5XX_VBIF_VERSION 0x00003000
+
+#define REG_A5XX_VBIF_CLKON 0x00003001
+#define A5XX_VBIF_CLKON_FORCE_ON 0x00000001
+#define A5XX_VBIF_CLKON_FORCE_ON_TESTBUS 0x00000002
+
+#define REG_A5XX_VBIF_ABIT_SORT 0x00003028
+
+#define REG_A5XX_VBIF_ABIT_SORT_CONF 0x00003029
+
+#define REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB 0x00003049
+
+#define REG_A5XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF0 0x0000302c
+
+#define REG_A5XX_VBIF_IN_RD_LIM_CONF1 0x0000302d
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A5XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT_CTRL 0x00003084
+#define A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN 0x00000001
+
+/* VBIF test bus 1 control: DATA_SEL is a 4-bit selector in bits [3:0].
+ * Generated pack helper (shift then mask). */
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL0 0x00003085
+
+#define REG_A5XX_VBIF_TEST_BUS1_CTRL1 0x00003086
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK 0x0000000f
+#define A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT 0
+static inline uint32_t A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL__MASK;
+}
+
+/* VBIF test bus 2 control: DATA_SEL is a 5-bit selector in bits [4:0]
+ * (one bit wider than test bus 1).  Generated pack helper. */
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL0 0x00003087
+
+#define REG_A5XX_VBIF_TEST_BUS2_CTRL1 0x00003088
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK 0x0000001f
+#define A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT 0
+static inline uint32_t A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(uint32_t val)
+{
+ return ((val) << A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__SHIFT) & A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL__MASK;
+}
+
+#define REG_A5XX_VBIF_TEST_BUS_OUT 0x0000308c
+
+/* Indexed VBIF perf-counter register accessors: element i0 of each
+ * array sits at base + i0 (stride 1 dword).  No bounds check is
+ * generated; presumably i0 < 4 given the four LOW0..LOW3 result
+ * registers below — callers must enforce this (NOTE(review): confirm
+ * the array length against the rnndb XML). */
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_EN(uint32_t i0) { return 0x000030c0 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_CLR(uint32_t i0) { return 0x000030c8 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VBIF_PERF_CNT_SEL(uint32_t i0) { return 0x000030d0 + 0x1*i0; }
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A5XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A5XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+/* Indexed VBIF power-counter enable/clear accessors (stride 1 dword).
+ * Unchecked index, as with the perf-counter accessors above;
+ * presumably i0 < 3 given the three PWR_CNT_LOW0..2 result registers
+ * below — TODO confirm against the rnndb XML. */
+static inline uint32_t REG_A5XX_VBIF_PERF_PWR_CNT_EN(uint32_t i0) { return 0x00003100 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VBIF_PERF_PWR_CNT_CLR(uint32_t i0) { return 0x00003108 + 0x1*i0; }
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A5XX_GPMU_INST_RAM_BASE 0x00008800
+
+#define REG_A5XX_GPMU_DATA_RAM_BASE 0x00009800
+
+#define REG_A5XX_GPMU_SP_POWER_CNTL 0x0000a881
+
+#define REG_A5XX_GPMU_RBCCU_CLOCK_CNTL 0x0000a886
+
+#define REG_A5XX_GPMU_RBCCU_POWER_CNTL 0x0000a887
+
+#define REG_A5XX_GPMU_SP_PWR_CLK_STATUS 0x0000a88b
+#define A5XX_GPMU_SP_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS 0x0000a88d
+#define A5XX_GPMU_RBCCU_PWR_CLK_STATUS_PWR_ON 0x00100000
+
+#define REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY 0x0000a891
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL 0x0000a892
+
+#define REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST 0x0000a893
+
+#define REG_A5XX_GPMU_PWR_COL_BINNING_CTRL 0x0000a894
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_WFI_CONFIG 0x0000a8c1
+
+#define REG_A5XX_GPMU_RBBM_INTR_INFO 0x0000a8d6
+
+#define REG_A5XX_GPMU_CM3_SYSRESET 0x0000a8d8
+
+#define REG_A5XX_GPMU_GENERAL_0 0x0000a8e0
+
+#define REG_A5XX_GPMU_GENERAL_1 0x0000a8e1
+
+#define REG_A5XX_SP_POWER_COUNTER_0_LO 0x0000a840
+
+#define REG_A5XX_SP_POWER_COUNTER_0_HI 0x0000a841
+
+#define REG_A5XX_SP_POWER_COUNTER_1_LO 0x0000a842
+
+#define REG_A5XX_SP_POWER_COUNTER_1_HI 0x0000a843
+
+#define REG_A5XX_SP_POWER_COUNTER_2_LO 0x0000a844
+
+#define REG_A5XX_SP_POWER_COUNTER_2_HI 0x0000a845
+
+#define REG_A5XX_SP_POWER_COUNTER_3_LO 0x0000a846
+
+#define REG_A5XX_SP_POWER_COUNTER_3_HI 0x0000a847
+
+#define REG_A5XX_TP_POWER_COUNTER_0_LO 0x0000a848
+
+#define REG_A5XX_TP_POWER_COUNTER_0_HI 0x0000a849
+
+#define REG_A5XX_TP_POWER_COUNTER_1_LO 0x0000a84a
+
+#define REG_A5XX_TP_POWER_COUNTER_1_HI 0x0000a84b
+
+#define REG_A5XX_TP_POWER_COUNTER_2_LO 0x0000a84c
+
+#define REG_A5XX_TP_POWER_COUNTER_2_HI 0x0000a84d
+
+#define REG_A5XX_TP_POWER_COUNTER_3_LO 0x0000a84e
+
+#define REG_A5XX_TP_POWER_COUNTER_3_HI 0x0000a84f
+
+#define REG_A5XX_RB_POWER_COUNTER_0_LO 0x0000a850
+
+#define REG_A5XX_RB_POWER_COUNTER_0_HI 0x0000a851
+
+#define REG_A5XX_RB_POWER_COUNTER_1_LO 0x0000a852
+
+#define REG_A5XX_RB_POWER_COUNTER_1_HI 0x0000a853
+
+#define REG_A5XX_RB_POWER_COUNTER_2_LO 0x0000a854
+
+#define REG_A5XX_RB_POWER_COUNTER_2_HI 0x0000a855
+
+/* Power-counter result registers for the RB, CCU, UCHE, CP and GPMU blocks.
+ * Each counter is a 64-bit value exposed as a _LO/_HI register pair at
+ * consecutive dword offsets.  NOTE(review): this header is autogenerated
+ * from the rnndb XML register database -- do not hand-edit offsets here,
+ * regenerate from the XML instead.
+ */
+#define REG_A5XX_RB_POWER_COUNTER_3_LO 0x0000a856
+
+#define REG_A5XX_RB_POWER_COUNTER_3_HI 0x0000a857
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_LO 0x0000a858
+
+#define REG_A5XX_CCU_POWER_COUNTER_0_HI 0x0000a859
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_LO 0x0000a85a
+
+#define REG_A5XX_CCU_POWER_COUNTER_1_HI 0x0000a85b
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_LO 0x0000a85c
+
+#define REG_A5XX_UCHE_POWER_COUNTER_0_HI 0x0000a85d
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_LO 0x0000a85e
+
+#define REG_A5XX_UCHE_POWER_COUNTER_1_HI 0x0000a85f
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_LO 0x0000a860
+
+#define REG_A5XX_UCHE_POWER_COUNTER_2_HI 0x0000a861
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_LO 0x0000a862
+
+#define REG_A5XX_UCHE_POWER_COUNTER_3_HI 0x0000a863
+
+#define REG_A5XX_CP_POWER_COUNTER_0_LO 0x0000a864
+
+#define REG_A5XX_CP_POWER_COUNTER_0_HI 0x0000a865
+
+#define REG_A5XX_CP_POWER_COUNTER_1_LO 0x0000a866
+
+#define REG_A5XX_CP_POWER_COUNTER_1_HI 0x0000a867
+
+#define REG_A5XX_CP_POWER_COUNTER_2_LO 0x0000a868
+
+#define REG_A5XX_CP_POWER_COUNTER_2_HI 0x0000a869
+
+#define REG_A5XX_CP_POWER_COUNTER_3_LO 0x0000a86a
+
+#define REG_A5XX_CP_POWER_COUNTER_3_HI 0x0000a86b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_LO 0x0000a86c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_0_HI 0x0000a86d
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_LO 0x0000a86e
+
+#define REG_A5XX_GPMU_POWER_COUNTER_1_HI 0x0000a86f
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_LO 0x0000a870
+
+#define REG_A5XX_GPMU_POWER_COUNTER_2_HI 0x0000a871
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_LO 0x0000a872
+
+#define REG_A5XX_GPMU_POWER_COUNTER_3_HI 0x0000a873
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_LO 0x0000a874
+
+#define REG_A5XX_GPMU_POWER_COUNTER_4_HI 0x0000a875
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_LO 0x0000a876
+
+#define REG_A5XX_GPMU_POWER_COUNTER_5_HI 0x0000a877
+
+/* Global enable plus the free-running always-on counter and its reset. */
+#define REG_A5XX_GPMU_POWER_COUNTER_ENABLE 0x0000a878
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO 0x0000a879
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI 0x0000a87a
+
+#define REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET 0x0000a87b
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_0 0x0000a87c
+
+#define REG_A5XX_GPMU_POWER_COUNTER_SELECT_1 0x0000a87d
+
+#define REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL 0x0000a8a3
+
+#define REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL 0x0000a8a8
+
+/* GPMU thermal / limits-management registers: temperature sensor,
+ * leakage coefficients, voltage and power thresholds, plus the GDPM
+ * and current-sensor (GPU_CS) blocks.  Autogenerated register map.
+ */
+#define REG_A5XX_GPMU_TEMP_SENSOR_ID 0x0000ac00
+
+#define REG_A5XX_GPMU_TEMP_SENSOR_CONFIG 0x0000ac01
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK 0x0000000f
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT 0
+/* Pack an ISENSE status value into bits [3:0] of TEMP_SENSOR_CONFIG. */
+static inline uint32_t A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__SHIFT) & A5XX_GPMU_TEMP_SENSOR_CONFIG_ISENSE_STATUS__MASK;
+}
+/* NOTE(review): BCL_ENABLED (bit 1) lies inside the ISENSE_STATUS [3:0]
+ * mask above -- the fields overlap as described in the generated XML;
+ * verify against the hardware documentation before relying on both.
+ */
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_BCL_ENABLED 0x00000002
+#define A5XX_GPMU_TEMP_SENSOR_CONFIG_LLM_ENABLED 0x00000200
+
+#define REG_A5XX_GPMU_TEMP_VAL 0x0000ac02
+
+#define REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD 0x0000ac03
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_STATUS 0x0000ac05
+
+#define REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK 0x0000ac06
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_0_1 0x0000ac40
+
+#define REG_A5XX_GPMU_LEAKAGE_TEMP_COEFF_2_3 0x0000ac41
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_0_1 0x0000ac42
+
+#define REG_A5XX_GPMU_LEAKAGE_VTG_COEFF_2_3 0x0000ac43
+
+#define REG_A5XX_GPMU_BASE_LEAKAGE 0x0000ac46
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE 0x0000ac60
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_STATUS 0x0000ac61
+
+#define REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK 0x0000ac62
+
+#define REG_A5XX_GPMU_GPMU_PWR_THRESHOLD 0x0000ac80
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL 0x0000acc4
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_IDLE_FULL_LM 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK 0x00000030
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT 4
+/* Pack the STATE_OF_CHILD field into bits [5:4] of LLM_GLM_SLEEP_CTRL. */
+static inline uint32_t A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD(uint32_t val)
+{
+ return ((val) << A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__SHIFT) & A5XX_GPMU_GPMU_LLM_GLM_SLEEP_CTRL_STATE_OF_CHILD__MASK;
+}
+
+#define REG_A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS 0x0000acc5
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_IDLE_FULL_ACK 0x00000001
+#define A5XX_GPMU_GPMU_LLM_GLM_SLEEP_STATUS_WAKEUP_ACK 0x00000002
+
+#define REG_A5XX_GDPM_CONFIG1 0x0000b80c
+
+#define REG_A5XX_GDPM_CONFIG2 0x0000b80d
+
+#define REG_A5XX_GDPM_INT_EN 0x0000b80f
+
+#define REG_A5XX_GDPM_INT_MASK 0x0000b811
+
+#define REG_A5XX_GPMU_BEC_ENABLE 0x0000b9a0
+
+#define REG_A5XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000c41a
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000c41d
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000c41f
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x0000c421
+
+#define REG_A5XX_GPU_CS_ENABLE_REG 0x0000c520
+
+#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
+
+/* GRAS (rasterizer front-end) registers: clipping (CL), setup (SU),
+ * scan-converter (SC) and low-resolution-Z (LRZ) blocks.  The packing
+ * helpers below shift a value into its bitfield and mask it; float
+ * fields go through fui(), which is presumed to return the raw IEEE-754
+ * bit pattern of the float (defined elsewhere in the driver).
+ */
+#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+
+#define REG_A5XX_UNKNOWN_E001 0x0000e001
+
+#define REG_A5XX_UNKNOWN_E004 0x0000e004
+
+#define REG_A5XX_GRAS_CLEAR_CNTL 0x0000e005
+#define A5XX_GRAS_CLEAR_CNTL_NOT_FASTCLEAR 0x00000001
+
+#define REG_A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x0000e006
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A5XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+/* Viewport transform registers: full-width float fields (raw IEEE bits). */
+#define REG_A5XX_GRAS_CL_VPORT_XOFFSET_0 0x0000e010
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_XSCALE_0 0x0000e011
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YOFFSET_0 0x0000e012
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_YSCALE_0 0x0000e013
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZOFFSET_0 0x0000e014
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A5XX_GRAS_CL_VPORT_ZSCALE_0 0x0000e015
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A5XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A5XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CNTL 0x0000e090
+#define A5XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A5XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+
+/* Point min/max sizes: val*16 packs the size as fixed point with 4
+ * fractional bits into each 16-bit half.
+ */
+#define REG_A5XX_GRAS_SU_POINT_MINMAX 0x0000e091
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A5XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A5XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POINT_SIZE 0x0000e092
+#define A5XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POINT_SIZE__SHIFT 0
+/* NOTE(review): uses an int32_t cast (signed fixed point) where the
+ * MINMAX helpers above cast to uint32_t -- this mirrors the generated
+ * XML description; confirm against the register database if changing.
+ */
+static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E093 0x0000e093
+
+#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
+#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_ALPHA_TEST_ENABLE 0x00000001
+
+/* Polygon offset scale/offset/clamp: raw float bit patterns. */
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_SCALE 0x0000e095
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET 0x0000e096
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x0000e097
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A5XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_DEPTH_BUFFER_INFO 0x0000e098
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_GRAS_SU_CONSERVATIVE_RAS_CNTL 0x0000e099
+
+#define REG_A5XX_GRAS_SC_CNTL 0x0000e0a0
+
+#define REG_A5XX_GRAS_SC_BIN_CNTL 0x0000e0a1
+
+#define REG_A5XX_GRAS_SC_RAS_MSAA_CNTL 0x0000e0a2
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_DEST_MSAA_CNTL 0x0000e0a3
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_GRAS_SC_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_GRAS_SC_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+/* Scissor rectangles: X in bits [14:0], Y in bits [30:16], with an
+ * optional window-offset-disable flag in bit 31.
+ */
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_CNTL 0x0000e0a4
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x0000e0aa
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x0000e0ab
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x0000e0ca
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x0000e0cb
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A5XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_TL 0x0000e0ea
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A5XX_GRAS_SC_WINDOW_SCISSOR_BR 0x0000e0eb
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A5XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
+
+/* Low-resolution-Z buffer registers. */
+#define REG_A5XX_GRAS_LRZ_CNTL 0x0000e100
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_LO 0x0000e101
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_BASE_HI 0x0000e102
+
+#define REG_A5XX_GRAS_LRZ_BUFFER_PITCH 0x0000e103
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x0000e104
+
+#define REG_A5XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x0000e105
+
+/* RB (render backend) control and per-MRT (multiple render target)
+ * registers.  The MRT registers form an array: REG_A5XX_RB_MRT(i)
+ * returns the base of slot i, with each slot 0x7 dwords wide.
+ */
+#define REG_A5XX_RB_CNTL 0x0000e140
+#define A5XX_RB_CNTL_WIDTH__MASK 0x000000ff
+#define A5XX_RB_CNTL_WIDTH__SHIFT 0
+/* Width is programmed pre-shifted right by 5, i.e. in units of 32. */
+static inline uint32_t A5XX_RB_CNTL_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_WIDTH__SHIFT) & A5XX_RB_CNTL_WIDTH__MASK;
+}
+#define A5XX_RB_CNTL_HEIGHT__MASK 0x0001fe00
+#define A5XX_RB_CNTL_HEIGHT__SHIFT 9
+/* Height likewise in units of 32. */
+static inline uint32_t A5XX_RB_CNTL_HEIGHT(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_CNTL_HEIGHT__SHIFT) & A5XX_RB_CNTL_HEIGHT__MASK;
+}
+#define A5XX_RB_CNTL_BYPASS 0x00020000
+
+#define REG_A5XX_RB_RENDER_CNTL 0x0000e141
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK 0x00ff0000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT 16
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS__MASK;
+}
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK 0xff000000
+#define A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT 24
+static inline uint32_t A5XX_RB_RENDER_CNTL_ENABLED_MRTS2(uint32_t val)
+{
+ return ((val) << A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__SHIFT) & A5XX_RB_RENDER_CNTL_ENABLED_MRTS2__MASK;
+}
+
+#define REG_A5XX_RB_RAS_MSAA_CNTL 0x0000e142
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A5XX_RB_DEST_MSAA_CNTL 0x0000e143
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A5XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A5XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A5XX_RB_RENDER_CONTROL0 0x0000e144
+#define A5XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A5XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A5XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A5XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+
+#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+
+#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_RB_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_RB_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_RB_FS_OUTPUT_CNTL_FRAG_WRITES_Z 0x00000020
+
+/* Per-MRT register array: stride of 0x7 dwords per render target. */
+static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
+#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x0000e151 + 0x7*i0; }
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A5XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x0000e152 + 0x7*i0; }
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A5XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A5XX_RB_MRT_PITCH(uint32_t i0) { return 0x0000e153 + 0x7*i0; }
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK 0x0007ffff
+#define A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT 0
+/* Pitch programmed pre-shifted right by 4, i.e. in units of 16 bytes. */
+static inline uint32_t A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH(uint32_t val)
+{
+ return ((val >> 4) << A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__SHIFT) & A5XX_RB_MRT_PITCH_COLOR_BUF_PITCH__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x0000e154 + 0x7*i0; }
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK 0x01ffffff
+#define A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT 0
+/* Buffer size programmed pre-shifted right by 6, i.e. in units of 64. */
+static inline uint32_t A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE(uint32_t val)
+{
+ return ((val >> 6) << A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__SHIFT) & A5XX_RB_MRT_ARRAY_PITCH_COLOR_BUF_SIZE__MASK;
+}
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x0000e155 + 0x7*i0; }
+
+static inline uint32_t REG_A5XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x0000e156 + 0x7*i0; }
+/* Blend-constant color registers.  Each channel has a packed register
+ * (8-bit UINT, 8-bit SINT, and a 16-bit half-float field converted via
+ * util_float_to_half()) plus a separate full 32-bit float register.
+ */
+#define REG_A5XX_RB_BLEND_RED 0x0000e1a0
+#define A5XX_RB_BLEND_RED_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_RED_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_UINT__SHIFT) & A5XX_RB_BLEND_RED_UINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_RED_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_RED_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_RED_SINT__SHIFT) & A5XX_RB_BLEND_RED_SINT__MASK;
+}
+#define A5XX_RB_BLEND_RED_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_RED_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_RED_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_RED_FLOAT__SHIFT) & A5XX_RB_BLEND_RED_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_RED_F32 0x0000e1a1
+#define A5XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_RED_F32__SHIFT) & A5XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN 0x0000e1a2
+#define A5XX_RB_BLEND_GREEN_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_GREEN_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_UINT__SHIFT) & A5XX_RB_BLEND_GREEN_UINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_GREEN_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_GREEN_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_GREEN_SINT__SHIFT) & A5XX_RB_BLEND_GREEN_SINT__MASK;
+}
+#define A5XX_RB_BLEND_GREEN_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_GREEN_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_GREEN_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_GREEN_FLOAT__SHIFT) & A5XX_RB_BLEND_GREEN_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_GREEN_F32 0x0000e1a3
+#define A5XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_GREEN_F32__SHIFT) & A5XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE 0x0000e1a4
+#define A5XX_RB_BLEND_BLUE_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_BLUE_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_UINT__SHIFT) & A5XX_RB_BLEND_BLUE_UINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_BLUE_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_BLUE_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_BLUE_SINT__SHIFT) & A5XX_RB_BLEND_BLUE_SINT__MASK;
+}
+#define A5XX_RB_BLEND_BLUE_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_BLUE_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_BLUE_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_BLUE_FLOAT__SHIFT) & A5XX_RB_BLEND_BLUE_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_BLUE_F32 0x0000e1a5
+#define A5XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_BLUE_F32__SHIFT) & A5XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA 0x0000e1a6
+#define A5XX_RB_BLEND_ALPHA_UINT__MASK 0x000000ff
+#define A5XX_RB_BLEND_ALPHA_UINT__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_UINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_UINT__SHIFT) & A5XX_RB_BLEND_ALPHA_UINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_SINT__MASK 0x0000ff00
+#define A5XX_RB_BLEND_ALPHA_SINT__SHIFT 8
+static inline uint32_t A5XX_RB_BLEND_ALPHA_SINT(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_ALPHA_SINT__SHIFT) & A5XX_RB_BLEND_ALPHA_SINT__MASK;
+}
+#define A5XX_RB_BLEND_ALPHA_FLOAT__MASK 0xffff0000
+#define A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_ALPHA_FLOAT(float val)
+{
+ return ((util_float_to_half(val)) << A5XX_RB_BLEND_ALPHA_FLOAT__SHIFT) & A5XX_RB_BLEND_ALPHA_FLOAT__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_ALPHA_F32 0x0000e1a7
+#define A5XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A5XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A5XX_RB_BLEND_ALPHA_F32__SHIFT) & A5XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
+#define REG_A5XX_RB_ALPHA_CONTROL 0x0000e1a8
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A5XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A5XX_RB_BLEND_CNTL 0x0000e1a9
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_PLANE_CNTL 0x0000e1b0
+#define A5XX_RB_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
+
+#define REG_A5XX_RB_DEPTH_CNTL 0x0000e1b1
+#define A5XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A5XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A5XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A5XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A5XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
+#define REG_A5XX_RB_DEPTH_BUFFER_INFO 0x0000e1b2
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a5xx_depth_format val)
+{
+ return ((val) << A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A5XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_LO 0x0000e1b3
+
+#define REG_A5XX_RB_DEPTH_BUFFER_BASE_HI 0x0000e1b4
+
+#define REG_A5XX_RB_DEPTH_BUFFER_PITCH 0x0000e1b5
+#define A5XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x0000e1b6
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A5XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_CONTROL 0x0000e1c0
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A5XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A5XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A5XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A5XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
+#define REG_A5XX_RB_STENCIL_INFO 0x0000e1c2
+#define A5XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK 0xfffff000
+#define A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT 12
+static inline uint32_t A5XX_RB_STENCIL_INFO_STENCIL_BASE(uint32_t val)
+{
+ return ((val >> 12) << A5XX_RB_STENCIL_INFO_STENCIL_BASE__SHIFT) & A5XX_RB_STENCIL_INFO_STENCIL_BASE__MASK;
+}
+
+#define REG_A5XX_UNKNOWN_E1C3 0x0000e1c3
+
+#define REG_A5XX_RB_STENCILREFMASK 0x0000e1c6
+#define A5XX_RB_STENCILREFMASK_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
+}
+
+#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
+#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000 /* bit 31; X/Y occupy bits 0-14 and 16-30 */
+#define A5XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A5XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_X__SHIFT) & A5XX_RB_WINDOW_OFFSET_X__MASK; /* 15-bit field; values > 0x7fff are silently truncated */
+}
+#define A5XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A5XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A5XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_WINDOW_OFFSET_Y__SHIFT) & A5XX_RB_WINDOW_OFFSET_Y__MASK; /* 15-bit field */
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_1 0x0000e211
+#define A5XX_RB_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_1_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A5XX_RB_RESOLVE_CNTL_2 0x0000e212
+#define A5XX_RB_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A5XX_RB_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A5XX_RB_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_X__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_X__MASK;
+}
+#define A5XX_RB_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A5XX_RB_RESOLVE_CNTL_2_Y__SHIFT) & A5XX_RB_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x0000e240
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x0000e241
+
+#define REG_A5XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x0000e242
+
+#define REG_A5XX_VPC_CNTL_0 0x0000e280
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_VPC_CNTL_0_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_VPC_CNTL_0_STRIDE_IN_VPC__SHIFT) & A5XX_VPC_CNTL_0_STRIDE_IN_VPC__MASK;
+}
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x0000e282 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x0000e28a + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x0000e294 + 0x1*i0; }
+
+#define REG_A5XX_VPC_GS_SIV_CNTL 0x0000e298
+
+#define REG_A5XX_VPC_PACK 0x0000e29d
+#define A5XX_VPC_PACK_NUMNONPOSVAR__MASK 0x000000ff
+#define A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A5XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A5XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A5XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+
+#define REG_A5XX_VPC_FS_PRIMITIVEID_CNTL 0x0000e2a0
+
+#define REG_A5XX_VPC_SO_OVERRIDE 0x0000e2a2
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_LO_0 0x0000e2a7
+
+#define REG_A5XX_VPC_SO_BUFFER_BASE_HI_0 0x0000e2a8
+
+#define REG_A5XX_VPC_SO_BUFFER_SIZE_0 0x0000e2a9
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_LO_0 0x0000e2ac
+
+#define REG_A5XX_VPC_SO_FLUSH_BASE_HI_0 0x0000e2ad
+
+#define REG_A5XX_PC_PRIMITIVE_CNTL 0x0000e384
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK 0x0000007f
+#define A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+
+#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
+
+#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+
+#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+
+#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
+
+#define REG_A5XX_VFD_CONTROL_0 0x0000e400
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK 0x0000003f
+#define A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_0_STRMDECINSTRCNT(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__SHIFT) & A5XX_VFD_CONTROL_0_STRMDECINSTRCNT__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_1 0x0000e401
+#define A5XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A5XX_VFD_CONTROL_1_REGID4VTX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A5XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+
+#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+
+#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+
+#define REG_A5XX_VFD_CONTROL_4 0x0000e404
+
+#define REG_A5XX_VFD_CONTROL_5 0x0000e405
+
+#define REG_A5XX_VFD_INDEX_OFFSET 0x0000e408
+
+#define REG_A5XX_VFD_INSTANCE_START_OFFSET 0x0000e409
+
+static inline uint32_t REG_A5XX_VFD_FETCH(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000e40a + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000e40b + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000e40c + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000e40d + 0x4*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000e48a + 0x2*i0; }
+#define A5XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A5XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+
+static inline uint32_t REG_A5XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000e48b + 0x2*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000e4ca + 0x1*i0; }
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A5XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A5XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A5XX_VFD_POWER_CNTL 0x0000e4f0
+
+#define REG_A5XX_SP_SP_CNTL 0x0000e580
+
+#define REG_A5XX_SP_VS_CONTROL_REG 0x0000e584
+#define A5XX_SP_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_FS_CONTROL_REG 0x0000e585
+#define A5XX_SP_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_HS_CONTROL_REG 0x0000e586
+#define A5XX_SP_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_DS_CONTROL_REG 0x0000e587
+#define A5XX_SP_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_GS_CONTROL_REG 0x0000e588
+#define A5XX_SP_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_SP_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_SP_CS_CONFIG 0x0000e589
+
+#define REG_A5XX_SP_VS_CONFIG_MAX_CONST 0x0000e58a
+
+#define REG_A5XX_SP_FS_CONFIG_MAX_CONST 0x0000e58b
+
+#define REG_A5XX_SP_VS_CTRL_REG0 0x0000e590
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_VS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+static inline uint32_t REG_A5XX_SP_VS_OUT(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000e593 + 0x1*i0; }
+#define A5XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A5XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A5XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A5XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A5XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A5XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A5XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000e5a3 + 0x1*i0; }
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A5XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A5XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A5XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A5XX_SP_VS_OBJ_START_LO 0x0000e5ac
+
+#define REG_A5XX_SP_VS_OBJ_START_HI 0x0000e5ad
+
+#define REG_A5XX_SP_FS_CTRL_REG0 0x0000e5c0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_FS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x00100000
+
+#define REG_A5XX_SP_FS_OBJ_START_LO 0x0000e5c3
+
+#define REG_A5XX_SP_FS_OBJ_START_HI 0x0000e5c4
+
+#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
+
+#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
+#define A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_MRT(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_MRT__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK 0x00001fe0
+#define A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT 5
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_DEPTH_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK 0x001fe000
+#define A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT 13
+static inline uint32_t A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_CNTL_SAMPLEMASK_REGID__MASK;
+}
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000e5cb + 0x1*i0; }
+#define A5XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A5XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A5XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+static inline uint32_t REG_A5XX_SP_FS_MRT(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+
+static inline uint32_t REG_A5XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000e5d3 + 0x1*i0; }
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x0000007f
+#define A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
+{
+ return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+
+#define REG_A5XX_SP_CS_CNTL_0 0x0000e5f0
+
+#define REG_A5XX_TPL1_TP_RAS_MSAA_CNTL 0x0000e704
+
+#define REG_A5XX_TPL1_TP_DEST_MSAA_CNTL 0x0000e705
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_LO 0x0000e722
+
+#define REG_A5XX_TPL1_VS_TEX_SAMP_HI 0x0000e723
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_LO 0x0000e72a
+
+#define REG_A5XX_TPL1_VS_TEX_CONST_HI 0x0000e72b
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_LO 0x0000e75a
+
+#define REG_A5XX_TPL1_FS_TEX_CONST_HI 0x0000e75b
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_LO 0x0000e75e
+
+#define REG_A5XX_TPL1_FS_TEX_SAMP_HI 0x0000e75f
+
+#define REG_A5XX_TPL1_TP_FS_ROTATION_CNTL 0x0000e764
+
+#define REG_A5XX_HLSQ_CONTROL_0_REG 0x0000e784
+
+#define REG_A5XX_HLSQ_CONTROL_1_REG 0x0000e785
+
+#define REG_A5XX_HLSQ_CONTROL_2_REG 0x0000e786
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__MASK 0x000000ff
+#define A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CONTROL_3_REG_REGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_3_REG_REGID__SHIFT) & A5XX_HLSQ_CONTROL_3_REG_REGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_CONTROL_4_REG 0x0000e788
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A5XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A5XX_HLSQ_UPDATE_CNTL 0x0000e78a
+
+#define REG_A5XX_HLSQ_VS_CONTROL_REG 0x0000e78b
+#define A5XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_FS_CONTROL_REG 0x0000e78c
+#define A5XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_HS_CONTROL_REG 0x0000e78d
+#define A5XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_DS_CONTROL_REG 0x0000e78e
+#define A5XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_GS_CONTROL_REG 0x0000e78f
+#define A5XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00000001
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x000000fe
+#define A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 1
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00007f00
+#define A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 8
+static inline uint32_t A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A5XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+
+#define REG_A5XX_HLSQ_CS_CONFIG 0x0000e790
+
+#define REG_A5XX_HLSQ_VS_CNTL 0x0000e791
+
+#define REG_A5XX_HLSQ_FS_CNTL 0x0000e792
+
+#define REG_A5XX_HLSQ_CS_CNTL 0x0000e796
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_X 0x0000e7b9
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000e7ba
+
+#define REG_A5XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000e7bb
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_0 0x0000e7b0
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
+
+#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+
+#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
+
+#define REG_A5XX_HLSQ_CS_CNTL_1 0x0000e7b8
+
+#define REG_A5XX_HLSQ_VS_CONSTLEN 0x0000e7c3
+
+#define REG_A5XX_HLSQ_VS_INSTRLEN 0x0000e7c4
+
+#define REG_A5XX_HLSQ_FS_CONSTLEN 0x0000e7d7
+
+#define REG_A5XX_HLSQ_FS_INSTRLEN 0x0000e7d8
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_3 0x0000e7dc
+
+#define REG_A5XX_HLSQ_CONTEXT_SWITCH_CS_SW_4 0x0000e7dd
+
+#define REG_A5XX_TEX_SAMP_0 0x00000000
+#define A5XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A5XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A5XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MAG(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MAG__SHIFT) & A5XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A5XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A5XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A5XX_TEX_SAMP_0_XY_MIN(enum a5xx_tex_filter val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_XY_MIN__SHIFT) & A5XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A5XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_S(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_S__SHIFT) & A5XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A5XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_T(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_T__SHIFT) & A5XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A5XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A5XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A5XX_TEX_SAMP_0_WRAP_R(enum a5xx_tex_clamp val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_WRAP_R__SHIFT) & A5XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A5XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A5XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A5XX_TEX_SAMP_0_ANISO(enum a5xx_tex_aniso val)
+{
+ return ((val) << A5XX_TEX_SAMP_0_ANISO__SHIFT) & A5XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A5XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A5XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A5XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A5XX_TEX_SAMP_0_LOD_BIAS__MASK; /* signed fixed point, 8 fractional bits, packed into a 13-bit field (bits 19-31) */
+}
+
+#define REG_A5XX_TEX_SAMP_1 0x00000001
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A5XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A5XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A5XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A5XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A5XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A5XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A5XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A5XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A5XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A5XX_TEX_SAMP_1_MAX_LOD__MASK; /* unsigned fixed point, 8 fractional bits, 12-bit field */
+}
+#define A5XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A5XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A5XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A5XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A5XX_TEX_SAMP_1_MIN_LOD__MASK; /* unsigned fixed point, 8 fractional bits, 12-bit field */
+}
+
+#define REG_A5XX_TEX_SAMP_2 0x00000002
+
+#define REG_A5XX_TEX_SAMP_3 0x00000003
+
+#define REG_A5XX_TEX_CONST_0 0x00000000
+#define A5XX_TEX_CONST_0_TILED 0x00000001
+#define A5XX_TEX_CONST_0_SRGB 0x00000004
+#define A5XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A5XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_X(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_X__SHIFT) & A5XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A5XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Y(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A5XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_Z(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A5XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A5XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A5XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A5XX_TEX_CONST_0_SWIZ_W(enum a5xx_tex_swiz val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SWIZ_W__SHIFT) & A5XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A5XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_TEX_CONST_0_FMT__SHIFT) & A5XX_TEX_CONST_0_FMT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_1 0x00000001
+#define A5XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A5XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_WIDTH__SHIFT) & A5XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A5XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A5XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A5XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_1_HEIGHT__SHIFT) & A5XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_2 0x00000002
+#define A5XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A5XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_2_FETCHSIZE(enum a5xx_tex_fetchsize val)
+{
+ return ((val) << A5XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A5XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A5XX_TEX_CONST_2_PITCH__MASK 0x1fffff00
+#define A5XX_TEX_CONST_2_PITCH__SHIFT 8
+static inline uint32_t A5XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_2_PITCH__SHIFT) & A5XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A5XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A5XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A5XX_TEX_CONST_2_TYPE(enum a5xx_tex_type val)
+{
+ return ((val) << A5XX_TEX_CONST_2_TYPE__SHIFT) & A5XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_3 0x00000003
+#define A5XX_TEX_CONST_3_LAYERSZ__MASK 0x00003fff
+#define A5XX_TEX_CONST_3_LAYERSZ__SHIFT 0
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ__MASK; /* byte size stored in 4KB units (low 12 bits dropped) */
+}
+#define A5XX_TEX_CONST_3_LAYERSZ2__MASK 0xff800000
+#define A5XX_TEX_CONST_3_LAYERSZ2__SHIFT 23
+static inline uint32_t A5XX_TEX_CONST_3_LAYERSZ2(uint32_t val)
+{
+ return ((val >> 12) << A5XX_TEX_CONST_3_LAYERSZ2__SHIFT) & A5XX_TEX_CONST_3_LAYERSZ2__MASK; /* byte size stored in 4KB units (low 12 bits dropped) */
+}
+
+#define REG_A5XX_TEX_CONST_4 0x00000004
+#define A5XX_TEX_CONST_4_BASE__MASK 0xffffffe0
+#define A5XX_TEX_CONST_4_BASE__SHIFT 5
+static inline uint32_t A5XX_TEX_CONST_4_BASE(uint32_t val)
+{
+ return ((val >> 5) << A5XX_TEX_CONST_4_BASE__SHIFT) & A5XX_TEX_CONST_4_BASE__MASK; /* low 5 bits of address dropped: base presumably must be 32-byte aligned -- TODO confirm */
+}
+
+#define REG_A5XX_TEX_CONST_5 0x00000005
+#define A5XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A5XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_TEX_CONST_5_DEPTH__SHIFT) & A5XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A5XX_TEX_CONST_6 0x00000006
+
+#define REG_A5XX_TEX_CONST_7 0x00000007
+
+#define REG_A5XX_TEX_CONST_8 0x00000008
+
+#define REG_A5XX_TEX_CONST_9 0x00000009
+
+#define REG_A5XX_TEX_CONST_10 0x0000000a
+
+#define REG_A5XX_TEX_CONST_11 0x0000000b
+
+
+#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_counters.c b/drivers/gpu/drm/msm/adreno/a5xx_counters.c
new file mode 100644
index 000000000000..a1b88529b986
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_counters.c
@@ -0,0 +1,825 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "a5xx_gpu.h"
+
+/*
+ * Fixed counters are not selectable, they always count the same thing.
+ * The countable is an index into the group: countable 0 = register 0,
+ * etc and they have no select register
+ */
+/*
+ * Returns the counter id (which for fixed counters is the countable
+ * itself) or -EINVAL if the countable is out of range. The LO/HI
+ * result register offsets are written through the optional @lo/@hi
+ * out pointers. No refcounting is done for fixed counters.
+ */
+static int a5xx_counter_get_fixed(struct msm_gpu *gpu,
+ struct adreno_counter_group *group,
+ u32 countable, u32 *lo, u32 *hi)
+{
+ if (countable >= group->nr_counters)
+ return -EINVAL;
+
+ if (lo)
+ *lo = group->counters[countable].lo;
+ if (hi)
+ *hi = group->counters[countable].hi;
+
+ return countable;
+}
+
+/*
+ * Most counters are selectable in that they can be programmed to count
+ * different events; in most cases there are many more countables than
+ * counters. When a new counter is requested, first walk the list to see if any
+ * other counters in that group are counting the same countable and if so reuse
+ * that counter. If not find the first empty counter in the list and register
+ * that for the desired countable. If we are out of counters too bad so sad.
+ */
+/*
+ * Returns the counter id on success, -EBUSY if every counter in the
+ * group is already in use. @lo/@hi (optional) receive the LO/HI result
+ * register offsets for the assigned counter.
+ */
+static int a5xx_counter_get(struct msm_gpu *gpu,
+ struct adreno_counter_group *group,
+ u32 countable, u32 *lo, u32 *hi)
+{
+ struct adreno_counter *counter;
+ int i, empty = -1;
+
+ spin_lock(&group->lock);
+
+ for (i = 0; i < group->nr_counters; i++) {
+ counter = &group->counters[i];
+
+ /* An in-use counter tracking the same countable is shared */
+ if (counter->refcount) {
+ if (counter->countable == countable) {
+ counter->refcount++;
+
+ if (lo)
+ *lo = counter->lo;
+ if (hi)
+ *hi = counter->hi;
+
+ spin_unlock(&group->lock);
+ return i;
+ }
+ } else
+ /* Remember the first free slot in case nothing matches */
+ empty = (empty == -1) ? i : empty;
+ }
+
+ if (empty == -1) {
+ spin_unlock(&group->lock);
+ return -EBUSY;
+ }
+
+ counter = &group->counters[empty];
+
+ counter->refcount = 1;
+ counter->countable = countable;
+
+ if (lo)
+ *lo = counter->lo;
+ if (hi)
+ *hi = counter->hi;
+
+ spin_unlock(&group->lock);
+
+ /*
+ * Program the select register only if the GPU is powered; otherwise
+ * the restore path will program it on the next power up.
+ * NOTE(review): enable runs after the lock is dropped - confirm a
+ * concurrent save/restore cannot race with this window.
+ */
+ if (pm_runtime_active(&gpu->pdev->dev) && group->funcs.enable)
+ group->funcs.enable(gpu, group, empty, false);
+
+ return empty;
+}
+
+/* The majority of the non-fixed counter selects can be programmed by the CPU */
+/*
+ * Write the countable into the counter's select register over AHB.
+ * @restore is unused here; it exists to match the enable callback
+ * signature shared with a5xx_counter_enable_pm4().
+ */
+static void a5xx_counter_enable_cpu(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid, bool restore)
+{
+ struct adreno_counter *counter = &group->counters[counterid];
+
+ gpu_write(gpu, counter->sel, counter->countable);
+}
+
+/*
+ * Enable a counter whose select register lives in the SP/TP power domain.
+ * When that domain may be power collapsed an AHB write could hit a dead
+ * block, so the select is programmed from the ringbuffer (PM4) where the
+ * CP guarantees the hardware is awake. Submits on ring 0 and synchronously
+ * waits for the GPU to go idle before returning.
+ */
+static void a5xx_counter_enable_pm4(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid, bool restore)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring = gpu->rb[0];
+ struct adreno_counter *counter = &group->counters[counterid];
+
+ /*
+ * If we are restoring the counters after a power cycle we can safely
+ * use AHB to enable the counters because we know SP/TP power collapse
+ * isn't active
+ */
+ if (restore) {
+ a5xx_counter_enable_cpu(gpu, group, counterid, true);
+ return;
+ }
+
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ /*
+ * If HW init hasn't run yet we can use the CPU to program the counter
+ * (and indeed we must because we can't submit commands to the
+ * GPU if it isn't initialized)
+ */
+ if (gpu->needs_hw_init) {
+ a5xx_counter_enable_cpu(gpu, group, counterid, true);
+ mutex_unlock(&gpu->dev->struct_mutex);
+ return;
+ }
+
+ /* Turn off preemption for the duration of this command */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x02);
+
+ /* Turn off protected mode to write to special registers */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 0);
+
+ /* Set the save preemption record for the ring/command */
+ OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+ OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+ OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+ /* Turn back on protected mode */
+ OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+ OUT_RING(ring, 1);
+
+ /* Idle the GPU */
+ OUT_PKT7(ring, CP_WAIT_FOR_IDLE, 0);
+
+ /* Enable the counter */
+ OUT_PKT4(ring, counter->sel, 1);
+ OUT_RING(ring, counter->countable);
+
+ /* Re-enable preemption */
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+ OUT_RING(ring, 0x00);
+
+ OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+ OUT_RING(ring, 0x01);
+
+ OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+ OUT_RING(ring, 0x01);
+
+ /* Yield */
+ OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x00);
+ OUT_RING(ring, 0x01);
+ OUT_RING(ring, 0x01);
+
+ gpu->funcs->flush(gpu, ring);
+
+ /* Preempt into our ring if we need to */
+ a5xx_preempt_trigger(gpu);
+
+ /* wait for the operation to complete */
+ a5xx_idle(gpu, ring);
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+}
+
+/*
+ * GPMU counters are selectable but the selects are muxed together in two
+ * registers
+ */
+static void a5xx_counter_enable_gpmu(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid, bool restore)
+{
+ struct adreno_counter *counter = &group->counters[counterid];
+ u32 reg;
+ int shift;
+
+ /*
+ * The selects for the GPMU counters are grouped together in two
+ * registers, a byte for each counter. Counters 0-3 are located in
+ * GPMU_POWER_COUNTER_SELECT0 and 4-5 are in GPMU_POWER_COUNTER_SELECT1
+ */
+ if (counterid <= 3) {
+ /* 8 bits per counter: counter N selects bits [8N+7:8N] */
+ shift = counterid << 3;
+ reg = REG_A5XX_GPMU_POWER_COUNTER_SELECT_0;
+ } else {
+ shift = (counterid - 4) << 3;
+ reg = REG_A5XX_GPMU_POWER_COUNTER_SELECT_1;
+ }
+
+ /* Read-modify-write so the other counters' selects are preserved */
+ gpu_rmw(gpu, reg, 0xFF << shift, (counter->countable & 0xff) << shift);
+}
+
+/* VBIF counters are selectable but have their own programming process */
+/*
+ * Sequence: pulse the clear register to zero the counter, program the
+ * countable into the select register, then enable the counter.
+ */
+static void a5xx_counter_enable_vbif(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid, bool restore)
+{
+ struct adreno_counter *counter = &group->counters[counterid];
+
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_CLR(counterid), 1);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_CLR(counterid), 0);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_SEL(counterid),
+ counter->countable);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_CNT_EN(counterid), 1);
+}
+
+/*
+ * VBIF power counters are not selectable but need to be cleared/enabled
+ * before use
+ */
+static void a5xx_counter_enable_vbif_power(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid, bool restore)
+{
+ /* Pulse the clear bit, then enable the fixed-function counter */
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 1);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_CLR(counterid), 0);
+ gpu_write(gpu, REG_A5XX_VBIF_PERF_PWR_CNT_EN(counterid), 1);
+}
+
+/* GPMU always on counter needs to be enabled before use */
+/* Resetting the counter also (re)starts it; there is no select register */
+static void a5xx_counter_enable_alwayson_power(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid, bool restore)
+{
+ gpu_write(gpu, REG_A5XX_GPMU_ALWAYS_ON_COUNTER_RESET, 1);
+}
+
+/*
+ * Read the current 64-bit value of a counter. Returns 0 for an invalid
+ * counter id (no error code is reported). If the GPU is power collapsed
+ * the shadow value captured by the group's save callback is returned
+ * instead of touching the hardware.
+ */
+static u64 a5xx_counter_read(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ if (counterid >= group->nr_counters)
+ return 0;
+
+ /* If the power is off, return the shadow value */
+ if (!pm_runtime_active(&gpu->pdev->dev))
+ return group->counters[counterid].value;
+
+ return gpu_read64(gpu, group->counters[counterid].lo,
+ group->counters[counterid].hi);
+}
+
+/*
+ * Release a reference on a selectable counter. A counter whose refcount
+ * drops to zero is considered free and may be handed out for a different
+ * countable by a later a5xx_counter_get() (the countable field itself is
+ * left untouched - refcount == 0 is the "free" marker).
+ */
+static void a5xx_counter_put(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, int counterid)
+{
+ struct adreno_counter *counter;
+
+ if (counterid >= group->nr_counters)
+ return;
+
+ counter = &group->counters[counterid];
+
+ spin_lock(&group->lock);
+ /* Guard against unbalanced puts */
+ if (counter->refcount > 0)
+ counter->refcount--;
+ spin_unlock(&group->lock);
+}
+
+/*
+ * Re-program the select registers for every active counter in a group.
+ * Called from the restore path after a power cycle.
+ * NOTE(review): the enable callback is invoked under the group spinlock;
+ * this is only safe because the restore=true path of
+ * a5xx_counter_enable_pm4() falls back to an AHB write and never sleeps -
+ * confirm no caller passes restore=false here.
+ */
+static void a5xx_counter_group_enable(struct msm_gpu *gpu,
+ struct adreno_counter_group *group, bool restore)
+{
+ int i;
+
+ if (!group || !group->funcs.enable)
+ return;
+
+ spin_lock(&group->lock);
+
+ for (i = 0; i < group->nr_counters; i++) {
+ /* Only counters with an active reference need reprogramming */
+ if (!group->counters[i].refcount)
+ continue;
+
+ group->funcs.enable(gpu, group, i, restore);
+ }
+ spin_unlock(&group->lock);
+}
+
+/*
+ * Push the shadow value saved by a5xx_counter_save() back into the
+ * hardware for every active counter in the group, using the RBBM
+ * PERFCTR_LOAD mechanism. Called after a power cycle so counters appear
+ * continuous across power collapse.
+ */
+static void a5xx_counter_restore(struct msm_gpu *gpu,
+ struct adreno_counter_group *group)
+{
+ int i;
+
+ spin_lock(&group->lock);
+ for (i = 0; i < group->nr_counters; i++) {
+ struct adreno_counter *counter = &group->counters[i];
+ uint32_t bit, offset = counter->load_bit;
+
+ /* Don't load if the counter isn't active or can't be loaded */
+ if (!counter->refcount)
+ continue;
+
+ /*
+ * Each counter has a specific bit in one of four load command
+ * registers. Figure out which register / relative bit to use
+ * for the counter. Plain 32-bit arithmetic is used here:
+ * do_div() is a 64-by-32 division helper and must only be
+ * applied to a u64 lvalue, not a uint32_t.
+ */
+ bit = offset % 32;
+ offset /= 32;
+
+ /* Write the counter value */
+ gpu_write64(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_LO,
+ REG_A5XX_RBBM_PERFCTR_LOAD_VALUE_HI,
+ counter->value);
+
+ /*
+ * Write the load bit to load the counter - the command register
+ * will get reset to 0 after the operation completes
+ */
+ gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_LOAD_CMD0 + offset,
+ (1 << bit));
+ }
+ spin_unlock(&group->lock);
+}
+
+/*
+ * Snapshot the current hardware value of every active counter in the
+ * group into its shadow field. Must be called while the GPU is still
+ * powered; a5xx_counter_read() serves the shadow value once power is off
+ * and a5xx_counter_restore() reloads it on power up.
+ */
+static void a5xx_counter_save(struct msm_gpu *gpu,
+ struct adreno_counter_group *group)
+{
+ int i;
+
+ spin_lock(&group->lock);
+ for (i = 0; i < group->nr_counters; i++) {
+ struct adreno_counter *counter = &group->counters[i];
+
+ if (counter->refcount > 0)
+ counter->value = gpu_read64(gpu, counter->lo,
+ counter->hi);
+ }
+ spin_unlock(&group->lock);
+}
+
+/*
+ * Counter tables. Positional initializers: LO result register, HI result
+ * register, then (for selectable counters) the select register and the
+ * RBBM PERFCTR_LOAD bit used by a5xx_counter_restore().
+ */
+static struct adreno_counter a5xx_counters_alwayson[1] = {
+ { REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+ REG_A5XX_RBBM_ALWAYSON_COUNTER_HI },
+};
+
+static struct adreno_counter a5xx_counters_ccu[] = {
+ { REG_A5XX_RBBM_PERFCTR_CCU_0_LO, REG_A5XX_RBBM_PERFCTR_CCU_0_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_0, 40 },
+ { REG_A5XX_RBBM_PERFCTR_CCU_1_LO, REG_A5XX_RBBM_PERFCTR_CCU_1_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_1, 41 },
+ { REG_A5XX_RBBM_PERFCTR_CCU_2_LO, REG_A5XX_RBBM_PERFCTR_CCU_2_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_2, 42 },
+ { REG_A5XX_RBBM_PERFCTR_CCU_3_LO, REG_A5XX_RBBM_PERFCTR_CCU_3_HI,
+ REG_A5XX_RB_PERFCTR_CCU_SEL_3, 43 },
+};
+
+static struct adreno_counter a5xx_counters_cmp[] = {
+ { REG_A5XX_RBBM_PERFCTR_CMP_0_LO, REG_A5XX_RBBM_PERFCTR_CMP_0_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_0, 94 },
+ { REG_A5XX_RBBM_PERFCTR_CMP_1_LO, REG_A5XX_RBBM_PERFCTR_CMP_1_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_1, 95 },
+ { REG_A5XX_RBBM_PERFCTR_CMP_2_LO, REG_A5XX_RBBM_PERFCTR_CMP_2_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_2, 96 },
+ { REG_A5XX_RBBM_PERFCTR_CMP_3_LO, REG_A5XX_RBBM_PERFCTR_CMP_3_HI,
+ REG_A5XX_RB_PERFCTR_CMP_SEL_3, 97 },
+};
+
+static struct adreno_counter a5xx_counters_cp[] = {
+ { REG_A5XX_RBBM_PERFCTR_CP_0_LO, REG_A5XX_RBBM_PERFCTR_CP_0_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_0, 0 },
+ { REG_A5XX_RBBM_PERFCTR_CP_1_LO, REG_A5XX_RBBM_PERFCTR_CP_1_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_1, 1},
+ { REG_A5XX_RBBM_PERFCTR_CP_2_LO, REG_A5XX_RBBM_PERFCTR_CP_2_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_2, 2 },
+ { REG_A5XX_RBBM_PERFCTR_CP_3_LO, REG_A5XX_RBBM_PERFCTR_CP_3_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_3, 3 },
+ { REG_A5XX_RBBM_PERFCTR_CP_4_LO, REG_A5XX_RBBM_PERFCTR_CP_4_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_4, 4 },
+ { REG_A5XX_RBBM_PERFCTR_CP_5_LO, REG_A5XX_RBBM_PERFCTR_CP_5_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_5, 5 },
+ { REG_A5XX_RBBM_PERFCTR_CP_6_LO, REG_A5XX_RBBM_PERFCTR_CP_6_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_6, 6 },
+ { REG_A5XX_RBBM_PERFCTR_CP_7_LO, REG_A5XX_RBBM_PERFCTR_CP_7_HI,
+ REG_A5XX_CP_PERFCTR_CP_SEL_7, 7 },
+};
+
+/* HLSQ counters live in the SP/TP power domain (enabled via PM4) */
+static struct adreno_counter a5xx_counters_hlsq[] = {
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_0_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_0_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_0, 28 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_1_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_1_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_1, 29 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_2_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_2_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_2, 30 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_3_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_3_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_3, 31 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_4_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_4_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_4, 32 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_5_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_5_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_5, 33 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_6_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_6_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_6, 34 },
+ { REG_A5XX_RBBM_PERFCTR_HLSQ_7_LO, REG_A5XX_RBBM_PERFCTR_HLSQ_7_HI,
+ REG_A5XX_HLSQ_PERFCTR_HLSQ_SEL_7, 35 },
+};
+
+static struct adreno_counter a5xx_counters_lrz[] = {
+ { REG_A5XX_RBBM_PERFCTR_LRZ_0_LO, REG_A5XX_RBBM_PERFCTR_LRZ_0_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_0, 90 },
+ { REG_A5XX_RBBM_PERFCTR_LRZ_1_LO, REG_A5XX_RBBM_PERFCTR_LRZ_1_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_1, 91 },
+ { REG_A5XX_RBBM_PERFCTR_LRZ_2_LO, REG_A5XX_RBBM_PERFCTR_LRZ_2_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_2, 92 },
+ { REG_A5XX_RBBM_PERFCTR_LRZ_3_LO, REG_A5XX_RBBM_PERFCTR_LRZ_3_HI,
+ REG_A5XX_GRAS_PERFCTR_LRZ_SEL_3, 93 },
+};
+
+static struct adreno_counter a5xx_counters_pc[] = {
+ { REG_A5XX_RBBM_PERFCTR_PC_0_LO, REG_A5XX_RBBM_PERFCTR_PC_0_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_0, 12 },
+ { REG_A5XX_RBBM_PERFCTR_PC_1_LO, REG_A5XX_RBBM_PERFCTR_PC_1_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_1, 13 },
+ { REG_A5XX_RBBM_PERFCTR_PC_2_LO, REG_A5XX_RBBM_PERFCTR_PC_2_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_2, 14 },
+ { REG_A5XX_RBBM_PERFCTR_PC_3_LO, REG_A5XX_RBBM_PERFCTR_PC_3_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_3, 15 },
+ { REG_A5XX_RBBM_PERFCTR_PC_4_LO, REG_A5XX_RBBM_PERFCTR_PC_4_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_4, 16 },
+ { REG_A5XX_RBBM_PERFCTR_PC_5_LO, REG_A5XX_RBBM_PERFCTR_PC_5_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_5, 17 },
+ { REG_A5XX_RBBM_PERFCTR_PC_6_LO, REG_A5XX_RBBM_PERFCTR_PC_6_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_6, 18 },
+ { REG_A5XX_RBBM_PERFCTR_PC_7_LO, REG_A5XX_RBBM_PERFCTR_PC_7_HI,
+ REG_A5XX_PC_PERFCTR_PC_SEL_7, 19 },
+};
+
+static struct adreno_counter a5xx_counters_ras[] = {
+ { REG_A5XX_RBBM_PERFCTR_RAS_0_LO, REG_A5XX_RBBM_PERFCTR_RAS_0_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_0, 48 },
+ { REG_A5XX_RBBM_PERFCTR_RAS_1_LO, REG_A5XX_RBBM_PERFCTR_RAS_1_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_1, 49 },
+ { REG_A5XX_RBBM_PERFCTR_RAS_2_LO, REG_A5XX_RBBM_PERFCTR_RAS_2_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_2, 50 },
+ { REG_A5XX_RBBM_PERFCTR_RAS_3_LO, REG_A5XX_RBBM_PERFCTR_RAS_3_HI,
+ REG_A5XX_GRAS_PERFCTR_RAS_SEL_3, 51 },
+};
+
+static struct adreno_counter a5xx_counters_rb[] = {
+ { REG_A5XX_RBBM_PERFCTR_RB_0_LO, REG_A5XX_RBBM_PERFCTR_RB_0_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_0, 80 },
+ { REG_A5XX_RBBM_PERFCTR_RB_1_LO, REG_A5XX_RBBM_PERFCTR_RB_1_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_1, 81 },
+ { REG_A5XX_RBBM_PERFCTR_RB_2_LO, REG_A5XX_RBBM_PERFCTR_RB_2_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_2, 82 },
+ { REG_A5XX_RBBM_PERFCTR_RB_3_LO, REG_A5XX_RBBM_PERFCTR_RB_3_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_3, 83 },
+ { REG_A5XX_RBBM_PERFCTR_RB_4_LO, REG_A5XX_RBBM_PERFCTR_RB_4_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_4, 84 },
+ { REG_A5XX_RBBM_PERFCTR_RB_5_LO, REG_A5XX_RBBM_PERFCTR_RB_5_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_5, 85 },
+ { REG_A5XX_RBBM_PERFCTR_RB_6_LO, REG_A5XX_RBBM_PERFCTR_RB_6_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_6, 86 },
+ { REG_A5XX_RBBM_PERFCTR_RB_7_LO, REG_A5XX_RBBM_PERFCTR_RB_7_HI,
+ REG_A5XX_RB_PERFCTR_RB_SEL_7, 87 },
+};
+
+static struct adreno_counter a5xx_counters_rbbm[] = {
+ { REG_A5XX_RBBM_PERFCTR_RBBM_0_LO, REG_A5XX_RBBM_PERFCTR_RBBM_0_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_0, 8 },
+ { REG_A5XX_RBBM_PERFCTR_RBBM_1_LO, REG_A5XX_RBBM_PERFCTR_RBBM_1_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_1, 9 },
+ { REG_A5XX_RBBM_PERFCTR_RBBM_2_LO, REG_A5XX_RBBM_PERFCTR_RBBM_2_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_2, 10 },
+ { REG_A5XX_RBBM_PERFCTR_RBBM_3_LO, REG_A5XX_RBBM_PERFCTR_RBBM_3_HI,
+ REG_A5XX_RBBM_PERFCTR_RBBM_SEL_3, 11 },
+};
+
+/* SP and TP counters live in the SP/TP power domain (enabled via PM4) */
+static struct adreno_counter a5xx_counters_sp[] = {
+ { REG_A5XX_RBBM_PERFCTR_SP_0_LO, REG_A5XX_RBBM_PERFCTR_SP_0_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_0, 68 },
+ { REG_A5XX_RBBM_PERFCTR_SP_1_LO, REG_A5XX_RBBM_PERFCTR_SP_1_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_1, 69 },
+ { REG_A5XX_RBBM_PERFCTR_SP_2_LO, REG_A5XX_RBBM_PERFCTR_SP_2_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_2, 70 },
+ { REG_A5XX_RBBM_PERFCTR_SP_3_LO, REG_A5XX_RBBM_PERFCTR_SP_3_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_3, 71 },
+ { REG_A5XX_RBBM_PERFCTR_SP_4_LO, REG_A5XX_RBBM_PERFCTR_SP_4_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_4, 72 },
+ { REG_A5XX_RBBM_PERFCTR_SP_5_LO, REG_A5XX_RBBM_PERFCTR_SP_5_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_5, 73 },
+ { REG_A5XX_RBBM_PERFCTR_SP_6_LO, REG_A5XX_RBBM_PERFCTR_SP_6_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_6, 74 },
+ { REG_A5XX_RBBM_PERFCTR_SP_7_LO, REG_A5XX_RBBM_PERFCTR_SP_7_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_7, 75 },
+ { REG_A5XX_RBBM_PERFCTR_SP_8_LO, REG_A5XX_RBBM_PERFCTR_SP_8_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_8, 76 },
+ { REG_A5XX_RBBM_PERFCTR_SP_9_LO, REG_A5XX_RBBM_PERFCTR_SP_9_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_9, 77 },
+ { REG_A5XX_RBBM_PERFCTR_SP_10_LO, REG_A5XX_RBBM_PERFCTR_SP_10_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_10, 78 },
+ { REG_A5XX_RBBM_PERFCTR_SP_11_LO, REG_A5XX_RBBM_PERFCTR_SP_11_HI,
+ REG_A5XX_SP_PERFCTR_SP_SEL_11, 79 },
+};
+
+static struct adreno_counter a5xx_counters_tp[] = {
+ { REG_A5XX_RBBM_PERFCTR_TP_0_LO, REG_A5XX_RBBM_PERFCTR_TP_0_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_0, 60 },
+ { REG_A5XX_RBBM_PERFCTR_TP_1_LO, REG_A5XX_RBBM_PERFCTR_TP_1_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_1, 61 },
+ { REG_A5XX_RBBM_PERFCTR_TP_2_LO, REG_A5XX_RBBM_PERFCTR_TP_2_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_2, 62 },
+ { REG_A5XX_RBBM_PERFCTR_TP_3_LO, REG_A5XX_RBBM_PERFCTR_TP_3_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_3, 63 },
+ { REG_A5XX_RBBM_PERFCTR_TP_4_LO, REG_A5XX_RBBM_PERFCTR_TP_4_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_4, 64 },
+ { REG_A5XX_RBBM_PERFCTR_TP_5_LO, REG_A5XX_RBBM_PERFCTR_TP_5_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_5, 65 },
+ { REG_A5XX_RBBM_PERFCTR_TP_6_LO, REG_A5XX_RBBM_PERFCTR_TP_6_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_6, 66 },
+ { REG_A5XX_RBBM_PERFCTR_TP_7_LO, REG_A5XX_RBBM_PERFCTR_TP_7_HI,
+ REG_A5XX_TPL1_PERFCTR_TP_SEL_7, 67 },
+};
+
+static struct adreno_counter a5xx_counters_tse[] = {
+ { REG_A5XX_RBBM_PERFCTR_TSE_0_LO, REG_A5XX_RBBM_PERFCTR_TSE_0_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_0, 44 },
+ { REG_A5XX_RBBM_PERFCTR_TSE_1_LO, REG_A5XX_RBBM_PERFCTR_TSE_1_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_1, 45 },
+ { REG_A5XX_RBBM_PERFCTR_TSE_2_LO, REG_A5XX_RBBM_PERFCTR_TSE_2_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_2, 46 },
+ { REG_A5XX_RBBM_PERFCTR_TSE_3_LO, REG_A5XX_RBBM_PERFCTR_TSE_3_HI,
+ REG_A5XX_GRAS_PERFCTR_TSE_SEL_3, 47 },
+};
+
+static struct adreno_counter a5xx_counters_uche[] = {
+ { REG_A5XX_RBBM_PERFCTR_UCHE_0_LO, REG_A5XX_RBBM_PERFCTR_UCHE_0_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_0, 52 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_1_LO, REG_A5XX_RBBM_PERFCTR_UCHE_1_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_1, 53 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_2_LO, REG_A5XX_RBBM_PERFCTR_UCHE_2_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_2, 54 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_3_LO, REG_A5XX_RBBM_PERFCTR_UCHE_3_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_3, 55 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_4_LO, REG_A5XX_RBBM_PERFCTR_UCHE_4_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_4, 56 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_5_LO, REG_A5XX_RBBM_PERFCTR_UCHE_5_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_5, 57 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_6_LO, REG_A5XX_RBBM_PERFCTR_UCHE_6_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_6, 58 },
+ { REG_A5XX_RBBM_PERFCTR_UCHE_7_LO, REG_A5XX_RBBM_PERFCTR_UCHE_7_HI,
+ REG_A5XX_UCHE_PERFCTR_UCHE_SEL_7, 59 },
+};
+
+static struct adreno_counter a5xx_counters_vfd[] = {
+ { REG_A5XX_RBBM_PERFCTR_VFD_0_LO, REG_A5XX_RBBM_PERFCTR_VFD_0_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_0, 20 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_1_LO, REG_A5XX_RBBM_PERFCTR_VFD_1_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_1, 21 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_2_LO, REG_A5XX_RBBM_PERFCTR_VFD_2_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_2, 22 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_3_LO, REG_A5XX_RBBM_PERFCTR_VFD_3_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_3, 23 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_4_LO, REG_A5XX_RBBM_PERFCTR_VFD_4_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_4, 24 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_5_LO, REG_A5XX_RBBM_PERFCTR_VFD_5_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_5, 25 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_6_LO, REG_A5XX_RBBM_PERFCTR_VFD_6_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_6, 26 },
+ { REG_A5XX_RBBM_PERFCTR_VFD_7_LO, REG_A5XX_RBBM_PERFCTR_VFD_7_HI,
+ REG_A5XX_VFD_PERFCTR_VFD_SEL_7, 27 },
+};
+
+static struct adreno_counter a5xx_counters_vpc[] = {
+ { REG_A5XX_RBBM_PERFCTR_VPC_0_LO, REG_A5XX_RBBM_PERFCTR_VPC_0_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_0, 36 },
+ { REG_A5XX_RBBM_PERFCTR_VPC_1_LO, REG_A5XX_RBBM_PERFCTR_VPC_1_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_1, 37 },
+ { REG_A5XX_RBBM_PERFCTR_VPC_2_LO, REG_A5XX_RBBM_PERFCTR_VPC_2_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_2, 38 },
+ { REG_A5XX_RBBM_PERFCTR_VPC_3_LO, REG_A5XX_RBBM_PERFCTR_VPC_3_HI,
+ REG_A5XX_VPC_PERFCTR_VPC_SEL_3, 39 },
+};
+
+static struct adreno_counter a5xx_counters_vsc[] = {
+ { REG_A5XX_RBBM_PERFCTR_VSC_0_LO, REG_A5XX_RBBM_PERFCTR_VSC_0_HI,
+ REG_A5XX_VSC_PERFCTR_VSC_SEL_0, 88 },
+ { REG_A5XX_RBBM_PERFCTR_VSC_1_LO, REG_A5XX_RBBM_PERFCTR_VSC_1_HI,
+ REG_A5XX_VSC_PERFCTR_VSC_SEL_1, 89 },
+};
+
+/*
+ * Power counter tables: { LO, HI, SEL } with no RBBM load bit - these
+ * groups are not saved/restored across power collapse (see the
+ * POWER_COUNTER_GROUP macro below).
+ */
+static struct adreno_counter a5xx_counters_power_ccu[] = {
+ { REG_A5XX_CCU_POWER_COUNTER_0_LO, REG_A5XX_CCU_POWER_COUNTER_0_HI,
+ REG_A5XX_RB_POWERCTR_CCU_SEL_0 },
+ { REG_A5XX_CCU_POWER_COUNTER_1_LO, REG_A5XX_CCU_POWER_COUNTER_1_HI,
+ REG_A5XX_RB_POWERCTR_CCU_SEL_1 },
+};
+
+static struct adreno_counter a5xx_counters_power_cp[] = {
+ { REG_A5XX_CP_POWER_COUNTER_0_LO, REG_A5XX_CP_POWER_COUNTER_0_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_0 },
+ { REG_A5XX_CP_POWER_COUNTER_1_LO, REG_A5XX_CP_POWER_COUNTER_1_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_1 },
+ { REG_A5XX_CP_POWER_COUNTER_2_LO, REG_A5XX_CP_POWER_COUNTER_2_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_2 },
+ { REG_A5XX_CP_POWER_COUNTER_3_LO, REG_A5XX_CP_POWER_COUNTER_3_HI,
+ REG_A5XX_CP_POWERCTR_CP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_rb[] = {
+ { REG_A5XX_RB_POWER_COUNTER_0_LO, REG_A5XX_RB_POWER_COUNTER_0_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_0 },
+ { REG_A5XX_RB_POWER_COUNTER_1_LO, REG_A5XX_RB_POWER_COUNTER_1_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_1 },
+ { REG_A5XX_RB_POWER_COUNTER_2_LO, REG_A5XX_RB_POWER_COUNTER_2_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_2 },
+ { REG_A5XX_RB_POWER_COUNTER_3_LO, REG_A5XX_RB_POWER_COUNTER_3_HI,
+ REG_A5XX_RB_POWERCTR_RB_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_sp[] = {
+ { REG_A5XX_SP_POWER_COUNTER_0_LO, REG_A5XX_SP_POWER_COUNTER_0_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_0 },
+ { REG_A5XX_SP_POWER_COUNTER_1_LO, REG_A5XX_SP_POWER_COUNTER_1_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_1 },
+ { REG_A5XX_SP_POWER_COUNTER_2_LO, REG_A5XX_SP_POWER_COUNTER_2_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_2 },
+ { REG_A5XX_SP_POWER_COUNTER_3_LO, REG_A5XX_SP_POWER_COUNTER_3_HI,
+ REG_A5XX_SP_POWERCTR_SP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_tp[] = {
+ { REG_A5XX_TP_POWER_COUNTER_0_LO, REG_A5XX_TP_POWER_COUNTER_0_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_0 },
+ { REG_A5XX_TP_POWER_COUNTER_1_LO, REG_A5XX_TP_POWER_COUNTER_1_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_1 },
+ { REG_A5XX_TP_POWER_COUNTER_2_LO, REG_A5XX_TP_POWER_COUNTER_2_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_2 },
+ { REG_A5XX_TP_POWER_COUNTER_3_LO, REG_A5XX_TP_POWER_COUNTER_3_HI,
+ REG_A5XX_TPL1_POWERCTR_TP_SEL_3 },
+};
+
+static struct adreno_counter a5xx_counters_power_uche[] = {
+ { REG_A5XX_UCHE_POWER_COUNTER_0_LO, REG_A5XX_UCHE_POWER_COUNTER_0_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_0 },
+ { REG_A5XX_UCHE_POWER_COUNTER_1_LO, REG_A5XX_UCHE_POWER_COUNTER_1_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_1 },
+ { REG_A5XX_UCHE_POWER_COUNTER_2_LO, REG_A5XX_UCHE_POWER_COUNTER_2_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_2 },
+ { REG_A5XX_UCHE_POWER_COUNTER_3_LO, REG_A5XX_UCHE_POWER_COUNTER_3_HI,
+ REG_A5XX_UCHE_POWERCTR_UCHE_SEL_3 },
+};
+
+/* VBIF/GPMU counters have dedicated enable paths and no select field here */
+static struct adreno_counter a5xx_counters_vbif[] = {
+ { REG_A5XX_VBIF_PERF_CNT_LOW0, REG_A5XX_VBIF_PERF_CNT_HIGH0 },
+ { REG_A5XX_VBIF_PERF_CNT_LOW1, REG_A5XX_VBIF_PERF_CNT_HIGH1 },
+ { REG_A5XX_VBIF_PERF_CNT_LOW2, REG_A5XX_VBIF_PERF_CNT_HIGH2 },
+ { REG_A5XX_VBIF_PERF_CNT_LOW3, REG_A5XX_VBIF_PERF_CNT_HIGH3 },
+};
+
+static struct adreno_counter a5xx_counters_gpmu[] = {
+ { REG_A5XX_GPMU_POWER_COUNTER_0_LO, REG_A5XX_GPMU_POWER_COUNTER_0_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_1_LO, REG_A5XX_GPMU_POWER_COUNTER_1_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_2_LO, REG_A5XX_GPMU_POWER_COUNTER_2_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_3_LO, REG_A5XX_GPMU_POWER_COUNTER_3_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_4_LO, REG_A5XX_GPMU_POWER_COUNTER_4_HI },
+ { REG_A5XX_GPMU_POWER_COUNTER_5_LO, REG_A5XX_GPMU_POWER_COUNTER_5_HI },
+};
+
+static struct adreno_counter a5xx_counters_vbif_power[] = {
+ { REG_A5XX_VBIF_PERF_PWR_CNT_LOW0, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH0 },
+ { REG_A5XX_VBIF_PERF_PWR_CNT_LOW1, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH1 },
+ { REG_A5XX_VBIF_PERF_PWR_CNT_LOW2, REG_A5XX_VBIF_PERF_PWR_CNT_HIGH2 },
+};
+
+static struct adreno_counter a5xx_counters_alwayson_power[] = {
+ { REG_A5XX_GPMU_ALWAYS_ON_COUNTER_LO,
+ REG_A5XX_GPMU_ALWAYS_ON_COUNTER_HI },
+};
+
+/*
+ * Define a static adreno_counter_group _n backed by counter array _a with
+ * the given get/enable/put/save/restore ops; all groups share
+ * a5xx_counter_read. The spinlock initializer must reference the macro
+ * parameter _n (the original used the non-existent token "_name", which
+ * gave every group the same literal "_name.lock" lockdep class name).
+ */
+#define DEFINE_COUNTER_GROUP(_n, _a, _get, _enable, _put, _save, _restore) \
+static struct adreno_counter_group _n = { \
+ .counters = _a, \
+ .nr_counters = ARRAY_SIZE(_a), \
+ .lock = __SPIN_LOCK_UNLOCKED(_n.lock), \
+ .funcs = { \
+ .get = _get, \
+ .enable = _enable, \
+ .read = a5xx_counter_read, \
+ .put = _put, \
+ .save = _save, \
+ .restore = _restore \
+ }, \
+}
+
+/* Standard group: select programmed over AHB, saved/restored across power */
+#define COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+ _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \
+ a5xx_counter_save, a5xx_counter_restore)
+
+/* SP/TP group: select programmed via PM4 (SP/TP power collapse safe) */
+#define SPTP_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+ _array, a5xx_counter_get, a5xx_counter_enable_pm4, a5xx_counter_put, \
+ a5xx_counter_save, a5xx_counter_restore)
+
+/* Power group: selectable but not saved/restored across power collapse */
+#define POWER_COUNTER_GROUP(_name, _array) DEFINE_COUNTER_GROUP(_name, \
+ _array, a5xx_counter_get, a5xx_counter_enable_cpu, a5xx_counter_put, \
+ NULL, NULL)
+
+/* "standard" counters */
+COUNTER_GROUP(a5xx_counter_group_cp, a5xx_counters_cp);
+COUNTER_GROUP(a5xx_counter_group_rbbm, a5xx_counters_rbbm);
+COUNTER_GROUP(a5xx_counter_group_pc, a5xx_counters_pc);
+COUNTER_GROUP(a5xx_counter_group_vfd, a5xx_counters_vfd);
+COUNTER_GROUP(a5xx_counter_group_vpc, a5xx_counters_vpc);
+COUNTER_GROUP(a5xx_counter_group_ccu, a5xx_counters_ccu);
+COUNTER_GROUP(a5xx_counter_group_cmp, a5xx_counters_cmp);
+COUNTER_GROUP(a5xx_counter_group_tse, a5xx_counters_tse);
+COUNTER_GROUP(a5xx_counter_group_ras, a5xx_counters_ras);
+COUNTER_GROUP(a5xx_counter_group_uche, a5xx_counters_uche);
+COUNTER_GROUP(a5xx_counter_group_rb, a5xx_counters_rb);
+COUNTER_GROUP(a5xx_counter_group_vsc, a5xx_counters_vsc);
+COUNTER_GROUP(a5xx_counter_group_lrz, a5xx_counters_lrz);
+
+/* SP/TP counters */
+SPTP_COUNTER_GROUP(a5xx_counter_group_hlsq, a5xx_counters_hlsq);
+SPTP_COUNTER_GROUP(a5xx_counter_group_tp, a5xx_counters_tp);
+SPTP_COUNTER_GROUP(a5xx_counter_group_sp, a5xx_counters_sp);
+
+/* Power counters */
+POWER_COUNTER_GROUP(a5xx_counter_group_power_ccu, a5xx_counters_power_ccu);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_cp, a5xx_counters_power_cp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_rb, a5xx_counters_power_rb);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_sp, a5xx_counters_power_sp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_tp, a5xx_counters_power_tp);
+POWER_COUNTER_GROUP(a5xx_counter_group_power_uche, a5xx_counters_power_uche);
+
+/* Fixed and special-case groups with custom get/enable combinations */
+DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson, a5xx_counters_alwayson,
+ a5xx_counter_get_fixed, NULL, NULL, NULL, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif, a5xx_counters_vbif,
+ a5xx_counter_get, a5xx_counter_enable_vbif, a5xx_counter_put,
+ NULL, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_gpmu, a5xx_counters_gpmu,
+ a5xx_counter_get, a5xx_counter_enable_gpmu, a5xx_counter_put,
+ NULL, NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_vbif_power, a5xx_counters_vbif_power,
+ a5xx_counter_get_fixed, a5xx_counter_enable_vbif_power, NULL, NULL,
+ NULL);
+DEFINE_COUNTER_GROUP(a5xx_counter_group_alwayson_power,
+ a5xx_counters_alwayson_power, a5xx_counter_get_fixed,
+ a5xx_counter_enable_alwayson_power, NULL, NULL, NULL);
+
+/*
+ * Master table indexed by the MSM_COUNTER_GROUP_* ids (presumably from
+ * the UAPI - confirm against msm_drm.h); handed to the adreno core by
+ * a5xx_counters_init().
+ */
+static const struct adreno_counter_group *a5xx_counter_groups[] = {
+ [MSM_COUNTER_GROUP_ALWAYSON] = &a5xx_counter_group_alwayson,
+ [MSM_COUNTER_GROUP_CCU] = &a5xx_counter_group_ccu,
+ [MSM_COUNTER_GROUP_CMP] = &a5xx_counter_group_cmp,
+ [MSM_COUNTER_GROUP_CP] = &a5xx_counter_group_cp,
+ [MSM_COUNTER_GROUP_HLSQ] = &a5xx_counter_group_hlsq,
+ [MSM_COUNTER_GROUP_LRZ] = &a5xx_counter_group_lrz,
+ [MSM_COUNTER_GROUP_PC] = &a5xx_counter_group_pc,
+ [MSM_COUNTER_GROUP_RAS] = &a5xx_counter_group_ras,
+ [MSM_COUNTER_GROUP_RB] = &a5xx_counter_group_rb,
+ [MSM_COUNTER_GROUP_RBBM] = &a5xx_counter_group_rbbm,
+ [MSM_COUNTER_GROUP_SP] = &a5xx_counter_group_sp,
+ [MSM_COUNTER_GROUP_TP] = &a5xx_counter_group_tp,
+ [MSM_COUNTER_GROUP_TSE] = &a5xx_counter_group_tse,
+ [MSM_COUNTER_GROUP_UCHE] = &a5xx_counter_group_uche,
+ [MSM_COUNTER_GROUP_VFD] = &a5xx_counter_group_vfd,
+ [MSM_COUNTER_GROUP_VPC] = &a5xx_counter_group_vpc,
+ [MSM_COUNTER_GROUP_VSC] = &a5xx_counter_group_vsc,
+ [MSM_COUNTER_GROUP_VBIF] = &a5xx_counter_group_vbif,
+ [MSM_COUNTER_GROUP_GPMU_PWR] = &a5xx_counter_group_gpmu,
+ [MSM_COUNTER_GROUP_CCU_PWR] = &a5xx_counter_group_power_ccu,
+ [MSM_COUNTER_GROUP_CP_PWR] = &a5xx_counter_group_power_cp,
+ [MSM_COUNTER_GROUP_RB_PWR] = &a5xx_counter_group_power_rb,
+ [MSM_COUNTER_GROUP_SP_PWR] = &a5xx_counter_group_power_sp,
+ [MSM_COUNTER_GROUP_TP_PWR] = &a5xx_counter_group_power_tp,
+ [MSM_COUNTER_GROUP_UCHE_PWR] = &a5xx_counter_group_power_uche,
+ [MSM_COUNTER_GROUP_VBIF_PWR] = &a5xx_counter_group_vbif_power,
+ [MSM_COUNTER_GROUP_ALWAYSON_PWR] =
+ &a5xx_counter_group_alwayson_power,
+};
+
+/*
+ * Restore all counter groups after a power cycle: reload the saved
+ * counter values, then reprogram each group's select registers.
+ * The cast drops const because the restore/enable callbacks take a
+ * mutable group (they acquire its spinlock).
+ */
+void a5xx_counters_restore(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) {
+ struct adreno_counter_group *group =
+ (struct adreno_counter_group *) a5xx_counter_groups[i];
+
+ if (group && group->funcs.restore)
+ group->funcs.restore(gpu, group);
+
+ a5xx_counter_group_enable(gpu, group, true);
+ }
+}
+
+
+/*
+ * Snapshot all counter groups before a power cycle so reads while the
+ * GPU is off serve the shadow values (see a5xx_counter_read()).
+ */
+void a5xx_counters_save(struct msm_gpu *gpu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_counter_groups); i++) {
+ struct adreno_counter_group *group =
+ (struct adreno_counter_group *) a5xx_counter_groups[i];
+
+ if (group && group->funcs.save)
+ group->funcs.save(gpu, group);
+ }
+}
+
+/* Register the A5XX counter group table with the adreno core. Returns 0. */
+int a5xx_counters_init(struct adreno_gpu *adreno_gpu)
+{
+ adreno_gpu->counter_groups = a5xx_counter_groups;
+ adreno_gpu->nr_counter_groups = ARRAY_SIZE(a5xx_counter_groups);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
new file mode 100644
index 000000000000..53951a3d355a
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -0,0 +1,1426 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "msm_trace.h"
+#include "a5xx_gpu.h"
+
+#define SECURE_VA_START 0xc0000000
+#define SECURE_VA_SIZE SZ_256M
+
+/*
+ * a5xx_flush() - Publish newly written ringbuffer commands to the CP by
+ * updating the write pointer register.
+ *
+ * The wptr is computed under the ring lock so it is consistent with the
+ * shadow state; the register write is skipped if this is not the current
+ * ring or a preemption is in flight (the preemption code restores wptr
+ * when the ring is switched back in).
+ */
+static void a5xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	uint32_t wptr;
+	unsigned long flags;
+
+	spin_lock_irqsave(&ring->lock, flags);
+
+	/* Copy the shadow to the actual register */
+	ring->cur = ring->next;
+
+	/* Make sure to wrap wptr if we need to */
+	wptr = get_wptr(ring);
+
+	spin_unlock_irqrestore(&ring->lock, flags);
+
+	/* Make sure everything is posted before making a decision */
+	mb();
+
+	/* Update HW if this is the current ring and we are not in preempt */
+	if (a5xx_gpu->cur_ring == ring && !a5xx_in_preempt(a5xx_gpu))
+		gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/*
+ * a5xx_set_pagetable() - Emit the PM4 commands that switch the GPU to the
+ * pagetable backing @aspace.  No-op when the address space has no
+ * per-process TTBR0.
+ */
+static void a5xx_set_pagetable(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+	struct msm_gem_address_space *aspace)
+{
+	struct msm_mmu *mmu = aspace->mmu;
+	struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+	if (!iommu->ttbr0)
+		return;
+
+	/* Turn off protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Turn on APRIV mode to access critical regions */
+	OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+	OUT_RING(ring, 1);
+
+	/* Make sure the ME is synchronized before starting the update */
+	OUT_PKT7(ring, CP_WAIT_FOR_ME, 0);
+
+	/* Execute the table update */
+	OUT_PKT7(ring, CP_SMMU_TABLE_UPDATE, 3);
+	OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+	OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+	OUT_RING(ring, iommu->contextidr);
+
+	/*
+	 * Write the new TTBR0 to the preemption records - this will be used to
+	 * reload the pagetable if the current ring gets preempted out.
+	 */
+	OUT_PKT7(ring, CP_MEM_WRITE, 4);
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, ttbr0)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, ttbr0)));
+	OUT_RING(ring, lower_32_bits(iommu->ttbr0));
+	OUT_RING(ring, upper_32_bits(iommu->ttbr0));
+
+	/* Also write the current contextidr (ASID) */
+	OUT_PKT7(ring, CP_MEM_WRITE, 3);
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, contextidr)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, contextidr)));
+	OUT_RING(ring, iommu->contextidr);
+
+	/* Invalidate the draw state so we start off fresh */
+	OUT_PKT7(ring, CP_SET_DRAW_STATE, 3);
+	OUT_RING(ring, 0x40000);
+	OUT_RING(ring, 1);
+	OUT_RING(ring, 0);
+
+	/* Turn off APRIV */
+	OUT_PKT4(ring, REG_A5XX_CP_CNTL, 1);
+	OUT_RING(ring, 0);
+
+	/* Turn protected mode back on (payload of 1 re-enables it) */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+}
+
+/*
+ * Inline PM4 code to get the current value of the 19.2 Mhz always on counter
+ * and store it at @iova via a CP_REG_TO_MEM packet.
+ */
+static void a5xx_get_ticks(struct msm_ringbuffer *ring, uint64_t iova)
+{
+	/*
+	 * Set bit[30] to make this command a 64 bit write operation.
+	 * bits[18-29] is to specify number of consecutive registers
+	 * to copy, so set this space with 2, since we want to copy
+	 * data from REG_A5XX_RBBM_ALWAYSON_COUNTER_LO and [HI].
+	 */
+	OUT_PKT7(ring, CP_REG_TO_MEM, 3);
+	OUT_RING(ring, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO |
+		(1 << 30) | (2 << 18));
+	OUT_RING(ring, lower_32_bits(iova));
+	OUT_RING(ring, upper_32_bits(iova));
+}
+
+/*
+ * a5xx_submit() - Write a submission into its ringbuffer and kick the CP.
+ * @gpu: the GPU
+ * @submit: validated submission (commands, target ring, profiling state)
+ *
+ * Emits the pagetable switch, preemption bookkeeping, the user IBs, the
+ * fence write and the profiling counter reads, then flushes the ring and
+ * checks whether a preemption should be triggered.
+ */
+static void a5xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+	unsigned int i, ibs = 0;
+	unsigned long flags;
+	u64 ticks;
+	ktime_t time;
+
+	a5xx_set_pagetable(gpu, ring, submit->aspace);
+
+	/* Enable global preemption for this submission */
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+	OUT_RING(ring, 0x02);
+
+	/* Turn off protected mode to write to special registers */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Set the save preemption record for the ring/command */
+	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[submit->ring]));
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	/*
+	 * Enable local preemption for finegrain preemption.  This was
+	 * mistakenly a second CP_PREEMPT_ENABLE_GLOBAL packet; it must be
+	 * CP_PREEMPT_ENABLE_LOCAL (as upstream a5xx_submit does).
+	 */
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+	OUT_RING(ring, 0x02);
+
+	/* Allow CP_CONTEXT_SWITCH_YIELD packets in the IB2 */
+	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+	OUT_RING(ring, 0x02);
+
+	/* Turn on secure mode if the submission is secure */
+	if (submit->secure) {
+		OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+		OUT_RING(ring, 1);
+	}
+
+	/* Record the GPU ticks at command start for kernel side profiling */
+	a5xx_get_ticks(ring,
+		RING_TICKS_IOVA(ring, submit->tick_index, started));
+
+	/* And for the user profiling too if it is enabled */
+	if (submit->profile_buf_iova)
+		a5xx_get_ticks(ring, submit->profile_buf_iova +
+			offsetof(struct drm_msm_gem_submit_profile_buffer,
+				ticks_submitted));
+
+	/* Submit the commands */
+	for (i = 0; i < submit->nr_cmds; i++) {
+		switch (submit->cmd[i].type) {
+		case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+		case MSM_SUBMIT_CMD_PROFILE_BUF:
+			break;
+		case MSM_SUBMIT_CMD_BUF:
+			OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+			OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+			OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+			OUT_RING(ring, submit->cmd[i].size);
+			ibs++;
+			break;
+		}
+	}
+
+	/*
+	 * Write the render mode to NULL (0) to indicate to the CP that the IBs
+	 * are done rendering - otherwise a lucky preemption would start
+	 * replaying from the last checkpoint
+	 */
+	OUT_PKT7(ring, CP_SET_RENDER_MODE, 5);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+	OUT_RING(ring, 0);
+
+	/* Turn off IB level preemptions */
+	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+	OUT_RING(ring, 0x01);
+
+	/* Record the GPU ticks at command retire for kernel side profiling */
+	a5xx_get_ticks(ring,
+		RING_TICKS_IOVA(ring, submit->tick_index, retired));
+
+	/* Record the always on counter after command execution */
+	if (submit->profile_buf_iova)
+		a5xx_get_ticks(ring, submit->profile_buf_iova +
+			offsetof(struct drm_msm_gem_submit_profile_buffer,
+				ticks_retired));
+
+	/* Write the fence to the scratch register */
+	OUT_PKT4(ring, REG_A5XX_CP_SCRATCH_REG(2), 1);
+	OUT_RING(ring, submit->fence);
+
+	/*
+	 * Execute a CACHE_FLUSH_TS event. This will ensure that the
+	 * timestamp is written to the memory and then triggers the interrupt
+	 */
+	OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+	OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+
+	OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+	OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+	OUT_RING(ring, submit->fence);
+
+	if (submit->secure) {
+		OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+		OUT_RING(ring, 0);
+	}
+
+	/* Yield the floor on command completion */
+	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+	/*
+	 * If dword[2:1] are non zero, they specify an address for the CP to
+	 * write the value of dword[3] to on preemption complete. Write 0 to
+	 * skip the write
+	 */
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x00);
+	/* Data value - not used if the address above is 0 */
+	OUT_RING(ring, 0x01);
+	/* Set bit 0 to trigger an interrupt on preempt complete */
+	OUT_RING(ring, 0x01);
+
+	/*
+	 * Get the current kernel time and ticks with interrupts off so we don't
+	 * get interrupted between the operations and skew the numbers
+	 */
+	local_irq_save(flags);
+	ticks = gpu_read64(gpu, REG_A5XX_RBBM_ALWAYSON_COUNTER_LO,
+		REG_A5XX_RBBM_ALWAYSON_COUNTER_HI);
+	time = ktime_get_raw();
+	local_irq_restore(flags);
+
+	if (submit->profile_buf) {
+		struct timespec64 ts = ktime_to_timespec64(time);
+
+		/* Write the data into the user-specified profile buffer */
+		submit->profile_buf->time.tv_sec = ts.tv_sec;
+		submit->profile_buf->time.tv_nsec = ts.tv_nsec;
+		submit->profile_buf->ticks_queued = ticks;
+	}
+
+	trace_msm_submitted(submit, ticks, ktime_to_ns(time));
+
+	a5xx_flush(gpu, ring);
+
+	/* Check to see if we need to start preemption */
+	a5xx_preempt_trigger(gpu);
+}
+
+/*
+ * Per-block hardware clock gating configuration.  Each entry is written
+ * verbatim to the corresponding RBBM clock control register by
+ * a5xx_set_hwcg(); writing 0 instead disables gating for that block.
+ */
+static const struct {
+	u32 offset;
+	u32 value;
+} a5xx_hwcg[] = {
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP0, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP1, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP2, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_SP3, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP0, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP1, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP2, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_SP3, 0x02222220},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP0, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP1, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP2, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_HYST_SP3, 0x0000F3CF},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP0, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP1, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP2, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TP3, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP0, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP1, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP2, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_TP3, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP0, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP1, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP2, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_HYST3_TP3, 0x00007777},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP0, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP1, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP2, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_DELAY3_TP3, 0x00001111},
+	{REG_A5XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_HYST_UCHE, 0x00444444},
+	{REG_A5XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB0, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB1, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB2, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RB3, 0x00222222},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU0, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU1, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU2, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_CCU3, 0x00022220},
+	{REG_A5XX_RBBM_CLOCK_CNTL_RAC, 0x05522222},
+	{REG_A5XX_RBBM_CLOCK_CNTL2_RAC, 0x00505555},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU0, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU1, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU2, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RB_CCU3, 0x04040404},
+	{REG_A5XX_RBBM_CLOCK_HYST_RAC, 0x07444044},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_0, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_1, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_2, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RB_CCU_L1_3, 0x00000002},
+	{REG_A5XX_RBBM_CLOCK_DELAY_RAC, 0x00010011},
+	{REG_A5XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+	{REG_A5XX_RBBM_CLOCK_MODE_GPC, 0x02222222},
+	{REG_A5XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+	{REG_A5XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+	{REG_A5XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+	{REG_A5XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+	{REG_A5XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+	{REG_A5XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+	{REG_A5XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+	{REG_A5XX_RBBM_CLOCK_DELAY_VFD, 0x00002222}
+};
+
+/*
+ * a5xx_set_hwcg() - Enable or disable hardware clock gating.
+ * @gpu: the GPU
+ * @state: true to program the a5xx_hwcg table values, false to write zeros
+ *
+ * Also tracks the current state in the a5xx_gpu flags so other code can
+ * check whether HWCG is active.
+ */
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	unsigned int i;
+
+	for (i = 0; i < ARRAY_SIZE(a5xx_hwcg); i++)
+		gpu_write(gpu, a5xx_hwcg[i].offset,
+			state ? a5xx_hwcg[i].value : 0);
+
+	/* There are a few additional registers just for A540 */
+	if (adreno_is_a540(adreno_gpu)) {
+		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_DELAY_GPMU,
+			state ? 0x770 : 0);
+		gpu_write(gpu, REG_A5XX_RBBM_CLOCK_HYST_GPMU,
+			state ? 0x004 : 0);
+	}
+
+	/* Master clock control and the in-system debugger counter */
+	gpu_write(gpu, REG_A5XX_RBBM_CLOCK_CNTL, state ? 0xAAA8AA00 : 0);
+	gpu_write(gpu, REG_A5XX_RBBM_ISDB_CNT, state ? 0x182 : 0x180);
+
+	if (state)
+		set_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+	else
+		clear_bit(A5XX_HWCG_ENABLED, &a5xx_gpu->flags);
+}
+
+/*
+ * a5xx_me_init() - Send the CP_ME_INIT packet to initialize the micro
+ * engine, then wait for the GPU to go idle.
+ *
+ * Return: 0 on success, -EINVAL if the GPU did not idle after the packet.
+ */
+static int a5xx_me_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	OUT_PKT7(ring, CP_ME_INIT, 8);
+
+	/* Mask of which init dwords below are valid */
+	OUT_RING(ring, 0x0000002F);
+
+	/* Enable multiple hardware contexts */
+	OUT_RING(ring, 0x00000003);
+
+	/* Enable error detection */
+	OUT_RING(ring, 0x20000000);
+
+	/* Don't enable header dump */
+	OUT_RING(ring, 0x00000000);
+	OUT_RING(ring, 0x00000000);
+
+	/* Specify workarounds for various microcode issues */
+	if (adreno_is_a530(adreno_gpu)) {
+		/* Workaround for token end syncs
+		 * Force a WFI after every direct-render 3D mode draw and every
+		 * 2D mode 3 draw
+		 */
+		OUT_RING(ring, 0x0000000B);
+	} else {
+		/* No workarounds enabled */
+		OUT_RING(ring, 0x00000000);
+	}
+
+	OUT_RING(ring, 0x00000000);
+	OUT_RING(ring, 0x00000000);
+
+	gpu->funcs->flush(gpu, ring);
+	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+/*
+ * a5xx_preempt_start() - Bootstrap preemption by programming ring 0's save
+ * record and immediately yielding the floor, so the CP is in a known
+ * preemption state.  Skipped when only one ring is configured.
+ *
+ * Return: 0 on success (or nothing to do), -EINVAL if the GPU did not idle.
+ */
+static int a5xx_preempt_start(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	if (gpu->nr_rings == 1)
+		return 0;
+
+	/* Turn off protected mode to write to special registers */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Set the save preemption record for the ring/command */
+	OUT_PKT4(ring, REG_A5XX_CP_CONTEXT_SWITCH_SAVE_ADDR_LO, 2);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->preempt_iova[ring->id]));
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_GLOBAL, 1);
+	OUT_RING(ring, 0x00);
+
+	OUT_PKT7(ring, CP_PREEMPT_ENABLE_LOCAL, 1);
+	OUT_RING(ring, 0x01);
+
+	OUT_PKT7(ring, CP_YIELD_ENABLE, 1);
+	OUT_RING(ring, 0x01);
+
+	/* Yield the floor on command completion */
+	OUT_PKT7(ring, CP_CONTEXT_SWITCH_YIELD, 4);
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x00);
+	OUT_RING(ring, 0x01);
+	OUT_RING(ring, 0x01);
+
+	gpu->funcs->flush(gpu, ring);
+
+	return a5xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+
+/*
+ * a5xx_ucode_load_bo() - Copy a firmware image into a new GPU-readable BO.
+ * @gpu: the GPU
+ * @fw: the loaded firmware blob
+ * @iova: out-parameter for the GPU address of the new BO
+ *
+ * The first 4 bytes of the image are skipped - presumably a version/size
+ * header in the microcode file format; confirm against the firmware layout.
+ *
+ * Return: the BO on success, an ERR_PTR on allocation/map failure.
+ */
+static struct drm_gem_object *a5xx_ucode_load_bo(struct msm_gpu *gpu,
+		const struct firmware *fw, u64 *iova)
+{
+	struct drm_gem_object *bo;
+	void *ptr;
+
+	ptr = msm_gem_kernel_new(gpu->dev, fw->size - 4,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace, &bo, iova);
+
+	if (IS_ERR(ptr))
+		return ERR_CAST(ptr);
+
+	memcpy(ptr, &fw->data[4], fw->size - 4);
+	return bo;
+}
+
+/*
+ * a5xx_ucode_init() - Load the PM4 and PFP microcode (once) and program the
+ * CP instruction base registers.  The BOs are cached on the a5xx_gpu struct
+ * so repeated hw_init calls do not reallocate them.
+ *
+ * Return: 0 on success, negative errno if either image could not be loaded.
+ */
+static int a5xx_ucode_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	int ret;
+
+	if (!a5xx_gpu->pm4_bo) {
+		a5xx_gpu->pm4_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pm4,
+			&a5xx_gpu->pm4_iova);
+
+		if (IS_ERR(a5xx_gpu->pm4_bo)) {
+			ret = PTR_ERR(a5xx_gpu->pm4_bo);
+			a5xx_gpu->pm4_bo = NULL;
+			dev_err(gpu->dev->dev, "could not allocate PM4: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	if (!a5xx_gpu->pfp_bo) {
+		a5xx_gpu->pfp_bo = a5xx_ucode_load_bo(gpu, adreno_gpu->pfp,
+			&a5xx_gpu->pfp_iova);
+
+		if (IS_ERR(a5xx_gpu->pfp_bo)) {
+			ret = PTR_ERR(a5xx_gpu->pfp_bo);
+			a5xx_gpu->pfp_bo = NULL;
+			dev_err(gpu->dev->dev, "could not allocate PFP: %d\n",
+				ret);
+			return ret;
+		}
+	}
+
+	/* Point the CP at the microcode images */
+	gpu_write64(gpu, REG_A5XX_CP_ME_INSTR_BASE_LO,
+		REG_A5XX_CP_ME_INSTR_BASE_HI, a5xx_gpu->pm4_iova);
+
+	gpu_write64(gpu, REG_A5XX_CP_PFP_INSTR_BASE_LO,
+		REG_A5XX_CP_PFP_INSTR_BASE_HI, a5xx_gpu->pfp_iova);
+
+	return 0;
+}
+
+#ifdef CONFIG_MSM_SUBSYSTEM_RESTART
+
+#include <soc/qcom/subsystem_restart.h>
+#include <soc/qcom/scm.h>
+
+/*
+ * a5xx_zap_shader_init() - Load (or resume) the secure "zap" shader via the
+ * subsystem loader.  The zap shader is the signed blob that takes the GPU
+ * out of secure mode on locked devices.
+ *
+ * Best-effort: failures are logged but not fatal; a5xx_hw_init falls back
+ * to SECVID_TRUST_CNTL if the shader never loaded.
+ */
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	const char *name;
+	void *ptr;
+
+	/* If no zap shader was defined, we'll assume that none is needed */
+	if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,zap-shader", &name))
+		return;
+
+	/*
+	 * If the zap shader has already been loaded then just ask the SCM to
+	 * re-initialize the registers (not needed if CPZ retention is a thing)
+	 */
+	if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+		int ret;
+		struct scm_desc desc = { 0 };
+
+		if (of_property_read_bool(GPU_OF_NODE(gpu),
+			"qcom,cpz-retention"))
+			return;
+
+		/*
+		 * args: {0, 13} - NOTE(review): magic values for the
+		 * zap-shader resume SCM call; confirm against the SCM
+		 * interface documentation.
+		 */
+		desc.args[0] = 0;
+		desc.args[1] = 13;
+		desc.arginfo = SCM_ARGS(2);
+
+		ret = scm_call2(SCM_SIP_FNID(SCM_SVC_BOOT, 0x0A), &desc);
+		if (ret)
+			DRM_ERROR(
+				"%s: zap-shader resume failed with error %d\n",
+				gpu->name, ret);
+
+		return;
+	}
+
+	/* First load: hand the named image to the subsystem loader */
+	ptr = subsystem_get(name);
+
+	if (IS_ERR_OR_NULL(ptr)) {
+		DRM_ERROR("%s: Unable to load the zap shader: %ld\n", gpu->name,
+			IS_ERR(ptr) ? PTR_ERR(ptr) : -ENODEV);
+	} else {
+		set_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags);
+	}
+}
+#else
+/*
+ * Stub used when CONFIG_MSM_SUBSYSTEM_RESTART is not set: warn (once) if
+ * the device tree defines a zap shader we cannot load.
+ */
+static void a5xx_zap_shader_init(struct msm_gpu *gpu)
+{
+	/*
+	 * Nothing to warn about if no zap shader is defined.  The original
+	 * check was inverted: it returned when the property WAS present and
+	 * then claimed a zap shader "is defined" when it was not.
+	 */
+	if (!of_find_property(GPU_OF_NODE(gpu), "qcom,zap-shader", NULL))
+		return;
+
+	DRM_INFO_ONCE("%s: Zap shader is defined but loader isn't available\n",
+		gpu->name);
+}
+#endif
+
+#define A5XX_INT_MASK (A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+ A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+ A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW | \
+ A5XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT | \
+ A5XX_RBBM_INT_0_MASK_CP_SW | \
+ A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+
+/*
+ * a5xx_hw_init() - Bring the GPU hardware to a running state: program QoS,
+ * fault detection, caches, protect registers, secure memory carveout,
+ * microcode, GPMU and preemption, then start the micro engine.
+ *
+ * Return: 0 on success, negative errno on any initialization failure.
+ */
+static int a5xx_hw_init(struct msm_gpu *gpu)
+{
+	struct msm_drm_private *priv = gpu->dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	int ret, bit = 0;
+
+	/* Tighten the DMA latency QoS for the duration of init */
+	pm_qos_update_request(&gpu->pm_qos_req_dma, 101);
+
+	gpu_write(gpu, REG_A5XX_VBIF_ROUND_ROBIN_QOS_ARB, 0x00000003);
+	if (adreno_is_a540(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+
+	/* Make all blocks contribute to the GPU BUSY perf counter */
+	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xFFFFFFFF);
+
+	/* Enable RBBM error reporting bits */
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL0, 0x00000001);
+
+	if (adreno_gpu->quirks & ADRENO_QUIRK_FAULT_DETECT_MASK) {
+		/*
+		 * Mask out the activity signals from RB1-3 to avoid false
+		 * positives
+		 */
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL11,
+			0xF0000000);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL12,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL13,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL14,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL15,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL16,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL17,
+			0xFFFFFFFF);
+		gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_MASK_CNTL18,
+			0xFFFFFFFF);
+	}
+
+	/* Enable fault detection */
+	gpu_write(gpu, REG_A5XX_RBBM_INTERFACE_HANG_INT_CNTL,
+		(1 << 30) | 0xFFFF);
+
+	/* Turn on performance counters */
+	gpu_write(gpu, REG_A5XX_RBBM_PERFCTR_CNTL, 0x01);
+
+	/* Increase VFD cache access so LRZ and other data gets evicted less */
+	gpu_write(gpu, REG_A5XX_UCHE_CACHE_WAYS, 0x02);
+
+	/* Disable L2 bypass in the UCHE */
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_LO, 0xFFFF0000);
+	gpu_write(gpu, REG_A5XX_UCHE_TRAP_BASE_HI, 0x0001FFFF);
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_LO, 0xFFFF0000);
+	gpu_write(gpu, REG_A5XX_UCHE_WRITE_THRU_BASE_HI, 0x0001FFFF);
+
+	/* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+	gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MIN_LO,
+		REG_A5XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+	gpu_write64(gpu, REG_A5XX_UCHE_GMEM_RANGE_MAX_LO,
+		REG_A5XX_UCHE_GMEM_RANGE_MAX_HI,
+		0x00100000 + adreno_gpu->gmem - 1);
+
+	/* CP queue/ROQ sizing */
+	gpu_write(gpu, REG_A5XX_CP_MEQ_THRESHOLDS, 0x40);
+	gpu_write(gpu, REG_A5XX_CP_MERCIU_SIZE, 0x40);
+	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_2, 0x80000060);
+	gpu_write(gpu, REG_A5XX_CP_ROQ_THRESHOLDS_1, 0x40201B16);
+
+	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, (0x400 << 11 | 0x300 << 22));
+
+	if (adreno_gpu->quirks & ADRENO_QUIRK_TWO_PASS_USE_WFI)
+		gpu_rmw(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0, (1 << 8));
+
+	/*
+	 * NOTE(review): this unconditionally overwrites the PC_DBG_ECO_CNTL
+	 * value programmed just above (including the TWO_PASS_USE_WFI bit) -
+	 * confirm this ordering is intended.
+	 */
+	gpu_write(gpu, REG_A5XX_PC_DBG_ECO_CNTL, 0xc0200100);
+
+	/* Enable USE_RETENTION_FLOPS */
+	gpu_write(gpu, REG_A5XX_CP_CHICKEN_DBG, 0x02000000);
+
+	/* Enable ME/PFP split notification */
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL1, 0xA6FFFFFF);
+
+	/* Enable HWCG */
+	a5xx_set_hwcg(gpu, true);
+
+	gpu_write(gpu, REG_A5XX_RBBM_AHB_CNTL2, 0x0000003F);
+
+	/* Set the highest bank bit if specified in the device tree */
+	if (!of_property_read_u32(pdev->dev.of_node, "qcom,highest-bank-bit",
+		&bit)) {
+		if (bit >= 13 && bit <= 16) {
+			bit -= 13;
+
+			gpu_write(gpu, REG_A5XX_TPL1_MODE_CNTL, bit << 7);
+			gpu_write(gpu, REG_A5XX_RB_MODE_CNTL, bit << 1);
+
+			if (adreno_is_a540(adreno_gpu))
+				gpu_write(gpu, REG_A5XX_UCHE_DBG_ECO_CNTL_2,
+					bit);
+		}
+	}
+
+	/* Try to load and initialize the zap shader if applicable */
+	a5xx_zap_shader_init(gpu);
+
+	/* Protect registers from the CP */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT_CNTL, 0x00000007);
+
+	/* RBBM */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(0), ADRENO_PROTECT_RW(0x04, 4));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(1), ADRENO_PROTECT_RW(0x08, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(2), ADRENO_PROTECT_RW(0x10, 16));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(3), ADRENO_PROTECT_RW(0x20, 32));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(4), ADRENO_PROTECT_RW(0x40, 64));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(5), ADRENO_PROTECT_RW(0x80, 64));
+
+	/* Content protect */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(6),
+		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+			16));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(7),
+		ADRENO_PROTECT_RW(REG_A5XX_RBBM_SECVID_TRUST_CNTL, 2));
+
+	/* CP */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(8), ADRENO_PROTECT_RW(0x800, 64));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(9), ADRENO_PROTECT_RW(0x840, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(10), ADRENO_PROTECT_RW(0x880, 32));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(11), ADRENO_PROTECT_RW(0xAA0, 1));
+
+	/* RB */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(12), ADRENO_PROTECT_RW(0xCC0, 1));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(13), ADRENO_PROTECT_RW(0xCF0, 2));
+
+	/* VPC */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(14), ADRENO_PROTECT_RW(0xE68, 8));
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(15), ADRENO_PROTECT_RW(0xE70, 4));
+
+	/* UCHE */
+	gpu_write(gpu, REG_A5XX_CP_PROTECT(16), ADRENO_PROTECT_RW(0xE80, 16));
+
+	if (adreno_is_a530(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_CP_PROTECT(17),
+			ADRENO_PROTECT_RW(0x10000, 0x8000));
+
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_CNTL, 0);
+
+	/* Carve out the secure (content protection) address range */
+	gpu_write64(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+		REG_A5XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, SECURE_VA_START);
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_TRUSTED_SIZE, SECURE_VA_SIZE);
+
+	/* Put the GPU into 64 bit by default */
+	gpu_write(gpu, REG_A5XX_CP_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_VSC_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_GRAS_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_RB_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_PC_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_HLSQ_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_VFD_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_VPC_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_UCHE_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_SP_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_TPL1_ADDR_MODE_CNTL, 0x1);
+	gpu_write(gpu, REG_A5XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL, 0x1);
+
+	a5xx_gpu->timestamp_counter = adreno_get_counter(gpu,
+		MSM_COUNTER_GROUP_CP, 0, NULL, NULL);
+
+	/* Load the GPMU firmware before starting the HW init */
+	a5xx_gpmu_ucode_init(gpu);
+
+	ret = adreno_hw_init(gpu);
+	if (ret)
+		return ret;
+
+	a5xx_preempt_hw_init(gpu);
+
+	ret = a5xx_ucode_init(gpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Unmask the interrupts we service; everything outside A5XX_INT_MASK
+	 * stays masked.  (The original comment said "Disable the interrupts",
+	 * but this write enables the masked set.)
+	 */
+	gpu_write(gpu, REG_A5XX_RBBM_INT_0_MASK, A5XX_INT_MASK);
+
+	/* Clear ME_HALT to start the micro engine */
+	gpu_write(gpu, REG_A5XX_CP_PFP_ME_CNTL, 0);
+	ret = a5xx_me_init(gpu);
+	if (ret)
+		return ret;
+
+	/*
+	 * Send a pipeline event stat to get misbehaving counters to start
+	 * ticking correctly
+	 */
+	if (adreno_is_a530(adreno_gpu)) {
+		OUT_PKT7(gpu->rb[0], CP_EVENT_WRITE, 1);
+		OUT_RING(gpu->rb[0], 0x0F);
+
+		gpu->funcs->flush(gpu, gpu->rb[0]);
+		if (!a5xx_idle(gpu, gpu->rb[0]))
+			return -EINVAL;
+	}
+
+	/*
+	 * If a zap shader was specified in the device tree, assume that we are
+	 * on a secure device that blocks access to the RBBM_SECVID registers
+	 * so we need to use the CP to switch out of secure mode. If a zap
+	 * shader was NOT specified then we assume we are on an unlocked device.
+	 * If we guessed wrong then the access to the register will probably
+	 * cause a XPU violation.
+	 */
+	if (test_bit(A5XX_ZAP_SHADER_LOADED, &a5xx_gpu->flags)) {
+		struct msm_ringbuffer *ring = gpu->rb[0];
+
+		OUT_PKT7(ring, CP_SET_SECURE_MODE, 1);
+		OUT_RING(ring, 0x00000000);
+
+		gpu->funcs->flush(gpu, gpu->rb[0]);
+		if (!a5xx_idle(gpu, gpu->rb[0]))
+			return -EINVAL;
+	} else {
+		/* Print a warning so if we die, we know why */
+		dev_warn_once(gpu->dev->dev,
+			"Zap shader not enabled - using SECVID_TRUST_CNTL instead\n");
+		gpu_write(gpu, REG_A5XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+	}
+
+	/* Next, start the power */
+	ret = a5xx_power_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Last step - yield the ringbuffer */
+	a5xx_preempt_start(gpu);
+
+	/* Relax the DMA latency QoS now that init is complete */
+	pm_qos_update_request(&gpu->pm_qos_req_dma, 501);
+
+	return 0;
+}
+
+/*
+ * a5xx_recover() - Attempt recovery from a GPU hang: dump state, capture a
+ * snapshot for postmortem analysis, pulse the RBBM software reset and hand
+ * off to the generic adreno recovery path.
+ */
+static void a5xx_recover(struct msm_gpu *gpu)
+{
+	adreno_dump_info(gpu);
+
+	msm_gpu_snapshot(gpu, gpu->snapshot);
+
+	/* Reset the GPU so it can work again */
+	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 1);
+	/* Read back so the reset write posts before it is de-asserted */
+	gpu_read(gpu, REG_A5XX_RBBM_SW_RESET_CMD);
+	gpu_write(gpu, REG_A5XX_RBBM_SW_RESET_CMD, 0);
+
+	adreno_recover(gpu);
+}
+
+/*
+ * a5xx_destroy() - Tear down the a5xx GPU: release preemption state, the
+ * microcode and GPMU buffer objects, then the generic adreno state and
+ * finally the a5xx_gpu allocation itself.
+ */
+static void a5xx_destroy(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+
+	DBG("%s", gpu->name);
+
+	a5xx_preempt_fini(gpu);
+
+	/* Release the PM4 microcode BO (iova first, then the reference) */
+	if (a5xx_gpu->pm4_bo) {
+		if (a5xx_gpu->pm4_iova)
+			msm_gem_put_iova(a5xx_gpu->pm4_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->pm4_bo);
+	}
+
+	/* Release the PFP microcode BO */
+	if (a5xx_gpu->pfp_bo) {
+		if (a5xx_gpu->pfp_iova)
+			msm_gem_put_iova(a5xx_gpu->pfp_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->pfp_bo);
+	}
+
+	/* Release the GPMU firmware BO */
+	if (a5xx_gpu->gpmu_bo) {
+		if (a5xx_gpu->gpmu_iova)
+			msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+	}
+
+	adreno_gpu_cleanup(adreno_gpu);
+	kfree(a5xx_gpu);
+}
+
+/* Return true if the RBBM reports idle and no hang interrupt is pending */
+static inline bool _a5xx_check_idle(struct msm_gpu *gpu)
+{
+	u32 status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+	/* Ignore the "HI busy" bit; every other busy bit must be clear */
+	if (status & ~A5XX_RBBM_STATUS_HI_BUSY)
+		return false;
+
+	/*
+	 * Nearly every abnormality ends up pausing the GPU and triggering a
+	 * fault, so watching the hang-detect interrupt alone is sufficient.
+	 */
+	status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+	return (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT) == 0;
+}
+
+/*
+ * a5xx_idle() - Wait for the CP to drain @ring and for the GPU to go idle.
+ *
+ * Return: true if the GPU idled, false (with a diagnostic) on timeout.
+ */
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+	/* wait for CP to drain ringbuffer: */
+	if (!adreno_idle(gpu, ring))
+		return false;
+
+	/* Then poll the hardware status until it reports idle */
+	if (spin_until(_a5xx_check_idle(gpu))) {
+		DRM_ERROR(
+			"%s: timeout waiting for GPU RB %d to idle: status %8.8X rptr/wptr: %4.4X/%4.4X irq %8.8X\n",
+			gpu->name, ring->id,
+			gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+			gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+			gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+			gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS));
+
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * a5xx_cp_err_irq() - Decode CP_INTERRUPT_STATUS and emit a ratelimited
+ * diagnostic for each CP error condition that is set.
+ */
+static void a5xx_cp_err_irq(struct msm_gpu *gpu)
+{
+	u32 status = gpu_read(gpu, REG_A5XX_CP_INTERRUPT_STATUS);
+
+	if (status & A5XX_CP_INT_CP_OPCODE_ERROR) {
+		u32 val;
+
+		gpu_write(gpu, REG_A5XX_CP_PFP_STAT_ADDR, 0);
+
+		/*
+		 * REG_A5XX_CP_PFP_STAT_DATA is indexed, and we want index 1 so
+		 * read it twice
+		 */
+		gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+		val = gpu_read(gpu, REG_A5XX_CP_PFP_STAT_DATA);
+
+		dev_err_ratelimited(gpu->dev->dev, "CP | opcode error | possible opcode=0x%8.8X\n",
+			val);
+	}
+
+	if (status & A5XX_CP_INT_CP_HW_FAULT_ERROR)
+		dev_err_ratelimited(gpu->dev->dev, "CP | HW fault | status=0x%8.8X\n",
+			gpu_read(gpu, REG_A5XX_CP_HW_FAULT));
+
+	if (status & A5XX_CP_INT_CP_DMA_ERROR)
+		dev_err_ratelimited(gpu->dev->dev, "CP | DMA error\n");
+
+	if (status & A5XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+		u32 val = gpu_read(gpu, REG_A5XX_CP_PROTECT_STATUS);
+
+		dev_err_ratelimited(gpu->dev->dev,
+			"CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+			val & (1 << 24) ? "WRITE" : "READ",
+			(val & 0xFFFFF) >> 2, val);
+	}
+
+	if (status & A5XX_CP_INT_CP_AHB_ERROR) {
+		/* Renamed from 'status', which shadowed the outer variable */
+		u32 fault = gpu_read(gpu, REG_A5XX_CP_AHB_FAULT);
+		/*
+		 * Entries 14/15 were previously missing, so an unexpected
+		 * master code indexed a NULL string; fill them in.
+		 */
+		const char *access[16] = { "reserved", "reserved",
+			"timestamp lo", "timestamp hi", "pfp read", "pfp write",
+			"", "", "me read", "me write", "", "", "crashdump read",
+			"crashdump write", "reserved", "reserved" };
+
+		dev_err_ratelimited(gpu->dev->dev,
+			"CP | AHB error | addr=%X access=%s error=%d | status=0x%8.8X\n",
+			fault & 0xFFFFF, access[(fault >> 24) & 0xF],
+			(fault & (1 << 31)), fault);
+	}
+}
+
+/*
+ * a5xx_rbbm_err_irq() - Decode and log RBBM error interrupt sources.
+ * @gpu: the GPU
+ * @status: RBBM_INT_0 status bits as read by the caller
+ *
+ * Only the AHB error is explicitly cleared here; the other bits are
+ * presumably acknowledged by the top-level IRQ handler - confirm against
+ * the caller.
+ */
+static void a5xx_rbbm_err_irq(struct msm_gpu *gpu, u32 status)
+{
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR) {
+		u32 val = gpu_read(gpu, REG_A5XX_RBBM_AHB_ERROR_STATUS);
+
+		dev_err_ratelimited(gpu->dev->dev,
+			"RBBM | AHB bus error | %s | addr=0x%X | ports=0x%X:0x%X\n",
+			val & (1 << 28) ? "WRITE" : "READ",
+			(val & 0xFFFFF) >> 2, (val >> 20) & 0x3,
+			(val >> 24) & 0xF);
+
+		/* Clear the error */
+		gpu_write(gpu, REG_A5XX_RBBM_AHB_CMD, (1 << 4));
+
+		/* Clear the interrupt */
+		gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+			A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+	}
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | AHB transfer timeout\n");
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ME master split | status=0x%X\n",
+			gpu_read(gpu, REG_A5XX_RBBM_AHB_ME_SPLIT_STATUS));
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | PFP master split | status=0x%X\n",
+			gpu_read(gpu, REG_A5XX_RBBM_AHB_PFP_SPLIT_STATUS));
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ETS master split | status=0x%X\n",
+			gpu_read(gpu, REG_A5XX_RBBM_AHB_ETS_SPLIT_STATUS));
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB ASYNC overflow\n");
+
+	if (status & A5XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+		dev_err_ratelimited(gpu->dev->dev, "RBBM | ATB bus overflow\n");
+}
+
+/*
+ * Log an UCHE out-of-bounds access. The faulting address is split across
+ * two 32 bit registers; the HI half must be shifted into the upper word
+ * before OR-ing in the LO half (the original code OR-ed the unshifted HI
+ * value, so the reported address only ever showed 32 bits).
+ */
+static void a5xx_uche_err_irq(struct msm_gpu *gpu)
+{
+	uint64_t addr = (uint64_t) gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_HI) << 32;
+
+	addr |= gpu_read(gpu, REG_A5XX_UCHE_TRAP_LOG_LO);
+
+	dev_err_ratelimited(gpu->dev->dev, "UCHE | Out of bounds access | addr=0x%llX\n",
+		addr);
+}
+
+/* The GPMU signalled a voltage droop - log only, no recovery action */
+static void a5xx_gpmu_err_irq(struct msm_gpu *gpu)
+{
+	dev_err_ratelimited(gpu->dev->dev, "GPMU | voltage droop\n");
+}
+
+/*
+ * The hardware hang detector fired: dump the current CP state (ring,
+ * fence, RB and IB pointers) and schedule GPU recovery on the driver
+ * workqueue.
+ */
+static void a5xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+	/* ring may be NULL here; log -1 as the ring id in that case */
+	dev_err(dev->dev, "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+		ring ? ring->id : -1, adreno_submitted_fence(gpu, ring),
+		gpu_read(gpu, REG_A5XX_RBBM_STATUS),
+		gpu_read(gpu, REG_A5XX_CP_RB_RPTR),
+		gpu_read(gpu, REG_A5XX_CP_RB_WPTR),
+		gpu_read64(gpu, REG_A5XX_CP_IB1_BASE, REG_A5XX_CP_IB1_BASE_HI),
+		gpu_read(gpu, REG_A5XX_CP_IB1_BUFSZ),
+		gpu_read64(gpu, REG_A5XX_CP_IB2_BASE, REG_A5XX_CP_IB2_BASE_HI),
+		gpu_read(gpu, REG_A5XX_CP_IB2_BUFSZ));
+
+	/* Turn off the hangcheck timer to keep it from bothering us */
+	del_timer(&gpu->hangcheck_timer);
+
+	queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* All RBBM error bits handled by a5xx_rbbm_err_irq() */
+#define RBBM_ERROR_MASK \
+	(A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR | \
+	A5XX_RBBM_INT_0_MASK_RBBM_TRANSFER_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_ME_MS_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_PFP_MS_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_ETS_MS_TIMEOUT | \
+	A5XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNC_OVERFLOW)
+
+/* Top level A5XX interrupt handler - decode the status and dispatch */
+static irqreturn_t a5xx_irq(struct msm_gpu *gpu)
+{
+	u32 status = gpu_read(gpu, REG_A5XX_RBBM_INT_0_STATUS);
+
+	/*
+	 * Clear all the interrupts except for RBBM_AHB_ERROR
+	 * which needs to be cleared after the error condition
+	 * is cleared otherwise it will storm
+	 */
+	gpu_write(gpu, REG_A5XX_RBBM_INT_CLEAR_CMD,
+		status & ~A5XX_RBBM_INT_0_MASK_RBBM_AHB_ERROR);
+
+	if (status & RBBM_ERROR_MASK)
+		a5xx_rbbm_err_irq(gpu, status);
+
+	if (status & A5XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+		a5xx_cp_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_MISC_HANG_DETECT)
+		a5xx_fault_detect_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+		a5xx_uche_err_irq(gpu);
+
+	if (status & A5XX_RBBM_INT_0_MASK_GPMU_VOLTAGE_DROOP)
+		a5xx_gpmu_err_irq(gpu);
+
+	/* Timestamp cache flush: check for a pending preempt and retire work */
+	if (status & A5XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS) {
+		a5xx_preempt_trigger(gpu);
+		msm_gpu_retire(gpu);
+	}
+
+	/* CP_SW is used by the preemption code (see a5xx_preempt_irq) */
+	if (status & A5XX_RBBM_INT_0_MASK_CP_SW)
+		a5xx_preempt_irq(gpu);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * Map the generic REG_ADRENO_* register enums to their A5XX offsets;
+ * installed as adreno_gpu->reg_offsets in a5xx_gpu_init() for use by the
+ * common adreno code.
+ */
+static const u32 a5xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A5XX_CP_RB_BASE),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A5XX_CP_RB_BASE_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR, REG_A5XX_CP_RB_RPTR_ADDR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+		REG_A5XX_CP_RB_RPTR_ADDR_HI),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A5XX_CP_RB_RPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A5XX_CP_RB_WPTR),
+	REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A5XX_CP_RB_CNTL),
+};
+
+/*
+ * Pairs of (first, last) register offsets, terminated by ~0. Installed as
+ * adreno_gpu->registers in a5xx_gpu_init(); presumably walked by the
+ * common adreno code for register dumps - confirm against adreno_gpu.c.
+ */
+static const u32 a5xx_registers[] = {
+	0x0000, 0x0002, 0x0004, 0x0020, 0x0022, 0x0026, 0x0029, 0x002b,
+	0x002e, 0x0035, 0x0038, 0x0042, 0x0044, 0x0044, 0x0047, 0x0095,
+	0x0097, 0x00bb, 0x03a0, 0x0464, 0x0469, 0x046f, 0x04d2, 0x04d3,
+	0x04e0, 0x0533, 0x0540, 0x0555, 0x0800, 0x081a, 0x081f, 0x0841,
+	0x0860, 0x0860, 0x0880, 0x08a0, 0x0b00, 0x0b12, 0x0b14, 0x0b28,
+	0x0b78, 0x0b7f, 0x0bb0, 0x0bbd, 0x0bc0, 0x0bc6, 0x0bd0, 0x0c53,
+	0x0c60, 0x0c61, 0x0c80, 0x0c82, 0x0c84, 0x0c85, 0x0c90, 0x0c9b,
+	0x0ca0, 0x0ca0, 0x0cb0, 0x0cb2, 0x0cc1, 0x0cc1, 0x0cc4, 0x0cc7,
+	0x0ccc, 0x0ccc, 0x0cd0, 0x0cdb, 0x0ce0, 0x0ce5, 0x0ce8, 0x0ce8,
+	0x0cec, 0x0cf1, 0x0cfb, 0x0d0e, 0x0d10, 0x0d17, 0x0d20, 0x0d23,
+	0x0d30, 0x0d30, 0x0e40, 0x0e43, 0x0e4a, 0x0e4a, 0x0e50, 0x0e57,
+	0x0e60, 0x0e7c, 0x0e80, 0x0e8e, 0x0e90, 0x0e96, 0x0ea0, 0x0eab,
+	0x0eb0, 0x0eb2, 0x2100, 0x211e, 0x2140, 0x2145, 0x2180, 0x2185,
+	0x2500, 0x251e, 0x2540, 0x2545, 0x2580, 0x2585, 0x3000, 0x3014,
+	0x3018, 0x302c, 0x3030, 0x3030, 0x3034, 0x3036, 0x303c, 0x303d,
+	0x3040, 0x3040, 0x3042, 0x3042, 0x3049, 0x3049, 0x3058, 0x3058,
+	0x305a, 0x3061, 0x3064, 0x3068, 0x306c, 0x306d, 0x3080, 0x3088,
+	0x308b, 0x308c, 0x3090, 0x3094, 0x3098, 0x3098, 0x309c, 0x309c,
+	0x3124, 0x3124, 0x340c, 0x340c, 0x3410, 0x3410, 0x3800, 0x3801,
+	0xa800, 0xa800, 0xa820, 0xa828, 0xa840, 0xa87d, 0xa880, 0xa88d,
+	0xa890, 0xa8a3, 0xa8a8, 0xa8aa, 0xa8c0, 0xa8c3, 0xa8c6, 0xa8ca,
+	0xa8cc, 0xa8cf, 0xa8d1, 0xa8d8, 0xa8dc, 0xa8dc, 0xa8e0, 0xa8f5,
+	0xac00, 0xac06, 0xac40, 0xac47, 0xac60, 0xac62, 0xac80, 0xac82,
+	0xb800, 0xb808, 0xb80c, 0xb812, 0xb814, 0xb817, 0xb900, 0xb904,
+	0xb906, 0xb90a, 0xb90c, 0xb90f, 0xb920, 0xb924, 0xb926, 0xb92a,
+	0xb92c, 0xb92f, 0xb940, 0xb944, 0xb946, 0xb94a, 0xb94c, 0xb94f,
+	0xb960, 0xb964, 0xb966, 0xb96a, 0xb96c, 0xb96f, 0xb980, 0xb984,
+	0xb986, 0xb98a, 0xb98c, 0xb98f, 0xb9a0, 0xb9b0, 0xb9b8, 0xb9ba,
+	0xd200, 0xd23f, 0xe000, 0xe006, 0xe010, 0xe09a, 0xe0a0, 0xe0a4,
+	0xe0aa, 0xe0eb, 0xe100, 0xe105, 0xe140, 0xe147, 0xe150, 0xe187,
+	0xe1a0, 0xe1a9, 0xe1b0, 0xe1b6, 0xe1c0, 0xe1c7, 0xe1d0, 0xe1d1,
+	0xe200, 0xe201, 0xe210, 0xe21c, 0xe240, 0xe268, 0xe280, 0xe280,
+	0xe282, 0xe2a3, 0xe2a5, 0xe2c2, 0xe380, 0xe38f, 0xe3b0, 0xe3b0,
+	0xe400, 0xe405, 0xe408, 0xe4e9, 0xe4f0, 0xe4f0, 0xe800, 0xe806,
+	0xe810, 0xe89a, 0xe8a0, 0xe8a4, 0xe8aa, 0xe8eb, 0xe900, 0xe905,
+	0xe940, 0xe947, 0xe950, 0xe987, 0xe9a0, 0xe9a9, 0xe9b0, 0xe9b6,
+	0xe9c0, 0xe9c7, 0xe9d0, 0xe9d1, 0xea00, 0xea01, 0xea10, 0xea1c,
+	0xea40, 0xea68, 0xea80, 0xea80, 0xea82, 0xeaa3, 0xeaa5, 0xeac2,
+	0xeb80, 0xeb8f, 0xebb0, 0xebb0, 0xec00, 0xec05, 0xec08, 0xece9,
+	0xecf0, 0xecf0, 0xf800, 0xf807,
+	~0
+};
+
+/*
+ * Power up the GPU: core power first, then the RBCCU and SP power domains
+ * (polled via bit 20 of their clock status registers, which the error
+ * messages identify as the GDSC enable), and finally restore the
+ * performance counter state saved in a5xx_pm_suspend().
+ */
+static int a5xx_pm_resume(struct msm_gpu *gpu)
+{
+	int ret;
+
+	/* Turn on the core power */
+	ret = msm_gpu_pm_resume(gpu);
+	if (ret)
+		return ret;
+
+	/* Turn on the RBCCU domain first to limit the chances of voltage droop */
+	gpu_write(gpu, REG_A5XX_GPMU_RBCCU_POWER_CNTL, 0x778000);
+
+	/* Wait 3 usecs before polling */
+	udelay(3);
+
+	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS,
+		(1 << 20), (1 << 20));
+	if (ret) {
+		DRM_ERROR("%s: timeout waiting for RBCCU GDSC enable: %X\n",
+			gpu->name,
+			gpu_read(gpu, REG_A5XX_GPMU_RBCCU_PWR_CLK_STATUS));
+		return ret;
+	}
+
+	/* Turn on the SP domain */
+	gpu_write(gpu, REG_A5XX_GPMU_SP_POWER_CNTL, 0x778000);
+	ret = spin_usecs(gpu, 20, REG_A5XX_GPMU_SP_PWR_CLK_STATUS,
+		(1 << 20), (1 << 20));
+	if (ret)
+		/* Non-fatal: fall through and report the timeout via ret */
+		DRM_ERROR("%s: timeout waiting for SP GDSC enable\n",
+			gpu->name);
+
+	a5xx_counters_restore(gpu);
+
+	return ret;
+}
+
+/*
+ * Power down the GPU: halt and drain the VBIF, save counter state, reset
+ * the VBIF on A530, then drop the core power.
+ */
+static int a5xx_pm_suspend(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+	/* Clear the VBIF pipe before shutting down */
+	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0xF);
+	spin_until((gpu_read(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL1) & 0xF)
+		== 0xF);
+
+	/* Drop the halt request again now that the pipe is clear */
+	gpu_write(gpu, REG_A5XX_VBIF_XIN_HALT_CTRL0, 0);
+
+	/* Save the counters before going down */
+	a5xx_counters_save(gpu);
+
+	/*
+	 * Reset the VBIF before power collapse to avoid issue with FIFO
+	 * entries
+	 */
+	if (adreno_is_a530(adreno_gpu)) {
+		/* These only need to be done for A530 */
+		gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+			0x003C0000);
+		gpu_write(gpu, REG_A5XX_RBBM_BLOCK_SW_RESET_CMD,
+			0x00000000);
+	}
+
+	return msm_gpu_pm_suspend(gpu);
+}
+
+/* Report the current value of the dedicated CP timestamp counter */
+static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(to_adreno_gpu(gpu));
+
+	*value = adreno_read_counter(gpu, MSM_COUNTER_GROUP_CP,
+		a5xx_gpu->timestamp_counter);
+
+	return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Print the RBBM status and the common adreno state to debugfs */
+static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+{
+	u32 status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+	seq_printf(m, "status:   %08x\n", status);
+	adreno_show(gpu, m);
+}
+#endif
+
+/* Return the ring that is currently active on the hardware */
+static struct msm_ringbuffer *a5xx_active_ring(struct msm_gpu *gpu)
+{
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(to_adreno_gpu(gpu));
+
+	return a5xx_gpu->cur_ring;
+}
+
+/* GPU function table for all A5XX targets */
+static const struct adreno_gpu_funcs funcs = {
+	.base = {
+		.get_param = adreno_get_param,
+		.hw_init = a5xx_hw_init,
+		.pm_suspend = a5xx_pm_suspend,
+		.pm_resume = a5xx_pm_resume,
+		.recover = a5xx_recover,
+		.submitted_fence = adreno_submitted_fence,
+		.submit = a5xx_submit,
+		.flush = a5xx_flush,
+		.active_ring = a5xx_active_ring,
+		.irq = a5xx_irq,
+		.destroy = a5xx_destroy,
+#ifdef CONFIG_DEBUG_FS
+		.show = a5xx_show,
+#endif
+		.snapshot = a5xx_snapshot,
+		.get_counter = adreno_get_counter,
+		.read_counter = adreno_read_counter,
+		.put_counter = adreno_put_counter,
+	},
+	/* A5XX specific extension beyond the base msm_gpu_funcs */
+	.get_timestamp = a5xx_get_timestamp,
+};
+
+/* Read the limits management leakage from the efuses */
+static void a530_efuse_leakage(struct platform_device *pdev,
+		struct adreno_gpu *adreno_gpu, void *base,
+		size_t size)
+{
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	unsigned int row0, row2;
+	unsigned int leakage_pwr_on, coeff;
+
+	/* Both fuse rows must be inside the mapped qfprom region */
+	if (size < 0x148)
+		return;
+
+	/* Leakage */
+	row0 = readl_relaxed(base + 0x134);
+	row2 = readl_relaxed(base + 0x144);
+
+	/* Read barrier to get the previous two reads */
+	rmb();
+
+	/* Get the leakage coefficient from device tree */
+	if (of_property_read_u32(pdev->dev.of_node,
+		"qcom,base-leakage-coefficent", &coeff))
+		return;
+
+	/*
+	 * The multiplier is 1 shifted by a two bit field from row0. The
+	 * original expression '1 << (row0 >> 1) & 0x03' relied on the wrong
+	 * precedence ('<<' binds tighter than '&') so it masked the shifted
+	 * result instead of the shift amount - and could shift by up to 31
+	 * bits. Parenthesize so the mask selects the shift count.
+	 */
+	leakage_pwr_on = ((row2 >> 2) & 0xFF) * (1 << ((row0 >> 1) & 0x03));
+	a5xx_gpu->lm_leakage = (leakage_pwr_on << 16) |
+		((leakage_pwr_on * coeff) / 100);
+}
+
+/* Read the speed bin from the efuses */
+static void a530_efuse_bin(struct platform_device *pdev,
+		struct adreno_gpu *adreno_gpu, void *base,
+		size_t size)
+{
+	/* DT provides { fuse word offset, mask, shift } for the bin field */
+	uint32_t speed_bin[3];
+	uint32_t val;
+
+	if (of_property_read_u32_array(pdev->dev.of_node,
+		"qcom,gpu-speed-bin", speed_bin, 3))
+		return;
+
+	/* Make sure the fuse word is inside the mapped region */
+	if (size < speed_bin[0] + 4)
+		return;
+
+	val = readl_relaxed(base + speed_bin[0]);
+
+	adreno_gpu->speed_bin = (val & speed_bin[1]) >> speed_bin[2];
+}
+
+/* Read target specific configuration from the efuses */
+static void a5xx_efuses_read(struct platform_device *pdev,
+		struct adreno_gpu *adreno_gpu)
+{
+	struct adreno_platform_config *config = pdev->dev.platform_data;
+	const struct adreno_info *info = adreno_info(config->rev);
+	struct resource *res;
+	void *base;
+
+	/*
+	 * The adreno_gpu->revn mechanism isn't set up yet so we need to check
+	 * it directly here. Also guard against adreno_info() not matching the
+	 * revision so we don't dereference a NULL pointer.
+	 */
+	if (!info || info->revn != 530)
+		return;
+
+	res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+		"qfprom_memory");
+	if (!res)
+		return;
+
+	base = ioremap(res->start, resource_size(res));
+	if (!base)
+		return;
+
+	a530_efuse_bin(pdev, adreno_gpu, base, resource_size(res));
+	a530_efuse_leakage(pdev, adreno_gpu, base, resource_size(res));
+
+	iounmap(base);
+}
+
+/*
+ * Construct and initialize the A5XX GPU object at probe time. Returns a
+ * msm_gpu pointer on success or an ERR_PTR on failure.
+ */
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct a5xx_gpu *a5xx_gpu = NULL;
+	struct adreno_gpu *adreno_gpu;
+	struct msm_gpu *gpu;
+	struct msm_gpu_config a5xx_config = { 0 };
+	int ret;
+
+	if (!pdev) {
+		dev_err(dev->dev, "No A5XX device is defined\n");
+		return ERR_PTR(-ENXIO);
+	}
+
+	a5xx_gpu = kzalloc(sizeof(*a5xx_gpu), GFP_KERNEL);
+	if (!a5xx_gpu)
+		return ERR_PTR(-ENOMEM);
+
+	adreno_gpu = &a5xx_gpu->base;
+	gpu = &adreno_gpu->base;
+
+	a5xx_gpu->pdev = pdev;
+	adreno_gpu->registers = a5xx_registers;
+	adreno_gpu->reg_offsets = a5xx_register_offsets;
+
+	/* Default leakage value, may be overridden by the efuses below */
+	a5xx_gpu->lm_leakage = 0x4E001A;
+
+	/* Check the efuses for some configuration */
+	a5xx_efuses_read(pdev, adreno_gpu);
+
+	a5xx_config.ioname = MSM_GPU_DEFAULT_IONAME;
+	a5xx_config.irqname = MSM_GPU_DEFAULT_IRQNAME;
+
+	/* Set the number of rings to 4 - yay preemption */
+	a5xx_config.nr_rings = 4;
+
+	/*
+	 * Set the user domain range to fall into the TTBR1 region for global
+	 * objects
+	 */
+	a5xx_config.va_start = 0xfffffff000000000ULL;
+	a5xx_config.va_end = 0xffffffffffffffffULL;
+
+	a5xx_config.secure_va_start = SECURE_VA_START;
+	a5xx_config.secure_va_end = SECURE_VA_START + SECURE_VA_SIZE - 1;
+
+	ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, &a5xx_config);
+	if (ret) {
+		/* a5xx_destroy handles the partially constructed object */
+		a5xx_destroy(&(a5xx_gpu->base.base));
+		return ERR_PTR(ret);
+	}
+
+	/* Set up the preemption specific bits and pieces for each ringbuffer */
+	a5xx_preempt_init(gpu);
+
+	a5xx_counters_init(adreno_gpu);
+
+	return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.h b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
new file mode 100644
index 000000000000..9c62f861136d
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.h
@@ -0,0 +1,200 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#ifndef __A5XX_GPU_H__
+#define __A5XX_GPU_H__
+
+#include "adreno_gpu.h"
+
+/* Bringing over the hack from the previous targets */
+#undef ROP_COPY
+#undef ROP_XOR
+
+#include "a5xx.xml.h"
+
+/* Bit values for a5xx_gpu::flags */
+enum {
+	A5XX_ZAP_SHADER_LOADED = 1,
+	A5XX_HWCG_ENABLED = 2,
+};
+
+/* A5XX specific GPU state, wrapping the common adreno_gpu object */
+struct a5xx_gpu {
+	/* A5XX_* bit values from the enum above */
+	unsigned long flags;
+
+	struct adreno_gpu base;
+	struct platform_device *pdev;
+
+	/* PM4 (ME) microcode buffer and its GPU address */
+	struct drm_gem_object *pm4_bo;
+	uint64_t pm4_iova;
+
+	/* PFP microcode buffer and its GPU address */
+	struct drm_gem_object *pfp_bo;
+	uint64_t pfp_iova;
+
+	/* GPMU command buffer built by a5xx_gpmu_ucode_init() */
+	struct drm_gem_object *gpmu_bo;
+	uint64_t gpmu_iova;
+	uint32_t gpmu_dwords;
+
+	/* Leakage value for limits management (default or read from efuses) */
+	uint32_t lm_leakage;
+
+	/* Ring currently active on the hardware / ring being switched to */
+	struct msm_ringbuffer *cur_ring;
+	struct msm_ringbuffer *next_ring;
+
+	/* Per-ring preemption records shared with the CP */
+	struct drm_gem_object *preempt_bo[MSM_GPU_MAX_RINGS];
+	struct a5xx_preempt_record *preempt[MSM_GPU_MAX_RINGS];
+	uint64_t preempt_iova[MSM_GPU_MAX_RINGS];
+
+	/* Preemption state machine (see enum preempt_state) and timeout timer */
+	atomic_t preempt_state;
+	struct timer_list preempt_timer;
+
+	/* Shared SMMU pagetable info block (see struct a5xx_smmu_info) */
+	struct a5xx_smmu_info *smmu_info;
+	struct drm_gem_object *smmu_info_bo;
+	uint64_t smmu_info_iova;
+
+	/* CP counter used to implement get_timestamp */
+	int timestamp_counter;
+};
+
+#define to_a5xx_gpu(x) container_of(x, struct a5xx_gpu, base)
+
+/*
+ * In order to do lockless preemption we use a simple state machine to progress
+ * through the process.
+ *
+ * PREEMPT_NONE - no preemption in progress. Next state START.
+ * PREEMPT_START - The trigger is evaluating if preemption is possible. Next
+ * states: TRIGGERED, NONE
+ * PREEMPT_ABORT - An intermediate state before moving back to NONE. Next
+ * state: NONE.
+ * PREEMPT_TRIGGERED: A preemption has been executed on the hardware. Next
+ * states: FAULTED, PENDING
+ * PREEMPT_FAULTED: A preemption timed out (never completed). This will trigger
+ * recovery. Next state: N/A
+ * PREEMPT_PENDING: Preemption complete interrupt fired - the callback is
+ * checking the success of the operation. Next state: FAULTED, NONE.
+ */
+
+enum preempt_state {
+	PREEMPT_NONE = 0,
+	PREEMPT_START,
+	PREEMPT_ABORT,
+	PREEMPT_TRIGGERED,
+	PREEMPT_FAULTED,
+	PREEMPT_PENDING,
+};
+
+/*
+ * struct a5xx_preempt_record is a shared buffer between the microcode and the
+ * CPU to store the state for preemption. The record itself is much larger
+ * (64k) but most of that is used by the CP for storage.
+ *
+ * There is a preemption record assigned per ringbuffer. When the CPU triggers a
+ * preemption, it fills out the record with the useful information (wptr, ring
+ * base, etc) and the microcode uses that information to set up the CP following
+ * the preemption. When a ring is switched out, the CP will save the ringbuffer
+ * state back to the record. In this way, once the records are properly set up
+ * the CPU can quickly switch back and forth between ringbuffers by only
+ * updating a few registers (often only the wptr).
+ *
+ * These are the CPU aware registers in the record:
+ * @magic: Must always be 0x27C4BAFC
+ * @info: Type of the record - written 0 by the CPU, updated by the CP
+ * @data: Data field from SET_RENDER_MODE or a checkpoint. Written and used by
+ * the CP
+ * @cntl: Value of RB_CNTL written by CPU, save/restored by CP
+ * @rptr: Value of RB_RPTR written by CPU, save/restored by CP
+ * @wptr: Value of RB_WPTR written by CPU, save/restored by CP
+ * @rptr_addr: Value of RB_RPTR_ADDR written by CPU, save/restored by CP
+ * @rbase: Value of RB_BASE written by CPU, save/restored by CP
+ * @counter: GPU address of the storage area for the performance counters
+ */
+struct a5xx_preempt_record {
+	uint32_t magic;
+	uint32_t info;
+	uint32_t data;
+	uint32_t cntl;
+	uint32_t rptr;
+	uint32_t wptr;
+	uint64_t rptr_addr;
+	uint64_t rbase;
+	uint64_t counter;
+};
+
+/* Magic identifier for the preemption record */
+#define A5XX_PREEMPT_RECORD_MAGIC 0x27C4BAFCUL
+
+/*
+ * Even though the structure above is only a few bytes, we need a full 64k to
+ * store the entire preemption record from the CP
+ */
+#define A5XX_PREEMPT_RECORD_SIZE (64 * 1024)
+
+/*
+ * The preemption counter block is a storage area for the value of the
+ * preemption counters that are saved immediately before context switch. We
+ * append it on to the end of the allocation for the preemption record.
+ */
+#define A5XX_PREEMPT_COUNTER_SIZE (16 * 4)
+
+/*
+ * This is a global structure that the preemption code uses to switch in the
+ * pagetable for the preempted process - the code switches in whatever we
+ * wrote here after preempting in a new ring.
+ */
+struct a5xx_smmu_info {
+	uint32_t magic;		/* Must be A5XX_SMMU_INFO_MAGIC */
+	uint32_t _pad4;		/* Pad so ttbr0 is 8 byte aligned */
+	uint64_t ttbr0;		/* Pagetable base to switch in */
+	uint32_t asid;
+	uint32_t contextidr;
+};
+
+#define A5XX_SMMU_INFO_MAGIC 0x3618CDA3UL
+
+int a5xx_power_init(struct msm_gpu *gpu);
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu);
+
+/*
+ * Poll a register for up to 'usecs' microseconds waiting for the masked
+ * bits to match 'value'. Returns 0 on success or -ETIMEDOUT.
+ */
+static inline int spin_usecs(struct msm_gpu *gpu, uint32_t usecs,
+	uint32_t reg, uint32_t mask, uint32_t value)
+{
+	for (; usecs != 0; usecs--) {
+		udelay(1);
+		if ((gpu_read(gpu, reg) & mask) == value)
+			return 0;
+		cpu_relax();
+	}
+
+	return -ETIMEDOUT;
+}
+
+void a5xx_set_hwcg(struct msm_gpu *gpu, bool state);
+bool a5xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+
+void a5xx_preempt_init(struct msm_gpu *gpu);
+void a5xx_preempt_hw_init(struct msm_gpu *gpu);
+void a5xx_preempt_trigger(struct msm_gpu *gpu);
+void a5xx_preempt_irq(struct msm_gpu *gpu);
+void a5xx_preempt_fini(struct msm_gpu *gpu);
+
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+/* Return true if we are in a preempt state */
+static inline bool a5xx_in_preempt(struct a5xx_gpu *a5xx_gpu)
+{
+	int state = atomic_read(&a5xx_gpu->preempt_state);
+
+	/* Anything other than NONE or ABORT means a switch is in flight */
+	return state != PREEMPT_NONE && state != PREEMPT_ABORT;
+}
+
+int a5xx_counters_init(struct adreno_gpu *adreno_gpu);
+void a5xx_counters_save(struct msm_gpu *gpu);
+void a5xx_counters_restore(struct msm_gpu *gpu);
+
+#endif /* __A5XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_power.c b/drivers/gpu/drm/msm/adreno/a5xx_power.c
new file mode 100644
index 000000000000..647b61313fc2
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_power.c
@@ -0,0 +1,499 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/pm_opp.h>
+#include "a5xx_gpu.h"
+
+/*
+ * The GPMU data block is a block of shared registers that can be used to
+ * communicate back and forth. These "registers" are by convention with the GPMU
+ * firmware and not bound to any specific hardware design
+ */
+
+#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
+#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
+#define AGC_MSG_BASE (AGC_INIT_BASE + 7)
+
+#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
+#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
+#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
+#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))
+
+#define AGC_POWER_CONFIG_PRODUCTION_ID 1
+#define AGC_INIT_MSG_VALUE 0xBABEFACE
+
+/* AGC_LM_CONFIG (A540+) */
+#define AGC_LM_CONFIG (136/4)
+#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
+#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
+#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
+#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
+#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
+#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
+#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)
+
+#define AGC_LEVEL_CONFIG (140/4)
+
+/*
+ * Static register/value pairs written to the hardware by a530_lm_setup()
+ * as the first step of the limits management sequence
+ */
+static struct {
+	uint32_t reg;
+	uint32_t value;
+} a5xx_sequence_regs[] = {
+	{ 0xB9A1, 0x00010303 },
+	{ 0xB9A2, 0x13000000 },
+	{ 0xB9A3, 0x00460020 },
+	{ 0xB9A4, 0x10000000 },
+	{ 0xB9A5, 0x040A1707 },
+	{ 0xB9A6, 0x00010000 },
+	{ 0xB9A7, 0x0E000904 },
+	{ 0xB9A8, 0x10000000 },
+	{ 0xB9A9, 0x01165000 },
+	{ 0xB9AA, 0x000E0002 },
+	{ 0xB9AB, 0x03884141 },
+	{ 0xB9AC, 0x10000840 },
+	{ 0xB9AD, 0x572A5000 },
+	{ 0xB9AE, 0x00000003 },
+	{ 0xB9AF, 0x00000000 },
+	{ 0xB9B0, 0x10000000 },
+	{ 0xB828, 0x6C204010 },
+	{ 0xB829, 0x6C204011 },
+	{ 0xB82A, 0x6C204012 },
+	{ 0xB82B, 0x6C204013 },
+	{ 0xB82C, 0x6C204014 },
+	{ 0xB90F, 0x00000004 },
+	{ 0xB910, 0x00000002 },
+	{ 0xB911, 0x00000002 },
+	{ 0xB912, 0x00000002 },
+	{ 0xB913, 0x00000002 },
+	{ 0xB92F, 0x00000004 },
+	{ 0xB930, 0x00000005 },
+	{ 0xB931, 0x00000005 },
+	{ 0xB932, 0x00000005 },
+	{ 0xB933, 0x00000005 },
+	{ 0xB96F, 0x00000001 },
+	{ 0xB970, 0x00000003 },
+	{ 0xB94F, 0x00000004 },
+	{ 0xB950, 0x0000000B },
+	{ 0xB951, 0x0000000B },
+	{ 0xB952, 0x0000000B },
+	{ 0xB953, 0x0000000B },
+	{ 0xB907, 0x00000019 },
+	{ 0xB927, 0x00000019 },
+	{ 0xB947, 0x00000019 },
+	{ 0xB967, 0x00000019 },
+	{ 0xB987, 0x00000019 },
+	{ 0xB906, 0x00220001 },
+	{ 0xB926, 0x00220001 },
+	{ 0xB946, 0x00220001 },
+	{ 0xB966, 0x00220001 },
+	{ 0xB986, 0x00300000 },
+	{ 0xAC40, 0x0340FF41 },
+	{ 0xAC41, 0x03BEFED0 },
+	{ 0xAC42, 0x00331FED },
+	{ 0xAC43, 0x021FFDD3 },
+	{ 0xAC44, 0x5555AAAA },
+	{ 0xAC45, 0x5555AAAA },
+	{ 0xB9BA, 0x00000008 },
+};
+
+/*
+ * Get the actual voltage value for the operating point at the specified
+ * frequency. Returns the voltage in millivolts, or 0 if no exact OPP
+ * match exists for the frequency.
+ */
+static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
+{
+	struct drm_device *dev = gpu->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct platform_device *pdev = priv->gpu_pdev;
+	struct dev_pm_opp *opp;
+
+	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);
+
+	/* dev_pm_opp_get_voltage() reports microvolts - scale to mV */
+	return (!IS_ERR(opp)) ? dev_pm_opp_get_voltage(opp) / 1000 : 0;
+}
+
+#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
+#define LM_DCVS_LIMIT 1
+#define LEVEL_CONFIG ~(0x303)
+
+/* Setup thermal limit management for A540 */
+static void a540_lm_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	u32 max_power = 0;
+	u32 rate = gpu->gpufreq[gpu->active_level];
+	u32 config;
+
+	/* The battery current limiter isn't enabled for A540 */
+	config = AGC_LM_CONFIG_BCL_DISABLED;
+	config |= adreno_gpu->rev.patchid << AGC_LM_CONFIG_GPU_VERSION_SHIFT;
+
+	/* For now disable GPMU side throttling */
+	config |= AGC_LM_CONFIG_THROTTLE_DISABLE;
+
+	/* Get the max-power from the device tree */
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+	/* Fill in the AGC message block for the GPMU firmware */
+	gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
+	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+	/*
+	 * For now just write the one voltage level - we will do more when we
+	 * can do scaling
+	 */
+	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
+	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
+		PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));
+
+	/* Writing the magic last tells the GPMU the message is complete */
+	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Setup thermal limit management for A530 */
+static void a530_lm_setup(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	uint32_t rate = gpu->gpufreq[gpu->active_level];
+	uint32_t tsens = 0;
+	uint32_t max_power = 0;
+	unsigned int i;
+
+	/* Write the block of sequence registers */
+	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
+		gpu_write(gpu, a5xx_sequence_regs[i].reg,
+			a5xx_sequence_regs[i].value);
+
+	/* The temperature sensor id comes from the device tree */
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,gpmu-tsens", &tsens);
+
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, tsens);
+	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);
+
+	/* Leakage value was defaulted or read from efuses at init */
+	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);
+
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);
+
+	/* Write the voltage table */
+
+	/* Get the max-power from the device tree */
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-max-power", &max_power);
+
+	/*
+	 * NOTE(review): BEC_ENABLE and GDPM_CONFIG1 are written again here
+	 * with the same values as above - possibly redundant; confirm
+	 * against the downstream sequence before removing
+	 */
+	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
+	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);
+
+	gpu_write(gpu, AGC_MSG_STATE, 1);
+	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);
+
+	/*
+	 * For now just write the one voltage level - we will do more when we
+	 * can do scaling
+	 */
+	gpu_write(gpu, AGC_MSG_PAYLOAD(0), max_power);
+	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, rate));
+	gpu_write(gpu, AGC_MSG_PAYLOAD(3), rate / 1000000);
+
+	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
+	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
+}
+
+/* Enable SP/TP power collapse */
+static void a5xx_pc_init(struct msm_gpu *gpu)
+{
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
+	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
+}
+
+/*
+ * Enable the GPMU microcontroller: stream the microcode commands built by
+ * a5xx_gpmu_ucode_init() through the CP, take the GPMU out of reset and
+ * wait for it to report it is alive. A missing or unresponsive GPMU is
+ * not fatal (just no advanced power collapse); a CP failure is.
+ */
+static int a5xx_gpmu_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct msm_ringbuffer *ring = gpu->rb[0];
+
+	/* Nothing to do if no GPMU microcode was prepared */
+	if (!a5xx_gpu->gpmu_dwords)
+		return 0;
+
+	/* Turn off protected mode for this operation */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 0);
+
+	/* Kick off the IB to load the GPMU microcode */
+	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
+	OUT_RING(ring, a5xx_gpu->gpmu_dwords);
+
+	/* Turn back on protected mode */
+	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
+	OUT_RING(ring, 1);
+
+	gpu->funcs->flush(gpu, ring);
+
+	/* This is "fatal" because the CP is left in a bad state */
+	if (!a5xx_idle(gpu, ring)) {
+		DRM_ERROR("%s: Unable to load GPMU firmware\n",
+			gpu->name);
+		return -EINVAL;
+	}
+
+	/* Clock gating setup for A530 targets */
+	if (adreno_is_a530(adreno_gpu))
+		gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);
+
+	/* Kick off the GPMU */
+	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);
+
+	/*
+	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
+	 * won't have advanced power collapse.
+	 */
+	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
+		0xBABEFACE)) {
+		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
+			gpu->name);
+		return 0;
+	}
+
+	/* Non-A530 targets report an error code in GENERAL_1 */
+	if (!adreno_is_a530(adreno_gpu)) {
+		u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);
+
+		if (val)
+			DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
+				gpu->name, val);
+	}
+
+	/* FIXME: Clear GPMU interrupts? */
+	return 0;
+}
+
+/* Enable limits management */
+static void a5xx_lm_enable(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+	/* This init sequence only applies to A530 */
+	if (!adreno_is_a530(adreno_gpu))
+		return;
+
+	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
+	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
+	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
+	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);
+
+	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
+}
+
+/*
+ * Top level power feature setup: limits management, SP/TP power collapse
+ * and the GPMU microcontroller. Returns 0 on success or a negative error
+ * if the GPMU microcode could not be loaded through the CP.
+ */
+int a5xx_power_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	int ret;
+	/* Default power threshold, may be overridden by the device tree */
+	u32 lm_limit = 6000;
+
+	/*
+	 * Set up the limit management
+	 * first, do some generic setup:
+	 */
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);
+
+	of_property_read_u32(GPU_OF_NODE(gpu), "qcom,lm-limit", &lm_limit);
+	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | lm_limit);
+
+	/* Now do the target specific setup */
+	if (adreno_is_a530(adreno_gpu))
+		a530_lm_setup(gpu);
+	else
+		a540_lm_setup(gpu);
+
+	/* Set up SP/TP power collapse */
+	a5xx_pc_init(gpu);
+
+	/* Start the GPMU */
+	ret = a5xx_gpmu_init(gpu);
+	if (ret)
+		return ret;
+
+	/* Start the limits management */
+	a5xx_lm_enable(gpu);
+
+	return 0;
+}
+
+/*
+ * Parse the GPMU firmware header: data[0] is the header size in dwords,
+ * followed by { id, value } pairs where id 1 carries the major version and
+ * id 2 the minor version. Returns 0 on success or -EINVAL if the header
+ * is malformed.
+ */
+static int _read_header(unsigned int *data, uint32_t fwsize,
+		unsigned int *major, unsigned int *minor)
+{
+	uint32_t size;
+	unsigned int i;
+
+	/* First dword of the header is the header size */
+	if (fwsize < 4)
+		return -EINVAL;
+
+	size = data[0];
+
+	/*
+	 * Make sure the header isn't too big and is a multiple of two.
+	 * Since size is even, the pair loop below reads up to data[size],
+	 * so size must be strictly less than the dword count of the buffer
+	 * (the original '>' comparison allowed size == fwsize >> 2 and
+	 * with it a one dword read past the end of the firmware).
+	 */
+	if ((size % 2) || (size > 10) || size >= (fwsize >> 2))
+		return -EINVAL;
+
+	/* Read the values in pairs */
+	for (i = 1; i < size; i += 2) {
+		switch (data[i]) {
+		case 1:
+			*major = data[i + 1];
+			break;
+		case 2:
+			*minor = data[i + 1];
+			break;
+		default:
+			/* Invalid values are non fatal */
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * Make sure cur_major and cur_minor are greater than or equal to the minimum
+ * allowable major/minor
+ */
+static inline bool _check_gpmu_version(uint32_t cur_major, uint32_t cur_minor,
+	uint32_t min_major, uint32_t min_minor)
+{
+	/* A strictly newer major version always wins */
+	if (cur_major != min_major)
+		return cur_major > min_major;
+
+	/* Same major: compare the minor version */
+	return cur_minor >= min_minor;
+}
+
+/*
+ * Load the GPMU firmware named by the device tree, validate its header and
+ * version, and repackage the command payload into a GPU buffer of PKT4
+ * writes targeting the GPMU instruction RAM. On any failure the GPMU is
+ * simply left disabled (gpmu_dwords stays 0) - this is best effort.
+ */
+void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
+{
+	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+	struct drm_device *drm = gpu->dev;
+	const char *name;
+	const struct firmware *fw;
+	uint32_t version[2] = { 0, 0 };
+	uint32_t dwords = 0, offset = 0;
+	uint32_t major = 0, minor = 0, bosize;
+	unsigned int *data, *ptr, *cmds;
+	unsigned int cmds_size;
+
+	/* Already done - the repackaged buffer persists across resets */
+	if (a5xx_gpu->gpmu_bo)
+		return;
+
+	/*
+	 * Read the firmware name from the device tree - if it doesn't exist
+	 * then don't initialize the GPMU for this target
+	 */
+	if (of_property_read_string(GPU_OF_NODE(gpu), "qcom,gpmu-firmware",
+		&name))
+		return;
+
+	/*
+	 * The version isn't mandatory, but if it exists, we need to enforce
+	 * that the version of the GPMU firmware matches or is newer than the
+	 * value
+	 */
+	of_property_read_u32_array(GPU_OF_NODE(gpu), "qcom,gpmu-version",
+		version, 2);
+
+	/* Get the firmware */
+	if (request_firmware(&fw, name, drm->dev)) {
+		DRM_ERROR("%s: Could not get GPMU firmware. GPMU will not be active\n",
+			gpu->name);
+		return;
+	}
+
+	data = (unsigned int *) fw->data;
+
+	/*
+	 * The first dword is the size of the remaining data in dwords. Use it
+	 * as a checksum of sorts and make sure it matches the actual size of
+	 * the firmware that we read
+	 */
+
+	if (fw->size < 8 || (data[0] < 2) || (data[0] >= (fw->size >> 2)))
+		goto out;
+
+	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
+	if (data[1] != 2)
+		goto out;
+
+	/* Read the header and get the major/minor of the read firmware */
+	if (_read_header(&data[2], fw->size - 8, &major, &minor))
+		goto out;
+
+	if (!_check_gpmu_version(major, minor, version[0], version[1])) {
+		DRM_ERROR("%s: Loaded GPMU version %d.%d is too old\n",
+			gpu->name, major, minor);
+		goto out;
+	}
+
+	/* The command payload follows the header (data[2] is the header size) */
+	cmds = data + data[2] + 3;
+	cmds_size = data[0] - data[2] - 2;
+
+	/*
+	 * A single type4 opcode can only have so many values attached so
+	 * add enough opcodes to load the all the commands
+	 */
+	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;
+
+	ptr = msm_gem_kernel_new(drm, bosize,
+		MSM_BO_UNCACHED | MSM_BO_GPU_READONLY, gpu->aspace,
+		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
+	if (IS_ERR(ptr))
+		goto err;
+
+	/* Emit TYPE4_MAX_PAYLOAD sized chunks of commands, each behind a PKT4 */
+	while (cmds_size > 0) {
+		int i;
+		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
+			TYPE4_MAX_PAYLOAD : cmds_size;
+
+		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
+			_size);
+
+		for (i = 0; i < _size; i++)
+			ptr[dwords++] = *cmds++;
+
+		offset += _size;
+		cmds_size -= _size;
+	}
+
+	/* Non-zero gpmu_dwords is what arms a5xx_gpmu_init() */
+	a5xx_gpu->gpmu_dwords = dwords;
+
+	goto out;
+
+err:
+	/* Tear down any partial allocation and leave the GPMU disabled */
+	if (a5xx_gpu->gpmu_iova)
+		msm_gem_put_iova(a5xx_gpu->gpmu_bo, gpu->aspace);
+	if (a5xx_gpu->gpmu_bo)
+		drm_gem_object_unreference_unlocked(a5xx_gpu->gpmu_bo);
+
+	a5xx_gpu->gpmu_bo = NULL;
+	a5xx_gpu->gpmu_iova = 0;
+	a5xx_gpu->gpmu_dwords = 0;
+
+out:
+	/* No need to keep that firmware laying around anymore */
+	release_firmware(fw);
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_preempt.c b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
new file mode 100644
index 000000000000..44d4ca35fa09
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_preempt.c
@@ -0,0 +1,359 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_iommu.h"
+#include "a5xx_gpu.h"
+
+/*
+ * Try to transition the preemption state from old to new. Return
+ * true on success or false if the original state wasn't 'old'
+ * (i.e. another CPU or the IRQ handler won the race and moved the
+ * state machine first). Lock-free via atomic_cmpxchg().
+ */
+static inline bool try_preempt_state(struct a5xx_gpu *a5xx_gpu,
+ enum preempt_state old, enum preempt_state new)
+{
+ enum preempt_state cur = atomic_cmpxchg(&a5xx_gpu->preempt_state,
+ old, new);
+
+ return (cur == old);
+}
+
+/*
+ * Force the preemption state to the specified state. This is used in cases
+ * where the current state is known and won't change
+ */
+static inline void set_preempt_state(struct a5xx_gpu *gpu,
+ enum preempt_state new)
+{
+ /*
+ * preempt_state may be read by other cores trying to trigger a
+ * preemption or in the interrupt handler so barriers are needed
+ * before...
+ */
+ smp_mb__before_atomic();
+ atomic_set(&gpu->preempt_state, new);
+ /* ... and after */
+ smp_mb__after_atomic();
+}
+
+/* Write the most recent wptr for the given ring into the hardware */
+static inline void update_wptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ unsigned long flags;
+ uint32_t wptr;
+
+ /* A NULL ring is a no-op (nothing to make current) */
+ if (!ring)
+ return;
+
+ /* Snapshot the wptr under the ring lock so it can't move under us */
+ spin_lock_irqsave(&ring->lock, flags);
+ wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ gpu_write(gpu, REG_A5XX_CP_RB_WPTR, wptr);
+}
+
+/* Return the highest priority ringbuffer with something in it */
+static struct msm_ringbuffer *get_next_ring(struct msm_gpu *gpu)
+{
+ unsigned long flags;
+ int i;
+
+ /*
+ * Find the highest priority ringbuffer that isn't empty and jump
+ * to it (0 being the highest and gpu->nr_rings - 1 being the
+ * lowest)
+ */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ bool empty;
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ /* A ring is empty when the read pointer has caught up to wptr */
+ spin_lock_irqsave(&ring->lock, flags);
+ empty = (get_wptr(ring) == ring->memptrs->rptr);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ if (!empty)
+ return ring;
+ }
+
+ /* All rings are drained */
+ return NULL;
+}
+
+/*
+ * Watchdog armed by a5xx_preempt_trigger(): if it fires while the state
+ * is still PREEMPT_TRIGGERED the context switch never completed, so mark
+ * the preemption as faulted and schedule GPU recovery.
+ */
+static void a5xx_preempt_timer(unsigned long data)
+{
+ struct a5xx_gpu *a5xx_gpu = (struct a5xx_gpu *) data;
+ struct msm_gpu *gpu = &a5xx_gpu->base.base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* If the state already moved on, the IRQ handler beat us - bail */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_FAULTED))
+ return;
+
+ dev_err(dev->dev, "%s: preemption timed out\n", gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+/* Try to trigger a preemption switch */
+void a5xx_preempt_trigger(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ unsigned long flags;
+ struct msm_ringbuffer *ring;
+
+ /* With a single ring there is nothing to preempt to */
+ if (gpu->nr_rings == 1)
+ return;
+
+ /*
+ * Try to start preemption by moving from NONE to START. If
+ * unsuccessful, a preemption is already in flight
+ */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_NONE, PREEMPT_START))
+ return;
+
+ /* Get the next ring to preempt to */
+ ring = get_next_ring(gpu);
+
+ /*
+ * If no ring is populated or the highest priority ring is the current
+ * one do nothing except to update the wptr to the latest and greatest
+ */
+ if (!ring || (a5xx_gpu->cur_ring == ring)) {
+ /*
+ * It's possible that while a preemption request is in progress
+ * from an irq context, a user context trying to submit might
+ * fail to update the write pointer, because it determines
+ * that the preempt state is not PREEMPT_NONE.
+ *
+ * Close the race by introducing an intermediate
+ * state PREEMPT_ABORT to let the submit path
+ * know that the ringbuffer is not going to change
+ * and can safely update the write pointer.
+ */
+
+ set_preempt_state(a5xx_gpu, PREEMPT_ABORT);
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+ return;
+ }
+
+ /* Make sure the wptr doesn't update while we're in motion */
+ spin_lock_irqsave(&ring->lock, flags);
+ a5xx_gpu->preempt[ring->id]->wptr = get_wptr(ring);
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Do read barrier to make sure we have updated pagetable info */
+ rmb();
+
+ /* Set the SMMU info for the preemption */
+ if (a5xx_gpu->smmu_info) {
+ a5xx_gpu->smmu_info->ttbr0 = ring->memptrs->ttbr0;
+ a5xx_gpu->smmu_info->contextidr = ring->memptrs->contextidr;
+ }
+
+ /* Set the address of the incoming preemption record */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_RESTORE_ADDR_HI,
+ a5xx_gpu->preempt_iova[ring->id]);
+
+ a5xx_gpu->next_ring = ring;
+
+ /* Start a timer to catch a stuck preemption */
+ mod_timer(&a5xx_gpu->preempt_timer, jiffies + msecs_to_jiffies(10000));
+
+ /* Set the preemption state to triggered */
+ set_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED);
+
+ /* Make sure everything is written before hitting the button */
+ wmb();
+
+ /* And actually start the preemption */
+ gpu_write(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL, 1);
+}
+
+/*
+ * Interrupt path for a completed context switch: verify the hardware
+ * really finished, then make next_ring current and resume execution.
+ */
+void a5xx_preempt_irq(struct msm_gpu *gpu)
+{
+ uint32_t status;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ /* Only act if a preemption is actually outstanding */
+ if (!try_preempt_state(a5xx_gpu, PREEMPT_TRIGGERED, PREEMPT_PENDING))
+ return;
+
+ /* Delete the preemption watchdog timer */
+ del_timer(&a5xx_gpu->preempt_timer);
+
+ /*
+ * The hardware should be setting CP_CONTEXT_SWITCH_CNTL to zero before
+ * firing the interrupt, but there is a non zero chance of a hardware
+ * condition or a software race that could set it again before we have a
+ * chance to finish. If that happens, log and go for recovery
+ */
+ status = gpu_read(gpu, REG_A5XX_CP_CONTEXT_SWITCH_CNTL);
+ if (unlikely(status)) {
+ set_preempt_state(a5xx_gpu, PREEMPT_FAULTED);
+ dev_err(dev->dev, "%s: Preemption failed to complete\n",
+ gpu->name);
+ queue_work(priv->wq, &gpu->recover_work);
+ return;
+ }
+
+ /* The ring we preempted to is now the current ring */
+ a5xx_gpu->cur_ring = a5xx_gpu->next_ring;
+ a5xx_gpu->next_ring = NULL;
+
+ /* Push any wptr updates that arrived while we were switching */
+ update_wptr(gpu, a5xx_gpu->cur_ring);
+
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+}
+
+/* Reset preemption bookkeeping when the GPU hardware is (re)initialized */
+void a5xx_preempt_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ if (gpu->nr_rings > 1) {
+ /* Clear the preemption records */
+ FOR_EACH_RING(gpu, ring, i) {
+ if (ring) {
+ a5xx_gpu->preempt[ring->id]->wptr = 0;
+ a5xx_gpu->preempt[ring->id]->rptr = 0;
+ a5xx_gpu->preempt[ring->id]->rbase = ring->iova;
+ }
+ }
+ }
+
+ /* Tell the CP where to find the smmu_info buffer */
+ gpu_write64(gpu, REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO,
+ REG_A5XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI,
+ a5xx_gpu->smmu_info_iova);
+
+ /* Reset the preemption state */
+ set_preempt_state(a5xx_gpu, PREEMPT_NONE);
+
+ /* Always come up on rb 0 */
+ a5xx_gpu->cur_ring = gpu->rb[0];
+}
+
+/*
+ * Allocate and initialize the preemption record for one ring. The BO
+ * holds the record itself followed by the counter area. Returns 0 on
+ * success or a negative errno from the allocation.
+ */
+static int preempt_init_ring(struct a5xx_gpu *a5xx_gpu,
+ struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = &a5xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct a5xx_preempt_record *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+
+ ptr = msm_gem_kernel_new(gpu->dev,
+ A5XX_PREEMPT_RECORD_SIZE + A5XX_PREEMPT_COUNTER_SIZE,
+ MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+ gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ return PTR_ERR(ptr);
+
+ a5xx_gpu->preempt_bo[ring->id] = bo;
+ a5xx_gpu->preempt_iova[ring->id] = iova;
+ a5xx_gpu->preempt[ring->id] = ptr;
+
+ /* Set up the defaults on the preemption record */
+
+ ptr->magic = A5XX_PREEMPT_RECORD_MAGIC;
+ ptr->info = 0;
+ ptr->data = 0;
+ ptr->cntl = MSM_GPU_RB_CNTL_DEFAULT;
+ ptr->rptr_addr = rbmemptr(ring, rptr);
+ /* Counter space lives immediately after the record in the same BO */
+ ptr->counter = iova + A5XX_PREEMPT_RECORD_SIZE;
+
+ return 0;
+}
+
+/* Free the per-ring preemption records and the SMMU info buffer */
+void a5xx_preempt_fini(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring || !a5xx_gpu->preempt_bo[i])
+ continue;
+
+ if (a5xx_gpu->preempt_iova[i])
+ msm_gem_put_iova(a5xx_gpu->preempt_bo[i], gpu->aspace);
+
+ drm_gem_object_unreference_unlocked(a5xx_gpu->preempt_bo[i]);
+
+ a5xx_gpu->preempt_bo[i] = NULL;
+ }
+
+ if (a5xx_gpu->smmu_info_bo) {
+ if (a5xx_gpu->smmu_info_iova)
+ msm_gem_put_iova(a5xx_gpu->smmu_info_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a5xx_gpu->smmu_info_bo);
+ a5xx_gpu->smmu_info_bo = NULL;
+ }
+}
+
+/*
+ * One-time preemption setup: allocate a record per ring, optionally the
+ * SMMU info buffer (only when dynamic IOMMU domains are available), and
+ * arm the watchdog timer. On failure preemption is disabled by forcing
+ * nr_rings to 1.
+ */
+void a5xx_preempt_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_ringbuffer *ring;
+ struct a5xx_smmu_info *ptr;
+ struct drm_gem_object *bo;
+ uint64_t iova;
+ int i;
+
+ /* No preemption if we only have one ring */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ if (preempt_init_ring(a5xx_gpu, ring))
+ goto fail;
+ }
+
+ if (msm_iommu_allow_dynamic(gpu->aspace->mmu)) {
+ ptr = msm_gem_kernel_new(gpu->dev,
+ sizeof(struct a5xx_smmu_info),
+ MSM_BO_UNCACHED | MSM_BO_PRIVILEGED,
+ gpu->aspace, &bo, &iova);
+
+ if (IS_ERR(ptr))
+ goto fail;
+
+ ptr->magic = A5XX_SMMU_INFO_MAGIC;
+
+ a5xx_gpu->smmu_info_bo = bo;
+ a5xx_gpu->smmu_info_iova = iova;
+ a5xx_gpu->smmu_info = ptr;
+ }
+
+ setup_timer(&a5xx_gpu->preempt_timer, a5xx_preempt_timer,
+ (unsigned long) a5xx_gpu);
+
+ return;
+fail:
+ /*
+ * On any failure our adventure is over. Clean up and
+ * set nr_rings to 1 to force preemption off
+ */
+ a5xx_preempt_fini(gpu);
+ gpu->nr_rings = 1;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
new file mode 100644
index 000000000000..d1c1ab460c95
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a5xx_snapshot.c
@@ -0,0 +1,815 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "a5xx_gpu.h"
+#include "msm_snapshot_api.h"
+
+#define A5XX_NR_SHADER_BANKS 4
+
+/*
+ * These are a list of the registers that need to be read through the HLSQ
+ * aperture through the crashdumper. These are not nominally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xE00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xE780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xEF80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0EC0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xE580, 0x180 }, /* SP 3D context 0 */
+ { 0x3c, 0xED80, 0x180 }, /* SP 3D context 1 */
+ { 0x3a, 0x0F00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xE700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xEF00, 0x80 }, /* TP 3D context 1 */
+};
+
+/*
+ * The debugbus registers contain device state that presumably makes
+ * sense to the hardware designers. 'count' is the number of indexes to read
+ * from each block; each index value is 64 bits (two dwords) wide.
+ */
+static const struct {
+ enum a5xx_debugbus id;
+ u32 count;
+} a5xx_debugbus_blocks[] = {
+ { A5XX_RBBM_DBGBUS_CP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ, 0x100, },
+ { A5XX_RBBM_DBGBUS_UCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_DPM, 0x100, },
+ { A5XX_RBBM_DBGBUS_TESS, 0x100, },
+ { A5XX_RBBM_DBGBUS_PC, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFDP, 0x100, },
+ { A5XX_RBBM_DBGBUS_VPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_TSE, 0x100, },
+ { A5XX_RBBM_DBGBUS_RAS, 0x100, },
+ { A5XX_RBBM_DBGBUS_VSC, 0x100, },
+ { A5XX_RBBM_DBGBUS_COM, 0x100, },
+ { A5XX_RBBM_DBGBUS_DCOM, 0x100, },
+ { A5XX_RBBM_DBGBUS_LRZ, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_DSP, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCUFCHE, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPMU, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBP, 0x100, },
+ { A5XX_RBBM_DBGBUS_HM, 0x100, },
+ { A5XX_RBBM_DBGBUS_RBBM_CFG, 0x100, },
+ { A5XX_RBBM_DBGBUS_VBIF_CX, 0x100, },
+ { A5XX_RBBM_DBGBUS_GPC, 0x100, },
+ { A5XX_RBBM_DBGBUS_LARC, 0x100, },
+ { A5XX_RBBM_DBGBUS_HLSQ_SPTP, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_RB_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_CCU_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_A2D_RAS_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_VFD_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_SP_3, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_0, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_1, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_2, 0x100, },
+ { A5XX_RBBM_DBGBUS_TPL1_3, 0x100, },
+};
+
+/*
+ * The shader blocks are read from the HLSQ aperture - each one has its own
+ * identifier for the aperture read. 'size' is the number of dwords to read
+ * per bank.
+ */
+static const struct {
+ enum a5xx_shader_blocks id;
+ u32 size;
+} a5xx_shader_blocks[] = {
+ {A5XX_TP_W_MEMOBJ, 0x200},
+ {A5XX_TP_W_MIPMAP_BASE, 0x3C0},
+ {A5XX_TP_W_SAMPLER_TAG, 0x40},
+ {A5XX_TP_S_3D_SAMPLER, 0x80},
+ {A5XX_TP_S_3D_SAMPLER_TAG, 0x20},
+ {A5XX_TP_S_CS_SAMPLER, 0x40},
+ {A5XX_TP_S_CS_SAMPLER_TAG, 0x10},
+ {A5XX_SP_W_CONST, 0x800},
+ {A5XX_SP_W_CB_SIZE, 0x30},
+ {A5XX_SP_W_CB_BASE, 0xF0},
+ {A5XX_SP_W_STATE, 0x1},
+ {A5XX_SP_S_3D_CONST, 0x800},
+ {A5XX_SP_S_3D_CB_SIZE, 0x28},
+ {A5XX_SP_S_3D_UAV_SIZE, 0x80},
+ {A5XX_SP_S_CS_CONST, 0x400},
+ {A5XX_SP_S_CS_CB_SIZE, 0x8},
+ {A5XX_SP_S_CS_UAV_SIZE, 0x80},
+ {A5XX_SP_S_3D_CONST_DIRTY, 0x12},
+ {A5XX_SP_S_3D_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_3D_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_CONST_DIRTY, 0xA},
+ {A5XX_SP_S_CS_CB_SIZE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_SIZE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB_DIRTY, 0xB},
+ {A5XX_SP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_POWER_RESTORE_RAM_TAG, 0xA},
+ {A5XX_TP_W_SAMPLER, 0x80},
+ {A5XX_TP_W_MEMOBJ_TAG, 0x40},
+ {A5XX_TP_S_3D_MEMOBJ, 0x200},
+ {A5XX_TP_S_3D_MEMOBJ_TAG, 0x20},
+ {A5XX_TP_S_CS_MEMOBJ, 0x100},
+ {A5XX_TP_S_CS_MEMOBJ_TAG, 0x10},
+ {A5XX_SP_W_INSTR, 0x800},
+ {A5XX_SP_W_UAV_SIZE, 0x80},
+ {A5XX_SP_W_UAV_BASE, 0x80},
+ {A5XX_SP_W_INST_TAG, 0x40},
+ {A5XX_SP_S_3D_INSTR, 0x800},
+ {A5XX_SP_S_3D_CB_BASE, 0xC8},
+ {A5XX_SP_S_3D_UAV_BASE, 0x80},
+ {A5XX_SP_S_CS_INSTR, 0x400},
+ {A5XX_SP_S_CS_CB_BASE, 0x28},
+ {A5XX_SP_S_CS_UAV_BASE, 0x80},
+ {A5XX_SP_S_3D_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_3D_CB_BASE_DIRTY, 0x5},
+ {A5XX_SP_S_3D_UAV_BASE_DIRTY, 0x2},
+ {A5XX_SP_S_CS_INSTR_DIRTY, 0x1},
+ {A5XX_SP_S_CS_CB_BASE_DIRTY, 0x1},
+ {A5XX_SP_S_CS_UAV_BASE_DIRTY, 0x2},
+ {A5XX_HLSQ_ICB, 0x200},
+ {A5XX_HLSQ_ICB_CB_BASE_DIRTY, 0x4},
+ {A5XX_SP_POWER_RESTORE_RAM, 0x140},
+ {A5XX_TP_POWER_RESTORE_RAM, 0x40},
+};
+
+/*
+ * The A5XX architecture has a built-in engine to asynchronously dump
+ * registers from the GPU. It is used to accelerate the copy of hundreds
+ * (thousands) of registers and as a safe way to access registers that might
+ * have secure data in them (if the GPU is in secure, the crashdumper returns
+ * bogus values for those registers). On a fully secured device the CPU will be
+ * blocked from accessing those registers directly and so the crashdump is the
+ * only way that we can access context registers and the shader banks for debug
+ * purposes.
+ *
+ * The downside of the crashdump is that it requires access to GPU accessible
+ * memory (so the VBIF and the bus and the SMMU need to be up and working) and
+ * you need enough memory to write the script for the crashdumper and to store
+ * the data that you are dumping so there is a balancing act between the work to
+ * set up a crash dumper and the value we get out of it.
+ */
+
+/*
+ * The crashdump uses a pseudo-script format to read and write registers. Each
+ * operation is two 64 bit values.
+ *
+ * READ:
+ * [qword 0] [63:00] - The absolute IOVA address target for the register value
+ * [qword 1] [63:44] - the dword address of the register offset to read
+ * [15:00] - Number of dwords to read at once
+ *
+ * WRITE:
+ * [qword 0] [31:0] 32 bit value to write to the register
+ * [qword 1] [63:44] - the dword address of the register offset to write
+ * [21:21] - set 1 to write
+ * [15:00] - Number of dwords to write (usually 1)
+ *
+ * At the bottom of the script, write quadword zeros to trigger the end.
+ */
+struct crashdump {
+ struct drm_gem_object *bo; /* backing GEM object */
+ void *ptr; /* kernel mapping of the BO */
+ u64 iova; /* GPU address of the BO */
+ u32 index; /* current byte offset into the script area */
+};
+
+/* The BO is split into a script area followed by a data area */
+#define CRASHDUMP_BO_SIZE (SZ_1M)
+#define CRASHDUMP_SCRIPT_SIZE (256 * SZ_1K)
+#define CRASHDUMP_DATA_SIZE (CRASHDUMP_BO_SIZE - CRASHDUMP_SCRIPT_SIZE)
+
+/* Allocate and map the crashdumper BO; returns 0 or a negative errno */
+static int crashdump_init(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ int ret = 0;
+
+ crashdump->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ CRASHDUMP_BO_SIZE, MSM_BO_UNCACHED,
+ gpu->aspace, &crashdump->bo, &crashdump->iova);
+ if (IS_ERR(crashdump->ptr)) {
+ ret = PTR_ERR(crashdump->ptr);
+ /* NULL the pointer so later helpers can detect the failure */
+ crashdump->ptr = NULL;
+ }
+
+ return ret;
+}
+
+/*
+ * Point the CP at the script and kick off the crashdumper, then spin
+ * until bit 2 of CNTL is set (presumably the "done" flag - spin_until
+ * returns non-zero on timeout).
+ */
+static int crashdump_run(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ /* Refuse to run with no buffer or an empty script */
+ if (!crashdump->ptr || !crashdump->index)
+ return -EINVAL;
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ lower_32_bits(crashdump->iova));
+ gpu_write(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_HI,
+ upper_32_bits(crashdump->iova));
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return spin_until(gpu_read(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL) & 0x04);
+}
+
+/* Release the crashdumper BO and clear the bookkeeping structure */
+static void crashdump_destroy(struct msm_gpu *gpu, struct crashdump *crashdump)
+{
+ if (!crashdump->bo)
+ return;
+
+ if (crashdump->iova)
+ msm_gem_put_iova(crashdump->bo, gpu->aspace);
+
+ drm_gem_object_unreference(crashdump->bo);
+
+ memset(crashdump, 0, sizeof(*crashdump));
+}
+
+/* Append a register-write opcode to the crashdump script */
+static inline void CRASHDUMP_SCRIPT_WRITE(struct crashdump *crashdump,
+ u32 reg, u32 val)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ /* Silently drop the op if the script area would overflow */
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ /* This is the value to write */
+ ptr[0] = (u64) val;
+
+ /*
+ * This triggers a write to the specified register. 1 is the size of
+ * the write in dwords
+ */
+ ptr[1] = (((u64) reg) << 44) | (1 << 21) | 1;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+/*
+ * Append a register-read opcode to the crashdump script: read 'count'
+ * dwords starting at 'reg' into the data area at byte 'offset'
+ */
+static inline void CRASHDUMP_SCRIPT_READ(struct crashdump *crashdump,
+ u32 reg, u32 count, u32 offset)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ /* Bounds-check both the script area and the destination data area */
+ if (WARN_ON(crashdump->index + (2 * sizeof(u64))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ if (WARN_ON(offset + (count * sizeof(u32)) >= CRASHDUMP_DATA_SIZE))
+ return;
+
+ ptr[0] = (u64) crashdump->iova + CRASHDUMP_SCRIPT_SIZE + offset;
+ ptr[1] = (((u64) reg) << 44) | count;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+/* Return a CPU pointer into the data area, or NULL if out of bounds */
+static inline void *CRASHDUMP_DATA_PTR(struct crashdump *crashdump, u32 offset)
+{
+ if (WARN_ON(!crashdump->ptr || offset >= CRASHDUMP_DATA_SIZE))
+ return NULL;
+
+ return crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset;
+}
+
+/* Read one dword from the data area (oopses if offset is out of bounds) */
+static inline u32 CRASHDUMP_DATA_READ(struct crashdump *crashdump, u32 offset)
+{
+ return *((u32 *) CRASHDUMP_DATA_PTR(crashdump, offset));
+}
+
+/* Rewind the script so a new sequence of opcodes can be built */
+static inline void CRASHDUMP_RESET(struct crashdump *crashdump)
+{
+ crashdump->index = 0;
+}
+
+/* Terminate the script with a zero quadword pair (end-of-script marker) */
+static inline void CRASHDUMP_END(struct crashdump *crashdump)
+{
+ u64 *ptr = crashdump->ptr + crashdump->index;
+
+ if (WARN_ON((crashdump->index + (2 * sizeof(u64)))
+ >= CRASHDUMP_SCRIPT_SIZE))
+ return;
+
+ ptr[0] = 0;
+ ptr[1] = 0;
+
+ crashdump->index += 2 * sizeof(u64);
+}
+
+/*
+ * Emit script ops that select an HLSQ aperture statetype/bank and read
+ * 'count' dwords through it into the data area at 'offset'. Returns the
+ * number of data-area bytes consumed.
+ */
+static u32 _crashdump_read_hlsq_aperture(struct crashdump *crashdump,
+ u32 offset, u32 statetype, u32 bank,
+ u32 count)
+{
+ CRASHDUMP_SCRIPT_WRITE(crashdump, REG_A5XX_HLSQ_DBG_READ_SEL,
+ A5XX_HLSQ_DBG_READ_SEL_STATETYPE(statetype) | bank);
+
+ CRASHDUMP_SCRIPT_READ(crashdump, REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE,
+ count, offset);
+
+ return count * sizeof(u32);
+}
+
+/*
+ * Copy one crashdumped register group from the data area into the
+ * snapshot. Returns the number of data-area bytes consumed.
+ */
+static u32 _copy_registers(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 reg, u32 count,
+ u32 offset)
+{
+ int i;
+ u32 *ptr = (u32 *) (crashdump->ptr + CRASHDUMP_SCRIPT_SIZE + offset);
+ /*
+ * Write the offset of the first register of the group and the number of
+ * registers in the group
+ */
+ SNAPSHOT_WRITE_U32(snapshot, ((count << 16) | reg));
+
+ /* Followed by each register value in the group */
+ for (i = 0; i < count; i++)
+ SNAPSHOT_WRITE_U32(snapshot, ptr[i]);
+
+ return count * sizeof(u32);
+}
+
+/*
+ * Return the number of registers in each register group from
+ * adreno_gpu->registers (pairs of inclusive start/end offsets)
+ */
+static inline u32 REG_COUNT(const unsigned int *ptr)
+{
+ return (ptr[1] - ptr[0]) + 1;
+}
+
+/*
+ * Capture what registers we can from the CPU in case the crashdumper is
+ * unavailable or broken. This will omit the SP, TP and HLSQ registers, but
+ * you'll get everything else and that ain't bad
+ */
+static void a5xx_snapshot_registers_cpu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_regs header;
+ u32 regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * Before we write the section we need to figure out how big our data
+ * section will be
+ */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ regcount += REG_COUNT(&(adreno_gpu->registers[i]));
+ groups++;
+ }
+
+ header.count = groups;
+
+ /*
+ * We need one dword for each group and then one dword for each register
+ * value in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ regcount + groups))
+ return;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 count = REG_COUNT(&(adreno_gpu->registers[i]));
+ u32 reg = adreno_gpu->registers[i];
+ int j;
+
+ /* Write the offset and count for the group */
+ SNAPSHOT_WRITE_U32(snapshot, (count << 16) | reg);
+
+ /* Write each value in the group */
+ for (j = 0; j < count; j++)
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, reg++));
+ }
+}
+
+/*
+ * Snapshot the GPU registers: everything CPU-accessible first, then the
+ * HLSQ/SP/TP aperture registers through the crashdumper (if available)
+ */
+static void a5xx_snapshot_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_regs header;
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0, regcount = 0, groups = 0;
+ int i;
+
+ /*
+ * First snapshot all the registers that we can from the CPU. Do this
+ * because the crashdumper has a tendency to "taint" the value of some
+ * of the registers (because the GPU implements the crashdumper) so we
+ * only want to use the crash dump facility if we have to
+ */
+ a5xx_snapshot_registers_cpu(gpu, snapshot);
+
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* HLSQ and context registers behind the aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 count = a5xx_hlsq_aperture_regs[i].count;
+
+ offset += _crashdump_read_hlsq_aperture(crashdump, offset,
+ a5xx_hlsq_aperture_regs[i].type, 0, count);
+ regcount += count;
+
+ groups++;
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ /* If the dump failed we still have the CPU-read section above */
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ header.count = groups;
+
+ /*
+ * The size of the data will be one dword for each "group" of registers,
+ * and then one dword for each of the registers in that group
+ */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_REGS_V2,
+ groups + regcount))
+ return;
+
+ /* Copy the registers to the snapshot */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ offset += _copy_registers(snapshot, crashdump,
+ a5xx_hlsq_aperture_regs[i].regoffset,
+ a5xx_hlsq_aperture_regs[i].count, offset);
+}
+
+/* Write one shader bank (already crashdumped at 'offset') to the snapshot */
+static void _a5xx_snapshot_shader_bank(struct msm_snapshot *snapshot,
+ struct crashdump *crashdump, u32 block, u32 bank,
+ u32 size, u32 offset)
+{
+ void *src;
+
+ struct msm_snapshot_shader header = {
+ .type = block,
+ .index = bank,
+ .size = size,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_SHADER, size))
+ return;
+
+ /* NOTE(review): if src is NULL the header was already emitted but no
+ * payload follows - confirm downstream parsers tolerate this
+ */
+ src = CRASHDUMP_DATA_PTR(crashdump, offset);
+
+ if (src)
+ SNAPSHOT_MEMCPY(snapshot, src, size * sizeof(u32));
+}
+
+/* Dump all shader blocks (4 banks each) through the crashdumper */
+static void a5xx_snapshot_shader_memory(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct crashdump *crashdump = snapshot->priv;
+ u32 offset = 0;
+ int i;
+
+ /* We can only get shader memory through the crashdump */
+ if (!crashdump)
+ return;
+
+ CRASHDUMP_RESET(crashdump);
+
+ /* For each shader block */
+ for (i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ /* For each block, dump 4 banks */
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++)
+ offset += _crashdump_read_hlsq_aperture(crashdump,
+ offset, a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size);
+ }
+
+ CRASHDUMP_END(crashdump);
+
+ /* If the crashdump fails we can't get shader memory any other way */
+ if (crashdump_run(gpu, crashdump))
+ return;
+
+ /* Each bank of each shader gets its own snapshot section */
+ for (offset = 0, i = 0; i < ARRAY_SIZE(a5xx_shader_blocks); i++) {
+ int j;
+
+ /* Walk the data area in the same order it was dumped above */
+ for (j = 0; j < A5XX_NR_SHADER_BANKS; j++) {
+ _a5xx_snapshot_shader_bank(snapshot, crashdump,
+ a5xx_shader_blocks[i].id, j,
+ a5xx_shader_blocks[i].size, offset);
+ offset += a5xx_shader_blocks[i].size * sizeof(u32);
+ }
+ }
+}
+
+#define A5XX_NUM_AXI_ARB_BLOCKS 2
+#define A5XX_NUM_XIN_BLOCKS 4
+/* Total dwords produced by the three VBIF test-bus loops below */
+#define VBIF_DATA_SIZE ((16 * A5XX_NUM_AXI_ARB_BLOCKS) + \
+ (18 * A5XX_NUM_XIN_BLOCKS) + (12 * A5XX_NUM_XIN_BLOCKS))
+
+/* Dump the VBIF test-bus state (AXI arbiter and XIN blocks) */
+static void a5xx_snapshot_debugbus_vbif(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = A5XX_RBBM_DBGBUS_VBIF,
+ .count = VBIF_DATA_SIZE,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ VBIF_DATA_SIZE))
+ return;
+
+ /* Force the test bus clock on so the reads below are valid */
+ gpu_rmw(gpu, REG_A5XX_VBIF_CLKON, A5XX_VBIF_CLKON_FORCE_ON_TESTBUS,
+ A5XX_VBIF_CLKON_FORCE_ON_TESTBUS);
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 0);
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS_OUT_CTRL,
+ A5XX_VBIF_TEST_BUS_OUT_CTRL_TEST_BUS_CTRL_EN);
+
+ /* 16 dwords per AXI arbiter block */
+ for (i = 0; i < A5XX_NUM_AXI_ARB_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << (i + 16));
+ for (j = 0; j < 16; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ /* 18 dwords per XIN block on test bus 2 */
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL0, 1 << i);
+ for (j = 0; j < 18; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS2_CTRL1,
+ A5XX_VBIF_TEST_BUS2_CTRL1_TEST_BUS2_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+ /* 12 dwords per XIN block on test bus 1 */
+ for (i = 0; i < A5XX_NUM_XIN_BLOCKS; i++) {
+ int j;
+
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL0, 1 << i);
+ for (j = 0; j < 12; j++) {
+ gpu_write(gpu, REG_A5XX_VBIF_TEST_BUS1_CTRL1,
+ A5XX_VBIF_TEST_BUS1_CTRL1_TEST_BUS1_DATA_SEL(j));
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_VBIF_TEST_BUS_OUT));
+ }
+ }
+
+}
+
+/* Dump 'count' 64-bit entries from one debugbus block into the snapshot */
+static void a5xx_snapshot_debugbus_block(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 block, u32 count)
+{
+ int i;
+ struct msm_snapshot_debugbus header = {
+ .id = block,
+ .count = count * 2, /* Each value is 2 dwords */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUGBUS,
+ (count * 2)))
+ return;
+
+ for (i = 0; i < count; i++) {
+ u32 reg = A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_INDEX(i) |
+ A5XX_RBBM_CFG_DBGBUS_SEL_A_PING_BLK_SEL(block);
+
+ /* Program the same block/index selection into all four ports */
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_A, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_B, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_C, reg);
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_SEL_D, reg);
+
+ /* Each debugbus entry is a quad word */
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu,
+ REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF2));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_RBBM_CFG_DBGBUS_TRACE_BUF1));
+ }
+}
+
+/* Dump every debugbus block, then the VBIF test bus */
+static void a5xx_snapshot_debugbus(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+
+ gpu_write(gpu, REG_A5XX_RBBM_CFG_DBGBUS_CNTLM,
+ A5XX_RBBM_CFG_DBGBUS_CNTLM_ENABLE(0xF));
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_debugbus_blocks); i++)
+ a5xx_snapshot_debugbus_block(gpu, snapshot,
+ a5xx_debugbus_blocks[i].id,
+ a5xx_debugbus_blocks[i].count);
+
+ /* VBIF is special and not in a good way */
+ a5xx_snapshot_debugbus_vbif(gpu, snapshot);
+}
+
+/* Dump the 64 two-dword entries of the CP MERCIU queue */
+static void a5xx_snapshot_cp_merciu(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ unsigned int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MERCIU,
+ .size = 64 << 1, /* Data size is 2 dwords per entry */
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64 << 1))
+ return;
+
+ /* The debug address auto-increments as the data registers are read */
+ gpu_write(gpu, REG_A5XX_CP_MERCIU_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++) {
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_1));
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MERCIU_DBG_DATA_2));
+ }
+}
+
+/* Dump the 512-dword CP ROQ buffer */
+static void a5xx_snapshot_cp_roq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_ROQ,
+ .size = 512,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 512))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_ROQ_DBG_ADDR, 0);
+ for (i = 0; i < 512; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_ROQ_DBG_DATA));
+}
+
+/* Dump the 64-dword CP ME queue */
+static void a5xx_snapshot_cp_meq(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ int i;
+ struct msm_snapshot_debug header = {
+ .type = SNAPSHOT_DEBUG_CP_MEQ,
+ .size = 64,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_DEBUG, 64))
+ return;
+
+ gpu_write(gpu, REG_A5XX_CP_MEQ_DBG_ADDR, 0);
+ for (i = 0; i < 64; i++)
+ SNAPSHOT_WRITE_U32(snapshot,
+ gpu_read(gpu, REG_A5XX_CP_MEQ_DBG_DATA));
+}
+
+/*
+ * Dump 'count' values from an indexed register pair: write index i to
+ * 'addr', then read the corresponding value from 'data'
+ */
+static void a5xx_snapshot_indexed_registers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, u32 addr, u32 data,
+ u32 count)
+{
+ unsigned int i;
+ struct msm_snapshot_indexed_regs header = {
+ .index_reg = addr,
+ .data_reg = data,
+ .start = 0,
+ .count = count,
+ };
+
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_INDEXED_REGS,
+ count))
+ return;
+
+ for (i = 0; i < count; i++) {
+ gpu_write(gpu, addr, i);
+ SNAPSHOT_WRITE_U32(snapshot, gpu_read(gpu, data));
+ }
+}
+
+/* Dump the per-ring preemption records (only when preemption is enabled) */
+static void a5xx_snapshot_preemption(struct msm_gpu *gpu, struct msm_snapshot
+ *snapshot)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
+ struct msm_snapshot_gpu_object header = {
+ .type = SNAPSHOT_GPU_OBJECT_GLOBAL,
+ .size = A5XX_PREEMPT_RECORD_SIZE >> 2,
+ .pt_base = 0,
+ };
+ int index;
+
+ /* No records exist when preemption is disabled */
+ if (gpu->nr_rings <= 1)
+ return;
+
+ for (index = 0; index < gpu->nr_rings; index++) {
+
+ header.gpuaddr = a5xx_gpu->preempt_iova[index];
+
+ if (!SNAPSHOT_HEADER(snapshot, header,
+ SNAPSHOT_SECTION_GPU_OBJECT_V2,
+ A5XX_PREEMPT_RECORD_SIZE >> 2))
+ return;
+
+ SNAPSHOT_MEMCPY(snapshot, a5xx_gpu->preempt[index],
+ A5XX_PREEMPT_RECORD_SIZE);
+ }
+}
+
+/*
+ * Top-level A5XX snapshot: capture registers, shader banks, debugbus,
+ * CP internal state and preemption records. Hardware clock gating is
+ * disabled for the duration so register reads are accurate.
+ */
+int a5xx_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct crashdump crashdump = { 0 };
+
+ /* If the crashdumper can't be set up, priv stays NULL and the
+ * aperture/shader sections silently skip themselves
+ */
+ if (!crashdump_init(gpu, &crashdump))
+ snapshot->priv = &crashdump;
+
+ /* To accurately read all registers, disable hardware clock gating */
+ a5xx_set_hwcg(gpu, false);
+
+ /* Kick it up to the generic level */
+ adreno_snapshot(gpu, snapshot);
+
+ /* Read the GPU registers */
+ a5xx_snapshot_registers(gpu, snapshot);
+
+ /* Read the shader memory banks */
+ a5xx_snapshot_shader_memory(gpu, snapshot);
+
+ /* Read the debugbus registers */
+ a5xx_snapshot_debugbus(gpu, snapshot);
+
+ /* PFP data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_STAT_ADDR, REG_A5XX_CP_PFP_STAT_DATA, 36);
+
+ /* ME data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_STAT_ADDR, REG_A5XX_CP_ME_STAT_DATA, 29);
+
+ /* DRAW_STATE data */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_DRAW_STATE_ADDR, REG_A5XX_CP_DRAW_STATE_DATA,
+ 256);
+
+ /* ME cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_ME_UCODE_DBG_ADDR, REG_A5XX_CP_ME_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* PFP cache */
+ a5xx_snapshot_indexed_registers(gpu, snapshot,
+ REG_A5XX_CP_PFP_UCODE_DBG_ADDR, REG_A5XX_CP_PFP_UCODE_DBG_DATA,
+ 0x53F);
+
+ /* ME queue */
+ a5xx_snapshot_cp_meq(gpu, snapshot);
+
+ /* CP ROQ */
+ a5xx_snapshot_cp_roq(gpu, snapshot);
+
+ /* CP MERCIU */
+ a5xx_snapshot_cp_merciu(gpu, snapshot);
+
+ /* Preemption records */
+ a5xx_snapshot_preemption(gpu, snapshot);
+
+ crashdump_destroy(gpu, &crashdump);
+ snapshot->priv = NULL;
+
+ /* Re-enable HWCG */
+ a5xx_set_hwcg(gpu, true);
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index c304468cf2bd..1cf84479e447 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -119,6 +121,25 @@ enum adreno_rb_copy_control_mode {
RB_COPY_DEPTH_STENCIL = 5,
};
+enum a3xx_rop_code {
+ ROP_CLEAR = 0,
+ ROP_NOR = 1,
+ ROP_AND_INVERTED = 2,
+ ROP_COPY_INVERTED = 3,
+ ROP_AND_REVERSE = 4,
+ ROP_INVERT = 5,
+ ROP_XOR = 6,
+ ROP_NAND = 7,
+ ROP_AND = 8,
+ ROP_EQUIV = 9,
+ ROP_NOOP = 10,
+ ROP_OR_INVERTED = 11,
+ ROP_COPY = 12,
+ ROP_OR_REVERSE = 13,
+ ROP_OR = 14,
+ ROP_SET = 15,
+};
+
enum a3xx_render_mode {
RB_RENDERING_PASS = 0,
RB_TILING_PASS = 1,
@@ -154,6 +175,14 @@ enum a3xx_color_swap {
XYZW = 3,
};
+enum a3xx_rb_blend_opcode {
+ BLEND_DST_PLUS_SRC = 0,
+ BLEND_SRC_MINUS_DST = 1,
+ BLEND_DST_MINUS_SRC = 2,
+ BLEND_MIN_DST_SRC = 3,
+ BLEND_MAX_DST_SRC = 4,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 1ea2df524fac..4ecc3ad762ef 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -19,10 +19,6 @@
#include "adreno_gpu.h"
-#if defined(DOWNSTREAM_CONFIG_MSM_BUS_SCALING) && !defined(CONFIG_OF)
-# include <mach/kgsl.h>
-#endif
-
#define ANY_ID 0xff
bool hang_debug = false;
@@ -31,6 +27,7 @@ module_param_named(hang_debug, hang_debug, bool, 0600);
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
static const struct adreno_info gpulist[] = {
{
@@ -73,6 +70,30 @@ static const struct adreno_info gpulist[] = {
.pfpfw = "a420_pfp.fw",
.gmem = (SZ_1M + SZ_512K),
.init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(4, 3, 0, ANY_ID),
+ .revn = 430,
+ .name = "A430",
+ .pm4fw = "a420_pm4.fw",
+ .pfpfw = "a420_pfp.fw",
+ .gmem = (SZ_1M + SZ_512K),
+ .init = a4xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 3, 0, ANY_ID),
+ .revn = 530,
+ .name = "A530",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
+ }, {
+ .rev = ADRENO_REV(5, 4, 0, ANY_ID),
+ .revn = 540,
+ .name = "A540",
+ .pm4fw = "a530_pm4.fw",
+ .pfpfw = "a530_pfp.fw",
+ .gmem = SZ_1M,
+ .init = a5xx_gpu_init,
},
};
@@ -82,6 +103,8 @@ MODULE_FIRMWARE("a330_pm4.fw");
MODULE_FIRMWARE("a330_pfp.fw");
MODULE_FIRMWARE("a420_pm4.fw");
MODULE_FIRMWARE("a420_pfp.fw");
+MODULE_FIRMWARE("a530_pm4.fw");
+MODULE_FIRMWARE("a530_pfp.fw");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -141,17 +164,17 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
if (gpu) {
int ret;
- mutex_lock(&dev->struct_mutex);
- gpu->funcs->pm_resume(gpu);
- mutex_unlock(&dev->struct_mutex);
- ret = gpu->funcs->hw_init(gpu);
+
+ pm_runtime_get_sync(&pdev->dev);
+ ret = msm_gpu_hw_init(gpu);
+ pm_runtime_put_sync_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
+ mutex_lock(&dev->struct_mutex);
+ gpu->funcs->pm_suspend(gpu);
+ mutex_unlock(&dev->struct_mutex);
gpu->funcs->destroy(gpu);
gpu = NULL;
- } else {
- /* give inactive pm a chance to kick in: */
- msm_gpu_retire(gpu);
}
}
@@ -168,12 +191,16 @@ static void set_gpu_pdev(struct drm_device *dev,
static int adreno_bind(struct device *dev, struct device *master, void *data)
{
static struct adreno_platform_config config = {};
-#ifdef CONFIG_OF
- struct device_node *child, *node = dev->of_node;
- u32 val;
+ uint32_t val = 0;
int ret;
- ret = of_property_read_u32(node, "qcom,chipid", &val);
+ /*
+ * Read the chip ID from the device tree at bind time - we use this
+ * information to load the correct functions. All the rest of the
+ * (extensive) device tree probing should happen in the GPU specific
+ * code
+ */
+ ret = of_property_read_u32(dev->of_node, "qcom,chipid", &val);
if (ret) {
dev_err(dev, "could not find chipid: %d\n", ret);
return ret;
@@ -182,76 +209,6 @@ static int adreno_bind(struct device *dev, struct device *master, void *data)
config.rev = ADRENO_REV((val >> 24) & 0xff,
(val >> 16) & 0xff, (val >> 8) & 0xff, val & 0xff);
- /* find clock rates: */
- config.fast_rate = 0;
- config.slow_rate = ~0;
- for_each_child_of_node(node, child) {
- if (of_device_is_compatible(child, "qcom,gpu-pwrlevels")) {
- struct device_node *pwrlvl;
- for_each_child_of_node(child, pwrlvl) {
- ret = of_property_read_u32(pwrlvl, "qcom,gpu-freq", &val);
- if (ret) {
- dev_err(dev, "could not find gpu-freq: %d\n", ret);
- return ret;
- }
- config.fast_rate = max(config.fast_rate, val);
- config.slow_rate = min(config.slow_rate, val);
- }
- }
- }
-
- if (!config.fast_rate) {
- dev_err(dev, "could not find clk rates\n");
- return -ENXIO;
- }
-
-#else
- struct kgsl_device_platform_data *pdata = dev->platform_data;
- uint32_t version = socinfo_get_version();
- if (cpu_is_apq8064ab()) {
- config.fast_rate = 450000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
- config.rev = ADRENO_REV(3, 2, 1, 0);
- } else if (cpu_is_apq8064()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MAJOR(version) == 2)
- config.rev = ADRENO_REV(3, 2, 0, 2);
- else if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 1))
- config.rev = ADRENO_REV(3, 2, 0, 1);
- else
- config.rev = ADRENO_REV(3, 2, 0, 0);
-
- } else if (cpu_is_msm8960ab()) {
- config.fast_rate = 400000000;
- config.slow_rate = 320000000;
- config.bus_freq = 4;
-
- if (SOCINFO_VERSION_MINOR(version) == 0)
- config.rev = ADRENO_REV(3, 2, 1, 0);
- else
- config.rev = ADRENO_REV(3, 2, 1, 1);
-
- } else if (cpu_is_msm8930()) {
- config.fast_rate = 400000000;
- config.slow_rate = 27000000;
- config.bus_freq = 3;
-
- if ((SOCINFO_VERSION_MAJOR(version) == 1) &&
- (SOCINFO_VERSION_MINOR(version) == 2))
- config.rev = ADRENO_REV(3, 0, 5, 2);
- else
- config.rev = ADRENO_REV(3, 0, 5, 0);
-
- }
-# ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- config.bus_scale_table = pdata->bus_scale_table;
-# endif
-#endif
dev->platform_data = &config;
set_gpu_pdev(dev_get_drvdata(master), to_platform_device(dev));
return 0;
@@ -286,12 +243,35 @@ static const struct of_device_id dt_match[] = {
{}
};
+#ifdef CONFIG_PM
+static int adreno_resume(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_resume(gpu);
+}
+
+static int adreno_suspend(struct device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_gpu *gpu = platform_get_drvdata(pdev);
+
+ return gpu->funcs->pm_suspend(gpu);
+}
+#endif
+
+static const struct dev_pm_ops adreno_pm_ops = {
+ SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
+};
+
static struct platform_driver adreno_driver = {
.probe = adreno_probe,
.remove = adreno_remove,
.driver = {
.name = "adreno",
.of_match_table = dt_match,
+ .pm = &adreno_pm_ops,
},
};
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index b66ffd44ff26..16b10b608855 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -17,12 +17,12 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/utsname.h>
#include "adreno_gpu.h"
+#include "msm_snapshot.h"
#include "msm_gem.h"
#include "msm_mmu.h"
-#define RB_SIZE SZ_32K
-#define RB_BLKSIZE 16
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
{
@@ -35,123 +35,167 @@ int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value)
case MSM_PARAM_GMEM_SIZE:
*value = adreno_gpu->gmem;
return 0;
+ case MSM_PARAM_GMEM_BASE:
+ *value = 0x100000;
+ return 0;
case MSM_PARAM_CHIP_ID:
*value = adreno_gpu->rev.patchid |
(adreno_gpu->rev.minor << 8) |
(adreno_gpu->rev.major << 16) |
(adreno_gpu->rev.core << 24);
return 0;
+ case MSM_PARAM_MAX_FREQ:
+ *value = gpu->gpufreq[gpu->active_level];
+ return 0;
+ case MSM_PARAM_TIMESTAMP:
+ if (adreno_gpu->funcs->get_timestamp) {
+ int ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ ret = adreno_gpu->funcs->get_timestamp(gpu, value);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+
+ return ret;
+ }
+ return -EINVAL;
+ case MSM_PARAM_NR_RINGS:
+ *value = gpu->nr_rings;
+ return 0;
+ case MSM_PARAM_GPU_HANG_TIMEOUT:
+ *value = DRM_MSM_HANGCHECK_PERIOD;
+ return 0;
default:
DBG("%s: invalid param: %u", gpu->name, param);
return -EINVAL;
}
}
-#define rbmemptr(adreno_gpu, member) \
- ((adreno_gpu)->memptrs_iova + offsetof(struct adreno_rbmemptrs, member))
-
int adreno_hw_init(struct msm_gpu *gpu)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- int ret;
+ int i;
DBG("%s", gpu->name);
- ret = msm_gem_get_iova(gpu->rb->bo, gpu->id, &gpu->rb_iova);
- if (ret) {
- gpu->rb_iova = 0;
- dev_err(gpu->dev->dev, "could not map ringbuffer: %d\n", ret);
- return ret;
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ int ret = msm_gem_get_iova(ring->bo, gpu->aspace,
+ &ring->iova);
+ if (ret) {
+ ring->iova = 0;
+ dev_err(gpu->dev->dev,
+ "could not map ringbuffer %d: %d\n", i, ret);
+ return ret;
+ }
+
+ /* reset ringbuffer(s): */
+ /* No need for a lock here, nobody else is peeking in */
+ ring->cur = ring->start;
+ ring->next = ring->start;
+
+ /* reset completed fence seqno, discard anything pending: */
+ ring->memptrs->fence = adreno_submitted_fence(gpu, ring);
+ ring->memptrs->rptr = 0;
}
- /* Setup REG_CP_RB_CNTL: */
+ /*
+ * Setup REG_CP_RB_CNTL. The same value is used across targets (with
+ * the exception of A430 that disables the RPTR shadow) - the calculation
+ * for the ringbuffer size and block size is moved to msm_gpu.h for the
+ * pre-processor to deal with and the A430 variant is ORed in here
+ */
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_CNTL,
- /* size is log2(quad-words): */
- AXXX_CP_RB_CNTL_BUFSZ(ilog2(gpu->rb->size / 8)) |
- AXXX_CP_RB_CNTL_BLKSZ(ilog2(RB_BLKSIZE / 8)));
+ MSM_GPU_RB_CNTL_DEFAULT |
+ (adreno_is_a430(adreno_gpu) ? AXXX_CP_RB_CNTL_NO_UPDATE : 0));
- /* Setup ringbuffer address: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_BASE, gpu->rb_iova);
- adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
- rbmemptr(adreno_gpu, rptr));
+ /* Setup ringbuffer address - use ringbuffer[0] for GPU init */
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI, gpu->rb[0]->iova);
- /* Setup scratch/timestamp: */
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_ADDR,
- rbmemptr(adreno_gpu, fence));
-
- adreno_gpu_write(adreno_gpu, REG_ADRENO_SCRATCH_UMSK, 0x1);
+ adreno_gpu_write64(adreno_gpu, REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI, rbmemptr(gpu->rb[0], rptr));
return 0;
}
-static uint32_t get_wptr(struct msm_ringbuffer *ring)
+/* Use this helper to read rptr, since a430 doesn't update rptr in memory */
+static uint32_t get_rptr(struct adreno_gpu *adreno_gpu,
+ struct msm_ringbuffer *ring)
{
- return ring->cur - ring->start;
+ if (adreno_is_a430(adreno_gpu)) {
+ /*
+ * If index is anything but 0 this will probably break horribly,
+ * but I think that we have enough infrastructure in place to
+ * ensure that it won't be. If not then this is why your
+ * a430 stopped working.
+ */
+ return ring->memptrs->rptr =
+ adreno_gpu_read(adreno_gpu, REG_ADRENO_CP_RB_RPTR);
+ }
+
+ return ring->memptrs->rptr;
}
-uint32_t adreno_last_fence(struct msm_gpu *gpu)
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- return adreno_gpu->memptrs->fence;
+ return gpu->rb[0];
+}
+
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring)
+{
+ if (!ring)
+ return 0;
+
+ return ring->submitted_fence;
}
void adreno_recover(struct msm_gpu *gpu)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
struct drm_device *dev = gpu->dev;
int ret;
- gpu->funcs->pm_suspend(gpu);
-
- /* reset ringbuffer: */
- gpu->rb->cur = gpu->rb->start;
+ /*
+ * XXX pm-runtime?? we *need* the device to be off after this
+ * so maybe continuing to call ->pm_suspend/resume() is better?
+ */
- /* reset completed fence seqno, just discard anything pending: */
- adreno_gpu->memptrs->fence = gpu->submitted_fence;
- adreno_gpu->memptrs->rptr = 0;
- adreno_gpu->memptrs->wptr = 0;
+ gpu->funcs->pm_suspend(gpu);
gpu->funcs->pm_resume(gpu);
- ret = gpu->funcs->hw_init(gpu);
+
+ ret = msm_gpu_hw_init(gpu);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
/* hmm, oh well? */
}
}
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- struct msm_drm_private *priv = gpu->dev->dev_private;
- struct msm_ringbuffer *ring = gpu->rb;
- unsigned i, ibs = 0;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+ unsigned i;
for (i = 0; i < submit->nr_cmds; i++) {
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
/* ignore IB-targets */
break;
+ case MSM_SUBMIT_CMD_PROFILE_BUF:
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
- /* ignore if there has not been a ctx switch: */
- if (priv->lastctx == ctx)
break;
case MSM_SUBMIT_CMD_BUF:
- OUT_PKT3(ring, CP_INDIRECT_BUFFER_PFD, 2);
- OUT_RING(ring, submit->cmd[i].iova);
+ OUT_PKT3(ring, adreno_is_a430(adreno_gpu) ?
+ CP_INDIRECT_BUFFER_PFE : CP_INDIRECT_BUFFER_PFD, 2);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
OUT_RING(ring, submit->cmd[i].size);
- ibs++;
+ OUT_PKT2(ring);
break;
}
}
- /* on a320, at least, we seem to need to pad things out to an
- * even number of qwords to avoid issue w/ CP hanging on wrap-
- * around:
- */
- if (ibs % 2)
- OUT_PKT2(ring);
-
OUT_PKT0(ring, REG_AXXX_CP_SCRATCH_REG2, 1);
OUT_RING(ring, submit->fence);
@@ -169,7 +213,7 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
OUT_PKT3(ring, CP_EVENT_WRITE, 3);
OUT_RING(ring, CACHE_FLUSH_TS);
- OUT_RING(ring, rbmemptr(adreno_gpu, fence));
+ OUT_RING(ring, rbmemptr(ring, fence));
OUT_RING(ring, submit->fence);
/* we could maybe be clever and only CP_COND_EXEC the interrupt: */
@@ -196,22 +240,23 @@ int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
}
#endif
- gpu->funcs->flush(gpu);
-
- return 0;
+ gpu->funcs->flush(gpu, ring);
}
-void adreno_flush(struct msm_gpu *gpu)
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
uint32_t wptr;
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
/*
- * Mask wptr value that we calculate to fit in the HW range. This is
+ * Mask the wptr value that we calculate to fit in the HW range. This is
* to account for the possibility that the last command fit exactly into
* the ringbuffer and rb->next hasn't wrapped to zero yet
*/
- wptr = get_wptr(gpu->rb) & ((gpu->rb->size / 4) - 1);
+ wptr = get_wptr(ring);
/* ensure writes to ringbuffer have hit system memory: */
mb();
@@ -219,22 +264,27 @@ void adreno_flush(struct msm_gpu *gpu)
adreno_gpu_write(adreno_gpu, REG_ADRENO_CP_RB_WPTR, wptr);
}
-void adreno_idle(struct msm_gpu *gpu)
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t wptr = get_wptr(gpu->rb);
+ uint32_t wptr = get_wptr(ring);
/* wait for CP to drain ringbuffer: */
- if (spin_until(adreno_gpu->memptrs->rptr == wptr))
- DRM_ERROR("%s: timeout waiting to drain ringbuffer!\n", gpu->name);
+ if (!spin_until(get_rptr(adreno_gpu, ring) == wptr))
+ return true;
/* TODO maybe we need to reset GPU here to recover from hang? */
+ DRM_ERROR("%s: timeout waiting to drain ringbuffer %d rptr/wptr = %X/%X\n",
+ gpu->name, ring->id, get_rptr(adreno_gpu, ring), wptr);
+
+ return false;
}
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
@@ -242,13 +292,18 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- seq_printf(m, "fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- seq_printf(m, "rptr: %d\n", adreno_gpu->memptrs->rptr);
- seq_printf(m, "wptr: %d\n", adreno_gpu->memptrs->wptr);
- seq_printf(m, "rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
- gpu->funcs->pm_resume(gpu);
+ seq_printf(m, "rb %d: fence: %d/%d\n", i,
+ ring->memptrs->fence,
+ adreno_submitted_fence(gpu, ring));
+
+ seq_printf(m, " rptr: %d\n",
+ get_rptr(adreno_gpu, ring));
+ seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ }
/* dump these out in a form that can be parsed by demsm: */
seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
@@ -262,8 +317,6 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
}
}
-
- gpu->funcs->pm_suspend(gpu);
}
#endif
@@ -275,22 +328,29 @@ void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
*/
void adreno_dump_info(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_ringbuffer *ring;
int i;
- printk("revision: %d (%d.%d.%d.%d)\n",
+ dev_err(dev->dev, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- printk("fence: %d/%d\n", adreno_gpu->memptrs->fence,
- gpu->submitted_fence);
- printk("rptr: %d\n", adreno_gpu->memptrs->rptr);
- printk("wptr: %d\n", adreno_gpu->memptrs->wptr);
- printk("rb wptr: %d\n", get_wptr(gpu->rb));
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
+
+ dev_err(dev->dev, " ring %d: fence %d/%d rptr/wptr %x/%x\n", i,
+ ring->memptrs->fence,
+ adreno_submitted_fence(gpu, ring),
+ get_rptr(adreno_gpu, ring),
+ get_wptr(ring));
+ }
for (i = 0; i < 8; i++) {
- printk("CP_SCRATCH_REG%d: %u\n", i,
+ pr_err("CP_SCRATCH_REG%d: %u\n", i,
gpu_read(gpu, REG_AXXX_CP_SCRATCH_REG0 + i));
}
}
@@ -315,32 +375,144 @@ void adreno_dump(struct msm_gpu *gpu)
}
}
-static uint32_t ring_freewords(struct msm_gpu *gpu)
+static uint32_t ring_freewords(struct msm_ringbuffer *ring)
{
- struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
- uint32_t size = gpu->rb->size / 4;
- uint32_t wptr = get_wptr(gpu->rb);
- uint32_t rptr = adreno_gpu->memptrs->rptr;
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(ring->gpu);
+ uint32_t size = MSM_GPU_RINGBUFFER_SZ >> 2;
+ /* Use ring->next to calculate free size */
+ uint32_t wptr = ring->next - ring->start;
+ uint32_t rptr = get_rptr(adreno_gpu, ring);
return (rptr + (size - 1) - wptr) % size;
}
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords)
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords)
{
- if (spin_until(ring_freewords(gpu) >= ndwords))
- DRM_ERROR("%s: timeout waiting for ringbuffer space\n", gpu->name);
+ if (spin_until(ring_freewords(ring) >= ndwords))
+ DRM_ERROR("%s: timeout waiting for space in ringbuffer %d\n",
+ ring->gpu->name, ring->id);
}
-static const char *iommu_ports[] = {
- "gfx3d_user", "gfx3d_priv",
- "gfx3d1_user", "gfx3d1_priv",
+/* Read the set of powerlevels */
+static int _adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *node)
+{
+ struct device_node *child;
+
+ for_each_child_of_node(node, child) {
+ unsigned int index;
+
+ if (of_property_read_u32(child, "reg", &index))
+ return -EINVAL;
+
+ if (index >= ARRAY_SIZE(gpu->gpufreq))
+ continue;
+
+ gpu->nr_pwrlevels = max(gpu->nr_pwrlevels, index + 1);
+
+ of_property_read_u32(child, "qcom,gpu-freq",
+ &gpu->gpufreq[index]);
+ of_property_read_u32(child, "qcom,bus-freq",
+ &gpu->busfreq[index]);
+ }
+
+ DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
+ gpu->gpufreq[gpu->active_level],
+ gpu->gpufreq[gpu->nr_pwrlevels - 1],
+ gpu->busfreq[gpu->active_level]);
+
+ return 0;
+}
+
+/*
+ * Escape valve for targets that don't define the binning nodes. Get the
+ * first powerlevel node and parse it
+ */
+static int adreno_get_legacy_pwrlevels(struct msm_gpu *gpu,
+ struct device_node *parent)
+{
+ struct device_node *child;
+
+ child = of_find_node_by_name(parent, "qcom,gpu-pwrlevels");
+ if (child)
+ return _adreno_get_pwrlevels(gpu, child);
+
+ dev_err(gpu->dev->dev, "Unable to parse any powerlevels\n");
+ return -EINVAL;
+}
+
+/* Get the powerlevels for the target */
+static int adreno_get_pwrlevels(struct msm_gpu *gpu, struct device_node *parent)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node, *child;
+
+ gpu->active_level = 1;
+
+ /* The device tree will tell us the best clock to initialize with */
+ of_property_read_u32(parent, "qcom,initial-pwrlevel",
+ &gpu->active_level);
+
+ if (gpu->active_level >= ARRAY_SIZE(gpu->gpufreq))
+ gpu->active_level = 1;
+
+ /* See if the target has defined a number of power bins */
+ node = of_find_node_by_name(parent, "qcom,gpu-pwrlevel-bins");
+ if (!node) {
+ /* If not look for the qcom,gpu-pwrlevels node */
+ return adreno_get_legacy_pwrlevels(gpu, parent);
+ }
+
+ for_each_child_of_node(node, child) {
+ unsigned int bin;
+
+ if (of_property_read_u32(child, "qcom,speed-bin", &bin))
+ continue;
+
+ /*
+ * If the bin matches the bin specified by the fuses, then we
+ * have a winner - parse it
+ */
+ if (adreno_gpu->speed_bin == bin)
+ return _adreno_get_pwrlevels(gpu, child);
+ }
+
+ return -ENODEV;
+}
+
+static const struct {
+ const char *str;
+ uint32_t flag;
+} quirks[] = {
+ { "qcom,gpu-quirk-two-pass-use-wfi", ADRENO_QUIRK_TWO_PASS_USE_WFI },
+ { "qcom,gpu-quirk-fault-detect-mask", ADRENO_QUIRK_FAULT_DETECT_MASK },
};
+/* Parse the statistics from the device tree */
+static int adreno_of_parse(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct device_node *node = pdev->dev.of_node;
+ int i, ret;
+
+ /* Probe the powerlevels */
+ ret = adreno_get_pwrlevels(gpu, node);
+ if (ret)
+ return ret;
+
+ /* Check to see if any quirks were specified in the device tree */
+ for (i = 0; i < ARRAY_SIZE(quirks); i++)
+ if (of_property_read_bool(node, quirks[i].str))
+ adreno_gpu->quirks |= quirks[i].flag;
+
+ return 0;
+}
+
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *adreno_gpu, const struct adreno_gpu_funcs *funcs)
+ struct adreno_gpu *adreno_gpu,
+ const struct adreno_gpu_funcs *funcs,
+ struct msm_gpu_config *gpu_config)
{
struct adreno_platform_config *config = pdev->dev.platform_data;
struct msm_gpu *gpu = &adreno_gpu->base;
- struct msm_mmu *mmu;
int ret;
adreno_gpu->funcs = funcs;
@@ -349,22 +521,18 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_gpu->revn = adreno_gpu->info->revn;
adreno_gpu->rev = config->rev;
- gpu->fast_rate = config->fast_rate;
- gpu->slow_rate = config->slow_rate;
- gpu->bus_freq = config->bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- gpu->bus_scale_table = config->bus_scale_table;
-#endif
-
- DBG("fast_rate=%u, slow_rate=%u, bus_freq=%u",
- gpu->fast_rate, gpu->slow_rate, gpu->bus_freq);
+ /* Get the rest of the target configuration from the device tree */
+ adreno_of_parse(pdev, gpu);
ret = msm_gpu_init(drm, pdev, &adreno_gpu->base, &funcs->base,
- adreno_gpu->info->name, "kgsl_3d0_reg_memory", "kgsl_3d0_irq",
- RB_SIZE);
+ adreno_gpu->info->name, gpu_config);
if (ret)
return ret;
+ pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_use_autosuspend(&pdev->dev);
+ pm_runtime_enable(&pdev->dev);
+
ret = request_firmware(&adreno_gpu->pm4, adreno_gpu->info->pm4fw, drm->dev);
if (ret) {
dev_err(drm->dev, "failed to load %s PM4 firmware: %d\n",
@@ -373,55 +541,150 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
}
ret = request_firmware(&adreno_gpu->pfp, adreno_gpu->info->pfpfw, drm->dev);
- if (ret) {
+ if (ret)
dev_err(drm->dev, "failed to load %s PFP firmware: %d\n",
adreno_gpu->info->pfpfw, ret);
- return ret;
- }
- mmu = gpu->mmu;
- if (mmu) {
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
- if (ret)
- return ret;
- }
+ return ret;
+}
- mutex_lock(&drm->struct_mutex);
- adreno_gpu->memptrs_bo = msm_gem_new(drm, sizeof(*adreno_gpu->memptrs),
- MSM_BO_UNCACHED);
- mutex_unlock(&drm->struct_mutex);
- if (IS_ERR(adreno_gpu->memptrs_bo)) {
- ret = PTR_ERR(adreno_gpu->memptrs_bo);
- adreno_gpu->memptrs_bo = NULL;
- dev_err(drm->dev, "could not allocate memptrs: %d\n", ret);
- return ret;
- }
+void adreno_gpu_cleanup(struct adreno_gpu *adreno_gpu)
+{
+ struct msm_gpu *gpu = &adreno_gpu->base;
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
- adreno_gpu->memptrs = msm_gem_vaddr(adreno_gpu->memptrs_bo);
- if (!adreno_gpu->memptrs) {
- dev_err(drm->dev, "could not vmap memptrs\n");
- return -ENOMEM;
- }
+ release_firmware(adreno_gpu->pm4);
+ release_firmware(adreno_gpu->pfp);
- ret = msm_gem_get_iova(adreno_gpu->memptrs_bo, gpu->id,
- &adreno_gpu->memptrs_iova);
- if (ret) {
- dev_err(drm->dev, "could not map memptrs: %d\n", ret);
- return ret;
+ pm_runtime_disable(&pdev->dev);
+ msm_gpu_cleanup(gpu);
+}
+
+static void adreno_snapshot_os(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_snapshot_linux header;
+
+ memset(&header, 0, sizeof(header));
+
+ header.osid = SNAPSHOT_OS_LINUX_V3;
+ strlcpy(header.release, utsname()->release, sizeof(header.release));
+ strlcpy(header.version, utsname()->version, sizeof(header.version));
+
+ header.seconds = get_seconds();
+ header.ctxtcount = 0;
+
+ SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_OS, 0);
+}
+
+static void adreno_snapshot_ringbuffer(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot, struct msm_ringbuffer *ring)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct msm_snapshot_ringbuffer header;
+ unsigned int i, end = 0;
+ unsigned int *data = ring->start;
+
+ memset(&header, 0, sizeof(header));
+
+ /*
+ * We only want to copy the active contents of each ring, so find the
+ * last valid entry in the ringbuffer
+ */
+ for (i = 0; i < MSM_GPU_RINGBUFFER_SZ >> 2; i++) {
+ if (data[i])
+ end = i;
}
+ /* The dump always starts at 0 */
+ header.start = 0;
+ header.end = end;
+
+ /* This is the number of dwords being dumped */
+ header.count = end + 1;
+
+ /* This is the size of the actual ringbuffer */
+ header.rbsize = MSM_GPU_RINGBUFFER_SZ >> 2;
+
+ header.id = ring->id;
+ header.gpuaddr = ring->iova;
+ header.rptr = get_rptr(adreno_gpu, ring);
+ header.wptr = get_wptr(ring);
+ header.timestamp_queued = adreno_submitted_fence(gpu, ring);
+ header.timestamp_retired = ring->memptrs->fence;
+
+ /* Write the header even if the ringbuffer data is empty */
+ if (!SNAPSHOT_HEADER(snapshot, header, SNAPSHOT_SECTION_RB_V2,
+ header.count))
+ return;
+
+ SNAPSHOT_MEMCPY(snapshot, ring->start, header.count * sizeof(u32));
+}
+
+static void adreno_snapshot_ringbuffers(struct msm_gpu *gpu,
+ struct msm_snapshot *snapshot)
+{
+ struct msm_ringbuffer *ring;
+ int i;
+
+ /* Write a new section for each ringbuffer */
+ FOR_EACH_RING(gpu, ring, i)
+ adreno_snapshot_ringbuffer(gpu, snapshot, ring);
+}
+
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ adreno_snapshot_os(gpu, snapshot);
+ adreno_snapshot_ringbuffers(gpu, snapshot);
+}
+
+/* Return the group struct associated with the counter id */
+
+static struct adreno_counter_group *get_counter_group(struct msm_gpu *gpu,
+ u32 groupid)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+
+ if (!adreno_gpu->counter_groups)
+ return ERR_PTR(-ENODEV);
+
+ if (groupid >= adreno_gpu->nr_counter_groups)
+ return ERR_PTR(-ENODEV);
+
+ return (struct adreno_counter_group *)
+ adreno_gpu->counter_groups[groupid];
+}
+
+int adreno_get_counter(struct msm_gpu *gpu, u32 groupid, u32 countable,
+ u32 *lo, u32 *hi)
+{
+ struct adreno_counter_group *group =
+ get_counter_group(gpu, groupid);
+
+ if (!IS_ERR_OR_NULL(group) && group->funcs.get)
+ return group->funcs.get(gpu, group, countable, lo, hi);
+
+ return -ENODEV;
+}
+
+u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
+{
+ struct adreno_counter_group *group =
+ get_counter_group(gpu, groupid);
+
+ if (!IS_ERR_OR_NULL(group) && group->funcs.read)
+ return group->funcs.read(gpu, group, counterid);
+
return 0;
}
-void adreno_gpu_cleanup(struct adreno_gpu *gpu)
+void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid)
{
- if (gpu->memptrs_bo) {
- if (gpu->memptrs_iova)
- msm_gem_put_iova(gpu->memptrs_bo, gpu->base.id);
- drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
- }
- release_firmware(gpu->pm4);
- release_firmware(gpu->pfp);
- msm_gpu_cleanup(&gpu->base);
+ struct adreno_counter_group *group =
+ get_counter_group(gpu, groupid);
+
+ if (!IS_ERR_OR_NULL(group) && group->funcs.put)
+ group->funcs.put(gpu, group, counterid);
}
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index 0a312e9d3afd..462352f7fc9a 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -2,7 +2,7 @@
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016-2017 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published by
@@ -24,10 +24,17 @@
#include "msm_gpu.h"
+/* arrg, somehow fb.h is getting pulled in: */
+#undef ROP_COPY
+#undef ROP_XOR
+
#include "adreno_common.xml.h"
#include "adreno_pm4.xml.h"
#define REG_ADRENO_DEFINE(_offset, _reg) [_offset] = (_reg) + 1
+#define REG_SKIP ~0
+#define REG_ADRENO_SKIP(_offset) [_offset] = REG_SKIP
+
/**
* adreno_regs: List of registers that are used in across all
* 3D devices. Each device type has different offset value for the same
@@ -35,73 +42,21 @@
* and are indexed by the enumeration values defined in this enum
*/
enum adreno_regs {
- REG_ADRENO_CP_DEBUG,
- REG_ADRENO_CP_ME_RAM_WADDR,
- REG_ADRENO_CP_ME_RAM_DATA,
- REG_ADRENO_CP_PFP_UCODE_DATA,
- REG_ADRENO_CP_PFP_UCODE_ADDR,
- REG_ADRENO_CP_WFI_PEND_CTR,
REG_ADRENO_CP_RB_BASE,
+ REG_ADRENO_CP_RB_BASE_HI,
REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_ADRENO_CP_RB_RPTR_ADDR_HI,
REG_ADRENO_CP_RB_RPTR,
REG_ADRENO_CP_RB_WPTR,
- REG_ADRENO_CP_PROTECT_CTRL,
- REG_ADRENO_CP_ME_CNTL,
REG_ADRENO_CP_RB_CNTL,
- REG_ADRENO_CP_IB1_BASE,
- REG_ADRENO_CP_IB1_BUFSZ,
- REG_ADRENO_CP_IB2_BASE,
- REG_ADRENO_CP_IB2_BUFSZ,
- REG_ADRENO_CP_TIMESTAMP,
- REG_ADRENO_CP_ME_RAM_RADDR,
- REG_ADRENO_CP_ROQ_ADDR,
- REG_ADRENO_CP_ROQ_DATA,
- REG_ADRENO_CP_MERCIU_ADDR,
- REG_ADRENO_CP_MERCIU_DATA,
- REG_ADRENO_CP_MERCIU_DATA2,
- REG_ADRENO_CP_MEQ_ADDR,
- REG_ADRENO_CP_MEQ_DATA,
- REG_ADRENO_CP_HW_FAULT,
- REG_ADRENO_CP_PROTECT_STATUS,
- REG_ADRENO_SCRATCH_ADDR,
- REG_ADRENO_SCRATCH_UMSK,
- REG_ADRENO_SCRATCH_REG2,
- REG_ADRENO_RBBM_STATUS,
- REG_ADRENO_RBBM_PERFCTR_CTL,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD1,
- REG_ADRENO_RBBM_PERFCTR_LOAD_CMD2,
- REG_ADRENO_RBBM_PERFCTR_PWR_1_LO,
- REG_ADRENO_RBBM_INT_0_MASK,
- REG_ADRENO_RBBM_INT_0_STATUS,
- REG_ADRENO_RBBM_AHB_ERROR_STATUS,
- REG_ADRENO_RBBM_PM_OVERRIDE2,
- REG_ADRENO_RBBM_AHB_CMD,
- REG_ADRENO_RBBM_INT_CLEAR_CMD,
- REG_ADRENO_RBBM_SW_RESET_CMD,
- REG_ADRENO_RBBM_CLOCK_CTL,
- REG_ADRENO_RBBM_AHB_ME_SPLIT_STATUS,
- REG_ADRENO_RBBM_AHB_PFP_SPLIT_STATUS,
- REG_ADRENO_VPC_DEBUG_RAM_SEL,
- REG_ADRENO_VPC_DEBUG_RAM_READ,
- REG_ADRENO_VSC_SIZE_ADDRESS,
- REG_ADRENO_VFD_CONTROL_0,
- REG_ADRENO_VFD_INDEX_MAX,
- REG_ADRENO_SP_VS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_FS_PVT_MEM_ADDR_REG,
- REG_ADRENO_SP_VS_OBJ_START_REG,
- REG_ADRENO_SP_FS_OBJ_START_REG,
- REG_ADRENO_PA_SC_AA_CONFIG,
- REG_ADRENO_SQ_GPR_MANAGEMENT,
- REG_ADRENO_SQ_INST_STORE_MANAGMENT,
- REG_ADRENO_TP0_CHICKEN,
- REG_ADRENO_RBBM_RBBM_CTL,
- REG_ADRENO_UCHE_INVALIDATE0,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_LO,
- REG_ADRENO_RBBM_PERFCTR_LOAD_VALUE_HI,
REG_ADRENO_REGISTER_MAX,
};
+enum adreno_quirks {
+ ADRENO_QUIRK_TWO_PASS_USE_WFI = 1,
+ ADRENO_QUIRK_FAULT_DETECT_MASK = 2,
+};
+
struct adreno_rev {
uint8_t core;
uint8_t major;
@@ -114,6 +69,7 @@ struct adreno_rev {
struct adreno_gpu_funcs {
struct msm_gpu_funcs base;
+ int (*get_timestamp)(struct msm_gpu *gpu, uint64_t *value);
};
struct adreno_info {
@@ -127,10 +83,34 @@ struct adreno_info {
const struct adreno_info *adreno_info(struct adreno_rev rev);
-struct adreno_rbmemptrs {
- volatile uint32_t rptr;
- volatile uint32_t wptr;
- volatile uint32_t fence;
+struct adreno_counter {
+ u32 lo;
+ u32 hi;
+ u32 sel;
+ int load_bit;
+ u32 countable;
+ u32 refcount;
+ u64 value;
+};
+
+struct adreno_counter_group {
+ struct adreno_counter *counters;
+ size_t nr_counters;
+ spinlock_t lock;
+ struct {
+ int (*get)(struct msm_gpu *,
+ struct adreno_counter_group *, u32, u32 *, u32 *);
+ void (*enable)(struct msm_gpu *,
+ struct adreno_counter_group *, int, bool);
+ u64 (*read)(struct msm_gpu *,
+ struct adreno_counter_group *, int);
+ void (*put)(struct msm_gpu *,
+ struct adreno_counter_group *, int);
+ void (*save)(struct msm_gpu *,
+ struct adreno_counter_group *);
+ void (*restore)(struct msm_gpu *,
+ struct adreno_counter_group *);
+ } funcs;
};
struct adreno_gpu {
@@ -147,29 +127,24 @@ struct adreno_gpu {
/* firmware: */
const struct firmware *pm4, *pfp;
- /* ringbuffer rptr/wptr: */
- // TODO should this be in msm_ringbuffer? I think it would be
- // different for z180..
- struct adreno_rbmemptrs *memptrs;
- struct drm_gem_object *memptrs_bo;
- uint32_t memptrs_iova;
-
/*
* Register offsets are different between some GPUs.
* GPU specific offsets will be exported by GPU specific
* code (a3xx_gpu.c) and stored in this common location.
*/
const unsigned int *reg_offsets;
+
+ uint32_t quirks;
+ uint32_t speed_bin;
+
+ const struct adreno_counter_group **counter_groups;
+ int nr_counter_groups;
};
#define to_adreno_gpu(x) container_of(x, struct adreno_gpu, base)
/* platform config data (ie. from DT, or pdata) */
struct adreno_platform_config {
struct adreno_rev rev;
- uint32_t fast_rate, slow_rate, bus_freq;
-#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
- struct msm_bus_scale_pdata *bus_scale_table;
-#endif
};
#define ADRENO_IDLE_TIMEOUT msecs_to_jiffies(1000)
@@ -186,6 +161,9 @@ struct adreno_platform_config {
__ret; \
})
+#define GPU_OF_NODE(_g) \
+ (((struct msm_drm_private *) \
+ ((_g)->dev->dev_private))->gpu_pdev->dev.of_node)
static inline bool adreno_is_a3xx(struct adreno_gpu *gpu)
{
@@ -228,32 +206,55 @@ static inline int adreno_is_a420(struct adreno_gpu *gpu)
return gpu->revn == 420;
}
+static inline int adreno_is_a430(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 430;
+}
+
+static inline int adreno_is_a530(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 530;
+}
+
+static inline int adreno_is_a540(struct adreno_gpu *gpu)
+{
+ return gpu->revn == 540;
+}
+
int adreno_get_param(struct msm_gpu *gpu, uint32_t param, uint64_t *value);
int adreno_hw_init(struct msm_gpu *gpu);
-uint32_t adreno_last_fence(struct msm_gpu *gpu);
+uint32_t adreno_submitted_fence(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
void adreno_recover(struct msm_gpu *gpu);
-int adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
-void adreno_flush(struct msm_gpu *gpu);
-void adreno_idle(struct msm_gpu *gpu);
+void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
+bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
#ifdef CONFIG_DEBUG_FS
void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
-void adreno_wait_ring(struct msm_gpu *gpu, uint32_t ndwords);
+void adreno_wait_ring(struct msm_ringbuffer *ring, uint32_t ndwords);
+struct msm_ringbuffer *adreno_active_ring(struct msm_gpu *gpu);
int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
- struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs);
+ struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
+ struct msm_gpu_config *config);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+void adreno_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+
+int adreno_get_counter(struct msm_gpu *gpu, u32 groupid, u32 countable,
+ u32 *lo, u32 *hi);
+u64 adreno_read_counter(struct msm_gpu *gpu, u32 groupid, int counterid);
+void adreno_put_counter(struct msm_gpu *gpu, u32 groupid, int counterid);
/* ringbuffer helpers (the parts that are adreno specific) */
static inline void
OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE0_PKT | ((cnt-1) << 16) | (regindx & 0x7FFF));
}
@@ -261,19 +262,49 @@ OUT_PKT0(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
static inline void
OUT_PKT2(struct msm_ringbuffer *ring)
{
- adreno_wait_ring(ring->gpu, 1);
+ adreno_wait_ring(ring, 1);
OUT_RING(ring, CP_TYPE2_PKT);
}
static inline void
OUT_PKT3(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
{
- adreno_wait_ring(ring->gpu, cnt+1);
+ adreno_wait_ring(ring, cnt+1);
OUT_RING(ring, CP_TYPE3_PKT | ((cnt-1) << 16) | ((opcode & 0xFF) << 8));
}
+static inline u32 PM4_PARITY(u32 val)
+{
+ return (0x9669 >> (0xF & (val ^
+ (val >> 4) ^ (val >> 8) ^ (val >> 12) ^
+ (val >> 16) ^ ((val) >> 20) ^ (val >> 24) ^
+ (val >> 28)))) & 1;
+}
+
+/* Maximum number of values that can be executed for one opcode */
+#define TYPE4_MAX_PAYLOAD 127
+
+#define PKT4(_reg, _cnt) \
+ (CP_TYPE4_PKT | ((_cnt) << 0) | (PM4_PARITY((_cnt)) << 7) | \
+ (((_reg) & 0x3FFFF) << 8) | (PM4_PARITY((_reg)) << 27))
+
+static inline void
+OUT_PKT4(struct msm_ringbuffer *ring, uint16_t regindx, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, PKT4(regindx, cnt));
+}
+
+static inline void
+OUT_PKT7(struct msm_ringbuffer *ring, uint8_t opcode, uint16_t cnt)
+{
+ adreno_wait_ring(ring, cnt + 1);
+ OUT_RING(ring, CP_TYPE7_PKT | (cnt << 0) | (PM4_PARITY(cnt) << 15) |
+ ((opcode & 0x7F) << 16) | (PM4_PARITY(opcode) << 23));
+}
+
/*
- * adreno_checkreg_off() - Checks the validity of a register enum
+ * adreno_reg_check() - Checks the validity of a register enum
* @gpu: Pointer to struct adreno_gpu
* @offset_name: The register enum that is checked
*/
@@ -284,6 +315,16 @@ static inline bool adreno_reg_check(struct adreno_gpu *gpu,
!gpu->reg_offsets[offset_name]) {
BUG();
}
+
+ /*
+ * REG_SKIP is a special value that tell us that the register in
+ * question isn't implemented on target but don't trigger a BUG(). This
+ * is used to cleanly implement adreno_gpu_write64() and
+ * adreno_gpu_read64() in a generic fashion
+ */
+ if (gpu->reg_offsets[offset_name] == REG_SKIP)
+ return false;
+
return true;
}
@@ -305,4 +346,40 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
gpu_write(&gpu->base, reg - 1, data);
}
+static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
+ enum adreno_regs lo, enum adreno_regs hi, u64 data)
+{
+ adreno_gpu_write(gpu, lo, lower_32_bits(data));
+ adreno_gpu_write(gpu, hi, upper_32_bits(data));
+}
+
+static inline uint32_t get_wptr(struct msm_ringbuffer *ring)
+{
+ return (ring->cur - ring->start) % (MSM_GPU_RINGBUFFER_SZ >> 2);
+}
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ *
+ * The register base needs to be a multiple of the length. If it is not, the
+ * hardware will quietly mask off the bits for you and shift the size. For
+ * example, if you intend the protection to start at 0x07 for a length of 4
+ * (0x07-0x0A) the hardware will actually protect (0x04-0x07) which might
+ * expose registers you intended to protect!
+ */
+#define ADRENO_PROTECT_RW(_reg, _len) \
+ ((1 << 30) | (1 << 29) | \
+ ((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define ADRENO_PROTECT_RDONLY(_reg, _len) \
+	((1 << 29) | \
+	((ilog2((_len)) & 0x1F) << 24) | (((_reg) << 2) & 0xFFFFF))
+
#endif /* __ADRENO_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index a22fef569499..9911a181f9c2 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 398 bytes, from 2015-09-24 17:25:31)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 32901 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 10755 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 14968 bytes, from 2015-05-20 20:12:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 67771 bytes, from 2015-09-14 20:46:55)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 63970 bytes, from 2015-09-14 20:50:12)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2015-09-24 17:30:00)
-
-Copyright (C) 2013-2015 by the following authors:
+- ./adreno.xml ( 431 bytes, from 2016-10-24 21:12:27)
+- ./freedreno_copyright.xml ( 1572 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a2xx.xml ( 32901 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_common.xml ( 12025 bytes, from 2016-10-24 21:12:27)
+- ./adreno/adreno_pm4.xml ( 19684 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a3xx.xml ( 83840 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a4xx.xml ( 110708 bytes, from 2016-10-24 21:12:27)
+- ./adreno/a5xx.xml ( 81207 bytes, from 2016-10-26 19:36:59)
+- ./adreno/ocmem.xml ( 1773 bytes, from 2016-10-24 21:12:27)
+
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -57,6 +59,7 @@ enum vgt_event_type {
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
+ STAT_EVENT = 16,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
@@ -81,7 +84,6 @@ enum pc_di_primtype {
DI_PT_LINESTRIP_ADJ = 11,
DI_PT_TRI_ADJ = 12,
DI_PT_TRISTRIP_ADJ = 13,
- DI_PT_PATCHES = 34,
};
enum pc_di_src_sel {
@@ -109,11 +111,15 @@ enum adreno_pm4_packet_type {
CP_TYPE1_PKT = 0x40000000,
CP_TYPE2_PKT = 0x80000000,
CP_TYPE3_PKT = 0xc0000000,
+ CP_TYPE4_PKT = 0x40000000,
+ CP_TYPE7_PKT = 0x70000000,
};
enum adreno_pm4_type3_packets {
CP_ME_INIT = 72,
CP_NOP = 16,
+ CP_PREEMPT_ENABLE = 28,
+ CP_PREEMPT_TOKEN = 30,
CP_INDIRECT_BUFFER = 63,
CP_INDIRECT_BUFFER_PFD = 55,
CP_WAIT_FOR_IDLE = 38,
@@ -162,6 +168,7 @@ enum adreno_pm4_type3_packets {
CP_TEST_TWO_MEMS = 113,
CP_REG_WR_NO_CTXT = 120,
CP_RECORD_PFP_TIMESTAMP = 17,
+ CP_SET_SECURE_MODE = 102,
CP_WAIT_FOR_ME = 19,
CP_SET_DRAW_STATE = 67,
CP_DRAW_INDX_OFFSET = 56,
@@ -172,6 +179,26 @@ enum adreno_pm4_type3_packets {
CP_UNKNOWN_1A = 26,
CP_UNKNOWN_4E = 78,
CP_WIDE_REG_WRITE = 116,
+ CP_SCRATCH_TO_REG = 77,
+ CP_REG_TO_SCRATCH = 74,
+ CP_WAIT_MEM_WRITES = 18,
+ CP_COND_REG_EXEC = 71,
+ CP_MEM_TO_REG = 66,
+ CP_EXEC_CS = 51,
+ CP_PERFCOUNTER_ACTION = 80,
+ CP_SMMU_TABLE_UPDATE = 83,
+ CP_CONTEXT_REG_BUNCH = 92,
+ CP_YIELD_ENABLE = 28,
+ CP_SKIP_IB2_ENABLE_GLOBAL = 29,
+ CP_SKIP_IB2_ENABLE_LOCAL = 35,
+ CP_SET_SUBDRAW_SIZE = 53,
+ CP_SET_VISIBILITY_OVERRIDE = 100,
+ CP_PREEMPT_ENABLE_GLOBAL = 105,
+ CP_PREEMPT_ENABLE_LOCAL = 106,
+ CP_CONTEXT_SWITCH_YIELD = 107,
+ CP_SET_RENDER_MODE = 108,
+ CP_COMPUTE_CHECKPOINT = 110,
+ CP_MEM_TO_MEM = 115,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -190,6 +217,7 @@ enum adreno_state_block {
SB_VERT_SHADER = 4,
SB_GEOM_SHADER = 5,
SB_FRAG_SHADER = 6,
+ SB_COMPUTE_SHADER = 7,
};
enum adreno_state_type {
@@ -199,7 +227,11 @@ enum adreno_state_type {
enum adreno_state_src {
SS_DIRECT = 0,
+ SS_INVALID_ALL_IC = 2,
+ SS_INVALID_PART_IC = 3,
SS_INDIRECT = 4,
+ SS_INDIRECT_TCM = 5,
+ SS_INDIRECT_STM = 6,
};
enum a4xx_index_size {
@@ -227,7 +259,7 @@ static inline uint32_t CP_LOAD_STATE_0_STATE_BLOCK(enum adreno_state_block val)
{
return ((val) << CP_LOAD_STATE_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE_0_STATE_BLOCK__MASK;
}
-#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0x7fc00000
+#define CP_LOAD_STATE_0_NUM_UNIT__MASK 0xffc00000
#define CP_LOAD_STATE_0_NUM_UNIT__SHIFT 22
static inline uint32_t CP_LOAD_STATE_0_NUM_UNIT(uint32_t val)
{
@@ -379,7 +411,12 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT(enum pc_di_src_sel va
{
return ((val) << CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__SHIFT) & CP_DRAW_INDX_OFFSET_0_SOURCE_SELECT__MASK;
}
-#define CP_DRAW_INDX_OFFSET_0_TESSELLATE 0x00000100
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK 0x00000300
+#define CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT 8
+static inline uint32_t CP_DRAW_INDX_OFFSET_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << CP_DRAW_INDX_OFFSET_0_VIS_CULL__SHIFT) & CP_DRAW_INDX_OFFSET_0_VIS_CULL__MASK;
+}
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__MASK 0x00000c00
#define CP_DRAW_INDX_OFFSET_0_INDEX_SIZE__SHIFT 10
static inline uint32_t CP_DRAW_INDX_OFFSET_0_INDEX_SIZE(enum a4xx_index_size val)
@@ -499,5 +536,102 @@ static inline uint32_t CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS(uint32_t val)
return ((val) << CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__SHIFT) & CP_SET_BIN_DATA_1_BIN_SIZE_ADDRESS__MASK;
}
+#define REG_CP_REG_TO_MEM_0 0x00000000
+#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
+#define CP_REG_TO_MEM_0_REG__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_0_REG(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_REG__SHIFT) & CP_REG_TO_MEM_0_REG__MASK;
+}
+#define CP_REG_TO_MEM_0_CNT__MASK 0x3ff80000
+#define CP_REG_TO_MEM_0_CNT__SHIFT 19
+static inline uint32_t CP_REG_TO_MEM_0_CNT(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_0_CNT__SHIFT) & CP_REG_TO_MEM_0_CNT__MASK;
+}
+#define CP_REG_TO_MEM_0_64B 0x40000000
+#define CP_REG_TO_MEM_0_ACCUMULATE 0x80000000
+
+#define REG_CP_REG_TO_MEM_1 0x00000001
+#define CP_REG_TO_MEM_1_DEST__MASK 0xffffffff
+#define CP_REG_TO_MEM_1_DEST__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_0 0x00000000
+
+#define REG_CP_DISPATCH_COMPUTE_1 0x00000001
+#define CP_DISPATCH_COMPUTE_1_X__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_1_X__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_1_X(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_1_X__SHIFT) & CP_DISPATCH_COMPUTE_1_X__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_2 0x00000002
+#define CP_DISPATCH_COMPUTE_2_Y__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_2_Y__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_2_Y(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_2_Y__SHIFT) & CP_DISPATCH_COMPUTE_2_Y__MASK;
+}
+
+#define REG_CP_DISPATCH_COMPUTE_3 0x00000003
+#define CP_DISPATCH_COMPUTE_3_Z__MASK 0xffffffff
+#define CP_DISPATCH_COMPUTE_3_Z__SHIFT 0
+static inline uint32_t CP_DISPATCH_COMPUTE_3_Z(uint32_t val)
+{
+ return ((val) << CP_DISPATCH_COMPUTE_3_Z__SHIFT) & CP_DISPATCH_COMPUTE_3_Z__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_0 0x00000000
+
+#define REG_CP_SET_RENDER_MODE_1 0x00000001
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_1_ADDR_0_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_1_ADDR_0_LO__SHIFT) & CP_SET_RENDER_MODE_1_ADDR_0_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_2 0x00000002
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_2_ADDR_0_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_2_ADDR_0_HI__SHIFT) & CP_SET_RENDER_MODE_2_ADDR_0_HI__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_3 0x00000003
+#define CP_SET_RENDER_MODE_3_GMEM_ENABLE 0x00000010
+
+#define REG_CP_SET_RENDER_MODE_4 0x00000004
+
+#define REG_CP_SET_RENDER_MODE_5 0x00000005
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_5_ADDR_1_LEN(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_5_ADDR_1_LEN__SHIFT) & CP_SET_RENDER_MODE_5_ADDR_1_LEN__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_6 0x00000006
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_6_ADDR_1_LO(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_6_ADDR_1_LO__SHIFT) & CP_SET_RENDER_MODE_6_ADDR_1_LO__MASK;
+}
+
+#define REG_CP_SET_RENDER_MODE_7 0x00000007
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK 0xffffffff
+#define CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT 0
+static inline uint32_t CP_SET_RENDER_MODE_7_ADDR_1_HI(uint32_t val)
+{
+ return ((val) << CP_SET_RENDER_MODE_7_ADDR_1_HI__SHIFT) & CP_SET_RENDER_MODE_7_ADDR_1_HI__MASK;
+}
+
#endif /* ADRENO_PM4_XML */
diff --git a/drivers/gpu/drm/msm/dba_bridge.c b/drivers/gpu/drm/msm/dba_bridge.c
new file mode 100644
index 000000000000..5ae564001309
--- /dev/null
+++ b/drivers/gpu/drm/msm/dba_bridge.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <video/msm_dba.h>
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "dba_bridge.h"
+#include "sde/sde_recovery_manager.h"
+
+#undef pr_fmt
+#define pr_fmt(fmt) "dba_bridge:[%s] " fmt, __func__
+
+/**
+ * struct dba_bridge - DBA bridge information
+ * @base: drm_bridge base
+ * @client_name: Client's name who calls the init
+ * @chip_name: Bridge chip name
+ * @name: Bridge chip name
+ * @id: Bridge driver index
+ * @display: Private display handle
+ * @list: Bridge chip driver list node
+ * @ops: DBA operation container
+ * @dba_ctx: DBA context
+ * @mode: DRM mode info
+ * @hdmi_mode: HDMI or DVI mode for the sink
+ * @num_of_input_lanes: Number of input lanes in case of DSI/LVDS
+ * @pluggable: If it's pluggable
+ * @panel_count: Number of panels attached to this display
+ * @client_info: bridge chip specific information for recovery manager
+ */
+struct dba_bridge {
+ struct drm_bridge base;
+ char client_name[MSM_DBA_CLIENT_NAME_LEN];
+ char chip_name[MSM_DBA_CHIP_NAME_MAX_LEN];
+ u32 id;
+ void *display;
+ struct list_head list;
+ struct msm_dba_ops ops;
+ void *dba_ctx;
+ struct drm_display_mode mode;
+ bool hdmi_mode;
+ u32 num_of_input_lanes;
+ bool pluggable;
+ u32 panel_count;
+ bool cont_splash_enabled;
+ struct recovery_client_info client_info;
+};
+#define to_dba_bridge(x) container_of((x), struct dba_bridge, base)
+
+static int _dba_bridge_recovery_callback(int err_code,
+ struct recovery_client_info *client_info);
+
+static void _dba_bridge_cb(void *data, enum msm_dba_callback_event event)
+{
+ struct dba_bridge *d_bridge = data;
+ int chip_err;
+
+ if (!d_bridge) {
+ SDE_ERROR("Invalid data\n");
+ return;
+ }
+
+ DRM_DEBUG("event: %d\n", event);
+
+ switch (event) {
+ case MSM_DBA_CB_HPD_CONNECT:
+ DRM_DEBUG("HPD CONNECT\n");
+ break;
+ case MSM_DBA_CB_HPD_DISCONNECT:
+ DRM_DEBUG("HPD DISCONNECT\n");
+ break;
+ case MSM_DBA_CB_DDC_I2C_ERROR:
+ case MSM_DBA_CB_DDC_TIMEOUT:
+ DRM_DEBUG("DDC FAILURE\n");
+ chip_err = DBA_BRIDGE_CRITICAL_ERR + d_bridge->id;
+ sde_recovery_set_events(chip_err);
+ break;
+ default:
+ DRM_DEBUG("event:%d is not supported\n", event);
+ break;
+ }
+}
+
+static int _dba_bridge_attach(struct drm_bridge *bridge)
+{
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+ struct msm_dba_reg_info info;
+ struct recovery_client_info *client_info = &d_bridge->client_info;
+ int ret = 0;
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return -EINVAL;
+ }
+
+ memset(&info, 0, sizeof(info));
+ /* initialize DBA registration data */
+ strlcpy(info.client_name, d_bridge->client_name,
+ MSM_DBA_CLIENT_NAME_LEN);
+ strlcpy(info.chip_name, d_bridge->chip_name,
+ MSM_DBA_CHIP_NAME_MAX_LEN);
+ info.instance_id = d_bridge->id;
+ info.cb = _dba_bridge_cb;
+ info.cb_data = d_bridge;
+
+ /* register client with DBA and get device's ops*/
+ if (IS_ENABLED(CONFIG_MSM_DBA)) {
+ d_bridge->dba_ctx = msm_dba_register_client(&info,
+ &d_bridge->ops);
+ if (IS_ERR_OR_NULL(d_bridge->dba_ctx)) {
+ SDE_ERROR("dba register failed\n");
+ ret = PTR_ERR(d_bridge->dba_ctx);
+ goto error;
+ }
+ } else {
+ SDE_ERROR("DBA not enabled\n");
+ ret = -ENODEV;
+ goto error;
+ }
+
+ snprintf(client_info->name, MAX_REC_NAME_LEN, "%s_%d",
+ d_bridge->chip_name, d_bridge->id);
+
+ client_info->recovery_cb = _dba_bridge_recovery_callback;
+
+ /* Identify individual chip by different error codes */
+ client_info->err_supported[0].reported_err_code =
+ DBA_BRIDGE_CRITICAL_ERR + d_bridge->id;
+ client_info->err_supported[0].pre_err_code = 0;
+ client_info->err_supported[0].post_err_code = 0;
+ client_info->no_of_err = 1;
+ /* bridge chip context */
+ client_info->pdata = d_bridge;
+
+ ret = sde_recovery_client_register(client_info);
+ if (ret)
+ SDE_ERROR("%s recovery mgr register failed %d\n",
+ __func__, ret);
+
+ DRM_INFO("client:%s bridge:[%s:%d] attached\n",
+ d_bridge->client_name, d_bridge->chip_name, d_bridge->id);
+
+error:
+ return ret;
+}
+
+static void _dba_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct dba_bridge *d_bridge;
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+
+ d_bridge = to_dba_bridge(bridge);
+
+ /* Skip power_on calling when splash is enabled in bootloader. */
+ if ((d_bridge->ops.power_on) && (!d_bridge->cont_splash_enabled))
+ d_bridge->ops.power_on(d_bridge->dba_ctx, true, 0);
+}
+
+static void _dba_bridge_enable(struct drm_bridge *bridge)
+{
+ int rc = 0;
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+ struct msm_dba_video_cfg video_cfg;
+ struct drm_display_mode *mode;
+ struct hdmi_avi_infoframe avi_frame;
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+
+ memset(&video_cfg, 0, sizeof(video_cfg));
+ memset(&avi_frame, 0, sizeof(avi_frame));
+ mode = &d_bridge->mode;
+ video_cfg.h_active = mode->hdisplay;
+ video_cfg.v_active = mode->vdisplay;
+ video_cfg.h_front_porch = mode->hsync_start - mode->hdisplay;
+ video_cfg.v_front_porch = mode->vsync_start - mode->vdisplay;
+ video_cfg.h_back_porch = mode->htotal - mode->hsync_end;
+ video_cfg.v_back_porch = mode->vtotal - mode->vsync_end;
+ video_cfg.h_pulse_width = mode->hsync_end - mode->hsync_start;
+ video_cfg.v_pulse_width = mode->vsync_end - mode->vsync_start;
+ video_cfg.pclk_khz = mode->clock;
+ video_cfg.hdmi_mode = d_bridge->hdmi_mode;
+ video_cfg.num_of_input_lanes = d_bridge->num_of_input_lanes;
+
+ SDE_DEBUG(
+ "video=h[%d,%d,%d,%d] v[%d,%d,%d,%d] pclk=%d hdmi=%d lane=%d\n",
+ video_cfg.h_active, video_cfg.h_front_porch,
+ video_cfg.h_pulse_width, video_cfg.h_back_porch,
+ video_cfg.v_active, video_cfg.v_front_porch,
+ video_cfg.v_pulse_width, video_cfg.v_back_porch,
+ video_cfg.pclk_khz, video_cfg.hdmi_mode,
+ video_cfg.num_of_input_lanes);
+
+ rc = drm_hdmi_avi_infoframe_from_display_mode(&avi_frame, mode);
+ if (rc) {
+ SDE_ERROR("get avi frame failed ret=%d\n", rc);
+ } else {
+ video_cfg.scaninfo = avi_frame.scan_mode;
+ switch (avi_frame.picture_aspect) {
+ case HDMI_PICTURE_ASPECT_4_3:
+ video_cfg.ar = MSM_DBA_AR_4_3;
+ break;
+ case HDMI_PICTURE_ASPECT_16_9:
+ video_cfg.ar = MSM_DBA_AR_16_9;
+ break;
+ default:
+ break;
+ }
+ video_cfg.vic = avi_frame.video_code;
+ DRM_INFO("scaninfo=%d ar=%d vic=%d\n",
+ video_cfg.scaninfo, video_cfg.ar, video_cfg.vic);
+ }
+
+ /* Skip video_on calling if splash is enabled in bootloader. */
+ if ((d_bridge->ops.video_on) && (!d_bridge->cont_splash_enabled)) {
+ rc = d_bridge->ops.video_on(d_bridge->dba_ctx, true,
+ &video_cfg, 0);
+ if (rc)
+ SDE_ERROR("video on failed ret=%d\n", rc);
+ }
+}
+
+static void _dba_bridge_disable(struct drm_bridge *bridge)
+{
+ int rc = 0;
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+
+ if (d_bridge->ops.video_on) {
+ rc = d_bridge->ops.video_on(d_bridge->dba_ctx,
+ false, NULL, 0);
+ if (rc)
+ SDE_ERROR("video off failed ret=%d\n", rc);
+ }
+}
+
+static void _dba_bridge_post_disable(struct drm_bridge *bridge)
+{
+ int rc = 0;
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+ if (!bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ }
+
+ if (d_bridge->cont_splash_enabled)
+ d_bridge->cont_splash_enabled = false;
+
+ if (d_bridge->ops.power_on) {
+ rc = d_bridge->ops.power_on(d_bridge->dba_ctx, false, 0);
+ if (rc)
+ SDE_ERROR("power off failed ret=%d\n", rc);
+ }
+}
+
+static int _dba_bridge_recovery_callback(int err_code,
+ struct recovery_client_info *client_info)
+{
+ int rc = 0;
+ struct dba_bridge *d_bridge;
+
+ if (!client_info) {
+ SDE_ERROR("Invalid client info\n");
+ rc = -EINVAL;
+ return rc;
+ }
+
+ d_bridge = client_info->pdata;
+
+ err_code = err_code - d_bridge->id;
+
+ switch (err_code) {
+ case DBA_BRIDGE_CRITICAL_ERR:
+ SDE_DEBUG("%s critical bridge chip error\n", __func__);
+
+ /* Power OFF */
+ _dba_bridge_disable(&d_bridge->base);
+ _dba_bridge_post_disable(&d_bridge->base);
+
+ /* settle power rails */
+ msleep(100);
+
+ /* Power On */
+ _dba_bridge_pre_enable(&d_bridge->base);
+ _dba_bridge_enable(&d_bridge->base);
+
+ break;
+ default:
+ SDE_ERROR("%s error %d undefined\n", __func__, err_code);
+ }
+ return rc;
+}
+
+static void _dba_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+ if (!bridge || !mode || !adjusted_mode || !d_bridge) {
+ SDE_ERROR("Invalid params\n");
+ return;
+ } else if (!d_bridge->panel_count) {
+ SDE_ERROR("Panel count is 0\n");
+ return;
+ }
+
+ d_bridge->mode = *adjusted_mode;
+ /* Adjust mode according to number of panels */
+ d_bridge->mode.hdisplay /= d_bridge->panel_count;
+ d_bridge->mode.hsync_start /= d_bridge->panel_count;
+ d_bridge->mode.hsync_end /= d_bridge->panel_count;
+ d_bridge->mode.htotal /= d_bridge->panel_count;
+ d_bridge->mode.clock /= d_bridge->panel_count;
+}
+
+static bool _dba_bridge_mode_fixup(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
+{
+ bool ret = true;
+
+ if (!bridge || !mode || !adjusted_mode) {
+ SDE_ERROR("Invalid params\n");
+ return false;
+ }
+
+ return ret;
+}
+
+static const struct drm_bridge_funcs _dba_bridge_ops = {
+ .attach = _dba_bridge_attach,
+ .mode_fixup = _dba_bridge_mode_fixup,
+ .pre_enable = _dba_bridge_pre_enable,
+ .enable = _dba_bridge_enable,
+ .disable = _dba_bridge_disable,
+ .post_disable = _dba_bridge_post_disable,
+ .mode_set = _dba_bridge_mode_set,
+};
+
+struct drm_bridge *dba_bridge_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct dba_bridge_init *data)
+{
+ int rc = 0;
+ struct dba_bridge *bridge;
+ struct msm_drm_private *priv = NULL;
+
+ if (!dev || !encoder || !data) {
+ SDE_ERROR("dev=%pK or encoder=%pK or data=%pK is NULL\n",
+ dev, encoder, data);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ SDE_ERROR("Private data is not present\n");
+ rc = -EINVAL;
+ goto error;
+ }
+
+ bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+ if (!bridge) {
+ SDE_ERROR("out of memory\n");
+ rc = -ENOMEM;
+ goto error;
+ }
+
+ INIT_LIST_HEAD(&bridge->list);
+ strlcpy(bridge->client_name, data->client_name,
+ MSM_DBA_CLIENT_NAME_LEN);
+ strlcpy(bridge->chip_name, data->chip_name,
+ MSM_DBA_CHIP_NAME_MAX_LEN);
+ bridge->id = data->id;
+ bridge->display = data->display;
+ bridge->hdmi_mode = data->hdmi_mode;
+ bridge->num_of_input_lanes = data->num_of_input_lanes;
+ bridge->pluggable = data->pluggable;
+ bridge->panel_count = data->panel_count;
+ bridge->base.funcs = &_dba_bridge_ops;
+ bridge->base.encoder = encoder;
+ bridge->cont_splash_enabled = data->cont_splash_enabled;
+
+ rc = drm_bridge_attach(dev, &bridge->base);
+ if (rc) {
+ SDE_ERROR("failed to attach bridge, rc=%d\n", rc);
+ goto error_free_bridge;
+ }
+
+ if (data->precede_bridge) {
+ /* Insert current bridge */
+ bridge->base.next = data->precede_bridge->next;
+ data->precede_bridge->next = &bridge->base;
+ } else {
+ encoder->bridge = &bridge->base;
+ }
+
+ /* If early splash has enabled bridge chip in bootloader,
+ * below call should be skipped.
+ */
+ if (!bridge->pluggable && !bridge->cont_splash_enabled) {
+ if (bridge->ops.power_on)
+ bridge->ops.power_on(bridge->dba_ctx, true, 0);
+ if (bridge->ops.check_hpd)
+ bridge->ops.check_hpd(bridge->dba_ctx, 0);
+ }
+
+ return &bridge->base;
+
+error_free_bridge:
+ kfree(bridge);
+error:
+ return ERR_PTR(rc);
+}
+
+void dba_bridge_cleanup(struct drm_bridge *bridge)
+{
+ struct dba_bridge *d_bridge = to_dba_bridge(bridge);
+
+ if (!bridge)
+ return;
+
+ sde_recovery_client_unregister(d_bridge->client_info.handle);
+ d_bridge->client_info.handle = NULL;
+
+ if (IS_ENABLED(CONFIG_MSM_DBA)) {
+ if (!IS_ERR_OR_NULL(d_bridge->dba_ctx))
+ msm_dba_deregister_client(d_bridge->dba_ctx);
+ }
+
+ if (d_bridge->base.encoder)
+ d_bridge->base.encoder->bridge = NULL;
+
+ kfree(bridge);
+}
diff --git a/drivers/gpu/drm/msm/dba_bridge.h b/drivers/gpu/drm/msm/dba_bridge.h
new file mode 100644
index 000000000000..edc130f92257
--- /dev/null
+++ b/drivers/gpu/drm/msm/dba_bridge.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DBA_BRIDGE_H_
+#define _DBA_BRIDGE_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+
+/**
+ * struct dba_bridge_init - Init parameters for DBA bridge
+ * @client_name: Client's name who calls the init
+ * @chip_name: Bridge chip name
+ * @id: Bridge driver index
+ * @display: Private display handle
+ * @hdmi_mode: HDMI or DVI mode for the sink
+ * @num_of_input_lanes: Number of input lanes in case of DSI/LVDS
+ * @precede_bridge: Precede bridge chip
+ * @pluggable: If it's pluggable
+ * @panel_count: Number of panels attached to this display
+ * @cont_splash_enabled: True if the bridge chip was already enabled by the
+ *                       bootloader (continuous/early splash)
+ */
+struct dba_bridge_init {
+	const char *client_name;
+	const char *chip_name;
+	u32 id;
+	void *display;
+	bool hdmi_mode;
+	u32 num_of_input_lanes;
+	struct drm_bridge *precede_bridge;
+	bool pluggable;
+	u32 panel_count;
+	/* set when the bootloader already powered the bridge (early splash) */
+	bool cont_splash_enabled;
+};
+
+/**
+ * dba_bridge_init - Initialize the DBA bridge
+ * @dev: Pointer to drm device handle
+ * @encoder: Pointer to drm encoder handle
+ * @data: Pointer to init data
+ * Returns: pointer of struct drm_bridge
+ */
+struct drm_bridge *dba_bridge_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct dba_bridge_init *data);
+
+/**
+ * dba_bridge_cleanup - Clean up the DBA bridge
+ * @bridge: Pointer to DBA bridge handle
+ * Returns: void
+ */
+void dba_bridge_cleanup(struct drm_bridge *bridge);
+
+#endif /* _DBA_BRIDGE_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
new file mode 100644
index 000000000000..06027a963be1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-catalog:[%s] " fmt, __func__
+#include <linux/errno.h>
+
+#include "dsi_catalog.h"
+
+/**
+ * dsi_catalog_14_init() - catalog init for dsi controller v1.4
+ * @ctrl: Controller hw object whose ops table is populated.
+ *
+ * Pure one-to-one wiring of the v1.4 function table onto @ctrl->ops;
+ * no hardware access happens here.
+ */
+static void dsi_catalog_14_init(struct dsi_ctrl_hw *ctrl)
+{
+	ctrl->ops.host_setup = dsi_ctrl_hw_14_host_setup;
+	ctrl->ops.setup_lane_map = dsi_ctrl_hw_14_setup_lane_map;
+	ctrl->ops.video_engine_en = dsi_ctrl_hw_14_video_engine_en;
+	ctrl->ops.video_engine_setup = dsi_ctrl_hw_14_video_engine_setup;
+	ctrl->ops.set_video_timing = dsi_ctrl_hw_14_set_video_timing;
+	ctrl->ops.cmd_engine_setup = dsi_ctrl_hw_14_cmd_engine_setup;
+	ctrl->ops.setup_cmd_stream = dsi_ctrl_hw_14_setup_cmd_stream;
+	ctrl->ops.ctrl_en = dsi_ctrl_hw_14_ctrl_en;
+	ctrl->ops.cmd_engine_en = dsi_ctrl_hw_14_cmd_engine_en;
+	ctrl->ops.phy_sw_reset = dsi_ctrl_hw_14_phy_sw_reset;
+	ctrl->ops.soft_reset = dsi_ctrl_hw_14_soft_reset;
+	ctrl->ops.kickoff_command = dsi_ctrl_hw_14_kickoff_command;
+	ctrl->ops.kickoff_fifo_command = dsi_ctrl_hw_14_kickoff_fifo_command;
+	ctrl->ops.reset_cmd_fifo = dsi_ctrl_hw_14_reset_cmd_fifo;
+	ctrl->ops.trigger_command_dma = dsi_ctrl_hw_14_trigger_command_dma;
+	ctrl->ops.ulps_request = dsi_ctrl_hw_14_ulps_request;
+	ctrl->ops.ulps_exit = dsi_ctrl_hw_14_ulps_exit;
+	ctrl->ops.clear_ulps_request = dsi_ctrl_hw_14_clear_ulps_request;
+	ctrl->ops.get_lanes_in_ulps = dsi_ctrl_hw_14_get_lanes_in_ulps;
+	ctrl->ops.clamp_enable = dsi_ctrl_hw_14_clamp_enable;
+	ctrl->ops.clamp_disable = dsi_ctrl_hw_14_clamp_disable;
+	ctrl->ops.get_interrupt_status = dsi_ctrl_hw_14_get_interrupt_status;
+	ctrl->ops.get_error_status = dsi_ctrl_hw_14_get_error_status;
+	ctrl->ops.clear_error_status = dsi_ctrl_hw_14_clear_error_status;
+	ctrl->ops.clear_interrupt_status =
+		dsi_ctrl_hw_14_clear_interrupt_status;
+	ctrl->ops.enable_status_interrupts =
+		dsi_ctrl_hw_14_enable_status_interrupts;
+	ctrl->ops.enable_error_interrupts =
+		dsi_ctrl_hw_14_enable_error_interrupts;
+	ctrl->ops.video_test_pattern_setup =
+		dsi_ctrl_hw_14_video_test_pattern_setup;
+	ctrl->ops.cmd_test_pattern_setup =
+		dsi_ctrl_hw_14_cmd_test_pattern_setup;
+	ctrl->ops.test_pattern_enable = dsi_ctrl_hw_14_test_pattern_enable;
+	ctrl->ops.trigger_cmd_test_pattern =
+		dsi_ctrl_hw_14_trigger_cmd_test_pattern;
+	ctrl->ops.reg_dump_to_buffer = dsi_ctrl_hw_14_reg_dump_to_buffer;
+}
+
+/**
+ * dsi_catalog_20_init() - catalog init for dsi controller v2.0
+ * @ctrl: Controller hw object.
+ *
+ * Only flags C-PHY capability; no ops are installed here.
+ * NOTE(review): a v2.0 controller is left with an empty ops table —
+ * presumably the v2.0 implementation is not wired up yet; confirm callers
+ * never invoke ops for this version.
+ */
+static void dsi_catalog_20_init(struct dsi_ctrl_hw *ctrl)
+{
+	set_bit(DSI_CTRL_CPHY, ctrl->feature_map);
+}
+
+/**
+ * dsi_catalog_ctrl_setup() - populate catalog info for a dsi controller
+ * @ctrl:    Pointer to DSI controller hw object.
+ * @version: DSI controller version.
+ * @index:   DSI controller instance ID.
+ *
+ * Records the instance index, marks the features common to every supported
+ * controller revision and installs the version-specific function table.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+			   enum dsi_ctrl_version version,
+			   u32 index)
+{
+	if (version == DSI_CTRL_VERSION_UNKNOWN ||
+	    version >= DSI_CTRL_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	ctrl->index = index;
+
+	/* features shared by all supported controller revisions */
+	set_bit(DSI_CTRL_VIDEO_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_CMD_TPG, ctrl->feature_map);
+	set_bit(DSI_CTRL_VARIABLE_REFRESH_RATE, ctrl->feature_map);
+	set_bit(DSI_CTRL_DYNAMIC_REFRESH, ctrl->feature_map);
+	set_bit(DSI_CTRL_DESKEW_CALIB, ctrl->feature_map);
+	set_bit(DSI_CTRL_DPHY, ctrl->feature_map);
+
+	switch (version) {
+	case DSI_CTRL_VERSION_1_4:
+		dsi_catalog_14_init(ctrl);
+		return 0;
+	case DSI_CTRL_VERSION_2_0:
+		dsi_catalog_20_init(ctrl);
+		return 0;
+	default:
+		return -ENOTSUPP;
+	}
+}
+
+/**
+ * dsi_catalog_phy_4_0_init() - catalog init for DSI PHY v4.0
+ * @phy: PHY hw object whose ops table is populated.
+ *
+ * One-to-one wiring of the v4.0 PHY function table; no hardware access.
+ */
+static void dsi_catalog_phy_4_0_init(struct dsi_phy_hw *phy)
+{
+	phy->ops.regulator_enable = dsi_phy_hw_v4_0_regulator_enable;
+	phy->ops.regulator_disable = dsi_phy_hw_v4_0_regulator_disable;
+	phy->ops.enable = dsi_phy_hw_v4_0_enable;
+	phy->ops.disable = dsi_phy_hw_v4_0_disable;
+	phy->ops.calculate_timing_params =
+		dsi_phy_hw_v4_0_calculate_timing_params;
+}
+
+/**
+ * dsi_catalog_phy_setup() - populate catalog info for dsi phy hardware
+ * @phy:     Pointer to DSI PHY hw object.
+ * @version: DSI PHY version.
+ * @index:   DSI PHY instance ID.
+ *
+ * Records the instance index, marks common PHY features and installs the
+ * version-specific function table.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+			  enum dsi_phy_version version,
+			  u32 index)
+{
+	if (version == DSI_PHY_VERSION_UNKNOWN ||
+	    version >= DSI_PHY_VERSION_MAX) {
+		pr_err("Unsupported version: %d\n", version);
+		return -ENOTSUPP;
+	}
+
+	phy->index = index;
+	set_bit(DSI_PHY_DPHY, phy->feature_map);
+
+	/* only the 4.0 PHY has a driver implementation today;
+	 * 1.0/2.0/3.0 fall through to unsupported exactly as before
+	 */
+	if (version != DSI_PHY_VERSION_4_0)
+		return -ENOTSUPP;
+
+	dsi_catalog_phy_4_0_init(phy);
+	return 0;
+}
+
+
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
new file mode 100644
index 000000000000..98bd9b039f09
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_catalog.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CATALOG_H_
+#define _DSI_CATALOG_H_
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_phy_hw.h"
+
+/**
+ * dsi_catalog_ctrl_setup() - return catalog info for dsi controller
+ * @ctrl: Pointer to DSI controller hw object.
+ * @version: DSI controller version.
+ * @index: DSI controller instance ID.
+ *
+ * This function setups the catalog information in the dsi_ctrl_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_ctrl_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_ctrl_version version,
+ u32 index);
+
+/**
+ * dsi_catalog_phy_setup() - return catalog info for dsi phy hardware
+ * @ctrl: Pointer to DSI PHY hw object.
+ * @version: DSI PHY version.
+ * @index: DSI PHY instance ID.
+ *
+ * This function setups the catalog information in the dsi_phy_hw object.
+ *
+ * return: error code for failure and 0 for success.
+ */
+int dsi_catalog_phy_setup(struct dsi_phy_hw *phy,
+ enum dsi_phy_version version,
+ u32 index);
+
+/* Definitions for 4.0 PHY hardware driver */
+void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
+ struct dsi_phy_per_lane_cfgs *cfg);
+void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy);
+void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy);
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *cfg,
+ struct dsi_phy_per_lane_cfgs
+ *timing);
+
+/* Definitions for 1.4 controller hardware driver */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *config);
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg);
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode);
+
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg);
+
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on);
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on);
+
+void dsi_ctrl_hw_14_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
+ u32 width_in_pixels,
+ u32 h_stride,
+ u32 height_in_lines,
+ u32 vc_id);
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map);
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags);
+
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags);
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes);
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes);
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl);
+
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps);
+
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps);
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints);
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl,
+ u32 ints);
+
+u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl);
+void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors);
+void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
+ u64 errors);
+
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val);
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id);
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl, bool enable);
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id);
+ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
+ char *buf,
+ u32 size);
+#endif /* _DSI_CATALOG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c
new file mode 100644
index 000000000000..7def847f6f2a
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.c
@@ -0,0 +1,727 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/of.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dsi_clk_pwr.h"
+
+/*
+ * INC_REFCOUNT() - enable-side refcount helper (GCC statement expression,
+ * evaluates to the int rc). Calls @start_func only on the 0 -> 1 transition.
+ * NOTE(review): the refcount is incremented even when start_func fails, so
+ * a later balanced DEC will still run stop_func on a resource that never
+ * started — confirm callers depend on this before changing it.
+ */
+#define INC_REFCOUNT(s, start_func) \
+	({ \
+		int rc = 0; \
+		if ((s)->refcount == 0) { \
+			rc = start_func(s); \
+			if (rc) \
+				pr_err("failed to enable, rc = %d\n", rc); \
+		} \
+		(s)->refcount++; \
+		rc; \
+	})
+
+/*
+ * DEC_REFCOUNT() - disable-side counterpart; calls @stop_func only on the
+ * 1 -> 0 transition and complains on an unbalanced disable.
+ */
+#define DEC_REFCOUNT(s, stop_func) \
+	({ \
+		int rc = 0; \
+		if ((s)->refcount == 0) { \
+			pr_err("unbalanced refcount\n"); \
+		} else { \
+			(s)->refcount--; \
+			if ((s)->refcount == 0) { \
+				rc = stop_func(s); \
+				if (rc) \
+					pr_err("disable failed, rc=%d\n", rc); \
+			} \
+		} \
+		rc; \
+	})
+
+/*
+ * dsi_core_clk_start() - enable the DSI core clocks in dependency order
+ * (mdp_core -> iface -> bus -> core_mmss); on any failure the already
+ * enabled clocks are unwound in reverse via the goto chain below.
+ */
+static int dsi_core_clk_start(struct dsi_core_clk_info *clks)
+{
+	int rc = 0;
+
+	rc = clk_prepare_enable(clks->mdp_core_clk);
+	if (rc) {
+		pr_err("failed to enable mdp_core_clk, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = clk_prepare_enable(clks->iface_clk);
+	if (rc) {
+		pr_err("failed to enable iface_clk, rc=%d\n", rc);
+		goto error_disable_core_clk;
+	}
+
+	rc = clk_prepare_enable(clks->bus_clk);
+	if (rc) {
+		pr_err("failed to enable bus_clk, rc=%d\n", rc);
+		goto error_disable_iface_clk;
+	}
+
+	rc = clk_prepare_enable(clks->core_mmss_clk);
+	if (rc) {
+		pr_err("failed to enable core_mmss_clk, rc=%d\n", rc);
+		goto error_disable_bus_clk;
+	}
+
+	return rc;
+
+	/* unwind in strict reverse order of enabling */
+error_disable_bus_clk:
+	clk_disable_unprepare(clks->bus_clk);
+error_disable_iface_clk:
+	clk_disable_unprepare(clks->iface_clk);
+error_disable_core_clk:
+	clk_disable_unprepare(clks->mdp_core_clk);
+error:
+	return rc;
+}
+
+/* dsi_core_clk_stop() - disable core clocks in reverse of the enable order */
+static int dsi_core_clk_stop(struct dsi_core_clk_info *clks)
+{
+	clk_disable_unprepare(clks->core_mmss_clk);
+	clk_disable_unprepare(clks->bus_clk);
+	clk_disable_unprepare(clks->iface_clk);
+	clk_disable_unprepare(clks->mdp_core_clk);
+
+	return 0;
+}
+
+/*
+ * dsi_link_clk_set_rate() - push the cached esc/byte/pixel rates to the
+ * clock framework; stops at the first failure (earlier rates stay applied).
+ */
+static int dsi_link_clk_set_rate(struct dsi_link_clk_info *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_set_rate(l_clks->esc_clk, l_clks->esc_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for esc_clk rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = clk_set_rate(l_clks->byte_clk, l_clks->byte_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for byte_clk rc = %d\n", rc);
+		goto error;
+	}
+
+	rc = clk_set_rate(l_clks->pixel_clk, l_clks->pixel_clk_rate);
+	if (rc) {
+		pr_err("clk_set_rate failed for pixel_clk rc = %d\n", rc);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+/*
+ * dsi_link_clk_prepare() - prepare esc, byte and pixel clocks in that order;
+ * on failure the already prepared clocks are unprepared in reverse.
+ */
+static int dsi_link_clk_prepare(struct dsi_link_clk_info *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_prepare(l_clks->esc_clk);
+	if (rc) {
+		pr_err("Failed to prepare dsi esc clk, rc=%d\n", rc);
+		goto esc_clk_err;
+	}
+
+	rc = clk_prepare(l_clks->byte_clk);
+	if (rc) {
+		pr_err("Failed to prepare dsi byte clk, rc=%d\n", rc);
+		goto byte_clk_err;
+	}
+
+	rc = clk_prepare(l_clks->pixel_clk);
+	if (rc) {
+		pr_err("Failed to prepare dsi pixel clk, rc=%d\n", rc);
+		goto pixel_clk_err;
+	}
+
+	return rc;
+
+pixel_clk_err:
+	clk_unprepare(l_clks->byte_clk);
+byte_clk_err:
+	clk_unprepare(l_clks->esc_clk);
+esc_clk_err:
+	return rc;
+}
+
+/* dsi_link_clk_unprepare() - unprepare link clocks, reverse of prepare */
+static void dsi_link_clk_unprepare(struct dsi_link_clk_info *l_clks)
+{
+	clk_unprepare(l_clks->pixel_clk);
+	clk_unprepare(l_clks->byte_clk);
+	clk_unprepare(l_clks->esc_clk);
+}
+
+/*
+ * dsi_link_clk_enable() - enable (already prepared) esc, byte and pixel
+ * clocks; on failure the already enabled ones are disabled in reverse.
+ */
+static int dsi_link_clk_enable(struct dsi_link_clk_info *l_clks)
+{
+	int rc = 0;
+
+	rc = clk_enable(l_clks->esc_clk);
+	if (rc) {
+		pr_err("Failed to enable dsi esc clk, rc=%d\n", rc);
+		goto esc_clk_err;
+	}
+
+	rc = clk_enable(l_clks->byte_clk);
+	if (rc) {
+		pr_err("Failed to enable dsi byte clk, rc=%d\n", rc);
+		goto byte_clk_err;
+	}
+
+	rc = clk_enable(l_clks->pixel_clk);
+	if (rc) {
+		pr_err("Failed to enable dsi pixel clk, rc=%d\n", rc);
+		goto pixel_clk_err;
+	}
+
+	return rc;
+
+pixel_clk_err:
+	clk_disable(l_clks->byte_clk);
+byte_clk_err:
+	clk_disable(l_clks->esc_clk);
+esc_clk_err:
+	return rc;
+}
+
+/*
+ * dsi_link_clk_disable() - disable the link clocks.
+ * NOTE(review): order (esc, pixel, byte) is not the strict reverse of
+ * dsi_link_clk_enable() (esc, byte, pixel) — presumably harmless for these
+ * clocks, but confirm against the clock tree before relying on it.
+ */
+static void dsi_link_clk_disable(struct dsi_link_clk_info *l_clks)
+{
+	clk_disable(l_clks->esc_clk);
+	clk_disable(l_clks->pixel_clk);
+	clk_disable(l_clks->byte_clk);
+}
+
+/**
+ * dsi_link_clk_start() - enable dsi link clocks
+ *
+ * Applies any pending rate change, then prepares and enables the link
+ * clocks; a failed enable unprepares them again.
+ */
+static int dsi_link_clk_start(struct dsi_link_clk_info *clks)
+{
+	int rc;
+
+	if (clks->set_new_rate) {
+		rc = dsi_link_clk_set_rate(clks);
+		if (rc) {
+			pr_err("failed to set clk rates, rc = %d\n", rc);
+			return rc;
+		}
+		/* rates applied; clear the pending-rate flag */
+		clks->set_new_rate = false;
+	}
+
+	rc = dsi_link_clk_prepare(clks);
+	if (rc) {
+		pr_err("failed to prepare link clks, rc = %d\n", rc);
+		return rc;
+	}
+
+	rc = dsi_link_clk_enable(clks);
+	if (rc) {
+		pr_err("failed to enable link clks, rc = %d\n", rc);
+		dsi_link_clk_unprepare(clks);
+		return rc;
+	}
+
+	pr_debug("Link clocks are enabled\n");
+	return 0;
+}
+
+/**
+ * dsi_link_clk_stop() - Stop DSI link clocks.
+ *
+ * Disables then unprepares the link clocks; always succeeds.
+ */
+static int dsi_link_clk_stop(struct dsi_link_clk_info *clks)
+{
+	dsi_link_clk_disable(clks);
+	dsi_link_clk_unprepare(clks);
+
+	pr_debug("Link clocks disabled\n");
+
+	return 0;
+}
+
+/*
+ * dsi_pwr_parse_supply_node() - parse power supply node from root device node
+ * @root: parent node whose children each describe one regulator.
+ * @regs: destination; regs->vregs must already have room for every child
+ *        of @root iterated here (no bounds check is performed — the caller
+ *        sizes the array from the child count).
+ *
+ * Mandatory properties: supply-name, min/max voltage, enable/disable load.
+ * The *-sleep properties are optional and left at their zeroed defaults.
+ *
+ * return: 0 on success, error code of the first missing mandatory property.
+ */
+static int dsi_pwr_parse_supply_node(struct device_node *root,
+				     struct dsi_regulator_info *regs)
+{
+	int rc = 0;
+	int i = 0;
+	u32 tmp = 0;
+	struct device_node *node = NULL;
+
+	for_each_child_of_node(root, node) {
+		const char *st = NULL;
+
+		rc = of_property_read_string(node, "qcom,supply-name", &st);
+		if (rc) {
+			pr_err("failed to read name, rc = %d\n", rc);
+			goto error;
+		}
+
+		snprintf(regs->vregs[i].vreg_name,
+			 ARRAY_SIZE(regs->vregs[i].vreg_name),
+			 "%s", st);
+
+		rc = of_property_read_u32(node, "qcom,supply-min-voltage",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read min voltage, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].min_voltage = tmp;
+
+		rc = of_property_read_u32(node, "qcom,supply-max-voltage",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read max voltage, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].max_voltage = tmp;
+
+		rc = of_property_read_u32(node, "qcom,supply-enable-load",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read enable load, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].enable_load = tmp;
+
+		rc = of_property_read_u32(node, "qcom,supply-disable-load",
+					  &tmp);
+		if (rc) {
+			pr_err("failed to read disable load, rc = %d\n", rc);
+			goto error;
+		}
+		regs->vregs[i].disable_load = tmp;
+
+		/* Optional values; a missing property is not an error */
+		rc = of_property_read_u32(node, "qcom,supply-pre-on-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("pre-on-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].pre_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(node, "qcom,supply-pre-off-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("pre-off-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].pre_off_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(node, "qcom,supply-post-on-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("post-on-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].post_on_sleep = tmp;
+		}
+
+		rc = of_property_read_u32(node, "qcom,supply-post-off-sleep",
+					  &tmp);
+		if (rc) {
+			pr_debug("post-off-sleep not specified\n");
+			rc = 0;
+		} else {
+			regs->vregs[i].post_off_sleep = tmp;
+		}
+
+		/* Log the entry that was just filled in BEFORE advancing the
+		 * index; the previous code incremented i first and therefore
+		 * dumped the next (still empty) vregs slot every iteration.
+		 */
+		pr_debug("[%s] minv=%d maxv=%d, en_load=%d, dis_load=%d\n",
+			 regs->vregs[i].vreg_name,
+			 regs->vregs[i].min_voltage,
+			 regs->vregs[i].max_voltage,
+			 regs->vregs[i].enable_load,
+			 regs->vregs[i].disable_load);
+		++i;
+	}
+
+error:
+	return rc;
+}
+
+/**
+ * dsi_pwr_enable_vregs() - enable/disable regulators
+ * @regs:   regulator set; entries are brought up in array order and taken
+ *          down in reverse order.
+ * @enable: true to power up, false to power down.
+ *
+ * Power-up sequence per entry: optional pre-on sleep, set load, optionally
+ * set voltage (only if the regulator exposes voltages), enable, optional
+ * post-on sleep.  On failure the unwind labels below fall through
+ * intentionally, then the final loop walks back over entries [0, i) and
+ * disables them.  Power-down never fails: set_load/disable results are
+ * deliberately ignored (best effort).
+ */
+static int dsi_pwr_enable_vregs(struct dsi_regulator_info *regs, bool enable)
+{
+	int rc = 0, i = 0;
+	struct dsi_vreg *vreg;
+	int num_of_v = 0;
+
+	if (enable) {
+		for (i = 0; i < regs->count; i++) {
+			vreg = &regs->vregs[i];
+			if (vreg->pre_on_sleep)
+				msleep(vreg->pre_on_sleep);
+
+			rc = regulator_set_load(vreg->vreg,
+						vreg->enable_load);
+			if (rc < 0) {
+				pr_err("Setting optimum mode failed for %s\n",
+				       vreg->vreg_name);
+				goto error;
+			}
+			num_of_v = regulator_count_voltages(vreg->vreg);
+			if (num_of_v > 0) {
+				rc = regulator_set_voltage(vreg->vreg,
+							   vreg->min_voltage,
+							   vreg->max_voltage);
+				if (rc) {
+					pr_err("Set voltage(%s) fail, rc=%d\n",
+					       vreg->vreg_name, rc);
+					goto error_disable_opt_mode;
+				}
+			}
+
+			rc = regulator_enable(vreg->vreg);
+			if (rc) {
+				pr_err("enable failed for %s, rc=%d\n",
+				       vreg->vreg_name, rc);
+				goto error_disable_voltage;
+			}
+
+			if (vreg->post_on_sleep)
+				msleep(vreg->post_on_sleep);
+		}
+	} else {
+		for (i = (regs->count - 1); i >= 0; i--) {
+			if (regs->vregs[i].pre_off_sleep)
+				msleep(regs->vregs[i].pre_off_sleep);
+
+			(void)regulator_set_load(regs->vregs[i].vreg,
+						 regs->vregs[i].disable_load);
+			(void)regulator_disable(regs->vregs[i].vreg);
+
+			if (regs->vregs[i].post_off_sleep)
+				msleep(regs->vregs[i].post_off_sleep);
+		}
+	}
+
+	return 0;
+	/* unwind the partially-configured entry i, falling through */
+error_disable_opt_mode:
+	(void)regulator_set_load(regs->vregs[i].vreg,
+				 regs->vregs[i].disable_load);
+
+	/* num_of_v here describes the failing entry i (0 when the jump came
+	 * from regulator_set_load, so the voltage reset is skipped)
+	 */
+error_disable_voltage:
+	if (num_of_v > 0)
+		(void)regulator_set_voltage(regs->vregs[i].vreg,
+					    0, regs->vregs[i].max_voltage);
+error:
+	/* walk back over the fully-enabled entries [0, i) and disable them */
+	for (i--; i >= 0; i--) {
+		if (regs->vregs[i].pre_off_sleep)
+			msleep(regs->vregs[i].pre_off_sleep);
+
+		(void)regulator_set_load(regs->vregs[i].vreg,
+					 regs->vregs[i].disable_load);
+
+		num_of_v = regulator_count_voltages(regs->vregs[i].vreg);
+		if (num_of_v > 0)
+			(void)regulator_set_voltage(regs->vregs[i].vreg,
+						    0, regs->vregs[i].max_voltage);
+
+		(void)regulator_disable(regs->vregs[i].vreg);
+
+		if (regs->vregs[i].post_off_sleep)
+			msleep(regs->vregs[i].post_off_sleep);
+	}
+
+	return rc;
+}
+
+/**
+* dsi_clk_pwr_of_get_vreg_data - Parse regulator supply information
+* @of_node: Device of node to parse for supply information.
+* @regs: Pointer where regulator information will be copied to.
+* @supply_name: Name of the supply node.
+*
+* return: error code in case of failure or 0 for success.
+*/
+int dsi_clk_pwr_of_get_vreg_data(struct device_node *of_node,
+				 struct dsi_regulator_info *regs,
+				 char *supply_name)
+{
+	int rc = 0;
+	struct device_node *supply_root_node = NULL;
+
+	if (!of_node || !regs) {
+		pr_err("Bad params\n");
+		return -EINVAL;
+	}
+
+	regs->count = 0;
+	supply_root_node = of_get_child_by_name(of_node, supply_name);
+	if (!supply_root_node) {
+		supply_root_node = of_parse_phandle(of_node, supply_name, 0);
+		if (!supply_root_node) {
+			pr_err("No supply entry present for %s\n", supply_name);
+			return -EINVAL;
+		}
+	}
+
+	/* Size the array with of_get_child_count(): the parser below walks
+	 * for_each_child_of_node() (ALL children), so counting only the
+	 * available children would under-allocate and overflow vregs[]
+	 * whenever a child node is disabled.
+	 */
+	regs->count = of_get_child_count(supply_root_node);
+	if (regs->count == 0) {
+		pr_err("No vregs defined for %s\n", supply_name);
+		return -EINVAL;
+	}
+
+	regs->vregs = kcalloc(regs->count, sizeof(*regs->vregs), GFP_KERNEL);
+	if (!regs->vregs) {
+		regs->count = 0;
+		return -ENOMEM;
+	}
+
+	rc = dsi_pwr_parse_supply_node(supply_root_node, regs);
+	if (rc) {
+		pr_err("failed to parse supply node for %s, rc = %d\n",
+		       supply_name, rc);
+
+		kfree(regs->vregs);
+		regs->vregs = NULL;
+		regs->count = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_clk_pwr_get_dt_vreg_data - parse regulator supply information
+ * @dev: Device whose of_node needs to be parsed.
+ * @regs: Pointer where regulator information will be copied to.
+ * @supply_name: Name of the supply node.
+ *
+ * The vregs array is device-managed (devm) and sized from the number of
+ * child nodes under the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_get_dt_vreg_data(struct device *dev,
+				 struct dsi_regulator_info *regs,
+				 char *supply_name)
+{
+	int rc;
+	struct device_node *root;
+
+	if (!dev || !regs) {
+		pr_err("Bad params\n");
+		return -EINVAL;
+	}
+
+	regs->count = 0;
+	root = of_get_child_by_name(dev->of_node, supply_name);
+	if (!root) {
+		root = of_parse_phandle(dev->of_node, supply_name, 0);
+		if (!root) {
+			pr_err("No supply entry present for %s\n", supply_name);
+			return -EINVAL;
+		}
+	}
+
+	/* of_get_child_count() == one pass of for_each_child_of_node() */
+	regs->count = of_get_child_count(root);
+	if (regs->count == 0) {
+		pr_err("No vregs defined for %s\n", supply_name);
+		return -EINVAL;
+	}
+
+	regs->vregs = devm_kcalloc(dev, regs->count, sizeof(*regs->vregs),
+				   GFP_KERNEL);
+	if (!regs->vregs) {
+		regs->count = 0;
+		return -ENOMEM;
+	}
+
+	rc = dsi_pwr_parse_supply_node(root, regs);
+	if (rc) {
+		pr_err("failed to parse supply node for %s, rc = %d\n",
+		       supply_name, rc);
+		devm_kfree(dev, regs->vregs);
+		regs->vregs = NULL;
+		regs->count = 0;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_pwr_enable_regulator() - enable a set of regulators
+ * @regs: Pointer to set of regulators to enable or disable.
+ * @enable: Enable/Disable regulators.
+ *
+ * Reference counted: the regulators are only physically enabled on the
+ * 0 -> 1 transition and disabled on the 1 -> 0 transition.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable)
+{
+	int rc = 0;
+
+	if (enable) {
+		if (regs->refcount == 0) {
+			rc = dsi_pwr_enable_vregs(regs, true);
+			if (rc) {
+				pr_err("failed to enable regulators\n");
+				/* do not count a failed enable: otherwise a
+				 * later balanced disable would try to power
+				 * down regulators that were never enabled
+				 */
+				return rc;
+			}
+		}
+		regs->refcount++;
+	} else {
+		if (regs->refcount == 0) {
+			pr_err("Unbalanced regulator off\n");
+		} else {
+			regs->refcount--;
+			if (regs->refcount == 0) {
+				rc = dsi_pwr_enable_vregs(regs, false);
+				if (rc)
+					pr_err("failed to disable vregs\n");
+			}
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_clk_enable_core_clks() - enable DSI core clocks
+ * @clks: DSI core clock information.
+ * @enable: enable/disable DSI core clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_core_clks(struct dsi_core_clk_info *clks, bool enable)
+{
+	return enable ? INC_REFCOUNT(clks, dsi_core_clk_start)
+		      : DEC_REFCOUNT(clks, dsi_core_clk_stop);
+}
+
+/**
+ * dsi_clk_enable_link_clks() - enable DSI link clocks
+ * @clks: DSI link clock information.
+ * @enable: enable/disable DSI link clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_link_clks(struct dsi_link_clk_info *clks, bool enable)
+{
+	return enable ? INC_REFCOUNT(clks, dsi_link_clk_start)
+		      : DEC_REFCOUNT(clks, dsi_link_clk_stop);
+}
+
+/**
+ * dsi_clk_set_link_frequencies() - set frequencies for link clks
+ * @clks: Link clock information
+ * @pixel_clk: pixel clock frequency in KHz.
+ * @byte_clk: Byte clock frequency in KHz.
+ * @esc_clk: Escape clock frequency in KHz.
+ *
+ * Only caches the requested rates; the clock framework is touched the next
+ * time the link clocks are started (set_new_rate is consumed there).
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_link_frequencies(struct dsi_link_clk_info *clks,
+				 u64 pixel_clk,
+				 u64 byte_clk,
+				 u64 esc_clk)
+{
+	clks->esc_clk_rate = esc_clk;
+	clks->byte_clk_rate = byte_clk;
+	clks->pixel_clk_rate = pixel_clk;
+	clks->set_new_rate = true;
+
+	return 0;
+}
+
+/**
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * @clks: DSI link clock information.
+ * @pixel_clk: Pixel clock rate in KHz.
+ *
+ * Applies the rate immediately and caches it only on success.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_pixel_clk_rate(struct dsi_link_clk_info *clks, u64 pixel_clk)
+{
+	int rc = clk_set_rate(clks->pixel_clk, pixel_clk);
+
+	if (rc) {
+		pr_err("failed to set clk rate for pixel clk, rc=%d\n", rc);
+		return rc;
+	}
+
+	clks->pixel_clk_rate = pixel_clk;
+	return 0;
+}
+
+/**
+ * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
+ * @clks: DSI link clock information.
+ * @byte_clk: Byte clock rate in KHz.
+ *
+ * Applies the rate immediately and caches it only on success.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_byte_clk_rate(struct dsi_link_clk_info *clks, u64 byte_clk)
+{
+	int rc = clk_set_rate(clks->byte_clk, byte_clk);
+
+	if (rc) {
+		pr_err("failed to set clk rate for byte clk, rc=%d\n", rc);
+		return rc;
+	}
+
+	clks->byte_clk_rate = byte_clk;
+	return 0;
+}
+
+/**
+ * dsi_clk_update_parent() - update parent clocks for specified clock
+ * @parent: link clock pair which are set as parent.
+ * @child: link clock pair whose parent has to be set.
+ *
+ * Reparents the byte clock first, then the pixel clock; stops at the first
+ * failure (a successful byte reparent is not rolled back).
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
+			  struct dsi_clk_link_set *child)
+{
+	int rc;
+
+	rc = clk_set_parent(child->byte_clk, parent->byte_clk);
+	if (rc) {
+		pr_err("failed to set byte clk parent\n");
+		return rc;
+	}
+
+	rc = clk_set_parent(child->pixel_clk, parent->pixel_clk);
+	if (rc)
+		pr_err("failed to set pixel clk parent\n");
+
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h
new file mode 100644
index 000000000000..223ca4ec4290
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_clk_pwr.h
@@ -0,0 +1,214 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CLK_PWR_H_
+#define _DSI_CLK_PWR_H_
+
+#include <linux/device.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+
/**
 * struct dsi_vreg - regulator information for DSI regulators
 * @vreg:           Handle to the regulator.
 * @vreg_name:      Regulator name (also the DT supply node name).
 * @min_voltage:    Minimum voltage in uV.
 * @max_voltage:    Maximum voltage in uV.
 * @enable_load:    Load, in uA, when enabled.
 * @disable_load:   Load, in uA, when disabled.
 * @pre_on_sleep:   Sleep, in ms, before enabling the regulator.
 * @post_on_sleep:  Sleep, in ms, after enabling the regulator.
 * @pre_off_sleep:  Sleep, in ms, before disabling the regulator.
 * @post_off_sleep: Sleep, in ms, after disabling the regulator.
 */
struct dsi_vreg {
	struct regulator *vreg;
	char vreg_name[32];	/* fixed-size buffer; names must fit in 31 chars + NUL */
	u32 min_voltage;
	u32 max_voltage;
	u32 enable_load;
	u32 disable_load;
	u32 pre_on_sleep;
	u32 post_on_sleep;
	u32 pre_off_sleep;
	u32 post_off_sleep;
};
+
/**
 * struct dsi_regulator_info - set of vregs that are turned on/off together.
 * @vregs:    Array of dsi_vreg structures (heap-allocated by the DT parser).
 * @count:    Number of vregs.
 * @refcount: Reference counting for enabling — presumably keeps repeated
 *            enable/disable calls balanced; verify against
 *            dsi_pwr_enable_regulator().
 */
struct dsi_regulator_info {
	struct dsi_vreg *vregs;
	u32 count;
	u32 refcount;
};
+
/**
 * struct dsi_core_clk_info - Core clock information for DSI hardware
 * @mdp_core_clk:  Handle to MDP core clock.
 * @iface_clk:     Handle to MDP interface clock.
 * @core_mmss_clk: Handle to MMSS core clock.
 * @bus_clk:       Handle to bus clock.
 * @refcount:      Reference count for core clocks.
 * @clk_state:     Current clock state (values defined by the clock utility).
 */
struct dsi_core_clk_info {
	struct clk *mdp_core_clk;
	struct clk *iface_clk;
	struct clk *core_mmss_clk;
	struct clk *bus_clk;

	u32 refcount;
	u32 clk_state;
};
+
/**
 * struct dsi_link_clk_info - Link clock information for DSI hardware.
 * @byte_clk:       Handle to DSI byte clock.
 * @byte_clk_rate:  Frequency of DSI byte clock in KHz.
 * @pixel_clk:      Handle to DSI pixel clock.
 * @pixel_clk_rate: Frequency of DSI pixel clock in KHz.
 * @esc_clk:        Handle to DSI escape clock.
 * @esc_clk_rate:   Frequency of DSI escape clock in KHz.
 * @refcount:       Reference count for link clocks.
 * @clk_state:      Current clock state.
 * @set_new_rate:   private flag used by clock utility.
 *
 * NOTE(review): the *_rate values are handed straight to clk_set_rate()
 * (see dsi_clk_set_pixel_clk_rate/dsi_clk_set_byte_clk_rate), which takes
 * Hz — the "KHz" in the descriptions above looks inaccurate; confirm the
 * units actually used by callers.
 */
struct dsi_link_clk_info {
	struct clk *byte_clk;
	u64 byte_clk_rate;

	struct clk *pixel_clk;
	u64 pixel_clk_rate;

	struct clk *esc_clk;
	u64 esc_clk_rate;

	u32 refcount;
	u32 clk_state;
	bool set_new_rate;
};
+
/**
 * struct dsi_clk_link_set - Pair of clock handles to describe link clocks
 * @byte_clk:  Handle to DSI byte clock.
 * @pixel_clk: Handle to DSI pixel clock.
 */
struct dsi_clk_link_set {
	struct clk *byte_clk;
	struct clk *pixel_clk;
};
+
+/**
+ * dsi_clk_pwr_of_get_vreg_data - parse regulator supply information
+ * @of_node: Device of node to parse for supply information.
+ * @regs: Pointer where regulator information will be copied to.
+ * @supply_name: Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_of_get_vreg_data(struct device_node *of_node,
+ struct dsi_regulator_info *regs,
+ char *supply_name);
+
+/**
+ * dsi_clk_pwr_get_dt_vreg_data - parse regulator supply information
+ * @dev: Device whose of_node needs to be parsed.
+ * @regs: Pointer where regulator information will be copied to.
+ * @supply_name: Name of the supply node.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_pwr_get_dt_vreg_data(struct device *dev,
+ struct dsi_regulator_info *regs,
+ char *supply_name);
+
+/**
+ * dsi_pwr_enable_regulator() - enable a set of regulators
+ * @regs: Pointer to set of regulators to enable or disable.
+ * @enable: Enable/Disable regulators.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_pwr_enable_regulator(struct dsi_regulator_info *regs, bool enable);
+
+/**
+ * dsi_clk_enable_core_clks() - enable DSI core clocks
+ * @clks: DSI core clock information.
+ * @enable: enable/disable DSI core clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_core_clks(struct dsi_core_clk_info *clks, bool enable);
+
+/**
+ * dsi_clk_enable_link_clks() - enable DSI link clocks
+ * @clks: DSI link clock information.
+ * @enable: enable/disable DSI link clocks.
+ *
+ * A ref count is maintained, so caller should make sure disable and enable
+ * calls are balanced.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_enable_link_clks(struct dsi_link_clk_info *clks, bool enable);
+
+/**
+ * dsi_clk_set_link_frequencies() - set frequencies for link clks
+ * @clks: Link clock information
+ * @pixel_clk: pixel clock frequency in KHz.
+ * @byte_clk: Byte clock frequency in KHz.
+ * @esc_clk: Escape clock frequency in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_link_frequencies(struct dsi_link_clk_info *clks,
+ u64 pixel_clk,
+ u64 byte_clk,
+ u64 esc_clk);
+
+/**
+ * dsi_clk_set_pixel_clk_rate() - set frequency for pixel clock
+ * @clks: DSI link clock information.
+ * @pixel_clk: Pixel clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_pixel_clk_rate(struct dsi_link_clk_info *clks, u64 pixel_clk);
+
+/**
+ * dsi_clk_set_byte_clk_rate() - set frequency for byte clock
+ * @clks: DSI link clock information.
+ * @byte_clk: Byte clock rate in KHz.
+ *
+ * return: error code in case of failure or 0 for success.
+ */
+int dsi_clk_set_byte_clk_rate(struct dsi_link_clk_info *clks, u64 byte_clk);
+
+/**
+ * dsi_clk_update_parent() - update parent clocks for specified clock
+ * @parent: link clock pair which are set as parent.
+ * @child: link clock pair whose parent has to be set.
+ */
+int dsi_clk_update_parent(struct dsi_clk_link_set *parent,
+ struct dsi_clk_link_set *child);
+#endif /* _DSI_CLK_PWR_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
new file mode 100644
index 000000000000..75543c768d45
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.c
@@ -0,0 +1,2312 @@
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-ctrl:[%s] " fmt, __func__
+
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/of_irq.h>
+#include <video/mipi_display.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gpu.h"
+#include "dsi_ctrl.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_catalog.h"
+
+#define DSI_CTRL_DEFAULT_LABEL "MDSS DSI CTRL"
+
+#define DSI_CTRL_TX_TO_MS 200
+
+#define TO_ON_OFF(x) ((x) ? "ON" : "OFF")
/**
 * enum dsi_ctrl_driver_ops - controller driver ops
 *
 * Operation identifiers used by dsi_ctrl_check_state() to validate a
 * requested transition and by dsi_ctrl_update_state() to record it.
 */
enum dsi_ctrl_driver_ops {
	DSI_CTRL_OP_POWER_STATE_CHANGE,	/* regulator/core-clk/link-clk level */
	DSI_CTRL_OP_CMD_ENGINE,		/* command engine on/off */
	DSI_CTRL_OP_VID_ENGINE,		/* video engine on/off */
	DSI_CTRL_OP_HOST_ENGINE,	/* host controller on/off */
	DSI_CTRL_OP_CMD_TX,		/* command transfer (no state recorded) */
	DSI_CTRL_OP_ULPS_TOGGLE,	/* ultra-low-power state */
	DSI_CTRL_OP_CLAMP_TOGGLE,	/* PHY clamps */
	DSI_CTRL_OP_SET_CLK_SOURCE,	/* link clock parent selection */
	DSI_CTRL_OP_HOST_INIT,		/* host register initialization */
	DSI_CTRL_OP_TPG,		/* test pattern generator */
	DSI_CTRL_OP_PHY_SW_RESET,	/* PHY software reset (no state recorded) */
	DSI_CTRL_OP_ASYNC_TIMING,	/* seamless timing update */
	DSI_CTRL_OP_MAX
};
+
/* Node linking one dsi_ctrl instance into the global dsi_ctrl_list. */
struct dsi_ctrl_list_item {
	struct dsi_ctrl *ctrl;
	struct list_head list;
};
+
/* All probed DSI controllers; protected by dsi_ctrl_list_lock. */
static LIST_HEAD(dsi_ctrl_list);
static DEFINE_MUTEX(dsi_ctrl_list_lock);

/* Version constants referenced via the of_device_id .data pointers below. */
static const enum dsi_ctrl_version dsi_ctrl_v1_4 = DSI_CTRL_VERSION_1_4;
static const enum dsi_ctrl_version dsi_ctrl_v2_0 = DSI_CTRL_VERSION_2_0;

static const struct of_device_id msm_dsi_of_match[] = {
	{
		.compatible = "qcom,dsi-ctrl-hw-v1.4",
		.data = &dsi_ctrl_v1_4,
	},
	{
		.compatible = "qcom,dsi-ctrl-hw-v2.0",
		.data = &dsi_ctrl_v2_0,
	},
	{}
};
+
+static ssize_t debugfs_state_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct dsi_ctrl *dsi_ctrl = file->private_data;
+ char *buf;
+ u32 len = 0;
+
+ if (!dsi_ctrl)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ /* Dump current state */
+ len += snprintf((buf + len), (SZ_4K - len), "Current State:\n");
+ len += snprintf((buf + len), (SZ_4K - len),
+ "\tPOWER_STATUS = %s\n\tCORE_CLOCK = %s\n",
+ TO_ON_OFF(dsi_ctrl->current_state.pwr_enabled),
+ TO_ON_OFF(dsi_ctrl->current_state.core_clk_enabled));
+ len += snprintf((buf + len), (SZ_4K - len),
+ "\tLINK_CLOCK = %s\n\tULPS_STATUS = %s\n",
+ TO_ON_OFF(dsi_ctrl->current_state.link_clk_enabled),
+ TO_ON_OFF(dsi_ctrl->current_state.ulps_enabled));
+ len += snprintf((buf + len), (SZ_4K - len),
+ "\tCLAMP_STATUS = %s\n\tCTRL_ENGINE = %s\n",
+ TO_ON_OFF(dsi_ctrl->current_state.clamp_enabled),
+ TO_ON_OFF(dsi_ctrl->current_state.controller_state));
+ len += snprintf((buf + len), (SZ_4K - len),
+ "\tVIDEO_ENGINE = %s\n\tCOMMAND_ENGINE = %s\n",
+ TO_ON_OFF(dsi_ctrl->current_state.vid_engine_state),
+ TO_ON_OFF(dsi_ctrl->current_state.cmd_engine_state));
+
+ /* Dump clock information */
+ len += snprintf((buf + len), (SZ_4K - len), "\nClock Info:\n");
+ len += snprintf((buf + len), (SZ_4K - len),
+ "\tBYTE_CLK = %llu, PIXEL_CLK = %llu, ESC_CLK = %llu\n",
+ dsi_ctrl->clk_info.link_clks.byte_clk_rate,
+ dsi_ctrl->clk_info.link_clks.pixel_clk_rate,
+ dsi_ctrl->clk_info.link_clks.esc_clk_rate);
+
+ if (len > count)
+ len = count;
+
+ /* TODO: make sure that this does not exceed 4K */
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+ kfree(buf);
+ return len;
+}
+
+static ssize_t debugfs_reg_dump_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct dsi_ctrl *dsi_ctrl = file->private_data;
+ char *buf;
+ u32 len = 0;
+
+ if (!dsi_ctrl)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ if (dsi_ctrl->current_state.core_clk_enabled) {
+ len = dsi_ctrl->hw.ops.reg_dump_to_buffer(&dsi_ctrl->hw,
+ buf,
+ SZ_4K);
+ } else {
+ len = snprintf((buf + len), (SZ_4K - len),
+ "Core clocks are not turned on, cannot read\n");
+ }
+
+ if (len > count)
+ len = count;
+
+ /* TODO: make sure that this does not exceed 4K */
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+ kfree(buf);
+ return len;
+}
+
/* Read-only debugfs files; simple_open() stores dsi_ctrl in private_data. */
static const struct file_operations state_info_fops = {
	.open = simple_open,
	.read = debugfs_state_info_read,
};

static const struct file_operations reg_dump_fops = {
	.open = simple_open,
	.read = debugfs_reg_dump_read,
};
+
+static int dsi_ctrl_debugfs_init(struct dsi_ctrl *dsi_ctrl,
+ struct dentry *parent)
+{
+ int rc = 0;
+ struct dentry *dir, *state_file, *reg_dump;
+
+ dir = debugfs_create_dir(dsi_ctrl->name, parent);
+ if (IS_ERR_OR_NULL(dir)) {
+ rc = PTR_ERR(dir);
+ pr_err("[DSI_%d] debugfs create dir failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error;
+ }
+
+ state_file = debugfs_create_file("state_info",
+ 0444,
+ dir,
+ dsi_ctrl,
+ &state_info_fops);
+ if (IS_ERR_OR_NULL(state_file)) {
+ rc = PTR_ERR(state_file);
+ pr_err("[DSI_%d] state file failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error_remove_dir;
+ }
+
+ reg_dump = debugfs_create_file("reg_dump",
+ 0444,
+ dir,
+ dsi_ctrl,
+ &reg_dump_fops);
+ if (IS_ERR_OR_NULL(reg_dump)) {
+ rc = PTR_ERR(reg_dump);
+ pr_err("[DSI_%d] reg dump file failed, rc=%d\n",
+ dsi_ctrl->index, rc);
+ goto error_remove_dir;
+ }
+
+ dsi_ctrl->debugfs_root = dir;
+error_remove_dir:
+ debugfs_remove(dir);
+error:
+ return rc;
+}
+
+static int dsi_ctrl_debugfs_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+ debugfs_remove(dsi_ctrl->debugfs_root);
+ return 0;
+}
+
+static int dsi_ctrl_check_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_ctrl_driver_ops op,
+ u32 op_state)
+{
+ int rc = 0;
+ struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+ switch (op) {
+ case DSI_CTRL_OP_POWER_STATE_CHANGE:
+ if (state->power_state == op_state) {
+ pr_debug("[%d] No change in state, pwr_state=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if (state->power_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+ if ((state->cmd_engine_state == DSI_CTRL_ENGINE_ON) ||
+ (state->vid_engine_state == DSI_CTRL_ENGINE_ON) ||
+ (state->controller_state == DSI_CTRL_ENGINE_ON)) {
+ pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+ dsi_ctrl->index,
+ op_state,
+ state->cmd_engine_state,
+ state->vid_engine_state,
+ state->controller_state);
+ rc = -EINVAL;
+ }
+ }
+ break;
+ case DSI_CTRL_OP_CMD_ENGINE:
+ if (state->cmd_engine_state == op_state) {
+ pr_debug("[%d] No change in state, cmd_state=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+ (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+ pr_debug("[%d]State error: op=%d: %d, %d\n",
+ dsi_ctrl->index,
+ op,
+ state->power_state,
+ state->controller_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_VID_ENGINE:
+ if (state->vid_engine_state == op_state) {
+ pr_debug("[%d] No change in state, cmd_state=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+ (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+ pr_debug("[%d]State error: op=%d: %d, %d\n",
+ dsi_ctrl->index,
+ op,
+ state->power_state,
+ state->controller_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_HOST_ENGINE:
+ if (state->controller_state == op_state) {
+ pr_debug("[%d] No change in state, ctrl_state=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if (state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) {
+ pr_debug("[%d]State error (link is off): op=%d:, %d\n",
+ dsi_ctrl->index,
+ op_state,
+ state->power_state);
+ rc = -EINVAL;
+ } else if ((op_state == DSI_CTRL_ENGINE_OFF) &&
+ ((state->cmd_engine_state != DSI_CTRL_ENGINE_OFF) ||
+ (state->vid_engine_state != DSI_CTRL_ENGINE_OFF))) {
+ pr_debug("[%d]State error (eng on): op=%d: %d, %d\n",
+ dsi_ctrl->index,
+ op_state,
+ state->cmd_engine_state,
+ state->vid_engine_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_CMD_TX:
+ if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+ (state->host_initialized != true) ||
+ (state->cmd_engine_state != DSI_CTRL_ENGINE_ON)) {
+ pr_debug("[%d]State error: op=%d: %d, %d, %d\n",
+ dsi_ctrl->index,
+ op,
+ state->power_state,
+ state->host_initialized,
+ state->cmd_engine_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_HOST_INIT:
+ if (state->host_initialized == op_state) {
+ pr_debug("[%d] No change in state, host_init=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if (state->power_state != DSI_CTRL_POWER_CORE_CLK_ON) {
+ pr_debug("[%d]State error: op=%d: %d\n",
+ dsi_ctrl->index, op, state->power_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_ULPS_TOGGLE:
+ if (state->ulps_enabled == op_state) {
+ pr_debug("[%d] No change in state, ulps_enabled=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+ (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+ pr_debug("[%d]State error: op=%d: %d, %d\n",
+ dsi_ctrl->index,
+ op,
+ state->power_state,
+ state->controller_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_CLAMP_TOGGLE:
+ if (state->clamp_enabled == op_state) {
+ pr_debug("[%d] No change in state, clamp_enabled=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+ (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+ pr_debug("[%d]State error: op=%d: %d, %d\n",
+ dsi_ctrl->index,
+ op,
+ state->power_state,
+ state->controller_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_SET_CLK_SOURCE:
+ if (state->power_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+ pr_debug("[%d] State error: op=%d: %d\n",
+ dsi_ctrl->index,
+ op,
+ state->power_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_TPG:
+ if (state->tpg_enabled == op_state) {
+ pr_debug("[%d] No change in state, tpg_enabled=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ } else if ((state->power_state != DSI_CTRL_POWER_LINK_CLK_ON) ||
+ (state->controller_state != DSI_CTRL_ENGINE_ON)) {
+ pr_debug("[%d]State error: op=%d: %d, %d\n",
+ dsi_ctrl->index,
+ op,
+ state->power_state,
+ state->controller_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_PHY_SW_RESET:
+ if (state->power_state != DSI_CTRL_POWER_CORE_CLK_ON) {
+ pr_debug("[%d]State error: op=%d: %d\n",
+ dsi_ctrl->index, op, state->power_state);
+ rc = -EINVAL;
+ }
+ break;
+ case DSI_CTRL_OP_ASYNC_TIMING:
+ if (state->vid_engine_state != op_state) {
+ pr_err("[%d] Unexpected engine state vid_state=%d\n",
+ dsi_ctrl->index, op_state);
+ rc = -EINVAL;
+ }
+ break;
+ default:
+ rc = -ENOTSUPP;
+ break;
+ }
+
+ return rc;
+}
+
+static void dsi_ctrl_update_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_ctrl_driver_ops op,
+ u32 op_state)
+{
+ struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+
+ switch (op) {
+ case DSI_CTRL_OP_POWER_STATE_CHANGE:
+ state->power_state = op_state;
+ if (op_state == DSI_CTRL_POWER_OFF) {
+ state->pwr_enabled = false;
+ state->core_clk_enabled = false;
+ state->link_clk_enabled = false;
+ } else if (op_state == DSI_CTRL_POWER_VREG_ON) {
+ state->pwr_enabled = true;
+ state->core_clk_enabled = false;
+ state->link_clk_enabled = false;
+ } else if (op_state == DSI_CTRL_POWER_CORE_CLK_ON) {
+ state->pwr_enabled = true;
+ state->core_clk_enabled = true;
+ state->link_clk_enabled = false;
+ } else if (op_state == DSI_CTRL_POWER_LINK_CLK_ON) {
+ state->pwr_enabled = true;
+ state->core_clk_enabled = true;
+ state->link_clk_enabled = true;
+ }
+ break;
+ case DSI_CTRL_OP_CMD_ENGINE:
+ state->cmd_engine_state = op_state;
+ break;
+ case DSI_CTRL_OP_VID_ENGINE:
+ state->vid_engine_state = op_state;
+ break;
+ case DSI_CTRL_OP_HOST_ENGINE:
+ state->controller_state = op_state;
+ break;
+ case DSI_CTRL_OP_ULPS_TOGGLE:
+ state->ulps_enabled = (op_state == 1) ? true : false;
+ break;
+ case DSI_CTRL_OP_CLAMP_TOGGLE:
+ state->clamp_enabled = (op_state == 1) ? true : false;
+ break;
+ case DSI_CTRL_OP_SET_CLK_SOURCE:
+ state->clk_source_set = (op_state == 1) ? true : false;
+ break;
+ case DSI_CTRL_OP_HOST_INIT:
+ state->host_initialized = (op_state == 1) ? true : false;
+ break;
+ case DSI_CTRL_OP_TPG:
+ state->tpg_enabled = (op_state == 1) ? true : false;
+ break;
+ case DSI_CTRL_OP_CMD_TX:
+ case DSI_CTRL_OP_PHY_SW_RESET:
+ default:
+ break;
+ }
+}
+
+static int dsi_ctrl_init_regmap(struct platform_device *pdev,
+ struct dsi_ctrl *ctrl)
+{
+ int rc = 0;
+ void __iomem *ptr;
+
+ ptr = msm_ioremap(pdev, "dsi_ctrl", ctrl->name);
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ return rc;
+ }
+
+ ctrl->hw.base = ptr;
+ pr_debug("[%s] map dsi_ctrl registers to %pK\n", ctrl->name,
+ ctrl->hw.base);
+
+ ptr = msm_ioremap(pdev, "mmss_misc", ctrl->name);
+ if (IS_ERR(ptr)) {
+ rc = PTR_ERR(ptr);
+ return rc;
+ }
+
+ ctrl->hw.mmss_misc_base = ptr;
+ pr_debug("[%s] map mmss_misc registers to %p\n", ctrl->name,
+ ctrl->hw.mmss_misc_base);
+ return rc;
+}
+
+static int dsi_ctrl_clocks_deinit(struct dsi_ctrl *ctrl)
+{
+ struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
+ struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+ struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
+
+ if (core->mdp_core_clk)
+ devm_clk_put(&ctrl->pdev->dev, core->mdp_core_clk);
+ if (core->iface_clk)
+ devm_clk_put(&ctrl->pdev->dev, core->iface_clk);
+ if (core->core_mmss_clk)
+ devm_clk_put(&ctrl->pdev->dev, core->core_mmss_clk);
+ if (core->bus_clk)
+ devm_clk_put(&ctrl->pdev->dev, core->bus_clk);
+
+ memset(core, 0x0, sizeof(*core));
+
+ if (link->byte_clk)
+ devm_clk_put(&ctrl->pdev->dev, link->byte_clk);
+ if (link->pixel_clk)
+ devm_clk_put(&ctrl->pdev->dev, link->pixel_clk);
+ if (link->esc_clk)
+ devm_clk_put(&ctrl->pdev->dev, link->esc_clk);
+
+ memset(link, 0x0, sizeof(*link));
+
+ if (rcg->byte_clk)
+ devm_clk_put(&ctrl->pdev->dev, rcg->byte_clk);
+ if (rcg->pixel_clk)
+ devm_clk_put(&ctrl->pdev->dev, rcg->pixel_clk);
+
+ memset(rcg, 0x0, sizeof(*rcg));
+
+ return 0;
+}
+
+static int dsi_ctrl_clocks_init(struct platform_device *pdev,
+ struct dsi_ctrl *ctrl)
+{
+ int rc = 0;
+ struct dsi_core_clk_info *core = &ctrl->clk_info.core_clks;
+ struct dsi_link_clk_info *link = &ctrl->clk_info.link_clks;
+ struct dsi_clk_link_set *rcg = &ctrl->clk_info.rcg_clks;
+
+ core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
+ if (IS_ERR(core->mdp_core_clk)) {
+ rc = PTR_ERR(core->mdp_core_clk);
+ pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
+ goto fail;
+ }
+
+ core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
+ if (IS_ERR(core->iface_clk)) {
+ rc = PTR_ERR(core->iface_clk);
+ pr_err("failed to get iface_clk, rc=%d\n", rc);
+ goto fail;
+ }
+
+ core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
+ if (IS_ERR(core->core_mmss_clk)) {
+ rc = PTR_ERR(core->core_mmss_clk);
+ pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
+ goto fail;
+ }
+
+ core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+ if (IS_ERR(core->bus_clk)) {
+ rc = PTR_ERR(core->bus_clk);
+ pr_err("failed to get bus_clk, rc=%d\n", rc);
+ goto fail;
+ }
+
+ link->byte_clk = devm_clk_get(&pdev->dev, "byte_clk");
+ if (IS_ERR(link->byte_clk)) {
+ rc = PTR_ERR(link->byte_clk);
+ pr_err("failed to get byte_clk, rc=%d\n", rc);
+ goto fail;
+ }
+
+ link->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk");
+ if (IS_ERR(link->pixel_clk)) {
+ rc = PTR_ERR(link->pixel_clk);
+ pr_err("failed to get pixel_clk, rc=%d\n", rc);
+ goto fail;
+ }
+
+ link->esc_clk = devm_clk_get(&pdev->dev, "core_clk");
+ if (IS_ERR(link->esc_clk)) {
+ rc = PTR_ERR(link->esc_clk);
+ pr_err("failed to get esc_clk, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rcg->byte_clk = devm_clk_get(&pdev->dev, "byte_clk_rcg");
+ if (IS_ERR(rcg->byte_clk)) {
+ rc = PTR_ERR(rcg->byte_clk);
+ pr_err("failed to get byte_clk_rcg, rc=%d\n", rc);
+ goto fail;
+ }
+
+ rcg->pixel_clk = devm_clk_get(&pdev->dev, "pixel_clk_rcg");
+ if (IS_ERR(rcg->pixel_clk)) {
+ rc = PTR_ERR(rcg->pixel_clk);
+ pr_err("failed to get pixel_clk_rcg, rc=%d\n", rc);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ dsi_ctrl_clocks_deinit(ctrl);
+ return rc;
+}
+
+static int dsi_ctrl_supplies_deinit(struct dsi_ctrl *ctrl)
+{
+ int i = 0;
+ int rc = 0;
+ struct dsi_regulator_info *regs;
+
+ regs = &ctrl->pwr_info.digital;
+ for (i = 0; i < regs->count; i++) {
+ if (!regs->vregs[i].vreg)
+ pr_err("vreg is NULL, should not reach here\n");
+ else
+ devm_regulator_put(regs->vregs[i].vreg);
+ }
+
+ regs = &ctrl->pwr_info.host_pwr;
+ for (i = 0; i < regs->count; i++) {
+ if (!regs->vregs[i].vreg)
+ pr_err("vreg is NULL, should not reach here\n");
+ else
+ devm_regulator_put(regs->vregs[i].vreg);
+ }
+
+ if (!ctrl->pwr_info.host_pwr.vregs) {
+ devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.host_pwr.vregs);
+ ctrl->pwr_info.host_pwr.vregs = NULL;
+ ctrl->pwr_info.host_pwr.count = 0;
+ }
+
+ if (!ctrl->pwr_info.digital.vregs) {
+ devm_kfree(&ctrl->pdev->dev, ctrl->pwr_info.digital.vregs);
+ ctrl->pwr_info.digital.vregs = NULL;
+ ctrl->pwr_info.digital.count = 0;
+ }
+
+ return rc;
+}
+
/*
 * Parse the digital (core) and host-power supply lists from DT and
 * acquire a regulator handle for each entry.
 *
 * Unwind order is significant: a failure while acquiring host_pwr
 * regulators releases only the host_pwr handles obtained so far, then
 * all digital handles, then both vreg arrays.
 *
 * Returns 0 on success or a negative error code.
 */
static int dsi_ctrl_supplies_init(struct platform_device *pdev,
				  struct dsi_ctrl *ctrl)
{
	int rc = 0;
	int i = 0;
	struct dsi_regulator_info *regs;
	struct regulator *vreg = NULL;

	/* fills regs->vregs / regs->count from the named DT node */
	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
					  &ctrl->pwr_info.digital,
					  "qcom,core-supply-entries");
	if (rc) {
		pr_err("failed to get digital supply, rc = %d\n", rc);
		goto error;
	}

	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
					  &ctrl->pwr_info.host_pwr,
					  "qcom,ctrl-supply-entries");
	if (rc) {
		pr_err("failed to get host power supplies, rc = %d\n", rc);
		goto error_digital;
	}

	regs = &ctrl->pwr_info.digital;
	for (i = 0; i < regs->count; i++) {
		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
		if (IS_ERR(vreg)) {
			pr_err("failed to get %s regulator\n",
			       regs->vregs[i].vreg_name);
			rc = PTR_ERR(vreg);
			/* all digital handles released at error_digital_put */
			goto error_host_pwr;
		}
		regs->vregs[i].vreg = vreg;
	}

	regs = &ctrl->pwr_info.host_pwr;
	for (i = 0; i < regs->count; i++) {
		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
		if (IS_ERR(vreg)) {
			pr_err("failed to get %s regulator\n",
			       regs->vregs[i].vreg_name);
			/* release the host_pwr handles acquired so far */
			for (--i; i >= 0; i--)
				devm_regulator_put(regs->vregs[i].vreg);
			rc = PTR_ERR(vreg);
			goto error_digital_put;
		}
		regs->vregs[i].vreg = vreg;
	}

	return rc;

error_digital_put:
	regs = &ctrl->pwr_info.digital;
	for (i = 0; i < regs->count; i++)
		devm_regulator_put(regs->vregs[i].vreg);
error_host_pwr:
	devm_kfree(&pdev->dev, ctrl->pwr_info.host_pwr.vregs);
	ctrl->pwr_info.host_pwr.vregs = NULL;
	ctrl->pwr_info.host_pwr.count = 0;
error_digital:
	devm_kfree(&pdev->dev, ctrl->pwr_info.digital.vregs);
	ctrl->pwr_info.digital.vregs = NULL;
	ctrl->pwr_info.digital.count = 0;
error:
	return rc;
}
+
+static int dsi_ctrl_axi_bus_client_init(struct platform_device *pdev,
+ struct dsi_ctrl *ctrl)
+{
+ int rc = 0;
+ struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
+
+ bus->bus_scale_table = msm_bus_cl_get_pdata(pdev);
+ if (IS_ERR_OR_NULL(bus->bus_scale_table)) {
+ rc = PTR_ERR(bus->bus_scale_table);
+ pr_err("msm_bus_cl_get_pdata() failed, rc = %d\n", rc);
+ bus->bus_scale_table = NULL;
+ return rc;
+ }
+
+ bus->bus_handle = msm_bus_scale_register_client(bus->bus_scale_table);
+ if (!bus->bus_handle) {
+ rc = -EINVAL;
+ pr_err("failed to register axi bus client\n");
+ }
+
+ return rc;
+}
+
+static int dsi_ctrl_axi_bus_client_deinit(struct dsi_ctrl *ctrl)
+{
+ struct dsi_ctrl_bus_scale_info *bus = &ctrl->axi_bus_info;
+
+ if (bus->bus_handle) {
+ msm_bus_scale_unregister_client(bus->bus_handle);
+
+ bus->bus_handle = 0;
+ }
+
+ return 0;
+}
+
+static int dsi_ctrl_validate_panel_info(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_host_config *config)
+{
+ int rc = 0;
+ struct dsi_host_common_cfg *host_cfg = &config->common_config;
+
+ if (config->panel_mode >= DSI_OP_MODE_MAX) {
+ pr_err("Invalid dsi operation mode (%d)\n", config->panel_mode);
+ rc = -EINVAL;
+ goto err;
+ }
+
+ if ((host_cfg->data_lanes & (DSI_CLOCK_LANE - 1)) == 0) {
+ pr_err("No data lanes are enabled\n");
+ rc = -EINVAL;
+ goto err;
+ }
+err:
+ return rc;
+}
+
/*
 * Derive the pixel and byte clock rates from the video timing and push
 * them, together with the configured escape clock rate, into the link
 * clock bookkeeping via dsi_clk_set_link_frequencies().
 *
 * Returns 0 on success or the error from dsi_clk_set_link_frequencies().
 */
static int dsi_ctrl_update_link_freqs(struct dsi_ctrl *dsi_ctrl,
				      struct dsi_host_config *config)
{
	int rc = 0;
	u32 num_of_lanes = 0;
	u32 bpp = 3;	/* bytes per pixel; assumes 24-bit RGB — TODO confirm for other formats */
	u64 h_period, v_period, bit_rate, pclk_rate, bit_rate_per_lane,
	    byte_clk_rate;
	struct dsi_host_common_cfg *host_cfg = &config->common_config;
	struct dsi_mode_info *timing = &config->video_timing;

	/* count enabled data lanes */
	if (host_cfg->data_lanes & DSI_DATA_LANE_0)
		num_of_lanes++;
	if (host_cfg->data_lanes & DSI_DATA_LANE_1)
		num_of_lanes++;
	if (host_cfg->data_lanes & DSI_DATA_LANE_2)
		num_of_lanes++;
	if (host_cfg->data_lanes & DSI_DATA_LANE_3)
		num_of_lanes++;

	h_period = DSI_H_TOTAL(timing);
	v_period = DSI_V_TOTAL(timing);

	/* total bits per second: pixels/s * bytes-per-pixel * 8 bits */
	bit_rate = h_period * v_period * timing->refresh_rate * bpp * 8;
	/* do_div() modifies its first argument in place (64/32 division) */
	bit_rate_per_lane = bit_rate;
	do_div(bit_rate_per_lane, num_of_lanes);
	pclk_rate = bit_rate;
	do_div(pclk_rate, (8 * bpp));		/* back to pixels per second */
	byte_clk_rate = bit_rate_per_lane;
	do_div(byte_clk_rate, 8);		/* bits -> bytes per lane */
	pr_debug("bit_clk_rate = %llu, bit_clk_rate_per_lane = %llu\n",
		 bit_rate, bit_rate_per_lane);
	pr_debug("byte_clk_rate = %llu, pclk_rate = %llu\n",
		 byte_clk_rate, pclk_rate);

	rc = dsi_clk_set_link_frequencies(&dsi_ctrl->clk_info.link_clks,
					  pclk_rate,
					  byte_clk_rate,
					  config->esc_clk_rate_hz);
	if (rc)
		pr_err("Failed to update link frequencies\n");

	return rc;
}
+
+static int dsi_ctrl_enable_supplies(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+ int rc = 0;
+
+ if (enable) {
+ rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.host_pwr,
+ true);
+ if (rc) {
+ pr_err("failed to enable host power regs, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
+ true);
+ if (rc) {
+ pr_err("failed to enable gdsc, rc=%d\n", rc);
+ (void)dsi_pwr_enable_regulator(
+ &dsi_ctrl->pwr_info.host_pwr,
+ false
+ );
+ goto error;
+ }
+ } else {
+ rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.digital,
+ false);
+ if (rc) {
+ pr_err("failed to disable gdsc, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_pwr_enable_regulator(&dsi_ctrl->pwr_info.host_pwr,
+ false);
+ if (rc) {
+ pr_err("failed to disable host power regs, rc=%d\n",
+ rc);
+ goto error;
+ }
+ }
+error:
+ return rc;
+}
+
+static int dsi_ctrl_vote_for_bandwidth(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+ int rc = 0;
+ bool changed = false;
+ struct dsi_ctrl_bus_scale_info *axi_bus = &dsi_ctrl->axi_bus_info;
+
+ if (on) {
+ if (axi_bus->refcount == 0)
+ changed = true;
+
+ axi_bus->refcount++;
+ } else {
+ if (axi_bus->refcount != 0) {
+ axi_bus->refcount--;
+
+ if (axi_bus->refcount == 0)
+ changed = true;
+ } else {
+ pr_err("bus bw votes are not balanced\n");
+ }
+ }
+
+ if (changed) {
+ rc = msm_bus_scale_client_update_request(axi_bus->bus_handle,
+ on ? 1 : 0);
+ if (rc)
+ pr_err("bus scale client update failed, rc=%d\n", rc);
+ }
+
+ return rc;
+}
+
+static int dsi_ctrl_copy_and_pad_cmd(struct dsi_ctrl *dsi_ctrl,
+ const struct mipi_dsi_packet *packet,
+ u8 **buffer,
+ u32 *size)
+{
+ int rc = 0;
+ u8 *buf = NULL;
+ u32 len, i;
+
+ len = packet->size;
+ len += 0x3; len &= ~0x03; /* Align to 32 bits */
+
+ buf = devm_kzalloc(&dsi_ctrl->pdev->dev, len * sizeof(u8), GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ for (i = 0; i < len; i++) {
+ if (i >= packet->size)
+ buf[i] = 0xFF;
+ else if (i < sizeof(packet->header))
+ buf[i] = packet->header[i];
+ else
+ buf[i] = packet->payload[i - sizeof(packet->header)];
+ }
+
+ if (packet->payload_length > 0)
+ buf[3] |= BIT(6);
+
+ buf[3] |= BIT(7);
+ *buffer = buf;
+ *size = len;
+
+ return rc;
+}
+
+/* Transmit one DSI command over the TX command FIFO.
+ *
+ * @dsi_ctrl: controller handle.
+ * @msg:      message to send (packetized with mipi_dsi_create_packet()).
+ * @flags:    DSI_CTRL_CMD_* modifiers; only FIFO transfers are supported.
+ *
+ * Unless DSI_CTRL_CMD_DEFER_TRIGGER is set, the transfer is triggered
+ * immediately and this function busy-polls (up to ~10ms) for the
+ * DSI_CMD_MODE_DMA_DONE interrupt.
+ *
+ * Return: 0 on success, -ENOTSUPP for non-FIFO requests, -ETIMEDOUT if the
+ * DMA-done interrupt never fires, or a packetization/copy error code.
+ */
+static int dsi_message_tx(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+	struct mipi_dsi_packet packet;
+	struct dsi_ctrl_cmd_dma_fifo_info cmd;
+	u32 hw_flags = 0;
+	u32 length = 0;
+	u8 *buffer = NULL;
+
+	if (!(flags & DSI_CTRL_CMD_FIFO_STORE)) {
+		pr_err("Memory DMA is not supported, use FIFO\n");
+		/* was falling through with rc == 0, reporting success */
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+	rc = mipi_dsi_create_packet(&packet, msg);
+	if (rc) {
+		pr_err("Failed to create message packet, rc=%d\n", rc);
+		goto error;
+	}
+
+	if (flags & DSI_CTRL_CMD_FIFO_STORE) {
+		rc = dsi_ctrl_copy_and_pad_cmd(dsi_ctrl,
+				&packet,
+				&buffer,
+				&length);
+		if (rc) {
+			pr_err("[%s] failed to copy message, rc=%d\n",
+			       dsi_ctrl->name, rc);
+			goto error;
+		}
+		cmd.command = (u32 *)buffer;
+		cmd.size = length;
+		cmd.en_broadcast = (flags & DSI_CTRL_CMD_BROADCAST) ?
+				     true : false;
+		cmd.is_master = (flags & DSI_CTRL_CMD_BROADCAST_MASTER) ?
+				  true : false;
+		cmd.use_lpm = (msg->flags & MIPI_DSI_MSG_USE_LPM) ?
+				true : false;
+	}
+
+	hw_flags |= (flags & DSI_CTRL_CMD_DEFER_TRIGGER) ?
+			DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER : 0;
+
+	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER))
+		reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
+
+	if (flags & DSI_CTRL_CMD_FIFO_STORE)
+		dsi_ctrl->hw.ops.kickoff_fifo_command(&dsi_ctrl->hw,
+						      &cmd,
+						      hw_flags);
+
+	if (!(flags & DSI_CTRL_CMD_DEFER_TRIGGER)) {
+		u32 retry = 10;
+		u32 status = 0;
+		u64 error = 0;
+		u32 mask = (DSI_CMD_MODE_DMA_DONE);
+
+		/* poll for DMA done; clear status/error each pass so stale
+		 * bits cannot satisfy or mask the next iteration
+		 */
+		while ((status == 0) && (retry > 0)) {
+			udelay(1000);
+			status = dsi_ctrl->hw.ops.get_interrupt_status(
+								&dsi_ctrl->hw);
+			error = dsi_ctrl->hw.ops.get_error_status(
+								&dsi_ctrl->hw);
+			status &= mask;
+			retry--;
+			dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+								status);
+			dsi_ctrl->hw.ops.clear_error_status(&dsi_ctrl->hw,
+							    error);
+		}
+		pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
+		if (retry == 0 && status == 0) {
+			pr_err("[DSI_%d]Command transfer failed\n",
+			       dsi_ctrl->index);
+			/* was returning 0 even though the transfer timed out */
+			rc = -ETIMEDOUT;
+		}
+
+		dsi_ctrl->hw.ops.reset_cmd_fifo(&dsi_ctrl->hw);
+	}
+error:
+	if (buffer)
+		devm_kfree(&dsi_ctrl->pdev->dev, buffer);
+	return rc;
+}
+
+/* Send the MIPI "Set Maximum Return Packet Size" command on the same
+ * virtual channel as the pending read, so the panel bounds its response.
+ */
+static int dsi_set_max_return_size(struct dsi_ctrl *dsi_ctrl,
+				   const struct mipi_dsi_msg *rx_msg,
+				   u32 size)
+{
+	/* size is sent little-endian in a two byte payload */
+	u8 payload[2] = { (u8)(size & 0xFF), (u8)(size >> 8) };
+	struct mipi_dsi_msg max_pkt_msg = {
+		.channel = rx_msg->channel,
+		.type = MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE,
+		.tx_len = 2,
+		.tx_buf = payload,
+	};
+	int rc;
+
+	rc = dsi_message_tx(dsi_ctrl, &max_pkt_msg, 0x0);
+	if (rc)
+		pr_err("failed to send max return size packet, rc=%d\n", rc);
+
+	return rc;
+}
+
+/* Perform a DSI read: program the max return packet size, transmit the read
+ * request, and loop until the expected number of bytes has been consumed.
+ *
+ * Responses of up to 2 bytes arrive as a single short packet; longer
+ * responses are read in chunks of up to 10 bytes.
+ *
+ * FIXME: the FIFO read-out step is not implemented in this revision, so
+ * bytes_read is never advanced; long reads are failed with -EIO instead of
+ * spinning forever (the original looped indefinitely here).
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_message_rx(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+	u32 rd_pkt_size;
+	u32 total_read_len;
+	u32 bytes_read = 0, tot_bytes_read = 0;
+	u32 current_read_len;
+	bool short_resp = false;
+	bool read_done = false;
+
+	if (msg->rx_len <= 2) {
+		short_resp = true;
+		rd_pkt_size = msg->rx_len;
+		total_read_len = 4;
+	} else {
+		short_resp = false;
+		current_read_len = 10;
+		if (msg->rx_len < current_read_len)
+			rd_pkt_size = msg->rx_len;
+		else
+			rd_pkt_size = current_read_len;
+
+		total_read_len = current_read_len + 6;
+	}
+
+	while (!read_done) {
+		rc = dsi_set_max_return_size(dsi_ctrl, msg, rd_pkt_size);
+		if (rc) {
+			pr_err("Failed to set max return packet size, rc=%d\n",
+			       rc);
+			goto error;
+		}
+
+		rc = dsi_message_tx(dsi_ctrl, msg, flags);
+		if (rc) {
+			pr_err("Message transmission failed, rc=%d\n", rc);
+			goto error;
+		}
+
+
+		tot_bytes_read += bytes_read;
+		if (short_resp)
+			read_done = true;
+		else if (msg->rx_len <= tot_bytes_read)
+			read_done = true;
+		else if (bytes_read == 0) {
+			/* no forward progress: fail instead of hanging */
+			pr_err("no data read back, aborting rx\n");
+			rc = -EIO;
+			goto error;
+		}
+	}
+error:
+	return rc;
+}
+
+
+/* Request ULPS (ultra low power state) entry on the clock lane and, for
+ * command-mode panels, the data lanes; then verify through the hardware
+ * status that every requested lane actually entered ULPS.
+ */
+static int dsi_enable_ulps(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u32 lanes = 0;
+	u32 ulps_lanes;
+
+	/* NOTE(review): data lanes are included only in command mode;
+	 * confirm video-mode lanes are intentionally excluded here.
+	 */
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+	dsi_ctrl->hw.ops.ulps_request(&dsi_ctrl->hw, lanes);
+
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+
+	/* every requested lane must report ULPS entry */
+	if ((lanes & ulps_lanes) != lanes) {
+		pr_err("Failed to enter ULPS, request=0x%x, actual=0x%x\n",
+		       lanes, ulps_lanes);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+/* Bring lanes out of ULPS: issue the exit request only for lanes that are
+ * actually in ULPS, wait the spec-mandated 1 ms, clear the ULPS request,
+ * and verify no lane is still stuck in ULPS.
+ */
+static int dsi_disable_ulps(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u32 ulps_lanes, lanes = 0;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		lanes = dsi_ctrl->host_config.common_config.data_lanes;
+
+	lanes |= DSI_CLOCK_LANE;
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+
+	if ((lanes & ulps_lanes) != lanes)
+		pr_err("Mismatch between lanes in ULPS\n");
+
+	/* only exit ULPS on lanes actually reported to be in ULPS */
+	lanes &= ulps_lanes;
+
+	dsi_ctrl->hw.ops.ulps_exit(&dsi_ctrl->hw, lanes);
+
+	/* 1 ms delay is recommended by specification */
+	udelay(1000);
+
+	dsi_ctrl->hw.ops.clear_ulps_request(&dsi_ctrl->hw, lanes);
+
+	ulps_lanes = dsi_ctrl->hw.ops.get_lanes_in_ulps(&dsi_ctrl->hw);
+	if (ulps_lanes & lanes) {
+		pr_err("Lanes (0x%x) stuck in ULPS\n", ulps_lanes);
+		rc = -EIO;
+	}
+
+	return rc;
+}
+
+/* Reset the software state tracker to the power-off defaults.  When
+ * continuation splash is active the bootloader-programmed state would be
+ * preserved; splash detection is hard-wired off in this revision.
+ */
+static int dsi_ctrl_drv_state_init(struct dsi_ctrl *dsi_ctrl)
+{
+	struct dsi_ctrl_state_info *state = &dsi_ctrl->current_state;
+	bool splash_enabled = false;
+
+	if (splash_enabled)
+		return 0;
+
+	state->power_state = DSI_CTRL_POWER_OFF;
+	state->cmd_engine_state = DSI_CTRL_ENGINE_OFF;
+	state->vid_engine_state = DSI_CTRL_ENGINE_OFF;
+	state->pwr_enabled = false;
+	state->core_clk_enabled = false;
+	state->link_clk_enabled = false;
+	state->ulps_enabled = false;
+	state->clamp_enabled = false;
+	state->clk_source_set = false;
+
+	return 0;
+}
+
+/* Release the controller interrupt line acquired during interrupt setup. */
+int dsi_ctrl_intr_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	struct dsi_ctrl_interrupts *ints = &dsi_ctrl->int_info;
+
+	devm_free_irq(&dsi_ctrl->pdev->dev, ints->irq, dsi_ctrl);
+
+	return 0;
+}
+
+/* Free the command TX GEM buffer: drop its iova mapping, release the
+ * object, and clear the pointer so the teardown is idempotent.
+ */
+static int dsi_ctrl_buffer_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	if (dsi_ctrl->tx_cmd_buf) {
+		msm_gem_put_iova(dsi_ctrl->tx_cmd_buf, 0);
+
+		msm_gem_free_object(dsi_ctrl->tx_cmd_buf);
+		dsi_ctrl->tx_cmd_buf = NULL;
+	}
+
+	return 0;
+}
+
+/* Allocate a 4K uncached GEM buffer used for command transmission and map
+ * it to an iova.  The DMA engine requires the buffer to be 8-byte aligned;
+ * anything else is rejected and the buffer is torn down again.
+ */
+int dsi_ctrl_buffer_init(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+	u64 iova = 0;
+
+	dsi_ctrl->tx_cmd_buf = msm_gem_new(dsi_ctrl->drm_dev,
+					   SZ_4K,
+					   MSM_BO_UNCACHED);
+
+	if (IS_ERR(dsi_ctrl->tx_cmd_buf)) {
+		rc = PTR_ERR(dsi_ctrl->tx_cmd_buf);
+		pr_err("failed to allocate gem, rc=%d\n", rc);
+		/* clear so buffer_deinit never sees an ERR_PTR */
+		dsi_ctrl->tx_cmd_buf = NULL;
+		goto error;
+	}
+
+	dsi_ctrl->cmd_buffer_size = SZ_4K;
+
+	rc = msm_gem_get_iova(dsi_ctrl->tx_cmd_buf, 0, &iova);
+	if (rc) {
+		pr_err("failed to get iova, rc=%d\n", rc);
+		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
+		goto error;
+	}
+
+	if (iova & 0x07) {
+		pr_err("Tx command buffer is not 8 byte aligned\n");
+		rc = -ENOTSUPP;
+		(void)dsi_ctrl_buffer_deinit(dsi_ctrl);
+		goto error;
+	}
+error:
+	return rc;
+}
+
+/* Toggle the PHY I/O clamps on the clock lane (plus the data lanes for
+ * command-mode panels), passing along whether ULPS is currently active.
+ */
+static int dsi_enable_io_clamp(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	u32 clamp_lanes = DSI_CLOCK_LANE;
+	bool ulps_active = dsi_ctrl->current_state.ulps_enabled;
+
+	if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE)
+		clamp_lanes |= dsi_ctrl->host_config.common_config.data_lanes;
+
+	if (enable)
+		dsi_ctrl->hw.ops.clamp_enable(&dsi_ctrl->hw, clamp_lanes,
+					      ulps_active);
+	else
+		dsi_ctrl->hw.ops.clamp_disable(&dsi_ctrl->hw, clamp_lanes,
+					       ulps_active);
+
+	return 0;
+}
+
+/* Probe one DSI controller instance: parse DT (cell-index, label), map
+ * registers, initialize clocks/supplies, attach the HW catalog for the
+ * matched IP version, set up the AXI bus client (failure there is only
+ * logged), and publish the controller on the global list.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_ctrl_dev_probe(struct platform_device *pdev)
+{
+	struct dsi_ctrl *dsi_ctrl;
+	struct dsi_ctrl_list_item *item;
+	const struct of_device_id *id;
+	enum dsi_ctrl_version version;
+	u32 index = 0;
+	int rc = 0;
+
+	id = of_match_node(msm_dsi_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	version = *(enum dsi_ctrl_version *)id->data;
+
+	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+	dsi_ctrl = devm_kzalloc(&pdev->dev, sizeof(*dsi_ctrl), GFP_KERNEL);
+	if (!dsi_ctrl)
+		return -ENOMEM;
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		pr_debug("cell index not set, default to 0\n");
+		index = 0;
+	}
+
+	dsi_ctrl->index = index;
+
+	dsi_ctrl->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!dsi_ctrl->name)
+		dsi_ctrl->name = DSI_CTRL_DEFAULT_LABEL;
+
+	rc = dsi_ctrl_init_regmap(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse register information, rc = %d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_ctrl_clocks_init(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse clock information, rc = %d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_ctrl_supplies_init(pdev, dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to parse voltage supplies, rc = %d\n", rc);
+		goto fail_clks;
+	}
+
+	dsi_ctrl->version = version;
+	rc = dsi_catalog_ctrl_setup(&dsi_ctrl->hw, dsi_ctrl->version,
+				    dsi_ctrl->index);
+	if (rc) {
+		pr_err("Catalog does not support version (%d)\n",
+		       dsi_ctrl->version);
+		goto fail_supplies;
+	}
+
+	rc = dsi_ctrl_axi_bus_client_init(pdev, dsi_ctrl);
+	if (rc)
+		pr_err("failed to init axi bus client, rc = %d\n", rc);
+
+	item->ctrl = dsi_ctrl;
+
+	mutex_init(&dsi_ctrl->ctrl_lock);
+
+	dsi_ctrl->pdev = pdev;
+	platform_set_drvdata(pdev, dsi_ctrl);
+
+	/* Publish on the global list only after the controller is fully
+	 * initialized: dsi_ctrl_get() can find this entry the moment it is
+	 * added and immediately takes ctrl_lock, so the mutex (and pdev)
+	 * must be valid before list_add().  The original initialized the
+	 * mutex after publishing, leaving a race window.
+	 */
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_add(&item->list, &dsi_ctrl_list);
+	mutex_unlock(&dsi_ctrl_list_lock);
+
+	pr_debug("Probe successful for %s\n", dsi_ctrl->name);
+
+	return 0;
+
+fail_supplies:
+	(void)dsi_ctrl_supplies_deinit(dsi_ctrl);
+fail_clks:
+	(void)dsi_ctrl_clocks_deinit(dsi_ctrl);
+fail:
+	return rc;
+}
+
+/* Remove one DSI controller instance: unlink it from the global list so no
+ * new dsi_ctrl_get() can find it, then tear down the AXI client, supplies
+ * and clocks under the controller lock.
+ */
+static int dsi_ctrl_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dsi_ctrl *dsi_ctrl;
+	struct list_head *pos, *tmp;
+
+	dsi_ctrl = platform_get_drvdata(pdev);
+
+	/* unpublish first so lookups cannot race with the teardown below */
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
+		struct dsi_ctrl_list_item *n = list_entry(pos,
+						  struct dsi_ctrl_list_item,
+						  list);
+		if (n->ctrl == dsi_ctrl) {
+			list_del(&n->list);
+			break;
+		}
+	}
+	mutex_unlock(&dsi_ctrl_list_lock);
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl_axi_bus_client_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to deinitialize axi bus client, rc = %d\n", rc);
+
+	rc = dsi_ctrl_supplies_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
+
+	rc = dsi_ctrl_clocks_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to deinitialize clocks, rc=%d\n", rc);
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	mutex_destroy(&dsi_ctrl->ctrl_lock);
+	devm_kfree(&pdev->dev, dsi_ctrl);
+
+	platform_set_drvdata(pdev, NULL);
+	return 0;
+}
+
+/* Platform driver glue; registered via dsi_ctrl_drv_register(). */
+static struct platform_driver dsi_ctrl_driver = {
+	.probe = dsi_ctrl_dev_probe,
+	.remove = dsi_ctrl_dev_remove,
+	.driver = {
+		.name = "drm_dsi_ctrl",
+		.of_match_table = msm_dsi_of_match,
+	},
+};
+
+/**
+ * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
+ * @of_node: of_node of the DSI controller.
+ *
+ * Gets the DSI controller handle for the corresponding of_node. The ref count
+ * is incremented to one and all subsequent gets will fail until the original
+ * client calls a put.
+ *
+ * Return: DSI Controller handle, ERR_PTR(-EPROBE_DEFER) if the node has not
+ * been probed yet, or ERR_PTR(-EBUSY) if the controller is already claimed.
+ */
+struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node)
+{
+	struct list_head *pos, *tmp;
+	struct dsi_ctrl *ctrl = NULL;
+
+	mutex_lock(&dsi_ctrl_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_ctrl_list) {
+		struct dsi_ctrl_list_item *n;
+
+		n = list_entry(pos, struct dsi_ctrl_list_item, list);
+		if (n->ctrl->pdev->dev.of_node == of_node) {
+			ctrl = n->ctrl;
+			break;
+		}
+	}
+	mutex_unlock(&dsi_ctrl_list_lock);
+
+	if (!ctrl) {
+		/* device may simply not have probed yet - defer */
+		pr_err("Device with of node not found\n");
+		ctrl = ERR_PTR(-EPROBE_DEFER);
+		return ctrl;
+	}
+
+	mutex_lock(&ctrl->ctrl_lock);
+	/* single-owner model: exactly one client may hold the controller */
+	if (ctrl->refcount == 1) {
+		pr_err("[%s] Device in use\n", ctrl->name);
+		ctrl = ERR_PTR(-EBUSY);
+	} else {
+		ctrl->refcount++;
+	}
+	mutex_unlock(&ctrl->ctrl_lock);
+	return ctrl;
+}
+
+/**
+ * dsi_ctrl_put() - releases a dsi controller handle.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases the DSI controller. Driver will clean up all resources and puts back
+ * the DSI controller into reset state.
+ */
+void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl)
+{
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	/* guard against a put without a matching get */
+	if (dsi_ctrl->refcount == 0)
+		pr_err("Unbalanced dsi_ctrl_put call\n");
+	else
+		dsi_ctrl->refcount--;
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+}
+
+/**
+ * dsi_ctrl_drv_init() - initialize dsi controller driver.
+ * @dsi_ctrl: DSI controller handle.
+ * @parent:   Parent directory for debug fs.
+ *
+ * Initializes DSI controller driver. Driver should be initialized after
+ * dsi_ctrl_get() succeeds.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || !parent) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	rc = dsi_ctrl_drv_state_init(dsi_ctrl);
+	if (rc) {
+		pr_err("Failed to initialize driver state, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = dsi_ctrl_debugfs_init(dsi_ctrl, parent);
+	if (rc) {
+		pr_err("[DSI_%d] failed to init debug fs, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+/* common unlock path for both success and failure */
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases all resources acquired by dsi_ctrl_drv_init().
+ *
+ * Return: error code (last failure wins; earlier failures are only logged).
+ */
+int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_debugfs_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("failed to release debugfs root, rc=%d\n", rc);
+
+	rc = dsi_ctrl_buffer_deinit(dsi_ctrl);
+	if (rc)
+		pr_err("Failed to free cmd buffers, rc=%d\n", rc);
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Performs a PHY software reset on the DSI controller. Reset should be done
+ * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
+ * not enabled.
+ *
+ * This function will fail if driver is in any other state.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	/* reject the reset unless the state machine allows it */
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	dsi_ctrl->hw.ops.phy_sw_reset(&dsi_ctrl->hw);
+
+	pr_debug("[DSI_%d] PHY soft reset done\n", dsi_ctrl->index);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_PHY_SW_RESET, 0x0);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_async_timing_update() - update only controller timing
+ * @dsi_ctrl: DSI controller handle.
+ * @timing:   New DSI timing info
+ *
+ * Updates host timing values to conduct a seamless transition to new timing
+ * For example, to update the porch values in a dynamic fps switch.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
+		struct dsi_mode_info *timing)
+{
+	struct dsi_mode_info *host_mode;
+	int rc = 0;
+
+	if (!dsi_ctrl || !timing) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	/* the engine must already be running for a seamless update */
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ASYNC_TIMING,
+			DSI_CTRL_ENGINE_ON);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto exit;
+	}
+
+	/* cache the new timing before programming the hardware */
+	host_mode = &dsi_ctrl->host_config.video_timing;
+	memcpy(host_mode, timing, sizeof(*host_mode));
+
+	dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw, host_mode);
+
+exit:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_host_init() - Initialize DSI host hardware.
+ * @dsi_ctrl: DSI controller handle.
+ * @cont_splash_enabled: true when the bootloader already programmed the host.
+ *
+ * Initializes DSI controller hardware with host configuration provided by
+ * dsi_ctrl_update_host_config(). Initialization can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
+ * performed.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	/* with continuation splash, leave the bootloader programming intact
+	 * and only (re)arm the interrupts below
+	 */
+	if (!cont_splash_enabled) {
+		rc = dsi_ctrl_check_state(
+			dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
+		if (rc) {
+			pr_err("[DSI_%d] Ctrl state check failed, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+
+		dsi_ctrl->hw.ops.setup_lane_map(&dsi_ctrl->hw,
+					&dsi_ctrl->host_config.lane_map);
+
+		dsi_ctrl->hw.ops.host_setup(&dsi_ctrl->hw,
+				    &dsi_ctrl->host_config.common_config);
+
+		if (dsi_ctrl->host_config.panel_mode == DSI_OP_CMD_MODE) {
+			dsi_ctrl->hw.ops.cmd_engine_setup(&dsi_ctrl->hw,
+					&dsi_ctrl->host_config.common_config,
+					&dsi_ctrl->host_config.u.cmd_engine);
+
+			/* NOTE(review): h_active * 3 looks like a 24bpp
+			 * bytes-per-line assumption - confirm against the
+			 * stream setup HW documentation.
+			 */
+			dsi_ctrl->hw.ops.setup_cmd_stream(&dsi_ctrl->hw,
+				dsi_ctrl->host_config.video_timing.h_active,
+				dsi_ctrl->host_config.video_timing.h_active * 3,
+				dsi_ctrl->host_config.video_timing.v_active,
+				0x0);
+		} else {
+			dsi_ctrl->hw.ops.video_engine_setup(&dsi_ctrl->hw,
+					&dsi_ctrl->host_config.common_config,
+					&dsi_ctrl->host_config.u.video_engine);
+			dsi_ctrl->hw.ops.set_video_timing(&dsi_ctrl->hw,
+					  &dsi_ctrl->host_config.video_timing);
+		}
+	}
+
+	dsi_ctrl->hw.ops.enable_status_interrupts(&dsi_ctrl->hw, 0x0);
+	dsi_ctrl->hw.ops.enable_error_interrupts(&dsi_ctrl->hw, 0x0);
+
+	/* Perform a soft reset before enabling dsi controller
+	 * But skip the reset if dsi is enabled in bootloader.
+	 */
+	if (!cont_splash_enabled)
+		dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+	pr_debug("[DSI_%d]Host initialization complete\n", dsi_ctrl->index);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x1);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * De-initializes DSI controller hardware. It can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
+	if (rc) {
+		/* was printed twice; a single message is sufficient */
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	pr_debug("[DSI_%d] Host deinitization complete\n", dsi_ctrl->index);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_INIT, 0x0);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_update_host_config() - update dsi host configuration
+ * @ctrl:   DSI controller handle.
+ * @config: DSI host configuration.
+ * @flags:  dsi_mode_flags modifying the behavior
+ *
+ * Updates driver with new Host configuration to use for host initialization.
+ * This function call will only update the software context. The stored
+ * configuration information will be used when the host is initialized.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_config(struct dsi_ctrl *ctrl,
+				struct dsi_host_config *config,
+				int flags)
+{
+	int rc = 0;
+
+	if (!ctrl || !config) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_validate_panel_info(ctrl, config);
+	if (rc) {
+		pr_err("panel validation failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	/* seamless transitions keep the current link frequencies */
+	if (!(flags & DSI_MODE_FLAG_SEAMLESS)) {
+		rc = dsi_ctrl_update_link_freqs(ctrl, config);
+		if (rc) {
+			pr_err("[%s] failed to update link frequencies, rc=%d\n",
+			       ctrl->name, rc);
+			goto error;
+		}
+	}
+
+	pr_debug("[DSI_%d]Host config updated\n", ctrl->index);
+	memcpy(&ctrl->host_config, config, sizeof(ctrl->host_config));
+error:
+	mutex_unlock(&ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_validate_timing() - validate a video timing configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @mode:     Pointer to timing data.
+ *
+ * Driver will validate if the timing configuration is supported on the
+ * controller hardware.
+ *
+ * Return: error code if timing is not supported.
+ */
+int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
+			     struct dsi_mode_info *mode)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	/* NOTE(review): no actual timing checks are implemented yet -
+	 * every non-NULL timing is accepted.  TODO: add HW limit checks.
+	 */
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+
+	return rc;
+}
+
+/**
+ * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
+ * @dsi_ctrl: DSI controller handle.
+ * @msg:      Message to transfer on DSI link.
+ * @flags:    Modifiers for message transfer.
+ *
+ * Command transfer can be done only when command engine is enabled. The
+ * transfer API will block until either the command transfer finishes or
+ * the timeout value is reached. If the trigger is deferred, it will return
+ * without triggering the transfer. Command parameters are programmed to
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
+			  const struct mipi_dsi_msg *msg,
+			  u32 flags)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl || !msg) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	/* the whole transfer is bracketed by an AXI bandwidth vote */
+	rc = dsi_ctrl_vote_for_bandwidth(dsi_ctrl, true);
+	if (rc) {
+		pr_err("bandwidth request failed, rc=%d\n", rc);
+		goto error;
+	}
+
+	/* dispatch: DSI_CTRL_CMD_READ selects the rx path */
+	if (flags & DSI_CTRL_CMD_READ) {
+		rc = dsi_message_rx(dsi_ctrl, msg, flags);
+		if (rc)
+			pr_err("read message failed, rc=%d\n", rc);
+	} else {
+		rc = dsi_message_tx(dsi_ctrl, msg, flags);
+		if (rc)
+			pr_err("command msg transfer failed, rc = %d\n", rc);
+	}
+
+	/* state is updated and the vote dropped even on failure */
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_TX, 0x0);
+
+	(void)dsi_ctrl_vote_for_bandwidth(dsi_ctrl, false);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
+ * @dsi_ctrl: DSI controller handle.
+ * @flags:    Modifiers.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags)
+{
+	int rc = 0;
+	u32 status = 0;
+	u32 mask = (DSI_CMD_MODE_DMA_DONE);
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	reinit_completion(&dsi_ctrl->int_info.cmd_dma_done);
+
+	dsi_ctrl->hw.ops.trigger_command_dma(&dsi_ctrl->hw);
+
+	/* only the broadcast master polls for DMA completion */
+	if ((flags & DSI_CTRL_CMD_BROADCAST) &&
+	    (flags & DSI_CTRL_CMD_BROADCAST_MASTER)) {
+		u32 retry = 10;
+
+		while ((status == 0) && (retry > 0)) {
+			udelay(1000);
+			status = dsi_ctrl->hw.ops.get_interrupt_status(
+								&dsi_ctrl->hw);
+			status &= mask;
+			retry--;
+			dsi_ctrl->hw.ops.clear_interrupt_status(&dsi_ctrl->hw,
+								status);
+		}
+		pr_debug("INT STATUS = %x, retry = %d\n", status, retry);
+		/* NOTE(review): rc stays 0 even when the retries are
+		 * exhausted, so callers never see the timeout - confirm
+		 * whether this should return an error.
+		 */
+		if (retry == 0)
+			pr_err("[DSI_%d]Command transfer failed\n",
+			       dsi_ctrl->index);
+	}
+
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_power_state() - set power state for dsi controller
+ * @dsi_ctrl: DSI controller handle.
+ * @state:    Power state.
+ *
+ * Set power state for DSI controller. Power state can be changed only when
+ * Controller, Video and Command engines are turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
+			     enum dsi_power_state state)
+{
+	int rc = 0;
+	bool core_clk_enable = false;
+	bool link_clk_enable = false;
+	bool reg_enable = false;
+	struct dsi_ctrl_state_info *drv_state;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_POWER_MAX)) {
+		pr_err("Invalid Params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE,
+				  state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	/* power states are cumulative: link clocks imply core clocks,
+	 * core clocks imply regulators
+	 */
+	if (state == DSI_CTRL_POWER_LINK_CLK_ON)
+		reg_enable = core_clk_enable = link_clk_enable = true;
+	else if (state == DSI_CTRL_POWER_CORE_CLK_ON)
+		reg_enable = core_clk_enable = true;
+	else if (state == DSI_CTRL_POWER_VREG_ON)
+		reg_enable = true;
+
+	drv_state = &dsi_ctrl->current_state;
+
+	/* enable path runs top-down (vreg -> core -> link); the disable
+	 * path below runs in reverse.  Each step only fires on an actual
+	 * transition versus the cached driver state.
+	 */
+	if ((reg_enable) && (reg_enable != drv_state->pwr_enabled)) {
+		rc = dsi_ctrl_enable_supplies(dsi_ctrl, true);
+		if (rc) {
+			pr_err("[%d]failed to enable voltage supplies, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	if ((core_clk_enable) &&
+	    (core_clk_enable != drv_state->core_clk_enabled)) {
+		rc = dsi_clk_enable_core_clks(&dsi_ctrl->clk_info.core_clks,
+					      true);
+		if (rc) {
+			pr_err("[%d] failed to enable core clocks, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	/* link clocks are toggled in either direction from here */
+	if (link_clk_enable != drv_state->link_clk_enabled) {
+		rc = dsi_clk_enable_link_clks(&dsi_ctrl->clk_info.link_clks,
+					      link_clk_enable);
+		if (rc) {
+			pr_err("[%d] failed to enable link clocks, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	if ((!core_clk_enable) &&
+	    (core_clk_enable != drv_state->core_clk_enabled)) {
+		rc = dsi_clk_enable_core_clks(&dsi_ctrl->clk_info.core_clks,
+					      false);
+		if (rc) {
+			pr_err("[%d] failed to disable core clocks, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	if ((!reg_enable) && (reg_enable != drv_state->pwr_enabled)) {
+		rc = dsi_ctrl_enable_supplies(dsi_ctrl, false);
+		if (rc) {
+			pr_err("[%d]failed to disable vreg supplies, rc=%d\n",
+			       dsi_ctrl->index, rc);
+			goto error;
+		}
+	}
+
+	pr_debug("[DSI_%d] Power state updated to %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_POWER_STATE_CHANGE, state);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
+ * @dsi_ctrl: DSI controller handle.
+ * @on:       enable/disable test pattern.
+ *
+ * Test pattern can be enabled only after Video engine (for video mode panels)
+ * or command engine (for cmd mode panels) is enabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	/* the generator is (re)configured only when enabling; disable just
+	 * clears the enable bit below
+	 */
+	if (on) {
+		if (dsi_ctrl->host_config.panel_mode == DSI_OP_VIDEO_MODE) {
+			dsi_ctrl->hw.ops.video_test_pattern_setup(&dsi_ctrl->hw,
+							  DSI_TEST_PATTERN_INC,
+							  0xFFFF);
+		} else {
+			dsi_ctrl->hw.ops.cmd_test_pattern_setup(
+							&dsi_ctrl->hw,
+							DSI_TEST_PATTERN_INC,
+							0xFFFF,
+							0x0);
+		}
+	}
+	dsi_ctrl->hw.ops.test_pattern_enable(&dsi_ctrl->hw, on);
+
+	pr_debug("[DSI_%d]Set test pattern state=%d\n", dsi_ctrl->index, on);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_TPG, on);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_host_engine_state() - set host engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state:    Engine state.
+ *
+ * Host engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
+				   enum dsi_engine_state state)
+{
+	int rc;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto done;
+	}
+
+	/* a single boolean drives the host controller enable bit */
+	dsi_ctrl->hw.ops.ctrl_en(&dsi_ctrl->hw,
+				 state == DSI_CTRL_ENGINE_ON);
+
+	pr_debug("[DSI_%d] Set host engine state = %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_HOST_ENGINE, state);
+done:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_cmd_engine_state() - set command engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state:    Engine state.
+ *
+ * Command engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_engine_state state)
+{
+	int rc;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto done;
+	}
+
+	/* a single boolean drives the command engine enable bit */
+	dsi_ctrl->hw.ops.cmd_engine_en(&dsi_ctrl->hw,
+				       state == DSI_CTRL_ENGINE_ON);
+
+	pr_debug("[DSI_%d] Set cmd engine state = %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CMD_ENGINE, state);
+done:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_vid_engine_state() - set video engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state:    Engine state.
+ *
+ * Video engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
+				  enum dsi_engine_state state)
+{
+	int rc = 0;
+	bool on;
+
+	if (!dsi_ctrl || (state >= DSI_CTRL_ENGINE_MAX)) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	on = (state == DSI_CTRL_ENGINE_ON) ? true : false;
+	dsi_ctrl->hw.ops.video_engine_en(&dsi_ctrl->hw, on);
+
+	/* perform a reset when turning off video engine */
+	if (!on)
+		dsi_ctrl->hw.ops.soft_reset(&dsi_ctrl->hw);
+
+	pr_debug("[DSI_%d] Set video engine state = %d\n", dsi_ctrl->index,
+		 state);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_VID_ENGINE, state);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable:   enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_ULPS_TOGGLE, enable);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	/* delegate the lane-level sequencing to the enable/disable helpers */
+	if (enable)
+		rc = dsi_enable_ulps(dsi_ctrl);
+	else
+		rc = dsi_disable_ulps(dsi_ctrl);
+
+	if (rc) {
+		pr_err("[DSI_%d] Ulps state change(%d) failed, rc=%d\n",
+		       dsi_ctrl->index, enable, rc);
+		goto error;
+	}
+
+	pr_debug("[DSI_%d] ULPS state = %d\n", dsi_ctrl->index, enable);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_ULPS_TOGGLE, enable);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
+ * @dsi_ctrl: DSI controller handle.
+ * @enable:   enable/disable clamping.
+ *
+ * Clamps can be enabled/disabled while DSI controller is still turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl, bool enable)
+{
+	int rc = 0;
+
+	if (!dsi_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_CLAMP_TOGGLE, enable);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	rc = dsi_enable_io_clamp(dsi_ctrl, enable);
+	if (rc) {
+		pr_err("[DSI_%d] Failed to enable IO clamp\n", dsi_ctrl->index);
+		goto error;
+	}
+
+	pr_debug("[DSI_%d] Clamp state = %d\n", dsi_ctrl->index, enable);
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_CLAMP_TOGGLE, enable);
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_set_clock_source() - set clock source for dsi link clocks
+ * @dsi_ctrl:    DSI controller handle.
+ * @source_clks: Source clocks for DSI link clocks.
+ *
+ * Clock source should be changed while link clocks are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
+			      struct dsi_clk_link_set *source_clks)
+{
+	int rc = 0;
+	u32 op_state = 0;
+
+	if (!dsi_ctrl || !source_clks) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_ctrl->ctrl_lock);
+
+	/* op_state 1 = both byte and pixel source clocks supplied */
+	if (source_clks->pixel_clk && source_clks->byte_clk)
+		op_state = 1;
+
+	rc = dsi_ctrl_check_state(dsi_ctrl, DSI_CTRL_OP_SET_CLK_SOURCE,
+				  op_state);
+	if (rc) {
+		pr_err("[DSI_%d] Controller state check failed, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		goto error;
+	}
+
+	rc = dsi_clk_update_parent(source_clks, &dsi_ctrl->clk_info.rcg_clks);
+	if (rc) {
+		pr_err("[DSI_%d]Failed to update link clk parent, rc=%d\n",
+		       dsi_ctrl->index, rc);
+		/* best-effort rollback to the previous PLL parents */
+		(void)dsi_clk_update_parent(&dsi_ctrl->clk_info.pll_op_clks,
+					    &dsi_ctrl->clk_info.rcg_clks);
+		goto error;
+	}
+
+	dsi_ctrl->clk_info.pll_op_clks.byte_clk = source_clks->byte_clk;
+	dsi_ctrl->clk_info.pll_op_clks.pixel_clk = source_clks->pixel_clk;
+
+	pr_debug("[DSI_%d] Source clocks are updated\n", dsi_ctrl->index);
+
+	dsi_ctrl_update_state(dsi_ctrl, DSI_CTRL_OP_SET_CLK_SOURCE, op_state);
+
+error:
+	mutex_unlock(&dsi_ctrl->ctrl_lock);
+	return rc;
+}
+
+/**
+ * dsi_ctrl_drv_register() - register platform driver for dsi controller
+ *
+ * NOTE(review): the registration return value is discarded; presumably the
+ * caller cannot act on it - confirm whether it should be propagated.
+ */
+void dsi_ctrl_drv_register(void)
+{
+	platform_driver_register(&dsi_ctrl_driver);
+}
+
+/**
+ * dsi_ctrl_drv_unregister() - unregister platform driver
+ *
+ * Counterpart of dsi_ctrl_drv_register().
+ */
+void dsi_ctrl_drv_unregister(void)
+{
+	platform_driver_unregister(&dsi_ctrl_driver);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
new file mode 100644
index 000000000000..c343c41eb8e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl.h
@@ -0,0 +1,490 @@
+/*
+ * Copyright (c) 2015-2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_H_
+#define _DSI_CTRL_H_
+
+#include <linux/debugfs.h>
+
+#include "dsi_defs.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+#include "drm_mipi_dsi.h"
+
+/*
+ * DSI Command transfer modifiers
+ * @DSI_CTRL_CMD_READ: The current transfer involves reading data.
+ * @DSI_CTRL_CMD_BROADCAST: The current transfer needs to be done in
+ * broadcast mode to multiple slaves.
+ * @DSI_CTRL_CMD_BROADCAST_MASTER: This controller is the master and the slaves
+ * sync to this trigger.
+ * @DSI_CTRL_CMD_DEFER_TRIGGER: Defer the command trigger to later.
+ * @DSI_CTRL_CMD_FIFO_STORE: Use FIFO for command transfer in place of
+ * reading data from memory.
+ */
+#define DSI_CTRL_CMD_READ 0x1
+#define DSI_CTRL_CMD_BROADCAST 0x2
+#define DSI_CTRL_CMD_BROADCAST_MASTER 0x4
+#define DSI_CTRL_CMD_DEFER_TRIGGER 0x8
+#define DSI_CTRL_CMD_FIFO_STORE 0x10
+
+/**
+ * enum dsi_power_state - defines power states for dsi controller.
+ * @DSI_CTRL_POWER_OFF: DSI controller is powered down.
+ * @DSI_CTRL_POWER_VREG_ON: Digital and analog supplies for DSI controller
+ * are powered on.
+ * @DSI_CTRL_POWER_CORE_CLK_ON: DSI core clocks for register access are enabled.
+ * @DSI_CTRL_POWER_LINK_CLK_ON: DSI link clocks for link transfer are enabled.
+ * @DSI_CTRL_POWER_MAX: Maximum value (sentinel, not a valid state).
+ */
+enum dsi_power_state {
+ DSI_CTRL_POWER_OFF = 0,
+ DSI_CTRL_POWER_VREG_ON,
+ DSI_CTRL_POWER_CORE_CLK_ON,
+ DSI_CTRL_POWER_LINK_CLK_ON,
+ DSI_CTRL_POWER_MAX,
+};
+
+/**
+ * enum dsi_engine_state - define engine status for dsi controller.
+ * @DSI_CTRL_ENGINE_OFF: Engine is turned off.
+ * @DSI_CTRL_ENGINE_ON: Engine is turned on.
+ * @DSI_CTRL_ENGINE_MAX: Maximum value (sentinel, not a valid state).
+ */
+enum dsi_engine_state {
+ DSI_CTRL_ENGINE_OFF = 0,
+ DSI_CTRL_ENGINE_ON,
+ DSI_CTRL_ENGINE_MAX,
+};
+
+/**
+ * struct dsi_ctrl_power_info - digital and analog power supplies for dsi host
+ * @digital: Digital power supply required to turn on DSI controller hardware.
+ * @host_pwr: Analog power supplies required to turn on DSI controller hardware.
+ * Even though the DSI controller itself does not require an analog
+ * power supply, supplies required for PLL can be defined here to
+ * allow proper control over these supplies.
+ */
+struct dsi_ctrl_power_info {
+ struct dsi_regulator_info digital;
+ struct dsi_regulator_info host_pwr;
+};
+
+/**
+ * struct dsi_ctrl_clk_info - clock information for DSI controller
+ * @core_clks: Core clocks needed to access DSI controller registers.
+ * @link_clks: Link clocks required to transmit data over DSI link.
+ * @rcg_clks: Root clock generation clocks generated in MMSS_CC. The
+ * output of the PLL is set as parent for these root
+ * clocks. These clocks are specific to controller
+ * instance.
+ * @mux_clks: Mux clocks used for Dynamic refresh feature.
+ * @ext_clks: External byte/pixel clocks from the MMSS block. These
+ * clocks are set as parent to rcg clocks.
+ * @pll_op_clks: Byte/pixel clocks currently used as the link clock
+ * source; updated by dsi_ctrl_set_clock_source().
+ */
+struct dsi_ctrl_clk_info {
+ /* Clocks parsed from DT */
+ struct dsi_core_clk_info core_clks;
+ struct dsi_link_clk_info link_clks;
+ struct dsi_clk_link_set rcg_clks;
+
+ /* Clocks set by DSI Manager */
+ struct dsi_clk_link_set mux_clks;
+ struct dsi_clk_link_set ext_clks;
+ struct dsi_clk_link_set pll_op_clks;
+ struct dsi_clk_link_set shadow_clks; /* NOTE(review): purpose undocumented; likely dynamic refresh - verify */
+};
+
+/**
+ * struct dsi_ctrl_bus_scale_info - Bus scale info for msm-bus bandwidth voting
+ * @bus_scale_table: Bus scale voting usecases.
+ * @bus_handle: Handle used for bus bandwidth voting.
+ * @refcount: Reference count.
+ */
+struct dsi_ctrl_bus_scale_info {
+ struct msm_bus_scale_pdata *bus_scale_table;
+ u32 bus_handle;
+ u32 refcount;
+};
+
+/**
+ * struct dsi_ctrl_state_info - current driver state information
+ * @power_state: Controller power state.
+ * @cmd_engine_state: Status of DSI command engine.
+ * @vid_engine_state: Status of DSI video engine.
+ * @controller_state: Status of DSI Controller engine.
+ * @pwr_enabled: Set to true, if voltage supplies are enabled.
+ * @core_clk_enabled: Set to true, if core clocks are enabled.
+ * @link_clk_enabled: Set to true, if link clocks are enabled.
+ * @ulps_enabled: Set to true, if lanes are in ULPS state.
+ * @clamp_enabled: Set to true, if PHY output is clamped.
+ * @clk_source_set: Set to true, if parent is set for DSI link clocks.
+ */
+struct dsi_ctrl_state_info {
+ enum dsi_power_state power_state;
+ enum dsi_engine_state cmd_engine_state;
+ enum dsi_engine_state vid_engine_state;
+ enum dsi_engine_state controller_state;
+ bool pwr_enabled;
+ bool core_clk_enabled;
+ bool link_clk_enabled;
+ bool ulps_enabled;
+ bool clamp_enabled;
+ bool clk_source_set;
+ bool host_initialized; /* set once host hw is initialized, see dsi_ctrl_host_init() */
+ bool tpg_enabled; /* set when test pattern is on, see dsi_ctrl_set_tpg_state() */
+};
+
+/**
+ * struct dsi_ctrl_interrupts - define interrupt information
+ * @irq: IRQ id for the DSI controller.
+ * @intr_lock: Spinlock to protect access to interrupt registers.
+ * @interrupt_status: Status interrupts which need to be serviced.
+ * @error_status: Error interrupts which need to be serviced.
+ * @interrupts_enabled: Status interrupts which are enabled.
+ * @errors_enabled: Error interrupts which are enabled.
+ * @cmd_dma_done: Completion signal for DSI_CMD_MODE_DMA_DONE interrupt
+ * @vid_frame_done: Completion signal for DSI_VIDEO_MODE_FRAME_DONE int.
+ * @cmd_frame_done: Completion signal for DSI_CMD_FRAME_DONE interrupt.
+ * @interrupt_done_work: Work item for servicing status interrupts.
+ * @error_status_work: Work item for servicing error interrupts.
+ */
+struct dsi_ctrl_interrupts {
+ u32 irq;
+ spinlock_t intr_lock; /* protects access to interrupt registers */
+ u32 interrupt_status;
+ u64 error_status;
+
+ u32 interrupts_enabled;
+ u64 errors_enabled;
+
+ struct completion cmd_dma_done;
+ struct completion vid_frame_done;
+ struct completion cmd_frame_done;
+
+ struct work_struct interrupt_done_work;
+ struct work_struct error_status_work;
+};
+
+/**
+ * struct dsi_ctrl - DSI controller object
+ * @pdev: Pointer to platform device.
+ * @index: Instance id.
+ * @name: Name of the controller instance.
+ * @refcount: ref counter.
+ * @ctrl_lock: Mutex for hardware and object access.
+ * @drm_dev: Pointer to DRM device.
+ * @version: DSI controller version.
+ * @hw: DSI controller hardware object.
+ * @current_state: Current driver and hardware state.
+ * @int_info: Interrupt information.
+ * @clk_info: Clock information.
+ * @pwr_info: Power information.
+ * @axi_bus_info: AXI bus information.
+ * @host_config: Current host configuration.
+ * @tx_cmd_buf: Tx command buffer.
+ * @cmd_buffer_size: Size of command buffer.
+ * @debugfs_root: Root for debugfs entries.
+ */
+struct dsi_ctrl {
+ struct platform_device *pdev;
+ u32 index;
+ const char *name;
+ u32 refcount;
+ struct mutex ctrl_lock;
+ struct drm_device *drm_dev;
+
+ enum dsi_ctrl_version version;
+ struct dsi_ctrl_hw hw;
+
+ /* Current state */
+ struct dsi_ctrl_state_info current_state;
+
+ struct dsi_ctrl_interrupts int_info;
+ /* Clock and power states */
+ struct dsi_ctrl_clk_info clk_info;
+ struct dsi_ctrl_power_info pwr_info;
+ struct dsi_ctrl_bus_scale_info axi_bus_info;
+
+ struct dsi_host_config host_config;
+ /* Command tx and rx */
+ struct drm_gem_object *tx_cmd_buf;
+ u32 cmd_buffer_size;
+
+ /* Debug Information */
+ struct dentry *debugfs_root;
+
+};
+
+/**
+ * dsi_ctrl_get() - get a dsi_ctrl handle from an of_node
+ * @of_node: of_node of the DSI controller.
+ *
+ * Gets the DSI controller handle for the corresponding of_node. The ref count
+ * is incremented to one and all subsequent gets will fail until the original
+ * clients calls a put.
+ *
+ * Return: DSI Controller handle.
+ */
+struct dsi_ctrl *dsi_ctrl_get(struct device_node *of_node);
+
+/**
+ * dsi_ctrl_put() - releases a dsi controller handle.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases the DSI controller. Driver will clean up all resources and puts back
+ * the DSI controller into reset state.
+ */
+void dsi_ctrl_put(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_drv_init() - initialize dsi controller driver.
+ * @dsi_ctrl: DSI controller handle.
+ * @parent: Parent directory for debug fs.
+ *
+ * Initializes DSI controller driver. Driver should be initialized after
+ * dsi_ctrl_get() succeeds.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_init(struct dsi_ctrl *dsi_ctrl, struct dentry *parent);
+
+/**
+ * dsi_ctrl_drv_deinit() - de-initializes dsi controller driver
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Releases all resources acquired by dsi_ctrl_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_drv_deinit(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_validate_timing() - validate a video timing configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @timing: Pointer to timing data.
+ *
+ * Driver will validate if the timing configuration is supported on the
+ * controller hardware.
+ *
+ * Return: error code if timing is not supported.
+ */
+int dsi_ctrl_validate_timing(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_mode_info *timing);
+
+/**
+ * dsi_ctrl_update_host_config() - update dsi host configuration
+ * @dsi_ctrl: DSI controller handle.
+ * @config: DSI host configuration.
+ * @flags: dsi_mode_flags modifying the behavior
+ *
+ * Updates driver with new Host configuration to use for host initialization.
+ * This function call will only update the software context. The stored
+ * configuration information will be used when the host is initialized.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_update_host_config(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_host_config *config,
+ int flags);
+
+/**
+ * dsi_ctrl_async_timing_update() - update only controller timing
+ * @dsi_ctrl: DSI controller handle.
+ * @timing: New DSI timing info
+ *
+ * Updates host timing values to asynchronously transition to new timing
+ * For example, to update the porch values in a seamless/dynamic fps switch.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_async_timing_update(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_mode_info *timing);
+
+/**
+ * dsi_ctrl_phy_sw_reset() - perform a PHY software reset
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * Performs a PHY software reset on the DSI controller. Reset should be done
+ * when the controller power state is DSI_CTRL_POWER_CORE_CLK_ON and the PHY is
+ * not enabled.
+ *
+ * This function will fail if driver is in any other state.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_phy_sw_reset(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_host_init() - Initialize DSI host hardware.
+ * @dsi_ctrl: DSI controller handle.
+ * @cont_splash_enabled: Flag for DSI splash enabled in bootloader.
+ *
+ * Initializes DSI controller hardware with host configuration provided by
+ * dsi_ctrl_update_host_config(). Initialization can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state and after the PHY SW reset has been
+ * performed.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_init(struct dsi_ctrl *dsi_ctrl, bool cont_splash_enabled);
+
+/**
+ * dsi_ctrl_host_deinit() - De-Initialize DSI host hardware.
+ * @dsi_ctrl: DSI controller handle.
+ *
+ * De-initializes DSI controller hardware. It can be performed only during
+ * DSI_CTRL_POWER_CORE_CLK_ON state after LINK clocks have been turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_host_deinit(struct dsi_ctrl *dsi_ctrl);
+
+/**
+ * dsi_ctrl_set_tpg_state() - enable/disable test pattern on the controller
+ * @dsi_ctrl: DSI controller handle.
+ * @on: enable/disable test pattern.
+ *
+ * Test pattern can be enabled only after Video engine (for video mode panels)
+ * or command engine (for cmd mode panels) is enabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_tpg_state(struct dsi_ctrl *dsi_ctrl, bool on);
+
+/**
+ * dsi_ctrl_cmd_transfer() - Transfer commands on DSI link
+ * @dsi_ctrl: DSI controller handle.
+ * @msg: Message to transfer on DSI link.
+ * @flags: Modifiers for message transfer.
+ *
+ * Command transfer can be done only when command engine is enabled. The
+ * transfer API will until either the command transfer finishes or the timeout
+ * value is reached. If the trigger is deferred, it will return without
+ * triggering the transfer. Command parameters are programmed to hardware.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_transfer(struct dsi_ctrl *dsi_ctrl,
+ const struct mipi_dsi_msg *msg,
+ u32 flags);
+
+/**
+ * dsi_ctrl_cmd_tx_trigger() - Trigger a deferred command.
+ * @dsi_ctrl: DSI controller handle.
+ * @flags: Modifiers.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_cmd_tx_trigger(struct dsi_ctrl *dsi_ctrl, u32 flags);
+
+/**
+ * dsi_ctrl_set_power_state() - set power state for dsi controller
+ * @dsi_ctrl: DSI controller handle.
+ * @state: Power state.
+ *
+ * Set power state for DSI controller. Power state can be changed only when
+ * Controller, Video and Command engines are turned off.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_power_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_power_state state);
+
+/**
+ * dsi_ctrl_set_cmd_engine_state() - set command engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Command engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_cmd_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_vid_engine_state() - set video engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Video engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_vid_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_host_engine_state() - set host engine state
+ * @dsi_ctrl: DSI Controller handle.
+ * @state: Engine state.
+ *
+ * Host engine state can be modified only when DSI controller power state is
+ * set to DSI_CTRL_POWER_LINK_CLK_ON and cmd, video engines are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_host_engine_state(struct dsi_ctrl *dsi_ctrl,
+ enum dsi_engine_state state);
+
+/**
+ * dsi_ctrl_set_ulps() - set ULPS state for DSI lanes.
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable ULPS.
+ *
+ * ULPS can be enabled/disabled after DSI host engine is turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_ulps(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_set_clamp_state() - set clamp state for DSI phy
+ * @dsi_ctrl: DSI controller handle.
+ * @enable: enable/disable clamping.
+ *
+ * Clamps can be enabled/disabled while DSI controller is still turned on.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clamp_state(struct dsi_ctrl *dsi_ctrl, bool enable);
+
+/**
+ * dsi_ctrl_set_clock_source() - set clock source for dsi link clocks
+ * @dsi_ctrl: DSI controller handle.
+ * @source_clks: Source clocks for DSI link clocks.
+ *
+ * Clock source should be changed while link clocks are disabled.
+ *
+ * Return: error code.
+ */
+int dsi_ctrl_set_clock_source(struct dsi_ctrl *dsi_ctrl,
+ struct dsi_clk_link_set *source_clks);
+
+/**
+ * dsi_ctrl_drv_register() - register platform driver for dsi controller
+ */
+void dsi_ctrl_drv_register(void);
+
+/**
+ * dsi_ctrl_drv_unregister() - unregister platform driver
+ */
+void dsi_ctrl_drv_unregister(void);
+
+#endif /* _DSI_CTRL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
new file mode 100644
index 000000000000..b81cdaf4ba02
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw.h
@@ -0,0 +1,578 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_HW_H_
+#define _DSI_CTRL_HW_H_
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/bitmap.h>
+
+#include "dsi_defs.h"
+
+/**
+ * Modifier flag for command transmission. If this flag is set, command
+ * information is programmed to hardware and transmission is not triggered.
+ * Caller should call the trigger_command_dma() to start the transmission. This
+ * flag is valid for kickoff_command() and kickoff_fifo_command() operations.
+ */
+#define DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER 0x1
+
+/**
+ * enum dsi_ctrl_version - version of the dsi host controller
+ * @DSI_CTRL_VERSION_UNKNOWN: Unknown controller version
+ * @DSI_CTRL_VERSION_1_4: DSI host v1.4 controller
+ * @DSI_CTRL_VERSION_2_0: DSI host v2.0 controller
+ * @DSI_CTRL_VERSION_MAX: max version (sentinel)
+ */
+enum dsi_ctrl_version {
+ DSI_CTRL_VERSION_UNKNOWN,
+ DSI_CTRL_VERSION_1_4,
+ DSI_CTRL_VERSION_2_0,
+ DSI_CTRL_VERSION_MAX
+};
+
+/**
+ * enum dsi_ctrl_hw_features - features supported by dsi host controller
+ * @DSI_CTRL_VIDEO_TPG: Test pattern support for video mode.
+ * @DSI_CTRL_CMD_TPG: Test pattern support for command mode.
+ * @DSI_CTRL_VARIABLE_REFRESH_RATE: variable panel timing
+ * @DSI_CTRL_DYNAMIC_REFRESH: variable pixel clock rate
+ * @DSI_CTRL_NULL_PACKET_INSERTION: NULL packet insertion
+ * @DSI_CTRL_DESKEW_CALIB: Deskew calibration support
+ * @DSI_CTRL_DPHY: Controller support for DPHY
+ * @DSI_CTRL_CPHY: Controller support for CPHY
+ * @DSI_CTRL_MAX_FEATURES: Total number of features (sentinel).
+ */
+enum dsi_ctrl_hw_features {
+ DSI_CTRL_VIDEO_TPG,
+ DSI_CTRL_CMD_TPG,
+ DSI_CTRL_VARIABLE_REFRESH_RATE,
+ DSI_CTRL_DYNAMIC_REFRESH,
+ DSI_CTRL_NULL_PACKET_INSERTION,
+ DSI_CTRL_DESKEW_CALIB,
+ DSI_CTRL_DPHY,
+ DSI_CTRL_CPHY,
+ DSI_CTRL_MAX_FEATURES
+};
+
+/**
+ * enum dsi_test_pattern - test pattern type
+ * @DSI_TEST_PATTERN_FIXED: Test pattern is fixed, based on init value.
+ * @DSI_TEST_PATTERN_INC: Incremental test pattern, based on init value.
+ * @DSI_TEST_PATTERN_POLY: Pattern generated from polynomial and init val.
+ * @DSI_TEST_PATTERN_MAX: Number of pattern types (sentinel).
+ */
+enum dsi_test_pattern {
+ DSI_TEST_PATTERN_FIXED = 0,
+ DSI_TEST_PATTERN_INC,
+ DSI_TEST_PATTERN_POLY,
+ DSI_TEST_PATTERN_MAX
+};
+
+/**
+ * enum dsi_status_int_type - status interrupts generated by DSI controller
+ * @DSI_CMD_MODE_DMA_DONE: Command mode DMA packets are sent out.
+ * @DSI_CMD_STREAM0_FRAME_DONE: A frame of command mode stream0 is sent out.
+ * @DSI_CMD_STREAM1_FRAME_DONE: A frame of command mode stream1 is sent out.
+ * @DSI_CMD_STREAM2_FRAME_DONE: A frame of command mode stream2 is sent out.
+ * @DSI_VIDEO_MODE_FRAME_DONE: A frame of video mode stream is sent out.
+ * @DSI_BTA_DONE: A BTA is completed.
+ * @DSI_CMD_FRAME_DONE: A frame of selected command mode stream is
+ * sent out by MDP.
+ * @DSI_DYN_REFRESH_DONE: The dynamic refresh operation has completed.
+ * @DSI_DESKEW_DONE: The deskew calibration operation has completed.
+ * @DSI_DYN_BLANK_DMA_DONE: The dynamic blanking DMA operation has
+ * completed.
+ */
+enum dsi_status_int_type {
+ DSI_CMD_MODE_DMA_DONE = BIT(0),
+ DSI_CMD_STREAM0_FRAME_DONE = BIT(1),
+ DSI_CMD_STREAM1_FRAME_DONE = BIT(2),
+ DSI_CMD_STREAM2_FRAME_DONE = BIT(3),
+ DSI_VIDEO_MODE_FRAME_DONE = BIT(4),
+ DSI_BTA_DONE = BIT(5),
+ DSI_CMD_FRAME_DONE = BIT(6),
+ DSI_DYN_REFRESH_DONE = BIT(7),
+ DSI_DESKEW_DONE = BIT(8),
+ DSI_DYN_BLANK_DMA_DONE = BIT(9)
+};
+
+/**
+ * enum dsi_error_int_type - error interrupts generated by DSI controller
+ * @DSI_RDBK_SINGLE_ECC_ERR: Single bit ECC error in read packet.
+ * @DSI_RDBK_MULTI_ECC_ERR: Multi bit ECC error in read packet.
+ * @DSI_RDBK_CRC_ERR: CRC error in read packet.
+ * @DSI_RDBK_INCOMPLETE_PKT: Incomplete read packet.
+ * @DSI_PERIPH_ERROR_PKT: Error packet returned from peripheral.
+ * @DSI_LP_RX_TIMEOUT: Low power reverse transmission timeout.
+ * @DSI_HS_TX_TIMEOUT: High speed forward transmission timeout.
+ * @DSI_BTA_TIMEOUT: BTA timeout.
+ * @DSI_PLL_UNLOCK: PLL has unlocked.
+ * @DSI_DLN0_ESC_ENTRY_ERR: Incorrect LP Rx escape entry.
+ * @DSI_DLN0_ESC_SYNC_ERR: LP Rx data is not byte aligned.
+ * @DSI_DLN0_LP_CONTROL_ERR: Incorrect LP Rx state sequence.
+ * @DSI_PENDING_HS_TX_TIMEOUT: Pending High-speed transfer timeout.
+ * @DSI_INTERLEAVE_OP_CONTENTION: Interleave operation contention.
+ * @DSI_CMD_DMA_FIFO_UNDERFLOW: Command mode DMA FIFO underflow.
+ * @DSI_CMD_MDP_FIFO_UNDERFLOW: Command MDP FIFO underflow (failed to
+ * receive one complete line from MDP).
+ * @DSI_DLN0_HS_FIFO_OVERFLOW: High speed FIFO for data lane 0 overflows.
+ * @DSI_DLN1_HS_FIFO_OVERFLOW: High speed FIFO for data lane 1 overflows.
+ * @DSI_DLN2_HS_FIFO_OVERFLOW: High speed FIFO for data lane 2 overflows.
+ * @DSI_DLN3_HS_FIFO_OVERFLOW: High speed FIFO for data lane 3 overflows.
+ * @DSI_DLN0_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 0 underflows.
+ * @DSI_DLN1_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 1 underflows.
+ * @DSI_DLN2_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 2 underflows.
+ * @DSI_DLN3_HS_FIFO_UNDERFLOW: High speed FIFO for data lane 3 underflows.
+ * @DSI_DLN0_LP0_CONTENTION: PHY level contention while lane 0 is low.
+ * @DSI_DLN1_LP0_CONTENTION: PHY level contention while lane 1 is low.
+ * @DSI_DLN2_LP0_CONTENTION: PHY level contention while lane 2 is low.
+ * @DSI_DLN3_LP0_CONTENTION: PHY level contention while lane 3 is low.
+ * @DSI_DLN0_LP1_CONTENTION: PHY level contention while lane 0 is high.
+ * @DSI_DLN1_LP1_CONTENTION: PHY level contention while lane 1 is high.
+ * @DSI_DLN2_LP1_CONTENTION: PHY level contention while lane 2 is high.
+ * @DSI_DLN3_LP1_CONTENTION: PHY level contention while lane 3 is high.
+ */
+enum dsi_error_int_type {
+ DSI_RDBK_SINGLE_ECC_ERR = BIT(0),
+ DSI_RDBK_MULTI_ECC_ERR = BIT(1),
+ DSI_RDBK_CRC_ERR = BIT(2),
+ DSI_RDBK_INCOMPLETE_PKT = BIT(3),
+ DSI_PERIPH_ERROR_PKT = BIT(4),
+ DSI_LP_RX_TIMEOUT = BIT(5),
+ DSI_HS_TX_TIMEOUT = BIT(6),
+ DSI_BTA_TIMEOUT = BIT(7),
+ DSI_PLL_UNLOCK = BIT(8),
+ DSI_DLN0_ESC_ENTRY_ERR = BIT(9),
+ DSI_DLN0_ESC_SYNC_ERR = BIT(10),
+ DSI_DLN0_LP_CONTROL_ERR = BIT(11),
+ DSI_PENDING_HS_TX_TIMEOUT = BIT(12),
+ DSI_INTERLEAVE_OP_CONTENTION = BIT(13),
+ DSI_CMD_DMA_FIFO_UNDERFLOW = BIT(14),
+ DSI_CMD_MDP_FIFO_UNDERFLOW = BIT(15),
+ DSI_DLN0_HS_FIFO_OVERFLOW = BIT(16),
+ DSI_DLN1_HS_FIFO_OVERFLOW = BIT(17),
+ DSI_DLN2_HS_FIFO_OVERFLOW = BIT(18),
+ DSI_DLN3_HS_FIFO_OVERFLOW = BIT(19),
+ DSI_DLN0_HS_FIFO_UNDERFLOW = BIT(20),
+ DSI_DLN1_HS_FIFO_UNDERFLOW = BIT(21),
+ DSI_DLN2_HS_FIFO_UNDERFLOW = BIT(22),
+ DSI_DLN3_HS_FIFO_UNDERFLOW = BIT(23),
+ DSI_DLN0_LP0_CONTENTION = BIT(24),
+ DSI_DLN1_LP0_CONTENTION = BIT(25),
+ DSI_DLN2_LP0_CONTENTION = BIT(26),
+ DSI_DLN3_LP0_CONTENTION = BIT(27),
+ DSI_DLN0_LP1_CONTENTION = BIT(28),
+ DSI_DLN1_LP1_CONTENTION = BIT(29),
+ DSI_DLN2_LP1_CONTENTION = BIT(30),
+ DSI_DLN3_LP1_CONTENTION = BIT(31),
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_info - command buffer information
+ * @offset: IOMMU virtual address of the command buffer.
+ * @length: Length of the command buffer.
+ * @en_broadcast: Enable broadcast mode if set to true.
+ * @is_master: Is master in broadcast mode.
+ * @use_lpm: Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_info {
+ u32 offset;
+ u32 length;
+ bool en_broadcast;
+ bool is_master;
+ bool use_lpm;
+};
+
+/**
+ * struct dsi_ctrl_cmd_dma_fifo_info - command payload to be sent using FIFO
+ * @command: VA for command buffer.
+ * @size: Size of the command buffer.
+ * @en_broadcast: Enable broadcast mode if set to true.
+ * @is_master: Is master in broadcast mode.
+ * @use_lpm: Use low power mode for command transmission.
+ */
+struct dsi_ctrl_cmd_dma_fifo_info {
+ u32 *command;
+ u32 size;
+ bool en_broadcast;
+ bool is_master;
+ bool use_lpm;
+};
+
+struct dsi_ctrl_hw;
+
+/**
+ * struct dsi_ctrl_hw_ops - operations supported by dsi host hardware
+ */
+struct dsi_ctrl_hw_ops {
+
+ /**
+ * host_setup() - Setup DSI host configuration
+ * @ctrl: Pointer to controller host hardware.
+ * @config: Configuration for DSI host controller
+ */
+ void (*host_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *config);
+
+ /**
+ * video_engine_en() - enable DSI video engine
+ * @ctrl: Pointer to controller host hardware.
+ * @on: Enable/disable video engine.
+ */
+ void (*video_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * video_engine_setup() - Setup dsi host controller for video mode
+ * @ctrl: Pointer to controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+ void (*video_engine_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_video_engine_cfg *cfg);
+
+ /**
+ * set_video_timing() - set up the timing for video frame
+ * @ctrl: Pointer to controller host hardware.
+ * @mode: Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ */
+ void (*set_video_timing)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_mode_info *mode);
+
+ /**
+ * cmd_engine_setup() - setup dsi host controller for command mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg: Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+ void (*cmd_engine_setup)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_host_common_cfg *common_cfg,
+ struct dsi_cmd_engine_cfg *cfg);
+
+ /**
+ * setup_cmd_stream() - set up parameters for command pixel streams
+ * @ctrl: Pointer to controller host hardware.
+ * @width_in_pixels: Width of the stream in pixels.
+ * @h_stride: Horizontal stride in bytes.
+ * @height_in_lines: Number of lines in the stream.
+ * @vc_id: stream_id.
+ *
+ * Setup parameters for command mode pixel stream size.
+ */
+ void (*setup_cmd_stream)(struct dsi_ctrl_hw *ctrl,
+ u32 width_in_pixels,
+ u32 h_stride,
+ u32 height_in_lines,
+ u32 vc_id);
+
+ /**
+ * ctrl_en() - enable DSI controller engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: turn on/off the DSI controller engine.
+ */
+ void (*ctrl_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * cmd_engine_en() - enable DSI controller command engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on: Turn on/off the DSI command engine.
+ */
+ void (*cmd_engine_en)(struct dsi_ctrl_hw *ctrl, bool on);
+
+ /**
+ * phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl: Pointer to the controller host hardware.
+ */
+ void (*phy_sw_reset)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * soft_reset() - perform a soft reset on DSI controller
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disable before the
+ * reset is triggered. These engines will not be enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while MDP timing engine is turned on, the video
+ * engine should be re-enabled only during the vertical blanking time.
+ */
+ void (*soft_reset)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * setup_lane_map() - setup mapping between logical and physical lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lane_map: Structure defining the mapping between DSI logical
+ * lanes and physical lanes.
+ */
+ void (*setup_lane_map)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_lane_mapping *lane_map);
+
+ /**
+ * kickoff_command() - transmits commands stored in memory
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+ void (*kickoff_command)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_info *cmd,
+ u32 flags);
+
+ /**
+ * kickoff_fifo_command() - transmits a command using FIFO in dsi
+ * hardware.
+ * @ctrl: Pointer to the controller host hardware.
+ * @cmd: Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+ void (*kickoff_fifo_command)(struct dsi_ctrl_hw *ctrl,
+ struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+ u32 flags);
+
+ void (*reset_cmd_fifo)(struct dsi_ctrl_hw *ctrl);
+ /**
+ * trigger_command_dma() - trigger transmission of command buffer.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+ void (*trigger_command_dma)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * get_cmd_read_data() - get data read from the peripheral
+ * @ctrl: Pointer to the controller host hardware.
+ * @rd_buf: Buffer where data will be read into.
+ * @total_read_len: Number of bytes to read.
+ */
+ u32 (*get_cmd_read_data)(struct dsi_ctrl_hw *ctrl,
+ u8 *rd_buf,
+ u32 total_read_len);
+
+ /**
+ * ulps_request() - request ulps entry for specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+ void (*ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * ulps_exit() - exit ULPS on specified lanes
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ * to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+ void (*ulps_exit)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * clear_ulps_request() - clear ulps request once all lanes are active
+ * @ctrl: Pointer to controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+ void (*clear_ulps_request)(struct dsi_ctrl_hw *ctrl, u32 lanes);
+
+ /**
+ * get_lanes_in_ulps() - returns the list of lanes in ULPS mode
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+ u32 (*get_lanes_in_ulps)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clamp_enable() - enable DSI clamps to keep PHY driving a stable link
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to be clamped.
+ * @enable_ulps: TODO:??
+ */
+ void (*clamp_enable)(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool enable_ulps);
+
+ /**
+ * clamp_disable() - disable DSI clamps
+ * @ctrl: Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes which need to have clamps released.
+ * @disable_ulps: TODO:??
+ */
+ void (*clamp_disable)(struct dsi_ctrl_hw *ctrl,
+ u32 lanes,
+ bool disable_ulps);
+
+ /**
+ * get_interrupt_status() - returns the interrupt status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts(enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+ u32 (*get_interrupt_status)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clear_interrupt_status() - clears the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be cleared.
+ */
+ void (*clear_interrupt_status)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+ /**
+ * enable_status_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+ void (*enable_status_interrupts)(struct dsi_ctrl_hw *ctrl, u32 ints);
+
+ /**
+ * get_error_status() - returns the error status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of errors(enum dsi_error_int_type) that are
+ * active. This list does not include any status interrupts. Caller
+ * should call get_interrupt_status for status interrupts.
+ *
+ * Return: List of active error interrupts.
+ */
+ u64 (*get_error_status)(struct dsi_ctrl_hw *ctrl);
+
+ /**
+ * clear_error_status() - clears the specified errors
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be cleared.
+ */
+ void (*clear_error_status)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+ /**
+ * enable_error_interrupts() - enable the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @errors: List of errors to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set errors to 0.
+ */
+ void (*enable_error_interrupts)(struct dsi_ctrl_hw *ctrl, u64 errors);
+
+ /**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ */
+ void (*video_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val);
+
+ /**
+ * cmd_test_pattern_setup() - setup test patttern engine for cmd mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ * @stream_id: Stream Id on which packets are generated.
+ */
+ void (*cmd_test_pattern_setup)(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id);
+
+ /**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: Enable/Disable test pattern engine.
+ */
+ void (*test_pattern_enable)(struct dsi_ctrl_hw *ctrl, bool enable);
+
+ /**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ * test pattern
+ * @ctrl: Pointer to the controller host hardware.
+ * @stream_id: Stream on which frame update is sent.
+ */
+ void (*trigger_cmd_test_pattern)(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id);
+
+ ssize_t (*reg_dump_to_buffer)(struct dsi_ctrl_hw *ctrl,
+ char *buf,
+ u32 size);
+};
+
+/**
+ * struct dsi_ctrl_hw - DSI controller hardware object specific to an instance
+ * @base:                 VA for the DSI controller base address.
+ * @length:               Length of the DSI controller register map.
+ * @mmss_misc_base:       VA for the MMSS_MISC register block (used for the
+ *                        DSI clamp controls).
+ * @mmss_misc_length:     Length of the MMSS_MISC register map.
+ * @index:                Instance ID of the controller.
+ * @feature_map:          Features supported by the DSI controller.
+ * @ops:                  Function pointers to the operations supported by the
+ *                        controller.
+ * @supported_interrupts: ORed list of status interrupts supported by this
+ *                        controller (presumably enum dsi_status_int_type
+ *                        flags — TODO confirm).
+ * @supported_errors:     ORed list of error interrupts supported by this
+ *                        controller (presumably enum dsi_error_int_type
+ *                        flags — TODO confirm).
+ */
+struct dsi_ctrl_hw {
+        void __iomem *base;
+        u32 length;
+        void __iomem *mmss_misc_base;
+        u32 mmss_misc_length;
+        u32 index;
+
+        /* features */
+        DECLARE_BITMAP(feature_map, DSI_CTRL_MAX_FEATURES);
+        struct dsi_ctrl_hw_ops ops;
+
+        /* capabilities */
+        u32 supported_interrupts;
+        u64 supported_errors;
+};
+
+#endif /* _DSI_CTRL_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
new file mode 100644
index 000000000000..caba50832cca
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_hw_1_4.c
@@ -0,0 +1,1533 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "dsi-hw:" fmt
+#include <linux/delay.h>
+
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl_reg_1_4.h"
+#include "dsi_hw.h"
+
+/* Offset of the DSI clamp control register inside the MMSS_MISC block */
+#define MMSS_MISC_CLAMP_REG_OFF 0x0014
+
+/*
+ * Destination-format field values written into DSI_COMMAND_MODE_MDP_CTRL
+ * and DSI_VIDEO_MODE_CTRL respectively, indexed by the host config's
+ * dst_format (0 .. DSI_PIXEL_FORMAT_MAX - 1).
+ * Unsupported formats default to RGB888.
+ */
+static const u8 cmd_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+        0x6, 0x7, 0x8, 0x8, 0x0, 0x3, 0x4 };
+static const u8 video_mode_format_map[DSI_PIXEL_FORMAT_MAX] = {
+        0x0, 0x1, 0x2, 0x3, 0x3, 0x3, 0x3 };
+
+
+/**
+ * dsi_setup_trigger_controls() - program DSI_TRIG_CTRL from host config
+ * @ctrl: Pointer to the controller host hardware.
+ * @cfg:  DSI host configuration that is common to both video and
+ *        command modes.
+ */
+static void dsi_setup_trigger_controls(struct dsi_ctrl_hw *ctrl,
+                                       struct dsi_host_common_cfg *cfg)
+{
+        const u8 trigger_map[DSI_TRIGGER_MAX] = {
+                0x0, 0x2, 0x1, 0x4, 0x5, 0x6 };
+        u32 val;
+
+        /* DMA command trigger in bits [2:0], MDP trigger in bits [6:4] */
+        val = trigger_map[cfg->dma_cmd_trigger] & 0x7;
+        val |= (trigger_map[cfg->mdp_cmd_trigger] & 0x7) << 4;
+
+        /* BIT(31) selects the external TE pin as tear-effect source */
+        if (cfg->te_mode == DSI_TE_ON_EXT_PIN)
+                val |= BIT(31);
+
+        DSI_W32(ctrl, DSI_TRIG_CTRL, val);
+}
+
+/**
+ * dsi_ctrl_hw_14_host_setup() - setup dsi host configuration
+ * @ctrl: Pointer to the controller host hardware.
+ * @cfg:  DSI host configuration that is common to both video and
+ *        command modes.
+ */
+void dsi_ctrl_hw_14_host_setup(struct dsi_ctrl_hw *ctrl,
+                               struct dsi_host_common_cfg *cfg)
+{
+        u32 reg_value = 0;
+
+        dsi_setup_trigger_controls(ctrl, cfg);
+
+        /* Setup clocking timing controls: t_clk_post in [13:8],
+         * t_clk_pre in [5:0]
+         */
+        reg_value = ((cfg->t_clk_post & 0x3F) << 8);
+        reg_value |= (cfg->t_clk_pre & 0x3F);
+        DSI_W32(ctrl, DSI_CLKOUT_TIMING_CTRL, reg_value);
+
+        /* EOT packet control: bit 0 appends EOT on TX, bit 4 ignores
+         * EOT on RX
+         */
+        reg_value = cfg->append_tx_eot ? 1 : 0;
+        reg_value |= (cfg->ignore_rx_eot ? (1 << 4) : 0);
+        DSI_W32(ctrl, DSI_EOT_PACKET_CTRL, reg_value);
+
+        /* Turn on dsi clocks. 0x23F is the same force-on value
+         * (PCLK, BYTECLK, AHBM_HCLK per soft_reset()) — confirm bit
+         * positions against the register spec.
+         */
+        DSI_W32(ctrl, DSI_CLK_CTRL, 0x23F);
+
+        /* Setup DSI control register: CRC/ECC checks plus clock and
+         * data lane enables
+         */
+        reg_value = 0;
+        reg_value |= (cfg->en_crc_check ? BIT(24) : 0);
+        reg_value |= (cfg->en_ecc_check ? BIT(20) : 0);
+        reg_value |= BIT(8); /* Clock lane */
+        reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_3) ? BIT(7) : 0);
+        reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_2) ? BIT(6) : 0);
+        reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_1) ? BIT(5) : 0);
+        reg_value |= ((cfg->data_lanes & DSI_DATA_LANE_0) ? BIT(4) : 0);
+
+        DSI_W32(ctrl, DSI_CTRL, reg_value);
+
+        /* Force clock lane in HS (read-modify-write of BIT(28) only) */
+        reg_value = DSI_R32(ctrl, DSI_LANE_CTRL);
+        if (cfg->force_clk_lane_hs)
+                reg_value |= BIT(28);
+        else
+                reg_value &= ~BIT(28);
+        DSI_W32(ctrl, DSI_LANE_CTRL, reg_value);
+
+        pr_debug("[DSI_%d]Host configuration complete\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_phy_sw_reset() - perform a soft reset on the PHY.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Asserts the PHY software reset for 1ms, releases it, then waits 100us
+ * for the PHY to settle.
+ */
+void dsi_ctrl_hw_14_phy_sw_reset(struct dsi_ctrl_hw *ctrl)
+{
+        DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x1);
+        /* NOTE(review): udelay() for 1000us is discouraged by kernel timer
+         * docs; usleep_range() may be preferable if this path is never
+         * atomic — confirm calling context.
+         */
+        udelay(1000);
+        DSI_W32(ctrl, DSI_PHY_SW_RESET, 0x0);
+        udelay(100);
+
+        pr_debug("[DSI_%d] phy sw reset done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_soft_reset() - perform a soft reset on DSI controller
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * The video, command and controller engines will be disabled before the
+ * reset is triggered. These engines will not be enabled after the reset
+ * is complete. Caller must re-enable the engines.
+ *
+ * If the reset is done while MDP timing engine is turned on, the video
+ * engine should be re-enabled only during the vertical blanking time.
+ */
+void dsi_ctrl_hw_14_soft_reset(struct dsi_ctrl_hw *ctrl)
+{
+        u32 reg = 0;
+        u32 reg_ctrl = 0;
+
+        /* Clear DSI_EN, VIDEO_MODE_EN, CMD_MODE_EN (bits [2:0]); the
+         * original value is restored at the end of the function.
+         */
+        reg_ctrl = DSI_R32(ctrl, DSI_CTRL);
+        DSI_W32(ctrl, DSI_CTRL, reg_ctrl & ~0x7);
+
+        /* Force enable PCLK, BYTECLK, AHBM_HCLK */
+        reg = DSI_R32(ctrl, DSI_CLK_CTRL);
+        reg |= 0x23F;
+        DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+        /* Trigger soft reset with a 1us pulse */
+        DSI_W32(ctrl, DSI_SOFT_RESET, 0x1);
+        udelay(1);
+        DSI_W32(ctrl, DSI_SOFT_RESET, 0x0);
+
+        /* Disable force clock on.
+         * NOTE(review): BIT(20) | BIT(11) are not part of the 0x23F mask
+         * set above — confirm which bits are actually the force-on bits.
+         */
+        reg &= ~(BIT(20) | BIT(11));
+        DSI_W32(ctrl, DSI_CLK_CTRL, reg);
+
+        /* Re-enable DSI controller with its pre-reset engine enables */
+        DSI_W32(ctrl, DSI_CTRL, reg_ctrl);
+        pr_debug("[DSI_%d] ctrl soft reset done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_set_video_timing() - set up the timing for video frame
+ * @ctrl: Pointer to controller host hardware.
+ * @mode: Video mode information.
+ *
+ * Set up the video timing parameters for the DSI video mode operation.
+ * All horizontal/vertical positions are expressed in pixel/line units
+ * relative to the start of sync; totals are programmed as (total - 1).
+ */
+void dsi_ctrl_hw_14_set_video_timing(struct dsi_ctrl_hw *ctrl,
+                                     struct dsi_mode_info *mode)
+{
+        u32 reg = 0;
+        u32 hs_start = 0;
+        u32 hs_end, active_h_start, active_h_end, h_total;
+        u32 vs_start = 0, vs_end = 0;
+        u32 vpos_start = 0, vpos_end, active_v_start, active_v_end, v_total;
+
+        hs_end = mode->h_sync_width;
+        active_h_start = mode->h_sync_width + mode->h_back_porch;
+        active_h_end = active_h_start + mode->h_active;
+        h_total = (mode->h_sync_width + mode->h_back_porch + mode->h_active +
+                   mode->h_front_porch) - 1;
+
+        vpos_end = mode->v_sync_width;
+        active_v_start = mode->v_sync_width + mode->v_back_porch;
+        active_v_end = active_v_start + mode->v_active;
+        v_total = (mode->v_sync_width + mode->v_back_porch + mode->v_active +
+                   mode->v_front_porch) - 1;
+
+        /* Each register packs the end position in [31:16] and the start
+         * position in [15:0].
+         */
+        reg = ((active_h_end & 0xFFFF) << 16) | (active_h_start & 0xFFFF);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_H, reg);
+
+        reg = ((active_v_end & 0xFFFF) << 16) | (active_v_start & 0xFFFF);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_ACTIVE_V, reg);
+
+        reg = ((v_total & 0xFFFF) << 16) | (h_total & 0xFFFF);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_TOTAL, reg);
+
+        reg = ((hs_end & 0xFFFF) << 16) | (hs_start & 0xFFFF);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_HSYNC, reg);
+
+        reg = ((vs_end & 0xFFFF) << 16) | (vs_start & 0xFFFF);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC, reg);
+
+        reg = ((vpos_end & 0xFFFF) << 16) | (vpos_start & 0xFFFF);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_VSYNC_VPOS, reg);
+
+        /* TODO: HS TIMER value? */
+        DSI_W32(ctrl, DSI_HS_TIMER_CTRL, 0x3FD08);
+        DSI_W32(ctrl, DSI_MISR_VIDEO_CTRL, 0x10100);
+        /* Flush so the double-buffered timing values above take effect */
+        DSI_W32(ctrl, DSI_DSI_TIMING_FLUSH, 0x1);
+        pr_debug("[DSI_%d] ctrl video parameters updated\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_setup_cmd_stream() - set up parameters for command pixel
+ *                                     streams
+ * @ctrl:            Pointer to controller host hardware.
+ * @width_in_pixels: Width of the stream in pixels.
+ * @h_stride:        Horizontal stride in bytes.
+ * @height_in_lines: Number of lines in the stream.
+ * @vc_id:           Virtual channel id on which the stream is sent.
+ *
+ * Setup parameters for command mode pixel stream size. Both MDP stream 0
+ * and stream 1 are programmed with identical values.
+ */
+void dsi_ctrl_hw_14_setup_cmd_stream(struct dsi_ctrl_hw *ctrl,
+                                     u32 width_in_pixels,
+                                     u32 h_stride,
+                                     u32 height_in_lines,
+                                     u32 vc_id)
+{
+        u32 reg = 0;
+
+        /* (h_stride + 1) in [31:16], virtual channel in [9:8],
+         * data type in [7:0]
+         */
+        reg = (h_stride + 1) << 16;
+        reg |= (vc_id & 0x3) << 8;
+        reg |= 0x39; /* packet data type (0x39 = MIPI DCS long write) */
+        DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_CTRL, reg);
+        DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_CTRL, reg);
+
+        /* height in [31:16], width in [15:0] */
+        reg = (height_in_lines << 16) | width_in_pixels;
+        DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM0_TOTAL, reg);
+        DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_STREAM1_TOTAL, reg);
+}
+
+/**
+ * dsi_ctrl_hw_14_video_engine_setup() - Setup dsi host controller for
+ *                                       video mode
+ * @ctrl:       Pointer to controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg:        Video mode configuration.
+ *
+ * Set up DSI video engine with a specific configuration. Controller and
+ * video engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_video_engine_setup(struct dsi_ctrl_hw *ctrl,
+                                       struct dsi_host_common_cfg *common_cfg,
+                                       struct dsi_video_engine_cfg *cfg)
+{
+        u32 val;
+
+        /* Traffic mode, destination format, virtual channel and the
+         * various LP11 / interleave enables
+         */
+        val = (cfg->traffic_mode & 0x3) << 8;
+        val |= (video_mode_format_map[common_cfg->dst_format] & 0x3) << 4;
+        val |= cfg->vc_id & 0x3;
+        if (cfg->last_line_interleave_en)
+                val |= BIT(31);
+        if (cfg->pulse_mode_hsa_he)
+                val |= BIT(28);
+        if (cfg->hfp_lp11_en)
+                val |= BIT(24);
+        if (cfg->hbp_lp11_en)
+                val |= BIT(20);
+        if (cfg->hsa_lp11_en)
+                val |= BIT(16);
+        if (cfg->eof_bllp_lp11_en)
+                val |= BIT(15);
+        if (cfg->bllp_lp11_en)
+                val |= BIT(12);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_CTRL, val);
+
+        /* Component swap mode and per-color bit swapping */
+        val = (common_cfg->swap_mode & 0x7) << 12;
+        if (common_cfg->bit_swap_red)
+                val |= BIT(0);
+        if (common_cfg->bit_swap_green)
+                val |= BIT(4);
+        if (common_cfg->bit_swap_blue)
+                val |= BIT(8);
+        DSI_W32(ctrl, DSI_VIDEO_MODE_DATA_CTRL, val);
+
+        /* Enable Timing double buffering */
+        DSI_W32(ctrl, DSI_DSI_TIMING_DB_MODE, 0x1);
+
+        pr_debug("[DSI_%d] Video engine setup done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_cmd_engine_setup() - setup dsi host controller for
+ *                                     command mode
+ * @ctrl:       Pointer to the controller host hardware.
+ * @common_cfg: Common configuration parameters.
+ * @cfg:        Command mode configuration.
+ *
+ * Setup DSI CMD engine with a specific configuration. Controller and
+ * command engine are not enabled as part of this function.
+ */
+void dsi_ctrl_hw_14_cmd_engine_setup(struct dsi_ctrl_hw *ctrl,
+                                     struct dsi_host_common_cfg *common_cfg,
+                                     struct dsi_cmd_engine_cfg *cfg)
+{
+        u32 reg = 0;
+
+        /* Interleave count, per-color bit swapping and destination format */
+        reg = (cfg->max_cmd_packets_interleave & 0xF) << 20;
+        reg |= (common_cfg->bit_swap_red ? BIT(4) : 0);
+        reg |= (common_cfg->bit_swap_green ? BIT(8) : 0);
+        reg |= (common_cfg->bit_swap_blue ? BIT(12) : 0);
+        reg |= cmd_mode_format_map[common_cfg->dst_format];
+        DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL, reg);
+
+        /* NOTE(review): purpose of BIT(16) in MDP_CTRL2 is not documented
+         * here — confirm against the register spec.
+         */
+        reg = DSI_R32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2);
+        reg |= BIT(16);
+        DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_CTRL2, reg);
+
+        /* DCS commands used for memory writes and whether to auto-insert
+         * them
+         */
+        reg = cfg->wr_mem_start & 0xFF;
+        reg |= (cfg->wr_mem_continue & 0xFF) << 8;
+        reg |= (cfg->insert_dcs_command ? BIT(16) : 0);
+        DSI_W32(ctrl, DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL, reg);
+
+        pr_debug("[DSI_%d] Cmd engine setup done\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_video_engine_en() - enable DSI video engine
+ * @ctrl: Pointer to controller host hardware.
+ * @on:   Enable/disable video engine.
+ */
+void dsi_ctrl_hw_14_video_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+        u32 val = DSI_R32(ctrl, DSI_CTRL);
+
+        /* VIDEO_MODE_EN is bit 1 of DSI_CTRL */
+        val = (val & ~BIT(1)) | (on ? BIT(1) : 0);
+        DSI_W32(ctrl, DSI_CTRL, val);
+
+        pr_debug("[DSI_%d] Video engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * dsi_ctrl_hw_14_ctrl_en() - enable DSI controller engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on:   turn on/off the DSI controller engine.
+ */
+void dsi_ctrl_hw_14_ctrl_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+        u32 val = DSI_R32(ctrl, DSI_CTRL);
+
+        /* DSI_EN is bit 0 of DSI_CTRL */
+        val = (val & ~BIT(0)) | (on ? BIT(0) : 0);
+        DSI_W32(ctrl, DSI_CTRL, val);
+
+        pr_debug("[DSI_%d] Controller engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * dsi_ctrl_hw_14_cmd_engine_en() - enable DSI controller command engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @on:   Turn on/off the DSI command engine.
+ */
+void dsi_ctrl_hw_14_cmd_engine_en(struct dsi_ctrl_hw *ctrl, bool on)
+{
+        u32 val = DSI_R32(ctrl, DSI_CTRL);
+
+        /* CMD_MODE_EN is bit 2 of DSI_CTRL */
+        val = (val & ~BIT(2)) | (on ? BIT(2) : 0);
+        DSI_W32(ctrl, DSI_CTRL, val);
+
+        pr_debug("[DSI_%d] command engine = %d\n", ctrl->index, on);
+}
+
+/**
+ * dsi_ctrl_hw_14_setup_lane_map() - setup mapping between logical and
+ *                                   physical lanes
+ * @ctrl:     Pointer to the controller host hardware.
+ * @lane_map: Structure defining the mapping between DSI logical
+ *            lanes and physical lanes.
+ */
+void dsi_ctrl_hw_14_setup_lane_map(struct dsi_ctrl_hw *ctrl,
+                                   struct dsi_lane_mapping *lane_map)
+{
+        u32 reg_value;
+        /* Encode the permutation as a 4-digit decimal number so the
+         * supported swap configurations can be matched directly.
+         */
+        u32 lane_number = (lane_map->physical_lane0 * 1000) +
+                          (lane_map->physical_lane1 * 100) +
+                          (lane_map->physical_lane2 * 10) +
+                          lane_map->physical_lane3;
+
+        switch (lane_number) {
+        case 123:       /* <0123> identity */
+                reg_value = 0;
+                break;
+        case 3012:
+                reg_value = 1;
+                break;
+        case 2301:
+                reg_value = 2;
+                break;
+        case 1230:
+                reg_value = 3;
+                break;
+        case 321:       /* <0321> */
+                reg_value = 4;
+                break;
+        case 1032:
+                reg_value = 5;
+                break;
+        case 2103:
+                reg_value = 6;
+                break;
+        case 3210:
+                reg_value = 7;
+                break;
+        default:
+                /* Unrecognized mappings fall back to the identity swap */
+                reg_value = 0;
+                break;
+        }
+
+        DSI_W32(ctrl, DSI_LANE_SWAP_CTRL, reg_value);
+
+        pr_debug("[DSI_%d] Lane swap setup complete\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_kickoff_command() - transmits commands stored in memory
+ * @ctrl:  Pointer to the controller host hardware.
+ * @cmd:   Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware is programmed with address and size of the
+ * command buffer. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_command(struct dsi_ctrl_hw *ctrl,
+                                    struct dsi_ctrl_cmd_dma_info *cmd,
+                                    u32 flags)
+{
+        u32 reg = 0;
+
+        /* Set BROADCAST_EN (bit 31), master (bit 30), LPM (bit 26) and
+         * EMBEDDED_MODE (bit 28) while preserving the other DMA control
+         * bits.
+         */
+        reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+        if (cmd->en_broadcast)
+                reg |= BIT(31);
+        else
+                reg &= ~BIT(31);
+
+        if (cmd->is_master)
+                reg |= BIT(30);
+        else
+                reg &= ~BIT(30);
+
+        if (cmd->use_lpm)
+                reg |= BIT(26);
+        else
+                reg &= ~BIT(26);
+
+        reg |= BIT(28);
+        DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+        /* Program buffer offset and length (length field is 24 bits) */
+        DSI_W32(ctrl, DSI_DMA_CMD_OFFSET, cmd->offset);
+        DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->length & 0xFFFFFF));
+
+        /* wait for writes to complete before kick off */
+        wmb();
+
+        if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+                DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+}
+
+/**
+ * dsi_ctrl_hw_14_kickoff_fifo_command() - transmits a command using FIFO
+ *                                         in dsi hardware.
+ * @ctrl:  Pointer to the controller host hardware.
+ * @cmd:   Command information.
+ * @flags: Modifiers for command transmission.
+ *
+ * The controller hardware FIFO is programmed with command header and
+ * payload. The transmission is kicked off if
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag is not set. If this flag is
+ * set, caller should make a separate call to trigger_command_dma() to
+ * transmit the command.
+ */
+void dsi_ctrl_hw_14_kickoff_fifo_command(struct dsi_ctrl_hw *ctrl,
+                                         struct dsi_ctrl_cmd_dma_fifo_info *cmd,
+                                         u32 flags)
+{
+        u32 reg = 0, i = 0;
+        u32 *ptr = cmd->command;
+        /*
+         * Set CMD_DMA_TPG_EN, TPG_DMA_FIFO_MODE and
+         * CMD_DMA_PATTERN_SEL = custom pattern stored in TPG DMA FIFO
+         */
+        reg = (BIT(1) | BIT(2) | (0x3 << 16));
+        DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+        /*
+         * Program the FIFO with command buffer. Hardware requires an extra
+         * DWORD (set to zero) if the length of command buffer is odd DWORDS.
+         */
+        for (i = 0; i < cmd->size; i += 4) {
+                DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, *ptr);
+                ptr++;
+        }
+
+        if ((cmd->size / 4) & 0x1)
+                DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL, 0);
+
+        /* Set BROADCAST_EN (31), master (30), LPM (26), EMBEDDED_MODE (28) */
+        reg = DSI_R32(ctrl, DSI_COMMAND_MODE_DMA_CTRL);
+        if (cmd->en_broadcast)
+                reg |= BIT(31);
+        else
+                reg &= ~BIT(31);
+
+        if (cmd->is_master)
+                reg |= BIT(30);
+        else
+                reg &= ~BIT(30);
+
+        if (cmd->use_lpm)
+                reg |= BIT(26);
+        else
+                reg &= ~BIT(26);
+
+        reg |= BIT(28);
+
+        DSI_W32(ctrl, DSI_COMMAND_MODE_DMA_CTRL, reg);
+
+        /* NOTE(review): "& 0xFFFFFFFF" is a no-op on a u32;
+         * kickoff_command() masks the length with 0xFFFFFF — confirm the
+         * intended register field width.
+         */
+        DSI_W32(ctrl, DSI_DMA_CMD_LENGTH, (cmd->size & 0xFFFFFFFF));
+        /* Finish writes before command trigger */
+        wmb();
+
+        if (!(flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER))
+                DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+
+        pr_debug("[DSI_%d]size=%d, trigger = %d\n",
+                 ctrl->index, cmd->size,
+                 (flags & DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER) ? false : true);
+}
+
+/**
+ * dsi_ctrl_hw_14_reset_cmd_fifo() - reset the command FIFO used by
+ *                                   kickoff_fifo_command()
+ * @ctrl: Pointer to the controller host hardware.
+ */
+void dsi_ctrl_hw_14_reset_cmd_fifo(struct dsi_ctrl_hw *ctrl)
+{
+        /* disable cmd dma tpg */
+        DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, 0x0);
+
+        /* Pulse the TPG DMA FIFO reset for 1us */
+        DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x1);
+        udelay(1);
+        DSI_W32(ctrl, DSI_TPG_DMA_FIFO_RESET, 0x0);
+}
+
+/**
+ * dsi_ctrl_hw_14_trigger_command_dma() - trigger transmission of command
+ *                                        buffer.
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * This trigger can be only used if there was a prior call to
+ * kickoff_command() or kickoff_fifo_command() with
+ * DSI_CTRL_HW_CMD_WAIT_FOR_TRIGGER flag.
+ */
+void dsi_ctrl_hw_14_trigger_command_dma(struct dsi_ctrl_hw *ctrl)
+{
+        DSI_W32(ctrl, DSI_CMD_MODE_DMA_SW_TRIGGER, 0x1);
+        pr_debug("[DSI_%d] CMD DMA triggered\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_get_cmd_read_data() - get data read from the peripheral
+ * @ctrl:           Pointer to the controller host hardware.
+ * @rd_buf:         Buffer where data will be read into.
+ * @read_offset:    Offset into the caller's buffer already consumed by a
+ *                  previous chunk (used to drop repeated bytes).
+ * @total_read_len: Number of bytes to read.
+ *
+ * NOTE(review): this signature has four parameters, but the
+ * get_cmd_read_data prototype in struct dsi_ctrl_hw_ops takes only
+ * three (no read_offset) — confirm which is intended before wiring up
+ * the ops table.
+ *
+ * Return: number of bytes read.
+ */
+u32 dsi_ctrl_hw_14_get_cmd_read_data(struct dsi_ctrl_hw *ctrl,
+                                     u8 *rd_buf,
+                                     u32 read_offset,
+                                     u32 total_read_len)
+{
+        u32 *lp, *temp, data;
+        int i, j = 0, cnt;
+        u32 read_cnt;
+        /* NOTE(review): rx_byte and pkt_size are initialized to 0 and
+         * never updated, so cnt == 0 (the RDBK register loop below never
+         * runs) and read_cnt == 6. This looks like an incomplete port —
+         * as written the function copies only zeros into rd_buf; confirm
+         * where these values should come from.
+         */
+        u32 rx_byte = 0;
+        u32 repeated_bytes = 0;
+        u8 reg[16] = {0};
+        u32 pkt_size = 0;
+        int buf_offset = read_offset;
+
+        lp = (u32 *)rd_buf;
+        temp = (u32 *)reg;
+        cnt = (rx_byte + 3) >> 2;
+
+        /* At most 4 RDBK data registers (16 bytes) are available */
+        if (cnt > 4)
+                cnt = 4;
+
+        if (rx_byte == 4)
+                read_cnt = 4;
+        else
+                read_cnt = pkt_size + 6;
+
+        /* When more than 16 bytes were requested, the hardware FIFO wraps
+         * and the first bytes repeat data already delivered to the caller.
+         */
+        if (read_cnt > 16) {
+                int bytes_shifted;
+
+                bytes_shifted = read_cnt - 16;
+                repeated_bytes = buf_offset - bytes_shifted;
+        }
+
+        /* Read the RDBK registers high-to-low, byte-swapping each word */
+        for (i = cnt - 1; i >= 0; i--) {
+                data = DSI_R32(ctrl, DSI_RDBK_DATA0 + i*4);
+                *temp++ = ntohl(data);
+        }
+
+        /* Copy out everything after the repeated prefix */
+        for (i = repeated_bytes; i < 16; i++)
+                rd_buf[j++] = reg[i];
+
+        pr_debug("[DSI_%d] Read %d bytes\n", ctrl->index, j);
+        return j;
+}
+/**
+ * dsi_ctrl_hw_14_ulps_request() - request ulps entry for specified lanes
+ * @ctrl:  Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ *         to enter ULPS.
+ *
+ * Caller should check if lanes are in ULPS mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+        u32 reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+
+        /* ULPS request bits: data lanes 0-3 in bits 0-3, clock in bit 4 */
+        reg |= (lanes & DSI_DATA_LANE_0) ? BIT(0) : 0;
+        reg |= (lanes & DSI_DATA_LANE_1) ? BIT(1) : 0;
+        reg |= (lanes & DSI_DATA_LANE_2) ? BIT(2) : 0;
+        reg |= (lanes & DSI_DATA_LANE_3) ? BIT(3) : 0;
+        reg |= (lanes & DSI_CLOCK_LANE) ? BIT(4) : 0;
+
+        DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+        pr_debug("[DSI_%d] ULPS requested for lanes 0x%x\n", ctrl->index,
+                 lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_ulps_exit() - exit ULPS on specified lanes
+ * @ctrl:  Pointer to the controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes) which need
+ *         to exit ULPS.
+ *
+ * Caller should check if lanes are in active mode by calling
+ * get_lanes_in_ulps() operation.
+ */
+void dsi_ctrl_hw_14_ulps_exit(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+        u32 reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+
+        /* ULPS exit bits: data lanes 0-3 in bits 8-11, clock in bit 12 */
+        reg |= (lanes & DSI_DATA_LANE_0) ? BIT(8) : 0;
+        reg |= (lanes & DSI_DATA_LANE_1) ? BIT(9) : 0;
+        reg |= (lanes & DSI_DATA_LANE_2) ? BIT(10) : 0;
+        reg |= (lanes & DSI_DATA_LANE_3) ? BIT(11) : 0;
+        reg |= (lanes & DSI_CLOCK_LANE) ? BIT(12) : 0;
+
+        DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+        pr_debug("[DSI_%d] ULPS exit request for lanes=0x%x\n",
+                 ctrl->index, lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_clear_ulps_request() - clear ulps request once all lanes
+ *                                       are active
+ * @ctrl:  Pointer to controller host hardware.
+ * @lanes: ORed list of lanes (enum dsi_data_lanes).
+ *
+ * ULPS request should be cleared after the lanes have exited ULPS.
+ */
+void dsi_ctrl_hw_14_clear_ulps_request(struct dsi_ctrl_hw *ctrl, u32 lanes)
+{
+        u32 reg = 0;
+
+        /* First write: clear the ULPS_REQUEST bits (bits 0-4) */
+        reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+        if (lanes & DSI_CLOCK_LANE)
+                reg &= ~BIT(4); /* clock lane */
+        if (lanes & DSI_DATA_LANE_0)
+                reg &= ~BIT(0);
+        if (lanes & DSI_DATA_LANE_1)
+                reg &= ~BIT(1);
+        if (lanes & DSI_DATA_LANE_2)
+                reg &= ~BIT(2);
+        if (lanes & DSI_DATA_LANE_3)
+                reg &= ~BIT(3);
+
+        DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+        /*
+         * HPG recommends separate writes for clearing ULPS_REQUEST and
+         * ULPS_EXIT.
+         */
+        /* Second write: clear the ULPS_EXIT bits (bits 8-12) */
+        reg = DSI_R32(ctrl, DSI_LANE_CTRL);
+        if (lanes & DSI_CLOCK_LANE)
+                reg &= ~BIT(12);
+        if (lanes & DSI_DATA_LANE_0)
+                reg &= ~BIT(8);
+        if (lanes & DSI_DATA_LANE_1)
+                reg &= ~BIT(9);
+        if (lanes & DSI_DATA_LANE_2)
+                reg &= ~BIT(10);
+        if (lanes & DSI_DATA_LANE_3)
+                reg &= ~BIT(11);
+        DSI_W32(ctrl, DSI_LANE_CTRL, reg);
+
+        pr_debug("[DSI_%d] ULPS request cleared\n", ctrl->index);
+}
+
+/**
+ * dsi_ctrl_hw_14_get_lanes_in_ulps() - returns the list of lanes in ULPS
+ *                                      mode
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns an ORed list of lanes (enum dsi_data_lanes) that are in ULPS
+ * state. If 0 is returned, all the lanes are active.
+ *
+ * Return: List of lanes in ULPS state.
+ */
+u32 dsi_ctrl_hw_14_get_lanes_in_ulps(struct dsi_ctrl_hw *ctrl)
+{
+        u32 lanes = 0;
+        u32 status = DSI_R32(ctrl, DSI_LANE_STATUS);
+
+        /* A cleared status bit (8-12) means the lane is in ULPS */
+        lanes |= (status & BIT(8)) ? 0 : DSI_DATA_LANE_0;
+        lanes |= (status & BIT(9)) ? 0 : DSI_DATA_LANE_1;
+        lanes |= (status & BIT(10)) ? 0 : DSI_DATA_LANE_2;
+        lanes |= (status & BIT(11)) ? 0 : DSI_DATA_LANE_3;
+        lanes |= (status & BIT(12)) ? 0 : DSI_CLOCK_LANE;
+
+        pr_debug("[DSI_%d] lanes in ulps = 0x%x\n", ctrl->index, lanes);
+        return lanes;
+}
+
+/**
+ * dsi_ctrl_hw_14_clamp_enable() - enable DSI clamps to keep PHY driving a
+ *                                 stable link
+ * @ctrl:        Pointer to the controller host hardware.
+ * @lanes:       ORed list of lanes which need to be clamped.
+ * @enable_ulps: When set, also assert the per-lane ULPS-clamp bit
+ *               (TODO confirm semantics against the HPG).
+ */
+void dsi_ctrl_hw_14_clamp_enable(struct dsi_ctrl_hw *ctrl,
+                                 u32 lanes,
+                                 bool enable_ulps)
+{
+        u32 clamp_reg = 0;
+        u32 bit_shift = 0;
+        u32 reg = 0;
+
+        /* DSI1 clamp bits live in the upper half of the shared register */
+        if (ctrl->index == 1)
+                bit_shift = 16;
+
+        /* Per-lane: odd bit = clamp enable, even bit = ULPS clamp */
+        if (lanes & DSI_CLOCK_LANE) {
+                clamp_reg |= BIT(9);
+                if (enable_ulps)
+                        clamp_reg |= BIT(8);
+        }
+
+        if (lanes & DSI_DATA_LANE_0) {
+                clamp_reg |= BIT(7);
+                if (enable_ulps)
+                        clamp_reg |= BIT(6);
+        }
+
+        if (lanes & DSI_DATA_LANE_1) {
+                clamp_reg |= BIT(5);
+                if (enable_ulps)
+                        clamp_reg |= BIT(4);
+        }
+
+        if (lanes & DSI_DATA_LANE_2) {
+                clamp_reg |= BIT(3);
+                if (enable_ulps)
+                        clamp_reg |= BIT(2);
+        }
+
+        if (lanes & DSI_DATA_LANE_3) {
+                clamp_reg |= BIT(1);
+                if (enable_ulps)
+                        clamp_reg |= BIT(0);
+        }
+
+        clamp_reg |= BIT(15); /* Enable clamp */
+
+        reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+        reg |= (clamp_reg << bit_shift);
+        DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+        /* Separate write: set BIT(30), the PHY reset skip bit (named per
+         * the clearing path in clamp_disable()).
+         */
+        reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+        reg |= BIT(30);
+        DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+        pr_debug("[DSI_%d] Clamps enabled for lanes=0x%x\n", ctrl->index,
+                 lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_clamp_disable() - disable DSI clamps
+ * @ctrl:         Pointer to the controller host hardware.
+ * @lanes:        ORed list of lanes which need to have clamps released.
+ * @disable_ulps: When set, also release the per-lane ULPS-clamp bit
+ *                (TODO confirm semantics against the HPG).
+ *
+ * Mirror of clamp_enable(): builds the same per-lane mask and clears it.
+ */
+void dsi_ctrl_hw_14_clamp_disable(struct dsi_ctrl_hw *ctrl,
+                                  u32 lanes,
+                                  bool disable_ulps)
+{
+        u32 clamp_reg = 0;
+        u32 bit_shift = 0;
+        u32 reg = 0;
+
+        /* DSI1 clamp bits live in the upper half of the shared register */
+        if (ctrl->index == 1)
+                bit_shift = 16;
+
+        /* Per-lane: odd bit = clamp enable, even bit = ULPS clamp */
+        if (lanes & DSI_CLOCK_LANE) {
+                clamp_reg |= BIT(9);
+                if (disable_ulps)
+                        clamp_reg |= BIT(8);
+        }
+
+        if (lanes & DSI_DATA_LANE_0) {
+                clamp_reg |= BIT(7);
+                if (disable_ulps)
+                        clamp_reg |= BIT(6);
+        }
+
+        if (lanes & DSI_DATA_LANE_1) {
+                clamp_reg |= BIT(5);
+                if (disable_ulps)
+                        clamp_reg |= BIT(4);
+        }
+
+        if (lanes & DSI_DATA_LANE_2) {
+                clamp_reg |= BIT(3);
+                if (disable_ulps)
+                        clamp_reg |= BIT(2);
+        }
+
+        if (lanes & DSI_DATA_LANE_3) {
+                clamp_reg |= BIT(1);
+                if (disable_ulps)
+                        clamp_reg |= BIT(0);
+        }
+
+        clamp_reg |= BIT(15); /* Enable clamp */
+        clamp_reg <<= bit_shift;
+
+        /* Disable PHY reset skip */
+        reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+        reg &= ~BIT(30);
+        DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+        /* Separate write: release the per-lane clamp bits */
+        reg = DSI_MMSS_MISC_R32(ctrl, MMSS_MISC_CLAMP_REG_OFF);
+        reg &= ~(clamp_reg);
+        DSI_MMSS_MISC_W32(ctrl, MMSS_MISC_CLAMP_REG_OFF, reg);
+
+        pr_debug("[DSI_%d] Disable clamps for lanes=%d\n", ctrl->index, lanes);
+}
+
+/**
+ * dsi_ctrl_hw_14_get_interrupt_status() - returns the interrupt status
+ * @ctrl: Pointer to the controller host hardware.
+ *
+ * Returns the ORed list of interrupts(enum dsi_status_int_type) that
+ * are active. This list does not include any error interrupts. Caller
+ * should call get_error_status for error interrupts.
+ *
+ * Return: List of active interrupts.
+ */
+u32 dsi_ctrl_hw_14_get_interrupt_status(struct dsi_ctrl_hw *ctrl)
+{
+        u32 reg = 0;
+        u32 ints = 0;
+
+        /* Translate DSI_INT_CTRL status bits (even positions) into
+         * dsi_status_int_type flags.
+         */
+        reg = DSI_R32(ctrl, DSI_INT_CTRL);
+
+        if (reg & BIT(0))
+                ints |= DSI_CMD_MODE_DMA_DONE;
+        if (reg & BIT(8))
+                ints |= DSI_CMD_FRAME_DONE;
+        if (reg & BIT(10))
+                ints |= DSI_CMD_STREAM0_FRAME_DONE;
+        if (reg & BIT(12))
+                ints |= DSI_CMD_STREAM1_FRAME_DONE;
+        if (reg & BIT(14))
+                ints |= DSI_CMD_STREAM2_FRAME_DONE;
+        if (reg & BIT(16))
+                ints |= DSI_VIDEO_MODE_FRAME_DONE;
+        if (reg & BIT(20))
+                ints |= DSI_BTA_DONE;
+        if (reg & BIT(28))
+                ints |= DSI_DYN_REFRESH_DONE;
+        if (reg & BIT(30))
+                ints |= DSI_DESKEW_DONE;
+
+        pr_debug("[DSI_%d] Interrupt status = 0x%x, INT_CTRL=0x%x\n",
+                 ctrl->index, ints, reg);
+        return ints;
+}
+
+/**
+ * dsi_ctrl_hw_14_clear_interrupt_status() - clears the specified interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be cleared.
+ */
+void dsi_ctrl_hw_14_clear_interrupt_status(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+        u32 reg = 0;
+
+        /* Translate each dsi_status_int_type flag back into its status
+         * bit position in DSI_INT_CTRL (the inverse of
+         * get_interrupt_status()).
+         */
+        reg |= (ints & DSI_CMD_MODE_DMA_DONE) ? BIT(0) : 0;
+        reg |= (ints & DSI_CMD_FRAME_DONE) ? BIT(8) : 0;
+        reg |= (ints & DSI_CMD_STREAM0_FRAME_DONE) ? BIT(10) : 0;
+        reg |= (ints & DSI_CMD_STREAM1_FRAME_DONE) ? BIT(12) : 0;
+        reg |= (ints & DSI_CMD_STREAM2_FRAME_DONE) ? BIT(14) : 0;
+        reg |= (ints & DSI_VIDEO_MODE_FRAME_DONE) ? BIT(16) : 0;
+        reg |= (ints & DSI_BTA_DONE) ? BIT(20) : 0;
+        reg |= (ints & DSI_DYN_REFRESH_DONE) ? BIT(28) : 0;
+        reg |= (ints & DSI_DESKEW_DONE) ? BIT(30) : 0;
+
+        DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+        pr_debug("[DSI_%d] Clear interrupts, ints = 0x%x, INT_CTRL=0x%x\n",
+                 ctrl->index, ints, reg);
+}
+
+/**
+ * dsi_ctrl_hw_14_enable_status_interrupts() - enable the specified
+ *                                             interrupts
+ * @ctrl: Pointer to the controller host hardware.
+ * @ints: List of interrupts to be enabled.
+ *
+ * Enables the specified interrupts. This list will override the
+ * previous interrupts enabled through this function. Caller has to
+ * maintain the state of the interrupts enabled. To disable all
+ * interrupts, set ints to 0.
+ */
+void dsi_ctrl_hw_14_enable_status_interrupts(struct dsi_ctrl_hw *ctrl, u32 ints)
+{
+        u32 reg = 0;
+
+        /* Do not change value of DSI_ERROR_MASK bit */
+        reg |= (DSI_R32(ctrl, DSI_INT_CTRL) & BIT(25));
+        /* Each enable bit is one position above the corresponding status
+         * bit used in get/clear_interrupt_status().
+         */
+        if (ints & DSI_CMD_MODE_DMA_DONE)
+                reg |= BIT(1);
+        if (ints & DSI_CMD_FRAME_DONE)
+                reg |= BIT(9);
+        if (ints & DSI_CMD_STREAM0_FRAME_DONE)
+                reg |= BIT(11);
+        if (ints & DSI_CMD_STREAM1_FRAME_DONE)
+                reg |= BIT(13);
+        if (ints & DSI_CMD_STREAM2_FRAME_DONE)
+                reg |= BIT(15);
+        if (ints & DSI_VIDEO_MODE_FRAME_DONE)
+                reg |= BIT(17);
+        if (ints & DSI_BTA_DONE)
+                reg |= BIT(21);
+        if (ints & DSI_DYN_REFRESH_DONE)
+                reg |= BIT(29);
+        if (ints & DSI_DESKEW_DONE)
+                reg |= BIT(31);
+
+        DSI_W32(ctrl, DSI_INT_CTRL, reg);
+
+        pr_debug("[DSI_%d] Enable interrupts 0x%x, INT_CTRL=0x%x\n",
+                 ctrl->index, ints, reg);
+}
+
/**
 * dsi_ctrl_hw_14_get_error_status() - returns the error status
 * @ctrl: Pointer to the controller host hardware.
 *
 * Reads the five hardware error registers (DLN0 PHY, FIFO, ACK, timeout,
 * clock) plus DSI_STATUS and returns the ORed list of errors
 * (enum dsi_error_int_type) that are active. This list does not include
 * any status interrupts. Caller should call get_interrupt_status for
 * status interrupts.
 *
 * NOTE(review): this function only reads the registers; pair with
 * clear_error_status() to acknowledge the reported errors — assumes the
 * registers are not clear-on-read, confirm against the HW spec.
 *
 * Return: List of active error interrupts.
 */
u64 dsi_ctrl_hw_14_get_error_status(struct dsi_ctrl_hw *ctrl)
{
	u32 dln0_phy_err;
	u32 fifo_status;
	u32 ack_error;
	u32 timeout_errors;
	u32 clk_error;
	u32 dsi_status;
	u64 errors = 0;

	/* Data lane 0 PHY errors: escape-mode and LP contention faults. */
	dln0_phy_err = DSI_R32(ctrl, DSI_DLN0_PHY_ERR);
	if (dln0_phy_err & BIT(0))
		errors |= DSI_DLN0_ESC_ENTRY_ERR;
	if (dln0_phy_err & BIT(4))
		errors |= DSI_DLN0_ESC_SYNC_ERR;
	if (dln0_phy_err & BIT(8))
		errors |= DSI_DLN0_LP_CONTROL_ERR;
	if (dln0_phy_err & BIT(12))
		errors |= DSI_DLN0_LP0_CONTENTION;
	if (dln0_phy_err & BIT(16))
		errors |= DSI_DLN0_LP1_CONTENTION;

	/* Command/DMA FIFO and per-lane HS FIFO over/underflow conditions. */
	fifo_status = DSI_R32(ctrl, DSI_FIFO_STATUS);
	if (fifo_status & BIT(7))
		errors |= DSI_CMD_MDP_FIFO_UNDERFLOW;
	if (fifo_status & BIT(10))
		errors |= DSI_CMD_DMA_FIFO_UNDERFLOW;
	if (fifo_status & BIT(18))
		errors |= DSI_DLN0_HS_FIFO_OVERFLOW;
	if (fifo_status & BIT(19))
		errors |= DSI_DLN0_HS_FIFO_UNDERFLOW;
	if (fifo_status & BIT(22))
		errors |= DSI_DLN1_HS_FIFO_OVERFLOW;
	if (fifo_status & BIT(23))
		errors |= DSI_DLN1_HS_FIFO_UNDERFLOW;
	if (fifo_status & BIT(26))
		errors |= DSI_DLN2_HS_FIFO_OVERFLOW;
	if (fifo_status & BIT(27))
		errors |= DSI_DLN2_HS_FIFO_UNDERFLOW;
	if (fifo_status & BIT(30))
		errors |= DSI_DLN3_HS_FIFO_OVERFLOW;
	if (fifo_status & BIT(31))
		errors |= DSI_DLN3_HS_FIFO_UNDERFLOW;

	/* Acknowledge/readback errors reported by the panel. */
	ack_error = DSI_R32(ctrl, DSI_ACK_ERR_STATUS);
	if (ack_error & BIT(16))
		errors |= DSI_RDBK_SINGLE_ECC_ERR;
	if (ack_error & BIT(17))
		errors |= DSI_RDBK_MULTI_ECC_ERR;
	if (ack_error & BIT(20))
		errors |= DSI_RDBK_CRC_ERR;
	if (ack_error & BIT(23))
		errors |= DSI_RDBK_INCOMPLETE_PKT;
	if (ack_error & BIT(24))
		errors |= DSI_PERIPH_ERROR_PKT;

	/* Link timeout conditions. */
	timeout_errors = DSI_R32(ctrl, DSI_TIMEOUT_STATUS);
	if (timeout_errors & BIT(0))
		errors |= DSI_HS_TX_TIMEOUT;
	if (timeout_errors & BIT(4))
		errors |= DSI_LP_RX_TIMEOUT;
	if (timeout_errors & BIT(8))
		errors |= DSI_BTA_TIMEOUT;

	/* Clock / PLL status. */
	clk_error = DSI_R32(ctrl, DSI_CLK_STATUS);
	if (clk_error & BIT(16))
		errors |= DSI_PLL_UNLOCK;

	/* Interleaved-operation contention from the main status register. */
	dsi_status = DSI_R32(ctrl, DSI_STATUS);
	if (dsi_status & BIT(31))
		errors |= DSI_INTERLEAVE_OP_CONTENTION;

	pr_debug("[DSI_%d] Error status = 0x%llx, phy=0x%x, fifo=0x%x",
		 ctrl->index, errors, dln0_phy_err, fifo_status);
	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
		 ctrl->index, ack_error, timeout_errors, clk_error, dsi_status);
	return errors;
}
+
/**
 * dsi_ctrl_hw_14_clear_error_status() - clears the specified errors
 * @ctrl:   Pointer to the controller host hardware.
 * @errors: List of errors to be cleared.
 *
 * Mirrors get_error_status(): translates each enum dsi_error_int_type
 * flag back into the bit position of its source register and writes the
 * accumulated values to the six error/status registers, then acks the
 * error interrupt in DSI_INT_CTRL.
 */
void dsi_ctrl_hw_14_clear_error_status(struct dsi_ctrl_hw *ctrl, u64 errors)
{
	u32 dln0_phy_err = 0;
	u32 fifo_status = 0;
	u32 ack_error = 0;
	u32 timeout_error = 0;
	u32 clk_error = 0;
	u32 dsi_status = 0;
	u32 int_ctrl = 0;

	/* DSI_ACK_ERR_STATUS: readback/peripheral error bits. */
	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
		ack_error |= BIT(16);
	if (errors & DSI_RDBK_MULTI_ECC_ERR)
		ack_error |= BIT(17);
	if (errors & DSI_RDBK_CRC_ERR)
		ack_error |= BIT(20);
	if (errors & DSI_RDBK_INCOMPLETE_PKT)
		ack_error |= BIT(23);
	if (errors & DSI_PERIPH_ERROR_PKT)
		ack_error |= BIT(24);

	/* DSI_TIMEOUT_STATUS: link timeout bits. */
	if (errors & DSI_LP_RX_TIMEOUT)
		timeout_error |= BIT(4);
	if (errors & DSI_HS_TX_TIMEOUT)
		timeout_error |= BIT(0);
	if (errors & DSI_BTA_TIMEOUT)
		timeout_error |= BIT(8);

	/* DSI_CLK_STATUS: PLL unlock. */
	if (errors & DSI_PLL_UNLOCK)
		clk_error |= BIT(16);

	/* DSI_DLN0_PHY_ERR: lane-0 escape/contention faults. */
	if (errors & DSI_DLN0_LP0_CONTENTION)
		dln0_phy_err |= BIT(12);
	if (errors & DSI_DLN0_LP1_CONTENTION)
		dln0_phy_err |= BIT(16);
	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
		dln0_phy_err |= BIT(0);
	if (errors & DSI_DLN0_ESC_SYNC_ERR)
		dln0_phy_err |= BIT(4);
	if (errors & DSI_DLN0_LP_CONTROL_ERR)
		dln0_phy_err |= BIT(8);

	/* DSI_FIFO_STATUS: command and per-lane FIFO over/underflows. */
	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
		fifo_status |= BIT(10);
	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
		fifo_status |= BIT(7);
	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
		fifo_status |= BIT(18);
	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
		fifo_status |= BIT(22);
	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
		fifo_status |= BIT(26);
	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
		fifo_status |= BIT(30);
	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
		fifo_status |= BIT(19);
	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
		fifo_status |= BIT(23);
	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
		fifo_status |= BIT(27);
	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
		fifo_status |= BIT(31);

	/* DSI_STATUS: interleave contention. */
	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
		dsi_status |= BIT(31);

	DSI_W32(ctrl, DSI_DLN0_PHY_ERR, dln0_phy_err);
	DSI_W32(ctrl, DSI_FIFO_STATUS, fifo_status);
	DSI_W32(ctrl, DSI_ACK_ERR_STATUS, ack_error);
	DSI_W32(ctrl, DSI_TIMEOUT_STATUS, timeout_error);
	DSI_W32(ctrl, DSI_CLK_STATUS, clk_error);
	DSI_W32(ctrl, DSI_STATUS, dsi_status);

	/*
	 * NOTE(review): BIT(24) appears to be the error-interrupt ack bit in
	 * DSI_INT_CTRL (enable_error_interrupts() controls BIT(25)) — confirm
	 * against the hardware programming guide.
	 */
	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
	int_ctrl |= BIT(24);
	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
	pr_debug("[DSI_%d] clear errors = 0x%llx, phy=0x%x, fifo=0x%x",
		 ctrl->index, errors, dln0_phy_err, fifo_status);
	pr_debug("[DSI_%d] ack=0x%x, timeout=0x%x, clk=0x%x, dsi=0x%x\n",
		 ctrl->index, ack_error, timeout_error, clk_error, dsi_status);
}
+
/**
 * dsi_ctrl_hw_14_enable_error_interrupts() - enable the specified interrupts
 * @ctrl:   Pointer to the controller host hardware.
 * @errors: List of errors to be enabled.
 *
 * Enables the specified interrupts. This list will override the
 * previous interrupts enabled through this function. Caller has to
 * maintain the state of the interrupts enabled. To disable all
 * interrupts, set errors to 0.
 *
 * DSI_ERR_INT_MASK0 is a mask register: a set bit masks (disables) the
 * corresponding error source, so requested errors are unmasked by
 * clearing their bit. BIT(25) of DSI_INT_CTRL is the master error
 * interrupt enable, set whenever any error is requested.
 */
void dsi_ctrl_hw_14_enable_error_interrupts(struct dsi_ctrl_hw *ctrl,
					    u64 errors)
{
	u32 int_ctrl = 0;
	/*
	 * Default: all maskable error sources masked.
	 * NOTE(review): 0x7FFF3BFF presumably covers every implemented mask
	 * bit (bits 10, 12-15 and 31 stay clear) — confirm against the
	 * DSI_ERR_INT_MASK0 register description.
	 */
	u32 int_mask0 = 0x7FFF3BFF;

	int_ctrl = DSI_R32(ctrl, DSI_INT_CTRL);
	if (errors)
		int_ctrl |= BIT(25);
	else
		int_ctrl &= ~BIT(25);

	/* Readback/peripheral errors: mask bits 0-4. */
	if (errors & DSI_RDBK_SINGLE_ECC_ERR)
		int_mask0 &= ~BIT(0);
	if (errors & DSI_RDBK_MULTI_ECC_ERR)
		int_mask0 &= ~BIT(1);
	if (errors & DSI_RDBK_CRC_ERR)
		int_mask0 &= ~BIT(2);
	if (errors & DSI_RDBK_INCOMPLETE_PKT)
		int_mask0 &= ~BIT(3);
	if (errors & DSI_PERIPH_ERROR_PKT)
		int_mask0 &= ~BIT(4);

	/* Timeout errors: mask bits 5-7. */
	if (errors & DSI_LP_RX_TIMEOUT)
		int_mask0 &= ~BIT(5);
	if (errors & DSI_HS_TX_TIMEOUT)
		int_mask0 &= ~BIT(6);
	if (errors & DSI_BTA_TIMEOUT)
		int_mask0 &= ~BIT(7);

	if (errors & DSI_PLL_UNLOCK)
		int_mask0 &= ~BIT(28);

	/* Lane-0 PHY errors: mask bits 21-25. */
	if (errors & DSI_DLN0_LP0_CONTENTION)
		int_mask0 &= ~BIT(24);
	if (errors & DSI_DLN0_LP1_CONTENTION)
		int_mask0 &= ~BIT(25);
	if (errors & DSI_DLN0_ESC_ENTRY_ERR)
		int_mask0 &= ~BIT(21);
	if (errors & DSI_DLN0_ESC_SYNC_ERR)
		int_mask0 &= ~BIT(22);
	if (errors & DSI_DLN0_LP_CONTROL_ERR)
		int_mask0 &= ~BIT(23);

	/* FIFO over/underflow errors. */
	if (errors & DSI_CMD_DMA_FIFO_UNDERFLOW)
		int_mask0 &= ~BIT(9);
	if (errors & DSI_CMD_MDP_FIFO_UNDERFLOW)
		int_mask0 &= ~BIT(11);
	if (errors & DSI_DLN0_HS_FIFO_OVERFLOW)
		int_mask0 &= ~BIT(16);
	if (errors & DSI_DLN1_HS_FIFO_OVERFLOW)
		int_mask0 &= ~BIT(17);
	if (errors & DSI_DLN2_HS_FIFO_OVERFLOW)
		int_mask0 &= ~BIT(18);
	if (errors & DSI_DLN3_HS_FIFO_OVERFLOW)
		int_mask0 &= ~BIT(19);
	if (errors & DSI_DLN0_HS_FIFO_UNDERFLOW)
		int_mask0 &= ~BIT(26);
	if (errors & DSI_DLN1_HS_FIFO_UNDERFLOW)
		int_mask0 &= ~BIT(27);
	if (errors & DSI_DLN2_HS_FIFO_UNDERFLOW)
		int_mask0 &= ~BIT(29);
	if (errors & DSI_DLN3_HS_FIFO_UNDERFLOW)
		int_mask0 &= ~BIT(30);

	if (errors & DSI_INTERLEAVE_OP_CONTENTION)
		int_mask0 &= ~BIT(8);

	DSI_W32(ctrl, DSI_INT_CTRL, int_ctrl);
	DSI_W32(ctrl, DSI_ERR_INT_MASK0, int_mask0);

	pr_debug("[DSI_%d] enable errors = 0x%llx, int_mask0=0x%x\n",
		 ctrl->index, errors, int_mask0);
}
+
+/**
+ * video_test_pattern_setup() - setup test pattern engine for video mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ */
+void dsi_ctrl_hw_14_video_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val)
+{
+ u32 reg = 0;
+
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL, init_val);
+
+ switch (type) {
+ case DSI_TEST_PATTERN_FIXED:
+ reg |= (0x2 << 4);
+ break;
+ case DSI_TEST_PATTERN_INC:
+ reg |= (0x1 << 4);
+ break;
+ case DSI_TEST_PATTERN_POLY:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_VIDEO_POLY, 0xF0F0F);
+ break;
+ default:
+ break;
+ }
+
+ DSI_W32(ctrl, DSI_TPG_MAIN_CONTROL, 0x100);
+ DSI_W32(ctrl, DSI_TPG_VIDEO_CONFIG, 0x5);
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ pr_debug("[DSI_%d] Video test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * cmd_test_pattern_setup() - setup test patttern engine for cmd mode
+ * @ctrl: Pointer to the controller host hardware.
+ * @type: Type of test pattern.
+ * @init_val: Initial value to use for generating test pattern.
+ * @stream_id: Stream Id on which packets are generated.
+ */
+void dsi_ctrl_hw_14_cmd_test_pattern_setup(struct dsi_ctrl_hw *ctrl,
+ enum dsi_test_pattern type,
+ u32 init_val,
+ u32 stream_id)
+{
+ u32 reg = 0;
+ u32 init_offset;
+ u32 poly_offset;
+ u32 pattern_sel_shift;
+
+ switch (stream_id) {
+ case 0:
+ init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0;
+ poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY;
+ pattern_sel_shift = 8;
+ break;
+ case 1:
+ init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1;
+ poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY;
+ pattern_sel_shift = 12;
+ break;
+ case 2:
+ init_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2;
+ poly_offset = DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY;
+ pattern_sel_shift = 20;
+ break;
+ default:
+ return;
+ }
+
+ DSI_W32(ctrl, init_offset, init_val);
+
+ switch (type) {
+ case DSI_TEST_PATTERN_FIXED:
+ reg |= (0x2 << pattern_sel_shift);
+ break;
+ case DSI_TEST_PATTERN_INC:
+ reg |= (0x1 << pattern_sel_shift);
+ break;
+ case DSI_TEST_PATTERN_POLY:
+ DSI_W32(ctrl, poly_offset, 0xF0F0F);
+ break;
+ default:
+ break;
+ }
+
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+ pr_debug("[DSI_%d] Cmd test pattern setup done\n", ctrl->index);
+}
+
+/**
+ * test_pattern_enable() - enable test pattern engine
+ * @ctrl: Pointer to the controller host hardware.
+ * @enable: Enable/Disable test pattern engine.
+ */
+void dsi_ctrl_hw_14_test_pattern_enable(struct dsi_ctrl_hw *ctrl,
+ bool enable)
+{
+ u32 reg = DSI_R32(ctrl, DSI_TEST_PATTERN_GEN_CTRL);
+
+ if (enable)
+ reg |= BIT(0);
+ else
+ reg &= ~BIT(0);
+
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CTRL, reg);
+
+ pr_debug("[DSI_%d] Test pattern enable=%d\n", ctrl->index, enable);
+}
+
+/**
+ * trigger_cmd_test_pattern() - trigger a command mode frame update with
+ * test pattern
+ * @ctrl: Pointer to the controller host hardware.
+ * @stream_id: Stream on which frame update is sent.
+ */
+void dsi_ctrl_hw_14_trigger_cmd_test_pattern(struct dsi_ctrl_hw *ctrl,
+ u32 stream_id)
+{
+ switch (stream_id) {
+ case 0:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER, 0x1);
+ break;
+ case 1:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER, 0x1);
+ break;
+ case 2:
+ DSI_W32(ctrl, DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER, 0x1);
+ break;
+ default:
+ break;
+ }
+
+ pr_debug("[DSI_%d] Cmd Test pattern trigger\n", ctrl->index);
+}
+
+#define DUMP_REG_VALUE(off) "\t%-30s: 0x%08x\n", #off, DSI_R32(ctrl, off)
+ssize_t dsi_ctrl_hw_14_reg_dump_to_buffer(struct dsi_ctrl_hw *ctrl,
+ char *buf,
+ u32 size)
+{
+ u32 len = 0;
+
+ len += snprintf((buf + len), (size - len), "CONFIGURATION REGS:\n");
+
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_HW_VERSION));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_STATUS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_FIFO_STATUS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_SYNC_DATATYPE));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_PIXEL_DATATYPE));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_BLANKING_DATATYPE));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_DATA_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_H));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_ACTIVE_V));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_TOTAL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_HSYNC));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VIDEO_MODE_VSYNC_VPOS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_DMA_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_DMA_CMD_OFFSET));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_DMA_CMD_LENGTH));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_DMA_FIFO_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_DMA_NULL_PACKET_DATA));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM0_TOTAL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM1_TOTAL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_ACK_ERR_STATUS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RDBK_DATA0));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RDBK_DATA1));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RDBK_DATA2));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RDBK_DATA3));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RDBK_DATATYPE0));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RDBK_DATATYPE1));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_TRIG_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_EXT_MUX));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_EXT_MUX_TE_PULSE_DETECT_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_CMD_MODE_DMA_SW_TRIGGER));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_CMD_MODE_MDP_SW_TRIGGER));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_CMD_MODE_BTA_SW_TRIGGER));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RESET_SW_TRIGGER));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_LANE_STATUS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_LANE_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_LANE_SWAP_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_DLN0_PHY_ERR));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_LP_TIMER_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_HS_TIMER_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_TIMEOUT_STATUS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_CLKOUT_TIMING_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_EOT_PACKET));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_EOT_PACKET_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_GENERIC_ESC_TX_TRIGGER));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_ERR_INT_MASK0));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_INT_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_SOFT_RESET));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_CLK_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_CLK_STATUS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_PHY_SW_RESET));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_AXI2AHB_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_CTRL2));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_COMMAND_MODE_MDP_STREAM2_TOTAL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VBIF_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_AES_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_RDBK_DATA_CTRL));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_STATUS));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_WRITE_TRIGGER));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_DSI_TIMING_FLUSH));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_DSI_TIMING_DB_MODE));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_TPG_DMA_FIFO_RESET));
+ len += snprintf((buf + len), (size - len),
+ DUMP_REG_VALUE(DSI_VERSION));
+
+ pr_err("LLENGTH = %d\n", len);
+ return len;
+}
+
+
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
new file mode 100644
index 000000000000..028ad46664a7
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_ctrl_reg_1_4.h
@@ -0,0 +1,192 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_CTRL_REG_H_
+#define _DSI_CTRL_REG_H_
+
+#define DSI_HW_VERSION (0x0000)
+#define DSI_CTRL (0x0004)
+#define DSI_STATUS (0x0008)
+#define DSI_FIFO_STATUS (0x000C)
+#define DSI_VIDEO_MODE_CTRL (0x0010)
+#define DSI_VIDEO_MODE_SYNC_DATATYPE (0x0014)
+#define DSI_VIDEO_MODE_PIXEL_DATATYPE (0x0018)
+#define DSI_VIDEO_MODE_BLANKING_DATATYPE (0x001C)
+#define DSI_VIDEO_MODE_DATA_CTRL (0x0020)
+#define DSI_VIDEO_MODE_ACTIVE_H (0x0024)
+#define DSI_VIDEO_MODE_ACTIVE_V (0x0028)
+#define DSI_VIDEO_MODE_TOTAL (0x002C)
+#define DSI_VIDEO_MODE_HSYNC (0x0030)
+#define DSI_VIDEO_MODE_VSYNC (0x0034)
+#define DSI_VIDEO_MODE_VSYNC_VPOS (0x0038)
+#define DSI_COMMAND_MODE_DMA_CTRL (0x003C)
+#define DSI_COMMAND_MODE_MDP_CTRL (0x0040)
+#define DSI_COMMAND_MODE_MDP_DCS_CMD_CTRL (0x0044)
+#define DSI_DMA_CMD_OFFSET (0x0048)
+#define DSI_DMA_CMD_LENGTH (0x004C)
+#define DSI_DMA_FIFO_CTRL (0x0050)
+#define DSI_DMA_NULL_PACKET_DATA (0x0054)
+#define DSI_COMMAND_MODE_MDP_STREAM0_CTRL (0x0058)
+#define DSI_COMMAND_MODE_MDP_STREAM0_TOTAL (0x005C)
+#define DSI_COMMAND_MODE_MDP_STREAM1_CTRL (0x0060)
+#define DSI_COMMAND_MODE_MDP_STREAM1_TOTAL (0x0064)
+#define DSI_ACK_ERR_STATUS (0x0068)
+#define DSI_RDBK_DATA0 (0x006C)
+#define DSI_RDBK_DATA1 (0x0070)
+#define DSI_RDBK_DATA2 (0x0074)
+#define DSI_RDBK_DATA3 (0x0078)
+#define DSI_RDBK_DATATYPE0 (0x007C)
+#define DSI_RDBK_DATATYPE1 (0x0080)
+#define DSI_TRIG_CTRL (0x0084)
+#define DSI_EXT_MUX (0x0088)
+#define DSI_EXT_MUX_TE_PULSE_DETECT_CTRL (0x008C)
+#define DSI_CMD_MODE_DMA_SW_TRIGGER (0x0090)
+#define DSI_CMD_MODE_MDP_SW_TRIGGER (0x0094)
+#define DSI_CMD_MODE_BTA_SW_TRIGGER (0x0098)
+#define DSI_RESET_SW_TRIGGER (0x009C)
+#define DSI_MISR_CMD_CTRL (0x00A0)
+#define DSI_MISR_VIDEO_CTRL (0x00A4)
+#define DSI_LANE_STATUS (0x00A8)
+#define DSI_LANE_CTRL (0x00AC)
+#define DSI_LANE_SWAP_CTRL (0x00B0)
+#define DSI_DLN0_PHY_ERR (0x00B4)
+#define DSI_LP_TIMER_CTRL (0x00B8)
+#define DSI_HS_TIMER_CTRL (0x00BC)
+#define DSI_TIMEOUT_STATUS (0x00C0)
+#define DSI_CLKOUT_TIMING_CTRL (0x00C4)
+#define DSI_EOT_PACKET (0x00C8)
+#define DSI_EOT_PACKET_CTRL (0x00CC)
+#define DSI_GENERIC_ESC_TX_TRIGGER (0x00D0)
+#define DSI_CAM_BIST_CTRL (0x00D4)
+#define DSI_CAM_BIST_FRAME_SIZE (0x00D8)
+#define DSI_CAM_BIST_BLOCK_SIZE (0x00DC)
+#define DSI_CAM_BIST_FRAME_CONFIG (0x00E0)
+#define DSI_CAM_BIST_LSFR_CTRL (0x00E4)
+#define DSI_CAM_BIST_LSFR_INIT (0x00E8)
+#define DSI_CAM_BIST_START (0x00EC)
+#define DSI_CAM_BIST_STATUS (0x00F0)
+#define DSI_ERR_INT_MASK0 (0x010C)
+#define DSI_INT_CTRL (0x0110)
+#define DSI_IOBIST_CTRL (0x0114)
+#define DSI_SOFT_RESET (0x0118)
+#define DSI_CLK_CTRL (0x011C)
+#define DSI_CLK_STATUS (0x0120)
+#define DSI_PHY_SW_RESET (0x012C)
+#define DSI_AXI2AHB_CTRL (0x0130)
+#define DSI_MISR_CMD_MDP0_32BIT (0x0134)
+#define DSI_MISR_CMD_MDP1_32BIT (0x0138)
+#define DSI_MISR_CMD_DMA_32BIT (0x013C)
+#define DSI_MISR_VIDEO_32BIT (0x0140)
+#define DSI_LANE_MISR_CTRL (0x0144)
+#define DSI_LANE0_MISR (0x0148)
+#define DSI_LANE1_MISR (0x014C)
+#define DSI_LANE2_MISR (0x0150)
+#define DSI_LANE3_MISR (0x0154)
+#define DSI_TEST_PATTERN_GEN_CTRL (0x015C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_POLY (0x0160)
+#define DSI_TEST_PATTERN_GEN_VIDEO_INIT_VAL (0x0164)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM0_POLY (0x0168)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL0 (0x016C)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM1_POLY (0x0170)
+#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL1 (0x0174)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_POLY (0x0178)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL (0x017C)
+#define DSI_TEST_PATTERN_GEN_VIDEO_ENABLE (0x0180)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM0_TRIGGER (0x0184)
+#define DSI_TEST_PATTERN_GEN_CMD_STREAM1_TRIGGER (0x0188)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_INIT_VAL2 (0x018C)
#define DSI_TEST_PATTERN_GEN_CMD_MDP_STREAM2_POLY (0x0190)
#define DSI_COMMAND_MODE_MDP_IDLE_CTRL (0x0194)
#define DSI_TEST_PATTERN_GEN_CMD_STREAM2_TRIGGER (0x0198)
+#define DSI_TPG_MAIN_CONTROL (0x019C)
+#define DSI_TPG_MAIN_CONTROL2 (0x01A0)
+#define DSI_TPG_VIDEO_CONFIG (0x01A4)
+#define DSI_TPG_COMPONENT_LIMITS (0x01A8)
+#define DSI_TPG_RECTANGLE (0x01AC)
+#define DSI_TPG_BLACK_WHITE_PATTERN_FRAMES (0x01B0)
+#define DSI_TPG_RGB_MAPPING (0x01B4)
+#define DSI_COMMAND_MODE_MDP_CTRL2 (0x01B8)
+#define DSI_COMMAND_MODE_MDP_STREAM2_CTRL (0x01BC)
+#define DSI_COMMAND_MODE_MDP_STREAM2_TOTAL (0x01C0)
+#define DSI_MISR_CMD_MDP2_8BIT (0x01C4)
+#define DSI_MISR_CMD_MDP2_32BIT (0x01C8)
+#define DSI_VBIF_CTRL (0x01CC)
+#define DSI_AES_CTRL (0x01D0)
+#define DSI_RDBK_DATA_CTRL (0x01D4)
+#define DSI_TEST_PATTERN_GEN_CMD_DMA_INIT_VAL2 (0x01D8)
+#define DSI_TPG_DMA_FIFO_STATUS (0x01DC)
+#define DSI_TPG_DMA_FIFO_WRITE_TRIGGER (0x01E0)
+#define DSI_DSI_TIMING_FLUSH (0x01E4)
+#define DSI_DSI_TIMING_DB_MODE (0x01E8)
+#define DSI_TPG_DMA_FIFO_RESET (0x01EC)
+#define DSI_SCRATCH_REGISTER_0 (0x01F0)
+#define DSI_VERSION (0x01F4)
+#define DSI_SCRATCH_REGISTER_1 (0x01F8)
+#define DSI_SCRATCH_REGISTER_2 (0x01FC)
+#define DSI_DYNAMIC_REFRESH_CTRL (0x0200)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY (0x0204)
+#define DSI_DYNAMIC_REFRESH_PIPE_DELAY2 (0x0208)
+#define DSI_DYNAMIC_REFRESH_PLL_DELAY (0x020C)
+#define DSI_DYNAMIC_REFRESH_STATUS (0x0210)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL0 (0x0214)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL1 (0x0218)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL2 (0x021C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL3 (0x0220)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL4 (0x0224)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL5 (0x0228)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL6 (0x022C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL7 (0x0230)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL8 (0x0234)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL9 (0x0238)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL10 (0x023C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL11 (0x0240)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL12 (0x0244)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL13 (0x0248)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL14 (0x024C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL15 (0x0250)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL16 (0x0254)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL17 (0x0258)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL18 (0x025C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL19 (0x0260)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL20 (0x0264)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL21 (0x0268)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL22 (0x026C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL23 (0x0270)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL24 (0x0274)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL25 (0x0278)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL26 (0x027C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL27 (0x0280)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL28 (0x0284)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL29 (0x0288)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL30 (0x028C)
+#define DSI_DYNAMIC_REFRESH_PLL_CTRL31 (0x0290)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR (0x0294)
+#define DSI_DYNAMIC_REFRESH_PLL_UPPER_ADDR2 (0x0298)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL (0x02A0)
+#define DSI_VIDEO_COMPRESSION_MODE_CTRL2 (0x02A4)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL (0x02A8)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL2 (0x02AC)
+#define DSI_COMMAND_COMPRESSION_MODE_CTRL3 (0x02B0)
+#define DSI_COMMAND_MODE_NULL_INSERTION_CTRL (0x02B4)
+#define DSI_READ_BACK_DISABLE_STATUS (0x02B8)
+#define DSI_DESKEW_CTRL (0x02BC)
+#define DSI_DESKEW_DELAY_CTRL (0x02C0)
+#define DSI_DESKEW_SW_TRIGGER (0x02C4)
+#define DSI_SECURE_DISPLAY_STATUS (0x02CC)
+#define DSI_SECURE_DISPLAY_BLOCK_COMMAND_COLOR (0x02D0)
+#define DSI_SECURE_DISPLAY_BLOCK_VIDEO_COLOR (0x02D4)
+
+
+#endif /* _DSI_CTRL_REG_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
new file mode 100644
index 000000000000..91501a2efd20
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_defs.h
@@ -0,0 +1,374 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DEFS_H_
+#define _DSI_DEFS_H_
+
+#include <linux/types.h>
+
/* Total line length in pixels for timing @t: active + hbp + hsync + hfp. */
#define DSI_H_TOTAL(t) (((t)->h_active) + ((t)->h_back_porch) + \
			((t)->h_sync_width) + ((t)->h_front_porch))

/* Total frame height in lines for timing @t: active + vbp + vsync + vfp. */
#define DSI_V_TOTAL(t) (((t)->v_active) + ((t)->v_back_porch) + \
			((t)->v_sync_width) + ((t)->v_front_porch))
+
/**
 * enum dsi_pixel_format - DSI pixel formats
 * @DSI_PIXEL_FORMAT_RGB565:       16-bit RGB (5-6-5).
 * @DSI_PIXEL_FORMAT_RGB666:       18-bit RGB (6-6-6), packed.
 * @DSI_PIXEL_FORMAT_RGB666_LOOSE: 18-bit RGB (6-6-6), loosely packed in
 *                                 24 bits.
 * @DSI_PIXEL_FORMAT_RGB888:       24-bit RGB (8-8-8).
 * @DSI_PIXEL_FORMAT_RGB111:       3-bit RGB (1-1-1).
 * @DSI_PIXEL_FORMAT_RGB332:       8-bit RGB (3-3-2).
 * @DSI_PIXEL_FORMAT_RGB444:       12-bit RGB (4-4-4).
 * @DSI_PIXEL_FORMAT_MAX:          Number of formats; not a valid format.
 */
enum dsi_pixel_format {
	DSI_PIXEL_FORMAT_RGB565 = 0,
	DSI_PIXEL_FORMAT_RGB666,
	DSI_PIXEL_FORMAT_RGB666_LOOSE,
	DSI_PIXEL_FORMAT_RGB888,
	DSI_PIXEL_FORMAT_RGB111,
	DSI_PIXEL_FORMAT_RGB332,
	DSI_PIXEL_FORMAT_RGB444,
	DSI_PIXEL_FORMAT_MAX
};

/**
 * enum dsi_op_mode - dsi operation mode
 * @DSI_OP_VIDEO_MODE: DSI video mode operation
 * @DSI_OP_CMD_MODE:   DSI Command mode operation
 * @DSI_OP_MODE_MAX:   Number of modes; not a valid mode.
 */
enum dsi_op_mode {
	DSI_OP_VIDEO_MODE = 0,
	DSI_OP_CMD_MODE,
	DSI_OP_MODE_MAX
};

/**
 * enum dsi_mode_flags - flags to signal other drm components via private flags
 * @DSI_MODE_FLAG_SEAMLESS:          Seamless transition requested by user
 * @DSI_MODE_FLAG_DFPS:              Seamless transition is DynamicFPS
 * @DSI_MODE_FLAG_VBLANK_PRE_MODESET: Transition needs VBLANK before Modeset
 */
enum dsi_mode_flags {
	DSI_MODE_FLAG_SEAMLESS = BIT(0),
	DSI_MODE_FLAG_DFPS = BIT(1),
	DSI_MODE_FLAG_VBLANK_PRE_MODESET = BIT(2)
};
+
/**
 * enum dsi_data_lanes - dsi physical lanes
 * @DSI_DATA_LANE_0: Physical lane 0
 * @DSI_DATA_LANE_1: Physical lane 1
 * @DSI_DATA_LANE_2: Physical lane 2
 * @DSI_DATA_LANE_3: Physical lane 3
 * @DSI_CLOCK_LANE:  Physical clock lane
 *
 * Bitmask values: multiple lanes may be ORed together.
 */
enum dsi_data_lanes {
	DSI_DATA_LANE_0 = BIT(0),
	DSI_DATA_LANE_1 = BIT(1),
	DSI_DATA_LANE_2 = BIT(2),
	DSI_DATA_LANE_3 = BIT(3),
	DSI_CLOCK_LANE = BIT(4)
};

/**
 * enum dsi_logical_lane - dsi logical lanes
 * @DSI_LOGICAL_LANE_0:    Logical lane 0
 * @DSI_LOGICAL_LANE_1:    Logical lane 1
 * @DSI_LOGICAL_LANE_2:    Logical lane 2
 * @DSI_LOGICAL_LANE_3:    Logical lane 3
 * @DSI_LOGICAL_CLOCK_LANE: Clock lane
 * @DSI_LANE_MAX:          Maximum lanes supported
 *
 * Sequential indices (unlike the bitmask enum dsi_data_lanes), suitable
 * for array indexing, e.g. in struct dsi_lane_mapping.
 */
enum dsi_logical_lane {
	DSI_LOGICAL_LANE_0 = 0,
	DSI_LOGICAL_LANE_1,
	DSI_LOGICAL_LANE_2,
	DSI_LOGICAL_LANE_3,
	DSI_LOGICAL_CLOCK_LANE,
	DSI_LANE_MAX
};

/**
 * enum dsi_trigger_type - dsi trigger type
 * @DSI_TRIGGER_NONE:    No trigger.
 * @DSI_TRIGGER_TE:      TE trigger.
 * @DSI_TRIGGER_SEOF:    Start or End of frame.
 * @DSI_TRIGGER_SW:      Software trigger.
 * @DSI_TRIGGER_SW_SEOF: Software trigger and start/end of frame.
 * @DSI_TRIGGER_SW_TE:   Software and TE triggers.
 * @DSI_TRIGGER_MAX:     Max trigger values.
 */
enum dsi_trigger_type {
	DSI_TRIGGER_NONE = 0,
	DSI_TRIGGER_TE,
	DSI_TRIGGER_SEOF,
	DSI_TRIGGER_SW,
	DSI_TRIGGER_SW_SEOF,
	DSI_TRIGGER_SW_TE,
	DSI_TRIGGER_MAX
};
+
/**
 * enum dsi_color_swap_mode - color swap mode
 * @DSI_COLOR_SWAP_RGB: Components sent in R, G, B order (no swap).
 * @DSI_COLOR_SWAP_RBG: Components sent in R, B, G order.
 * @DSI_COLOR_SWAP_BGR: Components sent in B, G, R order.
 * @DSI_COLOR_SWAP_BRG: Components sent in B, R, G order.
 * @DSI_COLOR_SWAP_GRB: Components sent in G, R, B order.
 * @DSI_COLOR_SWAP_GBR: Components sent in G, B, R order.
 */
enum dsi_color_swap_mode {
	DSI_COLOR_SWAP_RGB = 0,
	DSI_COLOR_SWAP_RBG,
	DSI_COLOR_SWAP_BGR,
	DSI_COLOR_SWAP_BRG,
	DSI_COLOR_SWAP_GRB,
	DSI_COLOR_SWAP_GBR
};

/**
 * enum dsi_dfps_type - Dynamic FPS support type
 * @DSI_DFPS_NONE:           Dynamic FPS is not supported.
 * @DSI_DFPS_SUSPEND_RESUME: FPS switched via a suspend/resume cycle.
 * @DSI_DFPS_IMMEDIATE_CLK:  Immediate switch by changing the link clock.
 * @DSI_DFPS_IMMEDIATE_HFP:  Immediate switch via horizontal front porch.
 * @DSI_DFPS_IMMEDIATE_VFP:  Immediate switch via vertical front porch.
 * @DSI_DFPS_MAX:            Number of types; not a valid type.
 */
enum dsi_dfps_type {
	DSI_DFPS_NONE = 0,
	DSI_DFPS_SUSPEND_RESUME,
	DSI_DFPS_IMMEDIATE_CLK,
	DSI_DFPS_IMMEDIATE_HFP,
	DSI_DFPS_IMMEDIATE_VFP,
	DSI_DFPS_MAX
};

/**
 * enum dsi_phy_type - DSI phy types
 * @DSI_PHY_TYPE_DPHY: MIPI D-PHY.
 * @DSI_PHY_TYPE_CPHY: MIPI C-PHY.
 */
enum dsi_phy_type {
	DSI_PHY_TYPE_DPHY,
	DSI_PHY_TYPE_CPHY
};

/**
 * enum dsi_te_mode - dsi te source
 * @DSI_TE_ON_DATA_LINK: TE read from DSI link
 * @DSI_TE_ON_EXT_PIN:   TE signal on an external GPIO
 */
enum dsi_te_mode {
	DSI_TE_ON_DATA_LINK = 0,
	DSI_TE_ON_EXT_PIN,
};

/**
 * enum dsi_video_traffic_mode - video mode pixel transmission type
 * @DSI_VIDEO_TRAFFIC_SYNC_PULSES:       Non-burst mode with sync pulses.
 * @DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS: Non-burst mode with sync start events.
 * @DSI_VIDEO_TRAFFIC_BURST_MODE:        Burst mode using sync start events.
 */
enum dsi_video_traffic_mode {
	DSI_VIDEO_TRAFFIC_SYNC_PULSES = 0,
	DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS,
	DSI_VIDEO_TRAFFIC_BURST_MODE,
};
+
+/**
+ * struct dsi_mode_info - video mode information dsi frame
+ * @h_active: Active width of one frame in pixels.
+ * @h_back_porch: Horizontal back porch in pixels.
+ * @h_sync_width: HSYNC width in pixels.
+ * @h_front_porch: Horizontal front porch in pixels.
+ * @h_skew: Horizontal skew in pixels.
+ * @h_sync_polarity: Polarity of HSYNC (false is active high).
+ * @v_active: Active height of one frame in lines.
+ * @v_back_porch: Vertical back porch in lines.
+ * @v_sync_width: VSYNC width in lines.
+ * @v_front_porch: Vertical front porch in lines.
+ * @v_sync_polarity: Polarity of VSYNC (false is active high).
+ * @refresh_rate: Refresh rate in Hz.
+ */
+struct dsi_mode_info {
+ u32 h_active;
+ u32 h_back_porch;
+ u32 h_sync_width;
+ u32 h_front_porch;
+ u32 h_skew;
+ bool h_sync_polarity;
+
+ u32 v_active;
+ u32 v_back_porch;
+ u32 v_sync_width;
+ u32 v_front_porch;
+ bool v_sync_polarity;
+
+ u32 refresh_rate;
+};
+
+/**
+ * struct dsi_lane_mapping - Mapping between DSI logical and physical lanes
+ * @physical_lane0: Logical lane to which physical lane 0 is mapped.
+ * @physical_lane1: Logical lane to which physical lane 1 is mapped.
+ * @physical_lane2: Logical lane to which physical lane 2 is mapped.
+ * @physical_lane3: Logical lane to which physical lane 3 is mapped.
+ */
+struct dsi_lane_mapping {
+ enum dsi_logical_lane physical_lane0;
+ enum dsi_logical_lane physical_lane1;
+ enum dsi_logical_lane physical_lane2;
+ enum dsi_logical_lane physical_lane3;
+};
+
+/**
+ * struct dsi_host_common_cfg - Host configuration common to video and cmd mode
+ * @dst_format: Destination pixel format.
+ * @data_lanes: Physical data lanes to be enabled.
+ * @en_crc_check: Enable CRC checks.
+ * @en_ecc_check: Enable ECC checks.
+ * @te_mode: Source for TE signalling.
+ * @mdp_cmd_trigger: MDP frame update trigger for command mode.
+ * @dma_cmd_trigger: Command DMA trigger.
+ * @cmd_trigger_stream: Command mode stream to trigger.
+ * @swap_mode: Color component ordering for pixel data.
+ * @bit_swap_red: Is red color bit swapped.
+ * @bit_swap_green: Is green color bit swapped.
+ * @bit_swap_blue: Is blue color bit swapped.
+ * @t_clk_post: Number of byte clock cycles that the transmitter shall
+ * continue sending after last data lane has transitioned
+ * to LP mode.
+ * @t_clk_pre: Number of byte clock cycles that the high speed clock
+ * shall be driven prior to data lane transitions from LP
+ * to HS mode.
+ * @ignore_rx_eot: Ignore Rx EOT packets if set to true.
+ * @append_tx_eot: Append EOT packets for forward transmissions if set to
+ * true.
+ * @force_clk_lane_hs: Force clock lane in high speed mode.
+ */
+struct dsi_host_common_cfg {
+ enum dsi_pixel_format dst_format;
+ enum dsi_data_lanes data_lanes;
+ bool en_crc_check;
+ bool en_ecc_check;
+ enum dsi_te_mode te_mode;
+ enum dsi_trigger_type mdp_cmd_trigger;
+ enum dsi_trigger_type dma_cmd_trigger;
+ u32 cmd_trigger_stream;
+ enum dsi_color_swap_mode swap_mode;
+ bool bit_swap_red;
+ bool bit_swap_green;
+ bool bit_swap_blue;
+ u32 t_clk_post;
+ u32 t_clk_pre;
+ bool ignore_rx_eot;
+ bool append_tx_eot;
+ bool force_clk_lane_hs;
+};
+
+/**
+ * struct dsi_video_engine_cfg - DSI video engine configuration
+ * @last_line_interleave_en: Allow command mode op interleaved on last line of
+ * video stream.
+ * @pulse_mode_hsa_he: Send HSA and HE following VS/VE packet if set to
+ * true.
+ * @hfp_lp11_en: Enter low power stop mode (LP-11) during HFP.
+ * @hbp_lp11_en: Enter low power stop mode (LP-11) during HBP.
+ * @hsa_lp11_en: Enter low power stop mode (LP-11) during HSA.
+ * @eof_bllp_lp11_en: Enter low power stop mode (LP-11) during BLLP of
+ * last line of a frame.
+ * @bllp_lp11_en: Enter low power stop mode (LP-11) during BLLP.
+ * @traffic_mode: Traffic mode for video stream.
+ * @vc_id: Virtual channel identifier.
+ */
+struct dsi_video_engine_cfg {
+ bool last_line_interleave_en;
+ bool pulse_mode_hsa_he;
+ bool hfp_lp11_en;
+ bool hbp_lp11_en;
+ bool hsa_lp11_en;
+ bool eof_bllp_lp11_en;
+ bool bllp_lp11_en;
+ enum dsi_video_traffic_mode traffic_mode;
+ u32 vc_id;
+};
+
+/**
+ * struct dsi_cmd_engine_cfg - DSI command engine configuration
+ * @max_cmd_packets_interleave: Maximum number of command mode RGB packets to
+ * send with in one horizontal blanking period
+ * of the video mode frame.
+ * @wr_mem_start: DCS command for write_memory_start.
+ * @wr_mem_continue: DCS command for write_memory_continue.
+ * @insert_dcs_command: Insert DCS command as first byte of payload
+ * of the pixel data.
+ * @mdp_transfer_time_us: Specifies the mdp transfer time for command mode
+ * panels in microseconds.
+ */
+struct dsi_cmd_engine_cfg {
+ u32 max_cmd_packets_interleave;
+ u32 wr_mem_start;
+ u32 wr_mem_continue;
+ bool insert_dcs_command;
+ u32 mdp_transfer_time_us;
+};
+
+/**
+ * struct dsi_host_config - DSI host configuration parameters.
+ * @panel_mode: Operation mode for panel (video or cmd mode).
+ * @common_config: Host configuration common to both Video and Cmd mode.
+ * @u: Mode specific engine configuration; @u.video_engine is
+ * valid in video mode, @u.cmd_engine in command mode
+ * (selected by @panel_mode).
+ * @esc_clk_rate_hz: Escape clock frequency in Hz.
+ * @bit_clk_rate_hz: Bit clock frequency in Hz.
+ * @video_timing: Video timing information of a frame.
+ * @lane_map: Mapping between logical and physical lanes.
+ */
+struct dsi_host_config {
+ enum dsi_op_mode panel_mode;
+ struct dsi_host_common_cfg common_config;
+ union {
+ struct dsi_video_engine_cfg video_engine;
+ struct dsi_cmd_engine_cfg cmd_engine;
+ } u;
+ u64 esc_clk_rate_hz;
+ u64 bit_clk_rate_hz;
+ struct dsi_mode_info video_timing;
+ struct dsi_lane_mapping lane_map;
+};
+
+/**
+ * struct dsi_display_mode - specifies mode for dsi display
+ * @timing: Timing parameters for the panel.
+ * @pixel_clk_khz: Pixel clock in kHz.
+ * @panel_mode: Panel operation mode.
+ * @flags: Additional flags.
+ */
+struct dsi_display_mode {
+ struct dsi_mode_info timing;
+ u32 pixel_clk_khz;
+ enum dsi_op_mode panel_mode;
+
+ u32 flags;
+};
+
+#endif /* _DSI_DEFS_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
new file mode 100644
index 000000000000..8e25260d21c0
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.c
@@ -0,0 +1,2920 @@
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "msm-dsi-display:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "dsi_display.h"
+#include "dsi_panel.h"
+#include "dsi_ctrl.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_drm.h"
+#include "dba_bridge.h"
+
+#define to_dsi_display(x) container_of(x, struct dsi_display, host)
+#define DSI_DBA_CLIENT_NAME "dsi"
+
+/* Protects dsi_display_list, the global list of probed displays. */
+static DEFINE_MUTEX(dsi_display_list_lock);
+static LIST_HEAD(dsi_display_list);
+
+/* Device-tree match table for the top-level DSI display device. */
+static const struct of_device_id dsi_display_dt_match[] = {
+ {.compatible = "qcom,dsi-display"},
+ {}
+};
+
+/* Primary display handle; NOTE(review): assigned later in this file. */
+static struct dsi_display *main_display;
+
+/**
+ * dsi_display_set_backlight() - set backlight level on the first panel
+ * @display: Display handle (opaque void * for external callers).
+ * @bl_lvl: Requested backlight level.
+ *
+ * Return: 0 on success, -EINVAL for a NULL display or missing panel,
+ * error code from dsi_panel_set_backlight() otherwise.
+ */
+int dsi_display_set_backlight(void *display, u32 bl_lvl)
+{
+ struct dsi_display *dsi_display = display;
+ struct dsi_panel *panel;
+ int rc = 0;
+
+ if (dsi_display == NULL)
+ return -EINVAL;
+
+ /* Guard against a display with no panel attached yet. */
+ panel = dsi_display->panel[0];
+ if (panel == NULL)
+ return -EINVAL;
+
+ rc = dsi_panel_set_backlight(panel, bl_lvl);
+ if (rc)
+ pr_err("unable to set backlight\n");
+
+ return rc;
+}
+
+/*
+ * debugfs read handler for the "dump_info" node. Emits a one-shot text
+ * summary of the display: name, resolution, ctrl/phy names, panel names
+ * and the clock master controller.
+ */
+static ssize_t debugfs_dump_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct dsi_display *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+ int i;
+
+ if (!display)
+ return -ENODEV;
+
+ /* single-shot read: any non-zero offset means EOF */
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf + len, (SZ_4K - len), "name = %s\n", display->name);
+ len += snprintf(buf + len, (SZ_4K - len),
+ "\tResolution = %dx%d\n",
+ display->config.video_timing.h_active,
+ display->config.video_timing.v_active);
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ len += snprintf(buf + len, (SZ_4K - len),
+ "\tCTRL_%d:\n\t\tctrl = %s\n\t\tphy = %s\n",
+ i, display->ctrl[i].ctrl->name,
+ display->ctrl[i].phy->name);
+ }
+
+ for (i = 0; i < display->panel_count; i++)
+ len += snprintf(buf + len, (SZ_4K - len),
+ "\tPanel_%d = %s\n", i, display->panel[i]->name);
+
+ len += snprintf(buf + len, (SZ_4K - len),
+ "\tClock master = %s\n",
+ display->ctrl[display->clk_master_idx].ctrl->name);
+
+ /* clamp to the caller's buffer size */
+ if (len > count)
+ len = count;
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+
+/* File operations for the debugfs "dump_info" node. */
+static const struct file_operations dump_info_fops = {
+ .open = simple_open,
+ .read = debugfs_dump_info_read,
+};
+
+/*
+ * Create the per-display debugfs directory (named after the display)
+ * and its "dump_info" file. On failure the directory is removed again.
+ */
+static int dsi_display_debugfs_init(struct dsi_display *display)
+{
+ int rc = 0;
+ struct dentry *dir, *dump_file;
+
+ dir = debugfs_create_dir(display->name, NULL);
+ if (IS_ERR_OR_NULL(dir)) {
+ rc = PTR_ERR(dir);
+ pr_err("[%s] debugfs create dir failed, rc = %d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ dump_file = debugfs_create_file("dump_info",
+ 0444,
+ dir,
+ display,
+ &dump_info_fops);
+ if (IS_ERR_OR_NULL(dump_file)) {
+ rc = PTR_ERR(dump_file);
+ pr_err("[%s] debugfs create file failed, rc=%d\n",
+ display->name, rc);
+ goto error_remove_dir;
+ }
+
+ display->root = dir;
+ return rc;
+error_remove_dir:
+ debugfs_remove(dir);
+error:
+ return rc;
+}
+
+/* Tear down the debugfs tree created by dsi_display_debugfs_init(). */
+static int dsi_display_debugfs_deinit(struct dsi_display *display)
+{
+ debugfs_remove_recursive(display->root);
+ return 0;
+}
+
+/*
+ * For split-DSI panels, divide the horizontal timing parameters and the
+ * pixel clock evenly across the controllers driving the panel.
+ */
+static void adjust_timing_by_ctrl_count(const struct dsi_display *display,
+ struct dsi_display_mode *mode)
+{
+ u32 count = display->ctrl_count;
+
+ if (count <= 1)
+ return;
+
+ mode->timing.h_active /= count;
+ mode->timing.h_front_porch /= count;
+ mode->timing.h_sync_width /= count;
+ mode->timing.h_back_porch /= count;
+ mode->timing.h_skew /= count;
+ mode->pixel_clk_khz /= count;
+}
+
+/*
+ * Move every controller to the VREG_ON power state. Skipped entirely when
+ * continuous splash is active (bootloader left the rails on). On failure,
+ * already-powered controllers are unwound back to POWER_OFF.
+ */
+static int dsi_display_ctrl_power_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ if (display->cont_splash_enabled) {
+ pr_debug("skip ctrl power on\n");
+ return rc;
+ }
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl)
+ continue;
+
+ rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+ DSI_CTRL_POWER_VREG_ON);
+ if (rc) {
+ pr_err("[%s] Failed to set power state, rc=%d\n",
+ ctrl->ctrl->name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+error:
+ /* unwind: power off the controllers enabled before the failure */
+ for (i = i - 1; i >= 0; i--) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl)
+ continue;
+ (void)dsi_ctrl_set_power_state(ctrl->ctrl, DSI_CTRL_POWER_OFF);
+ }
+ return rc;
+}
+
+/*
+ * Move every controller to the POWER_OFF state; stops at the first
+ * failure and returns its error code.
+ */
+static int dsi_display_ctrl_power_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl)
+ continue;
+
+ rc = dsi_ctrl_set_power_state(ctrl->ctrl, DSI_CTRL_POWER_OFF);
+ if (rc) {
+ pr_err("[%s] Failed to power off, rc=%d\n",
+ ctrl->ctrl->name, rc);
+ goto error;
+ }
+ }
+error:
+ return rc;
+}
+
+/*
+ * Power on the PHY of every controller. Skipped when continuous splash is
+ * active. On failure, already-powered PHYs are unwound.
+ *
+ * Fix: the forward loop previously guarded on !ctrl->ctrl while operating
+ * on ctrl->phy (and the unwind path guards on !ctrl->phy), risking a NULL
+ * phy dereference; guard on the member actually used.
+ */
+static int dsi_display_phy_power_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ /* early return for splash enabled case */
+ if (display->cont_splash_enabled) {
+ pr_debug("skip phy power on\n");
+ return rc;
+ }
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->phy)
+ continue;
+
+ rc = dsi_phy_set_power_state(ctrl->phy, true);
+ if (rc) {
+ pr_err("[%s] Failed to set power state, rc=%d\n",
+ ctrl->phy->name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+error:
+ for (i = i - 1; i >= 0; i--) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->phy)
+ continue;
+ (void)dsi_phy_set_power_state(ctrl->phy, false);
+ }
+ return rc;
+}
+
+/*
+ * Power off the PHY of every controller; stops at the first failure.
+ *
+ * Fix: the error print dereferenced ctrl->ctrl->name although the loop
+ * only guarantees ctrl->phy is non-NULL; report the PHY's name instead.
+ */
+static int dsi_display_phy_power_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ /* Sequence does not matter for split dsi usecases */
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->phy)
+ continue;
+
+ rc = dsi_phy_set_power_state(ctrl->phy, false);
+ if (rc) {
+ pr_err("[%s] Failed to power off, rc=%d\n",
+ ctrl->phy->name, rc);
+ goto error;
+ }
+ }
+error:
+ return rc;
+}
+
+/*
+ * Raise all controllers to the CORE_CLK_ON power state, master first.
+ * On slave failure, the master is stepped back down to VREG_ON.
+ */
+static int dsi_display_ctrl_core_clk_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /* early return for splash enabled case */
+ if (display->cont_splash_enabled) {
+ pr_debug("skip core clk on calling\n");
+ return rc;
+ }
+
+ /*
+ * In case of split DSI usecases, the clock for master controller should
+ * be enabled before the other controller. Master controller in the
+ * clock context refers to the controller that sources the clock.
+ */
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to turn on clocks, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ /* Turn on rest of the controllers */
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+ DSI_CTRL_POWER_CORE_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to turn on clock, rc=%d\n",
+ display->name, rc);
+ goto error_disable_master;
+ }
+ }
+ return rc;
+error_disable_master:
+ /* drop the master back to the VREG-only power state */
+ (void)dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_VREG_ON);
+error:
+ return rc;
+}
+
+/*
+ * Program the clock source and raise all controllers to LINK_CLK_ON,
+ * master first. On slave failure, the master is stepped back down to
+ * CORE_CLK_ON.
+ */
+static int dsi_display_ctrl_link_clk_on(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /* early return for splash enabled case */
+ if (display->cont_splash_enabled) {
+ pr_debug("skip ctrl link clk on calling\n");
+ return rc;
+ }
+
+ /*
+ * In case of split DSI usecases, the clock for master controller should
+ * be enabled before the other controller. Master controller in the
+ * clock context refers to the controller that sources the clock.
+ */
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ rc = dsi_ctrl_set_clock_source(m_ctrl->ctrl,
+ &display->clock_info.src_clks);
+ if (rc) {
+ pr_err("[%s] failed to set source clocks for master, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_LINK_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to turn on clocks, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ /* Turn on rest of the controllers */
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_clock_source(ctrl->ctrl,
+ &display->clock_info.src_clks);
+ if (rc) {
+ pr_err("[%s] failed to set source clocks, rc=%d\n",
+ display->name, rc);
+ goto error_disable_master;
+ }
+
+ rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+ DSI_CTRL_POWER_LINK_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to turn on clock, rc=%d\n",
+ display->name, rc);
+ goto error_disable_master;
+ }
+ }
+ return rc;
+error_disable_master:
+ /* step the master back down to core clocks only */
+ (void)dsi_ctrl_set_power_state(m_ctrl->ctrl,
+ DSI_CTRL_POWER_CORE_CLK_ON);
+error:
+ return rc;
+}
+
+/*
+ * Turn core clocks off by stepping every controller's power state back
+ * down to VREG_ON — slaves first, master last.
+ */
+static int dsi_display_ctrl_core_clk_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /*
+ * In case of split DSI usecases, clock for slave DSI controllers should
+ * be disabled first before disabling clock for master controller. Slave
+ * controllers in the clock context refer to controller which source
+ * clock from another controller.
+ */
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+ DSI_CTRL_POWER_VREG_ON);
+ if (rc) {
+ pr_err("[%s] failed to turn off clock, rc=%d\n",
+ display->name, rc);
+ }
+ }
+
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_VREG_ON);
+ if (rc)
+ pr_err("[%s] failed to turn off clocks, rc=%d\n",
+ display->name, rc);
+
+ return rc;
+}
+
+/*
+ * Turn link clocks off by stepping every controller's power state back
+ * down to CORE_CLK_ON — slaves first, master last.
+ */
+static int dsi_display_ctrl_link_clk_off(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ /*
+ * In case of split DSI usecases, clock for slave DSI controllers should
+ * be disabled first before disabling clock for master controller. Slave
+ * controllers in the clock context refer to controller which source
+ * clock from another controller.
+ */
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+ DSI_CTRL_POWER_CORE_CLK_ON);
+ if (rc) {
+ pr_err("[%s] failed to turn off clock, rc=%d\n",
+ display->name, rc);
+ }
+ }
+ rc = dsi_ctrl_set_power_state(m_ctrl->ctrl, DSI_CTRL_POWER_CORE_CLK_ON);
+ if (rc)
+ pr_err("[%s] failed to turn off clocks, rc=%d\n",
+ display->name, rc);
+ return rc;
+}
+
+/*
+ * Initialize every controller's host; on failure, deinit the hosts that
+ * were already initialized.
+ */
+static int dsi_display_ctrl_init(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ for (i = 0 ; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_host_init(ctrl->ctrl,
+ display->cont_splash_enabled);
+ if (rc) {
+ pr_err("[%s] failed to init host_%d, rc=%d\n",
+ display->name, i, rc);
+ goto error_host_deinit;
+ }
+ }
+
+ return 0;
+error_host_deinit:
+ for (i = i - 1; i >= 0; i--) {
+ ctrl = &display->ctrl[i];
+ (void)dsi_ctrl_host_deinit(ctrl->ctrl);
+ }
+ return rc;
+}
+
+/*
+ * Deinitialize every controller's host; failures are logged but do not
+ * stop the loop, and the last error code is returned.
+ */
+static int dsi_display_ctrl_deinit(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *ctrl;
+
+ for (i = 0 ; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ rc = dsi_ctrl_host_deinit(ctrl->ctrl);
+ if (rc) {
+ pr_err("[%s] failed to deinit host_%d, rc=%d\n",
+ display->name, i, rc);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * Reference-counted enable of the command engine on all controllers,
+ * master first. Only the first call actually programs hardware; later
+ * calls just bump the refcount.
+ */
+static int dsi_display_cmd_engine_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ if (display->cmd_engine_refcount > 0) {
+ display->cmd_engine_refcount++;
+ return 0;
+ }
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+ rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+ DSI_CTRL_ENGINE_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error_disable_master;
+ }
+ }
+
+ display->cmd_engine_refcount++;
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+ return rc;
+}
+
+/*
+ * Reference-counted disable of the command engine: hardware is touched
+ * only when the last reference drops (slaves first, master last). The
+ * refcount is forced to zero even if a controller reports an error.
+ *
+ * Fix: error messages said "failed to enable" on the disable path.
+ */
+static int dsi_display_cmd_engine_disable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ if (display->cmd_engine_refcount == 0) {
+ pr_err("[%s] Invalid refcount\n", display->name);
+ return 0;
+ } else if (display->cmd_engine_refcount > 1) {
+ display->cmd_engine_refcount--;
+ return 0;
+ }
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_cmd_engine_state(ctrl->ctrl,
+ DSI_CTRL_ENGINE_OFF);
+ if (rc)
+ pr_err("[%s] failed to disable cmd engine, rc=%d\n",
+ display->name, rc);
+ }
+
+ rc = dsi_ctrl_set_cmd_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+ if (rc) {
+ pr_err("[%s] failed to disable cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+error:
+ display->cmd_engine_refcount = 0;
+ return rc;
+}
+
+/*
+ * Enable the host engine on all controllers, master first; on slave
+ * failure the master engine is turned back off.
+ */
+static int dsi_display_ctrl_host_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+ rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable host engine, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
+ DSI_CTRL_ENGINE_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable sl host engine, rc=%d\n",
+ display->name, rc);
+ goto error_disable_master;
+ }
+ }
+
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+ return rc;
+}
+
+/*
+ * Disable the host engine on all controllers — slaves first, master
+ * last. Slave failures are logged but do not stop the sequence.
+ */
+static int dsi_display_ctrl_host_disable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_host_engine_state(ctrl->ctrl,
+ DSI_CTRL_ENGINE_OFF);
+ if (rc)
+ pr_err("[%s] failed to disable host engine, rc=%d\n",
+ display->name, rc);
+ }
+
+ rc = dsi_ctrl_set_host_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+ if (rc) {
+ pr_err("[%s] failed to disable host engine, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+error:
+ return rc;
+}
+
+/*
+ * Enable the video engine on all controllers, master first; on slave
+ * failure the master engine is turned back off.
+ */
+static int dsi_display_vid_engine_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->video_master_idx];
+
+ rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable vid engine, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
+ DSI_CTRL_ENGINE_ON);
+ if (rc) {
+ pr_err("[%s] failed to enable vid engine, rc=%d\n",
+ display->name, rc);
+ goto error_disable_master;
+ }
+ }
+
+ return rc;
+error_disable_master:
+ (void)dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+error:
+ return rc;
+}
+
+/*
+ * Disable the video engine on all controllers — slaves first, master
+ * last. Failures are logged; the last error code is returned.
+ *
+ * Fix: master-path error log said "mvid engine" — spell it out.
+ */
+static int dsi_display_vid_engine_disable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->video_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_set_vid_engine_state(ctrl->ctrl,
+ DSI_CTRL_ENGINE_OFF);
+ if (rc)
+ pr_err("[%s] failed to disable vid engine, rc=%d\n",
+ display->name, rc);
+ }
+
+ rc = dsi_ctrl_set_vid_engine_state(m_ctrl->ctrl, DSI_CTRL_ENGINE_OFF);
+ if (rc)
+ pr_err("[%s] failed to disable master vid engine, rc=%d\n",
+ display->name, rc);
+
+ return rc;
+}
+
+/*
+ * Enable all PHYs, master first. In split-DSI the master PHY sources the
+ * PLL natively; the others run from a non-native source. On failure the
+ * master PHY is disabled again.
+ */
+static int dsi_display_phy_enable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+ enum dsi_phy_pll_source m_src = DSI_PLL_SOURCE_STANDALONE;
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+ if (display->ctrl_count > 1)
+ m_src = DSI_PLL_SOURCE_NATIVE;
+
+ rc = dsi_phy_enable(m_ctrl->phy,
+ &display->config,
+ m_src,
+ true, display->cont_splash_enabled);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_phy_enable(ctrl->phy,
+ &display->config,
+ DSI_PLL_SOURCE_NON_NATIVE,
+ true, display->cont_splash_enabled);
+ if (rc) {
+ pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+ display->name, rc);
+ goto error_disable_master;
+ }
+ }
+
+ return rc;
+
+error_disable_master:
+ (void)dsi_phy_disable(m_ctrl->phy);
+error:
+ return rc;
+}
+
+/*
+ * Disable all PHYs — slaves first, master last. Failures are logged;
+ * the last error code is returned.
+ */
+static int dsi_display_phy_disable(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ m_ctrl = &display->ctrl[display->clk_master_idx];
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_phy_disable(ctrl->phy);
+ if (rc)
+ pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+ display->name, rc);
+ }
+
+ rc = dsi_phy_disable(m_ctrl->phy);
+ if (rc)
+ pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+ display->name, rc);
+
+ return rc;
+}
+
+/* Placeholder wake-up hook called before command transfers; no-op today. */
+static int dsi_display_wake_up(struct dsi_display *display)
+{
+ return 0;
+}
+
+/*
+ * Send one command to all controllers in broadcast mode: queue the
+ * message into every controller's FIFO with the trigger deferred, fire
+ * the slave triggers, then fire the master trigger last.
+ */
+static int dsi_display_broadcast_cmd(struct dsi_display *display,
+ const struct mipi_dsi_msg *msg)
+{
+ int rc = 0;
+ u32 flags, m_flags;
+ struct dsi_display_ctrl *ctrl, *m_ctrl;
+ int i;
+
+ m_flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_BROADCAST_MASTER |
+ DSI_CTRL_CMD_DEFER_TRIGGER | DSI_CTRL_CMD_FIFO_STORE);
+ flags = (DSI_CTRL_CMD_BROADCAST | DSI_CTRL_CMD_DEFER_TRIGGER |
+ DSI_CTRL_CMD_FIFO_STORE);
+
+ /*
+ * 1. Setup commands in FIFO
+ * 2. Trigger commands
+ */
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+ rc = dsi_ctrl_cmd_transfer(m_ctrl->ctrl, msg, m_flags);
+ if (rc) {
+ pr_err("[%s] cmd transfer failed on master,rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (ctrl == m_ctrl)
+ continue;
+
+ rc = dsi_ctrl_cmd_transfer(ctrl->ctrl, msg, flags);
+ if (rc) {
+ pr_err("[%s] cmd transfer failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ rc = dsi_ctrl_cmd_tx_trigger(ctrl->ctrl,
+ DSI_CTRL_CMD_BROADCAST);
+ if (rc) {
+ pr_err("[%s] cmd trigger failed, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ }
+
+ rc = dsi_ctrl_cmd_tx_trigger(m_ctrl->ctrl,
+ (DSI_CTRL_CMD_BROADCAST_MASTER |
+ DSI_CTRL_CMD_BROADCAST));
+ if (rc) {
+ pr_err("[%s] cmd trigger failed for master, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+error:
+ return rc;
+}
+
+/*
+ * Issue a PHY software reset through every controller, master first.
+ * Skipped when continuous splash is active.
+ */
+static int dsi_display_phy_sw_reset(struct dsi_display *display)
+{
+ int rc = 0;
+ int i;
+ struct dsi_display_ctrl *m_ctrl, *ctrl;
+
+ if (display->cont_splash_enabled) {
+ pr_debug("skip phy sw reset\n");
+ return 0;
+ }
+
+ m_ctrl = &display->ctrl[display->cmd_master_idx];
+
+ rc = dsi_ctrl_phy_sw_reset(m_ctrl->ctrl);
+ if (rc) {
+ pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
+ goto error;
+ }
+
+ for (i = 0; i < display->ctrl_count; i++) {
+ ctrl = &display->ctrl[i];
+ if (!ctrl->ctrl || (ctrl == m_ctrl))
+ continue;
+
+ rc = dsi_ctrl_phy_sw_reset(ctrl->ctrl);
+ if (rc) {
+ pr_err("[%s] failed to reset phy, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+ }
+
+error:
+ return rc;
+}
+
+/* mipi_dsi_host_ops .attach hook; nothing to do for this driver. */
+static int dsi_host_attach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+/* mipi_dsi_host_ops .detach hook; nothing to do for this driver. */
+static int dsi_host_detach(struct mipi_dsi_host *host,
+ struct mipi_dsi_device *dsi)
+{
+ return 0;
+}
+
+/*
+ * mipi_dsi_host_ops .transfer hook: wake the display, enable the command
+ * engine, send the message (broadcast on split-DSI), then release the
+ * command engine reference.
+ *
+ * Fix: invalid parameters now return -EINVAL instead of 0, matching the
+ * negative-errno contract of the ssize_t return.
+ */
+static ssize_t dsi_host_transfer(struct mipi_dsi_host *host,
+ const struct mipi_dsi_msg *msg)
+{
+ struct dsi_display *display;
+ int rc = 0;
+
+ if (!host || !msg) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ display = to_dsi_display(host);
+
+ rc = dsi_display_wake_up(display);
+ if (rc) {
+ pr_err("[%s] failed to wake up display, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ rc = dsi_display_cmd_engine_enable(display);
+ if (rc) {
+ pr_err("[%s] failed to enable cmd engine, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+ if (display->ctrl_count > 1) {
+ rc = dsi_display_broadcast_cmd(display, msg);
+ if (rc) {
+ pr_err("[%s] cmd broadcast failed, rc=%d\n",
+ display->name, rc);
+ goto error_disable_cmd_engine;
+ }
+ } else {
+ rc = dsi_ctrl_cmd_transfer(display->ctrl[0].ctrl, msg,
+ DSI_CTRL_CMD_FIFO_STORE);
+ if (rc) {
+ pr_err("[%s] cmd transfer failed, rc=%d\n",
+ display->name, rc);
+ goto error_disable_cmd_engine;
+ }
+ }
+error_disable_cmd_engine:
+ /* success path also drops the cmd engine reference taken above */
+ (void)dsi_display_cmd_engine_disable(display);
+error:
+ return rc;
+}
+
+
+/* Callbacks registered with the MIPI DSI host framework. */
+static struct mipi_dsi_host_ops dsi_host_ops = {
+ .attach = dsi_host_attach,
+ .detach = dsi_host_detach,
+ .transfer = dsi_host_transfer,
+};
+
+/*
+ * Register this display as a MIPI DSI host with the DRM framework.
+ */
+static int dsi_display_mipi_host_init(struct dsi_display *display)
+{
+ int rc = 0;
+ struct mipi_dsi_host *host = &display->host;
+
+ host->dev = &display->pdev->dev;
+ host->ops = &dsi_host_ops;
+
+ rc = mipi_dsi_host_register(host);
+ if (rc) {
+ pr_err("[%s] failed to register mipi dsi host, rc=%d\n",
+ display->name, rc);
+ goto error;
+ }
+
+error:
+ return rc;
+}
+/* Unregister from the MIPI DSI host framework and clear our hooks. */
+static int dsi_display_mipi_host_deinit(struct dsi_display *display)
+{
+ struct mipi_dsi_host *host = &display->host;
+
+ mipi_dsi_host_unregister(host);
+ host->dev = NULL;
+ host->ops = NULL;
+
+ return 0;
+}
+
+/*
+ * Release every clock handle acquired by dsi_display_clocks_init() and
+ * NULL the pointers so the teardown is idempotent.
+ */
+static int dsi_display_clocks_deinit(struct dsi_display *display)
+{
+ int rc = 0;
+ struct dsi_clk_link_set *src = &display->clock_info.src_clks;
+ struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
+ struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+
+ if (src->byte_clk) {
+ devm_clk_put(&display->pdev->dev, src->byte_clk);
+ src->byte_clk = NULL;
+ }
+
+ if (src->pixel_clk) {
+ devm_clk_put(&display->pdev->dev, src->pixel_clk);
+ src->pixel_clk = NULL;
+ }
+
+ if (mux->byte_clk) {
+ devm_clk_put(&display->pdev->dev, mux->byte_clk);
+ mux->byte_clk = NULL;
+ }
+
+ if (mux->pixel_clk) {
+ devm_clk_put(&display->pdev->dev, mux->pixel_clk);
+ mux->pixel_clk = NULL;
+ }
+
+ if (shadow->byte_clk) {
+ devm_clk_put(&display->pdev->dev, shadow->byte_clk);
+ shadow->byte_clk = NULL;
+ }
+
+ if (shadow->pixel_clk) {
+ devm_clk_put(&display->pdev->dev, shadow->pixel_clk);
+ shadow->pixel_clk = NULL;
+ }
+
+ return rc;
+}
+
+/*
+ * Acquire the clock handles used by the display. The source byte/pixel
+ * clocks are mandatory; the mux and shadow clocks are needed only for
+ * dynamic refresh, so failing to get any of them is non-critical and we
+ * simply stop looking for the rest.
+ *
+ * Cleanups: removed stray ';' after if-blocks, fixed "requied" comment
+ * typos, consolidated the four identical comment blocks into one.
+ */
+static int dsi_display_clocks_init(struct dsi_display *display)
+{
+ int rc = 0;
+ struct dsi_clk_link_set *src = &display->clock_info.src_clks;
+ struct dsi_clk_link_set *mux = &display->clock_info.mux_clks;
+ struct dsi_clk_link_set *shadow = &display->clock_info.shadow_clks;
+
+ src->byte_clk = devm_clk_get(&display->pdev->dev, "src_byte_clk");
+ if (IS_ERR_OR_NULL(src->byte_clk)) {
+ rc = PTR_ERR(src->byte_clk);
+ src->byte_clk = NULL;
+ pr_err("failed to get src_byte_clk, rc=%d\n", rc);
+ goto error;
+ }
+
+ src->pixel_clk = devm_clk_get(&display->pdev->dev, "src_pixel_clk");
+ if (IS_ERR_OR_NULL(src->pixel_clk)) {
+ rc = PTR_ERR(src->pixel_clk);
+ src->pixel_clk = NULL;
+ pr_err("failed to get src_pixel_clk, rc=%d\n", rc);
+ goto error;
+ }
+
+ /*
+ * The clocks below are required only for dynamic refresh use cases.
+ * A missing clock is a non-critical failure: skip getting the rest
+ * and report success.
+ */
+ mux->byte_clk = devm_clk_get(&display->pdev->dev, "mux_byte_clk");
+ if (IS_ERR_OR_NULL(mux->byte_clk)) {
+ rc = PTR_ERR(mux->byte_clk);
+ pr_err("failed to get mux_byte_clk, rc=%d\n", rc);
+ mux->byte_clk = NULL;
+ rc = 0;
+ goto done;
+ }
+
+ mux->pixel_clk = devm_clk_get(&display->pdev->dev, "mux_pixel_clk");
+ if (IS_ERR_OR_NULL(mux->pixel_clk)) {
+ rc = PTR_ERR(mux->pixel_clk);
+ mux->pixel_clk = NULL;
+ pr_err("failed to get mux_pixel_clk, rc=%d\n", rc);
+ rc = 0;
+ goto done;
+ }
+
+ shadow->byte_clk = devm_clk_get(&display->pdev->dev, "shadow_byte_clk");
+ if (IS_ERR_OR_NULL(shadow->byte_clk)) {
+ rc = PTR_ERR(shadow->byte_clk);
+ shadow->byte_clk = NULL;
+ pr_err("failed to get shadow_byte_clk, rc=%d\n", rc);
+ rc = 0;
+ goto done;
+ }
+
+ shadow->pixel_clk = devm_clk_get(&display->pdev->dev,
+ "shadow_pixel_clk");
+ if (IS_ERR_OR_NULL(shadow->pixel_clk)) {
+ rc = PTR_ERR(shadow->pixel_clk);
+ shadow->pixel_clk = NULL;
+ pr_err("failed to get shadow_pixel_clk, rc=%d\n", rc);
+ rc = 0;
+ goto done;
+ }
+
+done:
+ return 0;
+error:
+ (void)dsi_display_clocks_deinit(display);
+ return rc;
+}
+
+/* Fill in a static 1:1 lane map: logical lane N drives physical lane N. */
+static int dsi_display_parse_lane_map(struct dsi_display *display)
+{
+	display->lane_map.physical_lane0 = DSI_LOGICAL_LANE_0;
+	display->lane_map.physical_lane1 = DSI_LOGICAL_LANE_1;
+	display->lane_map.physical_lane2 = DSI_LOGICAL_LANE_2;
+	display->lane_map.physical_lane3 = DSI_LOGICAL_LANE_3;
+
+	return 0;
+}
+
+/**
+ * dsi_display_parse_dt() - parse display device-tree properties
+ * @display: Handle to the display.
+ *
+ * Parses controller/PHY phandles, the split-swap and topology-control
+ * properties, the panel phandle list and optional bridge indices, then
+ * fills in the lane map.  On failure every partially-built panel table is
+ * released and the stale pointers are cleared so a later remove cannot
+ * double free them.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_display_parse_dt(struct dsi_display *display)
+{
+	int rc = 0;
+	int i, size;
+	u32 phy_count = 0;
+	struct device_node *of_node;
+	const char *name;
+	u32 top = 0;
+
+	/* Parse controllers */
+	for (i = 0; i < MAX_DSI_CTRLS_PER_DISPLAY; i++) {
+		of_node = of_parse_phandle(display->pdev->dev.of_node,
+					   "qcom,dsi-ctrl", i);
+		if (!of_node) {
+			if (!i) {
+				pr_err("No controllers present\n");
+				return -ENODEV;
+			}
+			break;
+		}
+
+		display->ctrl[i].ctrl_of_node = of_node;
+		display->ctrl_count++;
+	}
+
+	/* Parse Phys */
+	for (i = 0; i < MAX_DSI_CTRLS_PER_DISPLAY; i++) {
+		of_node = of_parse_phandle(display->pdev->dev.of_node,
+					   "qcom,dsi-phy", i);
+		if (!of_node) {
+			if (!i) {
+				pr_err("No PHY devices present\n");
+				rc = -ENODEV;
+				goto error;
+			}
+			break;
+		}
+
+		display->ctrl[i].phy_of_node = of_node;
+		phy_count++;
+	}
+
+	/* Every controller needs exactly one matching PHY. */
+	if (phy_count != display->ctrl_count) {
+		pr_err("Number of controllers does not match PHYs\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	/* Only read swap property in split case */
+	if (display->ctrl_count > 1) {
+		display->dsi_split_swap =
+			of_property_read_bool(display->pdev->dev.of_node,
+					      "qcom,dsi-split-swap");
+	}
+
+	rc = of_property_read_string(display->pdev->dev.of_node,
+				     "qcom,display-topology-control",
+				     &name);
+	if (rc) {
+		SDE_ERROR("unable to get qcom,display-topology-control,rc=%d\n",
+			  rc);
+		/* optional property: don't let its rc leak into later checks */
+		rc = 0;
+	} else {
+		SDE_DEBUG("%s qcom,display-topology-control = %s\n",
+			  __func__, name);
+
+		if (!strcmp(name, "force-mixer"))
+			top = BIT(SDE_RM_TOPCTL_FORCE_MIXER);
+		else if (!strcmp(name, "force-tiling"))
+			top = BIT(SDE_RM_TOPCTL_FORCE_TILING);
+
+		display->display_topology = top;
+	}
+
+	if (of_get_property(display->pdev->dev.of_node, "qcom,dsi-panel",
+			    &size)) {
+		display->panel_count = size / sizeof(int);
+		display->panel_of = devm_kzalloc(&display->pdev->dev,
+			sizeof(struct device_node *) * display->panel_count,
+			GFP_KERNEL);
+		if (!display->panel_of) {
+			SDE_ERROR("out of memory for panel_of\n");
+			rc = -ENOMEM;
+			goto error;
+		}
+		display->panel = devm_kzalloc(&display->pdev->dev,
+			sizeof(struct dsi_panel *) * display->panel_count,
+			GFP_KERNEL);
+		if (!display->panel) {
+			SDE_ERROR("out of memory for panel\n");
+			rc = -ENOMEM;
+			goto error;
+		}
+		for (i = 0; i < display->panel_count; i++) {
+			display->panel_of[i] =
+				of_parse_phandle(display->pdev->dev.of_node,
+						 "qcom,dsi-panel", i);
+			if (!display->panel_of[i]) {
+				SDE_ERROR("of_parse dsi-panel failed\n");
+				rc = -ENODEV;
+				goto error;
+			}
+		}
+	} else {
+		SDE_ERROR("No qcom,dsi-panel of node\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	if (of_get_property(display->pdev->dev.of_node, "qcom,bridge-index",
+			    &size)) {
+		if (size / sizeof(int) != display->panel_count) {
+			/* size / sizeof(int) has type size_t: print with %zu */
+			SDE_ERROR("size=%zu is different than count=%u\n",
+				  size / sizeof(int), display->panel_count);
+			rc = -EINVAL;
+			goto error;
+		}
+		display->bridge_idx = devm_kzalloc(&display->pdev->dev,
+			sizeof(u32) * display->panel_count, GFP_KERNEL);
+		if (!display->bridge_idx) {
+			SDE_ERROR("out of memory for bridge_idx\n");
+			rc = -ENOMEM;
+			goto error;
+		}
+		for (i = 0; i < display->panel_count; i++) {
+			rc = of_property_read_u32_index(
+					display->pdev->dev.of_node,
+					"qcom,bridge-index", i,
+					&(display->bridge_idx[i]));
+			if (rc) {
+				SDE_ERROR(
+					"read bridge-index error,i=%d rc=%d\n",
+					i, rc);
+				rc = -ENODEV;
+				goto error;
+			}
+		}
+	}
+
+	rc = dsi_display_parse_lane_map(display);
+	if (rc) {
+		pr_err("Lane map not found, rc=%d\n", rc);
+		goto error;
+	}
+error:
+	if (rc) {
+		if (display->panel_of)
+			for (i = 0; i < display->panel_count; i++)
+				if (display->panel_of[i])
+					of_node_put(display->panel_of[i]);
+		devm_kfree(&display->pdev->dev, display->panel_of);
+		devm_kfree(&display->pdev->dev, display->panel);
+		devm_kfree(&display->pdev->dev, display->bridge_idx);
+		/*
+		 * Clear the freed pointers so dsi_display_dev_remove() does
+		 * not free them a second time or dereference stale memory.
+		 */
+		display->panel_of = NULL;
+		display->panel = NULL;
+		display->bridge_idx = NULL;
+		display->panel_count = 0;
+	}
+	return rc;
+}
+
+/**
+ * dsi_display_res_init() - acquire ctrl/phy/panel/clock resources
+ * @display: Handle to the display.
+ *
+ * Acquires every controller and its PHY, then every panel, then the
+ * display clocks.  On failure everything acquired so far is released.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_display_res_init(struct dsi_display *display)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		ctrl->ctrl = dsi_ctrl_get(ctrl->ctrl_of_node);
+		if (IS_ERR_OR_NULL(ctrl->ctrl)) {
+			/* PTR_ERR(NULL) is 0; never report success here */
+			rc = ctrl->ctrl ? PTR_ERR(ctrl->ctrl) : -ENODEV;
+			pr_err("failed to get dsi controller, rc=%d\n", rc);
+			ctrl->ctrl = NULL;
+			goto error_ctrl_put;
+		}
+
+		ctrl->phy = dsi_phy_get(ctrl->phy_of_node);
+		if (IS_ERR_OR_NULL(ctrl->phy)) {
+			rc = ctrl->phy ? PTR_ERR(ctrl->phy) : -ENODEV;
+			pr_err("failed to get phy controller, rc=%d\n", rc);
+			dsi_ctrl_put(ctrl->ctrl);
+			ctrl->phy = NULL;
+			goto error_ctrl_put;
+		}
+	}
+
+	for (i = 0; i < display->panel_count; i++) {
+		display->panel[i] = dsi_panel_get(&display->pdev->dev,
+						  display->panel_of[i]);
+		/* Check the panel just fetched, not the array pointer. */
+		if (IS_ERR_OR_NULL(display->panel[i])) {
+			rc = display->panel[i] ?
+				PTR_ERR(display->panel[i]) : -ENODEV;
+			pr_err("failed to get panel, rc=%d\n", rc);
+			display->panel[i] = NULL;
+			goto error_panel_put;
+		}
+	}
+
+	rc = dsi_display_clocks_init(display);
+	if (rc) {
+		pr_err("Failed to parse clock data, rc=%d\n", rc);
+		i = display->panel_count;
+		goto error_panel_put;
+	}
+
+	return 0;
+
+error_panel_put:
+	/* Release panels acquired so far, then all ctrl/phy pairs below. */
+	for (i = i - 1; i >= 0; i--)
+		dsi_panel_put(display->panel[i]);
+	i = display->ctrl_count;
+error_ctrl_put:
+	for (i = i - 1; i >= 0; i--) {
+		ctrl = &display->ctrl[i];
+		dsi_ctrl_put(ctrl->ctrl);
+		dsi_phy_put(ctrl->phy);
+	}
+	return rc;
+}
+
+/* Undo dsi_display_res_init(): clocks first, then panels, then ctrl/phy. */
+static int dsi_display_res_deinit(struct dsi_display *display)
+{
+	struct dsi_display_ctrl *c;
+	int rc;
+	int n;
+
+	rc = dsi_display_clocks_deinit(display);
+	if (rc)
+		pr_err("clocks deinit failed, rc=%d\n", rc);
+
+	for (n = 0; n < display->panel_count; n++)
+		dsi_panel_put(display->panel[n]);
+
+	for (n = 0; n < display->ctrl_count; n++) {
+		c = &display->ctrl[n];
+		dsi_phy_put(c->phy);
+		dsi_ctrl_put(c->ctrl);
+	}
+
+	return rc;
+}
+
+/*
+ * A mode set is only legal while the link is fully quiescent:
+ * 1. Controllers powered no higher than VREG_ON (engines off).
+ * 2. Link clocks off.
+ * 3. Every PHY disabled.
+ */
+static int dsi_display_validate_mode_set(struct dsi_display *display,
+					 struct dsi_display_mode *mode,
+					 u32 flags)
+{
+	int idx;
+
+	for (idx = 0; idx < display->ctrl_count; idx++) {
+		struct dsi_display_ctrl *c = &display->ctrl[idx];
+
+		if (c->power_state > DSI_CTRL_POWER_VREG_ON || c->phy_enabled)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+/**
+ * dsi_display_is_seamless_dfps_possible() - can we dfps into @tgt seamlessly
+ * @display: display whose first panel's current mode is the baseline
+ * @tgt: candidate mode, with per-controller timings
+ * @dfps_type: dfps method in use; selects which front porch may differ
+ *
+ * Every timing field must match the current mode, except the front porch
+ * chosen by @dfps_type and the refresh rate (which must actually change).
+ * Pixel clock and flags mismatches are only logged, not rejected.
+ *
+ * Return: true if a seamless dynamic-fps switch to @tgt is possible.
+ */
+static bool dsi_display_is_seamless_dfps_possible(
+		const struct dsi_display *display,
+		const struct dsi_display_mode *tgt,
+		const enum dsi_dfps_type dfps_type)
+{
+	struct dsi_display_mode *cur;
+
+	if (!display || !tgt) {
+		pr_err("Invalid params\n");
+		return false;
+	}
+
+	/* Only the first panel's current mode is used as the baseline. */
+	cur = &display->panel[0]->mode;
+
+	if (cur->timing.h_active != tgt->timing.h_active) {
+		pr_debug("timing.h_active differs %d %d\n",
+				cur->timing.h_active, tgt->timing.h_active);
+		return false;
+	}
+
+	if (cur->timing.h_back_porch != tgt->timing.h_back_porch) {
+		pr_debug("timing.h_back_porch differs %d %d\n",
+				cur->timing.h_back_porch,
+				tgt->timing.h_back_porch);
+		return false;
+	}
+
+	if (cur->timing.h_sync_width != tgt->timing.h_sync_width) {
+		pr_debug("timing.h_sync_width differs %d %d\n",
+				cur->timing.h_sync_width,
+				tgt->timing.h_sync_width);
+		return false;
+	}
+
+	/* HFP may differ, but only when the dfps method adjusts HFP. */
+	if (cur->timing.h_front_porch != tgt->timing.h_front_porch) {
+		pr_debug("timing.h_front_porch differs %d %d\n",
+				cur->timing.h_front_porch,
+				tgt->timing.h_front_porch);
+		if (dfps_type != DSI_DFPS_IMMEDIATE_HFP)
+			return false;
+	}
+
+	if (cur->timing.h_skew != tgt->timing.h_skew) {
+		pr_debug("timing.h_skew differs %d %d\n",
+				cur->timing.h_skew,
+				tgt->timing.h_skew);
+		return false;
+	}
+
+	/* skip polarity comparison */
+
+	if (cur->timing.v_active != tgt->timing.v_active) {
+		pr_debug("timing.v_active differs %d %d\n",
+				cur->timing.v_active,
+				tgt->timing.v_active);
+		return false;
+	}
+
+	if (cur->timing.v_back_porch != tgt->timing.v_back_porch) {
+		pr_debug("timing.v_back_porch differs %d %d\n",
+				cur->timing.v_back_porch,
+				tgt->timing.v_back_porch);
+		return false;
+	}
+
+	if (cur->timing.v_sync_width != tgt->timing.v_sync_width) {
+		pr_debug("timing.v_sync_width differs %d %d\n",
+				cur->timing.v_sync_width,
+				tgt->timing.v_sync_width);
+		return false;
+	}
+
+	/* VFP may differ, but only when the dfps method adjusts VFP. */
+	if (cur->timing.v_front_porch != tgt->timing.v_front_porch) {
+		pr_debug("timing.v_front_porch differs %d %d\n",
+				cur->timing.v_front_porch,
+				tgt->timing.v_front_porch);
+		if (dfps_type != DSI_DFPS_IMMEDIATE_VFP)
+			return false;
+	}
+
+	/* skip polarity comparison */
+
+	/* An identical refresh rate means there is nothing to switch. */
+	if (cur->timing.refresh_rate == tgt->timing.refresh_rate) {
+		pr_debug("timing.refresh_rate identical %d %d\n",
+				cur->timing.refresh_rate,
+				tgt->timing.refresh_rate);
+		return false;
+	}
+
+	if (cur->pixel_clk_khz != tgt->pixel_clk_khz)
+		pr_debug("pixel_clk_khz differs %d %d\n",
+				cur->pixel_clk_khz, tgt->pixel_clk_khz);
+
+	if (cur->panel_mode != tgt->panel_mode) {
+		pr_debug("panel_mode differs %d %d\n",
+				cur->panel_mode, tgt->panel_mode);
+		return false;
+	}
+
+	if (cur->flags != tgt->flags)
+		pr_debug("flags differs %d %d\n", cur->flags, tgt->flags);
+
+	return true;
+}
+
+/**
+ * dsi_display_dfps_update() - apply a dynamic-fps timing update
+ * @display: Handle to the display.
+ * @dsi_mode: mode carrying the new timing.
+ *
+ * Pushes the new timing asynchronously to the clock-master controller
+ * first, then to the remaining controllers, and finally caches the mode
+ * in the first panel.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_display_dfps_update(struct dsi_display *display,
+		struct dsi_display_mode *dsi_mode)
+{
+	struct dsi_mode_info *timing;
+	struct dsi_display_ctrl *m_ctrl, *ctrl;
+	struct dsi_display_mode *panel_mode;
+	struct dsi_dfps_capabilities dfps_caps;
+	int rc = 0;
+	int i;
+
+	if (!display || !dsi_mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+	timing = &dsi_mode->timing;
+
+	/* Only the first panel's dfps capabilities are consulted. */
+	dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
+	if (!dfps_caps.dfps_support) {
+		pr_err("dfps not supported\n");
+		return -ENOTSUPP;
+	}
+
+	if (dfps_caps.type == DSI_DFPS_IMMEDIATE_CLK) {
+		pr_err("dfps clock method not supported\n");
+		return -ENOTSUPP;
+	}
+
+	/* For split DSI, update the clock master first */
+
+	pr_debug("configuring seamless dynamic fps\n\n");
+
+	m_ctrl = &display->ctrl[display->clk_master_idx];
+	rc = dsi_ctrl_async_timing_update(m_ctrl->ctrl, timing);
+	if (rc) {
+		pr_err("[%s] failed to dfps update clock master, rc=%d\n",
+				display->name, rc);
+		goto error;
+	}
+
+	/* Update the rest of the controllers */
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl->ctrl || (ctrl == m_ctrl))
+			continue;
+
+		rc = dsi_ctrl_async_timing_update(ctrl->ctrl, timing);
+		if (rc) {
+			pr_err("[%s] failed to dfps update host_%d, rc=%d\n",
+					display->name, i, rc);
+			goto error;
+		}
+	}
+
+	/* Cache the applied mode as the panel's current mode. */
+	panel_mode = &display->panel[0]->mode;
+	memcpy(panel_mode, dsi_mode, sizeof(*panel_mode));
+
+error:
+	return rc;
+}
+
+/**
+ * dsi_display_dfps_calc_front_porch() - derive a front porch for a new fps
+ * @clk_hz: pixel clock rate to hold constant
+ * @new_fps: requested refresh rate
+ * @a_total: total pixels/lines on the axis left untouched
+ * @b_total: total pixels/lines on the axis being adjusted
+ * @b_fp: current front porch on the adjusted axis
+ * @b_fp_out: where the computed front porch is stored
+ *
+ * Return: 0 on success, -EINVAL on invalid input or a negative result.
+ */
+static int dsi_display_dfps_calc_front_porch(
+		u64 clk_hz,
+		u32 new_fps,
+		u32 a_total,
+		u32 b_total,
+		u32 b_fp,
+		u32 *b_fp_out)
+{
+	s32 b_fp_new;
+
+	if (!b_fp_out) {
+		pr_err("Invalid params");
+		return -EINVAL;
+	}
+
+	if (!a_total || !new_fps) {
+		pr_err("Invalid pixel total or new fps in mode request\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Keep clock, other porches constant, use new fps, calc front porch
+	 * clk = (hor * ver * fps)
+	 * hfront = clk / (vtotal * fps)) - hactive - hback - hsync
+	 */
+	b_fp_new = (clk_hz / (a_total * new_fps)) - (b_total - b_fp);
+
+	pr_debug("clk %llu fps %u a %u b %u b_fp %u new_fp %d\n",
+			clk_hz, new_fps, a_total, b_total, b_fp, b_fp_new);
+
+	/* A negative porch means the request doesn't fit the fixed clock. */
+	if (b_fp_new < 0) {
+		pr_err("Invalid new_hfp calcluated%d\n", b_fp_new);
+		return -EINVAL;
+	}
+
+	/*
+	 * TODO: To differentiate from clock method when communicating to the
+	 * other components, perhaps we should set clk here to original value
+	 */
+	*b_fp_out = b_fp_new;
+
+	return 0;
+}
+
+/**
+ * dsi_display_get_dfps_timing() - adjust @adj_mode's timing for seamless dfps
+ * @display: Handle to the display.
+ * @adj_mode: mode to adjust; the relevant front porch is rewritten in place.
+ *
+ * Derives the new front porch (vertical or horizontal, depending on the
+ * panel's dfps method) so the clock-master's current pixel clock can stay
+ * constant at the requested refresh rate.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_display_get_dfps_timing(struct dsi_display *display,
+		struct dsi_display_mode *adj_mode)
+{
+	struct dsi_dfps_capabilities dfps_caps;
+	struct dsi_display_mode per_ctrl_mode;
+	struct dsi_mode_info *timing;
+	struct dsi_ctrl *m_ctrl;
+	u64 clk_hz;
+
+	int rc = 0;
+
+	if (!display || !adj_mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+	m_ctrl = display->ctrl[display->clk_master_idx].ctrl;
+
+	/* Only check the first panel */
+	dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
+	if (!dfps_caps.dfps_support) {
+		pr_err("dfps not supported by panel\n");
+		return -EINVAL;
+	}
+
+	/* Work on a per-controller copy; @adj_mode stays full-display. */
+	per_ctrl_mode = *adj_mode;
+	adjust_timing_by_ctrl_count(display, &per_ctrl_mode);
+
+	if (!dsi_display_is_seamless_dfps_possible(display,
+			&per_ctrl_mode, dfps_caps.type)) {
+		pr_err("seamless dynamic fps not supported for mode\n");
+		return -EINVAL;
+	}
+
+	/* TODO: Remove this direct reference to the dsi_ctrl */
+	clk_hz = m_ctrl->clk_info.link_clks.pixel_clk_rate;
+	timing = &per_ctrl_mode.timing;
+
+	switch (dfps_caps.type) {
+	case DSI_DFPS_IMMEDIATE_VFP:
+		rc = dsi_display_dfps_calc_front_porch(
+				clk_hz,
+				timing->refresh_rate,
+				DSI_H_TOTAL(timing),
+				DSI_V_TOTAL(timing),
+				timing->v_front_porch,
+				&adj_mode->timing.v_front_porch);
+		break;
+
+	case DSI_DFPS_IMMEDIATE_HFP:
+		rc = dsi_display_dfps_calc_front_porch(
+				clk_hz,
+				timing->refresh_rate,
+				DSI_V_TOTAL(timing),
+				DSI_H_TOTAL(timing),
+				timing->h_front_porch,
+				&adj_mode->timing.h_front_porch);
+		/* HFP was computed per controller; scale back to full width */
+		if (!rc)
+			adj_mode->timing.h_front_porch *= display->ctrl_count;
+		break;
+
+	default:
+		pr_err("Unsupported DFPS mode %d\n", dfps_caps.type);
+		rc = -ENOTSUPP;
+	}
+
+	return rc;
+}
+
+/*
+ * Check whether switching to @adj_mode can be done seamlessly; currently
+ * the only seamless transition supported is dynamic fps.  On success the
+ * DFPS and VBLANK_PRE_MODESET flags are set on @adj_mode.
+ *
+ * NOTE(review): the return type is bool but the value returned is the int
+ * rc from dsi_display_get_dfps_timing(), and the caller stores it in an
+ * int and treats nonzero as failure.  The implicit int->bool conversion
+ * keeps the truthiness correct, but the signature is misleading —
+ * consider returning int.
+ */
+static bool dsi_display_validate_mode_seamless(struct dsi_display *display,
+		struct dsi_display_mode *adj_mode)
+{
+	int rc = 0;
+
+	if (!display || !adj_mode) {
+		pr_err("Invalid params\n");
+		return false;
+	}
+
+	/* Currently the only seamless transition is dynamic fps */
+	rc = dsi_display_get_dfps_timing(display, adj_mode);
+	if (rc) {
+		pr_debug("Dynamic FPS not supported for seamless\n");
+	} else {
+		pr_debug("Mode switch is seamless Dynamic FPS\n");
+		adj_mode->flags |= DSI_MODE_FLAG_DFPS |
+				DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+	}
+
+	return rc;
+}
+
+/*
+ * Apply @mode to the display: fetch the host config from the first panel,
+ * run the dfps update when the mode requests it, then push the config to
+ * every controller.  Returns 0 on success, negative error code otherwise.
+ */
+static int dsi_display_set_mode_sub(struct dsi_display *display,
+		struct dsi_display_mode *mode,
+		u32 flags)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	rc = dsi_panel_get_host_cfg_for_mode(display->panel[0],
+			mode,
+			&display->config);
+	if (rc) {
+		pr_err("[%s] failed to get host config for mode, rc=%d\n",
+				display->name, rc);
+		goto error;
+	}
+
+	/* The lane map comes from DT parsing, not from the panel config. */
+	memcpy(&display->config.lane_map, &display->lane_map,
+			sizeof(display->lane_map));
+
+	if (mode->flags & DSI_MODE_FLAG_DFPS) {
+		rc = dsi_display_dfps_update(display, mode);
+		if (rc) {
+			pr_err("[%s]DSI dfps update failed, rc=%d\n",
+					display->name, rc);
+			goto error;
+		}
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_update_host_config(ctrl->ctrl, &display->config,
+				mode->flags);
+		if (rc) {
+			pr_err("[%s] failed to update ctrl config, rc=%d\n",
+					display->name, rc);
+			goto error;
+		}
+
+	}
+error:
+	return rc;
+}
+
+/**
+ * _dsi_display_dev_init - initializes the display device
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ * @display: Handle to the display
+ * Returns: Zero on success
+ */
+static int _dsi_display_dev_init(struct dsi_display *display)
+{
+	int rc;
+
+	if (!display) {
+		pr_err("invalid display\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_parse_dt(display);
+	if (rc) {
+		pr_err("[%s] failed to parse dt, rc=%d\n", display->name, rc);
+		goto done;
+	}
+
+	rc = dsi_display_res_init(display);
+	if (rc)
+		pr_err("[%s] failed to initialize resources, rc=%d\n",
+		       display->name, rc);
+done:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/**
+ * _dsi_display_dev_deinit - deinitializes the display device
+ * All the resources acquired during device init will be released.
+ * @display: Handle to the display
+ * Returns: Zero on success
+ */
+static int _dsi_display_dev_deinit(struct dsi_display *display)
+{
+	int rc;
+
+	if (!display) {
+		pr_err("invalid display\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+	rc = dsi_display_res_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinitialize resource, rc=%d\n",
+		       display->name, rc);
+	mutex_unlock(&display->display_lock);
+
+	return rc;
+}
+
+/*
+ * _dsi_display_config_ctrl_for_splash
+ *
+ * Config ctrl engine for DSI display.
+ * @display: Handle to the display
+ * Returns: Zero on success
+ */
+static int _dsi_display_config_ctrl_for_splash(struct dsi_display *display)
+{
+	int rc;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	/* Enable the engine matching the configured panel operating mode. */
+	switch (display->config.panel_mode) {
+	case DSI_OP_VIDEO_MODE:
+		rc = dsi_display_vid_engine_enable(display);
+		if (rc)
+			pr_err("[%s]failed to enable video engine, rc=%d\n",
+			       display->name, rc);
+		break;
+	case DSI_OP_CMD_MODE:
+		rc = dsi_display_cmd_engine_enable(display);
+		if (rc)
+			pr_err("[%s]failed to enable cmd engine, rc=%d\n",
+			       display->name, rc);
+		break;
+	default:
+		pr_err("[%s] Invalid configuration\n", display->name);
+		rc = -EINVAL;
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * dsi_display_bind - bind dsi device with controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ * Returns: Zero on success
+ */
+static int dsi_display_bind(struct device *dev,
+		struct device *master,
+		void *data)
+{
+	struct dsi_display_ctrl *display_ctrl;
+	struct drm_device *drm;
+	struct dsi_display *display;
+	struct platform_device *pdev = to_platform_device(dev);
+	int i, j, rc = 0;
+
+	if (!dev || !pdev || !master) {
+		pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+				dev, pdev, master);
+		return -EINVAL;
+	}
+
+	drm = dev_get_drvdata(master);
+	display = platform_get_drvdata(pdev);
+	if (!drm || !display) {
+		pr_err("invalid param(s), drm %pK, display %pK\n",
+				drm, display);
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_display_debugfs_init(display);
+	if (rc) {
+		pr_err("[%s] debugfs init failed, rc=%d\n", display->name, rc);
+		goto error;
+	}
+
+	/* Bring up each controller driver and its PHY driver state. */
+	for (i = 0; i < display->ctrl_count; i++) {
+		display_ctrl = &display->ctrl[i];
+
+		rc = dsi_ctrl_drv_init(display_ctrl->ctrl, display->root);
+		if (rc) {
+			pr_err("[%s] failed to initialize ctrl[%d], rc=%d\n",
+					display->name, i, rc);
+			goto error_ctrl_deinit;
+		}
+
+		rc = dsi_phy_drv_init(display_ctrl->phy);
+		if (rc) {
+			pr_err("[%s] Failed to initialize phy[%d], rc=%d\n",
+					display->name, i, rc);
+			/* ctrl[i] was initialized; undo it before unwinding */
+			(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+			goto error_ctrl_deinit;
+		}
+	}
+
+	rc = dsi_display_mipi_host_init(display);
+	if (rc) {
+		pr_err("[%s] failed to initialize mipi host, rc=%d\n",
+				display->name, rc);
+		goto error_ctrl_deinit;
+	}
+
+	for (j = 0; j < display->panel_count; j++) {
+		rc = dsi_panel_drv_init(display->panel[j], &display->host);
+		if (rc) {
+			/* probe defer is expected; don't log it as an error */
+			if (rc != -EPROBE_DEFER)
+				SDE_ERROR(
+					"[%s]Failed to init panel driver, rc=%d\n",
+					display->name, rc);
+			goto error_panel_deinit;
+		}
+	}
+
+	rc = dsi_panel_get_mode_count(display->panel[0],
+			&display->num_of_modes);
+	if (rc) {
+		pr_err("[%s] failed to get mode count, rc=%d\n",
+				display->name, rc);
+		goto error_panel_deinit;
+	}
+
+	display->drm_dev = drm;
+	/* success path: the error label only drops the lock at this point */
+	goto error;
+
+error_panel_deinit:
+	for (j--; j >= 0; j--)
+		(void)dsi_panel_drv_deinit(display->panel[j]);
+	(void)dsi_display_mipi_host_deinit(display);
+error_ctrl_deinit:
+	for (i = i - 1; i >= 0; i--) {
+		display_ctrl = &display->ctrl[i];
+		(void)dsi_phy_drv_deinit(display_ctrl->phy);
+		(void)dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+	}
+	(void)dsi_display_debugfs_deinit(display);
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/**
+ * dsi_display_unbind - unbind dsi from controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ */
+static void dsi_display_unbind(struct device *dev,
+		struct device *master, void *data)
+{
+	struct dsi_display_ctrl *display_ctrl;
+	struct dsi_display *display;
+	struct platform_device *pdev = to_platform_device(dev);
+	int i, rc = 0;
+
+	if (!dev || !pdev) {
+		pr_err("invalid param(s)\n");
+		return;
+	}
+
+	display = platform_get_drvdata(pdev);
+	if (!display) {
+		pr_err("invalid display\n");
+		return;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	/* Best-effort teardown: log failures but keep going. */
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_drv_deinit(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] failed to deinit panel driver, rc=%d\n",
+					display->name, rc);
+	}
+
+	rc = dsi_display_mipi_host_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinit mipi hosts, rc=%d\n",
+				display->name,
+				rc);
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		display_ctrl = &display->ctrl[i];
+
+		rc = dsi_phy_drv_deinit(display_ctrl->phy);
+		if (rc)
+			pr_err("[%s] failed to deinit phy%d driver, rc=%d\n",
+					display->name, i, rc);
+
+		rc = dsi_ctrl_drv_deinit(display_ctrl->ctrl);
+		if (rc)
+			pr_err("[%s] failed to deinit ctrl%d driver, rc=%d\n",
+					display->name, i, rc);
+	}
+	(void)dsi_display_debugfs_deinit(display);
+
+	mutex_unlock(&display->display_lock);
+}
+
+/* Component-framework glue: the master DRM device binds/unbinds us. */
+static const struct component_ops dsi_display_comp_ops = {
+	.bind = dsi_display_bind,
+	.unbind = dsi_display_unbind,
+};
+
+/* Platform driver for the DSI display; probe/remove are defined below. */
+static struct platform_driver dsi_display_driver = {
+	.probe = dsi_display_dev_probe,
+	.remove = dsi_display_dev_remove,
+	.driver = {
+		.name = "msm-dsi-display",
+		.of_match_table = dsi_display_dt_match,
+	},
+};
+
+/**
+ * dsi_display_dev_probe() - platform probe for a DSI display
+ * @pdev: platform device
+ *
+ * Allocates the display, reads basic DT properties, adds the display to
+ * the global list and, when the display is active, initializes resources
+ * and registers with the component framework.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int dsi_display_dev_probe(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct dsi_display *display;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+	if (!display)
+		return -ENOMEM;
+
+	display->name = of_get_property(pdev->dev.of_node, "label", NULL);
+
+	display->is_active = of_property_read_bool(pdev->dev.of_node,
+						"qcom,dsi-display-active");
+
+	display->display_type = of_get_property(pdev->dev.of_node,
+						"qcom,display-type", NULL);
+	if (!display->display_type)
+		display->display_type = "unknown";
+
+	mutex_init(&display->display_lock);
+
+	display->pdev = pdev;
+	platform_set_drvdata(pdev, display);
+	mutex_lock(&dsi_display_list_lock);
+	list_add_tail(&display->list, &dsi_display_list);
+	mutex_unlock(&dsi_display_list_lock);
+
+	if (display->is_active) {
+		main_display = display;
+		rc = _dsi_display_dev_init(display);
+		if (rc) {
+			pr_err("device init failed, rc=%d\n", rc);
+			/*
+			 * Undo registration so no stale entry is left on the
+			 * global list and no dangling drvdata remains.
+			 */
+			mutex_lock(&dsi_display_list_lock);
+			list_del(&display->list);
+			mutex_unlock(&dsi_display_list_lock);
+			platform_set_drvdata(pdev, NULL);
+			return rc;
+		}
+
+		rc = component_add(&pdev->dev, &dsi_display_comp_ops);
+		if (rc)
+			pr_err("component add failed, rc=%d\n", rc);
+	}
+	return rc;
+}
+
+/**
+ * dsi_display_dev_remove() - platform remove for a DSI display
+ * @pdev: platform device
+ *
+ * Deinitializes resources, removes the display from the global list and
+ * releases the devm allocations made at probe/parse time.
+ *
+ * Return: 0 on success, -EINVAL when @pdev is NULL.
+ */
+int dsi_display_dev_remove(struct platform_device *pdev)
+{
+	int rc = 0, i;
+	struct dsi_display *display;
+	struct dsi_display *pos, *tmp;
+
+	if (!pdev) {
+		pr_err("Invalid device\n");
+		return -EINVAL;
+	}
+
+	display = platform_get_drvdata(pdev);
+
+	(void)_dsi_display_dev_deinit(display);
+
+	mutex_lock(&dsi_display_list_lock);
+	list_for_each_entry_safe(pos, tmp, &dsi_display_list, list) {
+		if (pos == display) {
+			list_del(&display->list);
+			break;
+		}
+	}
+	mutex_unlock(&dsi_display_list_lock);
+
+	platform_set_drvdata(pdev, NULL);
+	/* Drop the DT node references taken in dsi_display_parse_dt(). */
+	if (display->panel_of)
+		for (i = 0; i < display->panel_count; i++)
+			if (display->panel_of[i])
+				of_node_put(display->panel_of[i]);
+	/* Explicit frees; devres would also release these on device detach. */
+	devm_kfree(&pdev->dev, display->panel_of);
+	devm_kfree(&pdev->dev, display->panel);
+	devm_kfree(&pdev->dev, display->bridge_idx);
+	devm_kfree(&pdev->dev, display);
+	return rc;
+}
+
+/* Count every display currently registered on the global list. */
+int dsi_display_get_num_of_displays(void)
+{
+	struct dsi_display *pos;
+	int total = 0;
+
+	mutex_lock(&dsi_display_list_lock);
+	list_for_each_entry(pos, &dsi_display_list, list)
+		total++;
+	mutex_unlock(&dsi_display_list_lock);
+
+	return total;
+}
+
+/*
+ * Collect up to @max_display_count active displays into @display_array.
+ * Returns the number of displays stored (0 on invalid arguments).
+ */
+int dsi_display_get_active_displays(void **display_array, u32 max_display_count)
+{
+	struct dsi_display *pos;
+	int count = 0;
+
+	if (!display_array || !max_display_count) {
+		/* Match original behavior: only log for a NULL array. */
+		if (!display_array)
+			pr_err("invalid params\n");
+		return 0;
+	}
+
+	mutex_lock(&dsi_display_list_lock);
+	list_for_each_entry(pos, &dsi_display_list, list) {
+		if (count >= max_display_count) {
+			pr_err("capping display count to %d\n", count);
+			break;
+		}
+		if (pos->is_active)
+			display_array[count++] = pos;
+	}
+	mutex_unlock(&dsi_display_list_lock);
+
+	return count;
+}
+
+/* Look up a display by its label; the last registered match wins. */
+struct dsi_display *dsi_display_get_display_by_name(const char *name)
+{
+	struct dsi_display *found = NULL;
+	struct dsi_display *pos;
+
+	mutex_lock(&dsi_display_list_lock);
+	/* Deliberately scan the whole list instead of breaking early. */
+	list_for_each_entry(pos, &dsi_display_list, list)
+		if (strcmp(name, pos->name) == 0)
+			found = pos;
+	mutex_unlock(&dsi_display_list_lock);
+
+	return found;
+}
+
+/* Update the display's active flag under the display lock. */
+void dsi_display_set_active_state(struct dsi_display *display, bool is_active)
+{
+	mutex_lock(&display->display_lock);
+	display->is_active = is_active;
+	mutex_unlock(&display->display_lock);
+}
+
+/**
+ * dsi_display_drm_bridge_init() - create the DRM bridge chain
+ * @display: Handle to the display.
+ * @enc: encoder the bridges attach to.
+ *
+ * Creates the primary DSI bridge and, for every panel configured as a DBA
+ * panel, chains an additional DBA bridge behind the previous one.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dsi_display_drm_bridge_init(struct dsi_display *display,
+		struct drm_encoder *enc)
+{
+	int rc = 0, i;
+	struct dsi_bridge *bridge;
+	struct drm_bridge *dba_bridge;
+	struct dba_bridge_init init_data;
+	struct drm_bridge *precede_bridge;
+	struct msm_drm_private *priv = NULL;
+	struct dsi_panel *panel;
+	u32 *bridge_idx;
+	u32 num_of_lanes = 0;
+
+	if (!display || !display->drm_dev || !enc) {
+		pr_err("invalid param(s)\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+	priv = display->drm_dev->dev_private;
+
+	if (!priv) {
+		SDE_ERROR("Private data is not present\n");
+		rc = -EINVAL;
+		goto out;
+	}
+
+	/* Already initialized: logged, but treated as success (rc stays 0). */
+	if (display->bridge) {
+		SDE_ERROR("display is already initialize\n");
+		goto out;
+	}
+
+	bridge = dsi_drm_bridge_init(display, display->drm_dev, enc);
+	if (IS_ERR_OR_NULL(bridge)) {
+		rc = PTR_ERR(bridge);
+		SDE_ERROR("[%s] brige init failed, %d\n", display->name, rc);
+		goto out;
+	}
+
+	/* Slot 0 holds the primary DSI bridge; DBA bridges chain after it. */
+	display->bridge = bridge;
+	priv->bridges[priv->num_bridges++] = &bridge->base;
+	precede_bridge = &bridge->base;
+
+	if (display->panel_count >= MAX_BRIDGES - 1) {
+		SDE_ERROR("too many bridge chips=%d\n", display->panel_count);
+		goto error_bridge;
+	}
+
+	for (i = 0; i < display->panel_count; i++) {
+		panel = display->panel[i];
+		if (panel && display->bridge_idx &&
+		    panel->dba_config.dba_panel) {
+			bridge_idx = display->bridge_idx + i;
+			num_of_lanes = 0;
+			memset(&init_data, 0x00, sizeof(init_data));
+			/* Count the active data lanes from the bitmask. */
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_0)
+				num_of_lanes++;
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_1)
+				num_of_lanes++;
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_2)
+				num_of_lanes++;
+			if (panel->host_config.data_lanes & DSI_DATA_LANE_3)
+				num_of_lanes++;
+			init_data.client_name = DSI_DBA_CLIENT_NAME;
+			init_data.chip_name = panel->dba_config.bridge_name;
+			init_data.id = *bridge_idx;
+			init_data.display = display;
+			init_data.hdmi_mode = panel->dba_config.hdmi_mode;
+			init_data.num_of_input_lanes = num_of_lanes;
+			init_data.precede_bridge = precede_bridge;
+			init_data.panel_count = display->panel_count;
+			init_data.cont_splash_enabled =
+				display->cont_splash_enabled;
+			dba_bridge = dba_bridge_init(display->drm_dev, enc,
+					&init_data);
+			if (IS_ERR_OR_NULL(dba_bridge)) {
+				rc = PTR_ERR(dba_bridge);
+				SDE_ERROR("[%s:%d] dba brige init failed, %d\n",
+					init_data.chip_name, init_data.id, rc);
+				goto error_dba_bridge;
+			}
+			priv->bridges[priv->num_bridges++] = dba_bridge;
+			precede_bridge = dba_bridge;
+		}
+	}
+
+	goto out;
+
+error_dba_bridge:
+	/*
+	 * NOTE(review): cleans every slot 1..MAX_BRIDGES-1, which assumes no
+	 * other code populated priv->bridges — confirm against other users.
+	 */
+	for (i = 1; i < MAX_BRIDGES; i++) {
+		dba_bridge_cleanup(priv->bridges[i]);
+		priv->bridges[i] = NULL;
+	}
+error_bridge:
+	dsi_drm_bridge_cleanup(display->bridge);
+	display->bridge = NULL;
+	priv->bridges[0] = NULL;
+	priv->num_bridges = 0;
+out:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/* Tear down the bridge chain built by dsi_display_drm_bridge_init(). */
+int dsi_display_drm_bridge_deinit(struct dsi_display *display)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	priv = display->drm_dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("Private data is not present\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	/* DBA bridges live in slots 1..MAX_BRIDGES-1; clean them first. */
+	for (i = 1; i < MAX_BRIDGES; i++) {
+		dba_bridge_cleanup(priv->bridges[i]);
+		priv->bridges[i] = NULL;
+	}
+
+	/* Slot 0 is the primary DSI bridge. */
+	dsi_drm_bridge_cleanup(display->bridge);
+	display->bridge = NULL;
+	priv->bridges[0] = NULL;
+	priv->num_bridges = 0;
+
+	mutex_unlock(&display->display_lock);
+	return 0;
+}
+
+/**
+ * dsi_display_get_info() - fill in msm_display_info for this display
+ * @info: output structure.
+ * @disp: opaque pointer to a struct dsi_display.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dsi_display_get_info(struct msm_display_info *info, void *disp)
+{
+	struct dsi_display *display;
+	struct dsi_panel_phy_props phy_props;
+	int i, rc;
+
+	if (!info || !disp) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+	display = disp;
+
+	mutex_lock(&display->display_lock);
+	/* Physical panel dimensions come from the first panel only. */
+	rc = dsi_panel_get_phy_props(display->panel[0], &phy_props);
+	if (rc) {
+		pr_err("[%s] failed to get panel phy props, rc=%d\n",
+				display->name, rc);
+		goto error;
+	}
+
+	info->intf_type = DRM_MODE_CONNECTOR_DSI;
+
+	info->num_of_h_tiles = display->ctrl_count;
+	for (i = 0; i < info->num_of_h_tiles; i++)
+		info->h_tile_instance[i] = display->ctrl[i].ctrl->index;
+
+	/*
+	 * h_tile_instance[2] = {0, 1} means DSI0 left(master), DSI1 right
+	 * h_tile_instance[2] = {1, 0} means DSI1 left(master), DSI0 right
+	 * So in case of split case and swap property is set, swap two DSIs.
+	 */
+	if (info->num_of_h_tiles > 1 && display->dsi_split_swap)
+		swap(info->h_tile_instance[0], info->h_tile_instance[1]);
+
+	info->is_connected = true;
+	info->width_mm = phy_props.panel_width_mm;
+	info->height_mm = phy_props.panel_height_mm;
+	/* NOTE(review): max resolution is hard-coded to 1920x1080 here. */
+	info->max_width = 1920;
+	info->max_height = 1080;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+	switch (display->panel[0]->mode.panel_mode) {
+	case DSI_OP_VIDEO_MODE:
+		info->capabilities |= MSM_DISPLAY_CAP_VID_MODE;
+		break;
+	case DSI_OP_CMD_MODE:
+		info->capabilities |= MSM_DISPLAY_CAP_CMD_MODE;
+		break;
+	default:
+		pr_err("unknwown dsi panel mode %d\n",
+				display->panel[0]->mode.panel_mode);
+		break;
+	}
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/**
+ * dsi_display_get_modes() - enumerate display modes, expanded by dfps rates
+ * @display: Handle to the display.
+ * @modes: output array, or NULL to query the count only.
+ * @count: in: capacity of @modes; out (when @modes is NULL): mode count.
+ *
+ * Each panel mode is duplicated once per supported dfps refresh rate, and
+ * horizontal timings are scaled up for split-DSI configurations.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dsi_display_get_modes(struct dsi_display *display,
+		struct dsi_display_mode *modes,
+		u32 *count)
+{
+	int rc = 0;
+	int i;
+	struct dsi_dfps_capabilities dfps_caps;
+	int num_dfps_rates;
+
+	if (!display || !count) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	rc = dsi_panel_get_dfps_caps(display->panel[0], &dfps_caps);
+	if (rc) {
+		pr_err("[%s] failed to get dfps caps from panel\n",
+				display->name);
+		goto error;
+	}
+
+	/* Without dfps each panel mode maps to exactly one reported mode. */
+	num_dfps_rates = !dfps_caps.dfps_support ? 1 :
+					dfps_caps.max_refresh_rate -
+					dfps_caps.min_refresh_rate + 1;
+
+	if (!modes) {
+		/* Inflate num_of_modes by fps in dfps */
+		*count = display->num_of_modes * num_dfps_rates;
+		goto error;
+	}
+
+	for (i = 0; i < *count; i++) {
+		/* Insert the dfps "sub-modes" between main panel modes */
+		int panel_mode_idx = i / num_dfps_rates;
+
+		rc = dsi_panel_get_mode(display->panel[0], panel_mode_idx,
+				modes);
+		if (rc) {
+			pr_err("[%s] failed to get mode from panel\n",
+					display->name);
+			goto error;
+		}
+
+		if (dfps_caps.dfps_support) {
+			modes->timing.refresh_rate = dfps_caps.min_refresh_rate
+					+ (i % num_dfps_rates);
+			modes->pixel_clk_khz = (DSI_H_TOTAL(&modes->timing) *
+					DSI_V_TOTAL(&modes->timing) *
+					modes->timing.refresh_rate) / 1000;
+		}
+
+		/* Split DSI: report full-display horizontal timings. */
+		if (display->ctrl_count > 1) { /* TODO: remove if */
+			modes->timing.h_active *= display->ctrl_count;
+			modes->timing.h_front_porch *= display->ctrl_count;
+			modes->timing.h_sync_width *= display->ctrl_count;
+			modes->timing.h_back_porch *= display->ctrl_count;
+			modes->timing.h_skew *= display->ctrl_count;
+			modes->pixel_clk_khz *= display->ctrl_count;
+		}
+
+		modes++;
+	}
+
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * dsi_display_validate_mode() - check @mode against panel, controller and
+ * PHY constraints.
+ *
+ * The mode is first narrowed to per-controller timings (split-DSI) before
+ * validation.  Seamless (dynamic) switch feasibility is only checked when
+ * the caller passes DSI_VALIDATE_FLAG_ALLOW_ADJUST and the mode itself is
+ * flagged seamless.
+ */
+int dsi_display_validate_mode(struct dsi_display *display,
+			      struct dsi_display_mode *mode,
+			      u32 flags)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+	struct dsi_display_mode adj_mode;
+
+	if (!display || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	/* Work on a copy: the caller's mode must not be modified. */
+	adj_mode = *mode;
+	adjust_timing_by_ctrl_count(display, &adj_mode);
+
+	rc = dsi_panel_validate_mode(display->panel[0], &adj_mode);
+	if (rc) {
+		pr_err("[%s] panel mode validation failed, rc=%d\n",
+		       display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_validate_timing(ctrl->ctrl, &adj_mode.timing);
+		if (rc) {
+			pr_err("[%s] ctrl mode validation failed, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+
+		rc = dsi_phy_validate_mode(ctrl->phy, &adj_mode.timing);
+		if (rc) {
+			pr_err("[%s] phy mode validation failed, rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+	}
+
+	/* NOTE(review): seamless check uses the caller's original mode,
+	 * not adj_mode -- confirm that is intentional. */
+	if ((flags & DSI_VALIDATE_FLAG_ALLOW_ADJUST) &&
+			(mode->flags & DSI_MODE_FLAG_SEAMLESS)) {
+		rc = dsi_display_validate_mode_seamless(display, mode);
+		if (rc) {
+			pr_err("[%s] seamless not possible rc=%d\n",
+			       display->name, rc);
+			goto error;
+		}
+	}
+
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * dsi_display_set_mode() - apply @mode to the display.
+ *
+ * The caller's mode is copied and converted to per-controller timings,
+ * validated for set-ability, and then programmed into the host config.
+ */
+int dsi_display_set_mode(struct dsi_display *display,
+			 struct dsi_display_mode *mode,
+			 u32 flags)
+{
+	struct dsi_display_mode per_ctrl_mode;
+	int rc;
+
+	if (!display || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	per_ctrl_mode = *mode;
+	adjust_timing_by_ctrl_count(display, &per_ctrl_mode);
+
+	rc = dsi_display_validate_mode_set(display, &per_ctrl_mode, flags);
+	if (rc) {
+		pr_err("[%s] mode cannot be set\n", display->name);
+	} else {
+		rc = dsi_display_set_mode_sub(display, &per_ctrl_mode, flags);
+		if (rc)
+			pr_err("[%s] failed to set mode\n", display->name);
+	}
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * dsi_display_set_tpg_state() - enable/disable the test pattern generator
+ * on every DSI controller of the display.
+ *
+ * NOTE(review): unlike the other entry points this does not take
+ * display_lock, and a mid-loop failure returns with earlier controllers
+ * already toggled while is_tpg_enabled is left unchanged -- confirm
+ * callers serialize and tolerate this.
+ */
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable)
+{
+	int rc = 0;
+	int i;
+	struct dsi_display_ctrl *ctrl;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		rc = dsi_ctrl_set_tpg_state(ctrl->ctrl, enable);
+		if (rc) {
+			pr_err("[%s] failed to set tpg state for host_%d\n",
+			       display->name, i);
+			goto error;
+		}
+	}
+
+	/* Only recorded once all controllers switched successfully. */
+	display->is_tpg_enabled = enable;
+error:
+	return rc;
+}
+
+/*
+ * dsi_display_prepare() - power up host and panel hardware.
+ *
+ * Bring-up order: panel pre-prepare -> ctrl supplies -> PHY supplies ->
+ * core clocks -> PHY sw-reset + enable -> ctrl init -> link clocks ->
+ * host engine -> panel prepare.  On any failure the error-label ladder
+ * unwinds exactly the steps already performed, in reverse order.
+ *
+ * Panel pre-prepare is skipped when continuous splash is active
+ * (presumably the bootloader already initialized the panel -- verify);
+ * i then stays 0, so the post-unprepare unwind loop is a no-op.
+ */
+int dsi_display_prepare(struct dsi_display *display)
+{
+	int rc = 0, i = 0, j = 0;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	if (!display->cont_splash_enabled) {
+		for (i = 0; i < display->panel_count; i++) {
+			rc = dsi_panel_pre_prepare(display->panel[i]);
+			if (rc) {
+				SDE_ERROR("[%s]pre-prepare failed, rc=%d\n",
+					display->name, rc);
+				goto error_panel_post_unprep;
+			}
+		}
+	}
+
+	rc = dsi_display_ctrl_power_on(display);
+	if (rc) {
+		pr_err("[%s] failed to power on dsi controllers, rc=%d\n",
+		       display->name, rc);
+		goto error_panel_post_unprep;
+	}
+
+	rc = dsi_display_phy_power_on(display);
+	if (rc) {
+		pr_err("[%s] failed to power on dsi phy, rc = %d\n",
+		       display->name, rc);
+		goto error_ctrl_pwr_off;
+	}
+
+	rc = dsi_display_ctrl_core_clk_on(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI core clocks, rc=%d\n",
+		       display->name, rc);
+		goto error_phy_pwr_off;
+	}
+
+	rc = dsi_display_phy_sw_reset(display);
+	if (rc) {
+		pr_err("[%s] failed to reset phy, rc=%d\n", display->name, rc);
+		goto error_ctrl_clk_off;
+	}
+
+	rc = dsi_display_phy_enable(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI PHY, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_clk_off;
+	}
+
+	rc = dsi_display_ctrl_init(display);
+	if (rc) {
+		pr_err("[%s] failed to setup DSI controller, rc=%d\n",
+		       display->name, rc);
+		goto error_phy_disable;
+	}
+
+	rc = dsi_display_ctrl_link_clk_on(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI link clocks, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_deinit;
+	}
+
+	rc = dsi_display_ctrl_host_enable(display);
+	if (rc) {
+		pr_err("[%s] failed to enable DSI host, rc=%d\n",
+		       display->name, rc);
+		goto error_ctrl_link_off;
+	}
+
+	for (j = 0; j < display->panel_count; j++) {
+		rc = dsi_panel_prepare(display->panel[j]);
+		if (rc) {
+			SDE_ERROR("[%s] panel prepare failed, rc=%d\n",
+				display->name, rc);
+			goto error_panel_unprep;
+		}
+	}
+	/* Success: jump past the unwind ladder straight to the unlock. */
+	goto error;
+
+error_panel_unprep:
+	/* j indexes the panel that failed; unwind the ones before it. */
+	for (j--; j >= 0; j--)
+		(void)dsi_panel_unprepare(display->panel[j]);
+	(void)dsi_display_ctrl_host_disable(display);
+error_ctrl_link_off:
+	(void)dsi_display_ctrl_link_clk_off(display);
+error_ctrl_deinit:
+	(void)dsi_display_ctrl_deinit(display);
+error_phy_disable:
+	(void)dsi_display_phy_disable(display);
+error_ctrl_clk_off:
+	(void)dsi_display_ctrl_core_clk_off(display);
+error_phy_pwr_off:
+	(void)dsi_display_phy_power_off(display);
+error_ctrl_pwr_off:
+	(void)dsi_display_ctrl_power_off(display);
+error_panel_post_unprep:
+	for (i--; i >= 0; i--)
+		(void)dsi_panel_post_unprepare(display->panel[i]);
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * dsi_display_enable() - turn on the panel(s) and the video/command engine.
+ *
+ * With continuous splash active, only reconfigures the controller state
+ * inherited from the bootloader and clears the splash flag -- panels are
+ * not re-enabled.  On engine-enable failure, every panel that was already
+ * enabled in this call is disabled again.
+ */
+int dsi_display_enable(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (display->cont_splash_enabled) {
+		_dsi_display_config_ctrl_for_splash(display);
+		display->cont_splash_enabled = false;
+		return 0;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_enable(display->panel[i]);
+		if (rc) {
+			SDE_ERROR("[%s] failed to enable DSI panel, rc=%d\n",
+				display->name, rc);
+			goto error_disable_panel;
+		}
+	}
+
+	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+		rc = dsi_display_vid_engine_enable(display);
+		if (rc) {
+			pr_err("[%s]failed to enable DSI video engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_panel;
+		}
+	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+		rc = dsi_display_cmd_engine_enable(display);
+		if (rc) {
+			pr_err("[%s]failed to enable DSI cmd engine, rc=%d\n",
+			       display->name, rc);
+			goto error_disable_panel;
+		}
+	} else {
+		pr_err("[%s] Invalid configuration\n", display->name);
+		rc = -EINVAL;
+		goto error_disable_panel;
+	}
+
+	/* Success path: skip the rollback below. */
+	goto error;
+
+error_disable_panel:
+	/* i is either the failing panel index or panel_count (engine
+	 * failure); either way this disables exactly the enabled panels. */
+	for (i--; i >= 0; i--)
+		(void)dsi_panel_disable(display->panel[i]);
+error:
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * dsi_display_post_enable() - send post-enable commands to every panel
+ * after pixel transmission has started.
+ *
+ * Best effort: all panels are attempted even if one fails; the return
+ * value reflects the last panel processed.
+ */
+int dsi_display_post_enable(struct dsi_display *display)
+{
+	int rc = 0;
+	u32 idx;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (idx = 0; idx < display->panel_count; idx++) {
+		rc = dsi_panel_post_enable(display->panel[idx]);
+		if (rc)
+			SDE_ERROR("[%s] panel post-enable failed, rc=%d\n",
+					display->name, rc);
+	}
+
+	mutex_unlock(&display->display_lock);
+
+	return rc;
+}
+
+/*
+ * dsi_display_pre_disable() - send pre-disable commands to every panel
+ * while pixel transmission is still running.
+ *
+ * Best effort: all panels are attempted even if one fails; the return
+ * value reflects the last panel processed.
+ */
+int dsi_display_pre_disable(struct dsi_display *display)
+{
+	int rc = 0;
+	u32 idx;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	for (idx = 0; idx < display->panel_count; idx++) {
+		rc = dsi_panel_pre_disable(display->panel[idx]);
+		if (rc)
+			SDE_ERROR("[%s] panel pre-disable failed, rc=%d\n",
+					display->name, rc);
+	}
+
+	mutex_unlock(&display->display_lock);
+
+	return rc;
+}
+
+/*
+ * dsi_display_disable() - stop pixel transmission and disable the panel(s).
+ *
+ * Best-effort teardown: each step logs its own failure and execution
+ * continues.  NOTE(review): rc is overwritten by each step, so the return
+ * value only reflects the last operation attempted -- confirm callers do
+ * not rely on intermediate failures being reported.
+ */
+int dsi_display_disable(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	/* Ensure the link is out of any low-power state before teardown. */
+	rc = dsi_display_wake_up(display);
+	if (rc)
+		pr_err("[%s] display wake up failed, rc=%d\n",
+		       display->name, rc);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_disable(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] failed to disable DSI panel, rc=%d\n",
+				display->name, rc);
+	}
+
+	if (display->config.panel_mode == DSI_OP_VIDEO_MODE) {
+		rc = dsi_display_vid_engine_disable(display);
+		if (rc)
+			pr_err("[%s]failed to disable DSI vid engine, rc=%d\n",
+			       display->name, rc);
+	} else if (display->config.panel_mode == DSI_OP_CMD_MODE) {
+		rc = dsi_display_cmd_engine_disable(display);
+		if (rc)
+			pr_err("[%s]failed to disable DSI cmd engine, rc=%d\n",
+			       display->name, rc);
+	} else {
+		pr_err("[%s] Invalid configuration\n", display->name);
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * dsi_display_unprepare() - power off host and panel hardware.
+ *
+ * Reverse of dsi_display_prepare(): panel unprepare -> host disable ->
+ * link clocks off -> ctrl deinit -> PHY disable -> core clocks off ->
+ * PHY supplies off -> ctrl supplies off -> panel post-unprepare.
+ * Every step is best-effort: failures are logged and teardown continues,
+ * so the returned rc only reflects the last failing step.
+ */
+int dsi_display_unprepare(struct dsi_display *display)
+{
+	int rc = 0, i;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	/* Ensure the link is out of any low-power state before teardown. */
+	rc = dsi_display_wake_up(display);
+	if (rc)
+		pr_err("[%s] display wake up failed, rc=%d\n",
+		       display->name, rc);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_unprepare(display->panel[i]);
+		if (rc)
+			SDE_ERROR("[%s] panel unprepare failed, rc=%d\n",
+				display->name, rc);
+	}
+
+	rc = dsi_display_ctrl_host_disable(display);
+	if (rc)
+		pr_err("[%s] failed to disable DSI host, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_link_clk_off(display);
+	if (rc)
+		pr_err("[%s] failed to disable Link clocks, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_deinit(display);
+	if (rc)
+		pr_err("[%s] failed to deinit controller, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_phy_disable(display);
+	if (rc)
+		pr_err("[%s] failed to disable DSI PHY, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_core_clk_off(display);
+	if (rc)
+		pr_err("[%s] failed to disable DSI clocks, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_phy_power_off(display);
+	if (rc)
+		pr_err("[%s] failed to power off PHY, rc=%d\n",
+		       display->name, rc);
+
+	rc = dsi_display_ctrl_power_off(display);
+	if (rc)
+		pr_err("[%s] failed to power DSI vregs, rc=%d\n",
+		       display->name, rc);
+
+	for (i = 0; i < display->panel_count; i++) {
+		rc = dsi_panel_post_unprepare(display->panel[i]);
+		if (rc)
+			pr_err("[%s] panel post-unprepare failed, rc=%d\n",
+			       display->name, rc);
+	}
+
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * dsi_dsiplay_setup_splash_resource() - claim link-clock and PHY power
+ * state for every controller so a splash screen lit by the bootloader is
+ * not reset or glitched during handoff.
+ *
+ * Returns on the first controller that fails; earlier controllers keep
+ * their new state (matches original behavior).
+ */
+int dsi_dsiplay_setup_splash_resource(struct dsi_display *display)
+{
+	struct dsi_display_ctrl *ctrl;
+	int rc;
+	int i;
+
+	if (!display)
+		return -EINVAL;
+
+	for (i = 0; i < display->ctrl_count; i++) {
+		ctrl = &display->ctrl[i];
+		if (!ctrl)
+			return -EINVAL;
+
+		/* set dsi ctrl power state */
+		rc = dsi_ctrl_set_power_state(ctrl->ctrl,
+				DSI_CTRL_POWER_LINK_CLK_ON);
+		if (rc) {
+			pr_err("%s:fail to call dsi_ctrl_set_power_state\n",
+					__func__);
+			return rc;
+		}
+
+		/* set dsi phy power state */
+		rc = dsi_phy_set_power_state(ctrl->phy, true);
+		if (rc) {
+			pr_err("%s:fail to call dsi_phy_set_power_state\n",
+					__func__);
+			return rc;
+		}
+	}
+
+	return 0;
+}
+
+/* Module init: register the PHY and ctrl platform drivers first so the
+ * display driver's probe can resolve them. */
+static int __init dsi_display_register(void)
+{
+	dsi_phy_drv_register();
+	dsi_ctrl_drv_register();
+	return platform_driver_register(&dsi_display_driver);
+}
+
+/* Module exit: unregister in reverse order of dsi_display_register(). */
+static void __exit dsi_display_unregister(void)
+{
+	platform_driver_unregister(&dsi_display_driver);
+	dsi_ctrl_drv_unregister();
+	dsi_phy_drv_unregister();
+}
+
+module_init(dsi_display_register);
+module_exit(dsi_display_unregister);
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
new file mode 100644
index 000000000000..d285779e07c3
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display.h
@@ -0,0 +1,359 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DISPLAY_H_
+#define _DSI_DISPLAY_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "dsi_defs.h"
+#include "dsi_ctrl.h"
+#include "dsi_phy.h"
+#include "dsi_panel.h"
+
+#define MAX_DSI_CTRLS_PER_DISPLAY 2
+
+/*
+ * DSI Validate Mode modifiers
+ * @DSI_VALIDATE_FLAG_ALLOW_ADJUST: Allow mode validation to also do fixup
+ */
+#define DSI_VALIDATE_FLAG_ALLOW_ADJUST 0x1
+
+/**
+ * enum dsi_display_type - enumerates DSI display types
+ * @DSI_DISPLAY_SINGLE: A panel connected on a single DSI interface.
+ * @DSI_DISPLAY_EXT_BRIDGE: A bridge is connected between panel and DSI host.
+ * It utilizes a single DSI interface.
+ * @DSI_DISPLAY_SPLIT: A panel that utilizes more than one DSI
+ * interfaces.
+ * @DSI_DISPLAY_SPLIT_EXT_BRIDGE: A bridge is present between panel and DSI
+ * host. It utilizes more than one DSI interface.
+ */
+enum dsi_display_type {
+ DSI_DISPLAY_SINGLE = 0,
+ DSI_DISPLAY_EXT_BRIDGE,
+ DSI_DISPLAY_SPLIT,
+ DSI_DISPLAY_SPLIT_EXT_BRIDGE,
+ DSI_DISPLAY_MAX,
+};
+
+/**
+ * struct dsi_display_ctrl - dsi ctrl/phy information for the display
+ * @ctrl: Handle to the DSI controller device.
+ * @ctrl_of_node: pHandle to the DSI controller device.
+ * @dsi_ctrl_idx: DSI controller instance id.
+ * @power_state: Current power state of the DSI controller.
+ * @phy: Handle to the DSI PHY device.
+ * @phy_of_node: pHandle to the DSI PHY device.
+ * @phy_enabled: PHY power status.
+ */
+struct dsi_display_ctrl {
+ /* controller info */
+ struct dsi_ctrl *ctrl;
+ struct device_node *ctrl_of_node;
+ u32 dsi_ctrl_idx;
+
+ enum dsi_power_state power_state;
+
+ /* phy info */
+ struct msm_dsi_phy *phy;
+ struct device_node *phy_of_node;
+
+ bool phy_enabled;
+};
+
+/**
+ * struct dsi_display_clk_info - dsi display clock source information
+ * @src_clks: Source clocks for DSI display.
+ * @mux_clks: Mux clocks used for DFPS.
+ * @shadow_clks: Used for DFPS.
+ */
+struct dsi_display_clk_info {
+ struct dsi_clk_link_set src_clks;
+ struct dsi_clk_link_set mux_clks;
+ struct dsi_clk_link_set shadow_clks;
+};
+
+/**
+ * struct dsi_display - dsi display information
+ * @pdev: Pointer to platform device.
+ * @drm_dev: DRM device associated with the display.
+ * @name: Name of the display.
+ * @display_type: Display type as defined in device tree.
+ * @list: List pointer.
+ * @is_active: Is display active.
+ * @display_lock: Mutex for dsi_display interface.
+ * @ctrl_count: Number of DSI interfaces required by panel.
+ * @ctrl: Controller information for DSI display.
+ * @panel_count: Number of DSI panel.
+ * @panel: Handle to DSI panel.
+ * @panel_of: pHandle to DSI panel, it's an array with panel_count
+ * of struct device_node pointers.
+ * @bridge_idx: Bridge chip index for each panel_of.
+ * @type: DSI display type.
+ * @clk_master_idx: The master controller for controlling clocks. This is an
+ * index into the ctrl[MAX_DSI_CTRLS_PER_DISPLAY] array.
+ * @cmd_master_idx: The master controller for sending DSI commands to panel.
+ * @video_master_idx: The master controller for enabling video engine.
+ * @clock_info: Clock sourcing for DSI display.
+ * @lane_map: Lane mapping between DSI host and Panel.
+ * @num_of_modes: Number of modes supported by display.
+ * @is_tpg_enabled: TPG state.
+ * @host: DRM MIPI DSI Host.
+ * @connector: Pointer to DRM connector object.
+ * @bridge: Pointer to DRM bridge object.
+ * @cmd_engine_refcount: Reference count enforcing single instance of cmd eng
+ * @root: Debugfs root directory
+ * @cont_splash_enabled: Early splash status.
+ * @dsi_split_swap: Swap dsi output in split mode.
+ * @display_topology: user requested display topology
+ */
+struct dsi_display {
+ struct platform_device *pdev;
+ struct drm_device *drm_dev;
+
+ const char *name;
+ const char *display_type;
+ struct list_head list;
+ bool is_active;
+ struct mutex display_lock;
+
+ u32 ctrl_count;
+ struct dsi_display_ctrl ctrl[MAX_DSI_CTRLS_PER_DISPLAY];
+
+ /* panel info */
+ u32 panel_count;
+ struct dsi_panel **panel;
+ struct device_node **panel_of;
+ u32 *bridge_idx;
+
+ enum dsi_display_type type;
+ u32 clk_master_idx;
+ u32 cmd_master_idx;
+ u32 video_master_idx;
+
+ struct dsi_display_clk_info clock_info;
+ struct dsi_host_config config;
+ struct dsi_lane_mapping lane_map;
+ u32 num_of_modes;
+ bool is_tpg_enabled;
+
+ struct mipi_dsi_host host;
+ struct dsi_bridge *bridge;
+ u32 cmd_engine_refcount;
+
+ /* DEBUG FS */
+ struct dentry *root;
+
+ bool cont_splash_enabled;
+ bool dsi_split_swap;
+ u32 display_topology;
+};
+
+int dsi_display_dev_probe(struct platform_device *pdev);
+int dsi_display_dev_remove(struct platform_device *pdev);
+
+/**
+ * dsi_display_get_num_of_displays() - returns number of display devices
+ * supported.
+ *
+ * Return: number of displays.
+ */
+int dsi_display_get_num_of_displays(void);
+
+/**
+ * dsi_display_get_active_displays - returns pointers for active display devices
+ * @display_array: Pointer to display array to be filled
+ * @max_display_count: Size of display_array
+ * @Returns: Number of display entries filled
+ */
+int dsi_display_get_active_displays(void **display_array,
+ u32 max_display_count);
+
+/**
+ * dsi_display_get_display_by_name()- finds display by name
+ * @name:	Name of the display.
+ *
+ * Return: handle to the display or error code.
+ */
+struct dsi_display *dsi_display_get_display_by_name(const char *name);
+
+/**
+ * dsi_display_set_active_state() - sets the state of the display
+ * @display: Handle to display.
+ * @is_active: state
+ */
+void dsi_display_set_active_state(struct dsi_display *display, bool is_active);
+
+/**
+ * dsi_display_drm_bridge_init() - initializes DRM bridge object for DSI
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ * display.
+ *
+ * Return: error code.
+ */
+int dsi_display_drm_bridge_init(struct dsi_display *display,
+ struct drm_encoder *enc);
+
+/**
+ * dsi_display_drm_bridge_deinit() - destroys DRM bridge for the display
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int dsi_display_drm_bridge_deinit(struct dsi_display *display);
+
+/**
+ * dsi_display_get_info() - returns the display properties
+ * @info: Pointer to the structure where info is stored.
+ * @disp: Handle to the display.
+ *
+ * Return: error code.
+ */
+int dsi_display_get_info(struct msm_display_info *info, void *disp);
+
+/**
+ * dsi_display_get_modes() - get modes supported by display
+ * @display: Handle to display.
+ * @modes:      Pointer to array of modes. Memory allocated should be
+ * big enough to store (count * struct dsi_display_mode)
+ * elements. If modes pointer is NULL, number of modes will
+ * be stored in the memory pointed to by count.
+ * @count: If modes is NULL, number of modes will be stored. If
+ * not, mode information will be copied (number of modes
+ * copied will be equal to *count).
+ *
+ * Return: error code.
+ */
+int dsi_display_get_modes(struct dsi_display *display,
+ struct dsi_display_mode *modes,
+ u32 *count);
+
+/**
+ * dsi_display_validate_mode() - validates if mode is supported by display
+ * @display: Handle to display.
+ * @mode: Mode to be validated.
+ * @flags: Modifier flags.
+ *
+ * Return: 0 if supported or error code.
+ */
+int dsi_display_validate_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags);
+
+/**
+ * dsi_display_set_mode() - Set mode on the display.
+ * @display: Handle to display.
+ * @mode: mode to be set.
+ * @flags: Modifier flags.
+ *
+ * Return: error code.
+ */
+int dsi_display_set_mode(struct dsi_display *display,
+ struct dsi_display_mode *mode,
+ u32 flags);
+
+/**
+ * dsi_display_prepare() - prepare display
+ * @display: Handle to display.
+ *
+ * Prepare will perform power up sequences for the host and panel hardware.
+ * Power and clock resources might be turned on (depending on the panel mode).
+ * The video engine is not enabled.
+ *
+ * Return: error code.
+ */
+int dsi_display_prepare(struct dsi_display *display);
+
+/**
+ * dsi_display_enable() - enable display
+ * @display: Handle to display.
+ *
+ * Enable will turn on the host engine and the panel. At the end of the enable
+ * function, Host and panel hardware are ready to accept pixel data from
+ * upstream.
+ *
+ * Return: error code.
+ */
+int dsi_display_enable(struct dsi_display *display);
+
+/**
+ * dsi_display_post_enable() - perform post enable operations.
+ * @display: Handle to display.
+ *
+ * Some panels might require some commands to be sent after pixel data
+ * transmission has started. Such commands are sent as part of the post_enable
+ * function.
+ *
+ * Return: error code.
+ */
+int dsi_display_post_enable(struct dsi_display *display);
+
+/**
+ * dsi_display_pre_disable() - perform pre disable operations.
+ * @display: Handle to display.
+ *
+ * If a panel requires commands to be sent before pixel data transmission is
+ * stopped, those can be sent as part of pre_disable.
+ *
+ * Return: error code.
+ */
+int dsi_display_pre_disable(struct dsi_display *display);
+
+/**
+ * dsi_display_disable() - disable panel and host hardware.
+ * @display: Handle to display.
+ *
+ * Disable host and panel hardware and pixel data transmission can not continue.
+ *
+ * Return: error code.
+ */
+int dsi_display_disable(struct dsi_display *display);
+
+/**
+ * dsi_display_unprepare() - power off display hardware.
+ * @display: Handle to display.
+ *
+ * Host and panel hardware is turned off. Panel will be in reset state at the
+ * end of the function.
+ *
+ * Return: error code.
+ */
+int dsi_display_unprepare(struct dsi_display *display);
+
+int dsi_display_set_tpg_state(struct dsi_display *display, bool enable);
+
+int dsi_display_clock_gate(struct dsi_display *display, bool enable);
+int dsi_dispaly_static_frame(struct dsi_display *display, bool enable);
+
+int dsi_display_set_backlight(void *display, u32 bl_lvl);
+
+/**
+ * dsi_dsiplay_setup_splash_resource
+ * @display: Handle to display.
+ *
+ * Setup DSI splash resource to avoid reset and glitch if DSI is enabled
+ * in the bootloader.
+ *
+ * Return: error code.
+ */
+int dsi_dsiplay_setup_splash_resource(struct dsi_display *display);
+#endif /* _DSI_DISPLAY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
new file mode 100644
index 000000000000..93fb041399e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.c
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include "dsi_display_test.h"
+
+/* Debug hook: intended to dump the retrieved mode list; currently an
+ * intentional no-op stub. */
+static void dsi_display_test_dump_modes(struct dsi_display_mode *mode, u32
+				count)
+{
+}
+
+/*
+ * dsi_display_test_work() - one-shot smoke test of the display bring-up
+ * path: query modes, set the first mode, prepare and enable the display.
+ * All failures are logged; the work item returns void.
+ *
+ * Fixes vs. original: the mode array is now freed on the success path
+ * (it previously leaked), and the unprepare error path actually calls
+ * dsi_display_unprepare() (the original label only re-checked rc).
+ */
+static void dsi_display_test_work(struct work_struct *work)
+{
+	struct dsi_display_test *test;
+	struct dsi_display *display;
+	struct dsi_display_mode *modes;
+	u32 count = 0;
+	u32 size = 0;
+	int rc = 0;
+
+	test = container_of(work, struct dsi_display_test, test_work);
+
+	display = test->display;
+
+	/* First pass with NULL modes retrieves only the mode count. */
+	rc = dsi_display_get_modes(display, NULL, &count);
+	if (rc) {
+		pr_err("failed to get modes count, rc=%d\n", rc);
+		return;
+	}
+
+	size = count * sizeof(*modes);
+	modes = kzalloc(size, GFP_KERNEL);
+	if (!modes)
+		return;
+
+	rc = dsi_display_get_modes(display, modes, &count);
+	if (rc) {
+		pr_err("failed to get modes, rc=%d\n", rc);
+		goto test_fail_free_modes;
+	}
+
+	dsi_display_test_dump_modes(modes, count);
+
+	rc = dsi_display_set_mode(display, &modes[0], 0x0);
+	if (rc) {
+		pr_err("failed to set mode, rc=%d\n", rc);
+		goto test_fail_free_modes;
+	}
+
+	rc = dsi_display_prepare(display);
+	if (rc) {
+		pr_err("failed to prepare display, rc=%d\n", rc);
+		goto test_fail_free_modes;
+	}
+
+	rc = dsi_display_enable(display);
+	if (rc) {
+		pr_err("failed to enable display, rc=%d\n", rc);
+		goto test_fail_unprep_disp;
+	}
+
+	/* fix: the original leaked 'modes' on the success path */
+	kfree(modes);
+	return;
+
+test_fail_unprep_disp:
+	/* fix: actually undo dsi_display_prepare() on enable failure */
+	rc = dsi_display_unprepare(display);
+	if (rc)
+		pr_err("failed to unprep display, rc=%d\n", rc);
+test_fail_free_modes:
+	kfree(modes);
+}
+
+/*
+ * dsi_display_test_init() - run the one-shot display smoke test.
+ * @display: display to exercise; must be non-NULL.
+ *
+ * Fixes vs. original: @display is validated and the work object is
+ * allocated *before* the one-shot 'done' latch is set, so a bad first
+ * call (NULL display or ENOMEM) no longer permanently disables the test.
+ * The test object is intentionally kept alive for the driver lifetime.
+ *
+ * Return: 0 on success or if already run, -EINVAL/-ENOMEM on failure.
+ */
+int dsi_display_test_init(struct dsi_display *display)
+{
+	static int done;
+	struct dsi_display_test *test;
+
+	if (!display) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (done)
+		return 0;
+
+	test = kzalloc(sizeof(*test), GFP_KERNEL);
+	if (!test)
+		return -ENOMEM;
+
+	test->display = display;
+	INIT_WORK(&test->test_work, dsi_display_test_work);
+
+	/* Latch only once everything needed for the test is in place. */
+	done = 1;
+	dsi_display_test_work(&test->test_work);
+	return 0;
+}
+
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h
new file mode 100644
index 000000000000..e36569854ab1
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_display_test.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DISPLAY_TEST_H_
+#define _DSI_DISPLAY_TEST_H_
+
+#include "dsi_display.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_ctrl.h"
+
+struct dsi_display_test {
+ struct dsi_display *display;
+
+ struct work_struct test_work;
+};
+
+int dsi_display_test_init(struct dsi_display *display);
+
+
+#endif /* _DSI_DISPLAY_TEST_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
new file mode 100644
index 000000000000..ad9e553785b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.c
@@ -0,0 +1,567 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#define pr_fmt(fmt) "dsi-drm:[%s] " fmt, __func__
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "dsi_drm.h"
+#include "sde_trace.h"
+
+#define to_dsi_bridge(x) container_of((x), struct dsi_bridge, base)
+#define to_dsi_state(x) container_of((x), struct dsi_connector_state, base)
+
+/* Translate a DRM display mode into the DSI timing representation.
+ * Porches are derived from the DRM sync positions:
+ *   back porch  = total - sync_end
+ *   front porch = sync_start - active
+ *   sync width  = total - (sync_start + back porch) == sync_end - sync_start
+ * Note the polarity convention: *_sync_polarity is true for active-low
+ * sync (i.e. when the DRM positive-sync flag is NOT set).
+ */
+static void convert_to_dsi_mode(const struct drm_display_mode *drm_mode,
+				struct dsi_display_mode *dsi_mode)
+{
+	memset(dsi_mode, 0, sizeof(*dsi_mode));
+
+	dsi_mode->timing.h_active = drm_mode->hdisplay;
+	dsi_mode->timing.h_back_porch = drm_mode->htotal - drm_mode->hsync_end;
+	dsi_mode->timing.h_sync_width = drm_mode->htotal -
+		(drm_mode->hsync_start + dsi_mode->timing.h_back_porch);
+	dsi_mode->timing.h_front_porch = drm_mode->hsync_start -
+					 drm_mode->hdisplay;
+	dsi_mode->timing.h_skew = drm_mode->hskew;
+
+	dsi_mode->timing.v_active = drm_mode->vdisplay;
+	dsi_mode->timing.v_back_porch = drm_mode->vtotal - drm_mode->vsync_end;
+	dsi_mode->timing.v_sync_width = drm_mode->vtotal -
+		(drm_mode->vsync_start + dsi_mode->timing.v_back_porch);
+
+	dsi_mode->timing.v_front_porch = drm_mode->vsync_start -
+					 drm_mode->vdisplay;
+
+	dsi_mode->timing.refresh_rate = drm_mode->vrefresh;
+
+	dsi_mode->pixel_clk_khz = drm_mode->clock;
+	dsi_mode->panel_mode = 0; /* TODO: Panel Mode */
+
+	/* carry the MSM private mode flags across the conversion */
+	if (msm_is_mode_seamless(drm_mode))
+		dsi_mode->flags |= DSI_MODE_FLAG_SEAMLESS;
+	if (msm_is_mode_dynamic_fps(drm_mode))
+		dsi_mode->flags |= DSI_MODE_FLAG_DFPS;
+	if (msm_needs_vblank_pre_modeset(drm_mode))
+		dsi_mode->flags |= DSI_MODE_FLAG_VBLANK_PRE_MODESET;
+	dsi_mode->timing.h_sync_polarity =
+		(drm_mode->flags & DRM_MODE_FLAG_PHSYNC) ? false : true;
+	dsi_mode->timing.v_sync_polarity =
+		(drm_mode->flags & DRM_MODE_FLAG_PVSYNC) ? false : true;
+}
+
+/* Inverse of convert_to_dsi_mode(): rebuild a DRM mode from DSI timing.
+ * Sync positions accumulate active + front porch (+ sync width, + back
+ * porch) in order.  A true *_sync_polarity maps back to the negative
+ * DRM sync flag, matching the inverted convention used on conversion in.
+ */
+static void convert_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+				struct drm_display_mode *drm_mode)
+{
+	memset(drm_mode, 0, sizeof(*drm_mode));
+
+	drm_mode->hdisplay = dsi_mode->timing.h_active;
+	drm_mode->hsync_start = drm_mode->hdisplay +
+				dsi_mode->timing.h_front_porch;
+	drm_mode->hsync_end = drm_mode->hsync_start +
+			      dsi_mode->timing.h_sync_width;
+	drm_mode->htotal = drm_mode->hsync_end + dsi_mode->timing.h_back_porch;
+	drm_mode->hskew = dsi_mode->timing.h_skew;
+
+	drm_mode->vdisplay = dsi_mode->timing.v_active;
+	drm_mode->vsync_start = drm_mode->vdisplay +
+				dsi_mode->timing.v_front_porch;
+	drm_mode->vsync_end = drm_mode->vsync_start +
+			      dsi_mode->timing.v_sync_width;
+	drm_mode->vtotal = drm_mode->vsync_end + dsi_mode->timing.v_back_porch;
+
+	drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
+	drm_mode->clock = dsi_mode->pixel_clk_khz;
+
+	if (dsi_mode->flags & DSI_MODE_FLAG_SEAMLESS)
+		drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
+	if (dsi_mode->flags & DSI_MODE_FLAG_DFPS)
+		drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
+	if (dsi_mode->flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
+		drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+	drm_mode->flags |= (dsi_mode->timing.h_sync_polarity) ?
+			DRM_MODE_FLAG_NHSYNC : DRM_MODE_FLAG_PHSYNC;
+	drm_mode->flags |= (dsi_mode->timing.v_sync_polarity) ?
+			DRM_MODE_FLAG_NVSYNC : DRM_MODE_FLAG_PVSYNC;
+
+	/* regenerate the mode name from the new timings */
+	drm_mode_set_name(drm_mode);
+}
+
+/* drm_bridge_funcs.attach hook: nothing to set up, just log the event. */
+static int dsi_bridge_attach(struct drm_bridge *bridge)
+{
+	struct dsi_bridge *c_bridge;
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	c_bridge = to_dsi_bridge(bridge);
+	pr_debug("[%d] attached\n", c_bridge->id);
+
+	return 0;
+}
+
+/* drm_bridge_funcs.pre_enable hook: apply the mode cached by
+ * dsi_bridge_mode_set(), then prepare + enable the display.  Seamless
+ * transitions skip prepare/enable entirely.  On enable failure the
+ * display is unprepared again to unwind.  Note the SDE_ATRACE_BEGIN/END
+ * pairs must stay balanced on every exit path.
+ */
+static void dsi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	/* By this point mode should have been validated through mode_fixup */
+	rc = dsi_display_set_mode(c_bridge->display,
+			&(c_bridge->dsi_mode), 0x0);
+	if (rc) {
+		pr_err("[%d] failed to perform a mode set, rc=%d\n",
+		       c_bridge->id, rc);
+		return;
+	}
+
+	if (c_bridge->dsi_mode.flags & DSI_MODE_FLAG_SEAMLESS) {
+		pr_debug("[%d] seamless pre-enable\n", c_bridge->id);
+		return;
+	}
+
+	SDE_ATRACE_BEGIN("dsi_bridge_pre_enable");
+	rc = dsi_display_prepare(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display prepare failed, rc=%d\n",
+		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_bridge_pre_enable");
+		return;
+	}
+
+	SDE_ATRACE_BEGIN("dsi_display_enable");
+	rc = dsi_display_enable(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display enable failed, rc=%d\n",
+		       c_bridge->id, rc);
+		/* unwind the successful prepare; result intentionally ignored */
+		(void)dsi_display_unprepare(c_bridge->display);
+	}
+	SDE_ATRACE_END("dsi_display_enable");
+	SDE_ATRACE_END("dsi_bridge_pre_enable");
+}
+
+/* drm_bridge_funcs.enable hook: finish bringing the display up.
+ * Seamless transitions need no post-enable work.
+ */
+static void dsi_bridge_enable(struct drm_bridge *bridge)
+{
+	struct dsi_bridge *c_bridge;
+	int rc;
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	c_bridge = to_dsi_bridge(bridge);
+	if (c_bridge->dsi_mode.flags & DSI_MODE_FLAG_SEAMLESS) {
+		pr_debug("[%d] seamless enable\n", c_bridge->id);
+		return;
+	}
+
+	rc = dsi_display_post_enable(c_bridge->display);
+	if (rc)
+		pr_err("[%d] DSI display post enabled failed, rc=%d\n",
+		       c_bridge->id, rc);
+}
+
+/* drm_bridge_funcs.disable hook: start display teardown. */
+static void dsi_bridge_disable(struct drm_bridge *bridge)
+{
+	struct dsi_bridge *c_bridge;
+	int rc;
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	c_bridge = to_dsi_bridge(bridge);
+	rc = dsi_display_pre_disable(c_bridge->display);
+	if (rc)
+		pr_err("[%d] DSI display pre disable failed, rc=%d\n",
+		       c_bridge->id, rc);
+}
+
+/* drm_bridge_funcs.post_disable hook: disable the display and then
+ * unprepare it.  SDE_ATRACE_BEGIN/END pairs are closed explicitly on
+ * every early-return path to keep the trace balanced.
+ */
+static void dsi_bridge_post_disable(struct drm_bridge *bridge)
+{
+	int rc = 0;
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+
+	if (!bridge) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	SDE_ATRACE_BEGIN("dsi_bridge_post_disable");
+	SDE_ATRACE_BEGIN("dsi_display_disable");
+	rc = dsi_display_disable(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display disable failed, rc=%d\n",
+		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_display_disable");
+		return;
+	}
+	SDE_ATRACE_END("dsi_display_disable");
+
+	rc = dsi_display_unprepare(c_bridge->display);
+	if (rc) {
+		pr_err("[%d] DSI display unprepare failed, rc=%d\n",
+		       c_bridge->id, rc);
+		SDE_ATRACE_END("dsi_bridge_post_disable");
+		return;
+	}
+	SDE_ATRACE_END("dsi_bridge_post_disable");
+}
+
+/* drm_bridge_funcs.mode_set hook: cache the adjusted mode (as a DSI
+ * mode) for dsi_bridge_pre_enable() to apply later.  The panel mode
+ * (cmd/video) is taken from panel[0] rather than from userspace.
+ */
+static void dsi_bridge_mode_set(struct drm_bridge *bridge,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+	struct dsi_bridge *c_bridge = to_dsi_bridge(bridge);
+	struct dsi_panel *panel;
+
+	if (!bridge || !mode || !adjusted_mode || !c_bridge->display ||
+	    !c_bridge->display->panel[0]) {
+		pr_err("Invalid params\n");
+		return;
+	}
+
+	/* dsi drm bridge is always the first panel */
+	panel = c_bridge->display->panel[0];
+	memset(&(c_bridge->dsi_mode), 0x0, sizeof(struct dsi_display_mode));
+	convert_to_dsi_mode(adjusted_mode, &(c_bridge->dsi_mode));
+
+	pr_debug("note: using panel cmd/vid mode instead of user val\n");
+	c_bridge->dsi_mode.panel_mode = panel->mode.panel_mode;
+}
+
+/* drm_bridge_funcs.mode_fixup hook: validate the requested mode and,
+ * when valid, write the (possibly adjusted) timings back into
+ * adjusted_mode.  Returns false when the display rejects the mode.
+ */
+static bool dsi_bridge_mode_fixup(struct drm_bridge *bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	struct dsi_bridge *c_bridge;
+	struct dsi_display_mode dsi_mode;
+	int rc;
+
+	if (!bridge || !mode || !adjusted_mode) {
+		pr_err("Invalid params\n");
+		return false;
+	}
+
+	c_bridge = to_dsi_bridge(bridge);
+	convert_to_dsi_mode(mode, &dsi_mode);
+
+	rc = dsi_display_validate_mode(c_bridge->display, &dsi_mode,
+				       DSI_VALIDATE_FLAG_ALLOW_ADJUST);
+	if (rc) {
+		pr_err("[%d] mode is not valid, rc=%d\n", c_bridge->id, rc);
+		return false;
+	}
+
+	convert_to_drm_mode(&dsi_mode, adjusted_mode);
+	return true;
+}
+
+/* drm_bridge hooks for the DSI bridge; mode_set caches the mode that
+ * pre_enable later applies via dsi_display_set_mode().
+ */
+static const struct drm_bridge_funcs dsi_bridge_ops = {
+	.attach       = dsi_bridge_attach,
+	.mode_fixup   = dsi_bridge_mode_fixup,
+	.pre_enable   = dsi_bridge_pre_enable,
+	.enable       = dsi_bridge_enable,
+	.disable      = dsi_bridge_disable,
+	.post_disable = dsi_bridge_post_disable,
+	.mode_set     = dsi_bridge_mode_set,
+};
+
+/* Push the display's preferred topology (if any) into the connector's
+ * CONNECTOR_PROP_TOPOLOGY_CONTROL property.  @adj_mode and @connector
+ * are part of the callback signature; only @display is validated here.
+ */
+int dsi_display_set_top_ctl(struct drm_connector *connector,
+		struct drm_display_mode *adj_mode, void *display)
+{
+	int rc = 0;
+	struct dsi_display *dsi_display = (struct dsi_display *)display;
+
+	if (!dsi_display) {
+		SDE_ERROR("dsi_display is NULL\n");
+		return -EINVAL;
+	}
+
+	/* 0 means "no explicit topology requested" — nothing to set */
+	if (dsi_display->display_topology) {
+		SDE_DEBUG("%s, set display topology %d\n",
+			__func__, dsi_display->display_topology);
+
+		msm_property_set_property(sde_connector_get_propinfo(connector),
+			sde_connector_get_property_values(connector->state),
+			CONNECTOR_PROP_TOPOLOGY_CONTROL,
+			dsi_display->display_topology);
+	}
+	return rc;
+}
+
+/* Publish display and per-panel capability strings (display config,
+ * panel mode, dfps support, orientation, backlight type) into the sde
+ * kms info blob so userspace can query them from the connector.
+ */
+int dsi_conn_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct dsi_display *dsi_display = display;
+	struct dsi_panel *panel;
+	int i;
+
+	if (!info || !dsi_display)
+		return -EINVAL;
+
+	sde_kms_info_add_keystr(info,
+		"display type", dsi_display->display_type);
+
+	switch (dsi_display->type) {
+	case DSI_DISPLAY_SINGLE:
+		sde_kms_info_add_keystr(info, "display config",
+					"single display");
+		break;
+	case DSI_DISPLAY_EXT_BRIDGE:
+		sde_kms_info_add_keystr(info, "display config", "ext bridge");
+		break;
+	case DSI_DISPLAY_SPLIT:
+		sde_kms_info_add_keystr(info, "display config",
+					"split display");
+		break;
+	case DSI_DISPLAY_SPLIT_EXT_BRIDGE:
+		sde_kms_info_add_keystr(info, "display config",
+					"split ext bridge");
+		break;
+	default:
+		pr_debug("invalid display type:%d\n", dsi_display->type);
+		break;
+	}
+
+	/* per-panel capabilities; stop at the first missing panel entry */
+	for (i = 0; i < dsi_display->panel_count; i++) {
+		if (!dsi_display->panel[i]) {
+			pr_debug("invalid panel data\n");
+			goto end;
+		}
+
+		panel = dsi_display->panel[i];
+		sde_kms_info_add_keystr(info, "panel name", panel->name);
+
+		switch (panel->mode.panel_mode) {
+		case DSI_OP_VIDEO_MODE:
+			sde_kms_info_add_keystr(info, "panel mode", "video");
+			break;
+		case DSI_OP_CMD_MODE:
+			sde_kms_info_add_keystr(info, "panel mode", "command");
+			break;
+		default:
+			pr_debug("invalid panel type:%d\n",
+				 panel->mode.panel_mode);
+			break;
+		}
+		sde_kms_info_add_keystr(info, "dfps support",
+					panel->dfps_caps.dfps_support ?
+					"true" : "false");
+
+		switch (panel->phy_props.rotation) {
+		case DSI_PANEL_ROTATE_NONE:
+			sde_kms_info_add_keystr(info, "panel orientation",
+						"none");
+			break;
+		case DSI_PANEL_ROTATE_H_FLIP:
+			sde_kms_info_add_keystr(info, "panel orientation",
+						"horz flip");
+			break;
+		case DSI_PANEL_ROTATE_V_FLIP:
+			sde_kms_info_add_keystr(info, "panel orientation",
+						"vert flip");
+			break;
+		default:
+			pr_debug("invalid panel rotation:%d\n",
+				 panel->phy_props.rotation);
+			break;
+		}
+
+		switch (panel->bl_config.type) {
+		case DSI_BACKLIGHT_PWM:
+			sde_kms_info_add_keystr(info, "backlight type", "pwm");
+			break;
+		case DSI_BACKLIGHT_WLED:
+			sde_kms_info_add_keystr(info, "backlight type", "wled");
+			break;
+		case DSI_BACKLIGHT_DCS:
+			sde_kms_info_add_keystr(info, "backlight type", "dcs");
+			break;
+		default:
+			pr_debug("invalid panel backlight type:%d\n",
+				 panel->bl_config.type);
+			break;
+		}
+	}
+
+end:
+	return 0;
+}
+
+/* Determine connector status.  Displays without MSM_DISPLAY_CAP_HOT_PLUG
+ * are treated as always connected.  Also publishes the panel's physical
+ * dimensions (mm) on the connector as a side effect.
+ */
+enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status status = connector_status_unknown;
+	struct msm_display_info info;
+	int rc;
+
+	if (!conn || !display)
+		return status;
+
+	/* get display dsi_info */
+	memset(&info, 0x0, sizeof(info));
+	rc = dsi_display_get_info(&info, display);
+	if (rc) {
+		pr_err("failed to get display info, rc=%d\n", rc);
+		return connector_status_disconnected;
+	}
+
+	if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+		status = (info.is_connected ? connector_status_connected :
+			  connector_status_disconnected);
+	else
+		status = connector_status_connected;
+
+	conn->display_info.width_mm = info.width_mm;
+	conn->display_info.height_mm = info.height_mm;
+
+	return status;
+}
+
+/* Query the display for its mode list, convert each entry to a DRM mode
+ * and probe-add it on the connector.  Returns the number of modes added
+ * (0 on failure or when a drm_panel is attached and handles modes).
+ */
+int dsi_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	u32 count = 0;
+	u32 size = 0;
+	struct dsi_display_mode *modes;
+	struct drm_display_mode drm_mode;
+	int rc, i;
+
+	if (sde_connector_get_panel(connector)) {
+		/*
+		 * TODO: If drm_panel is attached, query modes from the panel.
+		 * This is complicated in split dsi cases because panel is not
+		 * attached to both connectors.
+		 */
+		goto end;
+	}
+	/* first call with NULL buffer only retrieves the mode count */
+	rc = dsi_display_get_modes(display, NULL, &count);
+	if (rc) {
+		pr_err("failed to get num of modes, rc=%d\n", rc);
+		goto end;
+	}
+
+	size = count * sizeof(*modes);
+	modes = kzalloc(size, GFP_KERNEL);
+	if (!modes) {
+		count = 0;
+		goto end;
+	}
+
+	rc = dsi_display_get_modes(display, modes, &count);
+	if (rc) {
+		pr_err("failed to get modes, rc=%d\n", rc);
+		count = 0;
+		goto error;
+	}
+
+	for (i = 0; i < count; i++) {
+		struct drm_display_mode *m;
+
+		memset(&drm_mode, 0x0, sizeof(drm_mode));
+		convert_to_drm_mode(&modes[i], &drm_mode);
+		m = drm_mode_duplicate(connector->dev, &drm_mode);
+		if (!m) {
+			pr_err("failed to add mode %ux%u\n",
+			       drm_mode.hdisplay,
+			       drm_mode.vdisplay);
+			/* NOTE(review): storing -ENOMEM in a u32 relies on
+			 * the int return converting it back to a negative
+			 * value; consider making count an int — confirm
+			 * callers expect a negative return here.
+			 */
+			count = -ENOMEM;
+			goto error;
+		}
+		m->width_mm = connector->display_info.width_mm;
+		m->height_mm = connector->display_info.height_mm;
+		drm_mode_probed_add(connector, m);
+	}
+error:
+	kfree(modes);
+end:
+	pr_debug("MODE COUNT =%d\n\n", count);
+	return count;
+}
+
+/* Connector mode_valid callback: let the display validate the mode
+ * (adjustments allowed, result discarded).
+ */
+enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	struct dsi_display_mode dsi_mode;
+	int rc;
+
+	if (!connector || !mode) {
+		pr_err("Invalid params\n");
+		return MODE_ERROR;
+	}
+
+	convert_to_dsi_mode(mode, &dsi_mode);
+
+	rc = dsi_display_validate_mode(display, &dsi_mode,
+				       DSI_VALIDATE_FLAG_ALLOW_ADJUST);
+	if (!rc)
+		return MODE_OK;
+
+	pr_err("mode not supported, rc=%d\n", rc);
+	return MODE_BAD;
+}
+
+/* Allocate a dsi_bridge, attach it to the DRM device and wire it to
+ * @encoder.  Returns the bridge or an ERR_PTR on failure; ownership of
+ * the allocation passes to the caller (freed by dsi_drm_bridge_cleanup).
+ */
+struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
+				       struct drm_device *dev,
+				       struct drm_encoder *encoder)
+{
+	int rc = 0;
+	struct dsi_bridge *bridge;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	bridge->display = display;
+	bridge->base.funcs = &dsi_bridge_ops;
+	bridge->base.encoder = encoder;
+
+	rc = drm_bridge_attach(dev, &bridge->base);
+	if (rc) {
+		pr_err("failed to attach bridge, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	encoder->bridge = &bridge->base;
+	return bridge;
+error_free_bridge:
+	kfree(bridge);
+error:
+	return ERR_PTR(rc);
+}
+
+/* Detach the bridge from its encoder and free it; NULL-safe. */
+void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge)
+{
+	if (bridge) {
+		if (bridge->base.encoder)
+			bridge->base.encoder->bridge = NULL;
+		kfree(bridge);
+	}
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
new file mode 100644
index 000000000000..89ad0da21946
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_drm.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_DRM_H_
+#define _DSI_DRM_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+
+#include "msm_drv.h"
+
+#include "dsi_display.h"
+
+/**
+ * struct dsi_bridge - DSI implementation of a drm_bridge
+ * @base:     embedded drm_bridge (container_of anchor for to_dsi_bridge)
+ * @id:       bridge instance id, used in log messages
+ * @display:  private display handle this bridge drives
+ * @dsi_mode: mode cached by dsi_bridge_mode_set() for pre_enable
+ */
+struct dsi_bridge {
+	struct drm_bridge base;
+	u32 id;
+
+	struct dsi_display *display;
+	struct dsi_display_mode dsi_mode;
+};
+
+/**
+ * dsi_display_set_top_ctl - callback to set display topology property
+ * @connector: Pointer to drm connector structure
+ * @adj_mode: adjusted mode
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int dsi_display_set_top_ctl(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode, void *display);
+
+/**
+ * dsi_conn_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int dsi_conn_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * dsi_conn_detect - callback to determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status dsi_conn_detect(struct drm_connector *conn,
+ bool force,
+ void *display);
+
+/**
+ * dsi_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int dsi_connector_get_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * dsi_conn_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status dsi_conn_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+struct dsi_bridge *dsi_drm_bridge_init(struct dsi_display *display,
+ struct drm_device *dev,
+ struct drm_encoder *encoder);
+
+void dsi_drm_bridge_cleanup(struct dsi_bridge *bridge);
+
+#endif /* _DSI_DRM_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
new file mode 100644
index 000000000000..01535c02a7f8
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_hw.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_HW_H_
+#define _DSI_HW_H_
+#include <linux/io.h>
+
+/* Register accessors for the DSI controller address space.  The _W32
+ * variants log each write at debug level; #off stringizes the register
+ * offset macro name so the log shows the symbolic register.  All
+ * accesses are relaxed (no barriers) — callers order them as needed.
+ */
+#define DSI_R32(dsi_hw, off) readl_relaxed((dsi_hw)->base + (off))
+#define DSI_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->base + (off)); \
+	} while (0)
+
+/* Same accessors for the MMSS misc register block */
+#define DSI_MMSS_MISC_R32(dsi_hw, off) \
+	readl_relaxed((dsi_hw)->mmss_misc_base + (off))
+#define DSI_MMSS_MISC_W32(dsi_hw, off, val) \
+	do {\
+		pr_debug("[DSI_%d][%s] - [0x%08x]\n", \
+			(dsi_hw)->index, #off, val); \
+		writel_relaxed((val), (dsi_hw)->mmss_misc_base + (off)); \
+	} while (0)
+
+#define DSI_R64(dsi_hw, off) readq_relaxed((dsi_hw)->base + (off))
+#define DSI_W64(dsi_hw, off, val) writeq_relaxed((val), (dsi_hw)->base + (off))
+
+#endif /* _DSI_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
new file mode 100644
index 000000000000..b1319a68429f
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.c
@@ -0,0 +1,2039 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/of_gpio.h>
+
+#include "sde_kms.h"
+#include "dsi_panel.h"
+#include "dsi_ctrl_hw.h"
+
+#define DSI_PANEL_DEFAULT_LABEL "Default dsi panel"
+
+#define DEFAULT_MDP_TRANSFER_TIME 14000
+
+/* Acquire all regulators named in panel->power_info; on failure release
+ * the ones already obtained (in reverse order) and return the error.
+ * PTR_RET() converts an ERR_PTR to its errno, 0 for a valid pointer
+ * (equivalent to the later PTR_ERR_OR_ZERO helper).
+ */
+static int dsi_panel_vreg_get(struct dsi_panel *panel)
+{
+	int rc = 0;
+	int i;
+	struct regulator *vreg = NULL;
+
+	for (i = 0; i < panel->power_info.count; i++) {
+		vreg = devm_regulator_get(panel->parent,
+					  panel->power_info.vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       panel->power_info.vregs[i].vreg_name);
+			goto error_put;
+		}
+		panel->power_info.vregs[i].vreg = vreg;
+	}
+
+	return rc;
+error_put:
+	/* unwind: release regulators acquired before the failing one */
+	for (i = i - 1; i >= 0; i--) {
+		devm_regulator_put(panel->power_info.vregs[i].vreg);
+		panel->power_info.vregs[i].vreg = NULL;
+	}
+	return rc;
+}
+
+/* Release every regulator acquired by dsi_panel_vreg_get(), in reverse
+ * acquisition order.  Always succeeds.
+ */
+static int dsi_panel_vreg_put(struct dsi_panel *panel)
+{
+	int i;
+
+	for (i = panel->power_info.count - 1; i >= 0; i--)
+		devm_regulator_put(panel->power_info.vregs[i].vreg);
+
+	return 0;
+}
+
+/* Request the reset, display-enable and backlight-enable GPIOs that
+ * are declared valid.  On any failure, free the GPIOs acquired so far
+ * (reverse order) and return the error.
+ */
+static int dsi_panel_gpio_request(struct dsi_panel *panel)
+{
+	struct dsi_panel_reset_config *r_config = &panel->reset_config;
+	int rc = 0;
+
+	if (gpio_is_valid(r_config->reset_gpio)) {
+		rc = gpio_request(r_config->reset_gpio, "reset_gpio");
+		if (rc) {
+			pr_err("request for reset_gpio failed, rc=%d\n", rc);
+			return rc;
+		}
+	}
+
+	if (gpio_is_valid(r_config->disp_en_gpio)) {
+		rc = gpio_request(r_config->disp_en_gpio, "disp_en_gpio");
+		if (rc) {
+			pr_err("request for disp_en_gpio failed, rc=%d\n", rc);
+			goto err_free_reset;
+		}
+	}
+
+	if (gpio_is_valid(panel->bl_config.en_gpio)) {
+		rc = gpio_request(panel->bl_config.en_gpio, "bklt_en_gpio");
+		if (rc) {
+			pr_err("request for bklt_en_gpio failed, rc=%d\n", rc);
+			goto err_free_disp_en;
+		}
+	}
+
+	return 0;
+
+err_free_disp_en:
+	if (gpio_is_valid(r_config->disp_en_gpio))
+		gpio_free(r_config->disp_en_gpio);
+err_free_reset:
+	if (gpio_is_valid(r_config->reset_gpio))
+		gpio_free(r_config->reset_gpio);
+	return rc;
+}
+
+/* Free every GPIO requested by dsi_panel_gpio_request(); only GPIOs
+ * marked valid were requested, so only those are freed.
+ */
+static int dsi_panel_gpio_release(struct dsi_panel *panel)
+{
+	struct dsi_panel_reset_config *r_config = &panel->reset_config;
+
+	if (gpio_is_valid(r_config->reset_gpio))
+		gpio_free(r_config->reset_gpio);
+
+	if (gpio_is_valid(r_config->disp_en_gpio))
+		gpio_free(r_config->disp_en_gpio);
+
+	if (gpio_is_valid(panel->bl_config.en_gpio))
+		gpio_free(panel->bl_config.en_gpio);
+
+	return 0;
+}
+
+/* Drive the panel reset sequence: assert display-enable, walk the
+ * DT-provided reset GPIO level/delay sequence, then enable backlight.
+ *
+ * Fix: the first block tested disp_en_gpio for validity but then drove
+ * bl_config.en_gpio — it now drives the display-enable GPIO it checked
+ * (the backlight GPIO is handled, correctly, at the end).
+ */
+static int dsi_panel_reset(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_panel_reset_config *r_config = &panel->reset_config;
+	int i;
+
+	if (gpio_is_valid(r_config->disp_en_gpio)) {
+		rc = gpio_direction_output(r_config->disp_en_gpio, 1);
+		if (rc) {
+			pr_err("unable to set dir for disp gpio rc=%d\n", rc);
+			goto exit;
+		}
+	}
+
+	/* program the first level while configuring direction */
+	if (r_config->count) {
+		rc = gpio_direction_output(r_config->reset_gpio,
+					   r_config->sequence[0].level);
+		if (rc) {
+			pr_err("unable to set dir for rst gpio rc=%d\n", rc);
+			goto exit;
+		}
+	}
+
+	for (i = 0; i < r_config->count; i++) {
+		gpio_set_value(r_config->reset_gpio,
+			       r_config->sequence[i].level);
+
+		if (r_config->sequence[i].sleep_ms)
+			usleep_range(r_config->sequence[i].sleep_ms * 1000,
+				     r_config->sequence[i].sleep_ms * 1000);
+	}
+
+	if (gpio_is_valid(panel->bl_config.en_gpio)) {
+		rc = gpio_direction_output(panel->bl_config.en_gpio, 1);
+		if (rc)
+			pr_err("unable to set dir for bklt gpio rc=%d\n", rc);
+	}
+exit:
+	return rc;
+}
+
+/* Select the panel's "active" or "suspend" pinctrl state; a missing
+ * pinctrl handle or state is silently treated as success.
+ */
+static int dsi_panel_set_pinctrl_state(struct dsi_panel *panel, bool enable)
+{
+	struct pinctrl_state *state = enable ? panel->pinctrl.active :
+					       panel->pinctrl.suspend;
+	int rc = 0;
+
+	if (panel->pinctrl.pinctrl && state) {
+		rc = pinctrl_select_state(panel->pinctrl.pinctrl, state);
+		if (rc)
+			pr_err("[%s] failed to set pin state, rc=%d\n",
+			       panel->name, rc);
+	}
+
+	return rc;
+}
+
+
+/* Power the panel on: regulators -> pinctrl active state -> reset
+ * sequence.  On failure each step is unwound in reverse order (goto
+ * chain); unwind results are intentionally ignored with (void).
+ */
+static int dsi_panel_power_on(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	rc = dsi_pwr_enable_regulator(&panel->power_info, true);
+	if (rc) {
+		pr_err("[%s] failed to enable vregs, rc=%d\n", panel->name, rc);
+		goto exit;
+	}
+
+	rc = dsi_panel_set_pinctrl_state(panel, true);
+	if (rc) {
+		pr_err("[%s] failed to set pinctrl, rc=%d\n", panel->name, rc);
+		goto error_disable_vregs;
+	}
+
+	rc = dsi_panel_reset(panel);
+	if (rc) {
+		pr_err("[%s] failed to reset panel, rc=%d\n", panel->name, rc);
+		goto error_disable_gpio;
+	}
+
+	goto exit;
+
+error_disable_gpio:
+	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
+		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
+
+	if (gpio_is_valid(panel->bl_config.en_gpio))
+		gpio_set_value(panel->bl_config.en_gpio, 0);
+
+	(void)dsi_panel_set_pinctrl_state(panel, false);
+
+error_disable_vregs:
+	(void)dsi_pwr_enable_regulator(&panel->power_info, false);
+
+exit:
+	return rc;
+}
+
+/* Power the panel off: drop the enable/reset GPIOs, select the suspend
+ * pinctrl state, then disable the regulators (reverse of power on).
+ * Each step is attempted even if an earlier one failed; the last error
+ * wins.
+ *
+ * Fix: the regulator-disable failure message wrongly said "failed to
+ * enable vregs" (copy-paste from the power-on path).
+ */
+static int dsi_panel_power_off(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (gpio_is_valid(panel->reset_config.disp_en_gpio))
+		gpio_set_value(panel->reset_config.disp_en_gpio, 0);
+
+	if (gpio_is_valid(panel->reset_config.reset_gpio))
+		gpio_set_value(panel->reset_config.reset_gpio, 0);
+
+	rc = dsi_panel_set_pinctrl_state(panel, false);
+	if (rc) {
+		pr_err("[%s] failed set pinctrl state, rc=%d\n", panel->name,
+		       rc);
+	}
+
+	rc = dsi_pwr_enable_regulator(&panel->power_info, false);
+	if (rc)
+		pr_err("[%s] failed to disable vregs, rc=%d\n",
+		       panel->name, rc);
+
+	return rc;
+}
+/* Transmit one of the panel's DT-defined command sets over the DSI
+ * host.  A set in LP state gets the USE_LPM flag on every message; a
+ * per-command post_wait_ms delay is honoured after each transfer.
+ * An empty set is not an error (returns 0).
+ */
+static int dsi_panel_tx_cmd_set(struct dsi_panel *panel,
+				enum dsi_cmd_set_type type)
+{
+	int rc = 0, i = 0;
+	ssize_t len;
+	struct dsi_cmd_desc *cmds = panel->cmd_sets[type].cmds;
+	u32 count = panel->cmd_sets[type].count;
+	enum dsi_cmd_set_state state = panel->cmd_sets[type].state;
+	const struct mipi_dsi_host_ops *ops = panel->host->ops;
+
+	if (count == 0) {
+		pr_debug("[%s] No commands to be sent for state(%d)\n",
+			 panel->name, type);
+		goto error;
+	}
+
+	for (i = 0; i < count; i++) {
+		/* TODO: handle last command */
+		if (state == DSI_CMD_SET_STATE_LP)
+			cmds->msg.flags |= MIPI_DSI_MSG_USE_LPM;
+
+		len = ops->transfer(panel->host, &cmds->msg);
+		if (len < 0) {
+			/* negative length is the transfer error code */
+			rc = len;
+			pr_err("failed to set cmds(%d), rc=%d\n", type, rc);
+			goto error;
+		}
+		if (cmds->post_wait_ms)
+			msleep(cmds->post_wait_ms);
+		cmds++;
+	}
+error:
+	return rc;
+}
+
+/* Release the pinctrl handle obtained by dsi_panel_pinctrl_init(). */
+static int dsi_panel_pinctrl_deinit(struct dsi_panel *panel)
+{
+	devm_pinctrl_put(panel->pinctrl.pinctrl);
+	return 0;
+}
+
+/* Look up the panel's pinctrl handle and its mandatory "panel_active"
+ * and "panel_suspend" states.  Any lookup failure is fatal and returns
+ * the corresponding errno.
+ */
+static int dsi_panel_pinctrl_init(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	/* TODO: pinctrl is defined in dsi dt node */
+	panel->pinctrl.pinctrl = devm_pinctrl_get(panel->parent);
+	if (IS_ERR_OR_NULL(panel->pinctrl.pinctrl)) {
+		rc = PTR_ERR(panel->pinctrl.pinctrl);
+		pr_err("failed to get pinctrl, rc=%d\n", rc);
+		goto error;
+	}
+
+	panel->pinctrl.active = pinctrl_lookup_state(panel->pinctrl.pinctrl,
+						     "panel_active");
+	if (IS_ERR_OR_NULL(panel->pinctrl.active)) {
+		rc = PTR_ERR(panel->pinctrl.active);
+		pr_err("failed to get pinctrl active state, rc=%d\n", rc);
+		goto error;
+	}
+
+	panel->pinctrl.suspend =
+		pinctrl_lookup_state(panel->pinctrl.pinctrl, "panel_suspend");
+
+	if (IS_ERR_OR_NULL(panel->pinctrl.suspend)) {
+		rc = PTR_ERR(panel->pinctrl.suspend);
+		pr_err("failed to get pinctrl suspend state, rc=%d\n", rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+#ifdef CONFIG_LEDS_TRIGGERS
+/* Register the "bkl-trigger" LED trigger used to drive a WLED
+ * backlight.  Defers probe if no LED classdev has attached to the
+ * trigger yet (FBCon needs it ready at register time).
+ */
+static int dsi_panel_led_bl_register(struct dsi_panel *panel,
+				     struct dsi_backlight_config *bl)
+{
+	int rc = 0;
+
+	led_trigger_register_simple("bkl-trigger", &bl->wled);
+
+	/* LED APIs don't tell us directly whether a classdev has yet
+	 * been registered to service this trigger. Until classdev is
+	 * registered, calling led_trigger has no effect, and doesn't
+	 * fail. Classdevs are associated with any registered triggers
+	 * when they do register, but that is too late for FBCon.
+	 * Check the cdev list directly and defer if appropriate.
+	 */
+	if (!bl->wled) {
+		pr_err("[%s] backlight registration failed\n", panel->name);
+		rc = -EINVAL;
+	} else {
+		read_lock(&bl->wled->leddev_list_lock);
+		if (list_empty(&bl->wled->led_cdevs))
+			rc = -EPROBE_DEFER;
+		read_unlock(&bl->wled->leddev_list_lock);
+
+		if (rc) {
+			pr_info("[%s] backlight %s not ready, defer probe\n",
+				panel->name, bl->wled->name);
+			led_trigger_unregister_simple(bl->wled);
+		}
+	}
+
+	return rc;
+}
+#else
+/* Stub when LED triggers are compiled out: no backlight trigger. */
+static int dsi_panel_led_bl_register(struct dsi_panel *panel,
+				     struct dsi_backlight_config *bl)
+{
+	return 0;
+}
+#endif
+
+/* Set the panel backlight level; only the WLED (LED-trigger) path is
+ * implemented.  Returns -ENOTSUPP for any other backlight type.
+ */
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl)
+{
+	struct dsi_backlight_config *bl = &panel->bl_config;
+
+	if (bl->type == DSI_BACKLIGHT_WLED) {
+		led_trigger_event(bl->wled, bl_lvl);
+		return 0;
+	}
+
+	pr_err("Backlight type(%d) not supported\n", bl->type);
+	return -ENOTSUPP;
+}
+
+/* Register the panel's backlight backend.  Only WLED is implemented;
+ * DSI_BACKLIGHT_UNKNOWN is tolerated (logged, returns 0) so panels
+ * without a described backlight still probe.
+ */
+static int dsi_panel_bl_register(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_backlight_config *bl = &panel->bl_config;
+
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		rc = dsi_panel_led_bl_register(panel, bl);
+		break;
+	case DSI_BACKLIGHT_UNKNOWN:
+		DRM_INFO("backlight type is unknown\n");
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+/* Unregister the panel's backlight backend (inverse of
+ * dsi_panel_bl_register).
+ *
+ * Fix: DSI_BACKLIGHT_UNKNOWN is now a no-op for consistency with
+ * dsi_panel_bl_register(), which accepts it and registers nothing —
+ * previously unregister failed it with -ENOTSUPP.
+ */
+static int dsi_panel_bl_unregister(struct dsi_panel *panel)
+{
+	int rc = 0;
+	struct dsi_backlight_config *bl = &panel->bl_config;
+
+	switch (bl->type) {
+	case DSI_BACKLIGHT_WLED:
+		led_trigger_unregister_simple(bl->wled);
+		break;
+	case DSI_BACKLIGHT_UNKNOWN:
+		/* nothing was registered */
+		break;
+	default:
+		pr_err("Backlight type(%d) not supported\n", bl->type);
+		rc = -ENOTSUPP;
+		goto error;
+	}
+
+error:
+	return rc;
+}
+/* Parse the panel timing (active area, porches, sync widths, refresh
+ * rate) from the panel DT node.  All properties except h-sync-skew are
+ * mandatory; the first missing mandatory property aborts with its errno.
+ *
+ * Fix: the v-front-porch failure message wrongly named the
+ * qcom,mdss-dsi-v-back-porch property (copy-paste error).
+ */
+static int dsi_panel_parse_timing(struct dsi_mode_info *mode,
+				  struct device_node *of_node)
+{
+	int rc = 0;
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-framerate",
+				  &mode->refresh_rate);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-framerate, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-width",
+				  &mode->h_active);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-width, rc=%d\n", rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-front-porch",
+				  &mode->h_front_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-front-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-back-porch",
+				  &mode->h_back_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-back-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-pulse-width",
+				  &mode->h_sync_width);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-h-pulse-width, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	/* optional: missing h-sync-skew is only logged, not fatal */
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-sync-skew",
+				  &mode->h_skew);
+	if (rc)
+		pr_err("qcom,mdss-dsi-h-sync-skew is not defined, rc=%d\n", rc);
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-panel-height",
+				  &mode->v_active);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-panel-height, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-back-porch",
+				  &mode->v_back_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-back-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-front-porch",
+				  &mode->v_front_porch);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-front-porch, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_node, "qcom,mdss-dsi-v-pulse-width",
+				  &mode->v_sync_width);
+	if (rc) {
+		pr_err("failed to read qcom,mdss-dsi-v-pulse-width, rc=%d\n",
+		       rc);
+		goto error;
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * Map the qcom,mdss-dsi-bpp value (and the optional pixel-packing
+ * property for 18bpp) to a DSI destination pixel format. Any bpp that
+ * is not explicitly listed falls back to RGB888.
+ */
+static int dsi_panel_parse_pixel_format(struct dsi_host_common_cfg *host,
+                                        struct device_node *of_node,
+                                        const char *name)
+{
+        int rc;
+        u32 bpp = 0;
+        enum dsi_pixel_format fmt = DSI_PIXEL_FORMAT_RGB888;
+        const char *packing;
+
+        rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bpp", &bpp);
+        if (rc) {
+                pr_err("[%s] failed to read qcom,mdss-dsi-bpp, rc=%d\n",
+                       name, rc);
+                return rc;
+        }
+
+        if (bpp == 3)
+                fmt = DSI_PIXEL_FORMAT_RGB111;
+        else if (bpp == 8)
+                fmt = DSI_PIXEL_FORMAT_RGB332;
+        else if (bpp == 12)
+                fmt = DSI_PIXEL_FORMAT_RGB444;
+        else if (bpp == 16)
+                fmt = DSI_PIXEL_FORMAT_RGB565;
+        else if (bpp == 18)
+                fmt = DSI_PIXEL_FORMAT_RGB666;
+
+        /* 18bpp supports a loosely packed variant selected by DT */
+        if (fmt == DSI_PIXEL_FORMAT_RGB666) {
+                packing = of_get_property(of_node,
+                                          "qcom,mdss-dsi-pixel-packing",
+                                          NULL);
+                if (packing && !strcmp(packing, "loose"))
+                        fmt = DSI_PIXEL_FORMAT_RGB666_LOOSE;
+        }
+
+        host->dst_format = fmt;
+        return rc;
+}
+
+/*
+ * Build the enabled data-lane mask from the per-lane DT state
+ * properties. At least one lane must be enabled.
+ */
+static int dsi_panel_parse_lane_states(struct dsi_host_common_cfg *host,
+                                       struct device_node *of_node,
+                                       const char *name)
+{
+        int rc = 0;
+
+        if (of_property_read_bool(of_node, "qcom,mdss-dsi-lane-0-state"))
+                host->data_lanes |= DSI_DATA_LANE_0;
+
+        if (of_property_read_bool(of_node, "qcom,mdss-dsi-lane-1-state"))
+                host->data_lanes |= DSI_DATA_LANE_1;
+
+        if (of_property_read_bool(of_node, "qcom,mdss-dsi-lane-2-state"))
+                host->data_lanes |= DSI_DATA_LANE_2;
+
+        if (of_property_read_bool(of_node, "qcom,mdss-dsi-lane-3-state"))
+                host->data_lanes |= DSI_DATA_LANE_3;
+
+        if (host->data_lanes == 0) {
+                pr_err("[%s] No data lanes are enabled, rc=%d\n", name, rc);
+                rc = -EINVAL;
+        }
+
+        return rc;
+}
+
+/*
+ * Select the RGB channel ordering from qcom,mdss-dsi-color-order.
+ * An absent property defaults to RGB; an unknown value is an error.
+ */
+static int dsi_panel_parse_color_swap(struct dsi_host_common_cfg *host,
+                                      struct device_node *of_node,
+                                      const char *name)
+{
+        int rc = 0;
+        const char *swap_mode;
+
+        swap_mode = of_get_property(of_node, "qcom,mdss-dsi-color-order", NULL);
+        if (!swap_mode) {
+                pr_debug("[%s] Falling back to default color order\n", name);
+                host->swap_mode = DSI_COLOR_SWAP_RGB;
+        } else if (!strcmp(swap_mode, "rgb_swap_rgb")) {
+                host->swap_mode = DSI_COLOR_SWAP_RGB;
+        } else if (!strcmp(swap_mode, "rgb_swap_rbg")) {
+                host->swap_mode = DSI_COLOR_SWAP_RBG;
+        } else if (!strcmp(swap_mode, "rgb_swap_brg")) {
+                host->swap_mode = DSI_COLOR_SWAP_BRG;
+        } else if (!strcmp(swap_mode, "rgb_swap_grb")) {
+                host->swap_mode = DSI_COLOR_SWAP_GRB;
+        } else if (!strcmp(swap_mode, "rgb_swap_gbr")) {
+                host->swap_mode = DSI_COLOR_SWAP_GBR;
+        } else {
+                pr_err("[%s] Unrecognized color order-%s\n",
+                       name, swap_mode);
+                rc = -EINVAL;
+        }
+
+        /* bit swap on color channel is not defined in dt */
+        host->bit_swap_red = false;
+        host->bit_swap_green = false;
+        host->bit_swap_blue = false;
+        return rc;
+}
+
+/**
+ * dsi_panel_parse_triggers() - parse MDP and DMA command trigger modes
+ * @host:    host common configuration to fill in
+ * @of_node: panel device tree node
+ * @name:    panel name for log messages
+ *
+ * Both triggers default to DSI_TRIGGER_SW when the property is absent;
+ * an unrecognized value is an error.
+ *
+ * Return: 0 on success, -EINVAL for an unrecognized trigger string.
+ */
+static int dsi_panel_parse_triggers(struct dsi_host_common_cfg *host,
+                                    struct device_node *of_node,
+                                    const char *name)
+{
+        const char *trig;
+        int rc = 0;
+
+        trig = of_get_property(of_node, "qcom,mdss-dsi-mdp-trigger", NULL);
+        if (trig) {
+                if (!strcmp(trig, "none")) {
+                        host->mdp_cmd_trigger = DSI_TRIGGER_NONE;
+                } else if (!strcmp(trig, "trigger_te")) {
+                        host->mdp_cmd_trigger = DSI_TRIGGER_TE;
+                } else if (!strcmp(trig, "trigger_sw")) {
+                        host->mdp_cmd_trigger = DSI_TRIGGER_SW;
+                } else if (!strcmp(trig, "trigger_sw_te")) {
+                        host->mdp_cmd_trigger = DSI_TRIGGER_SW_TE;
+                } else {
+                        pr_err("[%s] Unrecognized mdp trigger type (%s)\n",
+                               name, trig);
+                        rc = -EINVAL;
+                }
+
+        } else {
+                pr_debug("[%s] Falling back to default MDP trigger\n",
+                         name);
+                host->mdp_cmd_trigger = DSI_TRIGGER_SW;
+        }
+
+        trig = of_get_property(of_node, "qcom,mdss-dsi-dma-trigger", NULL);
+        if (trig) {
+                if (!strcmp(trig, "none")) {
+                        host->dma_cmd_trigger = DSI_TRIGGER_NONE;
+                } else if (!strcmp(trig, "trigger_te")) {
+                        host->dma_cmd_trigger = DSI_TRIGGER_TE;
+                } else if (!strcmp(trig, "trigger_sw")) {
+                        host->dma_cmd_trigger = DSI_TRIGGER_SW;
+                } else if (!strcmp(trig, "trigger_sw_seof")) {
+                        host->dma_cmd_trigger = DSI_TRIGGER_SW_SEOF;
+                } else if (!strcmp(trig, "trigger_sw_te")) {
+                        host->dma_cmd_trigger = DSI_TRIGGER_SW_TE;
+                } else {
+                        /* fixed: messages previously said "mdp"/"MDP" here */
+                        pr_err("[%s] Unrecognized dma trigger type (%s)\n",
+                               name, trig);
+                        rc = -EINVAL;
+                }
+
+        } else {
+                pr_debug("[%s] Falling back to default DMA trigger\n", name);
+                host->dma_cmd_trigger = DSI_TRIGGER_SW;
+        }
+
+        return rc;
+}
+
+/*
+ * Parse miscellaneous host parameters: clock lane post/pre timing
+ * (with hardware defaults when absent) and three boolean flags.
+ * Always succeeds.
+ */
+static int dsi_panel_parse_misc_host_config(struct dsi_host_common_cfg *host,
+                                            struct device_node *of_node,
+                                            const char *name)
+{
+        u32 val = 0;
+
+        if (of_property_read_u32(of_node, "qcom,mdss-dsi-t-clk-post", &val)) {
+                pr_debug("[%s] Fallback to default t_clk_post value\n", name);
+                host->t_clk_post = 0x03;
+        } else {
+                host->t_clk_post = val;
+                pr_debug("[%s] t_clk_post = %d\n", name, val);
+        }
+
+        if (of_property_read_u32(of_node, "qcom,mdss-dsi-t-clk-pre", &val)) {
+                pr_debug("[%s] Fallback to default t_clk_pre value\n", name);
+                host->t_clk_pre = 0x24;
+        } else {
+                host->t_clk_pre = val;
+                pr_debug("[%s] t_clk_pre = %d\n", name, val);
+        }
+
+        host->ignore_rx_eot = of_property_read_bool(of_node,
+                                        "qcom,mdss-dsi-rx-eot-ignore");
+
+        host->append_tx_eot = of_property_read_bool(of_node,
+                                        "qcom,mdss-dsi-tx-eot-append");
+
+        host->force_clk_lane_hs = of_property_read_bool(of_node,
+                                        "qcom,mdss-dsi-force-clock-lane-hs");
+        return 0;
+}
+
+/*
+ * Populate panel->host_config by running each host-config sub-parser
+ * in turn, stopping at the first failure.
+ */
+static int dsi_panel_parse_host_config(struct dsi_panel *panel,
+                                       struct device_node *of_node)
+{
+        int rc;
+
+        rc = dsi_panel_parse_pixel_format(&panel->host_config, of_node,
+                                          panel->name);
+        if (rc) {
+                pr_err("[%s] failed to get pixel format, rc=%d\n",
+                       panel->name, rc);
+                return rc;
+        }
+
+        rc = dsi_panel_parse_lane_states(&panel->host_config, of_node,
+                                         panel->name);
+        if (rc) {
+                pr_err("[%s] failed to parse lane states, rc=%d\n",
+                       panel->name, rc);
+                return rc;
+        }
+
+        rc = dsi_panel_parse_color_swap(&panel->host_config, of_node,
+                                        panel->name);
+        if (rc) {
+                pr_err("[%s] failed to parse color swap config, rc=%d\n",
+                       panel->name, rc);
+                return rc;
+        }
+
+        rc = dsi_panel_parse_triggers(&panel->host_config, of_node,
+                                      panel->name);
+        if (rc) {
+                pr_err("[%s] failed to parse triggers, rc=%d\n",
+                       panel->name, rc);
+                return rc;
+        }
+
+        rc = dsi_panel_parse_misc_host_config(&panel->host_config, of_node,
+                                              panel->name);
+        if (rc)
+                pr_err("[%s] failed to parse misc host config, rc=%d\n",
+                       panel->name, rc);
+
+        return rc;
+}
+
+/**
+ * dsi_panel_parse_dfps_caps() - parse dynamic-FPS capabilities from DT
+ * @dfps_caps: capabilities structure to fill in
+ * @of_node:   panel device tree node
+ * @name:      panel name used in log messages
+ *
+ * DFPS is optional: when qcom,mdss-dsi-pan-enable-dynamic-fps is absent,
+ * dfps_support is set to false and 0 is returned. When present, the
+ * update type and min refresh rate are mandatory; the max refresh rate
+ * falls back to qcom,mdss-dsi-panel-framerate when not given.
+ */
+static int dsi_panel_parse_dfps_caps(struct dsi_dfps_capabilities *dfps_caps,
+ struct device_node *of_node,
+ const char *name)
+{
+ int rc = 0;
+ bool supported = false;
+ const char *type;
+ u32 val = 0;
+
+ supported = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-pan-enable-dynamic-fps");
+
+ if (!supported) {
+ pr_debug("[%s] DFPS is not supported\n", name);
+ dfps_caps->dfps_support = false;
+ } else {
+
+ type = of_get_property(of_node,
+ "qcom,mdss-dsi-pan-fps-update",
+ NULL);
+ if (!type) {
+ pr_err("[%s] dfps type not defined\n", name);
+ rc = -EINVAL;
+ goto error;
+ } else if (!strcmp(type, "dfps_suspend_resume_mode")) {
+ dfps_caps->type = DSI_DFPS_SUSPEND_RESUME;
+ } else if (!strcmp(type, "dfps_immediate_clk_mode")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_CLK;
+ } else if (!strcmp(type, "dfps_immediate_porch_mode_hfp")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_HFP;
+ } else if (!strcmp(type, "dfps_immediate_porch_mode_vfp")) {
+ dfps_caps->type = DSI_DFPS_IMMEDIATE_VFP;
+ } else {
+ pr_err("[%s] dfps type is not recognized\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-dsi-min-refresh-rate",
+ &val);
+ if (rc) {
+ pr_err("[%s] Min refresh rate is not defined\n", name);
+ rc = -EINVAL;
+ goto error;
+ }
+ dfps_caps->min_refresh_rate = val;
+
+ /* max rate is optional; fall back to the nominal panel framerate */
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-dsi-max-refresh-rate",
+ &val);
+ if (rc) {
+ pr_debug("[%s] Using default refresh rate\n", name);
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-dsi-panel-framerate",
+ &val);
+ if (rc) {
+ pr_err("[%s] max refresh rate is not defined\n",
+ name);
+ rc = -EINVAL;
+ goto error;
+ }
+ }
+ dfps_caps->max_refresh_rate = val;
+
+ /*
+ * NOTE(review): on min > max, rc is set to -EINVAL but execution
+ * continues and dfps_support is still set to true below — confirm
+ * callers treat the returned rc as authoritative.
+ */
+ if (dfps_caps->min_refresh_rate > dfps_caps->max_refresh_rate) {
+ pr_err("[%s] min rate > max rate\n", name);
+ rc = -EINVAL;
+ }
+
+ pr_debug("[%s] DFPS is supported %d-%d, mode %d\n", name,
+ dfps_caps->min_refresh_rate,
+ dfps_caps->max_refresh_rate,
+ dfps_caps->type);
+ dfps_caps->dfps_support = true;
+ }
+
+error:
+ return rc;
+}
+
+/**
+ * dsi_panel_parse_video_host_config() - parse video-mode engine settings
+ * @cfg:     video engine configuration to fill in
+ * @of_node: panel device tree node
+ * @name:    panel name used in log messages
+ *
+ * h-sync-pulse and traffic-mode accept only their listed values (absent
+ * properties fall back to defaults; unrecognized values are errors).
+ * The LP11 flags are plain booleans, and virtual-channel id defaults
+ * to 0.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range value.
+ */
+static int dsi_panel_parse_video_host_config(struct dsi_video_engine_cfg *cfg,
+ struct device_node *of_node,
+ const char *name)
+{
+ int rc = 0;
+ const char *traffic_mode;
+ u32 vc_id = 0;
+ u32 val = 0;
+
+ /* h-sync-pulse must be 0 or 1 when present; absent means disabled */
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsi-h-sync-pulse", &val);
+ if (rc) {
+ pr_debug("[%s] fallback to default h-sync-pulse\n", name);
+ cfg->pulse_mode_hsa_he = false;
+ } else if (val == 1) {
+ cfg->pulse_mode_hsa_he = true;
+ } else if (val == 0) {
+ cfg->pulse_mode_hsa_he = false;
+ } else {
+ pr_err("[%s] Unrecognized value for mdss-dsi-h-sync-pulse\n",
+ name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ cfg->hfp_lp11_en = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-hfp-power-mode");
+
+ cfg->hbp_lp11_en = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-hbp-power-mode");
+
+ cfg->hsa_lp11_en = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-hsa-power-mode");
+
+ cfg->last_line_interleave_en = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-last-line-interleave");
+
+ cfg->eof_bllp_lp11_en = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-bllp-eof-power-mode");
+
+ cfg->bllp_lp11_en = of_property_read_bool(of_node,
+ "qcom,mdss-dsi-bllp-power-mode");
+
+ traffic_mode = of_get_property(of_node,
+ "qcom,mdss-dsi-traffic-mode",
+ NULL);
+ if (!traffic_mode) {
+ pr_debug("[%s] Falling back to default traffic mode\n", name);
+ cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
+ } else if (!strcmp(traffic_mode, "non_burst_sync_pulse")) {
+ cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_PULSES;
+ } else if (!strcmp(traffic_mode, "non_burst_sync_event")) {
+ cfg->traffic_mode = DSI_VIDEO_TRAFFIC_SYNC_START_EVENTS;
+ } else if (!strcmp(traffic_mode, "burst_mode")) {
+ cfg->traffic_mode = DSI_VIDEO_TRAFFIC_BURST_MODE;
+ } else {
+ pr_err("[%s] Unrecognized traffic mode-%s\n", name,
+ traffic_mode);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsi-virtual-channel-id",
+ &vc_id);
+ if (rc) {
+ pr_debug("[%s] Fallback to default vc id\n", name);
+ cfg->vc_id = 0;
+ } else {
+ cfg->vc_id = vc_id;
+ }
+
+error:
+ return rc;
+}
+
+/**
+ * dsi_panel_parse_cmd_host_config() - parse command-mode engine settings
+ * @cfg:     command engine configuration to fill in
+ * @of_node: panel device tree node
+ * @name:    panel name used in log messages
+ *
+ * All properties are optional with defaults (wr-mem-start 0x2C,
+ * wr-mem-continue 0x3C, DCS command insertion enabled, transfer time
+ * DEFAULT_MDP_TRANSFER_TIME). Only an out-of-range te-dcs-command
+ * value is an error.
+ */
+static int dsi_panel_parse_cmd_host_config(struct dsi_cmd_engine_cfg *cfg,
+ struct device_node *of_node,
+ const char *name)
+{
+ u32 val = 0;
+ int rc = 0;
+
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsi-wr-mem-start", &val);
+ if (rc) {
+ pr_debug("[%s] Fallback to default wr-mem-start\n", name);
+ cfg->wr_mem_start = 0x2C;
+ } else {
+ cfg->wr_mem_start = val;
+ }
+
+ val = 0;
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsi-wr-mem-continue",
+ &val);
+ if (rc) {
+ pr_debug("[%s] Fallback to default wr-mem-continue\n", name);
+ cfg->wr_mem_continue = 0x3C;
+ } else {
+ cfg->wr_mem_continue = val;
+ }
+
+ /* TODO: fix following */
+ cfg->max_cmd_packets_interleave = 0;
+
+ /* te-dcs-command must be 0 or 1 when present; absent enables it */
+ val = 0;
+ rc = of_property_read_u32(of_node, "qcom,mdss-dsi-te-dcs-command",
+ &val);
+ if (rc) {
+ pr_debug("[%s] fallback to default te-dcs-cmd\n", name);
+ cfg->insert_dcs_command = true;
+ } else if (val == 1) {
+ cfg->insert_dcs_command = true;
+ } else if (val == 0) {
+ cfg->insert_dcs_command = false;
+ } else {
+ pr_err("[%s] Unrecognized value for mdss-dsi-te-dcs-command\n",
+ name);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ if (of_property_read_u32(of_node, "qcom,mdss-mdp-transfer-time-us",
+ &val)) {
+ pr_debug("[%s] Fallback to default transfer-time-us\n", name);
+ cfg->mdp_transfer_time_us = DEFAULT_MDP_TRANSFER_TIME;
+ } else {
+ cfg->mdp_transfer_time_us = val;
+ }
+
+error:
+ return rc;
+}
+
+/*
+ * Determine video vs command operating mode from
+ * qcom,mdss-dsi-panel-type (defaulting to video mode), then parse the
+ * matching engine configuration.
+ */
+static int dsi_panel_parse_panel_mode(struct dsi_panel *panel,
+                                      struct device_node *of_node)
+{
+        int rc;
+        enum dsi_op_mode panel_mode;
+        const char *mode;
+
+        mode = of_get_property(of_node, "qcom,mdss-dsi-panel-type", NULL);
+        if (!mode) {
+                pr_debug("[%s] Fallback to default panel mode\n", panel->name);
+                panel_mode = DSI_OP_VIDEO_MODE;
+        } else if (!strcmp(mode, "dsi_video_mode")) {
+                panel_mode = DSI_OP_VIDEO_MODE;
+        } else if (!strcmp(mode, "dsi_cmd_mode")) {
+                panel_mode = DSI_OP_CMD_MODE;
+        } else {
+                pr_err("[%s] Unrecognized panel type-%s\n", panel->name, mode);
+                return -EINVAL;
+        }
+
+        if (panel_mode == DSI_OP_VIDEO_MODE) {
+                rc = dsi_panel_parse_video_host_config(&panel->video_config,
+                                                       of_node,
+                                                       panel->name);
+                if (rc) {
+                        pr_err("[%s] Failed to parse video host cfg, rc=%d\n",
+                               panel->name, rc);
+                        return rc;
+                }
+        } else {
+                rc = dsi_panel_parse_cmd_host_config(&panel->cmd_config,
+                                                     of_node,
+                                                     panel->name);
+                if (rc) {
+                        pr_err("[%s] Failed to parse cmd host config, rc=%d\n",
+                               panel->name, rc);
+                        return rc;
+                }
+        }
+
+        panel->mode.panel_mode = panel_mode;
+        return 0;
+}
+
+/**
+ * dsi_panel_parse_phy_props() - parse physical panel dimensions/rotation
+ * @props:   physical properties structure to fill in
+ * @of_node: panel device tree node
+ * @name:    panel name used in log messages
+ *
+ * Width and height (in mm) are optional and default to 0. Orientation
+ * accepts "180", "hflip" or "vflip"; absent means no rotation, any
+ * other value is an error.
+ */
+static int dsi_panel_parse_phy_props(struct dsi_panel_phy_props *props,
+ struct device_node *of_node,
+ const char *name)
+{
+ int rc = 0;
+ u32 val = 0;
+ const char *str;
+
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-pan-physical-width-dimension",
+ &val);
+ if (rc) {
+ pr_debug("[%s] Physical panel width is not defined\n", name);
+ props->panel_width_mm = 0;
+ rc = 0;
+ } else {
+ props->panel_width_mm = val;
+ }
+
+ rc = of_property_read_u32(of_node,
+ "qcom,mdss-pan-physical-height-dimension",
+ &val);
+ if (rc) {
+ pr_debug("[%s] Physical panel height is not defined\n", name);
+ props->panel_height_mm = 0;
+ rc = 0;
+ } else {
+ props->panel_height_mm = val;
+ }
+
+ /* "180" maps to a combined horizontal + vertical flip */
+ str = of_get_property(of_node, "qcom,mdss-dsi-panel-orientation", NULL);
+ if (!str) {
+ props->rotation = DSI_PANEL_ROTATE_NONE;
+ } else if (!strcmp(str, "180")) {
+ props->rotation = DSI_PANEL_ROTATE_HV_FLIP;
+ } else if (!strcmp(str, "hflip")) {
+ props->rotation = DSI_PANEL_ROTATE_H_FLIP;
+ } else if (!strcmp(str, "vflip")) {
+ props->rotation = DSI_PANEL_ROTATE_V_FLIP;
+ } else {
+ pr_err("[%s] Unrecognized panel rotation-%s\n", name, str);
+ rc = -EINVAL;
+ goto error;
+ }
+error:
+ return rc;
+}
+/*
+ * DT property names for each DSI command set, indexed by
+ * enum dsi_cmd_set_type. Entry order must match the enum.
+ */
+const char *cmd_set_prop_map[DSI_CMD_SET_MAX] = {
+ "qcom,mdss-dsi-pre-on-command",
+ "qcom,mdss-dsi-on-command",
+ "qcom,mdss-dsi-post-panel-on-command",
+ "qcom,mdss-dsi-pre-off-command",
+ "qcom,mdss-dsi-off-command",
+ "qcom,mdss-dsi-post-off-command",
+ "qcom,mdss-dsi-pre-res-switch",
+ "qcom,mdss-dsi-res-switch",
+ "qcom,mdss-dsi-post-res-switch",
+ "qcom,cmd-to-video-mode-switch-commands",
+ "qcom,cmd-to-video-mode-post-switch-commands",
+ "qcom,video-to-cmd-mode-switch-commands",
+ "qcom,video-to-cmd-mode-post-switch-commands",
+ "qcom,mdss-dsi-panel-status-command",
+};
+
+/*
+ * Matching "-state" property names (LP vs HS transmission) for each
+ * command set above, indexed identically.
+ *
+ * NOTE(review): index 2 pairs "post-panel-on-command" with
+ * "post-on-command-state" (not "post-panel-on-command-state") —
+ * confirm this asymmetry matches the DT bindings in use.
+ */
+const char *cmd_set_state_map[DSI_CMD_SET_MAX] = {
+ "qcom,mdss-dsi-pre-on-command-state",
+ "qcom,mdss-dsi-on-command-state",
+ "qcom,mdss-dsi-post-on-command-state",
+ "qcom,mdss-dsi-pre-off-command-state",
+ "qcom,mdss-dsi-off-command-state",
+ "qcom,mdss-dsi-post-off-command-state",
+ "qcom,mdss-dsi-pre-res-switch-state",
+ "qcom,mdss-dsi-res-switch-state",
+ "qcom,mdss-dsi-post-res-switch-state",
+ "qcom,cmd-to-video-mode-switch-commands-state",
+ "qcom,cmd-to-video-mode-post-switch-commands-state",
+ "qcom,video-to-cmd-mode-switch-commands-state",
+ "qcom,video-to-cmd-mode-post-switch-commands-state",
+ "qcom,mdss-dsi-panel-status-command-state",
+};
+
+/**
+ * dsi_panel_get_cmd_pkt_count() - count DSI command packets in a DT blob
+ * @data:   raw command stream read from the device tree
+ * @length: size of @data in bytes
+ * @cnt:    output packet count
+ *
+ * Each packet is a 7-byte header followed by a payload whose length is
+ * stored big-endian in header bytes 5 and 6.
+ *
+ * Return: 0 on success, -EINVAL when a packet overruns the buffer.
+ */
+static int dsi_panel_get_cmd_pkt_count(const char *data, u32 length, u32 *cnt)
+{
+        const u32 cmd_set_min_size = 7;
+        u32 count = 0;
+        u32 packet_length;
+        u32 tmp;
+
+        while (length >= cmd_set_min_size) {
+                packet_length = cmd_set_min_size;
+                /*
+                 * Cast through u8: with a signed char, a length byte with
+                 * the high bit set would sign-extend and corrupt the
+                 * computed packet length.
+                 */
+                tmp = (((u8)data[5]) << 8) | ((u8)data[6]);
+                packet_length += tmp;
+                if (packet_length > length) {
+                        pr_err("FORMAT ERROR\n");
+                        return -EINVAL;
+                }
+                length -= packet_length;
+                data += packet_length;
+                count++;
+        }
+
+        *cnt = count;
+        return 0;
+}
+
+/**
+ * dsi_panel_create_cmd_packets() - build command descriptors from a DT blob
+ * @data:   raw command stream (validated by dsi_panel_get_cmd_pkt_count())
+ * @length: size of @data in bytes
+ * @count:  number of packets to create
+ * @cmd:    pre-allocated array of @count descriptors to fill in
+ *
+ * Allocates a payload buffer per packet; on failure, payloads created
+ * so far are freed. The caller owns the payloads on success and frees
+ * them via dsi_panel_destroy_cmd_packets().
+ *
+ * Return: 0 on success, -ENOMEM on allocation failure.
+ */
+static int dsi_panel_create_cmd_packets(const char *data,
+                                        u32 length,
+                                        u32 count,
+                                        struct dsi_cmd_desc *cmd)
+{
+        int rc = 0;
+        int i, j;
+        u8 *payload;
+
+        for (i = 0; i < count; i++) {
+                u32 size;
+
+                cmd[i].msg.type = data[0];
+                cmd[i].last_command = (data[1] == 1 ? true : false);
+                cmd[i].msg.channel = data[2];
+                cmd[i].msg.flags |= (data[3] == 1 ? MIPI_DSI_MSG_REQ_ACK : 0);
+                cmd[i].post_wait_ms = data[4];
+                /* u8 casts avoid sign extension of the big-endian length */
+                cmd[i].msg.tx_len = (((u8)data[5]) << 8) | ((u8)data[6]);
+
+                size = cmd[i].msg.tx_len * sizeof(u8);
+
+                payload = kzalloc(size, GFP_KERNEL);
+                if (!payload) {
+                        rc = -ENOMEM;
+                        goto error_free_payloads;
+                }
+
+                for (j = 0; j < cmd[i].msg.tx_len; j++)
+                        payload[j] = data[7 + j];
+
+                cmd[i].msg.tx_buf = payload;
+                data += (7 + cmd[i].msg.tx_len);
+        }
+
+        return rc;
+error_free_payloads:
+        /*
+         * Free payloads of the packets built so far. The previous code
+         * decremented the array base pointer (cmd--), which was never
+         * advanced in the loop, so it walked before the start of the
+         * array and freed wild pointers.
+         */
+        for (i = i - 1; i >= 0; i--)
+                kfree(cmd[i].msg.tx_buf);
+
+        return rc;
+}
+
+/**
+ * dsi_panel_destroy_cmd_packets() - free a parsed command set
+ * @set: command set whose payloads and descriptor array are released
+ *
+ * NULL-safe and idempotent: cmds/count are reset after freeing so a
+ * second call — or a call on a set whose parse failed and left count
+ * nonzero with cmds already freed — is a harmless no-op instead of a
+ * NULL dereference or double free.
+ */
+static void dsi_panel_destroy_cmd_packets(struct dsi_panel_cmd_set *set)
+{
+        u32 i;
+
+        if (!set->cmds) {
+                set->count = 0;
+                return;
+        }
+
+        for (i = 0; i < set->count; i++)
+                kfree(set->cmds[i].msg.tx_buf);
+
+        kfree(set->cmds);
+        set->cmds = NULL;
+        set->count = 0;
+}
+
+/**
+ * dsi_panel_parse_cmd_sets_sub() - parse one DSI command set from DT
+ * @cmd:     command set to populate
+ * @type:    which set (indexes cmd_set_prop_map/cmd_set_state_map)
+ * @of_node: panel device tree node
+ *
+ * Return: 0 on success; -ENOTSUPP when the property is absent; -EINVAL
+ * for malformed data or an unrecognized command state; -ENOMEM on
+ * allocation failure. On any error cmd->cmds is NULL and cmd->count 0.
+ */
+static int dsi_panel_parse_cmd_sets_sub(struct dsi_panel_cmd_set *cmd,
+                                        enum dsi_cmd_set_type type,
+                                        struct device_node *of_node)
+{
+        int rc = 0;
+        u32 length = 0;
+        u32 size;
+        u32 i;
+        const char *data;
+        const char *state;
+        u32 packet_count = 0;
+
+        data = of_get_property(of_node, cmd_set_prop_map[type], &length);
+        if (!data) {
+                pr_err("%s commands not defined\n", cmd_set_prop_map[type]);
+                rc = -ENOTSUPP;
+                goto error;
+        }
+
+        rc = dsi_panel_get_cmd_pkt_count(data, length, &packet_count);
+        if (rc) {
+                pr_err("commands failed, rc=%d\n", rc);
+                goto error;
+        }
+        pr_debug("[%s] packet-count=%d, %d\n", cmd_set_prop_map[type],
+                 packet_count, length);
+
+        size = packet_count * sizeof(*cmd->cmds);
+        cmd->cmds = kzalloc(size, GFP_KERNEL);
+        if (!cmd->cmds) {
+                rc = -ENOMEM;
+                goto error;
+        }
+        cmd->count = packet_count;
+
+        rc = dsi_panel_create_cmd_packets(data, length, packet_count,
+                                          cmd->cmds);
+        if (rc) {
+                pr_err("Failed to create cmd packets, rc=%d\n", rc);
+                goto error_free_mem;
+        }
+
+        state = of_get_property(of_node, cmd_set_state_map[type], NULL);
+        if (!state || !strcmp(state, "dsi_lp_mode")) {
+                cmd->state = DSI_CMD_SET_STATE_LP;
+        } else if (!strcmp(state, "dsi_hs_mode")) {
+                cmd->state = DSI_CMD_SET_STATE_HS;
+        } else {
+                pr_err("[%s] Command state unrecognized-%s\n",
+                       cmd_set_state_map[type], state);
+                /*
+                 * Fix: this path previously fell through with rc == 0,
+                 * reporting success while freeing the command array,
+                 * and also leaked every packet payload.
+                 */
+                rc = -EINVAL;
+                goto error_free_payloads;
+        }
+
+        return rc;
+error_free_payloads:
+        /* packets were fully created above; release their payloads */
+        for (i = 0; i < cmd->count; i++)
+                kfree(cmd->cmds[i].msg.tx_buf);
+error_free_mem:
+        kfree(cmd->cmds);
+        cmd->cmds = NULL;
+        cmd->count = 0; /* keep count consistent with the freed array */
+error:
+        return rc;
+}
+
+/*
+ * Parse every DSI command set for the panel. A missing or malformed
+ * individual set is logged but never fatal, so this always returns 0.
+ */
+static int dsi_panel_parse_cmd_sets(struct dsi_panel *panel,
+                                    struct device_node *of_node)
+{
+        struct dsi_panel_cmd_set *set;
+        u32 i;
+        int rc;
+
+        for (i = DSI_CMD_SET_PRE_ON; i < DSI_CMD_SET_MAX; i++) {
+                set = &panel->cmd_sets[i];
+                set->type = i;
+                rc = dsi_panel_parse_cmd_sets_sub(set, i, of_node);
+                if (rc)
+                        pr_err("[%s] failed to parse set %d\n", panel->name, i);
+        }
+
+        return 0;
+}
+
+/**
+ * dsi_panel_parse_reset_sequence() - parse the GPIO reset sequence
+ * @panel:   panel whose reset_config is populated
+ * @of_node: panel device tree node
+ *
+ * qcom,mdss-dsi-reset-sequence is a list of <level sleep_ms> u32 pairs.
+ * The allocated sequence array is owned by panel->reset_config.
+ *
+ * Return: 0 on success, -EINVAL for missing/malformed data, -ENOMEM on
+ * allocation failure.
+ */
+static int dsi_panel_parse_reset_sequence(struct dsi_panel *panel,
+                                          struct device_node *of_node)
+{
+        int rc = 0;
+        int i;
+        u32 length = 0;
+        u32 count = 0;
+        u32 size = 0;
+        u32 *arr_32 = NULL;
+        const u32 *arr;
+        struct dsi_reset_seq *seq;
+
+        arr = of_get_property(of_node, "qcom,mdss-dsi-reset-sequence", &length);
+        if (!arr) {
+                pr_err("[%s] dsi-reset-sequence not found\n", panel->name);
+                rc = -EINVAL;
+                goto error;
+        }
+
+        length = length / sizeof(u32);
+        /*
+         * Entries come in <level sleep_ms> pairs, so the u32 count must
+         * be even. The previous test checked the byte length for oddness,
+         * which a u32 array can never trigger, silently dropping the
+         * last cell of an odd-count sequence.
+         */
+        if (length & 0x1) {
+                pr_err("[%s] syntax error for dsi-reset-sequence\n",
+                       panel->name);
+                rc = -EINVAL;
+                goto error;
+        }
+
+        pr_debug("RESET SEQ LENGTH = %d\n", length);
+
+        size = length * sizeof(u32);
+
+        arr_32 = kzalloc(size, GFP_KERNEL);
+        if (!arr_32) {
+                rc = -ENOMEM;
+                goto error;
+        }
+
+        rc = of_property_read_u32_array(of_node, "qcom,mdss-dsi-reset-sequence",
+                                        arr_32, length);
+        if (rc) {
+                /* fixed: message previously misspelled the property name */
+                pr_err("[%s] cannot read dsi-reset-sequence\n", panel->name);
+                goto error_free_arr_32;
+        }
+
+        count = length / 2;
+        size = count * sizeof(*seq);
+        seq = kzalloc(size, GFP_KERNEL);
+        if (!seq) {
+                rc = -ENOMEM;
+                goto error_free_arr_32;
+        }
+
+        panel->reset_config.sequence = seq;
+        panel->reset_config.count = count;
+
+        for (i = 0; i < length; i += 2) {
+                seq->level = arr_32[i];
+                seq->sleep_ms = arr_32[i + 1];
+                seq++;
+        }
+
+error_free_arr_32:
+        kfree(arr_32);
+error:
+        return rc;
+}
+
+/*
+ * Collect the panel's regulator list from the
+ * qcom,panel-supply-entries device tree data.
+ */
+static int dsi_panel_parse_power_cfg(struct device *parent,
+                                     struct dsi_panel *panel,
+                                     struct device_node *of_node)
+{
+        int rc;
+
+        rc = dsi_clk_pwr_of_get_vreg_data(of_node,
+                                          &panel->power_info,
+                                          "qcom,panel-supply-entries");
+        if (rc)
+                pr_err("[%s] failed to parse vregs\n", panel->name);
+
+        return rc;
+}
+
+/**
+ * dsi_panel_parse_gpios() - parse reset/enable GPIOs and reset sequence
+ * @panel:   panel whose reset_config is populated
+ * @of_node: panel device tree node
+ *
+ * The reset GPIO is mandatory. The display-enable GPIO is optional and
+ * is looked up first as qcom,5v-boost-gpio, then qcom,platform-en-gpio.
+ *
+ * NOTE(review): the "rc=%d" in the log messages below prints rc before
+ * it is ever set (still 0), not a GPIO lookup error code.
+ */
+static int dsi_panel_parse_gpios(struct dsi_panel *panel,
+ struct device_node *of_node)
+{
+ int rc = 0;
+
+ /* Need to set GPIO default value to -1, since 0 is a valid value */
+ panel->reset_config.disp_en_gpio = -1;
+ panel->reset_config.reset_gpio = of_get_named_gpio(of_node,
+ "qcom,platform-reset-gpio",
+ 0);
+ if (!gpio_is_valid(panel->reset_config.reset_gpio)) {
+ pr_err("[%s] failed get reset gpio, rc=%d\n", panel->name, rc);
+ rc = -EINVAL;
+ goto error;
+ }
+
+ panel->reset_config.disp_en_gpio = of_get_named_gpio(of_node,
+ "qcom,5v-boost-gpio",
+ 0);
+ if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
+ pr_debug("[%s] 5v-boot-gpio is not set, rc=%d\n",
+ panel->name, rc);
+ panel->reset_config.disp_en_gpio = of_get_named_gpio(of_node,
+ "qcom,platform-en-gpio",
+ 0);
+ if (!gpio_is_valid(panel->reset_config.disp_en_gpio)) {
+ pr_debug("[%s] platform-en-gpio is not set, rc=%d\n",
+ panel->name, rc);
+ }
+ }
+
+ /* TODO: release memory */
+ rc = dsi_panel_parse_reset_sequence(panel, of_node);
+ if (rc) {
+ pr_err("[%s] failed to parse reset sequence, rc=%d\n",
+ panel->name, rc);
+ goto error;
+ }
+
+error:
+ return rc;
+}
+
+/**
+ * dsi_panel_parse_bl_pwm_config() - parse PWM backlight parameters
+ * @config:  backlight configuration to fill in
+ * @of_node: panel device tree node
+ *
+ * PMIC bank, PWM period and PWM GPIO are mandatory for PWM backlights.
+ *
+ * Return: 0 on success, error code for a missing property or invalid GPIO.
+ */
+static int dsi_panel_parse_bl_pwm_config(struct dsi_backlight_config *config,
+                                         struct device_node *of_node)
+{
+        int rc = 0;
+        u32 val;
+
+        rc = of_property_read_u32(of_node, "qcom,dsi-bl-pmic-bank-select",
+                                  &val);
+        if (rc) {
+                pr_err("bl-pmic-bank-select is not defined, rc=%d\n", rc);
+                goto error;
+        }
+        config->pwm_pmic_bank = val;
+
+        rc = of_property_read_u32(of_node, "qcom,dsi-bl-pmic-pwm-frequency",
+                                  &val);
+        if (rc) {
+                /* fixed: message previously named bank-select (copy-paste) */
+                pr_err("bl-pmic-pwm-frequency is not defined, rc=%d\n", rc);
+                goto error;
+        }
+        config->pwm_period_usecs = val;
+
+        config->pwm_pmi_control = of_property_read_bool(of_node,
+                                        "qcom,mdss-dsi-bl-pwm-pmi");
+
+        config->pwm_gpio = of_get_named_gpio(of_node,
+                                             "qcom,mdss-dsi-pwm-gpio",
+                                             0);
+        if (!gpio_is_valid(config->pwm_gpio)) {
+                pr_err("pwm gpio is invalid\n");
+                rc = -EINVAL;
+                goto error;
+        }
+
+error:
+        return rc;
+}
+
+/**
+ * dsi_panel_parse_bl_config() - parse backlight control configuration
+ * @panel:   panel whose bl_config is populated
+ * @of_node: panel device tree node
+ *
+ * Backlight type defaults to UNKNOWN; levels default to 0/MAX_BL_LEVEL
+ * and brightness max to 255. PWM backlights additionally parse the PWM
+ * sub-config. The backlight-enable GPIO is mandatory.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+static int dsi_panel_parse_bl_config(struct dsi_panel *panel,
+                                     struct device_node *of_node)
+{
+        int rc = 0;
+        const char *bl_type;
+        u32 val = 0;
+
+        bl_type = of_get_property(of_node,
+                                  "qcom,mdss-dsi-bl-pmic-control-type",
+                                  NULL);
+        if (!bl_type) {
+                panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
+        } else if (!strcmp(bl_type, "bl_ctrl_pwm")) {
+                panel->bl_config.type = DSI_BACKLIGHT_PWM;
+        } else if (!strcmp(bl_type, "bl_ctrl_wled")) {
+                panel->bl_config.type = DSI_BACKLIGHT_WLED;
+        } else if (!strcmp(bl_type, "bl_ctrl_dcs")) {
+                panel->bl_config.type = DSI_BACKLIGHT_DCS;
+        } else {
+                pr_debug("[%s] bl-pmic-control-type unknown-%s\n",
+                         panel->name, bl_type);
+                panel->bl_config.type = DSI_BACKLIGHT_UNKNOWN;
+        }
+
+        rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bl-min-level", &val);
+        if (rc) {
+                pr_debug("[%s] bl-min-level unspecified, defaulting to zero\n",
+                         panel->name);
+                panel->bl_config.bl_min_level = 0;
+        } else {
+                panel->bl_config.bl_min_level = val;
+        }
+
+        rc = of_property_read_u32(of_node, "qcom,mdss-dsi-bl-max-level", &val);
+        if (rc) {
+                pr_debug("[%s] bl-max-level unspecified, defaulting to max level\n",
+                         panel->name);
+                panel->bl_config.bl_max_level = MAX_BL_LEVEL;
+        } else {
+                panel->bl_config.bl_max_level = val;
+        }
+
+        rc = of_property_read_u32(of_node, "qcom,mdss-brightness-max-level",
+                                  &val);
+        if (rc) {
+                /* fixed: "brigheness" typo in the previous message */
+                pr_debug("[%s] brightness-max-level unspecified, defaulting to 255\n",
+                         panel->name);
+                panel->bl_config.brightness_max_level = 255;
+        } else {
+                panel->bl_config.brightness_max_level = val;
+        }
+
+        if (panel->bl_config.type == DSI_BACKLIGHT_PWM) {
+                rc = dsi_panel_parse_bl_pwm_config(&panel->bl_config, of_node);
+                if (rc) {
+                        pr_err("[%s] failed to parse pwm config, rc=%d\n",
+                               panel->name, rc);
+                        goto error;
+                }
+        }
+
+        panel->bl_config.en_gpio = of_get_named_gpio(of_node,
+                                        "qcom,platform-bklight-en-gpio",
+                                        0);
+        if (!gpio_is_valid(panel->bl_config.en_gpio)) {
+                pr_err("[%s] failed get bklt gpio, rc=%d\n", panel->name, rc);
+                rc = -EINVAL;
+                goto error;
+        }
+
+error:
+        return rc;
+}
+
+/*
+ * Parse display-bridge-abstraction settings. Non-DBA panels need
+ * nothing; DBA panels must name a bridge chip.
+ */
+static int dsi_panel_parse_dba_config(struct dsi_panel *panel,
+                                      struct device_node *of_node)
+{
+        int len = 0;
+
+        panel->dba_config.dba_panel = of_property_read_bool(of_node,
+                                                        "qcom,dba-panel");
+        if (!panel->dba_config.dba_panel)
+                return 0;
+
+        panel->dba_config.hdmi_mode = of_property_read_bool(of_node,
+                                                        "qcom,hdmi-mode");
+
+        panel->dba_config.bridge_name = of_get_property(of_node,
+                                                        "qcom,bridge-name",
+                                                        &len);
+        if (!panel->dba_config.bridge_name || len <= 0) {
+                SDE_ERROR(
+                "%s:%d Unable to read bridge_name, data=%pK,len=%d\n",
+                __func__, __LINE__, panel->dba_config.bridge_name, len);
+                return -EINVAL;
+        }
+
+        return 0;
+}
+
+/**
+ * dsi_panel_get() - allocate and populate a panel from its DT node
+ * @parent:  parent device (stored as panel->parent)
+ * @of_node: panel device tree node
+ *
+ * Timing, host config, panel mode, phy props and command sets are
+ * mandatory; failures in dfps, power, gpio, backlight and dba parsing
+ * are only logged. The caller releases the result with dsi_panel_put().
+ *
+ * NOTE(review): the error path frees only the panel struct itself —
+ * allocations made by earlier parse steps (e.g. the reset sequence)
+ * would leak if a later mandatory step failed; confirm reachability.
+ *
+ * Return: panel pointer, or ERR_PTR() on failure.
+ */
+struct dsi_panel *dsi_panel_get(struct device *parent,
+ struct device_node *of_node)
+{
+ struct dsi_panel *panel;
+ int rc = 0;
+
+ panel = kzalloc(sizeof(*panel), GFP_KERNEL);
+ if (!panel)
+ return ERR_PTR(-ENOMEM);
+
+ panel->name = of_get_property(of_node, "qcom,mdss-dsi-panel-name",
+ NULL);
+ if (!panel->name)
+ panel->name = DSI_PANEL_DEFAULT_LABEL;
+
+ rc = dsi_panel_parse_timing(&panel->mode.timing, of_node);
+ if (rc) {
+ pr_err("failed to parse panel timing, rc=%d\n", rc);
+ goto error;
+ }
+
+ /* nominal pixel clock derived from the parsed timing totals */
+ panel->mode.pixel_clk_khz = (DSI_H_TOTAL(&panel->mode.timing) *
+ DSI_V_TOTAL(&panel->mode.timing) *
+ panel->mode.timing.refresh_rate) / 1000;
+ rc = dsi_panel_parse_host_config(panel, of_node);
+ if (rc) {
+ pr_err("failed to parse host configuration, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_panel_parse_panel_mode(panel, of_node);
+ if (rc) {
+ pr_err("failed to parse panel mode configuration, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_panel_parse_dfps_caps(&panel->dfps_caps, of_node, panel->name);
+ if (rc)
+ pr_err("failed to parse dfps configuration, rc=%d\n", rc);
+
+ rc = dsi_panel_parse_phy_props(&panel->phy_props, of_node, panel->name);
+ if (rc) {
+ pr_err("failed to parse panel physical dimension, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_panel_parse_cmd_sets(panel, of_node);
+ if (rc) {
+ pr_err("failed to parse command sets, rc=%d\n", rc);
+ goto error;
+ }
+
+ rc = dsi_panel_parse_power_cfg(parent, panel, of_node);
+ if (rc)
+ pr_err("failed to parse power config, rc=%d\n", rc);
+
+ rc = dsi_panel_parse_gpios(panel, of_node);
+ if (rc)
+ pr_err("failed to parse panel gpios, rc=%d\n", rc);
+
+ rc = dsi_panel_parse_bl_config(panel, of_node);
+ if (rc)
+ pr_err("failed to parse backlight config, rc=%d\n", rc);
+
+ rc = dsi_panel_parse_dba_config(panel, of_node);
+ if (rc)
+ pr_err("failed to parse dba config, rc=%d\n", rc);
+
+ panel->panel_of_node = of_node;
+ drm_panel_init(&panel->drm_panel);
+ mutex_init(&panel->panel_lock);
+ panel->parent = parent;
+ return panel;
+error:
+ kfree(panel);
+ return ERR_PTR(rc);
+}
+
+/*
+ * Release a panel obtained from dsi_panel_get(). NULL-safe. Frees every
+ * parsed command set before the panel itself.
+ */
+void dsi_panel_put(struct dsi_panel *panel)
+{
+        u32 i = DSI_CMD_SET_MAX;
+
+        if (!panel)
+                return;
+
+        while (i--)
+                dsi_panel_destroy_cmd_packets(&panel->cmd_sets[i]);
+
+        /* TODO: more free */
+        kfree(panel);
+}
+
+/**
+ * dsi_panel_drv_init() - acquire the panel's runtime resources
+ * @panel: panel to initialize
+ * @host:  MIPI DSI host the panel is attached to
+ *
+ * Acquires regulators, pinctrl, GPIOs and registers the backlight, in
+ * that order, unwinding via the goto chain on failure. A pinctrl init
+ * failure is only logged, not fatal.
+ *
+ * Return: 0 on success, negative error code otherwise.
+ */
+int dsi_panel_drv_init(struct dsi_panel *panel,
+ struct mipi_dsi_host *host)
+{
+ int rc = 0;
+ struct mipi_dsi_device *dev;
+
+ if (!panel || !host) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&panel->panel_lock);
+
+ dev = &panel->mipi_device;
+
+ dev->host = host;
+ /*
+ * We dont have device structure since panel is not a device node.
+ * When using drm panel framework, the device is probed when the host is
+ * create.
+ */
+ dev->channel = 0;
+ dev->lanes = 4;
+
+ panel->host = host;
+ rc = dsi_panel_vreg_get(panel);
+ if (rc) {
+ pr_err("[%s] Failed to get panel regulators, rc=%d\n",
+ panel->name, rc);
+ goto exit;
+ }
+
+ /* pinctrl failure is tolerated: log and continue */
+ rc = dsi_panel_pinctrl_init(panel);
+ if (rc)
+ pr_err("[%s] failed to init pinctrl, rc=%d\n", panel->name, rc);
+
+ rc = dsi_panel_gpio_request(panel);
+ if (rc) {
+ pr_err("[%s] failed to request gpios, rc=%d\n", panel->name,
+ rc);
+ goto error_pinctrl_deinit;
+ }
+
+ rc = dsi_panel_bl_register(panel);
+ if (rc) {
+ /* probe deferral is expected and not worth an error log */
+ if (rc != -EPROBE_DEFER)
+ pr_err("[%s] failed to register backlight, rc=%d\n",
+ panel->name, rc);
+ goto error_gpio_release;
+ }
+
+ goto exit;
+
+error_gpio_release:
+ (void)dsi_panel_gpio_release(panel);
+error_pinctrl_deinit:
+ (void)dsi_panel_pinctrl_deinit(panel);
+ (void)dsi_panel_vreg_put(panel);
+exit:
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
+/**
+ * dsi_panel_drv_deinit() - release the panel's runtime resources
+ * @panel: panel to tear down
+ *
+ * Undoes dsi_panel_drv_init() in reverse order. Each step's failure is
+ * logged but teardown continues; the returned rc is that of the last
+ * step (regulator release).
+ */
+int dsi_panel_drv_deinit(struct dsi_panel *panel)
+{
+ int rc = 0;
+
+ if (!panel) {
+ pr_err("Invalid params\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&panel->panel_lock);
+
+ rc = dsi_panel_bl_unregister(panel);
+ if (rc)
+ pr_err("[%s] failed to unregister backlight, rc=%d\n",
+ panel->name, rc);
+
+ rc = dsi_panel_gpio_release(panel);
+ if (rc)
+ pr_err("[%s] failed to release gpios, rc=%d\n", panel->name,
+ rc);
+
+ rc = dsi_panel_pinctrl_deinit(panel);
+ if (rc)
+ pr_err("[%s] failed to deinit gpios, rc=%d\n", panel->name,
+ rc);
+
+ rc = dsi_panel_vreg_put(panel);
+ if (rc)
+ pr_err("[%s] failed to put regs, rc=%d\n", panel->name, rc);
+
+ panel->host = NULL;
+ memset(&panel->mipi_device, 0x0, sizeof(panel->mipi_device));
+
+ mutex_unlock(&panel->panel_lock);
+ return rc;
+}
+
+/*
+ * Stub: mode validation is not implemented yet; every mode is accepted.
+ */
+int dsi_panel_validate_mode(struct dsi_panel *panel,
+ struct dsi_display_mode *mode)
+{
+ return 0;
+}
+
+/*
+ * Report the number of display modes the panel exposes. Currently
+ * always one; the DT format for multiple modes is undecided.
+ */
+int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count)
+{
+        if (!panel || !count) {
+                pr_err("Invalid params\n");
+                return -EINVAL;
+        }
+
+        mutex_lock(&panel->panel_lock);
+        /* TODO: DT format has not been decided for multiple modes. */
+        *count = 1;
+        mutex_unlock(&panel->panel_lock);
+
+        return 0;
+}
+
+/*
+ * Copy out the parsed physical panel properties under the panel lock.
+ */
+int dsi_panel_get_phy_props(struct dsi_panel *panel,
+                            struct dsi_panel_phy_props *phy_props)
+{
+        if (!panel || !phy_props) {
+                pr_err("Invalid params\n");
+                return -EINVAL;
+        }
+
+        mutex_lock(&panel->panel_lock);
+        *phy_props = panel->phy_props;
+        mutex_unlock(&panel->panel_lock);
+
+        return 0;
+}
+
+/*
+ * Copy out the parsed dynamic-FPS capabilities under the panel lock.
+ */
+int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
+                            struct dsi_dfps_capabilities *dfps_caps)
+{
+        if (!panel || !dfps_caps) {
+                pr_err("Invalid params\n");
+                return -EINVAL;
+        }
+
+        mutex_lock(&panel->panel_lock);
+        *dfps_caps = panel->dfps_caps;
+        mutex_unlock(&panel->panel_lock);
+
+        return 0;
+}
+
+/*
+ * Copy out display mode @index. Only index 0 exists today.
+ */
+int dsi_panel_get_mode(struct dsi_panel *panel,
+                       u32 index,
+                       struct dsi_display_mode *mode)
+{
+        int rc = 0;
+
+        if (!panel || !mode) {
+                pr_err("Invalid params\n");
+                return -EINVAL;
+        }
+
+        mutex_lock(&panel->panel_lock);
+        if (index == 0)
+                *mode = panel->mode;
+        else
+                rc = -ENOTSUPP; /* TODO: Support more than one mode */
+        mutex_unlock(&panel->panel_lock);
+
+        return rc;
+}
+
+/**
+ * dsi_panel_get_host_cfg_for_mode() - build a host configuration for a mode
+ * @panel:  DSI panel handle.
+ * @mode:   Display mode to derive timing from.
+ * @config: Output host configuration, filled in by this function.
+ *
+ * Combines the panel's static host/engine configuration with the timing of
+ * @mode. The engine union is filled from either the video or command engine
+ * config, selected by @mode's operating mode.
+ *
+ * Return: 0 on success, -EINVAL on invalid parameters.
+ */
+int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
+				    struct dsi_display_mode *mode,
+				    struct dsi_host_config *config)
+{
+	int rc = 0;
+
+	if (!panel || !mode || !config) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	config->panel_mode = panel->mode.panel_mode;
+	memcpy(&config->common_config, &panel->host_config,
+	       sizeof(config->common_config));
+
+	/* Only one member of the engine union is valid per operating mode */
+	if (mode->panel_mode == DSI_OP_VIDEO_MODE) {
+		memcpy(&config->u.video_engine, &panel->video_config,
+		       sizeof(config->u.video_engine));
+	} else {
+		memcpy(&config->u.cmd_engine, &panel->cmd_config,
+		       sizeof(config->u.cmd_engine));
+	}
+
+	memcpy(&config->video_timing, &mode->timing,
+	       sizeof(config->video_timing));
+
+	/* Standard 19.2 MHz escape clock used on these targets */
+	config->esc_clk_rate_hz = 19200000;
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/* First stage of panel bring-up: power the rails unless LP11_INIT defers it. */
+int dsi_panel_pre_prepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	/* If LP11_INIT is set, panel will be powered up during prepare() */
+	if (!panel->lp11_init) {
+		rc = dsi_panel_power_on(panel);
+		if (rc)
+			pr_err("[%s] Panel power on failed, rc=%d\n",
+			       panel->name, rc);
+	}
+
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/* Second bring-up stage: optional deferred power-on, then pre-on commands. */
+int dsi_panel_prepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	/* With LP11_INIT the power-on was deferred from pre_prepare() */
+	if (panel->lp11_init) {
+		rc = dsi_panel_power_on(panel);
+		if (rc) {
+			pr_err("[%s] panel power on failed, rc=%d\n",
+			       panel->name, rc);
+			goto done;
+		}
+	}
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_ON);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_PRE_ON cmds, rc=%d\n",
+		       panel->name, rc);
+
+done:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/* Send the panel-on command set. */
+int dsi_panel_enable(struct dsi_panel *panel)
+{
+	int rc;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_ON);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_ON cmds, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/* Send the post-on command set after the panel has been enabled. */
+int dsi_panel_post_enable(struct dsi_panel *panel)
+{
+	int rc;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_ON);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_POST_ON cmds, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/* Send the pre-off command set before disabling the panel. */
+int dsi_panel_pre_disable(struct dsi_panel *panel)
+{
+	int rc;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_PRE_OFF);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_PRE_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/* Send the panel-off command set. */
+int dsi_panel_disable(struct dsi_panel *panel)
+{
+	int rc;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_OFF);
+	if (rc)
+		pr_err("[%s] failed to send DSI_CMD_SET_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/**
+ * dsi_panel_unprepare() - send post-off commands, power down in LP11 mode
+ * @panel: DSI panel handle.
+ *
+ * Sends the DSI_CMD_SET_POST_OFF command set. When lp11_init is set, the
+ * panel rails are also turned off here; otherwise power-down happens later
+ * in dsi_panel_post_unprepare().
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int dsi_panel_unprepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	rc = dsi_panel_tx_cmd_set(panel, DSI_CMD_SET_POST_OFF);
+	if (rc) {
+		pr_err("[%s] failed to send DSI_CMD_SET_POST_OFF cmds, rc=%d\n",
+		       panel->name, rc);
+		goto error;
+	}
+
+	if (panel->lp11_init) {
+		rc = dsi_panel_power_off(panel);
+		if (rc) {
+			pr_err("[%s] panel power off failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
+
+/**
+ * dsi_panel_post_unprepare() - final power-down stage for the panel
+ * @panel: DSI panel handle.
+ *
+ * Turns the panel rails off unless lp11_init is set, in which case power-down
+ * already happened in dsi_panel_unprepare().
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+int dsi_panel_post_unprepare(struct dsi_panel *panel)
+{
+	int rc = 0;
+
+	if (!panel) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&panel->panel_lock);
+
+	if (!panel->lp11_init) {
+		rc = dsi_panel_power_off(panel);
+		if (rc) {
+			pr_err("[%s] panel power off failed, rc=%d\n",
+			       panel->name, rc);
+			goto error;
+		}
+	}
+error:
+	mutex_unlock(&panel->panel_lock);
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
new file mode 100644
index 000000000000..8106ed1261b4
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_panel.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DSI_PANEL_H_
+#define _DSI_PANEL_H_
+
+#include <linux/of_device.h>
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/errno.h>
+#include <linux/leds.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_mipi_dsi.h>
+
+#include "dsi_defs.h"
+#include "dsi_ctrl_hw.h"
+#include "dsi_clk_pwr.h"
+
+#define MAX_BL_LEVEL 4096
+
+/* Panel mounting orientation relative to the display pipeline. */
+enum dsi_panel_rotation {
+	DSI_PANEL_ROTATE_NONE = 0,
+	DSI_PANEL_ROTATE_HV_FLIP,
+	DSI_PANEL_ROTATE_H_FLIP,
+	DSI_PANEL_ROTATE_V_FLIP
+};
+
+/*
+ * Command sets sent to the panel at well-defined points of its lifecycle
+ * (power on/off, resolution switch, video<->command mode switch, status
+ * check). Each entry indexes dsi_panel::cmd_sets.
+ */
+enum dsi_cmd_set_type {
+	DSI_CMD_SET_PRE_ON = 0,
+	DSI_CMD_SET_ON,
+	DSI_CMD_SET_POST_ON,
+	DSI_CMD_SET_PRE_OFF,
+	DSI_CMD_SET_OFF,
+	DSI_CMD_SET_POST_OFF,
+	DSI_CMD_SET_PRE_RES_SWITCH,
+	DSI_CMD_SET_RES_SWITCH,
+	DSI_CMD_SET_POST_RES_SWITCH,
+	DSI_CMD_SET_CMD_TO_VID_SWITCH,
+	DSI_CMD_SET_POST_CMD_TO_VID_SWITCH,
+	DSI_CMD_SET_VID_TO_CMD_SWITCH,
+	DSI_CMD_SET_POST_VID_TO_CMD_SWITCH,
+	DSI_CMD_SET_PANEL_STATUS,
+	DSI_CMD_SET_MAX
+};
+
+/* Link speed used to transmit a command set: low-power or high-speed. */
+enum dsi_cmd_set_state {
+	DSI_CMD_SET_STATE_LP = 0,
+	DSI_CMD_SET_STATE_HS,
+	DSI_CMD_SET_STATE_MAX
+};
+
+/* Mechanism used to control the panel backlight. */
+enum dsi_backlight_type {
+	DSI_BACKLIGHT_PWM = 0,
+	DSI_BACKLIGHT_WLED,
+	DSI_BACKLIGHT_DCS,
+	DSI_BACKLIGHT_UNKNOWN,
+	DSI_BACKLIGHT_MAX,
+};
+
+/* Dynamic refresh rate (DFPS) capabilities parsed from device tree. */
+struct dsi_dfps_capabilities {
+	bool dfps_support;		/* true if panel supports dynamic fps */
+	enum dsi_dfps_type type;	/* method used to change refresh rate */
+	u32 min_refresh_rate;
+	u32 max_refresh_rate;
+};
+
+/* Pinctrl handles for the panel's active/suspend pin states. */
+struct dsi_pinctrl_info {
+	struct pinctrl *pinctrl;
+	struct pinctrl_state *active;
+	struct pinctrl_state *suspend;
+};
+
+/* Physical panel properties (reported to userspace via the connector). */
+struct dsi_panel_phy_props {
+	u32 panel_width_mm;
+	u32 panel_height_mm;
+	enum dsi_panel_rotation rotation;
+};
+
+/* One DSI command plus transfer hints. */
+struct dsi_cmd_desc {
+	struct mipi_dsi_msg msg;	/* raw DSI message */
+	bool last_command;		/* terminate the burst after this cmd */
+	u32 post_wait_ms;		/* delay after sending, in ms */
+};
+
+/* A named group of DSI commands sent together (see dsi_cmd_set_type). */
+struct dsi_panel_cmd_set {
+	enum dsi_cmd_set_type type;
+	enum dsi_cmd_set_state state;	/* LP vs HS transmission */
+	u32 count;			/* number of entries in @cmds */
+	struct dsi_cmd_desc *cmds;
+};
+
+/* Backlight control configuration; fields used depend on @type. */
+struct dsi_backlight_config {
+	enum dsi_backlight_type type;
+
+	u32 bl_min_level;
+	u32 bl_max_level;
+	u32 brightness_max_level;
+
+	int en_gpio;
+	/* PWM params */
+	bool pwm_pmi_control;
+	u32 pwm_pmic_bank;
+	u32 pwm_period_usecs;
+	int pwm_gpio;
+
+	/* WLED params */
+	struct led_trigger *wled;
+	struct backlight_device *bd;
+};
+
+/* One step of the reset-GPIO toggle sequence. */
+struct dsi_reset_seq {
+	u32 level;	/* gpio value to drive */
+	u32 sleep_ms;	/* delay after driving it */
+};
+
+/* Reset/enable GPIO configuration and the reset toggle sequence. */
+struct dsi_panel_reset_config {
+	struct dsi_reset_seq *sequence;
+	u32 count;	/* number of entries in @sequence */
+
+	int reset_gpio;
+	int disp_en_gpio;
+};
+
+/**
+ * struct dsi_panel_dba - DSI DBA panel information
+ * @dba_panel: Indicate if it's DBA panel
+ * @bridge_name: Bridge chip name
+ * @hdmi_mode: If bridge chip is in hdmi mode.
+ */
+struct dsi_panel_dba {
+ bool dba_panel;
+ const char *bridge_name;
+ bool hdmi_mode;
+};
+
+/*
+ * Top-level DSI panel object. Created by dsi_panel_get() from a device tree
+ * node; all fields are protected by @panel_lock after creation.
+ */
+struct dsi_panel {
+	const char *name;			/* label from DT, for logging */
+	struct device_node *panel_of_node;
+	struct mipi_dsi_device mipi_device;
+
+	struct mutex panel_lock;		/* serializes all panel ops */
+	struct drm_panel drm_panel;
+	struct mipi_dsi_host *host;		/* set while attached to a host */
+	struct device *parent;
+
+	/* Static host/engine configuration parsed from DT */
+	struct dsi_host_common_cfg host_config;
+	struct dsi_video_engine_cfg video_config;
+	struct dsi_cmd_engine_cfg cmd_config;
+
+	struct dsi_dfps_capabilities dfps_caps;
+
+	struct dsi_panel_cmd_set cmd_sets[DSI_CMD_SET_MAX];
+	struct dsi_panel_phy_props phy_props;
+
+	struct dsi_regulator_info power_info;
+	struct dsi_display_mode mode;		/* single supported mode */
+
+	struct dsi_backlight_config bl_config;
+	struct dsi_panel_reset_config reset_config;
+	struct dsi_pinctrl_info pinctrl;
+
+	struct dsi_panel_dba dba_config;
+
+	/* Keep rails off until lanes are in LP11; power up in prepare() */
+	bool lp11_init;
+};
+
+/* Object lifetime: parse DT and create/destroy the panel object. */
+struct dsi_panel *dsi_panel_get(struct device *parent,
+				struct device_node *of_node);
+void dsi_panel_put(struct dsi_panel *panel);
+
+/* Attach/detach the panel to a DSI host (backlight, gpios, regulators). */
+int dsi_panel_drv_init(struct dsi_panel *panel, struct mipi_dsi_host *host);
+int dsi_panel_drv_deinit(struct dsi_panel *panel);
+
+/* Mode queries and host-configuration helpers. */
+int dsi_panel_get_mode_count(struct dsi_panel *panel, u32 *count);
+int dsi_panel_get_mode(struct dsi_panel *panel,
+		       u32 index,
+		       struct dsi_display_mode *mode);
+int dsi_panel_validate_mode(struct dsi_panel *panel,
+			    struct dsi_display_mode *mode);
+int dsi_panel_get_host_cfg_for_mode(struct dsi_panel *panel,
+				    struct dsi_display_mode *mode,
+				    struct dsi_host_config *config);
+
+int dsi_panel_get_phy_props(struct dsi_panel *panel,
+			    struct dsi_panel_phy_props *phy_props);
+int dsi_panel_get_dfps_caps(struct dsi_panel *panel,
+			    struct dsi_dfps_capabilities *dfps_caps);
+
+/*
+ * Power-state transitions, called in this order on enable:
+ *   pre_prepare -> prepare -> enable -> post_enable
+ * and in this order on disable:
+ *   pre_disable -> disable -> unprepare -> post_unprepare
+ */
+int dsi_panel_pre_prepare(struct dsi_panel *panel);
+
+int dsi_panel_prepare(struct dsi_panel *panel);
+
+int dsi_panel_enable(struct dsi_panel *panel);
+
+int dsi_panel_post_enable(struct dsi_panel *panel);
+
+int dsi_panel_pre_disable(struct dsi_panel *panel);
+
+int dsi_panel_disable(struct dsi_panel *panel);
+
+int dsi_panel_unprepare(struct dsi_panel *panel);
+
+int dsi_panel_post_unprepare(struct dsi_panel *panel);
+
+/* Set backlight level, 0..bl_max_level, via the configured backlight type. */
+int dsi_panel_set_backlight(struct dsi_panel *panel, u32 bl_lvl);
+#endif /* _DSI_PANEL_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
new file mode 100644
index 000000000000..5bcd0d0634b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.c
@@ -0,0 +1,862 @@
+/*
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "msm-dsi-phy:[%s] " fmt, __func__
+
+#include <linux/of_device.h>
+#include <linux/err.h>
+#include <linux/regulator/consumer.h>
+#include <linux/clk.h>
+#include <linux/msm-bus.h>
+#include <linux/list.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_gpu.h"
+#include "dsi_phy.h"
+#include "dsi_phy_hw.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_catalog.h"
+
+#define DSI_PHY_DEFAULT_LABEL "MDSS PHY CTRL"
+
+/* Node type for the global list of probed PHY devices. */
+struct dsi_phy_list_item {
+	struct msm_dsi_phy *phy;
+	struct list_head list;
+};
+
+/* All probed PHYs; looked up by of_node in dsi_phy_get(). */
+static LIST_HEAD(dsi_phy_list);
+/* Protects dsi_phy_list against concurrent probe/remove/get. */
+static DEFINE_MUTEX(dsi_phy_list_lock);
+
+/*
+ * Per-version PHY parameter counts. These drive how many bytes per lane
+ * dsi_phy_parse_dt_per_lane_cfgs() expects for each DT property.
+ */
+static const struct dsi_ver_spec_info dsi_phy_v1_0 = {
+	.version = DSI_PHY_VERSION_1_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v2_0 = {
+	.version = DSI_PHY_VERSION_2_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v3_0 = {
+	.version = DSI_PHY_VERSION_3_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+static const struct dsi_ver_spec_info dsi_phy_v4_0 = {
+	.version = DSI_PHY_VERSION_4_0,
+	.lane_cfg_count = 4,
+	.strength_cfg_count = 2,
+	.regulator_cfg_count = 1,
+	.timing_cfg_count = 8,
+};
+
+/* DT compatibles; .data carries the matching version-specific info above. */
+static const struct of_device_id msm_dsi_phy_of_match[] = {
+	{ .compatible = "qcom,dsi-phy-v1.0",
+	  .data = &dsi_phy_v1_0,},
+	{ .compatible = "qcom,dsi-phy-v2.0",
+	  .data = &dsi_phy_v2_0,},
+	{ .compatible = "qcom,dsi-phy-v3.0",
+	  .data = &dsi_phy_v3_0,},
+	{ .compatible = "qcom,dsi-phy-v4.0",
+	  .data = &dsi_phy_v4_0,},
+	{}
+};
+
+/* Map the "dsi_phy" register region and stash the base in phy->hw. */
+static int dsi_phy_regmap_init(struct platform_device *pdev,
+			       struct msm_dsi_phy *phy)
+{
+	void __iomem *base;
+
+	base = msm_ioremap(pdev, "dsi_phy", phy->name);
+	if (IS_ERR(base))
+		return PTR_ERR(base);
+
+	phy->hw.base = base;
+
+	pr_debug("[%s] map dsi_phy registers to %pK\n",
+		 phy->name, phy->hw.base);
+
+	return 0;
+}
+
+/* Nothing to unmap explicitly: msm_ioremap() is device-managed. */
+static int dsi_phy_regmap_deinit(struct msm_dsi_phy *phy)
+{
+	pr_debug("[%s] unmap registers\n", phy->name);
+
+	return 0;
+}
+
+/* Release the core clocks acquired by dsi_phy_clocks_init(). */
+static int dsi_phy_clocks_deinit(struct msm_dsi_phy *phy)
+{
+	struct device *dev = &phy->pdev->dev;
+	struct dsi_core_clk_info *core = &phy->clks.core_clks;
+
+	/* Released explicitly (not left to devres) so init can be retried */
+	if (core->mdp_core_clk)
+		devm_clk_put(dev, core->mdp_core_clk);
+	if (core->iface_clk)
+		devm_clk_put(dev, core->iface_clk);
+	if (core->core_mmss_clk)
+		devm_clk_put(dev, core->core_mmss_clk);
+	if (core->bus_clk)
+		devm_clk_put(dev, core->bus_clk);
+
+	memset(core, 0x0, sizeof(*core));
+
+	return 0;
+}
+
+/*
+ * Acquire the core clocks needed to access PHY registers. On any failure,
+ * clocks obtained so far are released via dsi_phy_clocks_deinit() (which
+ * tolerates partially-filled state).
+ */
+static int dsi_phy_clocks_init(struct platform_device *pdev,
+			       struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_core_clk_info *core = &phy->clks.core_clks;
+
+	core->mdp_core_clk = devm_clk_get(&pdev->dev, "mdp_core_clk");
+	if (IS_ERR(core->mdp_core_clk)) {
+		rc = PTR_ERR(core->mdp_core_clk);
+		pr_err("failed to get mdp_core_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->iface_clk = devm_clk_get(&pdev->dev, "iface_clk");
+	if (IS_ERR(core->iface_clk)) {
+		rc = PTR_ERR(core->iface_clk);
+		pr_err("failed to get iface_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->core_mmss_clk = devm_clk_get(&pdev->dev, "core_mmss_clk");
+	if (IS_ERR(core->core_mmss_clk)) {
+		rc = PTR_ERR(core->core_mmss_clk);
+		pr_err("failed to get core_mmss_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	core->bus_clk = devm_clk_get(&pdev->dev, "bus_clk");
+	if (IS_ERR(core->bus_clk)) {
+		rc = PTR_ERR(core->bus_clk);
+		pr_err("failed to get bus_clk, rc=%d\n", rc);
+		goto fail;
+	}
+
+	return rc;
+fail:
+	/* NOTE: failed devm_clk_get leaves an ERR_PTR in *core; deinit only
+	 * puts non-NULL handles, and memset clears the ERR_PTR afterwards.
+	 */
+	dsi_phy_clocks_deinit(phy);
+	return rc;
+}
+
+/**
+ * dsi_phy_supplies_init() - acquire digital and analog PHY supplies
+ * @pdev: Platform device for the PHY.
+ * @phy:  PHY object whose pwr_info is populated.
+ *
+ * The digital supply is hard-coded to the "gdsc" regulator; the analog
+ * supplies are parsed from the "qcom,phy-supply-entries" DT node. All
+ * regulators obtained so far are released on any failure.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_phy_supplies_init(struct platform_device *pdev,
+				 struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	int i = 0;
+	struct dsi_regulator_info *regs;
+	struct regulator *vreg = NULL;
+
+	regs = &phy->pwr_info.digital;
+	regs->vregs = devm_kzalloc(&pdev->dev, sizeof(struct dsi_vreg),
+				   GFP_KERNEL);
+	if (!regs->vregs) {
+		/* Was silently returning 0 (success) on allocation failure */
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	regs->count = 1;
+	snprintf(regs->vregs->vreg_name,
+		 ARRAY_SIZE(regs->vregs[i].vreg_name),
+		 "%s", "gdsc");
+
+	rc = dsi_clk_pwr_get_dt_vreg_data(&pdev->dev,
+					  &phy->pwr_info.phy_pwr,
+					  "qcom,phy-supply-entries");
+	if (rc) {
+		pr_err("failed to get phy power supplies, rc = %d\n", rc);
+		goto error_digital;
+	}
+
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			goto error_host_pwr;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	regs = &phy->pwr_info.phy_pwr;
+	for (i = 0; i < regs->count; i++) {
+		vreg = devm_regulator_get(&pdev->dev, regs->vregs[i].vreg_name);
+		rc = PTR_RET(vreg);
+		if (rc) {
+			pr_err("failed to get %s regulator\n",
+			       regs->vregs[i].vreg_name);
+			/* Put only the phy_pwr regulators obtained so far */
+			for (--i; i >= 0; i--)
+				devm_regulator_put(regs->vregs[i].vreg);
+			goto error_digital_put;
+		}
+		regs->vregs[i].vreg = vreg;
+	}
+
+	return rc;
+
+error_digital_put:
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++)
+		devm_regulator_put(regs->vregs[i].vreg);
+error_host_pwr:
+	devm_kfree(&pdev->dev, phy->pwr_info.phy_pwr.vregs);
+	phy->pwr_info.phy_pwr.vregs = NULL;
+	phy->pwr_info.phy_pwr.count = 0;
+error_digital:
+	devm_kfree(&pdev->dev, phy->pwr_info.digital.vregs);
+	phy->pwr_info.digital.vregs = NULL;
+	phy->pwr_info.digital.count = 0;
+error:
+	return rc;
+}
+
+/*
+ * Release all regulators and the vreg arrays acquired by
+ * dsi_phy_supplies_init(). Safe to call with partially-initialized state.
+ */
+static int dsi_phy_supplies_deinit(struct msm_dsi_phy *phy)
+{
+	int i = 0;
+	int rc = 0;
+	struct dsi_regulator_info *regs;
+
+	regs = &phy->pwr_info.digital;
+	for (i = 0; i < regs->count; i++) {
+		/* A NULL vreg with nonzero count indicates a logic error */
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	regs = &phy->pwr_info.phy_pwr;
+	for (i = 0; i < regs->count; i++) {
+		if (!regs->vregs[i].vreg)
+			pr_err("vreg is NULL, should not reach here\n");
+		else
+			devm_regulator_put(regs->vregs[i].vreg);
+	}
+
+	if (phy->pwr_info.phy_pwr.vregs) {
+		devm_kfree(&phy->pdev->dev, phy->pwr_info.phy_pwr.vregs);
+		phy->pwr_info.phy_pwr.vregs = NULL;
+		phy->pwr_info.phy_pwr.count = 0;
+	}
+	if (phy->pwr_info.digital.vregs) {
+		devm_kfree(&phy->pdev->dev, phy->pwr_info.digital.vregs);
+		phy->pwr_info.digital.vregs = NULL;
+		phy->pwr_info.digital.count = 0;
+	}
+
+	return rc;
+}
+
+/*
+ * Read a per-lane byte table from the DT @property into @cfg->lane.
+ * The property must contain exactly DSI_LANE_MAX * count_per_lane bytes,
+ * laid out lane-major (all bytes for lane 0, then lane 1, ...).
+ */
+static int dsi_phy_parse_dt_per_lane_cfgs(struct platform_device *pdev,
+					  struct dsi_phy_per_lane_cfgs *cfg,
+					  char *property)
+{
+	int lane, k;
+	u32 len = 0;
+	const u8 *val;
+
+	val = of_get_property(pdev->dev.of_node, property, &len);
+	if (!val) {
+		pr_err("Unable to read Phy %s settings\n", property);
+		return -EINVAL;
+	}
+
+	if (len != DSI_LANE_MAX * cfg->count_per_lane) {
+		pr_err("incorrect phy %s settings, exp=%d, act=%d\n",
+		       property, (DSI_LANE_MAX * cfg->count_per_lane), len);
+		return -EINVAL;
+	}
+
+	for (lane = DSI_LOGICAL_LANE_0; lane < DSI_LANE_MAX; lane++)
+		for (k = 0; k < cfg->count_per_lane; k++)
+			cfg->lane[lane][k] = *val++;
+
+	return 0;
+}
+
+/**
+ * dsi_phy_settings_init() - parse per-lane PHY settings from device tree
+ * @pdev: Platform device for the PHY.
+ * @phy:  PHY object; phy->ver_info must already be set.
+ *
+ * Parses lane, strength and regulator tables from DT. Timing values are
+ * panel-dependent, so only their expected size is recorded here. On error
+ * all count_per_lane fields are reset to zero.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int dsi_phy_settings_init(struct platform_device *pdev,
+				 struct msm_dsi_phy *phy)
+{
+	int rc = 0;
+	struct dsi_phy_per_lane_cfgs *lane = &phy->cfg.lanecfg;
+	struct dsi_phy_per_lane_cfgs *strength = &phy->cfg.strength;
+	struct dsi_phy_per_lane_cfgs *timing = &phy->cfg.timing;
+	struct dsi_phy_per_lane_cfgs *regs = &phy->cfg.regulators;
+
+	lane->count_per_lane = phy->ver_info->lane_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, lane,
+					    "qcom,platform-lane-config");
+	if (rc) {
+		pr_err("failed to parse lane cfgs, rc=%d\n", rc);
+		goto err;
+	}
+
+	strength->count_per_lane = phy->ver_info->strength_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, strength,
+					    "qcom,platform-strength-ctrl");
+	if (rc) {
+		pr_err("failed to parse strength ctrl, rc=%d\n", rc);
+		goto err;
+	}
+
+	regs->count_per_lane = phy->ver_info->regulator_cfg_count;
+	rc = dsi_phy_parse_dt_per_lane_cfgs(pdev, regs,
+					    "qcom,platform-regulator-settings");
+	if (rc) {
+		pr_err("failed to parse regulator settings, rc=%d\n", rc);
+		goto err;
+	}
+
+	/* Actual timing values are dependent on panel */
+	timing->count_per_lane = phy->ver_info->timing_cfg_count;
+
+	/* Success must not fall through into the error cleanup below,
+	 * which would zero out the settings we just parsed.
+	 */
+	return 0;
+
+err:
+	lane->count_per_lane = 0;
+	strength->count_per_lane = 0;
+	regs->count_per_lane = 0;
+	timing->count_per_lane = 0;
+	return rc;
+}
+
+/* Drop all parsed per-lane settings; counts are reset to zero as well. */
+static int dsi_phy_settings_deinit(struct msm_dsi_phy *phy)
+{
+	memset(&phy->cfg.regulators, 0x0, sizeof(phy->cfg.regulators));
+	memset(&phy->cfg.timing, 0x0, sizeof(phy->cfg.timing));
+	memset(&phy->cfg.strength, 0x0, sizeof(phy->cfg.strength));
+	memset(&phy->cfg.lanecfg, 0x0, sizeof(phy->cfg.lanecfg));
+	return 0;
+}
+
+/*
+ * Probe a DSI PHY device: match the DT compatible to version info, map
+ * registers, acquire clocks and supplies, hook up the version-specific
+ * catalog ops, parse per-lane settings, then publish the PHY on the
+ * global list so displays can find it via dsi_phy_get().
+ */
+static int dsi_phy_driver_probe(struct platform_device *pdev)
+{
+	struct msm_dsi_phy *dsi_phy;
+	struct dsi_phy_list_item *item;
+	const struct of_device_id *id;
+	const struct dsi_ver_spec_info *ver_info;
+	int rc = 0;
+	u32 index = 0;
+
+	if (!pdev || !pdev->dev.of_node) {
+		pr_err("pdev not found\n");
+		return -ENODEV;
+	}
+
+	id = of_match_node(msm_dsi_phy_of_match, pdev->dev.of_node);
+	if (!id)
+		return -ENODEV;
+
+	/* Per-version parameter counts attached to the matched compatible */
+	ver_info = id->data;
+
+	item = devm_kzalloc(&pdev->dev, sizeof(*item), GFP_KERNEL);
+	if (!item)
+		return -ENOMEM;
+
+
+	dsi_phy = devm_kzalloc(&pdev->dev, sizeof(*dsi_phy), GFP_KERNEL);
+	if (!dsi_phy) {
+		devm_kfree(&pdev->dev, item);
+		return -ENOMEM;
+	}
+
+	rc = of_property_read_u32(pdev->dev.of_node, "cell-index", &index);
+	if (rc) {
+		pr_debug("cell index not set, default to 0\n");
+		index = 0;
+	}
+
+	dsi_phy->index = index;
+
+	dsi_phy->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!dsi_phy->name)
+		dsi_phy->name = DSI_PHY_DEFAULT_LABEL;
+
+	pr_debug("Probing %s device\n", dsi_phy->name);
+
+	rc = dsi_phy_regmap_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("Failed to parse register information, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = dsi_phy_clocks_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("failed to parse clock information, rc = %d\n", rc);
+		goto fail_regmap;
+	}
+
+	rc = dsi_phy_supplies_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("failed to parse voltage supplies, rc = %d\n", rc);
+		goto fail_clks;
+	}
+
+	/* Install version-specific hardware ops into dsi_phy->hw */
+	rc = dsi_catalog_phy_setup(&dsi_phy->hw, ver_info->version,
+				   dsi_phy->index);
+	if (rc) {
+		pr_err("Catalog does not support version (%d)\n",
+		       ver_info->version);
+		goto fail_supplies;
+	}
+
+	/* settings_init reads ver_info for per-lane table sizes */
+	dsi_phy->ver_info = ver_info;
+	rc = dsi_phy_settings_init(pdev, dsi_phy);
+	if (rc) {
+		pr_err("Failed to parse phy setting, rc=%d\n", rc);
+		goto fail_supplies;
+	}
+
+	item->phy = dsi_phy;
+
+	/* Publish the fully-initialized PHY for dsi_phy_get() lookups */
+	mutex_lock(&dsi_phy_list_lock);
+	list_add(&item->list, &dsi_phy_list);
+	mutex_unlock(&dsi_phy_list_lock);
+
+	mutex_init(&dsi_phy->phy_lock);
+	/** TODO: initialize debugfs */
+	dsi_phy->pdev = pdev;
+	platform_set_drvdata(pdev, dsi_phy);
+	pr_debug("Probe successful for %s\n", dsi_phy->name);
+	return 0;
+
+fail_supplies:
+	(void)dsi_phy_supplies_deinit(dsi_phy);
+fail_clks:
+	(void)dsi_phy_clocks_deinit(dsi_phy);
+fail_regmap:
+	(void)dsi_phy_regmap_deinit(dsi_phy);
+fail:
+	devm_kfree(&pdev->dev, dsi_phy);
+	devm_kfree(&pdev->dev, item);
+	return rc;
+}
+
+/*
+ * Remove a DSI PHY device: unpublish it from the global list, then tear
+ * down settings, supplies, clocks and register mappings in reverse order
+ * of probe. Teardown failures are logged but do not abort removal.
+ */
+static int dsi_phy_driver_remove(struct platform_device *pdev)
+{
+	int rc = 0;
+	struct msm_dsi_phy *phy = platform_get_drvdata(pdev);
+	struct list_head *pos, *tmp;
+
+	if (!pdev || !phy) {
+		pr_err("Invalid device\n");
+		return -EINVAL;
+	}
+
+	/* Unpublish first so no new dsi_phy_get() can find this PHY */
+	mutex_lock(&dsi_phy_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_phy_list) {
+		struct dsi_phy_list_item *n;
+
+		n = list_entry(pos, struct dsi_phy_list_item, list);
+		if (n->phy == phy) {
+			list_del(&n->list);
+			devm_kfree(&pdev->dev, n);
+			break;
+		}
+	}
+	mutex_unlock(&dsi_phy_list_lock);
+
+	mutex_lock(&phy->phy_lock);
+	rc = dsi_phy_settings_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize phy settings, rc=%d\n", rc);
+
+	rc = dsi_phy_supplies_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize voltage supplies, rc=%d\n", rc);
+
+	rc = dsi_phy_clocks_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize clocks, rc=%d\n", rc);
+
+	rc = dsi_phy_regmap_deinit(phy);
+	if (rc)
+		pr_err("failed to deinitialize regmap, rc=%d\n", rc);
+	mutex_unlock(&phy->phy_lock);
+
+	mutex_destroy(&phy->phy_lock);
+	devm_kfree(&pdev->dev, phy);
+
+	platform_set_drvdata(pdev, NULL);
+
+	return 0;
+}
+
+/* Registered/unregistered via dsi_phy_drv_register()/_unregister(). */
+static struct platform_driver dsi_phy_platform_driver = {
+	.probe = dsi_phy_driver_probe,
+	.remove = dsi_phy_driver_remove,
+	.driver = {
+		.name = "dsi_phy",
+		.of_match_table = msm_dsi_phy_of_match,
+	},
+};
+
+/* Program the PHY hardware on; regulators come up before the PHY block. */
+static void dsi_phy_enable_hw(struct msm_dsi_phy *phy)
+{
+	struct dsi_phy_hw *hw = &phy->hw;
+
+	if (hw->ops.regulator_enable)
+		hw->ops.regulator_enable(hw, &phy->cfg.regulators);
+
+	if (hw->ops.enable)
+		hw->ops.enable(hw, &phy->cfg);
+}
+
+/* Program the PHY hardware off; the PHY block goes down before regulators. */
+static void dsi_phy_disable_hw(struct msm_dsi_phy *phy)
+{
+	struct dsi_phy_hw *hw = &phy->hw;
+
+	if (hw->ops.disable)
+		hw->ops.disable(hw);
+
+	if (hw->ops.regulator_disable)
+		hw->ops.regulator_disable(hw);
+}
+
+/**
+ * dsi_phy_get() - get a dsi phy handle from device node
+ * @of_node: device node for dsi phy controller
+ *
+ * Gets the DSI PHY handle for the corresponding of_node. The ref count is
+ * incremented to one; all subsequent gets will fail until the original
+ * client calls a put.
+ *
+ * Return: DSI PHY handle, ERR_PTR(-EPROBE_DEFER) if the PHY has not been
+ * probed yet, or ERR_PTR(-EINVAL) if it is already in use.
+ */
+struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node)
+{
+	struct list_head *pos, *tmp;
+	struct msm_dsi_phy *phy = NULL;
+
+	mutex_lock(&dsi_phy_list_lock);
+	list_for_each_safe(pos, tmp, &dsi_phy_list) {
+		struct dsi_phy_list_item *n;
+
+		n = list_entry(pos, struct dsi_phy_list_item, list);
+		if (n->phy->pdev->dev.of_node == of_node) {
+			phy = n->phy;
+			break;
+		}
+	}
+	mutex_unlock(&dsi_phy_list_lock);
+
+	if (!phy) {
+		pr_err("Device with of node not found\n");
+		/* The PHY may simply not have probed yet; ask for a retry */
+		phy = ERR_PTR(-EPROBE_DEFER);
+		return phy;
+	}
+
+	/* Exclusive ownership: only one client may hold the PHY at a time */
+	mutex_lock(&phy->phy_lock);
+	if (phy->refcount > 0) {
+		pr_err("[PHY_%d] Device under use\n", phy->index);
+		phy = ERR_PTR(-EINVAL);
+	} else {
+		phy->refcount++;
+	}
+	mutex_unlock(&phy->phy_lock);
+	return phy;
+}
+
+/**
+ * dsi_phy_put() - release dsi phy handle
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Drops the exclusive reference taken by dsi_phy_get(), making the PHY
+ * available to other clients. An unbalanced put is logged and ignored.
+ */
+void dsi_phy_put(struct msm_dsi_phy *dsi_phy)
+{
+	mutex_lock(&dsi_phy->phy_lock);
+
+	if (dsi_phy->refcount > 0)
+		dsi_phy->refcount--;
+	else
+		pr_err("Unbalanced dsi_phy_put call\n");
+
+	mutex_unlock(&dsi_phy->phy_lock);
+}
+
+/**
+ * dsi_phy_drv_init() - initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Initializes DSI PHY driver. Should be called after dsi_phy_get().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy)
+{
+	/* No per-client driver state yet; placeholder for symmetry */
+	return 0;
+}
+
+/**
+ * dsi_phy_drv_deinit() - de-initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Release all resources acquired by dsi_phy_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy)
+{
+	/* Nothing acquired in dsi_phy_drv_init(); nothing to release */
+	return 0;
+}
+
+/**
+ * dsi_phy_validate_mode() - validate a display mode
+ * @dsi_phy: DSI PHY handle.
+ * @mode: Mode information.
+ *
+ * Validation will fail if the mode cannot be supported by the PHY driver or
+ * hardware. TODO: validation is currently a no-op and always succeeds.
+ *
+ * Return: error code.
+ */
+int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
+			  struct dsi_mode_info *mode)
+{
+	if (!dsi_phy || !mode) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_phy->phy_lock);
+	pr_debug("[PHY_%d] Skipping validation\n", dsi_phy->index);
+	mutex_unlock(&dsi_phy->phy_lock);
+
+	return 0;
+}
+
+/**
+ * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
+ * @dsi_phy: DSI PHY handle.
+ * @enable: Boolean flag to enable/disable.
+ *
+ * On enable, the digital supply is brought up before the analog PHY
+ * supplies; on disable the order is reversed. A request that matches the
+ * current state is rejected.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable)
+{
+	int rc = 0;
+
+	if (!dsi_phy) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&dsi_phy->phy_lock);
+
+	if (enable == dsi_phy->power_state) {
+		pr_err("[PHY_%d] No state change\n", dsi_phy->index);
+		goto error;
+	}
+
+	if (enable) {
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital, true);
+		if (rc) {
+			pr_err("failed to enable digital regulator\n");
+			goto error;
+		}
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.phy_pwr, true);
+		if (rc) {
+			pr_err("failed to enable phy power\n");
+			/* Roll back the digital supply on partial failure */
+			(void)dsi_pwr_enable_regulator(
+				&dsi_phy->pwr_info.digital,
+				false
+				);
+			goto error;
+		}
+	} else {
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.phy_pwr,
+					      false);
+		if (rc) {
+			pr_err("failed to disable phy power\n");
+			goto error;
+		}
+		rc = dsi_pwr_enable_regulator(&dsi_phy->pwr_info.digital,
+					      false);
+		if (rc) {
+			pr_err("failed to disable digital regulator\n");
+			goto error;
+		}
+	}
+
+	dsi_phy->power_state = enable;
+error:
+	mutex_unlock(&dsi_phy->phy_lock);
+	return rc;
+}
+
+/**
+ * dsi_phy_enable() - enable DSI PHY hardware
+ * @phy: DSI PHY handle.
+ * @config: DSI host configuration.
+ * @pll_source: Source PLL for PHY clock.
+ * @skip_validation: Validation will not be performed on parameters.
+ * @cont_splash_enabled: Skip hardware programming; bootloader already
+ *                       enabled the PHY for continuous splash.
+ *
+ * Validates and enables DSI PHY. Core clocks are turned on only for the
+ * duration of the register programming.
+ *
+ * Return: error code.
+ */
+int dsi_phy_enable(struct msm_dsi_phy *phy,
+		   struct dsi_host_config *config,
+		   enum dsi_phy_pll_source pll_source,
+		   bool skip_validation,
+		   bool cont_splash_enabled)
+{
+	int rc = 0;
+	int clk_rc;
+
+	if (!phy || !config) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phy->phy_lock);
+
+	if (!skip_validation)
+		pr_debug("[PHY_%d] TODO: perform validation\n", phy->index);
+
+	rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, true);
+	if (rc) {
+		pr_err("failed to enable core clocks, rc=%d\n", rc);
+		goto error;
+	}
+
+	memcpy(&phy->mode, &config->video_timing, sizeof(phy->mode));
+	phy->data_lanes = config->common_config.data_lanes;
+	phy->dst_format = config->common_config.dst_format;
+	phy->lane_map = config->lane_map;
+	phy->cfg.pll_source = pll_source;
+
+	rc = phy->hw.ops.calculate_timing_params(&phy->hw,
+						 &phy->mode,
+						 &config->common_config,
+						 &phy->cfg.timing);
+	if (rc) {
+		pr_err("[%s] failed to set timing, rc=%d\n", phy->name, rc);
+		goto error_disable_clks;
+	}
+
+	if (!cont_splash_enabled)
+		dsi_phy_enable_hw(phy);
+
+error_disable_clks:
+	/* Use a separate rc so a clean clock disable cannot mask an
+	 * earlier timing-calculation failure.
+	 */
+	clk_rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
+	if (clk_rc) {
+		pr_err("failed to disable clocks, skip phy disable\n");
+		if (!rc)
+			rc = clk_rc;
+	}
+error:
+	mutex_unlock(&phy->phy_lock);
+	return rc;
+}
+
+/**
+ * dsi_phy_disable() - disable DSI PHY hardware.
+ * @phy: DSI PHY handle.
+ *
+ * Core clocks are turned on only for the duration of the register
+ * programming that powers the PHY block down.
+ *
+ * Return: error code.
+ */
+int dsi_phy_disable(struct msm_dsi_phy *phy)
+{
+	int rc;
+
+	if (!phy) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phy->phy_lock);
+
+	rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, true);
+	if (rc) {
+		pr_err("failed to enable core clocks, rc=%d\n", rc);
+		goto unlock;
+	}
+
+	dsi_phy_disable_hw(phy);
+
+	rc = dsi_clk_enable_core_clks(&phy->clks.core_clks, false);
+	if (rc)
+		pr_err("failed to disable core clocks, rc=%d\n", rc);
+
+unlock:
+	mutex_unlock(&phy->phy_lock);
+	return rc;
+}
+
+/**
+ * dsi_phy_set_timing_params() - timing parameters for the panel
+ * @phy: DSI PHY handle
+ * @timing: array holding timing params.
+ * @size: size of the array.
+ *
+ * When PHY timing calculator is not implemented, this array will be used to
+ * pass PHY timing information.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
+			      u8 *timing, u32 size)
+{
+	int rc = 0;
+	int lane, k;
+	struct dsi_phy_per_lane_cfgs *cfg;
+
+	if (!phy || !timing || !size) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&phy->phy_lock);
+
+	cfg = &phy->cfg.timing;
+	if (size == (DSI_LANE_MAX * cfg->count_per_lane)) {
+		/* Input is lane-major: all bytes for lane 0, then lane 1 */
+		for (lane = DSI_LOGICAL_LANE_0; lane < DSI_LANE_MAX; lane++)
+			for (k = 0; k < cfg->count_per_lane; k++)
+				cfg->lane[lane][k] = *timing++;
+	} else {
+		pr_err("Unexpected timing array size %d\n", size);
+		rc = -EINVAL;
+	}
+
+	mutex_unlock(&phy->phy_lock);
+	return rc;
+}
+
+/* Register the PHY platform driver; called from the msm_drv module init. */
+void dsi_phy_drv_register(void)
+{
+	platform_driver_register(&dsi_phy_platform_driver);
+}
+
+/* Unregister the PHY platform driver; called from module exit. */
+void dsi_phy_drv_unregister(void)
+{
+	platform_driver_unregister(&dsi_phy_platform_driver);
+}
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
new file mode 100644
index 000000000000..aa21d0b347e8
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (c) 2016, 2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_H_
+#define _DSI_PHY_H_
+
+#include "dsi_defs.h"
+#include "dsi_clk_pwr.h"
+#include "dsi_phy_hw.h"
+
/**
 * struct dsi_ver_spec_info - version specific parameters for a DSI PHY
 * @version:             DSI PHY version.
 * @lane_cfg_count:      Number of lane configuration settings.
 * @strength_cfg_count:  Number of strength settings.
 * @regulator_cfg_count: Number of regulator settings.
 * @timing_cfg_count:    Number of timing parameters.
 */
struct dsi_ver_spec_info {
	enum dsi_phy_version version;
	u32 lane_cfg_count;
	u32 strength_cfg_count;
	u32 regulator_cfg_count;
	u32 timing_cfg_count;
};
+
+/**
+ * struct dsi_phy_clk_info - clock information for DSI controller
+ * @core_clks: Core clocks needed to access PHY registers.
+ */
+struct dsi_phy_clk_info {
+ struct dsi_core_clk_info core_clks;
+};
+
+/**
+ * struct dsi_phy_power_info - digital and analog power supplies for DSI PHY
+ * @digital: Digital power supply for DSI PHY.
+ * @phy_pwr: Analog power supplies for DSI PHY to work.
+ */
+struct dsi_phy_power_info {
+ struct dsi_regulator_info digital;
+ struct dsi_regulator_info phy_pwr;
+};
+
/**
 * struct msm_dsi_phy - DSI PHY object
 * @pdev:        Pointer to platform device.
 * @index:       Instance id.
 * @name:        Name of the PHY instance.
 * @refcount:    Reference count.
 * @phy_lock:    Mutex for hardware and object access.
 * @ver_info:    Version specific phy parameters.
 * @hw:          DSI PHY hardware object.
 * @clks:        Clocks needed to access the PHY registers.
 * @pwr_info:    Digital and analog power supplies for the PHY.
 * @cfg:         DSI phy configuration.
 * @power_state: True if PHY is powered on.
 * @mode:        Current mode.
 * @data_lanes:  Number of data lanes used.
 * @dst_format:  Destination format.
 * @lane_map:    Map between logical and physical lanes.
 */
struct msm_dsi_phy {
	struct platform_device *pdev;
	int index;
	const char *name;
	u32 refcount;
	struct mutex phy_lock;

	const struct dsi_ver_spec_info *ver_info;
	struct dsi_phy_hw hw;

	struct dsi_phy_clk_info clks;
	struct dsi_phy_power_info pwr_info;

	struct dsi_phy_cfg cfg;

	bool power_state;
	struct dsi_mode_info mode;
	enum dsi_data_lanes data_lanes;
	enum dsi_pixel_format dst_format;
	struct dsi_lane_mapping lane_map;
};
+
/**
 * dsi_phy_get() - get a dsi phy handle from device node
 * @of_node: device node for dsi phy controller
 *
 * Gets the DSI PHY handle for the corresponding of_node. The ref count is
 * incremented to one; all subsequent gets will fail until the original
 * client calls a put.
 *
 * Return: DSI PHY handle or an error code.
 */
struct msm_dsi_phy *dsi_phy_get(struct device_node *of_node);
+
+/**
+ * dsi_phy_put() - release dsi phy handle
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Release the DSI PHY hardware. Driver will clean up all resources and puts
+ * back the DSI PHY into reset state.
+ */
+void dsi_phy_put(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_drv_init() - initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Initializes DSI PHY driver. Should be called after dsi_phy_get().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_init(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_drv_deinit() - de-initialize dsi phy driver
+ * @dsi_phy: DSI PHY handle.
+ *
+ * Release all resources acquired by dsi_phy_drv_init().
+ *
+ * Return: error code.
+ */
+int dsi_phy_drv_deinit(struct msm_dsi_phy *dsi_phy);
+
+/**
+ * dsi_phy_validate_mode() - validate a display mode
+ * @dsi_phy: DSI PHY handle.
+ * @mode: Mode information.
+ *
+ * Validation will fail if the mode cannot be supported by the PHY driver or
+ * hardware.
+ *
+ * Return: error code.
+ */
+int dsi_phy_validate_mode(struct msm_dsi_phy *dsi_phy,
+ struct dsi_mode_info *mode);
+
+/**
+ * dsi_phy_set_power_state() - enable/disable dsi phy power supplies
+ * @dsi_phy: DSI PHY handle.
+ * @enable: Boolean flag to enable/disable.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_power_state(struct msm_dsi_phy *dsi_phy, bool enable);
+
/**
 * dsi_phy_enable() - enable DSI PHY hardware
 * @dsi_phy:             DSI PHY handle.
 * @config:              DSI host configuration.
 * @pll_source:          Source PLL for PHY clock.
 * @skip_validation:     Validation will not be performed on parameters.
 * @cont_splash_enabled: Presumably true when the bootloader left the display
 *                       running (continuous splash) — NOTE(review): confirm
 *                       exact semantics against the implementation.
 *
 * Validates and enables DSI PHY.
 *
 * Return: error code.
 */
int dsi_phy_enable(struct msm_dsi_phy *dsi_phy,
		   struct dsi_host_config *config,
		   enum dsi_phy_pll_source pll_source,
		   bool skip_validation,
		   bool cont_splash_enabled);
+
+/**
+ * dsi_phy_disable() - disable DSI PHY hardware.
+ * @phy: DSI PHY handle.
+ *
+ * Return: error code.
+ */
+int dsi_phy_disable(struct msm_dsi_phy *phy);
+
+/**
+ * dsi_phy_set_timing_params() - timing parameters for the panel
+ * @phy: DSI PHY handle
+ * @timing: array holding timing params.
+ * @size: size of the array.
+ *
+ * When PHY timing calculator is not implemented, this array will be used to
+ * pass PHY timing information.
+ *
+ * Return: error code.
+ */
+int dsi_phy_set_timing_params(struct msm_dsi_phy *phy,
+ u8 *timing, u32 size);
+
+/**
+ * dsi_phy_drv_register() - register platform driver for dsi phy
+ */
+void dsi_phy_drv_register(void);
+
+/**
+ * dsi_phy_drv_unregister() - unregister platform driver
+ */
+void dsi_phy_drv_unregister(void);
+
+#endif /* _DSI_PHY_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
new file mode 100644
index 000000000000..5edfd5e62738
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw.h
@@ -0,0 +1,164 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DSI_PHY_HW_H_
+#define _DSI_PHY_HW_H_
+
+#include "dsi_defs.h"
+
+#define DSI_MAX_SETTINGS 8
+
+/**
+ * enum dsi_phy_version - DSI PHY version enumeration
+ * @DSI_PHY_VERSION_UNKNOWN: Unknown version.
+ * @DSI_PHY_VERSION_1_0: 28nm-HPM.
+ * @DSI_PHY_VERSION_2_0: 28nm-LPM.
+ * @DSI_PHY_VERSION_3_0: 20nm.
+ * @DSI_PHY_VERSION_4_0: 14nm.
+ * @DSI_PHY_VERSION_MAX:
+ */
+enum dsi_phy_version {
+ DSI_PHY_VERSION_UNKNOWN,
+ DSI_PHY_VERSION_1_0, /* 28nm-HPM */
+ DSI_PHY_VERSION_2_0, /* 28nm-LPM */
+ DSI_PHY_VERSION_3_0, /* 20nm */
+ DSI_PHY_VERSION_4_0, /* 14nm */
+ DSI_PHY_VERSION_MAX
+};
+
/**
 * enum dsi_phy_hw_features - features supported by DSI PHY hardware
 * @DSI_PHY_DPHY:         Supports DPHY
 * @DSI_PHY_CPHY:         Supports CPHY
 * @DSI_PHY_MAX_FEATURES: Number of features; sizes the feature bitmap.
 */
enum dsi_phy_hw_features {
	DSI_PHY_DPHY,
	DSI_PHY_CPHY,
	DSI_PHY_MAX_FEATURES
};
+
+/**
+ * enum dsi_phy_pll_source - pll clock source for PHY.
+ * @DSI_PLL_SOURCE_STANDALONE: Clock is sourced from native PLL and is not
+ * shared by other PHYs.
+ * @DSI_PLL_SOURCE_NATIVE: Clock is sourced from native PLL and is
+ * shared by other PHYs.
+ * @DSI_PLL_SOURCE_NON_NATIVE: Clock is sourced from other PHYs.
+ * @DSI_PLL_SOURCE_MAX:
+ */
+enum dsi_phy_pll_source {
+ DSI_PLL_SOURCE_STANDALONE = 0,
+ DSI_PLL_SOURCE_NATIVE,
+ DSI_PLL_SOURCE_NON_NATIVE,
+ DSI_PLL_SOURCE_MAX
+};
+
+/**
+ * struct dsi_phy_per_lane_cfgs - Holds register values for PHY parameters
+ * @lane: A set of maximum 8 values for each lane.
+ * @count_per_lane: Number of values per each lane.
+ */
+struct dsi_phy_per_lane_cfgs {
+ u8 lane[DSI_LANE_MAX][DSI_MAX_SETTINGS];
+ u32 count_per_lane;
+};
+
+/**
+ * struct dsi_phy_cfg - DSI PHY configuration
+ * @lanecfg: Lane configuration settings.
+ * @strength: Strength settings for lanes.
+ * @timing: Timing parameters for lanes.
+ * @regulators: Regulator settings for lanes.
+ * @pll_source: PLL source.
+ */
+struct dsi_phy_cfg {
+ struct dsi_phy_per_lane_cfgs lanecfg;
+ struct dsi_phy_per_lane_cfgs strength;
+ struct dsi_phy_per_lane_cfgs timing;
+ struct dsi_phy_per_lane_cfgs regulators;
+ enum dsi_phy_pll_source pll_source;
+};
+
+struct dsi_phy_hw;
+
+/**
+ * struct dsi_phy_hw_ops - Operations for DSI PHY hardware.
+ * @regulator_enable: Enable PHY regulators.
+ * @regulator_disable: Disable PHY regulators.
+ * @enable: Enable PHY.
+ * @disable: Disable PHY.
+ * @calculate_timing_params: Calculate PHY timing params from mode information
+ */
+struct dsi_phy_hw_ops {
+ /**
+ * regulator_enable() - enable regulators for DSI PHY
+ * @phy: Pointer to DSI PHY hardware object.
+ * @reg_cfg: Regulator configuration for all DSI lanes.
+ */
+ void (*regulator_enable)(struct dsi_phy_hw *phy,
+ struct dsi_phy_per_lane_cfgs *reg_cfg);
+
+ /**
+ * regulator_disable() - disable regulators
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+ void (*regulator_disable)(struct dsi_phy_hw *phy);
+
+ /**
+ * enable() - Enable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ * @cfg: Per lane configurations for timing, strength and lane
+ * configurations.
+ */
+ void (*enable)(struct dsi_phy_hw *phy, struct dsi_phy_cfg *cfg);
+
+ /**
+ * disable() - Disable PHY hardware
+ * @phy: Pointer to DSI PHY hardware object.
+ */
+ void (*disable)(struct dsi_phy_hw *phy);
+
+ /**
+ * calculate_timing_params() - calculates timing parameters.
+ * @phy: Pointer to DSI PHY hardware object.
+ * @mode: Mode information for which timing has to be calculated.
+ * @config: DSI host configuration for this mode.
+ * @timing: Timing parameters for each lane which will be returned.
+ */
+ int (*calculate_timing_params)(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *config,
+ struct dsi_phy_per_lane_cfgs *timing);
+};
+
+/**
+ * struct dsi_phy_hw - DSI phy hardware object specific to an instance
+ * @base: VA for the DSI PHY base address.
+ * @length: Length of the DSI PHY register base map.
+ * @index: Instance ID of the controller.
+ * @version: DSI PHY version.
+ * @feature_map: Features supported by DSI PHY.
+ * @ops: Function pointer to PHY operations.
+ */
+struct dsi_phy_hw {
+ void __iomem *base;
+ u32 length;
+ u32 index;
+
+ enum dsi_phy_version version;
+
+ DECLARE_BITMAP(feature_map, DSI_PHY_MAX_FEATURES);
+ struct dsi_phy_hw_ops ops;
+};
+
+#endif /* _DSI_PHY_HW_H_ */
diff --git a/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
new file mode 100644
index 000000000000..512352d96f98
--- /dev/null
+++ b/drivers/gpu/drm/msm/dsi-staging/dsi_phy_hw_v4_0.c
@@ -0,0 +1,858 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "dsi-phy-hw:" fmt
+#include <linux/math64.h>
+#include <linux/delay.h>
+#include "dsi_hw.h"
+#include "dsi_phy_hw.h"
+
+#define DSIPHY_CMN_REVISION_ID0 0x0000
+#define DSIPHY_CMN_REVISION_ID1 0x0004
+#define DSIPHY_CMN_REVISION_ID2 0x0008
+#define DSIPHY_CMN_REVISION_ID3 0x000C
+#define DSIPHY_CMN_CLK_CFG0 0x0010
+#define DSIPHY_CMN_CLK_CFG1 0x0014
+#define DSIPHY_CMN_GLBL_TEST_CTRL 0x0018
+#define DSIPHY_CMN_CTRL_0 0x001C
+#define DSIPHY_CMN_CTRL_1 0x0020
+#define DSIPHY_CMN_CAL_HW_TRIGGER 0x0024
+#define DSIPHY_CMN_CAL_SW_CFG0 0x0028
+#define DSIPHY_CMN_CAL_SW_CFG1 0x002C
+#define DSIPHY_CMN_CAL_SW_CFG2 0x0030
+#define DSIPHY_CMN_CAL_HW_CFG0 0x0034
+#define DSIPHY_CMN_CAL_HW_CFG1 0x0038
+#define DSIPHY_CMN_CAL_HW_CFG2 0x003C
+#define DSIPHY_CMN_CAL_HW_CFG3 0x0040
+#define DSIPHY_CMN_CAL_HW_CFG4 0x0044
+#define DSIPHY_CMN_PLL_CNTRL 0x0048
+#define DSIPHY_CMN_LDO_CNTRL 0x004C
+
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS0 0x0064
+#define DSIPHY_CMN_REGULATOR_CAL_STATUS1 0x0068
+
+/* n = 0..3 for data lanes and n = 4 for clock lane */
+#define DSIPHY_DLNX_CFG0(n) (0x100 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG1(n) (0x104 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG2(n) (0x108 + ((n) * 0x80))
+#define DSIPHY_DLNX_CFG3(n) (0x10C + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_DATAPATH(n) (0x110 + ((n) * 0x80))
+#define DSIPHY_DLNX_TEST_STR(n) (0x114 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_4(n) (0x118 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_5(n) (0x11C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_6(n) (0x120 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_7(n) (0x124 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_8(n) (0x128 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_9(n) (0x12C + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_10(n) (0x130 + ((n) * 0x80))
+#define DSIPHY_DLNX_TIMING_CTRL_11(n) (0x134 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_0(n) (0x138 + ((n) * 0x80))
+#define DSIPHY_DLNX_STRENGTH_CTRL_1(n) (0x13C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_POLY(n) (0x140 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED0(n) (0x144 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SEED1(n) (0x148 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_HEAD(n) (0x14C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_SOT(n) (0x150 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL0(n) (0x154 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL1(n) (0x158 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL2(n) (0x15C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_CTRL3(n) (0x160 + ((n) * 0x80))
+#define DSIPHY_DLNX_VREG_CNTRL(n) (0x164 + ((n) * 0x80))
+#define DSIPHY_DLNX_HSTX_STR_STATUS(n) (0x168 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS0(n) (0x16C + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS1(n) (0x170 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS2(n) (0x174 + ((n) * 0x80))
+#define DSIPHY_DLNX_BIST_STATUS3(n) (0x178 + ((n) * 0x80))
+#define DSIPHY_DLNX_MISR_STATUS(n) (0x17C + ((n) * 0x80))
+
+#define DSIPHY_PLL_CLKBUFLR_EN 0x041C
+#define DSIPHY_PLL_PLL_BANDGAP 0x0508
+
+/**
+ * struct timing_entry - Calculated values for each timing parameter.
+ * @mipi_min:
+ * @mipi_max:
+ * @rec_min:
+ * @rec_max:
+ * @rec:
+ * @reg_value: Value to be programmed in register.
+ */
+struct timing_entry {
+ s32 mipi_min;
+ s32 mipi_max;
+ s32 rec_min;
+ s32 rec_max;
+ s32 rec;
+ u8 reg_value;
+};
+
+/**
+ * struct phy_timing_desc - Timing parameters for DSI PHY.
+ */
+struct phy_timing_desc {
+ struct timing_entry clk_prepare;
+ struct timing_entry clk_zero;
+ struct timing_entry clk_trail;
+ struct timing_entry hs_prepare;
+ struct timing_entry hs_zero;
+ struct timing_entry hs_trail;
+ struct timing_entry hs_rqst;
+ struct timing_entry hs_rqst_clk;
+ struct timing_entry hs_exit;
+ struct timing_entry ta_go;
+ struct timing_entry ta_sure;
+ struct timing_entry ta_set;
+ struct timing_entry clk_post;
+ struct timing_entry clk_pre;
+};
+
+/**
+ * struct phy_clk_params - Clock parameters for PHY timing calculations.
+ */
+struct phy_clk_params {
+ u32 bitclk_mbps;
+ u32 escclk_numer;
+ u32 escclk_denom;
+ u32 tlpx_numer_ns;
+ u32 treot_ns;
+};
+
/**
 * dsi_phy_hw_v4_0_regulator_enable() - enable regulators for DSI PHY
 * @phy:     Pointer to DSI PHY hardware object.
 * @reg_cfg: Regulator configuration for all DSI lanes.
 *
 * Programs the per-lane VREG control register of every lane (data lanes
 * and clock lane) with the first regulator setting for that lane.
 */
void dsi_phy_hw_v4_0_regulator_enable(struct dsi_phy_hw *phy,
				      struct dsi_phy_per_lane_cfgs *reg_cfg)
{
	int i;

	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++)
		DSI_W32(phy, DSIPHY_DLNX_VREG_CNTRL(i), reg_cfg->lane[i][0]);

	/* make sure all values are written to hardware */
	wmb();

	pr_debug("[DSI_%d] Phy regulators enabled\n", phy->index);
}
+
/**
 * dsi_phy_hw_v4_0_regulator_disable() - disable regulators
 * @phy: Pointer to DSI PHY hardware object.
 *
 * No register programming is needed to disable the regulators on this PHY
 * version; only a debug trace is emitted.
 */
void dsi_phy_hw_v4_0_regulator_disable(struct dsi_phy_hw *phy)
{
	pr_debug("[DSI_%d] Phy regulators disabled\n", phy->index);
}
+
/**
 * dsi_phy_hw_v4_0_enable() - Enable PHY hardware
 * @phy: Pointer to DSI PHY hardware object.
 * @cfg: Per lane configurations for timing, strength and lane
 *       configurations.
 *
 * Programs LDO control, per-lane config/timing/strength registers, pulses
 * CMN_CTRL_1 and finally selects the PLL source via GLBL_TEST_CTRL and
 * PLL_CLKBUFLR_EN.
 */
void dsi_phy_hw_v4_0_enable(struct dsi_phy_hw *phy,
			    struct dsi_phy_cfg *cfg)
{
	int i;
	struct dsi_phy_per_lane_cfgs *timing = &cfg->timing;
	u32 data;

	DSI_W32(phy, DSIPHY_CMN_LDO_CNTRL, 0x1C);

	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0x1);
	/* i runs over the four data lanes and the clock lane. */
	for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {

		DSI_W32(phy, DSIPHY_DLNX_CFG0(i), cfg->lanecfg.lane[i][0]);
		DSI_W32(phy, DSIPHY_DLNX_CFG1(i), cfg->lanecfg.lane[i][1]);
		DSI_W32(phy, DSIPHY_DLNX_CFG2(i), cfg->lanecfg.lane[i][2]);
		DSI_W32(phy, DSIPHY_DLNX_CFG3(i), cfg->lanecfg.lane[i][3]);

		DSI_W32(phy, DSIPHY_DLNX_TEST_STR(i), 0x88);

		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_4(i), timing->lane[i][0]);
		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_5(i), timing->lane[i][1]);
		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_6(i), timing->lane[i][2]);
		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_7(i), timing->lane[i][3]);
		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_8(i), timing->lane[i][4]);
		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_9(i), timing->lane[i][5]);
		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_10(i), timing->lane[i][6]);
		DSI_W32(phy, DSIPHY_DLNX_TIMING_CTRL_11(i), timing->lane[i][7]);

		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_0(i),
			cfg->strength.lane[i][0]);
		DSI_W32(phy, DSIPHY_DLNX_STRENGTH_CTRL_1(i),
			cfg->strength.lane[i][1]);
	}

	/* make sure all values are written to hardware before enabling phy */
	wmb();

	/*
	 * NOTE(review): presumably pulses a reset/trigger bit in CMN_CTRL_1
	 * for 100us — confirm against the 14nm PHY programming guide.
	 */
	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x80);
	udelay(100);
	DSI_W32(phy, DSIPHY_CMN_CTRL_1, 0x00);

	data = DSI_R32(phy, DSIPHY_CMN_GLBL_TEST_CTRL);

	/* BIT(2) of GLBL_TEST_CTRL is set only for a non-native PLL source. */
	switch (cfg->pll_source) {
	case DSI_PLL_SOURCE_STANDALONE:
		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x01);
		data &= ~BIT(2);
		break;
	case DSI_PLL_SOURCE_NATIVE:
		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x03);
		data &= ~BIT(2);
		break;
	case DSI_PLL_SOURCE_NON_NATIVE:
		DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0x00);
		data |= BIT(2);
		break;
	default:
		break;
	}

	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, data);

	/* Enable bias current for pll1 during split display case */
	if (cfg->pll_source == DSI_PLL_SOURCE_NON_NATIVE)
		DSI_W32(phy, DSIPHY_PLL_PLL_BANDGAP, 0x3);

	pr_debug("[DSI_%d]Phy enabled ", phy->index);
}
+
/**
 * dsi_phy_hw_v4_0_disable() - Disable PHY hardware
 * @phy: Pointer to DSI PHY hardware object.
 *
 * Clears the PLL clock buffer enable, the global test control and the
 * common control register, undoing dsi_phy_hw_v4_0_enable().
 */
void dsi_phy_hw_v4_0_disable(struct dsi_phy_hw *phy)
{
	DSI_W32(phy, DSIPHY_PLL_CLKBUFLR_EN, 0);
	DSI_W32(phy, DSIPHY_CMN_GLBL_TEST_CTRL, 0);
	DSI_W32(phy, DSIPHY_CMN_CTRL_0, 0);
	pr_debug("[DSI_%d]Phy disabled ", phy->index);
}
+
/*
 * Bits per pixel, indexed by enum dsi_pixel_format.
 * NOTE(review): the 3/8/12 entries presumably map to the packed RGB111/
 * RGB332/RGB444 formats — confirm ordering against dsi_defs.h.
 */
static const u32 bits_per_pixel[DSI_PIXEL_FORMAT_MAX] = {
	16, 18, 18, 24, 3, 8, 12 };
+
+/**
+ * calc_clk_prepare - calculates prepare timing params for clk lane.
+ */
+static int calc_clk_prepare(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s32 *actual_frac,
+ s64 *actual_intermediate)
+{
+ u32 const min_prepare_frac = 50;
+ u64 const multiplier = BIT(20);
+
+ struct timing_entry *t = &desc->clk_prepare;
+ int rc = 0;
+ u64 dividend, temp, temp_multiple;
+ s32 frac = 0;
+ s64 intermediate;
+ s64 clk_prep_actual;
+
+ dividend = ((t->rec_max - t->rec_min) * min_prepare_frac * multiplier);
+ temp = roundup(div_s64(dividend, 100), multiplier);
+ temp += (t->rec_min * multiplier);
+ t->rec = div_s64(temp, multiplier);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor clk_prepare\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ /* calculate theoretical value */
+ temp_multiple = 8 * t->reg_value * clk_params->tlpx_numer_ns
+ * multiplier;
+ intermediate = div_s64(temp_multiple, clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, clk_params->bitclk_mbps, &frac);
+ clk_prep_actual = div_s64((intermediate + frac), multiplier);
+
+ pr_debug("CLK_PREPARE:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max);
+ pr_debug(" reg_value=%d, actual=%lld\n", t->reg_value, clk_prep_actual);
+
+ *actual_frac = frac;
+ *actual_intermediate = intermediate;
+
+ return rc;
+}
+
+/**
+ * calc_clk_zero - calculates zero timing params for clk lane.
+ */
+static int calc_clk_zero(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s32 actual_frac,
+ s64 actual_intermediate)
+{
+ u32 const clk_zero_min_frac = 2;
+ u64 const multiplier = BIT(20);
+
+ int rc = 0;
+ struct timing_entry *t = &desc->clk_zero;
+ s64 mipi_min, rec_temp1, rec_temp2, rec_temp3, rec_min;
+
+ mipi_min = ((300 * multiplier) - (actual_intermediate + actual_frac));
+ t->mipi_min = div_s64(mipi_min, multiplier);
+
+ rec_temp1 = div_s64((mipi_min * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = (rec_temp1 - (11 * multiplier));
+ rec_temp3 = roundup(div_s64(rec_temp2, 8), multiplier);
+ rec_min = (div_s64(rec_temp3, multiplier) - 3);
+ t->rec_min = rec_min;
+ t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * clk_zero_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor clk_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("CLK_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+}
+
+/**
+ * calc_clk_trail - calculates prepare trail params for clk lane.
+ */
+static int calc_clk_trail(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ s64 *teot_clk_lane)
+{
+ u64 const multiplier = BIT(20);
+ u32 const phy_timing_frac = 30;
+
+ int rc = 0;
+ struct timing_entry *t = &desc->clk_trail;
+ u64 temp_multiple;
+ s32 frac;
+ s64 mipi_max_tr, rec_temp1, rec_temp2, rec_temp3, mipi_max;
+ s64 teot_clk_lane1;
+
+ temp_multiple = div_s64(
+ (12 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+
+ mipi_max_tr = ((105 * multiplier) +
+ (temp_multiple + frac));
+ teot_clk_lane1 = div_s64(mipi_max_tr, multiplier);
+
+ mipi_max = (mipi_max_tr - (clk_params->treot_ns * multiplier));
+ t->mipi_max = div_s64(mipi_max, multiplier);
+
+ temp_multiple = div_s64(
+ (t->mipi_min * multiplier * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp1 = temp_multiple + frac + (3 * multiplier);
+ rec_temp2 = div_s64(rec_temp1, 8);
+ rec_temp3 = roundup(rec_temp2, multiplier);
+
+ t->rec_min = div_s64(rec_temp3, multiplier);
+
+ /* recommended max */
+ rec_temp1 = div_s64((mipi_max * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = rec_temp1 + (3 * multiplier);
+ rec_temp3 = rec_temp2 / 8;
+ t->rec_max = div_s64(rec_temp3, multiplier);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * phy_timing_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor clk_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ *teot_clk_lane = teot_clk_lane1;
+ pr_debug("CLK_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+
+}
+
+/**
+ * calc_hs_prepare - calculates prepare timing params for data lanes in HS.
+ */
+static int calc_hs_prepare(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 *temp_mul)
+{
+ u64 const multiplier = BIT(20);
+ u32 const min_prepare_frac = 50;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_prepare;
+ u64 temp_multiple, dividend, temp;
+ s32 frac;
+ s64 rec_temp1, rec_temp2, mipi_max, mipi_min;
+ u32 low_clk_multiplier = 0;
+
+ if (clk_params->bitclk_mbps <= 120)
+ low_clk_multiplier = 2;
+ /* mipi min */
+ temp_multiple = div_s64((4 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ mipi_min = (40 * multiplier) + (temp_multiple + frac);
+ t->mipi_min = div_s64(mipi_min, multiplier);
+
+ /* mipi_max */
+ temp_multiple = div_s64(
+ (6 * multiplier * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ mipi_max = (85 * multiplier) + temp_multiple;
+ t->mipi_max = div_s64(mipi_max, multiplier);
+
+ /* recommended min */
+ temp_multiple = div_s64((mipi_min * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ temp_multiple -= (low_clk_multiplier * multiplier);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp1 = roundup(((temp_multiple + frac) / 8), multiplier);
+ t->rec_min = div_s64(rec_temp1, multiplier);
+
+ /* recommended max */
+ temp_multiple = div_s64((mipi_max * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ temp_multiple -= (low_clk_multiplier * multiplier);
+ div_s64_rem(temp_multiple, multiplier, &frac);
+ rec_temp2 = rounddown((temp_multiple / 8), multiplier);
+ t->rec_max = div_s64(rec_temp2, multiplier);
+
+ /* register value */
+ dividend = ((rec_temp2 - rec_temp1) * min_prepare_frac);
+ temp = roundup(div_u64(dividend, 100), multiplier);
+ t->rec = div_s64((temp + rec_temp1), multiplier);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_prepare\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ temp_multiple = div_s64(
+ (8 * (temp + rec_temp1) * clk_params->tlpx_numer_ns),
+ clk_params->bitclk_mbps);
+
+ *temp_mul = temp_multiple;
+ pr_debug("HS_PREP:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+ return rc;
+}
+
+/**
+ * calc_hs_zero - calculates zero timing params for data lanes in HS.
+ */
+static int calc_hs_zero(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 temp_multiple)
+{
+ u32 const hs_zero_min_frac = 10;
+ u64 const multiplier = BIT(20);
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_zero;
+ s64 rec_temp1, rec_temp2, rec_temp3, mipi_min;
+ s64 rec_min;
+
+ mipi_min = div_s64((10 * clk_params->tlpx_numer_ns * multiplier),
+ clk_params->bitclk_mbps);
+ rec_temp1 = (145 * multiplier) + mipi_min - temp_multiple;
+ t->mipi_min = div_s64(rec_temp1, multiplier);
+
+ /* recommended min */
+ rec_temp1 = div_s64((rec_temp1 * clk_params->bitclk_mbps),
+ clk_params->tlpx_numer_ns);
+ rec_temp2 = rec_temp1 - (11 * multiplier);
+ rec_temp3 = roundup((rec_temp2 / 8), multiplier);
+ rec_min = rec_temp3 - (3 * multiplier);
+ t->rec_min = div_s64(rec_min, multiplier);
+ t->rec_max = ((t->rec_min > 255) ? 511 : 255);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * hs_zero_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_zero\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_ZERO:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_trail - calculates trail timing params for data lanes in HS.
+ */
+static int calc_hs_trail(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc,
+ u64 teot_clk_lane)
+{
+ u32 const phy_timing_frac = 30;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_trail;
+ s64 rec_temp1;
+
+ t->mipi_min = 60 +
+ mult_frac(clk_params->tlpx_numer_ns, 4,
+ clk_params->bitclk_mbps);
+
+ t->mipi_max = teot_clk_lane - clk_params->treot_ns;
+
+ t->rec_min = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) +
+ (3 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ rec_temp1 = ((t->mipi_max * clk_params->bitclk_mbps) +
+ (3 * clk_params->tlpx_numer_ns));
+ t->rec_max = (rec_temp1 / (8 * clk_params->tlpx_numer_ns));
+ rec_temp1 = DIV_ROUND_UP(
+ ((t->rec_max - t->rec_min) * phy_timing_frac),
+ 100);
+ t->rec = rec_temp1 + t->rec_min;
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_trail\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_TRAIL:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_rqst - calculates rqst timing params for data lanes in HS.
+ */
+static int calc_hs_rqst(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_rqst;
+
+ t->rec = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) -
+ (8 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_rqst, %d\n", t->rec);
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_RQST:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_exit - calculates exit timing params for data lanes in HS.
+ */
+static int calc_hs_exit(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ u32 const hs_exit_min_frac = 10;
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_exit;
+
+ t->rec_min = (DIV_ROUND_UP(
+ (t->mipi_min * clk_params->bitclk_mbps),
+ (8 * clk_params->tlpx_numer_ns)) - 1);
+
+ t->rec = DIV_ROUND_UP(
+ (((t->rec_max - t->rec_min) * hs_exit_min_frac) +
+ (t->rec_min * 100)),
+ 100);
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_exit\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_EXIT:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
+/**
+ * calc_hs_rqst_clk - calculates rqst timing params for clock lane..
+ */
+static int calc_hs_rqst_clk(struct phy_clk_params *clk_params,
+ struct phy_timing_desc *desc)
+{
+ int rc = 0;
+ struct timing_entry *t = &desc->hs_rqst_clk;
+
+ t->rec = DIV_ROUND_UP(
+ ((t->mipi_min * clk_params->bitclk_mbps) -
+ (8 * clk_params->tlpx_numer_ns)),
+ (8 * clk_params->tlpx_numer_ns));
+
+ if (t->rec & 0xffffff00) {
+ pr_err("Incorrect rec valuefor hs_rqst_clk\n");
+ rc = -EINVAL;
+ } else {
+ t->reg_value = t->rec;
+ }
+
+ pr_debug("HS_RQST_CLK:mipi_min=%d, mipi_max=%d, rec_min=%d, rec_max=%d, reg_val=%d\n",
+ t->mipi_min, t->mipi_max, t->rec_min, t->rec_max,
+ t->reg_value);
+
+ return rc;
+}
+
/**
 * dsi_phy_calc_timing_params - calculates timing parameters for a given bit
 * clock.
 * @clk_params: Input clock parameters for the calculations.
 * @desc:       Timing descriptor updated by each calc_* helper in turn.
 *
 * The helpers are order-dependent: clk_prepare feeds clk_zero, clk_trail
 * feeds hs_trail, and hs_prepare feeds hs_zero via the intermediate values
 * threaded through the local variables below.
 *
 * Return: 0 on success or the error code of the first failing helper.
 */
static int dsi_phy_calc_timing_params(struct phy_clk_params *clk_params,
				      struct phy_timing_desc *desc)
{
	int rc = 0;
	s32 actual_frac = 0;
	s64 actual_intermediate = 0;
	u64 temp_multiple;
	s64 teot_clk_lane;

	rc = calc_clk_prepare(clk_params, desc, &actual_frac,
			      &actual_intermediate);
	if (rc) {
		pr_err("clk_prepare calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_clk_zero(clk_params, desc, actual_frac, actual_intermediate);
	if (rc) {
		pr_err("clk_zero calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_clk_trail(clk_params, desc, &teot_clk_lane);
	if (rc) {
		pr_err("clk_trail calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_hs_prepare(clk_params, desc, &temp_multiple);
	if (rc) {
		pr_err("hs_prepare calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_hs_zero(clk_params, desc, temp_multiple);
	if (rc) {
		pr_err("hs_zero calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_hs_trail(clk_params, desc, teot_clk_lane);
	if (rc) {
		pr_err("hs_trail calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_hs_rqst(clk_params, desc);
	if (rc) {
		pr_err("hs_rqst calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_hs_exit(clk_params, desc);
	if (rc) {
		pr_err("hs_exit calculations failed, rc=%d\n", rc);
		goto error;
	}

	rc = calc_hs_rqst_clk(clk_params, desc);
	if (rc) {
		pr_err("hs_rqst_clk calculations failed, rc=%d\n", rc);
		goto error;
	}
error:
	return rc;
}
+
+/**
+ * dsi_phy_hw_v4_0_calculate_timing_params() - calculates timing parameters.
+ * @phy: Pointer to DSI PHY hardware object.
+ * @mode: Mode information for which timing has to be calculated.
+ * @host: DSI host configuration for this mode.
+ * @timing: Timing parameters for each lane which will be returned.
+ */
+int dsi_phy_hw_v4_0_calculate_timing_params(struct dsi_phy_hw *phy,
+ struct dsi_mode_info *mode,
+ struct dsi_host_common_cfg *host,
+ struct dsi_phy_per_lane_cfgs *timing)
+{
+ /* constants */
+ u32 const esc_clk_mhz = 192; /* TODO: esc clock is hardcoded */
+ u32 const esc_clk_mmss_cc_prediv = 10;
+ u32 const tlpx_numer = 1000;
+ u32 const tr_eot = 20;
+ u32 const clk_prepare_spec_min = 38;
+ u32 const clk_prepare_spec_max = 95;
+ u32 const clk_trail_spec_min = 60;
+ u32 const hs_exit_spec_min = 100;
+ u32 const hs_exit_reco_max = 255;
+ u32 const hs_rqst_spec_min = 50;
+
+ /* local vars */
+ int rc = 0;
+ int i;
+ u32 h_total, v_total;
+ u64 inter_num;
+ u32 num_of_lanes = 0;
+ u32 bpp;
+ u64 x, y;
+ struct phy_timing_desc desc;
+ struct phy_clk_params clk_params = {0};
+
+ memset(&desc, 0x0, sizeof(desc));
+ h_total = DSI_H_TOTAL(mode);
+ v_total = DSI_V_TOTAL(mode);
+
+ bpp = bits_per_pixel[host->dst_format];
+
+ inter_num = bpp * mode->refresh_rate;
+
+ if (host->data_lanes & DSI_DATA_LANE_0)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_1)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_2)
+ num_of_lanes++;
+ if (host->data_lanes & DSI_DATA_LANE_3)
+ num_of_lanes++;
+
+
+ x = mult_frac(v_total * h_total, inter_num, num_of_lanes);
+ y = rounddown(x, 1);
+
+ clk_params.bitclk_mbps = rounddown(mult_frac(y, 1, 1000000), 1);
+ clk_params.escclk_numer = esc_clk_mhz;
+ clk_params.escclk_denom = esc_clk_mmss_cc_prediv;
+ clk_params.tlpx_numer_ns = tlpx_numer;
+ clk_params.treot_ns = tr_eot;
+
+
+ /* Setup default parameters */
+ desc.clk_prepare.mipi_min = clk_prepare_spec_min;
+ desc.clk_prepare.mipi_max = clk_prepare_spec_max;
+ desc.clk_trail.mipi_min = clk_trail_spec_min;
+ desc.hs_exit.mipi_min = hs_exit_spec_min;
+ desc.hs_exit.rec_max = hs_exit_reco_max;
+
+ desc.clk_prepare.rec_min = DIV_ROUND_UP(
+ (desc.clk_prepare.mipi_min * clk_params.bitclk_mbps),
+ (8 * clk_params.tlpx_numer_ns)
+ );
+
+ desc.clk_prepare.rec_max = rounddown(
+ mult_frac((desc.clk_prepare.mipi_max * clk_params.bitclk_mbps),
+ 1, (8 * clk_params.tlpx_numer_ns)),
+ 1);
+
+ desc.hs_rqst.mipi_min = hs_rqst_spec_min;
+ desc.hs_rqst_clk.mipi_min = hs_rqst_spec_min;
+
+ pr_debug("BIT CLOCK = %d, tlpx_numer_ns=%d, treot_ns=%d\n",
+ clk_params.bitclk_mbps, clk_params.tlpx_numer_ns,
+ clk_params.treot_ns);
+ rc = dsi_phy_calc_timing_params(&clk_params, &desc);
+ if (rc) {
+ pr_err("Timing calc failed, rc=%d\n", rc);
+ goto error;
+ }
+
+
+ for (i = DSI_LOGICAL_LANE_0; i < DSI_LANE_MAX; i++) {
+ timing->lane[i][0] = desc.hs_exit.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][1] = desc.clk_zero.reg_value;
+ else
+ timing->lane[i][1] = desc.hs_zero.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][2] = desc.clk_prepare.reg_value;
+ else
+ timing->lane[i][2] = desc.hs_prepare.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][3] = desc.clk_trail.reg_value;
+ else
+ timing->lane[i][3] = desc.hs_trail.reg_value;
+
+ if (i == DSI_LOGICAL_CLOCK_LANE)
+ timing->lane[i][4] = desc.hs_rqst_clk.reg_value;
+ else
+ timing->lane[i][4] = desc.hs_rqst.reg_value;
+
+ timing->lane[i][5] = 0x3;
+ timing->lane[i][6] = 0x4;
+ timing->lane[i][7] = 0xA0;
+ pr_debug("[%d][%d %d %d %d %d]\n", i, timing->lane[i][0],
+ timing->lane[i][1],
+ timing->lane[i][2],
+ timing->lane[i][3],
+ timing->lane[i][4]);
+ }
+ timing->count_per_lane = 8;
+
+error:
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index 6edcd6f57e70..f9bed1058f38 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015, 2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -193,6 +193,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
struct drm_bridge *ext_bridge;
int ret, i;
+ if (!msm_dsi)
+ return -EINVAL;
+
if (WARN_ON(!encoders[MSM_DSI_VIDEO_ENCODER_ID] ||
!encoders[MSM_DSI_CMD_ENCODER_ID]))
return -EINVAL;
@@ -246,19 +249,17 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
return 0;
fail:
- if (msm_dsi) {
- /* bridge/connector are normally destroyed by drm: */
- if (msm_dsi->bridge) {
- msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
- msm_dsi->bridge = NULL;
- }
+ /* bridge/connector are normally destroyed by drm: */
+ if (msm_dsi->bridge) {
+ msm_dsi_manager_bridge_destroy(msm_dsi->bridge);
+ msm_dsi->bridge = NULL;
+ }
- /* don't destroy connector if we didn't make it */
- if (msm_dsi->connector && !msm_dsi->external_bridge)
- msm_dsi->connector->funcs->destroy(msm_dsi->connector);
+ /* don't destroy connector if we didn't make it */
+ if (msm_dsi->connector && !msm_dsi->external_bridge)
+ msm_dsi->connector->funcs->destroy(msm_dsi->connector);
- msm_dsi->connector = NULL;
- }
+ msm_dsi->connector = NULL;
return ret;
}
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 5f5a3732cdf6..4cb4764e7492 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -89,7 +89,7 @@ int msm_dsi_manager_phy_enable(int id,
u32 *clk_pre, u32 *clk_post);
void msm_dsi_manager_phy_disable(int id);
int msm_dsi_manager_cmd_xfer(int id, const struct mipi_dsi_msg *msg);
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len);
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
@@ -143,7 +143,7 @@ int msm_dsi_host_cmd_tx(struct mipi_dsi_host *host,
int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
- u32 iova, u32 len);
+ u64 iova, u32 len);
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index b2b5f3dd1b4c..4958594d5266 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 12ddbbb53107..bb9df8556c0e 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -597,7 +597,7 @@ static inline enum dsi_cmd_dst_format dsi_get_cmd_fmt(
switch (mipi_fmt) {
case MIPI_DSI_FMT_RGB888: return CMD_DST_FORMAT_RGB888;
case MIPI_DSI_FMT_RGB666_PACKED:
- case MIPI_DSI_FMT_RGB666: return VID_DST_FORMAT_RGB666;
+ case MIPI_DSI_FMT_RGB666: return CMD_DST_FORMAT_RGB666;
case MIPI_DSI_FMT_RGB565: return CMD_DST_FORMAT_RGB565;
default: return CMD_DST_FORMAT_RGB888;
}
@@ -838,24 +838,21 @@ static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
int ret;
- u32 iova;
+ u64 iova;
- mutex_lock(&dev->struct_mutex);
msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
if (IS_ERR(msm_host->tx_gem_obj)) {
ret = PTR_ERR(msm_host->tx_gem_obj);
pr_err("%s: failed to allocate gem, %d\n", __func__, ret);
msm_host->tx_gem_obj = NULL;
- mutex_unlock(&dev->struct_mutex);
return ret;
}
- ret = msm_gem_get_iova_locked(msm_host->tx_gem_obj, 0, &iova);
+ ret = msm_gem_get_iova(msm_host->tx_gem_obj, NULL, &iova);
if (ret) {
pr_err("%s: failed to get iova, %d\n", __func__, ret);
return ret;
}
- mutex_unlock(&dev->struct_mutex);
if (iova & 0x07) {
pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
@@ -903,7 +900,7 @@ static int dsi_cmd_dma_add(struct drm_gem_object *tx_gem,
data = msm_gem_vaddr(tx_gem);
- if (IS_ERR(data)) {
+ if (IS_ERR_OR_NULL(data)) {
ret = PTR_ERR(data);
pr_err("%s: get vaddr failed, %d\n", __func__, ret);
return ret;
@@ -976,7 +973,7 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
{
int ret;
- u32 iova;
+ uint64_t iova;
bool triggered;
ret = msm_gem_get_iova(msm_host->tx_gem_obj, 0, &iova);
@@ -1011,7 +1008,7 @@ static int dsi_cmd_dma_rx(struct msm_dsi_host *msm_host,
u32 *lp, *temp, data;
int i, j = 0, cnt;
u32 read_cnt;
- u8 reg[16];
+ u8 reg[16] = {0};
int repeated_bytes = 0;
int buf_offset = buf - msm_host->rx_buf;
@@ -1752,11 +1749,12 @@ int msm_dsi_host_cmd_rx(struct mipi_dsi_host *host,
return ret;
}
-void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u32 iova, u32 len)
+void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host, u64 iova, u32 len)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- dsi_write(msm_host, REG_DSI_DMA_BASE, iova);
+ /* FIXME: Verify that the iova < 32 bits? */
+ dsi_write(msm_host, REG_DSI_DMA_BASE, lower_32_bits(iova));
dsi_write(msm_host, REG_DSI_DMA_LEN, len);
dsi_write(msm_host, REG_DSI_TRIG_DMA, 1);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index 34220df1265f..94e421c82356 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2015,2017-2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -623,16 +623,26 @@ fail:
struct drm_connector *msm_dsi_manager_ext_bridge_init(u8 id)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
- struct drm_device *dev = msm_dsi->dev;
+ struct drm_device *dev;
struct drm_encoder *encoder;
struct drm_bridge *int_bridge, *ext_bridge;
struct drm_connector *connector;
struct list_head *connector_list;
+ if (!msm_dsi)
+ return ERR_PTR(-EINVAL);
+
+ dev = msm_dsi->dev;
+
int_bridge = msm_dsi->bridge;
ext_bridge = msm_dsi->external_bridge =
msm_dsi_host_get_bridge(msm_dsi->host);
+ if (!int_bridge || !ext_bridge) {
+ pr_err("%s: failed to get bridge info\n", __func__);
+ return ERR_PTR(-EINVAL);
+ }
+
/*
* HACK: we may not know the external DSI bridge device's mode
* flags here. We'll get to know them only when the device
@@ -779,7 +789,7 @@ restore_host0:
return ret;
}
-bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 iova, u32 len)
+bool msm_dsi_manager_cmd_xfer_trigger(int id, u64 iova, u32 len)
{
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct msm_dsi *msm_dsi0 = dsi_mgr_get_dsi(DSI_0);
@@ -802,7 +812,7 @@ int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
int id = msm_dsi->id;
int ret;
- if (id > DSI_MAX) {
+ if (id >= DSI_MAX) {
pr_err("%s: invalid id %d\n", __func__, id);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 80ec65e47468..2d999494cdea 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 7d7662e69e11..506434fac993 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/edp/edp.c b/drivers/gpu/drm/msm/edp/edp.c
index 0940e84b2821..2c9d11638f29 100644
--- a/drivers/gpu/drm/msm/edp/edp.c
+++ b/drivers/gpu/drm/msm/edp/edp.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015,2018 The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -54,7 +54,7 @@ static struct msm_edp *edp_init(struct platform_device *pdev)
ret = -ENOMEM;
goto fail;
}
- DBG("eDP probed=%p", edp);
+ DBG("eDP probed=%pK", edp);
edp->pdev = pdev;
platform_set_drvdata(pdev, edp);
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index 90bf5ed46746..f1072c18c81e 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/ekms/edrm_connector.c b/drivers/gpu/drm/msm/ekms/edrm_connector.c
new file mode 100644
index 000000000000..1a9a930a3974
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_connector.c
@@ -0,0 +1,127 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_connector.h"
+
+struct edrm_connector {
+ struct drm_connector base;
+ struct drm_encoder *encoder;
+ struct msm_edrm_display *display;
+};
+
+#define to_edrm_connector(x) container_of(x, struct edrm_connector, base)
+
+static enum drm_connector_status
+edrm_connector_detect(struct drm_connector *conn, bool force)
+{
+ return connector_status_connected;
+}
+
+static int
+edrm_connector_get_modes(struct drm_connector *connector)
+{
+ struct edrm_connector *edrm_conn = to_edrm_connector(connector);
+ struct drm_display_mode *m;
+
+ m = drm_mode_duplicate(connector->dev, &edrm_conn->display->mode);
+ if (m == NULL) {
+ pr_err("edrm drm_mode_duplicate failed\n");
+ return 0;
+ }
+ drm_mode_set_name(m);
+ drm_mode_probed_add(connector, m);
+
+ return 1;
+}
+
+static enum drm_mode_status
+edrm_mode_valid(struct drm_connector *connector, struct drm_display_mode *mode)
+{
+ return MODE_OK;
+}
+
+static struct drm_encoder *
+edrm_connector_best_encoder(struct drm_connector *connector)
+{
+ struct edrm_connector *edrm_conn = to_edrm_connector(connector);
+
+ return edrm_conn->encoder;
+}
+
+void edrm_connector_destroy(struct drm_connector *connector)
+{
+ struct edrm_connector *edrm_conn = to_edrm_connector(connector);
+
+ drm_connector_unregister(connector);
+ drm_connector_cleanup(connector);
+ kfree(edrm_conn);
+}
+
+static const struct drm_connector_helper_funcs edrm_connector_helper_funcs = {
+ .get_modes = edrm_connector_get_modes,
+ .mode_valid = edrm_mode_valid,
+ .best_encoder = edrm_connector_best_encoder,
+};
+
+static const struct drm_connector_funcs edrm_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = edrm_connector_detect,
+ .destroy = edrm_connector_destroy,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+struct drm_connector *edrm_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct msm_edrm_display *display)
+{
+ struct edrm_connector *edrm_conn;
+ struct drm_connector *connector;
+ int ret;
+
+ edrm_conn = kzalloc(sizeof(*edrm_conn), GFP_KERNEL);
+ if (!edrm_conn)
+ return ERR_PTR(-ENOMEM);
+ connector = &edrm_conn->base;
+
+ ret = drm_connector_init(dev, connector,
+ &edrm_connector_funcs,
+ display->connector_type);
+ if (ret) {
+ pr_err("edrm drm_connector_init failed\n");
+ goto fail;
+ }
+
+ drm_connector_helper_add(connector, &edrm_connector_helper_funcs);
+
+ edrm_conn->display = display;
+ edrm_conn->encoder = encoder;
+
+ ret = drm_connector_register(&edrm_conn->base);
+ if (ret) {
+ pr_err("failed to register drm connector, %d\n", ret);
+ goto fail;
+ }
+
+ ret = drm_mode_connector_attach_encoder(&edrm_conn->base, encoder);
+ if (ret) {
+ pr_err("failed to attach encoder to connector, %d\n", ret);
+ goto fail;
+ }
+
+ return connector;
+fail:
+ kfree(edrm_conn);
+ return ERR_PTR(ret);
+
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_connector.h b/drivers/gpu/drm/msm/ekms/edrm_connector.h
new file mode 100644
index 000000000000..4bd6deb7b6d0
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_connector.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_CONNECTOR_H_
+#define _EDRM_CONNECTOR_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+struct drm_connector *edrm_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct msm_edrm_display *display);
+
+void edrm_connector_destroy(struct drm_connector *connector);
+
+#endif /* _EDRM_CONNECTOR_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_crtc.c b/drivers/gpu/drm/msm/ekms/edrm_crtc.c
new file mode 100644
index 000000000000..cb72aba6d0aa
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_crtc.c
@@ -0,0 +1,270 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_crtc.h"
+#include "edrm_plane.h"
+#include "edrm_encoder.h"
+#include "sde_kms.h"
+
+/* display control path Flush register offset */
+#define FLUSH_OFFSET 0x18
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+#define SSPP_CONSTANT_COLOR 0x3c
+#define LAYER_BLEND5_OP 0x260
+#define FLUST_CTL_BIT 17
+#define LAYER_OP_ENABLE_ALPHA_BLEND 0x600
+
+static void edrm_crtc_plane_attach(struct drm_crtc *crtc,
+ struct drm_plane *plane)
+{
+ struct drm_device *dev = crtc->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct msm_drm_private *master_priv =
+ edrm_kms->master_dev->dev_private;
+ struct sde_kms *master_kms = to_sde_kms(master_priv->kms);
+ u32 layer_val, ctl_off, lm_idx;
+ struct edrm_plane *edrm_plane = to_edrm_plane(plane);
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(crtc);
+ struct msm_edrm_display *display;
+
+ display = &edrm_kms->display[edrm_crtc->display_id];
+ ctl_off = display->ctl_off;
+ lm_idx = (display->ctl_id - 1) * 0x4;
+
+ layer_val = readl_relaxed(master_kms->mmio + ctl_off + lm_idx);
+ switch (edrm_plane->sspp_cfg_id) {
+ case 1: /* vig 0 */
+ layer_val |= edrm_plane->lm_stage + 2;
+ break;
+ case 2: /* vig 1 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 3;
+ break;
+ case 3: /* vig 2 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 6;
+ break;
+ case 4: /* vig 3 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 26;
+ break;
+ case 5: /* rgb 0 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 9;
+ break;
+ case 6: /* rgb 1 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 12;
+ break;
+ case 7: /* rgb 2 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 15;
+ break;
+ case 8: /* rgb 3 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 29;
+ break;
+ case 9: /* dma 0 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 18;
+ break;
+ case 10: /* dma 1 */
+ layer_val |= (edrm_plane->lm_stage + 2) << 21;
+ break;
+ }
+ writel_relaxed(layer_val, master_kms->mmio + ctl_off + lm_idx);
+ plane->crtc = crtc;
+}
+
+void edrm_crtc_postinit(struct drm_crtc *crtc)
+{
+ struct drm_device *dev = crtc->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms;
+ struct sde_kms *master_kms;
+ struct msm_drm_private *master_priv;
+ struct msm_edrm_display *display;
+ struct edrm_crtc *edrm_crtc;
+ struct edrm_plane *edrm_plane;
+ u32 lm_off, flush_val;
+ const struct drm_plane_helper_funcs *funcs;
+ u32 sspp_flush_mask_bit[10] = {
+ 0, 1, 2, 18, 3, 4, 5, 19, 11, 12};
+
+ edrm_kms = to_edrm_kms(kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ edrm_plane = to_edrm_plane(crtc->primary);
+ edrm_crtc = to_edrm_crtc(crtc);
+ funcs = crtc->primary->helper_private;
+ funcs->atomic_disable(crtc->primary, crtc->primary->state);
+ display = &edrm_kms->display[edrm_crtc->display_id];
+ lm_off = display->lm_off;
+
+ edrm_crtc_plane_attach(crtc, crtc->primary);
+
+ /* Update CTL bit, layer mixer flush bit and sspp flush bit */
+ flush_val = BIT(FLUST_CTL_BIT);
+ flush_val |= BIT(display->ctl_id + 5);
+ flush_val |= BIT(sspp_flush_mask_bit[edrm_plane->sspp_cfg_id - 1]);
+
+ /* setup alpha blending for mixer stage 5 */
+ writel_relaxed(LAYER_OP_ENABLE_ALPHA_BLEND, master_kms->mmio + lm_off +
+ LAYER_BLEND5_OP);
+ edrm_crtc->sspp_flush_mask |= flush_val;
+
+ edrm_crtc_commit_kickoff(crtc);
+}
+
+static void edrm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct drm_plane *plane = NULL;
+
+ if (!crtc) {
+ pr_err("invalid crtc\n");
+ return;
+ }
+
+ /* TODO: wait for acquire fences before anything else is done */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ /* update SSPP bit in sspp_flush_mask */
+ edrm_plane_flush(plane);
+ }
+}
+
+static void edrm_crtc_enable(struct drm_crtc *crtc)
+{
+ crtc->state->enable = true;
+}
+
+static void edrm_crtc_disable(struct drm_crtc *crtc)
+{
+ struct edrm_plane *edrm_plane;
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(crtc);
+ const struct drm_plane_helper_funcs *funcs;
+ u32 sspp_flush_mask_bit[10] = {
+ 0, 1, 2, 18, 3, 4, 5, 19, 11, 12};
+ struct drm_encoder *encoder;
+
+ edrm_plane = to_edrm_plane(crtc->primary);
+ funcs = crtc->primary->helper_private;
+ funcs->atomic_disable(crtc->primary, crtc->primary->state);
+
+ edrm_crtc->sspp_flush_mask |=
+ BIT(sspp_flush_mask_bit[edrm_plane->sspp_cfg_id - 1]);
+ edrm_crtc_commit_kickoff(crtc);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ edrm_encoder_wait_for_commit_done(encoder);
+ }
+}
+
+void edrm_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(crtc);
+
+ drm_crtc_cleanup(crtc);
+ kfree(edrm_crtc);
+}
+
+static const struct drm_crtc_funcs edrm_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = edrm_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+};
+
+static const struct drm_crtc_helper_funcs edrm_crtc_helper_funcs = {
+ .disable = edrm_crtc_disable,
+ .enable = edrm_crtc_enable,
+ .atomic_flush = edrm_crtc_atomic_flush,
+};
+
+struct drm_crtc *edrm_crtc_init(struct drm_device *dev,
+ struct msm_edrm_display *display,
+ struct drm_plane *primary_plane)
+{
+ struct edrm_crtc *edrm_crtc;
+ struct drm_crtc *crtc;
+ int ret;
+
+ edrm_crtc = kzalloc(sizeof(*edrm_crtc), GFP_KERNEL);
+ if (!edrm_crtc) {
+ ret = -ENOMEM;
+ goto fail_no_mem;
+ }
+
+ crtc = &edrm_crtc->base;
+ ret = drm_crtc_init_with_planes(dev, crtc, primary_plane, NULL,
+ &edrm_crtc_funcs);
+ if (ret)
+ goto fail;
+
+ drm_crtc_helper_add(crtc, &edrm_crtc_helper_funcs);
+ edrm_crtc->display_id = display->display_id;
+
+ return crtc;
+fail:
+ kfree(edrm_crtc);
+fail_no_mem:
+ return ERR_PTR(ret);
+}
+
+void edrm_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct msm_edrm_kms *edrm_kms;
+ struct msm_edrm_display *display;
+ struct edrm_crtc *edrm_crtc;
+ struct sde_kms *master_kms;
+ struct msm_drm_private *master_priv;
+ u32 ctl_off;
+
+ dev = crtc->dev;
+ priv = dev->dev_private;
+ edrm_kms = to_edrm_kms(priv->kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ edrm_crtc = to_edrm_crtc(crtc);
+
+ display = &edrm_kms->display[edrm_crtc->display_id];
+ ctl_off = display->ctl_off;
+
+ /* Trigger the flush */
+ writel_relaxed(edrm_crtc->sspp_flush_mask, master_kms->mmio + ctl_off +
+ FLUSH_OFFSET);
+}
+
+void edrm_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct drm_encoder *encoder;
+
+ dev = crtc->dev;
+ priv = dev->dev_private;
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ edrm_encoder_wait_for_commit_done(encoder);
+ }
+}
+
+void edrm_crtc_prepare_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_crtc.h b/drivers/gpu/drm/msm/ekms/edrm_crtc.h
new file mode 100644
index 000000000000..761a6a97e5b2
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_crtc.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_CRTC_H_
+#define _EDRM_CRTC_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+/**
+ * struct edrm_crtc - eDRM subclass of drm_crtc
+ * @base:            base drm_crtc object
+ * @sspp_flush_mask: accumulated CTL flush bits, written at kickoff and
+ *                   cleared once the hardware consumes the flush
+ * @display_id:      index into the eDRM KMS display array
+ */
+struct edrm_crtc {
+ struct drm_crtc base;
+ u32 sspp_flush_mask;
+ int display_id;
+};
+
+/* Upcast from the embedded drm_crtc to the containing edrm_crtc */
+#define to_edrm_crtc(x) container_of(x, struct edrm_crtc, base)
+
+struct drm_crtc *edrm_crtc_init(struct drm_device *dev,
+ struct msm_edrm_display *display, struct drm_plane *primary_plane);
+
+/**
+ * Helper function to setup the control path
+ * @crtc: Pointer to drm crtc object
+ */
+void edrm_crtc_postinit(struct drm_crtc *crtc);
+
+/**
+ * edrm_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void edrm_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * edrm_crtc_complete_commit - callback to prepare for output fences
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void edrm_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+void edrm_crtc_prepare_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * edrm_crtc_destroy - free up edrm_crtc structure
+ * @crtc: Pointer to drm crtc object
+ */
+void edrm_crtc_destroy(struct drm_crtc *crtc);
+
+#endif /* _EDRM_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_drv.c b/drivers/gpu/drm/msm/ekms/edrm_drv.c
new file mode 100644
index 000000000000..69b8c01e59d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_drv.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of_address.h>
+#include <linux/sde_io_util.h>
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "edrm_kms.h"
+
+/**
+ * msm_edrm_unload - drm driver unload callback for the eDRM device
+ * @dev: eDRM drm device
+ *
+ * Stops the per-crtc commit workers, tears down mode config, drains and
+ * destroys the private workqueue, destroys the KMS and frees the
+ * device-private structure.  Always returns 0.
+ */
+static int msm_edrm_unload(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ int i;
+
+ /* clean up display commit worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ flush_kthread_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+ }
+
+ drm_kms_helper_poll_fini(dev);
+ drm_mode_config_cleanup(dev);
+
+ flush_workqueue(priv->wq);
+ destroy_workqueue(priv->wq);
+
+ /* kms may be NULL if load failed before msm_edrm_kms_init */
+ if (kms)
+ kms->funcs->destroy(kms);
+
+ dev->dev_private = NULL;
+
+ kfree(priv);
+
+ return 0;
+}
+
+/**
+ * msm_edrm_load - drm driver load callback for the early display device
+ * @dev: eDRM drm device being loaded
+ * @flags: unused driver flags from the drm core
+ *
+ * Binds the eDRM device to the already-probed master (main DRM) device:
+ * shares its power client and module-power data, initializes the eDRM
+ * KMS, spawns one SCHED_FIFO commit worker per crtc and sets up vblank
+ * bookkeeping.  Returns 0 on success or a negative error code.
+ *
+ * Fixes vs. the original: the workqueue allocation is checked, the
+ * kthread_run() result is IS_ERR-checked *before* sched_setscheduler()
+ * is called on it, and the kthread failure path now sets a real error
+ * code instead of possibly leaving ret == 0.
+ */
+static int msm_edrm_load(struct drm_device *dev, unsigned long flags)
+{
+	struct platform_device *pdev = dev->platformdev;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	struct drm_device *master_dev;
+	struct msm_drm_private *master_priv;
+	struct drm_minor *minor;
+	int ret, i;
+	struct sched_param param;
+
+	/* main DRM's minor ID is zero */
+	minor = drm_minor_acquire(0);
+	if (IS_ERR(minor)) {
+		pr_err("master drm_minor has no dev, stop early drm loading\n");
+		return -ENODEV;
+	}
+	master_dev = minor->dev;
+	drm_minor_release(minor);
+	master_priv = master_dev->dev_private;
+
+	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
+	if (!priv)
+		return -ENOMEM;
+
+	dev->dev_private = priv;
+
+	priv->wq = alloc_ordered_workqueue("msm_edrm", 0);
+	if (!priv->wq) {
+		/* unload assumes a valid wq, so bail out directly here */
+		dev->dev_private = NULL;
+		kfree(priv);
+		return -ENOMEM;
+	}
+	init_waitqueue_head(&priv->fence_event);
+	init_waitqueue_head(&priv->pending_crtcs_event);
+	INIT_LIST_HEAD(&priv->client_event_list);
+	INIT_LIST_HEAD(&priv->inactive_list);
+	INIT_LIST_HEAD(&priv->fence_cbs);
+	hash_init(priv->mn_hash);
+	mutex_init(&priv->mn_lock);
+
+	drm_mode_config_init(dev);
+
+	platform_set_drvdata(pdev, dev);
+	/* borrow the master device's power client and module power data */
+	priv->pclient = master_priv->pclient;
+	memcpy((void *)&priv->phandle.mp, (void *) &master_priv->phandle.mp,
+		sizeof(struct dss_module_power));
+	INIT_LIST_HEAD(&priv->phandle.power_client_clist);
+	mutex_init(&priv->phandle.phandle_lock);
+
+	priv->vram.size = 0;
+	kms = msm_edrm_kms_init(dev);
+	if (IS_ERR(kms)) {
+		priv->kms = NULL;
+		dev_err(dev->dev, "failed to load kms\n");
+		ret = PTR_ERR(kms);
+		goto fail;
+	}
+
+	priv->kms = kms;
+	if (kms && kms->funcs && kms->funcs->hw_init) {
+		ret = kms->funcs->hw_init(kms);
+		if (ret) {
+			dev_err(dev->dev, "kms hw init failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
+	/**
+	 * this priority was found during empiric testing to have appropriate
+	 * realtime scheduling to process display updates and interact with
+	 * other real time and normal priority task
+	 */
+	param.sched_priority = 16;
+	/* initialize commit thread structure */
+	for (i = 0; i < priv->num_crtcs; i++) {
+		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		init_kthread_worker(&priv->disp_thread[i].worker);
+		priv->disp_thread[i].dev = dev;
+		priv->disp_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->disp_thread[i].worker,
+				"crtc_commit:%d",
+				priv->disp_thread[i].crtc_id);
+		if (IS_ERR(priv->disp_thread[i].thread)) {
+			dev_err(dev->dev, "failed to create kthread\n");
+			ret = PTR_ERR(priv->disp_thread[i].thread);
+			priv->disp_thread[i].thread = NULL;
+			/* clean up previously created threads if any */
+			for (i -= 1; i >= 0; i--) {
+				kthread_stop(priv->disp_thread[i].thread);
+				priv->disp_thread[i].thread = NULL;
+			}
+			goto fail;
+		}
+
+		/*
+		 * Only adjust scheduling once the thread is known to be
+		 * valid; the original code handed a potential ERR_PTR to
+		 * sched_setscheduler().
+		 */
+		ret = sched_setscheduler(priv->disp_thread[i].thread,
+			SCHED_FIFO, &param);
+		if (ret)
+			pr_warn("display thread priority update failed: %d\n",
+				ret);
+	}
+
+	/* share same function from master drm */
+	dev->mode_config.funcs = master_dev->mode_config.funcs;
+
+	ret = drm_vblank_init(dev, priv->num_crtcs);
+	if (ret < 0) {
+		dev_err(dev->dev, "failed to initialize vblank\n");
+		goto fail;
+	}
+
+	drm_mode_config_reset(dev);
+	/* perform subdriver post initialization */
+	if (kms && kms->funcs && kms->funcs->postinit) {
+		ret = kms->funcs->postinit(kms);
+		if (ret) {
+			dev_err(dev->dev, "kms post init failed: %d\n", ret);
+			goto fail;
+		}
+	}
+
+	drm_kms_helper_poll_init(dev);
+	return 0;
+
+fail:
+	msm_edrm_unload(dev);
+	return ret;
+}
+
+/**
+ * msm_edrm_open - drm file open hook for the eDRM node
+ * @dev: eDRM drm device
+ * @file: drm file being opened
+ *
+ * eDRM keeps no per-file context.  New clients are refused with -ENODEV
+ * once the display resources have been handed off to the main DRM.
+ */
+static int msm_edrm_open(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+
+	if (!dev || !dev->dev_private)
+		return -ENODEV;
+
+	priv = dev->dev_private;
+	/* no per-file driver context is allocated */
+	file->driver_priv = NULL;
+	kms = priv->kms;
+
+	if (kms) {
+		struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+
+		/*
+		 * return failure if eDRM already handed off display
+		 * resources to main DRM
+		 */
+		if (edrm_kms->handoff_flag)
+			return -ENODEV;
+	}
+
+	if (kms && kms->funcs && kms->funcs->postopen)
+		kms->funcs->postopen(kms, file);
+
+	return 0;
+}
+
+/* Forward the per-file preclose notification to the KMS, if implemented. */
+static void msm_preclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+
+	if (!priv->kms || !priv->kms->funcs)
+		return;
+	if (priv->kms->funcs->preclose)
+		priv->kms->funcs->preclose(priv->kms, file);
+}
+
+/**
+ * msm_postclose - drm file postclose hook
+ * @dev: eDRM drm device
+ * @file: drm file being closed
+ *
+ * Notifies the KMS, then frees the per-file context (always NULL for
+ * eDRM, see msm_edrm_open).  The redundant NULL guard before kfree()
+ * was removed: kfree(NULL) is a no-op.
+ */
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_file_private *ctx = file->driver_priv;
+	struct msm_kms *kms = priv->kms;
+
+	if (kms && kms->funcs && kms->funcs->postclose)
+		kms->funcs->postclose(kms, file);
+
+	kfree(ctx);
+}
+
+/*
+ * msm_lastclose - last drm file closed; drain pending work and give the
+ * KMS a chance to perform its lastclose handling.
+ */
+static void msm_lastclose(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_kms *kms = priv->kms;
+
+	/* wait for pending vblank requests to be executed by worker thread */
+	flush_workqueue(priv->wq);
+
+	if (kms && kms->funcs && kms->funcs->lastclose)
+		kms->funcs->lastclose(kms);
+}
+
+/*
+ * Vblank on/off stubs: eDRM does not drive vblank interrupts itself
+ * (the master DRM owns the hardware), but the drm core requires these
+ * callbacks to exist for drm_vblank_init() users.
+ */
+static int msm_edrm_enable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+ return 0;
+}
+
+static void msm_edrm_disable_vblank(struct drm_device *dev, unsigned int pipe)
+{
+}
+
+
+/* GEM mmap fault handling — reuses the main msm GEM implementation */
+static const struct vm_operations_struct vm_ops = {
+ .fault = msm_gem_fault,
+ .open = drm_gem_vm_open,
+ .close = drm_gem_vm_close,
+};
+
+/* Character-device file operations; all standard drm entry points */
+static const struct file_operations fops = {
+ .owner = THIS_MODULE,
+ .open = drm_open,
+ .release = drm_release,
+ .unlocked_ioctl = drm_ioctl,
+#ifdef CONFIG_COMPAT
+ .compat_ioctl = drm_compat_ioctl,
+#endif
+ .poll = drm_poll,
+ .read = drm_read,
+ .llseek = no_llseek,
+ .mmap = msm_gem_mmap,
+};
+
+/*
+ * eDRM drm_driver: no private ioctls; GEM and PRIME paths are shared
+ * with the main msm driver.  Note .name is "msm" so userspace sees the
+ * same driver identity as the main DRM node.
+ */
+static struct drm_driver msm_edrm_driver = {
+ .driver_features = DRIVER_HAVE_IRQ |
+ DRIVER_GEM |
+ DRIVER_PRIME |
+ DRIVER_RENDER |
+ DRIVER_ATOMIC |
+ DRIVER_MODESET,
+ .load = msm_edrm_load,
+ .unload = msm_edrm_unload,
+ .open = msm_edrm_open,
+ .preclose = msm_preclose,
+ .postclose = msm_postclose,
+ .lastclose = msm_lastclose,
+ .set_busid = drm_platform_set_busid,
+ .get_vblank_counter = drm_vblank_no_hw_counter,
+ .enable_vblank = msm_edrm_enable_vblank,
+ .disable_vblank = msm_edrm_disable_vblank,
+ .gem_free_object = msm_gem_free_object,
+ .gem_vm_ops = &vm_ops,
+ .dumb_create = msm_gem_dumb_create,
+ .dumb_map_offset = msm_gem_dumb_map_offset,
+ .dumb_destroy = drm_gem_dumb_destroy,
+ .prime_handle_to_fd = drm_gem_prime_handle_to_fd,
+ .prime_fd_to_handle = drm_gem_prime_fd_to_handle,
+ .gem_prime_export = drm_gem_prime_export,
+ .gem_prime_import = drm_gem_prime_import,
+ .gem_prime_res_obj = msm_gem_prime_res_obj,
+ .gem_prime_pin = msm_gem_prime_pin,
+ .gem_prime_unpin = msm_gem_prime_unpin,
+ .gem_prime_get_sg_table = msm_gem_prime_get_sg_table,
+ .gem_prime_import_sg_table = msm_gem_prime_import_sg_table,
+ .gem_prime_vmap = msm_gem_prime_vmap,
+ .gem_prime_vunmap = msm_gem_prime_vunmap,
+ .gem_prime_mmap = msm_gem_prime_mmap,
+
+ .ioctls = NULL,
+ .num_ioctls = 0,
+ .fops = &fops,
+ .name = "msm",
+ .desc = "MSM Snapdragon DRM",
+ .date = "20181024",
+ .major = 1,
+ .minor = 1,
+};
+
+/**
+ * msm_pdev_edrm_probe - platform probe for the early display device
+ * @pdev: eDRM platform device
+ *
+ * Defers probing until the master (main DRM) device is fully up, since
+ * eDRM borrows the master's power handle, catalog and register mapping.
+ * The redundant to_platform_device(&pdev->dev) round-trip was removed
+ * (it is identical to pdev).
+ */
+static int msm_pdev_edrm_probe(struct platform_device *pdev)
+{
+	int ret;
+	struct drm_minor *minor;
+	struct drm_device *master_dev;
+	struct msm_drm_private *master_priv;
+	struct msm_kms *master_kms;
+
+	/* main DRM's minor ID is zero */
+	minor = drm_minor_acquire(0);
+	if (IS_ERR(minor)) {
+		pr_err("drm_minor has no dev, defer the probe\n");
+		return -EPROBE_DEFER;
+	}
+	master_dev = minor->dev;
+	drm_minor_release(minor);
+	if (!master_dev) {
+		pr_err("master_dev is null, defer the probe\n");
+		return -EPROBE_DEFER;
+	}
+
+	master_priv = master_dev->dev_private;
+	if (!master_priv) {
+		pr_err("master_priv is null, defer the probe\n");
+		return -EPROBE_DEFER;
+	}
+
+	master_kms = master_priv->kms;
+	if (!master_kms) {
+		pr_err("master KMS is null, defer the probe\n");
+		return -EPROBE_DEFER;
+	}
+
+	/* on all devices that I am aware of, iommu's which can map
+	 * any address the cpu can see are used:
+	 */
+	ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+	if (ret) {
+		pr_err("dma_set_mask_and_coherent return %d\n", ret);
+		return ret;
+	}
+
+	ret = drm_platform_init(&msm_edrm_driver, pdev);
+	if (ret)
+		DRM_ERROR("drm_platform_init failed: %d\n", ret);
+
+	return ret;
+}
+
+/**
+ * msm_pdev_edrm_remove - platform remove; unregister and free the
+ * eDRM drm device.  The pointless to_platform_device(&pdev->dev)
+ * conversion was dropped — pdev is already the platform device.
+ */
+static int msm_pdev_edrm_remove(struct platform_device *pdev)
+{
+	drm_put_dev(platform_get_drvdata(pdev));
+	return 0;
+}
+
+/* platform_device id table for legacy (non-DT) binding */
+static const struct platform_device_id msm_edrm_id[] = {
+ { "edrm_mdp", 0 },
+ { }
+};
+
+/*
+ * msm_edrm_lastclose - invoke the KMS lastclose handler directly.
+ *
+ * Unlike msm_lastclose() this does not flush the workqueue; it is used
+ * from the shutdown path where the worker must not be waited on.
+ */
+static void msm_edrm_lastclose(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->lastclose)
+ kms->funcs->lastclose(kms);
+}
+
+/**
+ * msm_pdev_edrm_shutdown - platform shutdown hook
+ * @pdev: eDRM platform device
+ *
+ * Runs the KMS lastclose handling and then flags shutdown so no further
+ * kickoffs occur.  Guards against a NULL drvdata/dev_private, which can
+ * happen when probe or load failed before platform_set_drvdata().
+ */
+static void msm_pdev_edrm_shutdown(struct platform_device *pdev)
+{
+	struct drm_device *ddev = platform_get_drvdata(pdev);
+	struct msm_drm_private *priv;
+
+	if (!ddev || !ddev->dev_private)
+		return;
+
+	priv = ddev->dev_private;
+	msm_edrm_lastclose(ddev);
+
+	/* set this after lastclose to allow kickoff from lastclose */
+	priv->shutdown_in_progress = true;
+}
+
+/* Device-tree match table for the eDRM node */
+static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,msm-kms-edrm" }, /* sde */
+ {}
+};
+MODULE_DEVICE_TABLE(of, dt_match);
+
+/*
+ * Platform driver; async probe is preferred so the deferred probes
+ * against the master DRM do not serialize boot.
+ */
+static struct platform_driver msm_platform_driver = {
+ .probe = msm_pdev_edrm_probe,
+ .remove = msm_pdev_edrm_remove,
+ .shutdown = msm_pdev_edrm_shutdown,
+ .driver = {
+ .name = "msm_early_drm",
+ .of_match_table = dt_match,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
+ },
+ .id_table = msm_edrm_id,
+};
+
+/* Module entry: register the eDRM platform driver */
+static int __init msm_edrm_register(void)
+{
+ DBG("init");
+ return platform_driver_register(&msm_platform_driver);
+}
+
+/* Module exit: unregister the eDRM platform driver */
+static void __exit msm_edrm_unregister(void)
+{
+ DBG("fini");
+ platform_driver_unregister(&msm_platform_driver);
+}
+
+module_init(msm_edrm_register);
+module_exit(msm_edrm_unregister);
+
+MODULE_DESCRIPTION("MSM EARLY DRM Driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/msm/ekms/edrm_encoder.c b/drivers/gpu/drm/msm/ekms/edrm_encoder.c
new file mode 100644
index 000000000000..0cee78c73f50
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_encoder.c
@@ -0,0 +1,112 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_encoder.h"
+#include "edrm_crtc.h"
+#include "sde_kms.h"
+
+/*
+ * Encoder enable/disable hooks.  The hardware path is already set up by
+ * the bootloader, so these only trace the transition.  Demoted from
+ * pr_err(): these are informational, not errors.
+ */
+static void edrm_encoder_enable(struct drm_encoder *drm_enc)
+{
+	pr_debug("eDRM Encoder enable\n");
+}
+
+static void edrm_encoder_disable(struct drm_encoder *drm_enc)
+{
+	pr_debug("eDRM Encoder disable\n");
+}
+
+/**
+ * edrm_encoder_destroy - free up drm_encoder object
+ * @encoder: encoder to tear down; the containing edrm_encoder is freed
+ */
+void edrm_encoder_destroy(struct drm_encoder *encoder)
+{
+	struct edrm_encoder *e = to_edrm_encoder(encoder);
+
+	drm_encoder_cleanup(encoder);
+	kfree(e);
+}
+
+/* Atomic helper vtable: enable/disable tracing stubs only */
+static const struct drm_encoder_helper_funcs edrm_encoder_helper_funcs = {
+ .disable = edrm_encoder_disable,
+ .enable = edrm_encoder_enable,
+};
+
+/* Core vtable: only destroy is needed for this simple encoder */
+static const struct drm_encoder_funcs edrm_encoder_funcs = {
+ .destroy = edrm_encoder_destroy,
+};
+
+/**
+ * edrm_encoder_wait_for_commit_done - wait until the register flush is done
+ * @drm_enc: Pointer to drm_encoder object
+ *
+ * Polls the CTL flush register (offset 0x18 from the CTL base — the
+ * register edrm_crtc_commit_kickoff() writes) until the bits in the
+ * crtc's sspp_flush_mask are consumed by hardware, or roughly 20-40ms
+ * elapse.  Always returns 0; a timeout is not reported to the caller.
+ * The unused sde_mdss_cfg local from the original code was removed.
+ */
+int edrm_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
+{
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct msm_edrm_kms *edrm_kms;
+	struct msm_edrm_display *display;
+	struct edrm_crtc *edrm_crtc;
+	struct sde_kms *master_kms;
+	struct msm_drm_private *master_priv;
+	u32 ctl_off;
+	u32 flush_register = 0;
+	int i;
+
+	dev = drm_enc->dev;
+	priv = dev->dev_private;
+	edrm_kms = to_edrm_kms(priv->kms);
+	master_priv = edrm_kms->master_dev->dev_private;
+	master_kms = to_sde_kms(master_priv->kms);
+	edrm_crtc = to_edrm_crtc(drm_enc->crtc);
+	display = &edrm_kms->display[edrm_crtc->display_id];
+	ctl_off = display->ctl_off;
+
+	/* poll edrm_crtc->sspp_flush_mask until cleared */
+	for (i = 0; i < 20; i++) {
+		flush_register = readl_relaxed(master_kms->mmio +
+			ctl_off + 0x18);
+		if ((flush_register & edrm_crtc->sspp_flush_mask) != 0)
+			usleep_range(1000, 2000);
+		else
+			break;
+	}
+
+	/* reset sspp_flush_mask */
+	edrm_crtc->sspp_flush_mask = 0;
+
+	return 0;
+}
+
+
+/**
+ * edrm_encoder_init - create drm_encoder object
+ * @dev: drm_device that this encoder is registered with
+ * @display: display structure associated with this encoder
+ *
+ * Returns the new encoder, or an ERR_PTR on allocation or
+ * drm_encoder_init() failure.
+ */
+struct drm_encoder *edrm_encoder_init(struct drm_device *dev,
+ struct msm_edrm_display *display)
+{
+ struct edrm_encoder *edrm_encoder;
+ struct drm_encoder *encoder;
+ int ret;
+
+ edrm_encoder = kzalloc(sizeof(*edrm_encoder), GFP_KERNEL);
+ if (!edrm_encoder)
+ return ERR_PTR(-ENOMEM);
+
+ encoder = &edrm_encoder->base;
+
+ ret = drm_encoder_init(dev, encoder,
+ &edrm_encoder_funcs,
+ display->encoder_type);
+ if (ret)
+ goto fail;
+
+ drm_encoder_helper_add(encoder, &edrm_encoder_helper_funcs);
+
+ edrm_encoder->intf_idx = display->intf_id;
+
+ return encoder;
+fail:
+ kfree(edrm_encoder);
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_encoder.h b/drivers/gpu/drm/msm/ekms/edrm_encoder.h
new file mode 100644
index 000000000000..eeb91d659535
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_encoder.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_ENCODER_H_
+#define _EDRM_ENCODER_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+/**
+ * struct edrm_encoder - eDRM subclass of drm_encoder
+ * @base:      base drm_encoder object
+ * @sspp_mask: SSPP bits associated with this encoder
+ * @intf_idx:  hardware interface index taken from the display's intf_id
+ */
+struct edrm_encoder {
+ struct drm_encoder base;
+ u32 sspp_mask;
+ int intf_idx;
+};
+
+/* Upcast from the embedded drm_encoder to the containing edrm_encoder */
+#define to_edrm_encoder(x) container_of(x, struct edrm_encoder, base)
+
+/**
+ * edrm_encoder_wait_for_commit_done - wait until the register flush is done
+ * @drm_enc: Pointer to drm_encoder object
+ */
+int edrm_encoder_wait_for_commit_done(struct drm_encoder *drm_enc);
+
+/**
+ * edrm_encoder_destroy - free up drm_encoder object
+ * @drm_enc: Pointer to drm encoder object
+ */
+void edrm_encoder_destroy(struct drm_encoder *encoder);
+
+/**
+ * edrm_encoder_init - create drm_encoder object
+ * @dev: drm_device that this encoder going to register.
+ * @display: display structure that associate with this encoder.
+ */
+struct drm_encoder *edrm_encoder_init(struct drm_device *dev,
+ struct msm_edrm_display *display);
+
+#endif /* _EDRM_ENCODER_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_kms.c b/drivers/gpu/drm/msm/ekms/edrm_kms.c
new file mode 100644
index 000000000000..c78b2f9dc080
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_kms.c
@@ -0,0 +1,717 @@
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <soc/qcom/boot_stats.h>
+#include "msm_kms.h"
+#include "edrm_kms.h"
+#include "edrm_crtc.h"
+#include "edrm_encoder.h"
+#include "edrm_plane.h"
+#include "edrm_connector.h"
+#include "sde_kms.h"
+#include "sde_formats.h"
+#include "edrm_splash.h"
+#include "sde_hdmi.h"
+#include "dsi_display.h"
+#include "sde_crtc.h"
+
+/* CTL_TOP register offset within a CTL block (interface mux readback) */
+#define MMSS_MDP_CTL_TOP_OFFSET 0x14
+
+/* true until the first commit that actually carries a framebuffer */
+static bool first_commit = true;
+
+/**
+ * edrm_kms_prepare_commit - pre-commit hook for the eDRM kms
+ * @kms: eDRM kms
+ * @state: the atomic state being committed
+ *
+ * Marks the first framebuffer-carrying commit for boot profiling,
+ * enables the shared power resource for the duration of the commit
+ * (released in edrm_kms_complete_commit), and arms the handoff flag
+ * when the bootloader splash is still running.
+ */
+static void edrm_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct drm_device *dev = edrm_kms->master_dev;
+ struct msm_drm_private *master_priv = edrm_kms->master_dev->dev_private;
+ struct sde_kms *master_kms;
+ int i, nplanes;
+ struct drm_plane *plane;
+ bool valid_commit = false;
+
+ master_kms = to_sde_kms(master_priv->kms);
+ /* a commit is "valid" if at least one plane carries a framebuffer */
+ nplanes = dev->mode_config.num_total_plane;
+ for (i = 0; i < nplanes; i++) {
+ plane = state->planes[i];
+ if (plane && plane->fb) {
+ valid_commit = true;
+ break;
+ }
+ }
+
+ if (valid_commit && first_commit) {
+ first_commit = false;
+ place_marker("eDRM display first valid commit");
+ }
+
+ sde_power_resource_enable(&master_priv->phandle,
+ master_kms->core_client, true);
+
+ /* Notify bootloader splash to stop */
+ if (valid_commit && edrm_kms->lk_running_flag) {
+
+ /*
+ * NOTE(review): only the handoff flag is set here; the actual
+ * splash-stop notification seems to be missing — confirm.
+ */
+
+ /* next eDRM close will trigger display resources handoff */
+ edrm_kms->handoff_flag = true;
+ }
+}
+
+/*
+ * edrm_kms_commit - kick off the flush for every active crtc touched by
+ * this atomic commit.
+ */
+static void edrm_kms_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ if (crtc->state->active)
+ edrm_crtc_commit_kickoff(crtc);
+ }
+}
+
+/*
+ * edrm_kms_complete_commit - wait for each crtc's flush to land, then
+ * drop the power vote taken in edrm_kms_prepare_commit().
+ */
+static void edrm_kms_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct msm_drm_private *master_priv = edrm_kms->master_dev->dev_private;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct sde_kms *master_kms;
+ int i;
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ edrm_crtc_complete_commit(crtc, old_crtc_state);
+
+ master_kms = to_sde_kms(master_priv->kms);
+ sde_power_resource_enable(&master_priv->phandle,
+ master_kms->core_client, false);
+}
+
+/*
+ * edrm_kms_wait_for_commit_done - block until every encoder attached to
+ * @crtc has finished its register flush.  Silently returns when the
+ * crtc is not enabled/active, logging the condition.
+ */
+static void edrm_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ dev = crtc->dev;
+ if (!dev)
+ return;
+
+ if (!crtc->state->enable) {
+ pr_err("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ pr_err("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ ret = edrm_encoder_wait_for_commit_done(encoder);
+ if (ret && ret != -EWOULDBLOCK) {
+ pr_err("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+/*
+ * edrm_kms_prepare_fence - per-commit fence preparation hook.
+ * Delegates to edrm_crtc_prepare_commit() (currently a no-op) for each
+ * crtc in the state after validating the arguments.
+ */
+static void edrm_kms_prepare_fence(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
+ pr_err("invalid argument(s)\n");
+ return;
+ }
+
+ /* old_state contains updated crtc pointers */
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ edrm_crtc_prepare_commit(crtc, old_crtc_state);
+}
+
+/**
+ * _edrm_kms_drm_obj_destroy - tear down all eDRM drm objects
+ * @edrm_kms: eDRM kms whose crtcs/planes/connectors/encoders to free
+ *
+ * Destroys every object registered in the private arrays and resets the
+ * counters so a subsequent init starts clean.  The misleading "invalid
+ * sde_kms" error message was corrected to name edrm_kms.
+ */
+static void _edrm_kms_drm_obj_destroy(struct msm_edrm_kms *edrm_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!edrm_kms) {
+		pr_err("invalid edrm_kms\n");
+		return;
+	} else if (!edrm_kms->dev) {
+		pr_err("invalid dev\n");
+		return;
+	} else if (!edrm_kms->dev->dev_private) {
+		pr_err("invalid dev_private\n");
+		return;
+	}
+	priv = edrm_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+}
+
+/*
+ * convert_dsi_to_drm_mode - translate DSI panel timing into a drm mode.
+ * Horizontal/vertical totals are rebuilt from active + porches + sync
+ * width; flag bits mirror the DSI mode flags.  A truthy sync polarity
+ * maps to the N*SYNC (active-low) drm flag.
+ */
+static void convert_dsi_to_drm_mode(const struct dsi_display_mode *dsi_mode,
+ struct drm_display_mode *drm_mode)
+{
+ memset(drm_mode, 0, sizeof(*drm_mode));
+
+ drm_mode->hdisplay = dsi_mode->timing.h_active;
+ drm_mode->hsync_start = drm_mode->hdisplay +
+ dsi_mode->timing.h_front_porch;
+ drm_mode->hsync_end = drm_mode->hsync_start +
+ dsi_mode->timing.h_sync_width;
+ drm_mode->htotal = drm_mode->hsync_end + dsi_mode->timing.h_back_porch;
+ drm_mode->hskew = dsi_mode->timing.h_skew;
+
+ drm_mode->vdisplay = dsi_mode->timing.v_active;
+ drm_mode->vsync_start = drm_mode->vdisplay +
+ dsi_mode->timing.v_front_porch;
+ drm_mode->vsync_end = drm_mode->vsync_start +
+ dsi_mode->timing.v_sync_width;
+ drm_mode->vtotal = drm_mode->vsync_end + dsi_mode->timing.v_back_porch;
+
+ drm_mode->vrefresh = dsi_mode->timing.refresh_rate;
+ drm_mode->clock = dsi_mode->pixel_clk_khz;
+
+ if (dsi_mode->flags & DSI_MODE_FLAG_SEAMLESS)
+ drm_mode->flags |= DRM_MODE_FLAG_SEAMLESS;
+ if (dsi_mode->flags & DSI_MODE_FLAG_DFPS)
+ drm_mode->private_flags |= MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS;
+ if (dsi_mode->flags & DSI_MODE_FLAG_VBLANK_PRE_MODESET)
+ drm_mode->private_flags |= MSM_MODE_FLAG_VBLANK_PRE_MODESET;
+ drm_mode->flags |= (dsi_mode->timing.h_sync_polarity) ?
+ DRM_MODE_FLAG_NHSYNC : DRM_MODE_FLAG_PHSYNC;
+ drm_mode->flags |= (dsi_mode->timing.v_sync_polarity) ?
+ DRM_MODE_FLAG_NVSYNC : DRM_MODE_FLAG_PVSYNC;
+
+ drm_mode_set_name(drm_mode);
+}
+
+/**
+ * setup_edrm_displays - bind an eDRM display to a master DRM interface
+ * @master_kms: master (main DRM) sde_kms; source of catalog and mmio
+ * @display: eDRM display descriptor to fill in
+ * @label: display name to match against the master display lists
+ * @type: interface type string, "dsi" or "hdmi"
+ *
+ * Looks up @label in the master KMS display list of the given @type,
+ * copies its mode and encoder/connector types into @display, then scans
+ * the CTL TOP registers to find which CTL path the bootloader (LK) left
+ * driving this interface.  Returns 0 on success, -EINVAL when the
+ * display or an active CTL path cannot be found, -ENOMEM on allocation
+ * failure.  Fixes vs. the original: the dsi_mode array is now freed on
+ * the success path too (it used to leak), and the garbled "No LK does
+ * not enable" message was cleaned up.
+ */
+static int setup_edrm_displays(struct sde_kms *master_kms,
+	struct msm_edrm_display *display,
+	const char *label, const char *type)
+{
+	int i, ret;
+	struct dsi_display *dsi_disp;
+	struct sde_hdmi *hdmi_display;
+	struct sde_mdss_cfg *cfg;
+	u32 reg_value;
+
+	cfg = master_kms->catalog;
+	ret = -EINVAL;
+	/* check main DRM for the matching display */
+	if (!strcmp(type, "dsi")) {
+		int mode_cnt;
+		struct dsi_display_mode *dsi_mode;
+		/* check main DRM's DSI display list */
+		for (i = 0; i < master_kms->dsi_display_count; i++) {
+			dsi_disp = (struct dsi_display *)
+				master_kms->dsi_displays[i];
+			if (!strcmp(dsi_disp->name, label)) {
+				dsi_display_get_modes(dsi_disp, NULL,
+						&mode_cnt);
+				dsi_mode = kcalloc(mode_cnt, sizeof(*dsi_mode),
+						GFP_KERNEL);
+				if (!dsi_mode)
+					return -ENOMEM;
+				dsi_display_get_modes(dsi_disp, dsi_mode,
+						&mode_cnt);
+
+				/* convert to DRM mode */
+				convert_dsi_to_drm_mode(&dsi_mode[0],
+						&display->mode);
+				/*
+				 * the mode array is only needed for the
+				 * conversion above; free it here so the
+				 * success path no longer leaks it
+				 */
+				kfree(dsi_mode);
+				display->encoder_type = DRM_MODE_ENCODER_DSI;
+				display->connector_type =
+						DRM_MODE_CONNECTOR_DSI;
+				ret = 0;
+				break;
+			}
+		}
+		if (ret) {
+			pr_err("Cannot find %s in main DRM\n", label);
+			return ret;
+		}
+		ret = -EINVAL;
+		for (i = 0; i < cfg->ctl_count; i++) {
+			reg_value = readl_relaxed(master_kms->mmio +
+				cfg->ctl[i].base + MMSS_MDP_CTL_TOP_OFFSET);
+			reg_value &= 0x000000F0;
+
+			/* Check the interface from TOP register */
+			if ((((reg_value >> 4) == 0x2) &&
+				(dsi_disp->ctrl[0].ctrl->index == 0)) ||
+				(((reg_value >> 4) == 0x3) &&
+				(dsi_disp->ctrl[0].ctrl->index == 1))) {
+				display->ctl_id = i + 1;
+				display->ctl_off = cfg->ctl[i].base;
+				display->lm_off = cfg->mixer[i].base;
+				ret = 0;
+				break;
+			}
+		}
+		if (ret) {
+			pr_err("LK does not enable %s\n", label);
+			return -EINVAL;
+		}
+	} else if (!strcmp(type, "hdmi")) {
+		/* for HDMI interface, check main DRM's HDMI display list */
+		for (i = 0; i < master_kms->hdmi_display_count; i++) {
+			hdmi_display = (struct sde_hdmi *)
+				master_kms->hdmi_displays[i];
+
+			if (!strcmp(hdmi_display->name, label)) {
+				drm_mode_copy(&display->mode,
+					(struct drm_display_mode *)
+					hdmi_display->mode_list.next);
+				display->encoder_type = DRM_MODE_ENCODER_TMDS;
+				display->connector_type =
+						DRM_MODE_CONNECTOR_HDMIA;
+				ret = 0;
+				break;
+			}
+		}
+		if (ret) {
+			pr_err("Cannot find %s in main DRM\n", label);
+			return ret;
+		}
+		ret = -EINVAL;
+		for (i = 0; i < cfg->ctl_count; i++) {
+			reg_value = readl_relaxed(master_kms->mmio +
+				cfg->ctl[i].base + MMSS_MDP_CTL_TOP_OFFSET);
+			reg_value &= 0x000000F0;
+
+			/* Check the interface from TOP register */
+			if ((reg_value >> 4) == 0x4) {
+				display->ctl_id = i + 1;
+				display->ctl_off = cfg->ctl[i].base;
+				display->lm_off = cfg->mixer[i].base;
+				ret = 0;
+				break;
+			}
+		}
+		if (ret) {
+			pr_err("LK does not enable %s\n", label);
+			return -EINVAL;
+		}
+	}
+	return ret;
+}
+
+/*
+ * _sspp_search - look up an SSPP pipe by name in the master catalog.
+ * On a match, fills in the pipe's register base, cfg id and type and
+ * returns 0; returns -1 when no pipe of that name exists.
+ */
+static int _sspp_search(const char *p_name, struct sde_mdss_cfg *cfg,
+	u32 *sspp_offset, u32 *sspp_cfg_id, u32 *sspp_type)
+{
+	int i;
+
+	for (i = 0; i < cfg->sspp_count; i++) {
+		if (strcmp(cfg->sspp[i].name, p_name))
+			continue;
+		*sspp_offset = cfg->sspp[i].base;
+		*sspp_cfg_id = cfg->sspp[i].id;
+		*sspp_type = cfg->sspp[i].type;
+		return 0;
+	}
+
+	return -1;
+}
+
+/**
+ * _edrm_kms_parse_dt - build planes/crtcs/encoders/connectors from DT
+ * @edrm_kms: eDRM kms to populate
+ *
+ * Parses the qcom,edrm-assigned-display children of the eDRM node,
+ * resolves each assigned plane against the master catalog and creates
+ * the corresponding DRM objects.  On failure all partially created
+ * objects are destroyed.  Returns 0 on success or a negative error.
+ *
+ * Fixes vs. the original: the connector-init result is now checked with
+ * IS_ERR(connector) (it checked the already-validated encoder), and the
+ * child-node reference held by for_each_child_of_node() is dropped on
+ * early exit.
+ */
+static int _edrm_kms_parse_dt(struct msm_edrm_kms *edrm_kms)
+{
+	struct sde_kms *master_kms;
+	struct msm_drm_private *master_priv;
+	struct msm_drm_private *priv;
+	struct sde_mdss_cfg *cfg;
+	struct device_node *parent, *node;
+	int i, ret, disp_cnt, plane_cnt;
+	const char *clabel;
+	const char *ctype;
+	struct device_node *plane_node;
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct edrm_plane *edrm_plane;
+	const char *p_name;
+	u32 lm_stage, sspp_offset, sspp_cfg_id, sspp_type;
+
+	master_priv = edrm_kms->master_dev->dev_private;
+	master_kms = to_sde_kms(master_priv->kms);
+	priv = edrm_kms->dev->dev_private;
+	cfg = master_kms->catalog;
+	ret = 0;
+	parent = of_get_child_by_name(edrm_kms->dev->dev->of_node,
+		"qcom,edrm-assigned-display");
+	if (!parent) {
+		pr_err("cannot find qcom,edrm-assigned-display\n");
+		return 0;
+	}
+
+	/* parse the dtsi and retrieve information from main DRM */
+	disp_cnt = 0;
+	for_each_child_of_node(parent, node) {
+		of_property_read_string(node, "qcom,intf-type", &ctype);
+		of_property_read_string(node, "qcom,label", &clabel);
+
+		plane_cnt = 0;
+		do {
+			plane_node = of_parse_phandle(node,
+				"qcom,assigned_plane", plane_cnt);
+			/* Initialize plane */
+			if (!plane_node)
+				break;
+
+			of_property_read_string(plane_node, "qcom,plane-name",
+				&p_name);
+			of_property_read_u32(plane_node, "lm-stage",
+				&lm_stage);
+			if (_sspp_search(p_name, cfg, &sspp_offset,
+				&sspp_cfg_id, &sspp_type)) {
+				pr_err("Cannot find %s in main DRM\n",
+					p_name);
+				continue;
+			}
+
+			plane = edrm_plane_init(edrm_kms->dev,
+					edrm_kms->plane_id[disp_cnt],
+					sspp_type);
+			if (IS_ERR(plane)) {
+				pr_err("edrm_plane_init failed\n");
+				ret = PTR_ERR(plane);
+				of_node_put(plane_node);
+				goto fail_put_node;
+			}
+			priv->planes[priv->num_planes] = plane;
+			edrm_plane = to_edrm_plane(plane);
+			edrm_plane->display_id = disp_cnt;
+			edrm_plane->lm_stage = lm_stage;
+			edrm_plane->sspp_offset = sspp_offset;
+			edrm_plane->sspp_cfg_id = sspp_cfg_id;
+			edrm_plane->sspp_type = sspp_type;
+			plane->possible_crtcs = (1 << disp_cnt);
+			priv->num_planes++;
+			plane_cnt++;
+			of_node_put(plane_node);
+		} while (plane_node);
+
+		edrm_kms->display[disp_cnt].plane_cnt = plane_cnt;
+		ret = setup_edrm_displays(master_kms,
+			&edrm_kms->display[disp_cnt], clabel, ctype);
+		if (ret)
+			goto fail_put_node;
+
+		/* Initialize crtc */
+		crtc = edrm_crtc_init(edrm_kms->dev,
+			&edrm_kms->display[disp_cnt], priv->planes[disp_cnt]);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			goto fail_put_node;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+
+		/* Initialize encoder */
+		encoder = edrm_encoder_init(edrm_kms->dev,
+			&edrm_kms->display[disp_cnt]);
+		if (IS_ERR(encoder)) {
+			ret = PTR_ERR(encoder);
+			goto fail_put_node;
+		}
+		encoder->possible_crtcs = (1 << disp_cnt);
+		priv->encoders[priv->num_encoders++] = encoder;
+
+		/* Initialize connector */
+		connector = edrm_connector_init(edrm_kms->dev,
+			priv->encoders[disp_cnt],
+			&edrm_kms->display[disp_cnt]);
+		/* check the connector, not the already-validated encoder */
+		if (IS_ERR(connector)) {
+			ret = PTR_ERR(connector);
+			goto fail_put_node;
+		}
+		priv->connectors[priv->num_connectors++] = connector;
+
+		disp_cnt++;
+	}
+	of_node_put(parent);
+
+	edrm_kms->display_count = disp_cnt;
+	edrm_kms->plane_count = priv->num_planes;
+	return ret;
+fail_put_node:
+	/* for_each_child_of_node() holds a reference on early exit */
+	of_node_put(node);
+
+	for (i = 0; i < priv->num_planes; i++)
+		edrm_plane_destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < disp_cnt; i++) {
+		if (priv->crtcs[i]) {
+			edrm_crtc_destroy(priv->crtcs[i]);
+			priv->num_crtcs--;
+		}
+		if (priv->encoders[i]) {
+			edrm_encoder_destroy(priv->encoders[i]);
+			priv->num_encoders--;
+		}
+		if (priv->connectors[i]) {
+			edrm_connector_destroy(priv->connectors[i]);
+			priv->num_connectors--;
+		}
+	}
+	disp_cnt = 0;
+	edrm_kms->display_count = 0;
+	edrm_kms->plane_count = 0;
+	of_node_put(parent);
+	return ret;
+}
+
+static int _edrm_kms_drm_obj_init(struct msm_edrm_kms *edrm_kms)
+{
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!edrm_kms || !edrm_kms->dev || !edrm_kms->dev->dev) {
+ pr_err("invalid edrm_kms\n");
+ return -EINVAL;
+ }
+
+ dev = edrm_kms->dev;
+ priv = dev->dev_private;
+
+ ret = _edrm_kms_parse_dt(edrm_kms);
+ if (ret)
+ goto fail;
+
+ return 0;
+fail:
+ _edrm_kms_drm_obj_destroy(edrm_kms);
+ return ret;
+}
+
+static int edrm_kms_postinit(struct msm_kms *kms)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct msm_edrm_kms *edrm_kms;
+
+ edrm_kms = to_edrm_kms(kms);
+ dev = edrm_kms->dev;
+
+ drm_for_each_crtc(crtc, dev)
+ edrm_crtc_postinit(crtc);
+
+ place_marker("eDRM driver init completed");
+ return 0;
+}
+
+static void edrm_kms_destroy(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms;
+ struct drm_device *dev;
+
+ if (!kms) {
+ pr_err("edrm_kms_destroy invalid kms\n");
+ return;
+ }
+
+ edrm_kms = to_edrm_kms(kms);
+ dev = edrm_kms->dev;
+ if (!dev) {
+ pr_err("invalid device\n");
+ return;
+ }
+
+ kfree(edrm_kms);
+}
+
+static void edrm_kms_lastclose(struct msm_kms *kms)
+{
+ /* handoff early drm resource */
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+
+ /* notify main DRM that eDRM is relased. main DRM can
+ * reclaim all eDRM resource. Main DRM will clear eDRM
+ * plane stage in next commit
+ */
+ if (edrm_kms->handoff_flag) {
+ pr_info("handoff eDRM resource to main DRM\n");
+ edrm_display_release(kms);
+ }
+}
+
+static int edrm_kms_hw_init(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms;
+ struct sde_kms *sde_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ struct msm_drm_private *master_priv;
+ int rc = -EINVAL;
+ u32 lk_status;
+
+ if (!kms) {
+ pr_err("edrm_kms_hw_init invalid kms\n");
+ goto error;
+ }
+
+ edrm_kms = to_edrm_kms(kms);
+ dev = edrm_kms->dev;
+ if (!dev || !dev->platformdev) {
+ pr_err("invalid device\n");
+ goto error;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ pr_err("invalid private data\n");
+ goto error;
+ }
+
+ master_priv = edrm_kms->master_dev->dev_private;
+ sde_kms = to_sde_kms(master_priv->kms);
+ rc = sde_power_resource_enable(&master_priv->phandle,
+ sde_kms->core_client, true);
+ if (rc) {
+ pr_err("resource enable failed: %d\n", rc);
+ goto error;
+ }
+
+ /* check bootloader status register */
+ lk_status = edrm_splash_get_lk_status(kms);
+ if (lk_status == SPLASH_STATUS_RUNNING)
+ edrm_kms->lk_running_flag = true;
+ else
+ edrm_kms->lk_running_flag = false;
+
+ /* if early domain is not start, eDRM cannot initialize
+ * display interface and bridge chip. Need to return err
+ * ToDo: implement interface and bridge chip startup functions
+ */
+ if (lk_status == SPLASH_STATUS_NOT_START) {
+ rc = -EINVAL;
+ pr_err("LK does not start, eDRM cannot initialize\n");
+ goto power_error;
+ }
+
+ /* only unsecure buffer is support for now */
+ edrm_kms->aspace = sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max height is
+ * is 4K
+ */
+ dev->mode_config.max_width = sde_kms->catalog->max_sspp_linewidth * 2;
+ dev->mode_config.max_height = 4096;
+
+ /*
+ * Support format modifiers for compression etc.
+ */
+ dev->mode_config.allow_fb_modifiers = true;
+
+ rc = _edrm_kms_drm_obj_init(edrm_kms);
+ if (rc) {
+ pr_err("drm obj init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ /* notify main DRM that eDRM is started */
+ edrm_display_acquire(kms);
+
+ sde_power_resource_enable(&master_priv->phandle,
+ sde_kms->core_client, false);
+ return 0;
+power_error:
+ sde_power_resource_enable(&master_priv->phandle,
+ sde_kms->core_client, false);
+error:
+ return rc;
+}
+
/*
 * edrm_kms_round_pixclk - .round_pixclk hook; eDRM does not adjust the
 * pixel clock, so the requested rate is returned unchanged.
 */
static long edrm_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
		struct drm_encoder *encoder)
{
	return rate;
}
+
/* msm_kms function table for the early-DRM (eDRM) KMS implementation;
 * format helpers are shared with the main SDE driver.
 */
static const struct msm_kms_funcs edrm_kms_funcs = {
	.hw_init = edrm_kms_hw_init,
	.postinit = edrm_kms_postinit,
	.prepare_fence = edrm_kms_prepare_fence,
	.prepare_commit = edrm_kms_prepare_commit,
	.commit = edrm_kms_commit,
	.complete_commit = edrm_kms_complete_commit,
	.wait_for_crtc_commit_done = edrm_kms_wait_for_commit_done,
	.check_modified_format = sde_format_check_modified_format,
	.get_format = sde_get_msm_format,
	.round_pixclk = edrm_kms_round_pixclk,
	.destroy = edrm_kms_destroy,
	.lastclose = edrm_kms_lastclose,
};
+
+struct msm_kms *msm_edrm_kms_init(struct drm_device *dev)
+{
+ struct msm_edrm_kms *edrm_kms;
+ struct drm_minor *minor;
+
+ if (!dev || !dev->dev_private) {
+ pr_err("drm device node invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ minor = drm_minor_acquire(0);
+ if (IS_ERR_OR_NULL(minor))
+ return ERR_PTR(-EINVAL);
+
+ edrm_kms = kzalloc(sizeof(*edrm_kms), GFP_KERNEL);
+ if (!edrm_kms) {
+ drm_minor_release(minor);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ msm_kms_init(&edrm_kms->base, &edrm_kms_funcs);
+ edrm_kms->dev = dev;
+ edrm_kms->master_dev = minor->dev;
+ drm_minor_release(minor);
+
+ return &edrm_kms->base;
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_kms.h b/drivers/gpu/drm/msm/ekms/edrm_kms.h
new file mode 100644
index 000000000000..214c5b85e614
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_kms.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_KMS_H_
+#define _EDRM_KMS_H_
+
+#include <drm/drmP.h>
+#include "msm_kms.h"
+
+#define MAX_CTRLS_PER_DISPLAY 2
+
/**
 * struct msm_edrm_display - state for one early-display pipeline
 * @display_id: index of this display within the eDRM KMS
 * @ctl_id: hardware CTL block id for this display
 * @intf_id: hardware interface (INTF) block id
 * @encoder_type: DRM encoder type exposed for this display
 * @connector_type: DRM connector type exposed for this display
 * @mode: display mode used by this pipeline
 * @ctl_off: CTL block register offset (presumably relative to the SDE
 *           mmio base — confirm against edrm_crtc users)
 * @lm_off: layer-mixer block register offset relative to the SDE mmio
 *          base (used for blend programming in edrm_plane)
 * @plane_cnt: number of planes attached to this display
 */
struct msm_edrm_display {
	int display_id;
	int ctl_id;
	int intf_id;
	int encoder_type;
	int connector_type;
	struct drm_display_mode mode;
	int ctl_off;
	int lm_off;
	int plane_cnt;
};
+
/**
 * struct msm_edrm_kms - top-level early-DRM KMS state
 * @base: embedded msm_kms; recovered with to_edrm_kms()
 * @dev: the eDRM drm_device
 * @master_dev: the main (master) SDE drm_device
 * @aspace: GEM address space borrowed from the master (unsecure domain)
 * @display: per-display state, one entry per early display
 * @display_count: number of valid entries in @display
 * @plane_id: plane ids parsed from devicetree
 * @plane_count: number of valid entries in @plane_id
 * @handoff_flag: when set, the next lastclose() hands eDRM resources
 *                back to the main kernel DRM
 * @lk_running_flag: true when the bootloader (LK) splash was still
 *                   running at hw_init time
 */
struct msm_edrm_kms {
	struct msm_kms base;
	struct drm_device *dev;
	struct drm_device *master_dev;
	struct msm_gem_address_space *aspace;

	struct msm_edrm_display display[MAX_ENCODERS];
	int display_count;

	int plane_id[MAX_PLANES];
	int plane_count;

	/* when this flag is set, the next lastclose() will trigger
	 * handoff eDRM resource to main kernel.
	 */
	bool handoff_flag;
	bool lk_running_flag;
};
+
+struct msm_kms *msm_edrm_kms_init(struct drm_device *dev);
+
+#define to_edrm_kms(x) container_of(x, struct msm_edrm_kms, base)
+
+#endif /* _EDRM_KMS_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_plane.c b/drivers/gpu/drm/msm/ekms/edrm_plane.c
new file mode 100644
index 000000000000..efe43543dc18
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_plane.c
@@ -0,0 +1,912 @@
+/* Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "edrm_plane.h"
+#include "edrm_crtc.h"
+#include "sde_kms.h"
+#include "edrm_kms.h"
+
/* SDE_SSPP_SRC */
/* SSPP (source pipe) register offsets, relative to the pipe base */
#define SSPP_SRC_SIZE 0x00
#define SSPP_SRC_XY 0x08
#define SSPP_OUT_SIZE 0x0c
#define SSPP_OUT_XY 0x10
#define SSPP_SRC0_ADDR 0x14
#define SSPP_SRC1_ADDR 0x18
#define SSPP_SRC2_ADDR 0x1C
#define SSPP_SRC3_ADDR 0x20
#define SSPP_SRC_YSTRIDE0 0x24
#define SSPP_SRC_YSTRIDE1 0x28
#define SSPP_SRC_FORMAT 0x30
#define SSPP_SRC_UNPACK_PATTERN 0x34
#define SSPP_SRC_OP_MODE 0x38
#define SSPP_CONSTANT_COLOR 0x3c
/* software pixel-extension registers, one set per component group */
#define PIPE_SW_PIX_EXT_C0_LR 0x100
#define PIPE_SW_PIX_EXT_C0_TB 0x104
#define PIPE_SW_PIXEL_EXT_C0_REQ 0x108
#define PIPE_SW_PIX_EXT_C1C2_LR 0x110
#define PIPE_SW_PIX_EXT_C1C2_TB 0x114
#define PIPE_SW_PIXEL_EXT_C1C2_REQ 0x118
#define PIPE_SW_PIX_EXT_C3_LR 0x120
#define PIPE_SW_PIX_EXT_C3_TB 0x124
#define PIPE_SW_PIXEL_EXT_C3_REQ 0x128
#define SSPP_CDP_CNTL 0x134
#define FLUSH_OFFSET 0x18
#define PIPE_OP_MODE 0x200
/* CSC matrix / clamp / bias registers (YUV -> RGB conversion) */
#define PIPE_CSC_1_MATRIX_COEFF_0 0x320
#define PIPE_CSC_1_MATRIX_COEFF_1 0x324
#define PIPE_CSC_1_MATRIX_COEFF_2 0x328
#define PIPE_CSC_1_MATRIX_COEFF_3 0x32C
#define PIPE_CSC_1_MATRIX_COEFF_4 0x330
#define PIPE_CSC_1_COMP_0_PRE_CLAMP 0x334
#define PIPE_CSC_1_COMP_1_PRE_CLAMP 0x338
#define PIPE_CSC_1_COMP_2_PRE_CLAMP 0x33C
/* NOTE(review): "CAMP" looks like a typo for "CLAMP"; name kept as-is
 * because it is referenced below (edrm_plane_enable_csc).
 */
#define PIPE_CSC_1_COMP_0_POST_CAMP 0x340
#define PIPE_CSC_1_COMP_1_POST_CLAMP 0x344
#define PIPE_CSC_1_COMP_2_POST_CLAMP 0x348
#define PIPE_CSC_1_COMP_0_PRE_BIAS 0x34C
#define PIPE_CSC_1_COMP_1_PRE_BIAS 0x350
#define PIPE_CSC_1_COMP_2_PRE_BIAS 0x354
#define PIPE_CSC_1_COMP_0_POST_BIAS 0x358
#define PIPE_CSC_1_COMP_1_POST_BIAS 0x35C
#define PIPE_CSC_1_COMP_2_POST_BIAS 0x360
/* QSEED2 scaler configuration registers */
#define PIPE_VP_0_QSEED2_CONFIG 0x204
#define PIPE_COMP0_3_PHASE_STEP_X 0x210
#define PIPE_COMP0_3_PHASE_STEP_Y 0x214
#define PIPE_COMP1_2_PHASE_STEP_X 0x218
#define PIPE_COMP1_2_PHASE_STEP_Y 0x21C
#define PIPE_VP_0_QSEED2_SHARP_SMOOTH_STRENGTH 0x230
#define PIPE_VP_0_QSEED2_SHARP_THRESHOLD_EDGE 0x234
#define PIPE_VP_0_QSEED2_SHARP_THRESHOLD_SMOOTH 0x238
#define PIPE_VP_0_QSEED2_SHARP_THRESHOLD_NOISE 0x23C

/* SSPP_SRC_FORMAT register values per pixel format */
#define SSPP_SOLID_FILL_FORMAT 0x004237FF
#define SSPP_ARGB8888_FORMAT 0x000237FF
#define SSPP_XRGB8888_FORMAT 0x000236FF
#define SSPP_ARGB1555_FORMAT 0x00023315
#define SSPP_XRGB1555_FORMAT 0x00023215
#define SSPP_ARGB4444_FORMAT 0x00023340
#define SSPP_XRGB4444_FORMAT 0x00023240
#define SSPP_NV12_FORMAT 0x0192923F
#define SSPP_NV16_FORMAT 0x0092923F
#define SSPP_YUYV_FORMAT 0x0082B23F
#define SSPP_YUV420_FORMAT 0x018A803F
#define SSPP_RGB888_FORMAT 0x0002243F
#define SSPP_RGB565_FORMAT 0x00022216
/* SSPP_SRC_UNPACK_PATTERN register values (component ordering) */
#define SSPP_ARGB_PATTERN 0x03020001
#define SSPP_ABGR_PATTERN 0x03010002
#define SSPP_RGBA_PATTERN 0x02000103
#define SSPP_BGRA_PATTERN 0x01000203

/* layer-mixer blend registers/values for mixer stage 5 */
#define LAYER_BLEND5_OP 0x260
#define LAYER_OP_ENABLE_ALPHA_BLEND 0x600
#define LAYER_OP_DISABLE_ALPHA_BLEND 0x200
+
+static u32 edrm_plane_formats_RGB[] = {
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_RGBA8888,
+ DRM_FORMAT_BGRA8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_RGBX8888,
+ DRM_FORMAT_BGRX8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_BGR565,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGBA5551,
+ DRM_FORMAT_BGRA5551,
+ DRM_FORMAT_XRGB1555,
+ DRM_FORMAT_XBGR1555,
+ DRM_FORMAT_RGBX5551,
+ DRM_FORMAT_BGRX5551,
+ DRM_FORMAT_ARGB4444,
+ DRM_FORMAT_ABGR4444,
+ DRM_FORMAT_RGBA4444,
+ DRM_FORMAT_BGRA4444,
+ DRM_FORMAT_XRGB4444,
+ DRM_FORMAT_XBGR4444,
+ DRM_FORMAT_RGBX4444,
+ DRM_FORMAT_BGRX4444
+};
+
/* DRM fourcc list advertised for SSPPs that also support YUV formats
 * (superset of the RGB list plus the YUV fourccs).
 */
static u32 edrm_plane_formats_YUV[] = {
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_ABGR8888,
	DRM_FORMAT_RGBA8888,
	DRM_FORMAT_BGRX8888,
	DRM_FORMAT_BGRA8888,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_XBGR8888,
	DRM_FORMAT_RGBX8888,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_BGR888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_BGR565,
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ABGR1555,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_BGRA5551,
	DRM_FORMAT_XRGB1555,
	DRM_FORMAT_XBGR1555,
	DRM_FORMAT_RGBX5551,
	DRM_FORMAT_BGRX5551,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ABGR4444,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_BGRA4444,
	DRM_FORMAT_XRGB4444,
	DRM_FORMAT_XBGR4444,
	DRM_FORMAT_RGBX4444,
	DRM_FORMAT_BGRX4444,
	DRM_FORMAT_NV12,
	DRM_FORMAT_NV21,
	DRM_FORMAT_NV16,
	DRM_FORMAT_NV61,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
	DRM_FORMAT_YUV420,
	DRM_FORMAT_YVU420
};
+
+static void edrm_plane_enable_csc(struct sde_kms *master_kms,
+ u32 plane_offset)
+{
+ writel_relaxed(0x00060000, master_kms->mmio + plane_offset +
+ PIPE_OP_MODE);
+
+ writel_relaxed(0x9, master_kms->mmio + plane_offset + SSPP_CDP_CNTL);
+ writel_relaxed(0x00000254, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_0);
+ writel_relaxed(0x02540396, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_1);
+ writel_relaxed(0x1eef1f93, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_2);
+ writel_relaxed(0x043e0254, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_3);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_MATRIX_COEFF_4);
+
+ writel_relaxed(0x000010eb, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_PRE_CLAMP);
+ writel_relaxed(0x000010f0, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_PRE_CLAMP);
+ writel_relaxed(0x000010f0, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_PRE_CLAMP);
+ writel_relaxed(0x000000ff, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_POST_CAMP);
+ writel_relaxed(0x000000ff, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_POST_CLAMP);
+ writel_relaxed(0x000000ff, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_POST_CLAMP);
+ writel_relaxed(0x0000fff0, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_PRE_BIAS);
+ writel_relaxed(0x0000ff80, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_PRE_BIAS);
+ writel_relaxed(0x0000ff80, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_PRE_BIAS);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_0_POST_BIAS);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_1_POST_BIAS);
+ writel_relaxed(0x00000000, master_kms->mmio + plane_offset +
+ PIPE_CSC_1_COMP_2_POST_BIAS);
+
+ writel_relaxed(0x200000, master_kms->mmio + plane_offset +
+ PIPE_COMP0_3_PHASE_STEP_X);
+ writel_relaxed(0x200000, master_kms->mmio + plane_offset +
+ PIPE_COMP0_3_PHASE_STEP_Y);
+ writel_relaxed(0x100000, master_kms->mmio + plane_offset +
+ PIPE_COMP1_2_PHASE_STEP_X);
+ writel_relaxed(0x100000, master_kms->mmio + plane_offset +
+ PIPE_COMP1_2_PHASE_STEP_Y);
+}
+
+static void edrm_plane_set_yuv_plane(struct drm_plane *plane,
+ struct sde_kms *master_kms, u32 lm_off)
+{
+ u32 img_size, ystride0, ystride1;
+ u32 plane0_addr, plane1_addr, plane2_addr, plane3_addr;
+ struct edrm_plane *edrm_plane;
+
+ edrm_plane = to_edrm_plane(plane);
+ edrm_plane_enable_csc(master_kms, edrm_plane->sspp_offset);
+ if ((plane->state->fb->pixel_format == DRM_FORMAT_NV12) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_NV21) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_NV16) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_NV61)) {
+ ystride0 = (plane->state->fb->width << 16) |
+ plane->state->fb->width;
+ ystride1 = 0;
+ plane0_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ plane1_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 1);
+ plane2_addr = 0;
+ plane3_addr = 0;
+ } else if ((plane->state->fb->pixel_format == DRM_FORMAT_YUYV) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_YVYU) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_VYUY) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_UYVY)) {
+ /* YUYV formats are single plane */
+ ystride0 = plane->state->fb->width * 2;
+ ystride1 = 0;
+ plane0_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ plane1_addr = 0;
+ plane2_addr = 0;
+ plane3_addr = 0;
+ } else if ((plane->state->fb->pixel_format == DRM_FORMAT_YUV420) ||
+ (plane->state->fb->pixel_format == DRM_FORMAT_YVU420)) {
+ ystride0 = ((plane->state->fb->width/2) << 16) |
+ plane->state->fb->width;
+ ystride1 = plane->state->fb->width/2;
+ plane0_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ plane1_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 1);
+ plane2_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 2);
+ plane3_addr = 0;
+ } else {
+ pr_err("Format %x not supported in eDRM\n",
+ plane->state->fb->pixel_format);
+ return;
+ }
+
+ writel_relaxed(ystride0, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE0);
+ writel_relaxed(ystride1, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE1);
+ writel_relaxed(plane0_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC0_ADDR);
+ writel_relaxed(plane1_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC1_ADDR);
+ writel_relaxed(plane2_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC2_ADDR);
+ writel_relaxed(plane3_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC3_ADDR);
+ writel_relaxed(0x00055f03, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_CONFIG);
+ writel_relaxed(0x00000020, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_SMOOTH_STRENGTH);
+ writel_relaxed(0x00000070, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_THRESHOLD_EDGE);
+ writel_relaxed(0x00000008, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_THRESHOLD_SMOOTH);
+ writel_relaxed(0x00000002, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_SHARP_THRESHOLD_NOISE);
+
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_LR);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_TB);
+ img_size = ((plane->state->fb->height + 3) << 16) |
+ (plane->state->fb->width + 3);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C0_REQ);
+
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_LR);
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_TB);
+ img_size = ((plane->state->fb->height/2 + 1) << 16) |
+ (plane->state->fb->width/2 + 1);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C1C2_REQ);
+
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C3_LR);
+ writel_relaxed(0x00010000, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C3_TB);
+ img_size = ((plane->state->fb->height + 1) << 16) |
+ (plane->state->fb->width + 1);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C3_REQ);
+
+
+ /* do a solid fill of transparent color */
+ writel_relaxed(0xFF000000, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_CONSTANT_COLOR);
+
+ /* setup blending for mixer stage 5 */
+ writel_relaxed(LAYER_OP_DISABLE_ALPHA_BLEND, master_kms->mmio + lm_off
+ + LAYER_BLEND5_OP);
+}
+
+static void edrm_plane_set_rgb_plane(struct drm_plane *plane,
+ struct sde_kms *master_kms, u32 lm_off)
+{
+ u32 img_size, ystride0, ystride1, plane_addr;
+ struct edrm_plane *edrm_plane;
+
+ edrm_plane = to_edrm_plane(plane);
+
+ ystride0 = (plane->state->fb->width *
+ plane->state->fb->bits_per_pixel/8);
+ ystride1 = 0;
+ plane_addr = msm_framebuffer_iova(plane->state->fb,
+ edrm_plane->aspace, 0);
+ img_size = (plane->state->fb->height << 16) | plane->state->fb->width;
+ writel_relaxed(plane_addr, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC0_ADDR);
+ writel_relaxed(ystride0, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE0);
+ writel_relaxed(ystride1, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_YSTRIDE1);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_LR);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C0_TB);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C0_REQ);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_LR);
+ writel_relaxed(0x0, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIX_EXT_C1C2_TB);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C1C2_REQ);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_TB);
+ writel_relaxed(img_size, master_kms->mmio +
+ edrm_plane->sspp_offset + PIPE_SW_PIXEL_EXT_C3_REQ);
+ /* do a solid fill of transparent color */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_CONSTANT_COLOR);
+
+ /* setup blending for mixer stage 5 */
+ writel_relaxed(LAYER_OP_ENABLE_ALPHA_BLEND, master_kms->mmio + lm_off
+ + LAYER_BLEND5_OP);
+
+ /* disable CSC */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_OP_MODE);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_CONFIG);
+}
+
+static int edrm_plane_modeset(struct drm_plane *plane)
+{
+ struct drm_device *dev = plane->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms;
+ struct msm_drm_private *master_priv;
+ struct sde_kms *master_kms;
+ struct edrm_plane *edrm_plane;
+ bool yuv_format;
+ u32 img_size, src_xy, dst_xy, lm_off;
+ struct msm_edrm_display *display;
+
+ edrm_kms = to_edrm_kms(kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ edrm_plane = to_edrm_plane(plane);
+ display = &edrm_kms->display[edrm_plane->display_id];
+ lm_off = display->lm_off;
+
+ switch (plane->state->fb->pixel_format) {
+ case DRM_FORMAT_ARGB8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ABGR8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBA8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRX8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRA8888:
+ writel_relaxed(SSPP_ARGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XRGB8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XBGR8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBX8888:
+ writel_relaxed(SSPP_XRGB8888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGB888:
+ writel_relaxed(SSPP_RGB888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGR888:
+ writel_relaxed(SSPP_RGB888_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00010002, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGB565:
+ writel_relaxed(SSPP_RGB565_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGR565:
+ writel_relaxed(SSPP_RGB565_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00010002, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ARGB1555:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ABGR1555:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBA5551:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRA5551:
+ writel_relaxed(SSPP_ARGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XRGB1555:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XBGR1555:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBX5551:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRX5551:
+ writel_relaxed(SSPP_XRGB1555_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ARGB4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_ABGR4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBA4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRA4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XRGB4444:
+ writel_relaxed(SSPP_ARGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_XBGR4444:
+ writel_relaxed(SSPP_XRGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ABGR_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_RGBX4444:
+ writel_relaxed(SSPP_XRGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_RGBA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_BGRX4444:
+ writel_relaxed(SSPP_XRGB4444_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_BGRA_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = false;
+ break;
+ case DRM_FORMAT_NV12:
+ writel_relaxed(SSPP_NV12_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000201, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_NV21:
+ writel_relaxed(SSPP_NV12_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000102, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_NV16:
+ writel_relaxed(SSPP_NV16_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000201, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_NV61:
+ writel_relaxed(SSPP_NV16_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000102, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_VYUY:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00010002, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_UYVY:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00020001, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YUYV:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x02000100, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YVYU:
+ writel_relaxed(SSPP_YUYV_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x01000200, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YUV420:
+ writel_relaxed(SSPP_YUV420_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000102, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ case DRM_FORMAT_YVU420:
+ writel_relaxed(SSPP_YUV420_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(0x00000201, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ yuv_format = true;
+ break;
+ default:
+ pr_err("Format %x not supported in eDRM\n",
+ plane->state->fb->pixel_format);
+ return -EINVAL;
+ }
+
+ if (yuv_format)
+ edrm_plane_set_yuv_plane(plane, master_kms, lm_off);
+ else
+ edrm_plane_set_rgb_plane(plane, master_kms, lm_off);
+
+ img_size = (plane->state->fb->height << 16) | plane->state->fb->width;
+ src_xy = (plane->state->src_x << 16) | plane->state->src_y;
+ dst_xy = (plane->state->crtc_x << 16) | plane->state->crtc_y;
+
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_SIZE);
+ writel_relaxed(src_xy, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_XY);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_SIZE);
+ writel_relaxed(dst_xy, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_XY);
+
+ return 0;
+}
+
+/* Tear down an eDRM plane: disable it through the atomic helper, detach
+ * it from the DRM core, then free the edrm_plane allocated in
+ * edrm_plane_init().
+ */
+void edrm_plane_destroy(struct drm_plane *plane)
+{
+ struct edrm_plane *edrm_plane = to_edrm_plane(plane);
+
+ drm_plane_helper_disable(plane);
+ drm_plane_cleanup(plane);
+ kfree(edrm_plane);
+}
+
+/* Accumulate this plane's CTL flush bit into the owning CRTC's flush mask.
+ *
+ * sspp_flush_mask_bit maps the 1-based sspp_cfg_id to its bit position in
+ * the CTL_FLUSH register.
+ *
+ * Return: 0 on success, -EINVAL if sspp_cfg_id is out of range.
+ */
+int edrm_plane_flush(struct drm_plane *plane)
+{
+ struct edrm_plane *edrm_plane = to_edrm_plane(plane);
+ struct edrm_crtc *edrm_crtc = to_edrm_crtc(plane->state->crtc);
+ static const u32 sspp_flush_mask_bit[10] = {
+ 0, 1, 2, 18, 3, 4, 5, 19, 11, 12};
+
+ /* the lookup below indexes with sspp_cfg_id - 1; an id of 0 (never
+ * initialized) or one past the table would read out of bounds
+ */
+ if (edrm_plane->sspp_cfg_id < 1 ||
+ edrm_plane->sspp_cfg_id > ARRAY_SIZE(sspp_flush_mask_bit))
+ return -EINVAL;
+
+ edrm_crtc->sspp_flush_mask |=
+ BIT(sspp_flush_mask_bit[edrm_plane->sspp_cfg_id - 1]);
+ return 0;
+}
+
+/* Validate a proposed plane state. Currently accepts every configuration;
+ * real validation is still TODO.
+ */
+static int edrm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ /* TODO: check plane setting */
+ return 0;
+}
+
+/* Commit the new plane state to hardware by programming the SSPP via
+ * edrm_plane_modeset(). Programming is skipped when no CRTC or no
+ * framebuffer is attached (plane is being disabled).
+ */
+static void edrm_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (!plane->state->crtc) {
+ pr_err("state crtc is null, skip pipe programming\n");
+ return;
+ }
+ if (!plane->state->fb) {
+ pr_err("state fb is null, skip pipe programming\n");
+ return;
+ }
+
+ if (edrm_plane_modeset(plane))
+ pr_err("Plane modeset failed\n");
+}
+
+/* Plane disable should setup the sspp to show a transparent frame
+ * If the pipe still attached with a buffer pointer, the buffer could
+ * be released and cause SMMU fault. We don't want to change CTL and
+ * LM during eDRM closing because main DRM could be updating CTL and
+ * LM at any moment. In eDRM lastclose(), it will notify main DRM to
+ * release eDRM display resource. The next main DRM commit will clear
+ * the stage setup by eDRM
+ */
+static void edrm_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_device *dev = plane->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ struct msm_edrm_kms *edrm_kms;
+ struct msm_drm_private *master_priv;
+ struct sde_kms *master_kms;
+ struct msm_edrm_display *display;
+ struct edrm_plane *edrm_plane;
+ u32 img_size, stride, lm_off;
+
+ edrm_kms = to_edrm_kms(kms);
+ master_priv = edrm_kms->master_dev->dev_private;
+ master_kms = to_sde_kms(master_priv->kms);
+ dev = edrm_kms->dev;
+ priv = dev->dev_private;
+
+ edrm_plane = to_edrm_plane(plane);
+ display = &edrm_kms->display[edrm_plane->display_id];
+ lm_off = display->lm_off;
+
+ /* setup SSPP: full-screen source and destination at origin (0,0) */
+ img_size = (display->mode.vdisplay << 16) | display->mode.hdisplay;
+ stride = display->mode.hdisplay * 4;
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_SIZE);
+ writel_relaxed(0, master_kms->mmio + edrm_plane->sspp_offset
+ + SSPP_SRC_XY);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_SIZE);
+ writel_relaxed(0, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_OUT_XY);
+ writel_relaxed(stride, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_SRC_YSTRIDE0);
+ /* clear software pixel extension so the frame is fetched as-is */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C0_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C0_TB);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIXEL_EXT_C0_REQ);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C1C2_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C1C2_TB);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIXEL_EXT_C1C2_REQ);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_LR);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIX_EXT_C3_TB);
+ writel_relaxed(img_size, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_SW_PIXEL_EXT_C3_REQ);
+
+ /* RGB format */
+ writel_relaxed(SSPP_SOLID_FILL_FORMAT, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_FORMAT);
+ writel_relaxed(SSPP_ARGB_PATTERN, master_kms->mmio +
+ edrm_plane->sspp_offset + SSPP_SRC_UNPACK_PATTERN);
+ /* do a solid fill of transparent color */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ SSPP_CONSTANT_COLOR);
+ /* keep alpha blending on so the transparent fill shows through */
+ writel_relaxed(LAYER_OP_ENABLE_ALPHA_BLEND, master_kms->mmio + lm_off
+ + LAYER_BLEND5_OP);
+
+ /* disable CSC */
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset +
+ PIPE_OP_MODE);
+ writel_relaxed(0x0, master_kms->mmio + edrm_plane->sspp_offset
+ + PIPE_VP_0_QSEED2_CONFIG);
+}
+
+/* Pin the incoming framebuffer into the eDRM address space ahead of the
+ * atomic commit. A NULL fb means the plane is being disabled and is not
+ * an error.
+ *
+ * Return: 0 on success or when there is no fb, negative errno otherwise.
+ */
+static int edrm_plane_prepare_fb(struct drm_plane *plane,
+ const struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb;
+ struct edrm_plane *edrm_plane;
+
+ if (!plane || !new_state)
+ return -EINVAL;
+
+ if (!new_state->fb)
+ return 0;
+ edrm_plane = to_edrm_plane(plane);
+ fb = new_state->fb;
+ return msm_framebuffer_prepare(fb, edrm_plane->aspace);
+}
+
+/* Unpin the old framebuffer from the eDRM address space once the commit
+ * that replaced it has completed. Silently ignores a missing plane,
+ * state or fb.
+ */
+static void edrm_plane_cleanup_fb(struct drm_plane *plane,
+ const struct drm_plane_state *old_state)
+{
+ if (!plane || !old_state)
+ return;
+
+ if (!old_state->fb)
+ return;
+
+ msm_framebuffer_cleanup(old_state->fb, to_edrm_plane(plane)->aspace);
+}
+
+/* Core plane vtable: all atomic plumbing is delegated to the DRM atomic
+ * helpers; only destroy is eDRM-specific.
+ */
+static const struct drm_plane_funcs edrm_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = edrm_plane_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+/* Atomic helper hooks implementing the eDRM-specific plane behavior */
+static const struct drm_plane_helper_funcs edrm_plane_helper_funcs = {
+ .prepare_fb = edrm_plane_prepare_fb,
+ .cleanup_fb = edrm_plane_cleanup_fb,
+ .atomic_check = edrm_plane_atomic_check,
+ .atomic_update = edrm_plane_atomic_update,
+ .atomic_disable = edrm_plane_atomic_disable,
+};
+
+/* Create and register an eDRM primary plane for the given pipe.
+ *
+ * VIG pipes advertise the YUV format list, every other pipe type the RGB
+ * list.
+ *
+ * Return: the new drm_plane on success, ERR_PTR(-errno) on failure.
+ */
+struct drm_plane *edrm_plane_init(struct drm_device *dev, int pipe,
+ u32 pipe_type)
+{
+ struct msm_drm_private *priv;
+ struct msm_edrm_kms *edrm_kms;
+ struct edrm_plane *edrm_plane;
+ struct drm_plane *plane;
+ int ret;
+
+ edrm_plane = kzalloc(sizeof(*edrm_plane), GFP_KERNEL);
+ if (!edrm_plane) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ plane = &edrm_plane->base;
+ if (pipe_type == SSPP_TYPE_VIG)
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &edrm_plane_funcs,
+ edrm_plane_formats_YUV,
+ ARRAY_SIZE(edrm_plane_formats_YUV),
+ DRM_PLANE_TYPE_PRIMARY);
+ else
+ ret = drm_universal_plane_init(dev, plane, 0,
+ &edrm_plane_funcs,
+ edrm_plane_formats_RGB,
+ ARRAY_SIZE(edrm_plane_formats_RGB),
+ DRM_PLANE_TYPE_PRIMARY);
+ if (ret)
+ goto fail_free;
+
+ drm_plane_helper_add(plane, &edrm_plane_helper_funcs);
+
+ priv = dev->dev_private;
+ edrm_kms = to_edrm_kms(priv->kms);
+
+ edrm_plane->pipe = pipe;
+ edrm_plane->aspace = edrm_kms->aspace;
+
+ return plane;
+fail_free:
+ /* plane was never registered with DRM; just drop the allocation
+ * (the original code leaked edrm_plane on this path)
+ */
+ kfree(edrm_plane);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_plane.h b/drivers/gpu/drm/msm/ekms/edrm_plane.h
new file mode 100644
index 000000000000..3136bb86d124
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_plane.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _EDRM_PLANE_H_
+#define _EDRM_PLANE_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "edrm_kms.h"
+
+/* Per-pipe eDRM plane bookkeeping wrapped around the core drm_plane */
+struct edrm_plane {
+ struct drm_plane base;
+ struct msm_gem_address_space *aspace; /* address space for fb pinning */
+ int pipe;
+ int display_id; /* index into edrm_kms->display[] */
+ u32 sspp_offset; /* MMIO offset of this pipe's SSPP block */
+ u32 sspp_cfg_id; /* 1-based SSPP id, used for CTL flush mapping */
+ u32 lm_stage;
+ u32 sspp_type; /* SSPP_TYPE_VIG / RGB / DMA */
+};
+
+int edrm_plane_flush(struct drm_plane *plane);
+
+struct drm_plane *edrm_plane_init(struct drm_device *dev, int id, u32 type);
+
+void edrm_plane_destroy(struct drm_plane *plane);
+
+#define to_edrm_plane(x) container_of(x, struct edrm_plane, base)
+
+#endif /* _EDRM_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/ekms/edrm_splash.c b/drivers/gpu/drm/msm/ekms/edrm_splash.c
new file mode 100644
index 000000000000..f29f4926e6fb
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_splash.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+#include <soc/qcom/early_domain.h>
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "edrm_kms.h"
+#include "sde_splash.h"
+#include "edrm_splash.h"
+
+/* scratch registers */
+#define SCRATCH_REGISTER_0 0x014
+#define SCRATCH_REGISTER_1 0x018
+#define SCRATCH_REGISTER_2 0x01C
+#define SCRATCH_REGISTER_3 0x020
+
+#define SDE_RUNNING_VALUE 0xC001CAFE
+#define SDE_LK_STOP_VALUE 0xDEADDEAD
+#define SDE_EXIT_VALUE 0xDEADBEEF
+#define SDE_LK_IMMEDIATE_STOP_VALUE 0xFEFEFEFE
+
+/*
+ * Report whether the early (LK) display service is still running.
+ */
+int edrm_splash_get_lk_status(struct msm_kms *kms)
+{
+ return get_early_service_status(EARLY_DISPLAY) ?
+ SPLASH_STATUS_RUNNING : SPLASH_STATUS_NOT_START;
+}
+
+
+/*
+ * Tell the main DRM driver that eDRM is active and its display
+ * resources (pipes, mixer stage) are in use, so main DRM must not
+ * reclaim them yet.
+ */
+void edrm_display_acquire(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct sde_kms *master_kms;
+ struct sde_splash_info *master_sinfo;
+ struct msm_drm_private *master_priv =
+ edrm_kms->master_dev->dev_private;
+
+ master_kms = to_sde_kms(master_priv->kms);
+ master_sinfo = &master_kms->splash_info;
+ master_sinfo->early_display_enabled = true;
+}
+
+/*
+ * Tell the main DRM driver that eDRM no longer needs its display
+ * resources; main DRM is free to reclaim them on its next commit.
+ */
+void edrm_display_release(struct msm_kms *kms)
+{
+ struct msm_edrm_kms *edrm_kms = to_edrm_kms(kms);
+ struct sde_kms *master_kms;
+ struct sde_splash_info *master_sinfo;
+ struct msm_drm_private *master_priv =
+ edrm_kms->master_dev->dev_private;
+
+ master_kms = to_sde_kms(master_priv->kms);
+ master_sinfo = &master_kms->splash_info;
+ master_sinfo->early_display_enabled = false;
+}
diff --git a/drivers/gpu/drm/msm/ekms/edrm_splash.h b/drivers/gpu/drm/msm/ekms/edrm_splash.h
new file mode 100644
index 000000000000..658394dbddea
--- /dev/null
+++ b/drivers/gpu/drm/msm/ekms/edrm_splash.h
@@ -0,0 +1,44 @@
+/**
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef EDRM_SPLASH_H_
+#define EDRM_SPLASH_H_
+
+#define SPLASH_STATUS_NOT_START 0
+#define SPLASH_STATUS_RUNNING 1
+#define SPLASH_STATUS_STOP 2
+
+/* APIs for early splash handoff functions */
+
+/**
+ * edrm_splash_get_lk_status
+ *
+ * Get early display status to set the status flag.
+ */
+int edrm_splash_get_lk_status(struct msm_kms *kms);
+
+/**
+ * edrm_display_acquire
+ *
+ * Update main DRM that eDRM is active and eDRM display resource is being used.
+ */
+void edrm_display_acquire(struct msm_kms *kms);
+
+/**
+ * edrm_display_release
+ *
+ * Update main DRM that eDRM is no longer active and the eDRM display
+ * resource is no longer being used. Main DRM can claim it back at any time.
+ */
+void edrm_display_release(struct msm_kms *kms);
+
+#endif
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
new file mode 100644
index 000000000000..d755ba959c20
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.c
@@ -0,0 +1,3426 @@
+/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "sde-hdmi:[%s] " fmt, __func__
+
+#include <linux/list.h>
+#include <linux/of.h>
+#include <linux/gpio.h>
+#include <linux/of_irq.h>
+#include <linux/of_platform.h>
+#include <linux/irqdomain.h>
+
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+static DEFINE_MUTEX(sde_hdmi_list_lock);
+static LIST_HEAD(sde_hdmi_list);
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_UPDATE_1 0x11
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS_0 0x40
+#define HDMI_SCDC_STATUS_FLAGS_1 0x41
+#define HDMI_SCDC_ERR_DET_0_L 0x50
+#define HDMI_SCDC_ERR_DET_0_H 0x51
+#define HDMI_SCDC_ERR_DET_1_L 0x52
+#define HDMI_SCDC_ERR_DET_1_H 0x53
+#define HDMI_SCDC_ERR_DET_2_L 0x54
+#define HDMI_SCDC_ERR_DET_2_H 0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56
+
+#define HDMI_DISPLAY_MAX_WIDTH 4096
+#define HDMI_DISPLAY_MAX_HEIGHT 2160
+
+static const struct of_device_id sde_hdmi_dt_match[] = {
+ {.compatible = "qcom,hdmi-display"},
+ {}
+};
+
+/* debugfs: dump basic display info (currently just the display name).
+ * Single-shot read: a non-zero *ppos returns 0 (EOF).
+ */
+static ssize_t _sde_hdmi_debugfs_dump_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0;
+
+ buf = kzalloc(SZ_1K, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += snprintf(buf, SZ_1K, "name = %s\n", display->name);
+
+ /* user buffer too small for the whole report */
+ if (len > count) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+/* debugfs: list every mode parsed from the sink's EDID, one per line.
+ *
+ * Fixes over the original: all prints are bounded by the actual
+ * allocation (the original used PAGE_SIZE/SZ_4K limits unrelated to the
+ * buffer size), scnprintf() is used so len can never exceed the buffer,
+ * and a too-small user buffer is rejected like in the sibling readers.
+ */
+static ssize_t _sde_hdmi_debugfs_edid_modes_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char *buf;
+ u32 len = 0;
+ u32 buf_size;
+ struct drm_connector *connector;
+ u32 mode_count = 0;
+ struct drm_display_mode *mode;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ mode_count++;
+ }
+
+ /* Adding one more to store title */
+ mode_count++;
+
+ /* one struct-sized slot per line is ample for the text */
+ buf_size = mode_count * sizeof(*mode);
+ buf = kzalloc(buf_size, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ len += scnprintf(buf + len, buf_size - len,
+ "name refresh (Hz) hdisp hss hse htot vdisp");
+
+ len += scnprintf(buf + len, buf_size - len,
+ " vss vse vtot flags\n");
+
+ list_for_each_entry(mode, &connector->modes, head) {
+ len += scnprintf(buf + len, buf_size - len,
+ "%s %d %d %d %d %d %d %d %d %d 0x%x\n",
+ mode->name, mode->vrefresh, mode->hdisplay,
+ mode->hsync_start, mode->hsync_end, mode->htotal,
+ mode->vdisplay, mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->flags);
+ }
+
+ /* user buffer too small for the whole report */
+ if (len > count) {
+ kfree(buf);
+ return -ENOMEM;
+ }
+
+ if (copy_to_user(buff, buf, len)) {
+ kfree(buf);
+ return -EFAULT;
+ }
+
+ *ppos += len;
+
+ kfree(buf);
+ return len;
+}
+
+/* debugfs: dump HDMI Vendor-Specific Data Block fields parsed from the
+ * sink EDID (TMDS clock limit, latency and dual-link DVI info).
+ */
+static ssize_t _sde_hdmi_debugfs_edid_vsdb_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[200];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "max_tmds_clock = %d\n",
+ connector->max_tmds_clock);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "latency_present %d %d\n",
+ connector->latency_present[0],
+ connector->latency_present[1]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "video_latency %d %d\n",
+ connector->video_latency[0],
+ connector->video_latency[1]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "audio_latency %d %d\n",
+ connector->audio_latency[0],
+ connector->audio_latency[1]);
+ len += snprintf(buf + len, sizeof(buf) - len,
+ "dvi_dual %d\n",
+ (int)connector->dvi_dual);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs: dump HDR static metadata capabilities parsed from the sink
+ * EDID (EOTF, metadata type, luminance range).
+ */
+static ssize_t _sde_hdmi_debugfs_edid_hdr_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[200];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf), "hdr_eotf = %d\n"
+ "hdr_metadata_type_one %d\n"
+ "hdr_max_luminance %d\n"
+ "hdr_avg_luminance %d\n"
+ "hdr_min_luminance %d\n"
+ "hdr_supported %d\n",
+ connector->hdr_eotf,
+ connector->hdr_metadata_type_one,
+ connector->hdr_max_luminance,
+ connector->hdr_avg_luminance,
+ connector->hdr_min_luminance,
+ (int)connector->hdr_supported);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs: dump HDMI Forum VSDB fields parsed from the sink EDID.
+ *
+ * Fix: the print is bounded by the on-stack buffer via sizeof(buf); the
+ * original passed PAGE_SIZE, which exceeds the 200-byte buffer and could
+ * overflow the stack if the formatted text ever grew.
+ */
+static ssize_t _sde_hdmi_debugfs_edid_hfvsdb_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[200];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf) - len, "max_tmds_char = %d\n"
+ "scdc_present %d\n"
+ "rr_capable %d\n"
+ "supports_scramble %d\n"
+ "flags_3d %d\n",
+ connector->max_tmds_char,
+ (int)connector->scdc_present,
+ (int)connector->rr_capable,
+ (int)connector->supports_scramble,
+ connector->flags_3d);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ return len;
+}
+
+/* debugfs: dump Video Capability Data Block scan info from the sink EDID.
+ *
+ * Fix: bound the print by sizeof(buf) instead of PAGE_SIZE — buf is only
+ * 100 bytes on the stack.
+ */
+static ssize_t _sde_hdmi_debugfs_edid_vcdb_info_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[100];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf) - len, "pt_scan_info = %d\n"
+ "it_scan_info = %d\n"
+ "ce_scan_info = %d\n",
+ (int)connector->pt_scan_info,
+ (int)connector->it_scan_info,
+ (int)connector->ce_scan_info);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs: report the EDID vendor ID string.
+ *
+ * Fix: bound the print by sizeof(buf) instead of PAGE_SIZE — buf is only
+ * 100 bytes on the stack.
+ */
+static ssize_t _sde_hdmi_edid_vendor_name_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[100];
+ u32 len = 0;
+ struct drm_connector *connector;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl ||
+ !display->ctrl.ctrl->connector) {
+ SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+ display);
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ connector = display->ctrl.ctrl->connector;
+ len += snprintf(buf, sizeof(buf) - len, "Vendor ID is %s\n",
+ display->edid_ctrl->vendor_id);
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs: report whether the HDMI source hardware supports HDCP 1.4 */
+static ssize_t _sde_hdmi_src_hdcp14_support_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[SZ_128];
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl) {
+ SDE_ERROR("hdmi is NULL\n");
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ if (display->hdcp14_present)
+ len += snprintf(buf, SZ_128 - len, "true\n");
+ else
+ len += snprintf(buf, SZ_128 - len, "false\n");
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs: report whether the HDMI source hardware supports HDCP 2.2 */
+static ssize_t _sde_hdmi_src_hdcp22_support_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[SZ_128];
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl) {
+ SDE_ERROR("hdmi is NULL\n");
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ if (display->src_hdcp22_support)
+ len += snprintf(buf, SZ_128 - len, "true\n");
+ else
+ len += snprintf(buf, SZ_128 - len, "false\n");
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs: report whether the attached sink supports HDCP 2.2 */
+static ssize_t _sde_hdmi_sink_hdcp22_support_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[SZ_128];
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ if (!display->ctrl.ctrl) {
+ SDE_ERROR("hdmi is NULL\n");
+ return -ENOMEM;
+ }
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ if (display->sink_hdcp22_support)
+ len += snprintf(buf, SZ_128 - len, "true\n");
+ else
+ len += snprintf(buf, SZ_128 - len, "false\n");
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs: report the current HDCP authentication state as a string */
+static ssize_t _sde_hdmi_hdcp_state_read(struct file *file,
+ char __user *buff,
+ size_t count,
+ loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[SZ_128];
+ u32 len = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ SDE_HDMI_DEBUG("%s +", __func__);
+ if (*ppos)
+ return 0;
+
+ len += snprintf(buf, SZ_128 - len, "HDCP state : %s\n",
+ sde_hdcp_state_name(display->hdcp_status));
+
+ if (copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len;
+ SDE_HDMI_DEBUG("%s - ", __func__);
+ return len;
+}
+
+/* debugfs file operations: every node is read-only and backed by one of
+ * the _sde_hdmi_*_read() helpers above
+ */
+static const struct file_operations dump_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_dump_info_read,
+};
+
+static const struct file_operations edid_modes_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_modes_read,
+};
+
+static const struct file_operations edid_vsdb_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_vsdb_info_read,
+};
+
+static const struct file_operations edid_hdr_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_hdr_info_read,
+};
+
+static const struct file_operations edid_hfvsdb_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_hfvsdb_info_read,
+};
+
+static const struct file_operations edid_vcdb_info_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_debugfs_edid_vcdb_info_read,
+};
+
+static const struct file_operations edid_vendor_name_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_edid_vendor_name_read,
+};
+
+static const struct file_operations hdcp_src_14_support_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_src_hdcp14_support_read,
+};
+
+static const struct file_operations hdcp_src_22_support_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_src_hdcp22_support_read,
+};
+
+static const struct file_operations hdcp_sink_22_support_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_sink_hdcp22_support_read,
+};
+
+static const struct file_operations sde_hdmi_hdcp_state_fops = {
+ .open = simple_open,
+ .read = _sde_hdmi_hdcp_state_read,
+};
+
+/* Clip a requested pixel clock to within +/-0.5% of the nominal mode
+ * pixel clock, the deviation allowed by the HDMI standard.
+ *
+ * Fix: u64 values are printed with %llu; the original used %lld, which
+ * misprints large unsigned values.
+ *
+ * Return: the clipped pixel clock in Hz.
+ */
+static u64 _sde_hdmi_clip_valid_pclk(struct hdmi *hdmi, u64 pclk_in)
+{
+ u32 pclk_delta, pclk;
+ u64 pclk_clip = pclk_in;
+
+ /* as per standard, 0.5% of deviation is allowed */
+ pclk = hdmi->pixclock;
+ pclk_delta = pclk * 5 / 1000;
+
+ if (pclk_in < (pclk - pclk_delta))
+ pclk_clip = pclk - pclk_delta;
+ else if (pclk_in > (pclk + pclk_delta))
+ pclk_clip = pclk + pclk_delta;
+
+ if (pclk_in != pclk_clip)
+ pr_warn("clip pclk from %llu to %llu\n", pclk_in, pclk_clip);
+
+ return pclk_clip;
+}
+
+/* HDCP module callback: record the new status and defer the actual
+ * handling to sde_hdmi_tx_hdcp_cb_work() on the HDMI workqueue.
+ */
+static void sde_hdmi_tx_hdcp_cb(void *ptr, enum sde_hdcp_states status)
+{
+ struct sde_hdmi *hdmi_ctrl = (struct sde_hdmi *)ptr;
+ struct hdmi *hdmi;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ hdmi = hdmi_ctrl->ctrl.ctrl;
+ hdmi_ctrl->hdcp_status = status;
+ queue_delayed_work(hdmi->workq, &hdmi_ctrl->hdcp_cb_work, HZ/4);
+}
+
+/* Assert AV-mute, but only while encryption is active, so that clear
+ * (unprotected) content keeps playing during HDCP re-authentication.
+ */
+static void sde_hdmi_tx_set_avmute(void *ptr)
+{
+ struct sde_hdmi *hdmi_ctrl = (struct sde_hdmi *)ptr;
+ struct hdmi *hdmi;
+
+ if (!hdmi_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ hdmi = hdmi_ctrl->ctrl.ctrl;
+
+ /*
+ * When we try to continuously re-auth there
+ * is no need to enforce avmute for clear
+ * content. Hence check the current encryption level
+ * before enforcing avmute on authentication failure
+ */
+ if (sde_hdmi_tx_is_encryption_set(hdmi_ctrl))
+ sde_hdmi_config_avmute(hdmi, true);
+}
+
+/* Shut down the active HDCP module (if any), wait for any pending
+ * hdcp_cb_work to finish, then drop the ops pointer so no further HDCP
+ * calls are made.
+ */
+void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl)
+{
+
+ if (!hdmi_ctrl) {
+ SDE_ERROR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ if (hdmi_ctrl->hdcp_ops)
+ hdmi_ctrl->hdcp_ops->off(hdmi_ctrl->hdcp_data);
+
+ flush_delayed_work(&hdmi_ctrl->hdcp_cb_work);
+
+ hdmi_ctrl->hdcp_ops = NULL;
+}
+
+/* Deferred HDCP state handler, scheduled from sde_hdmi_tx_hdcp_cb().
+ * Applies the policy for the latest hdcp_status: lifts or asserts
+ * AV-mute, toggles HDCP 1.x hardware encryption, and kicks off
+ * re-authentication after a recoverable failure.
+ */
+static void sde_hdmi_tx_hdcp_cb_work(struct work_struct *work)
+{
+ struct sde_hdmi *hdmi_ctrl = NULL;
+ struct delayed_work *dw = to_delayed_work(work);
+ int rc = 0;
+ struct hdmi *hdmi;
+
+ hdmi_ctrl = container_of(dw, struct sde_hdmi, hdcp_cb_work);
+ if (!hdmi_ctrl) {
+ DEV_DBG("%s: invalid input\n", __func__);
+ return;
+ }
+
+ hdmi = hdmi_ctrl->ctrl.ctrl;
+
+ switch (hdmi_ctrl->hdcp_status) {
+ case HDCP_STATE_AUTHENTICATED:
+ hdmi_ctrl->auth_state = true;
+
+ if (sde_hdmi_tx_is_panel_on(hdmi_ctrl) &&
+ sde_hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+ rc = sde_hdmi_config_avmute(hdmi, false);
+ }
+
+ /* HDCP 1.4 with software keys: turn on HW encryption,
+ * unless HDCP 2.2 is present and owns the link
+ */
+ if (hdmi_ctrl->hdcp1_use_sw_keys &&
+ hdmi_ctrl->hdcp14_present) {
+ if (!hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(true);
+ }
+ break;
+ case HDCP_STATE_AUTH_FAIL:
+ if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+ if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(false);
+ }
+
+ hdmi_ctrl->auth_state = false;
+
+ if (sde_hdmi_tx_is_panel_on(hdmi_ctrl)) {
+ pr_debug("%s: Reauthenticating\n", __func__);
+ if (hdmi_ctrl->hdcp_ops && hdmi_ctrl->hdcp_data) {
+ rc = hdmi_ctrl->hdcp_ops->reauthenticate(
+ hdmi_ctrl->hdcp_data);
+ if (rc)
+ pr_err("%s: HDCP reauth failed. rc=%d\n",
+ __func__, rc);
+ } else
+ pr_err("%s: NULL HDCP Ops and Data\n",
+ __func__);
+ } else {
+ pr_debug("%s: Not reauthenticating. Cable not conn\n",
+ __func__);
+ }
+
+ break;
+ case HDCP_STATE_AUTH_FAIL_NOREAUTH:
+ if (hdmi_ctrl->hdcp1_use_sw_keys && hdmi_ctrl->hdcp14_present) {
+ if (hdmi_ctrl->auth_state && !hdmi_ctrl->hdcp22_present)
+ hdcp1_set_enc(false);
+ }
+
+ hdmi_ctrl->auth_state = false;
+
+ break;
+ case HDCP_STATE_AUTH_ENC_NONE:
+ hdmi_ctrl->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+ if (sde_hdmi_tx_is_panel_on(hdmi_ctrl))
+ rc = sde_hdmi_config_avmute(hdmi, false);
+ break;
+ case HDCP_STATE_AUTH_ENC_1X:
+ case HDCP_STATE_AUTH_ENC_2P2:
+ hdmi_ctrl->enc_lvl = hdmi_ctrl->hdcp_status;
+
+ if (sde_hdmi_tx_is_panel_on(hdmi_ctrl) &&
+ sde_hdmi_tx_is_stream_shareable(hdmi_ctrl)) {
+ rc = sde_hdmi_config_avmute(hdmi, false);
+ } else {
+ rc = sde_hdmi_config_avmute(hdmi, true);
+ }
+ break;
+ default:
+ break;
+ /* do nothing */
+ }
+}
+
+/**
+ * _sde_hdmi_update_pll_delta() - Update the HDMI pixel clock as per input ppm
+ *
+ * @ppm: ppm is parts per million multiplied by 1000.
+ * return: 0 on success, non-zero in case of failure.
+ *
+ * The resulting pixel clock is clipped so it deviates by no more than 0.5%
+ * from the nominal TMDS clock rate, as allowed by the HDMI spec.
+ */
+static int _sde_hdmi_update_pll_delta(struct sde_hdmi *display, s32 ppm)
+{
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ u64 cur_pclk, dst_pclk;
+ u64 clip_pclk;
+ int rc = 0;
+
+ mutex_lock(&display->display_lock);
+
+ if (!hdmi->power_on || !display->connected) {
+ SDE_ERROR("HDMI display is not ready\n");
+ mutex_unlock(&display->display_lock);
+ return -EINVAL;
+ }
+
+ if (!display->pll_update_enable) {
+ SDE_ERROR("PLL update function is not enabled\n");
+ mutex_unlock(&display->display_lock);
+ return -EINVAL;
+ }
+
+ /* get current pclk */
+ cur_pclk = hdmi->actual_pixclock;
+ /* get desired pclk: ppm is ppm * 1000, hence the 1e9 scale factor */
+ dst_pclk = cur_pclk * (1000000000 + ppm);
+ do_div(dst_pclk, 1000000000);
+
+ clip_pclk = _sde_hdmi_clip_valid_pclk(hdmi, dst_pclk);
+
+ /* update pclk only when it actually moved after clipping */
+ if (clip_pclk != cur_pclk) {
+ SDE_DEBUG("PCLK changes from %llu to %llu when delta is %d\n",
+ cur_pclk, clip_pclk, ppm);
+
+ rc = clk_set_rate(hdmi->pwr_clks[0], clip_pclk);
+ if (rc < 0) {
+ SDE_ERROR("HDMI PLL update failed\n");
+ mutex_unlock(&display->display_lock);
+ return rc;
+ }
+
+ hdmi->actual_pixclock = clip_pclk;
+ }
+
+ mutex_unlock(&display->display_lock);
+
+ return rc;
+}
+
+/* debugfs: parse a signed ppm delta from userspace and apply it to the
+ * HDMI PLL via _sde_hdmi_update_pll_delta(). A value of 0 is ignored.
+ */
+static ssize_t _sde_hdmi_debugfs_pll_delta_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct sde_hdmi *display = file->private_data;
+ char buf[10];
+ int ppm = 0;
+
+ if (!display)
+ return -ENODEV;
+
+ /* reserve one byte for the NUL terminator below */
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &ppm))
+ return -EFAULT;
+
+ if (ppm)
+ _sde_hdmi_update_pll_delta(display, ppm);
+
+ return count;
+}
+
+/* debugfs "pll_delta": write-only knob for ppm adjustment of the pixel clock */
+static const struct file_operations pll_delta_fops = {
+	.open = simple_open,
+	.write = _sde_hdmi_debugfs_pll_delta_write,
+};
+
+/**
+ * _sde_hdmi_enable_pll_update() - Enable the HDMI PLL update function
+ *
+ * @enable: non-zero to enable PLL update function, 0 to disable.
+ * return: 0 on success, non-zero in case of failure.
+ *
+ */
+static int _sde_hdmi_enable_pll_update(struct sde_hdmi *display, s32 enable)
+{
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ int rc = 0;
+
+ mutex_lock(&display->display_lock);
+
+ if (!hdmi->power_on || !display->connected) {
+ SDE_ERROR("HDMI display is not ready\n");
+ mutex_unlock(&display->display_lock);
+ return -EINVAL;
+ }
+
+ if (!enable && hdmi->actual_pixclock != hdmi->pixclock) {
+ /* reset pixel clock when disable */
+ rc = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+ if (rc < 0) {
+ SDE_ERROR("reset clock rate failed\n");
+ mutex_unlock(&display->display_lock);
+ return rc;
+ }
+ }
+ hdmi->actual_pixclock = hdmi->pixclock;
+
+ display->pll_update_enable = !!enable;
+
+ mutex_unlock(&display->display_lock);
+
+ SDE_DEBUG("HDMI PLL update: %s\n",
+ display->pll_update_enable ? "enable" : "disable");
+
+ return rc;
+}
+
+/*
+ * debugfs "pll_enable" read handler: report "enable"/"disable" state.
+ * NOTE(review): copy_to_user ignores @count; len is tiny so this is benign,
+ * but simple_read_from_buffer() would be the canonical form — confirm.
+ */
+static ssize_t _sde_hdmi_debugfs_pll_enable_read(struct file *file,
+		char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char *buf;
+	u32 len = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;
+
+	buf = kzalloc(SZ_1K, GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	/* bound must match the allocation (was SZ_4K on a SZ_1K buffer) */
+	len += snprintf(buf, SZ_1K, "%s\n",
+		display->pll_update_enable ? "enable" : "disable");
+
+	if (copy_to_user(buff, buf, len)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+
+	*ppos += len;
+
+	kfree(buf);
+	return len;
+}
+
+/*
+ * debugfs "pll_enable" write handler: parse an integer and toggle the PLL
+ * update function (non-zero enables, 0 disables).
+ */
+static ssize_t _sde_hdmi_debugfs_pll_enable_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_hdmi *display = file->private_data;
+	char buf[10];
+	int enable = 0;
+
+	if (!display)
+		return -ENODEV;
+
+	/* oversized input is invalid input, not a bad user address */
+	if (count >= sizeof(buf))
+		return -EINVAL;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0; /* end of string */
+
+	/* parse failure is -EINVAL; -EFAULT is reserved for copy faults */
+	if (kstrtoint(buf, 0, &enable))
+		return -EINVAL;
+
+	_sde_hdmi_enable_pll_update(display, enable);
+
+	return count;
+}
+
+/* debugfs "pll_enable": read current state, write to enable/disable */
+static const struct file_operations pll_enable_fops = {
+	.open = simple_open,
+	.read = _sde_hdmi_debugfs_pll_enable_read,
+	.write = _sde_hdmi_debugfs_pll_enable_write,
+};
+
+/*
+ * _sde_hdmi_debugfs_init() - Create the per-display debugfs tree.
+ * @display: Pointer to the HDMI display.
+ *
+ * Creates a directory named after the display and populates it from a table
+ * of file entries. On any failure the whole tree is torn down.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int _sde_hdmi_debugfs_init(struct sde_hdmi *display)
+{
+	int rc = 0;
+	int i;
+	struct dentry *dir, *file;
+	static const struct {
+		const char *name;
+		umode_t mode;
+		const struct file_operations *fops;
+	} entries[] = {
+		{ "dump_info", 0444, &dump_info_fops },
+		{ "pll_delta", 0644, &pll_delta_fops },
+		{ "pll_enable", 0644, &pll_enable_fops },
+		{ "edid_modes", 0444, &edid_modes_fops },
+		{ "edid_vsdb_info", 0444, &edid_vsdb_info_fops },
+		{ "edid_hdr_info", 0444, &edid_hdr_info_fops },
+		{ "edid_hfvsdb_info", 0444, &edid_hfvsdb_info_fops },
+		{ "edid_vcdb_info", 0444, &edid_vcdb_info_fops },
+		{ "edid_vendor_name", 0444, &edid_vendor_name_fops },
+		{ "src_hdcp14_support", 0444, &hdcp_src_14_support_fops },
+		{ "src_hdcp22_support", 0444, &hdcp_src_22_support_fops },
+		{ "sink_hdcp22_support", 0444, &hdcp_sink_22_support_fops },
+		{ "hdmi_hdcp_state", 0444, &sde_hdmi_hdcp_state_fops },
+	};
+
+	dir = debugfs_create_dir(display->name, NULL);
+	if (!dir) {
+		rc = -ENOMEM;
+		SDE_ERROR("[%s]debugfs create dir failed, rc = %d\n",
+			display->name, rc);
+		goto error;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(entries); i++) {
+		file = debugfs_create_file(entries[i].name, entries[i].mode,
+				dir, display, entries[i].fops);
+		if (IS_ERR_OR_NULL(file)) {
+			/*
+			 * PTR_ERR(NULL) is 0, which would be reported as
+			 * success; map a NULL return to -ENOMEM instead.
+			 */
+			rc = file ? PTR_ERR(file) : -ENOMEM;
+			SDE_ERROR("[%s]debugfs create %s file failed, rc=%d\n",
+				display->name, entries[i].name, rc);
+			goto error_remove_dir;
+		}
+	}
+
+	display->root = dir;
+	return rc;
+error_remove_dir:
+	/* the dir may already hold files; remove the whole subtree */
+	debugfs_remove_recursive(dir);
+error:
+	return rc;
+}
+
+/*
+ * _sde_hdmi_debugfs_deinit() - Tear down the per-display debugfs tree.
+ * @display: Pointer to the HDMI display.
+ */
+static void _sde_hdmi_debugfs_deinit(struct sde_hdmi *display)
+{
+	/*
+	 * display->root is a directory populated with files; a plain
+	 * debugfs_remove() does not remove non-empty directories, so the
+	 * recursive variant is required to avoid leaking the entries.
+	 */
+	debugfs_remove_recursive(display->root);
+}
+
+/*
+ * _sde_hdmi_phy_reset() - Pulse the PHY and PHY-PLL software resets.
+ * @hdmi: Pointer to the HDMI controller.
+ *
+ * The reset polarity is board-dependent: the *_LOW flags in PHY_CTRL tell
+ * us whether the reset line is active-low. Each reset bit is first driven
+ * to its asserted level and then back to the deasserted level, so the
+ * sequence below is order-sensitive and must not be reordered.
+ */
+static void _sde_hdmi_phy_reset(struct hdmi *hdmi)
+{
+	unsigned int val;
+
+	val = hdmi_read(hdmi, REG_HDMI_PHY_CTRL);
+
+	/* assert PHY reset (direction depends on active-low flag) */
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+
+	/* assert PHY PLL reset */
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+
+	/* deassert PHY reset */
+	if (val & HDMI_PHY_CTRL_SW_RESET_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET);
+
+	/* deassert PHY PLL reset */
+	if (val & HDMI_PHY_CTRL_SW_RESET_PLL_LOW)
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val | HDMI_PHY_CTRL_SW_RESET_PLL);
+	else
+		hdmi_write(hdmi, REG_HDMI_PHY_CTRL,
+				val & ~HDMI_PHY_CTRL_SW_RESET_PLL);
+}
+
+/*
+ * _sde_hdmi_gpio_config() - Request/configure or release the HDMI GPIOs.
+ * @hdmi: Pointer to the HDMI controller.
+ * @on: true to request and drive the GPIOs, false to release them.
+ *
+ * Optional GPIOs are marked by -1 in the platform config and are skipped;
+ * only hpd_gpio is mandatory. On failure, every GPIO requested so far is
+ * released in reverse order.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int _sde_hdmi_gpio_config(struct hdmi *hdmi, bool on)
+{
+	const struct hdmi_platform_config *config = hdmi->config;
+	int ret;
+
+	if (on) {
+		if (config->ddc_clk_gpio != -1) {
+			ret = gpio_request(config->ddc_clk_gpio,
+				"HDMI_DDC_CLK");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_CLK", config->ddc_clk_gpio,
+					ret);
+				goto error_ddc_clk_gpio;
+			}
+			gpio_set_value_cansleep(config->ddc_clk_gpio, 1);
+		}
+
+		if (config->ddc_data_gpio != -1) {
+			ret = gpio_request(config->ddc_data_gpio,
+				"HDMI_DDC_DATA");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_DDC_DATA", config->ddc_data_gpio,
+					ret);
+				goto error_ddc_data_gpio;
+			}
+			gpio_set_value_cansleep(config->ddc_data_gpio, 1);
+		}
+
+		ret = gpio_request(config->hpd_gpio, "HDMI_HPD");
+		if (ret) {
+			SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+				"HDMI_HPD", config->hpd_gpio, ret);
+			goto error_hpd_gpio;
+		}
+		gpio_direction_output(config->hpd_gpio, 1);
+		if (config->hpd5v_gpio != -1) {
+			ret = gpio_request(config->hpd5v_gpio, "HDMI_HPD_5V");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_HPD_5V",
+					config->hpd5v_gpio,
+					ret);
+				goto error_hpd5v_gpio;
+			}
+			gpio_set_value_cansleep(config->hpd5v_gpio, 1);
+		}
+
+		if (config->mux_en_gpio != -1) {
+			ret = gpio_request(config->mux_en_gpio, "HDMI_MUX_EN");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_EN", config->mux_en_gpio,
+					ret);
+				goto error_en_gpio;
+			}
+			gpio_set_value_cansleep(config->mux_en_gpio, 1);
+		}
+
+		if (config->mux_sel_gpio != -1) {
+			ret = gpio_request(config->mux_sel_gpio,
+				"HDMI_MUX_SEL");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_SEL", config->mux_sel_gpio,
+					ret);
+				goto error_sel_gpio;
+			}
+			gpio_set_value_cansleep(config->mux_sel_gpio, 0);
+		}
+
+		if (config->mux_lpm_gpio != -1) {
+			ret = gpio_request(config->mux_lpm_gpio,
+				"HDMI_MUX_LPM");
+			if (ret) {
+				SDE_ERROR("'%s'(%d) gpio_request failed: %d\n",
+					"HDMI_MUX_LPM",
+					config->mux_lpm_gpio, ret);
+				goto error_lpm_gpio;
+			}
+			gpio_set_value_cansleep(config->mux_lpm_gpio, 1);
+		}
+		SDE_DEBUG("gpio on");
+	} else {
+		if (config->ddc_clk_gpio != -1)
+			gpio_free(config->ddc_clk_gpio);
+
+		if (config->ddc_data_gpio != -1)
+			gpio_free(config->ddc_data_gpio);
+
+		gpio_free(config->hpd_gpio);
+
+		if (config->hpd5v_gpio != -1)
+			gpio_free(config->hpd5v_gpio);
+
+		if (config->mux_en_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_en_gpio, 0);
+			gpio_free(config->mux_en_gpio);
+		}
+
+		if (config->mux_sel_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_sel_gpio, 1);
+			gpio_free(config->mux_sel_gpio);
+		}
+
+		if (config->mux_lpm_gpio != -1) {
+			gpio_set_value_cansleep(config->mux_lpm_gpio, 0);
+			gpio_free(config->mux_lpm_gpio);
+		}
+		SDE_DEBUG("gpio off");
+	}
+
+	return 0;
+
+error_lpm_gpio:
+	if (config->mux_sel_gpio != -1)
+		gpio_free(config->mux_sel_gpio);
+error_sel_gpio:
+	if (config->mux_en_gpio != -1)
+		gpio_free(config->mux_en_gpio);
+error_en_gpio:
+	/* hpd5v_gpio is optional: it was only requested when != -1 */
+	if (config->hpd5v_gpio != -1)
+		gpio_free(config->hpd5v_gpio);
+error_hpd5v_gpio:
+	gpio_free(config->hpd_gpio);
+error_hpd_gpio:
+	if (config->ddc_data_gpio != -1)
+		gpio_free(config->ddc_data_gpio);
+error_ddc_data_gpio:
+	if (config->ddc_clk_gpio != -1)
+		gpio_free(config->ddc_clk_gpio);
+error_ddc_clk_gpio:
+	return ret;
+}
+
+/*
+ * _sde_hdmi_hpd_enable() - Power up and arm the HPD (hot-plug detect) block.
+ * @sde_hdmi: Pointer to the HDMI display.
+ *
+ * Enables HPD regulators, pinctrl, GPIOs and clocks, resets the PHY (unless
+ * continuous splash is active), then programs and toggles the HPD circuit so
+ * a connect event is re-detected.
+ *
+ * NOTE(review): the fail path returns without releasing regulators, GPIOs or
+ * clocks enabled so far — confirm callers tolerate the partial state.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int _sde_hdmi_hpd_enable(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct device *dev = &hdmi->pdev->dev;
+	uint32_t hpd_ctrl;
+	int i, ret;
+	unsigned long flags;
+	struct drm_connector *connector;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;	/* NOTE(review): unused below — verify */
+
+	connector = hdmi->connector;
+	priv = connector->dev->dev_private;
+	sde_kms = to_sde_kms(priv->kms);
+
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		ret = regulator_enable(hdmi->hpd_regs[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
+				config->hpd_reg_names[i], ret);
+			goto fail;
+		}
+	}
+
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret) {
+		SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+		goto fail;
+	}
+
+	ret = _sde_hdmi_gpio_config(hdmi, true);
+	if (ret) {
+		SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+		goto fail;
+	}
+
+	for (i = 0; i < config->hpd_clk_cnt; i++) {
+		if (config->hpd_freq && config->hpd_freq[i]) {
+			ret = clk_set_rate(hdmi->hpd_clks[i],
+				config->hpd_freq[i]);
+			if (ret)
+				pr_warn("failed to set clk %s (%d)\n",
+					config->hpd_clk_names[i], ret);
+		}
+
+		ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
+				config->hpd_clk_names[i], ret);
+			goto fail;
+		}
+	}
+
+	/* skip the PHY reset if the bootloader left the display running */
+	if (!sde_hdmi->cont_splash_enabled) {
+		sde_hdmi_set_mode(hdmi, false);
+		_sde_hdmi_phy_reset(hdmi);
+		sde_hdmi_set_mode(hdmi, true);
+	}
+
+	hdmi_write(hdmi, REG_HDMI_USEC_REFTIMER, 0x0001001b);
+
+	/* set timeout to 4.1ms (max) for hardware debounce */
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	hpd_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_CTRL);
+	hpd_ctrl |= HDMI_HPD_CTRL_TIMEOUT(0x1fff);
+
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+
+	/* enable HPD events: */
+	hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+			HDMI_HPD_INT_CTRL_INT_CONNECT |
+			HDMI_HPD_INT_CTRL_INT_EN);
+
+	/* Toggle HPD circuit to trigger HPD sense */
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+			~HDMI_HPD_CTRL_ENABLE & hpd_ctrl);
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL,
+			HDMI_HPD_CTRL_ENABLE | hpd_ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	/* hpd_off tracking only applies to pluggable displays */
+	if (!sde_hdmi->non_pluggable)
+		hdmi->hpd_off = false;
+	SDE_DEBUG("enabled hdmi hpd\n");
+	return 0;
+
+fail:
+	return ret;
+}
+
+/*
+ * sde_hdmi_core_enable() - Enable HDMI core power: regulators, pinctrl,
+ * GPIOs and HPD clocks, then turn the core on.
+ * @sde_hdmi: Pointer to the HDMI display.
+ *
+ * On failure every resource acquired so far — and only those — is released
+ * in reverse order.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct device *dev = &hdmi->pdev->dev;
+	int i, ret = 0;
+
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		ret = regulator_enable(hdmi->hpd_regs[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable hpd regulator: %s (%d)\n",
+				config->hpd_reg_names[i], ret);
+			goto err_regulator_enable;
+		}
+	}
+
+	ret = pinctrl_pm_select_default_state(dev);
+	if (ret) {
+		SDE_ERROR("pinctrl state chg failed: %d\n", ret);
+		goto err_pinctrl_state;
+	}
+
+	ret = _sde_hdmi_gpio_config(hdmi, true);
+	if (ret) {
+		SDE_ERROR("failed to configure GPIOs: %d\n", ret);
+		goto err_gpio_config;
+	}
+
+	for (i = 0; i < config->hpd_clk_cnt; i++) {
+		if (config->hpd_freq && config->hpd_freq[i]) {
+			ret = clk_set_rate(hdmi->hpd_clks[i],
+				config->hpd_freq[i]);
+			if (ret)
+				pr_warn("failed to set clk %s (%d)\n",
+					config->hpd_clk_names[i], ret);
+		}
+
+		ret = clk_prepare_enable(hdmi->hpd_clks[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable hpd clk: %s (%d)\n",
+				config->hpd_clk_names[i], ret);
+			goto err_clk_prepare_enable;
+		}
+	}
+	sde_hdmi_set_mode(hdmi, true);
+	return 0;
+
+err_clk_prepare_enable:
+	/* only unwind the clocks that were actually prepared (0..i-1) */
+	while (--i >= 0)
+		clk_disable_unprepare(hdmi->hpd_clks[i]);
+	/* gpio config succeeded on this path; release the GPIOs */
+	_sde_hdmi_gpio_config(hdmi, false);
+err_gpio_config:
+	pinctrl_pm_select_sleep_state(dev);
+err_pinctrl_state:
+	/* all regulators were enabled before this point */
+	i = config->hpd_reg_cnt;
+err_regulator_enable:
+	/* only unwind the regulators that were actually enabled (0..i-1) */
+	while (--i >= 0)
+		regulator_disable(hdmi->hpd_regs[i]);
+	return ret;
+}
+
+/*
+ * _sde_hdmi_hpd_disable() - Quiesce HPD and release its resources.
+ * @sde_hdmi: Pointer to the HDMI display.
+ *
+ * Masks the HPD interrupt, disables the core, then releases clocks, GPIOs,
+ * pinctrl and regulators. A no-op if HPD is already off on a pluggable
+ * display.
+ */
+static void _sde_hdmi_hpd_disable(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct device *dev = &hdmi->pdev->dev;
+	int i, ret = 0;
+	unsigned long flags;
+
+	if (!sde_hdmi->non_pluggable && hdmi->hpd_off) {
+		pr_warn("hdmi display hpd was already disabled\n");
+		return;
+	}
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	/* Disable HPD interrupt */
+	hdmi_write(hdmi, REG_HDMI_HPD_CTRL, 0);
+	hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+	hdmi_write(hdmi, REG_HDMI_HPD_INT_STATUS, 0);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	sde_hdmi_set_mode(hdmi, false);
+
+	for (i = 0; i < config->hpd_clk_cnt; i++)
+		clk_disable_unprepare(hdmi->hpd_clks[i]);
+
+	/* teardown failures are logged but not fatal */
+	ret = _sde_hdmi_gpio_config(hdmi, false);
+	if (ret)
+		pr_warn("failed to unconfigure GPIOs: %d\n", ret);
+
+	ret = pinctrl_pm_select_sleep_state(dev);
+	if (ret)
+		pr_warn("pinctrl state chg failed: %d\n", ret);
+
+	for (i = 0; i < config->hpd_reg_cnt; i++) {
+		ret = regulator_disable(hdmi->hpd_regs[i]);
+		if (ret)
+			pr_warn("failed to disable hpd regulator: %s (%d)\n",
+				config->hpd_reg_names[i], ret);
+	}
+
+	/* hpd_off tracking only applies to pluggable displays */
+	if (!sde_hdmi->non_pluggable)
+		hdmi->hpd_off = true;
+	SDE_DEBUG("disabled hdmi hpd\n");
+}
+
+/**
+ * _sde_hdmi_update_hpd_state() - Update the HDMI HPD clock state
+ *
+ * @state: non-zero to disbale HPD clock, 0 to enable.
+ * return: 0 on success, non-zero in case of failure.
+ *
+ */
+static int
+_sde_hdmi_update_hpd_state(struct sde_hdmi *hdmi_display, u64 state)
+{
+ struct hdmi *hdmi = hdmi_display->ctrl.ctrl;
+ int rc = 0;
+
+ if (hdmi_display->non_pluggable)
+ return 0;
+
+ SDE_DEBUG("changing hdmi hpd state to %llu\n", state);
+
+ if (state == SDE_MODE_HPD_ON) {
+ if (!hdmi->hpd_off)
+ pr_warn("hdmi display hpd was already enabled\n");
+ rc = _sde_hdmi_hpd_enable(hdmi_display);
+ } else
+ _sde_hdmi_hpd_disable(hdmi_display);
+
+ return rc;
+}
+
+/*
+ * sde_hdmi_core_disable() - Disable HDMI core power.
+ * @sde_hdmi: Pointer to the HDMI display.
+ */
+void sde_hdmi_core_disable(struct sde_hdmi *sde_hdmi)
+{
+	/* HPD contains all the core clock and pwr */
+	_sde_hdmi_hpd_disable(sde_hdmi);
+}
+
+/*
+ * _sde_hdmi_cec_update_phys_addr() - Push the CEC physical address from the
+ * current EDID to the CEC notifier, or invalidate it when no EDID is cached.
+ */
+static void _sde_hdmi_cec_update_phys_addr(struct sde_hdmi *display)
+{
+	struct edid *edid = display->edid_ctrl->edid;
+
+	if (!edid) {
+		/* no EDID: sink gone, invalidate the CEC address */
+		cec_notifier_set_phys_addr(display->notifier,
+			CEC_PHYS_ADDR_INVALID);
+		return;
+	}
+
+	cec_notifier_set_phys_addr_from_edid(display->notifier, edid);
+}
+
+/*
+ * _sde_hdmi_init_ddc() - Initialize the DDC controller state.
+ * @display: Pointer to the HDMI display.
+ * @hdmi: Pointer to the HDMI controller (currently unused here).
+ */
+static void _sde_hdmi_init_ddc(struct sde_hdmi *display, struct hdmi *hdmi)
+{
+	display->ddc_ctrl.io = &display->io[HDMI_TX_CORE_IO];
+	init_completion(&display->ddc_ctrl.rx_status_done);
+}
+
+/*
+ * _sde_hdmi_map_regs() - Mirror the already-ioremapped core/QFPROM/HDCP
+ * register blocks from the hdmi device into the display's io table.
+ * @display: Pointer to the HDMI display.
+ * @hdmi: Pointer to the HDMI controller that owns the mappings.
+ */
+static void _sde_hdmi_map_regs(struct sde_hdmi *display, struct hdmi *hdmi)
+{
+	display->io[HDMI_TX_CORE_IO].base = hdmi->mmio;
+	display->io[HDMI_TX_CORE_IO].len = hdmi->mmio_len;
+	display->io[HDMI_TX_QFPROM_IO].base = hdmi->qfprom_mmio;
+	display->io[HDMI_TX_QFPROM_IO].len = hdmi->qfprom_mmio_len;
+	display->io[HDMI_TX_HDCP_IO].base = hdmi->hdcp_mmio;
+	display->io[HDMI_TX_HDCP_IO].len = hdmi->hdcp_mmio_len;
+}
+
+/*
+ * _sde_hdmi_hotplug_work() - Deferred hot-plug handler.
+ *
+ * On connect: briefly force the core on, read the EDID over DDC, restore the
+ * previous control value and classify the sink (HDMI vs DVI). On disconnect:
+ * drop the cached EDID. Then notify DRM and update the CEC address.
+ */
+static void _sde_hdmi_hotplug_work(struct work_struct *work)
+{
+	struct sde_hdmi *sde_hdmi =
+		container_of(work, struct sde_hdmi, hpd_work);
+	struct drm_connector *connector;
+	struct hdmi *hdmi = NULL;
+	u32 hdmi_ctrl;
+
+	/* NOTE(review): container_of of a valid work cannot yield NULL;
+	 * the sde_hdmi check is defensive only.
+	 */
+	if (!sde_hdmi || !sde_hdmi->ctrl.ctrl ||
+		!sde_hdmi->ctrl.ctrl->connector ||
+		!sde_hdmi->edid_ctrl) {
+		SDE_ERROR("sde_hdmi=%p or hdmi or connector is NULL\n",
+				sde_hdmi);
+		return;
+	}
+	hdmi = sde_hdmi->ctrl.ctrl;
+	connector = sde_hdmi->ctrl.ctrl->connector;
+
+	if (sde_hdmi->connected) {
+		/* DDC needs the core enabled; restore HDMI_CTRL afterwards */
+		hdmi_ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl | HDMI_CTRL_ENABLE);
+		sde_get_edid(connector, hdmi->i2c,
+		(void **)&sde_hdmi->edid_ctrl);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl);
+		hdmi->hdmi_mode = sde_detect_hdmi_monitor(sde_hdmi->edid_ctrl);
+	} else
+		sde_free_edid((void **)&sde_hdmi->edid_ctrl);
+
+	drm_helper_hpd_irq_event(connector->dev);
+	_sde_hdmi_cec_update_phys_addr(sde_hdmi);
+}
+
+/*
+ * _sde_hdmi_connector_irq() - HPD portion of the HDMI ISR.
+ *
+ * Latches the cable state, acks the interrupt, re-arms detection for the
+ * opposite transition and defers the heavy work (EDID read, DRM notify) to
+ * the hpd workqueue.
+ */
+static void _sde_hdmi_connector_irq(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	uint32_t hpd_int_status, hpd_int_ctrl;
+
+	/* Process HPD: */
+	hpd_int_status = hdmi_read(hdmi, REG_HDMI_HPD_INT_STATUS);
+	hpd_int_ctrl = hdmi_read(hdmi, REG_HDMI_HPD_INT_CTRL);
+
+	if ((hpd_int_ctrl & HDMI_HPD_INT_CTRL_INT_EN) &&
+		(hpd_int_status & HDMI_HPD_INT_STATUS_INT)) {
+		sde_hdmi->connected = !!(hpd_int_status &
+					HDMI_HPD_INT_STATUS_CABLE_DETECTED);
+		/* ack & disable (temporarily) HPD events: */
+		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL,
+			HDMI_HPD_INT_CTRL_INT_ACK);
+
+		SDE_HDMI_DEBUG("status=%04x, ctrl=%04x", hpd_int_status,
+				hpd_int_ctrl);
+
+		/* detect disconnect if we are connected or visa versa: */
+		hpd_int_ctrl = HDMI_HPD_INT_CTRL_INT_EN;
+		if (!sde_hdmi->connected)
+			hpd_int_ctrl |= HDMI_HPD_INT_CTRL_INT_CONNECT;
+		hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, hpd_int_ctrl);
+
+		queue_work(hdmi->workq, &sde_hdmi->hpd_work);
+	}
+}
+
+/*
+ * _sde_hdmi_cec_irq() - CEC portion of the HDMI ISR: forward any pending
+ * CEC interrupt to the external CEC driver via the virtual irq domain.
+ */
+static void _sde_hdmi_cec_irq(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	u32 cec_intr = hdmi_read(hdmi, REG_HDMI_CEC_INT);
+
+	if (!cec_intr)
+		return;
+
+	/* Routing interrupt to external CEC drivers */
+	generic_handle_irq(irq_find_mapping(sde_hdmi->irq_domain, 1));
+}
+
+
+/*
+ * _sde_hdmi_irq() - Top-level HDMI interrupt handler.
+ *
+ * Demultiplexes the shared HDMI interrupt line to the HPD, scrambling,
+ * HDCP2.2 DDC, DDC/I2C, HDCP and CEC sub-handlers in turn.
+ */
+static irqreturn_t _sde_hdmi_irq(int irq, void *dev_id)
+{
+	struct sde_hdmi *display = dev_id;
+	struct hdmi *hdmi;
+
+	if (!display || !display->ctrl.ctrl) {
+		SDE_ERROR("sde_hdmi=%pK or hdmi is NULL\n", display);
+		return IRQ_NONE;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	/* Process HPD: */
+	_sde_hdmi_connector_irq(display);
+
+	/* Process Scrambling ISR */
+	sde_hdmi_ddc_scrambling_isr((void *)display);
+
+	/* Process DDC2 */
+	sde_hdmi_ddc_hdcp2p2_isr((void *)display);
+
+	/* Process DDC: */
+	hdmi_i2c_irq(hdmi->i2c);
+
+	/* Process HDCP: */
+	if (display->hdcp_ops && display->hdcp_data) {
+		if (display->hdcp_ops->isr) {
+			if (display->hdcp_ops->isr(
+				display->hdcp_data))
+				DEV_ERR("%s: hdcp_1x_isr failed\n",
+						 __func__);
+		}
+	}
+
+	/* Process CEC: */
+	_sde_hdmi_cec_irq(display);
+
+	return IRQ_HANDLED;
+}
+
+/*
+ * _sde_hdmi_audio_info_setup() - ext-display codec op: configure HDMI audio.
+ *
+ * Audio is only started when the sink was detected as HDMI (DVI sinks carry
+ * no audio); otherwise -EPERM is returned.
+ */
+static int _sde_hdmi_audio_info_setup(struct platform_device *pdev,
+	struct msm_ext_disp_audio_setup_params *params)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+	struct hdmi *hdmi;
+
+	if (!display || !params) {
+		SDE_ERROR("invalid param(s), display %pK, params %pK\n",
+				display, params);
+		return -ENODEV;
+	}
+
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi->hdmi_mode)
+		return -EPERM;
+
+	return sde_hdmi_audio_on(hdmi, params);
+}
+
+/*
+ * _sde_hdmi_get_audio_edid_blk() - ext-display codec op: hand out the cached
+ * EDID audio and speaker-allocation data blocks.
+ *
+ * NOTE(review): display->edid_ctrl is dereferenced without a NULL check —
+ * presumably guaranteed non-NULL after probe; confirm against callers.
+ */
+static int _sde_hdmi_get_audio_edid_blk(struct platform_device *pdev,
+	struct msm_ext_disp_audio_edid_blk *blk)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+
+	if (!display || !blk) {
+		SDE_ERROR("invalid param(s), display %pK, blk %pK\n",
+				display, blk);
+		return -ENODEV;
+	}
+
+	blk->audio_data_blk = display->edid_ctrl->audio_data_block;
+	blk->audio_data_blk_size = display->edid_ctrl->adb_size;
+
+	blk->spk_alloc_data_blk = display->edid_ctrl->spkr_alloc_data_block;
+	blk->spk_alloc_data_blk_size = display->edid_ctrl->sadb_size;
+
+	return 0;
+}
+
+/*
+ * _sde_hdmi_get_cable_status() - ext-display codec op: report cable state.
+ *
+ * Returns 1 when the display is powered on and a sink is connected, 0
+ * otherwise; -ENODEV when the driver data is missing. @vote is unused here.
+ */
+static int _sde_hdmi_get_cable_status(struct platform_device *pdev, u32 vote)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+	struct hdmi *hdmi;
+
+	if (!display) {
+		SDE_ERROR("invalid param(s), display %pK\n", display);
+		return -ENODEV;
+	}
+
+	hdmi = display->ctrl.ctrl;
+
+	return hdmi->power_on && display->connected;
+}
+
+/*
+ * _sde_hdmi_audio_codec_ready() - ext-display codec op: the audio codec has
+ * registered. Latch codec_ready and deliver any hotplug notification that
+ * was deferred while the codec was absent.
+ */
+static void _sde_hdmi_audio_codec_ready(struct platform_device *pdev)
+{
+	struct sde_hdmi *display = platform_get_drvdata(pdev);
+
+	if (!display) {
+		SDE_ERROR("invalid param(s), display %pK\n", display);
+		return;
+	}
+
+	mutex_lock(&display->display_lock);
+	if (display->codec_ready)
+		goto out;
+
+	display->codec_ready = true;
+
+	/* flush the notification postponed until the codec appeared */
+	if (display->client_notify_pending)
+		sde_hdmi_notify_clients(display, display->connected);
+out:
+	mutex_unlock(&display->display_lock);
+}
+
+/*
+ * _sde_hdmi_ext_disp_init() - Register this display with the external
+ * display (audio) framework.
+ * @display: Pointer to the HDMI display.
+ *
+ * Fills in the codec ops, resolves the qcom,msm_ext_disp phandle to its
+ * platform device and registers the audio interface.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+static int _sde_hdmi_ext_disp_init(struct sde_hdmi *display)
+{
+	int rc = 0;
+	struct device_node *pd_np;
+	const char *phandle = "qcom,msm_ext_disp";
+
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	display->ext_audio_data.type = EXT_DISPLAY_TYPE_HDMI;
+	display->ext_audio_data.pdev = display->pdev;
+	display->ext_audio_data.codec_ops.audio_info_setup =
+		_sde_hdmi_audio_info_setup;
+	display->ext_audio_data.codec_ops.get_audio_edid_blk =
+		_sde_hdmi_get_audio_edid_blk;
+	display->ext_audio_data.codec_ops.cable_status =
+		_sde_hdmi_get_cable_status;
+	display->ext_audio_data.codec_ops.codec_ready =
+		_sde_hdmi_audio_codec_ready;
+
+	if (!display->pdev->dev.of_node) {
+		SDE_ERROR("[%s]cannot find sde_hdmi of_node\n", display->name);
+		return -ENODEV;
+	}
+
+	pd_np = of_parse_phandle(display->pdev->dev.of_node, phandle, 0);
+	if (!pd_np) {
+		SDE_ERROR("[%s]cannot find %s device node\n",
+			display->name, phandle);
+		return -ENODEV;
+	}
+
+	display->ext_pdev = of_find_device_by_node(pd_np);
+	/* of_parse_phandle() took a reference on the node; drop it now */
+	of_node_put(pd_np);
+	if (!display->ext_pdev) {
+		SDE_ERROR("[%s]cannot find %s platform device\n",
+			display->name, phandle);
+		return -ENODEV;
+	}
+
+	rc = msm_ext_disp_register_intf(display->ext_pdev,
+			&display->ext_audio_data);
+	if (rc)
+		SDE_ERROR("[%s]failed to register disp\n", display->name);
+
+	return rc;
+}
+
+/*
+ * sde_hdmi_notify_clients() - Report a cable connect/disconnect event to the
+ * external display framework. Audio is only flagged for HDMI-mode sinks.
+ * No-op when the hpd callback has not been registered.
+ */
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected)
+{
+	struct hdmi *hdmi;
+	u32 flags;
+	int state;
+
+	if (!display || !display->ext_audio_data.intf_ops.hpd)
+		return;
+
+	hdmi = display->ctrl.ctrl;
+	state = connected ?
+		EXT_DISPLAY_CABLE_CONNECT : EXT_DISPLAY_CABLE_DISCONNECT;
+
+	flags = MSM_EXT_DISP_HPD_ASYNC_VIDEO;
+	if (hdmi->hdmi_mode)
+		flags |= MSM_EXT_DISP_HPD_AUDIO;
+
+	display->ext_audio_data.intf_ops.hpd(display->ext_pdev,
+			display->ext_audio_data.type, state, flags);
+}
+
+/*
+ * sde_hdmi_set_mode() - Enable/disable the HDMI core and select HDMI vs DVI
+ * signalling in HDMI_CTRL.
+ * @hdmi: Pointer to the HDMI controller.
+ * @power_on: true to enable the core, false to disable.
+ *
+ * NOTE(review): for DVI sinks the code writes ENABLE|HDMI first and then
+ * rewrites with the HDMI bit cleared — presumably a required HW programming
+ * sequence; confirm before restructuring.
+ */
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on)
+{
+	uint32_t ctrl = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+	if (power_on) {
+		ctrl |= HDMI_CTRL_ENABLE;
+		if (!hdmi->hdmi_mode) {
+			/* DVI sink: pulse the HDMI bit, then run in DVI mode */
+			ctrl |= HDMI_CTRL_HDMI;
+			hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+			ctrl &= ~HDMI_CTRL_HDMI;
+		} else {
+			ctrl |= HDMI_CTRL_HDMI;
+		}
+	} else {
+		ctrl &= ~HDMI_CTRL_HDMI;
+	}
+
+	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+	SDE_HDMI_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+			power_on ? "Enable" : "Disable", ctrl);
+}
+
+#define DDC_WRITE_MAX_BYTE_NUM 32
+
+/*
+ * sde_hdmi_scdc_read() - Read one SCDC field from the sink over DDC.
+ * @hdmi: Pointer to the HDMI controller.
+ * @data_type: HDMI_TX_SCDC_* field selector.
+ * @val: Out parameter receiving the decoded field value.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val)
+{
+	int rc = 0;
+	u8 data_buf[2] = {0};
+	u16 dev_addr, data_len;
+	u8 offset;
+
+	if (!hdmi || !hdmi->i2c || !val) {
+		SDE_ERROR("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		SDE_ERROR("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	/* SCDC slave address per HDMI 2.0 spec */
+	dev_addr = 0xA8;
+
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		data_len = 1;
+		offset = HDMI_SCDC_SCRAMBLER_STATUS;
+		break;
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		data_len = 1;
+		offset = HDMI_SCDC_TMDS_CONFIG;
+		break;
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		data_len = 1;
+		offset = HDMI_SCDC_STATUS_FLAGS_0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+		data_len = 2;
+		offset = HDMI_SCDC_ERR_DET_0_L;
+		break;
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+		data_len = 2;
+		offset = HDMI_SCDC_ERR_DET_1_L;
+		break;
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		data_len = 2;
+		offset = HDMI_SCDC_ERR_DET_2_L;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		data_len = 1;
+		offset = HDMI_SCDC_CONFIG_0;
+		break;
+	default:
+		/*
+		 * Previously fell through with offset/data_len
+		 * uninitialized; fail explicitly instead.
+		 */
+		SDE_ERROR("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_read(hdmi, dev_addr, offset, data_buf,
+					   data_len, true);
+	if (rc) {
+		SDE_ERROR("DDC Read failed for %d\n", data_type);
+		return rc;
+	}
+
+	/* decode the raw bytes into the caller's value */
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_STATUS:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		*val = (data_buf[0] & BIT(1)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CLOCK_DET_STATUS:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH0_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(1)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH1_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(2)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH2_LOCK_STATUS:
+		*val = (data_buf[0] & BIT(3)) ? 1 : 0;
+		break;
+	case HDMI_TX_SCDC_CH0_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH1_ERROR_COUNT:
+	case HDMI_TX_SCDC_CH2_ERROR_COUNT:
+		/* bit 7 of the high byte is the "count valid" flag */
+		if (data_buf[1] & BIT(7))
+			*val = (data_buf[0] | ((data_buf[1] & 0x7F) << 8));
+		else
+			*val = 0;
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		*val = (data_buf[0] & BIT(0)) ? 1 : 0;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_hdmi_scdc_write() - Write one SCDC field on the sink over DDC.
+ * @hdmi: Pointer to the HDMI controller.
+ * @data_type: HDMI_TX_SCDC_* field selector (writable fields only).
+ * @val: Value to program into the field.
+ *
+ * TMDS_CONFIG fields are read-modify-written so the sibling bit in the same
+ * register is preserved.
+ *
+ * Return: 0 on success, negative errno otherwise.
+ */
+int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val)
+{
+	int rc = 0;
+	u8 data_buf[2] = {0};
+	u8 read_val = 0;
+	u16 dev_addr, data_len;
+	u8 offset;
+
+	if (!hdmi || !hdmi->i2c) {
+		SDE_ERROR("Bad Parameters\n");
+		return -EINVAL;
+	}
+
+	if (data_type >= HDMI_TX_SCDC_MAX) {
+		SDE_ERROR("Unsupported data type\n");
+		return -EINVAL;
+	}
+
+	/* SCDC slave address per HDMI 2.0 spec */
+	dev_addr = 0xA8;
+
+	switch (data_type) {
+	case HDMI_TX_SCDC_SCRAMBLING_ENABLE:
+	case HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE:
+		dev_addr = 0xA8;
+		data_len = 1;
+		offset = HDMI_SCDC_TMDS_CONFIG;
+		rc = hdmi_ddc_read(hdmi, dev_addr, offset, &read_val,
+						   data_len, true);
+		if (rc) {
+			SDE_ERROR("scdc read failed\n");
+			return rc;
+		}
+		if (data_type == HDMI_TX_SCDC_SCRAMBLING_ENABLE) {
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(0))) |
+						   ((u8)(val & BIT(0))));
+		} else {
+			data_buf[0] = ((((u8)(read_val & 0xFF)) & (~BIT(1))) |
+						   (((u8)(val & BIT(0))) << 1));
+		}
+		break;
+	case HDMI_TX_SCDC_READ_ENABLE:
+		data_len = 1;
+		offset = HDMI_SCDC_CONFIG_0;
+		data_buf[0] = (u8)(val & 0x1);
+		break;
+	default:
+		SDE_ERROR("Cannot write to read only reg (%d)\n",
+				data_type);
+		return -EINVAL;
+	}
+
+	rc = hdmi_ddc_write(hdmi, dev_addr, offset, data_buf,
+						data_len, true);
+	if (rc) {
+		/* this is the write path; the old log said "Read" */
+		SDE_ERROR("DDC write failed for %d\n", data_type);
+		return rc;
+	}
+	return 0;
+}
+
+/*
+ * sde_hdmi_get_info() - Fill in the msm_display_info for this HDMI display.
+ * @info: Out parameter to populate.
+ * @display: Opaque pointer to a struct sde_hdmi.
+ *
+ * Non-pluggable displays are reported as always connected and forced into
+ * HDMI mode; pluggable ones advertise hot-plug and EDID capabilities.
+ *
+ * Return: 0 on success, -EINVAL on NULL arguments.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+				void *display)
+{
+	int rc = 0;
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct hdmi *hdmi;
+
+	if (!display || !info) {
+		SDE_ERROR("display=%p or info=%p is NULL\n", display, info);
+		return -EINVAL;
+	}
+
+	/* only dereference after the NULL check above (was done before it) */
+	hdmi = hdmi_display->ctrl.ctrl;
+
+	mutex_lock(&hdmi_display->display_lock);
+
+	info->intf_type = DRM_MODE_CONNECTOR_HDMIA;
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = 0;
+	if (hdmi_display->non_pluggable) {
+		info->capabilities = MSM_DISPLAY_CAP_VID_MODE;
+		hdmi_display->connected = true;
+		hdmi->hdmi_mode = true;
+	} else {
+		info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG |
+			MSM_DISPLAY_CAP_EDID | MSM_DISPLAY_CAP_VID_MODE;
+	}
+	info->is_connected = hdmi_display->connected;
+	info->max_width = HDMI_DISPLAY_MAX_WIDTH;
+	info->max_height = HDMI_DISPLAY_MAX_HEIGHT;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+
+	mutex_unlock(&hdmi_display->display_lock);
+	return rc;
+}
+
+/*
+ * sde_hdmi_panel_set_hdr_infoframe - program and enable the HDR (DRM)
+ * infoframe in the HDMI generic packet registers.
+ * @display: sde_hdmi display context.
+ * @hdr_meta: HDR metadata (EOTF, primaries, luminance levels) from userspace.
+ *
+ * Packs the CEA-861.3 Dynamic Range and Mastering infoframe (type 0x87)
+ * into HDMI_GENERIC0_* and enables periodic transmission via
+ * HDMI_GEN_PKT_CTRL.
+ */
+static void sde_hdmi_panel_set_hdr_infoframe(struct sde_hdmi *display,
+struct drm_msm_ext_panel_hdr_metadata *hdr_meta)
+{
+	u32 packet_payload = 0;
+	u32 packet_header = 0;
+	u32 packet_control = 0;
+	u32 const type_code = 0x87;	/* CEA-861.3 HDR infoframe type */
+	u32 const version = 0x01;
+	u32 const length = 0x1a;
+	u32 const descriptor_id = 0x00;
+	u8 checksum;
+	struct hdmi *hdmi;
+	struct drm_connector *connector;
+
+	if (!display || !hdr_meta) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/*
+	 * Fix: validate the controller BEFORE dereferencing it for the
+	 * connector. The previous code read display->ctrl.ctrl->connector
+	 * ahead of the NULL check, crashing when ctrl.ctrl was NULL.
+	 */
+	hdmi = display->ctrl.ctrl;
+	if (!hdmi) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	connector = hdmi->connector;
+	if (!connector) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/* Setup the line number to send the packet on */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(16);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Setup the packet to be sent every frame */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(1);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Setup Packet header and payload */
+	packet_header = type_code | (version << 8) | (length << 16);
+	hdmi_write(hdmi, HDMI_GENERIC0_HDR, packet_header);
+
+	/**
+	 * Checksum is not a mandatory field for
+	 * the HDR infoframe as per CEA-861-3 specification.
+	 * However some HDMI sinks still expect a
+	 * valid checksum to be included as part of
+	 * the infoframe. Hence compute and add
+	 * the checksum to improve sink interoperability
+	 * for our HDR solution on HDMI.
+	 */
+	checksum = sde_hdmi_hdr_set_chksum(hdr_meta);
+
+	packet_payload = (hdr_meta->eotf << 8) | checksum;
+
+	if (connector->hdr_metadata_type_one) {
+		packet_payload |= (descriptor_id << 16)
+			| (HDMI_GET_LSB(hdr_meta->display_primaries_x[0])
+			<< 24);
+		hdmi_write(hdmi, HDMI_GENERIC0_0, packet_payload);
+	} else {
+		/* Without static metadata type 1, only EOTF is sent */
+		pr_debug("Metadata Type 1 not supported\n");
+		hdmi_write(hdmi, HDMI_GENERIC0_0, packet_payload);
+		goto enable_packet_control;
+	}
+
+	/* Primaries/white point/luminance: 16-bit LE values split into bytes */
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->display_primaries_x[0]))
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_y[0]) << 8)
+		| (HDMI_GET_MSB(hdr_meta->display_primaries_y[0]) << 16)
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_x[1]) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_1, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->display_primaries_x[1]))
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_y[1]) << 8)
+		| (HDMI_GET_MSB(hdr_meta->display_primaries_y[1]) << 16)
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_x[2]) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_2, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->display_primaries_x[2]))
+		| (HDMI_GET_LSB(hdr_meta->display_primaries_y[2]) << 8)
+		| (HDMI_GET_MSB(hdr_meta->display_primaries_y[2]) << 16)
+		| (HDMI_GET_LSB(hdr_meta->white_point_x) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_3, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->white_point_x))
+		| (HDMI_GET_LSB(hdr_meta->white_point_y) << 8)
+		| (HDMI_GET_MSB(hdr_meta->white_point_y) << 16)
+		| (HDMI_GET_LSB(hdr_meta->max_luminance) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_4, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->max_luminance))
+		| (HDMI_GET_LSB(hdr_meta->min_luminance) << 8)
+		| (HDMI_GET_MSB(hdr_meta->min_luminance) << 16)
+		| (HDMI_GET_LSB(hdr_meta->max_content_light_level) << 24);
+	hdmi_write(hdmi, HDMI_GENERIC0_5, packet_payload);
+
+	packet_payload =
+		(HDMI_GET_MSB(hdr_meta->max_content_light_level))
+		| (HDMI_GET_LSB(hdr_meta->max_average_light_level) << 8)
+		| (HDMI_GET_MSB(hdr_meta->max_average_light_level) << 16);
+	hdmi_write(hdmi, HDMI_GENERIC0_6, packet_payload);
+
+enable_packet_control:
+
+	/* Flush the contents to the register */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(2);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Clear the flush bit of the register */
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control &= ~BIT(2);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+
+	/* Start sending the packets*/
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control |= BIT(0);
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+/*
+ * sde_hdmi_update_colorimetry - switch the AVI infoframe between BT.2020
+ * and default colorimetry when HDR playback starts/stops.
+ * @display: sde_hdmi display context.
+ * @use_bt2020: true to advertise BT.2020 colorimetry, false to restore default.
+ *
+ * No-op unless the sink supports HDR and BT.2020, and only reprograms the
+ * hardware when the requested colorimetry differs from the cached state.
+ */
+static void sde_hdmi_update_colorimetry(struct sde_hdmi *display,
+ bool use_bt2020)
+{
+ struct hdmi *hdmi;
+ struct drm_connector *connector;
+ bool mode_is_yuv = false;
+ struct drm_display_mode *mode;
+ u32 mode_fmt_flags = 0;
+ u8 checksum;
+ u32 avi_info0 = 0;
+ u32 avi_info1 = 0;
+ u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+ u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+ struct hdmi_avi_infoframe info;
+
+ if (!display) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ hdmi = display->ctrl.ctrl;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ connector = display->ctrl.ctrl->connector;
+
+ if (!connector) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ if (!connector->hdr_supported) {
+ SDE_DEBUG("HDR is not supported\n");
+ return;
+ }
+
+ /* If sink doesn't support BT2020, just return */
+ /*
+ * NOTE(review): with ||, this bails out unless the sink advertises BOTH
+ * BT2020 YCC and BT2020 RGB colorimetry — confirm that requiring both
+ * (rather than either) is the intended policy.
+ */
+ if (!(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_YCC) ||
+ !(connector->color_enc_fmt & DRM_EDID_COLORIMETRY_BT2020_RGB)) {
+ SDE_DEBUG("BT2020 colorimetry is not supported\n");
+ return;
+ }
+
+ /* If there is no change in colorimetry, just return */
+ if (use_bt2020 && display->bt2020_colorimetry)
+ return;
+ else if (!use_bt2020 && !display->bt2020_colorimetry)
+ return;
+
+ mode = &display->mode;
+ /* Cache the format flags before clearing */
+ mode_fmt_flags = mode->flags;
+ /**
+ * Clear the RGB/YUV format flags before calling upstream API
+ * as the API also compares the flags and then returns a mode
+ */
+ mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+ drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+ /* Restore the format flags */
+ mode->flags = mode_fmt_flags;
+
+ /* Mode should only support YUV and not both to set the flag */
+ if ((mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ && !(mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_RGB444)) {
+ mode_is_yuv = true;
+ }
+
+
+ if (!display->bt2020_colorimetry && use_bt2020) {
+ /**
+ * 1. Update colorimetry to use extended
+ * 2. Change extended to use BT2020
+ * 3. Change colorspace based on mode
+ * 4. Use limited as BT2020 is always limited
+ */
+ info.colorimetry = SDE_HDMI_USE_EXTENDED_COLORIMETRY;
+ info.extended_colorimetry = SDE_HDMI_BT2020_COLORIMETRY;
+ if (mode_is_yuv)
+ info.colorspace = HDMI_COLORSPACE_YUV420;
+ if (connector->yuv_qs)
+ info.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_LIMITED;
+ } else if (display->bt2020_colorimetry && !use_bt2020) {
+ /**
+ * 1. Update colorimetry to non-extended
+ * 2. Change colorspace based on mode
+ * 3. Restore quantization to full if QS
+ * is enabled
+ */
+ info.colorimetry = SDE_HDMI_DEFAULT_COLORIMETRY;
+ if (mode_is_yuv)
+ info.colorspace = HDMI_COLORSPACE_YUV420;
+ if (connector->yuv_qs)
+ info.ycc_quantization_range =
+ HDMI_YCC_QUANTIZATION_RANGE_FULL;
+ }
+
+ /* Pack the infoframe; byte after the header is the checksum */
+ hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+ checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+ avi_info0 = checksum |
+ LEFT_SHIFT_BYTE(avi_frame[0]) |
+ LEFT_SHIFT_WORD(avi_frame[1]) |
+ LEFT_SHIFT_24BITS(avi_frame[2]);
+
+ avi_info1 = avi_frame[3] |
+ LEFT_SHIFT_BYTE(avi_frame[4]) |
+ LEFT_SHIFT_WORD(avi_frame[5]) |
+ LEFT_SHIFT_24BITS(avi_frame[6]);
+
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), avi_info0);
+ hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), avi_info1);
+ /* Cache new state so redundant reprogramming is skipped next time */
+ display->bt2020_colorimetry = use_bt2020;
+}
+
+/*
+ * sde_hdmi_clear_hdr_infoframe - stop transmitting the HDR infoframe by
+ * clearing the generic-packet enable bits in HDMI_GEN_PKT_CTRL.
+ * @display: sde_hdmi display context.
+ */
+static void sde_hdmi_clear_hdr_infoframe(struct sde_hdmi *display)
+{
+	struct hdmi *hdmi;
+	u32 packet_control = 0;
+
+	if (!display) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/*
+	 * Fix: check the controller before reaching through it for the
+	 * connector. The previous code dereferenced display->ctrl.ctrl
+	 * prior to its NULL check.
+	 */
+	hdmi = display->ctrl.ctrl;
+	if (!hdmi || !hdmi->connector) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	packet_control = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	packet_control &= ~HDMI_GEN_PKT_CTRL_CLR_MASK;
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+/*
+ * sde_hdmi_set_property - handle writes to HDMI-specific connector
+ * properties (PLL enable/delta tuning and HPD off).
+ * Returns 0 on success or a negative error from the property handler.
+ */
+int sde_hdmi_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t value,
+		void *display)
+{
+	int ret = 0;
+
+	if (!connector || !display) {
+		SDE_ERROR("connector=%pK or display=%pK is NULL\n",
+			connector, display);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	switch (property_index) {
+	case CONNECTOR_PROP_PLL_ENABLE:
+		ret = _sde_hdmi_enable_pll_update(display, value);
+		break;
+	case CONNECTOR_PROP_PLL_DELTA:
+		ret = _sde_hdmi_update_pll_delta(display, value);
+		break;
+	case CONNECTOR_PROP_HPD_OFF:
+		ret = _sde_hdmi_update_hpd_state(display, value);
+		break;
+	default:
+		/* other properties are handled by the generic connector */
+		break;
+	}
+
+	return ret;
+}
+
+/*
+ * sde_hdmi_get_property - read back HDMI-specific connector properties
+ * (PLL update enable state, sink HDCP version) under the display lock.
+ * Returns 0; *value is left untouched for unknown properties.
+ */
+int sde_hdmi_get_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t *value,
+		void *display)
+{
+	struct sde_hdmi *disp = display;
+
+	if (!connector || !disp) {
+		SDE_ERROR("connector=%pK or display=%pK is NULL\n",
+			connector, disp);
+		return -EINVAL;
+	}
+
+	mutex_lock(&disp->display_lock);
+	switch (property_index) {
+	case CONNECTOR_PROP_PLL_ENABLE:
+		*value = disp->pll_update_enable ? 1 : 0;
+		break;
+	case CONNECTOR_PROP_HDCP_VERSION:
+		*value = disp->sink_hdcp_ver;
+		break;
+	default:
+		break;
+	}
+	mutex_unlock(&disp->display_lock);
+
+	return 0;
+}
+
+/*
+ * sde_hdmi_get_num_of_displays - count registered HDMI displays.
+ * Walks the global sde_hdmi_list under its lock.
+ */
+u32 sde_hdmi_get_num_of_displays(void)
+{
+	struct sde_hdmi *entry;
+	u32 total = 0;
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry(entry, &sde_hdmi_list, list)
+		total++;
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	return total;
+}
+
+/*
+ * sde_hdmi_get_displays - copy up to @max_display_count registered display
+ * handles into @display_array.
+ * Returns the number of entries written (0 on bad arguments).
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count)
+{
+	struct sde_hdmi *entry;
+	int filled = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!display_array) {
+		SDE_ERROR("invalid param\n");
+		return 0;
+	}
+	if (!max_display_count)
+		return 0;
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry(entry, &sde_hdmi_list, list) {
+		if (filled >= max_display_count)
+			break;
+		display_array[filled++] = entry;
+	}
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	return filled;
+}
+
+/*
+ * sde_hdmi_connector_pre_deinit - connector teardown hook; disables HPD
+ * detection before the connector is destroyed.
+ * Returns 0 on success, -EINVAL on missing display/controller.
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+		void *display)
+{
+	struct sde_hdmi *sde_hdmi = display;
+
+	if (!sde_hdmi || !sde_hdmi->ctrl.ctrl) {
+		SDE_ERROR("sde_hdmi=%p or hdmi is NULL\n", sde_hdmi);
+		return -EINVAL;
+	}
+
+	_sde_hdmi_hpd_disable(sde_hdmi);
+	return 0;
+}
+
+/*
+ * _sde_hdmi_get_tx_version - read the TX block version register and cache
+ * the major version plus the matching maximum pixel clock for this core.
+ */
+static void _sde_hdmi_get_tx_version(struct sde_hdmi *sde_hdmi)
+{
+	struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+	u32 major;
+
+	sde_hdmi->hdmi_tx_version = hdmi_read(hdmi, REG_HDMI_VERSION);
+	major = SDE_GET_MAJOR_VER(sde_hdmi->hdmi_tx_version);
+	sde_hdmi->hdmi_tx_major_version = major;
+
+	/* Pixel clock ceiling depends on the TX core generation */
+	if (major == HDMI_TX_VERSION_3)
+		sde_hdmi->max_pclk_khz = HDMI_TX_3_MAX_PCLK_RATE;
+	else if (major == HDMI_TX_VERSION_4)
+		sde_hdmi->max_pclk_khz = HDMI_TX_4_MAX_PCLK_RATE;
+	else
+		sde_hdmi->max_pclk_khz = HDMI_DEFAULT_MAX_PCLK_RATE;
+
+	SDE_DEBUG("sde_hdmi->hdmi_tx_version = 0x%x\n",
+		sde_hdmi->hdmi_tx_version);
+	SDE_DEBUG("sde_hdmi->hdmi_tx_major_version = 0x%x\n",
+		sde_hdmi->hdmi_tx_major_version);
+	SDE_DEBUG("sde_hdmi->max_pclk_khz = 0x%x\n",
+		sde_hdmi->max_pclk_khz);
+}
+
+/*
+ * sde_hdmi_tx_check_capability - read QFPROM fuses to determine whether
+ * HDMI/HDCP are fused off and whether HW or SW HDCP 1.x keys apply.
+ * Returns 0 if HDMI is usable, -ENODEV if the HDMI feature is disabled.
+ * Side effects: sets hdcp14_present, dc_feature_supported and
+ * hdcp1_use_sw_keys on @sde_hdmi.
+ */
+static int sde_hdmi_tx_check_capability(struct sde_hdmi *sde_hdmi)
+{
+ u32 hdmi_disabled, hdcp_disabled, reg_val;
+ int ret = 0;
+ struct hdmi *hdmi = sde_hdmi->ctrl.ctrl;
+
+ /* check if hdmi and hdcp are disabled */
+ /* Fuse layout differs between pre-v4 and v4+ TX cores */
+ if (sde_hdmi->hdmi_tx_major_version < HDMI_TX_VERSION_4) {
+ hdcp_disabled = hdmi_qfprom_read(hdmi,
+ QFPROM_RAW_FEAT_CONFIG_ROW0_LSB) & BIT(31);
+
+ hdmi_disabled = hdmi_qfprom_read(hdmi,
+ QFPROM_RAW_FEAT_CONFIG_ROW0_MSB) & BIT(0);
+ } else {
+ reg_val = hdmi_qfprom_read(hdmi,
+ QFPROM_RAW_FEAT_CONFIG_ROW0_LSB + QFPROM_RAW_VERSION_4);
+ hdcp_disabled = reg_val & BIT(12);
+
+ hdmi_disabled = reg_val & BIT(13);
+
+ reg_val = hdmi_qfprom_read(hdmi, SEC_CTRL_HW_VERSION);
+
+ SDE_DEBUG("SEC_CTRL_HW_VERSION reg_val = 0x%x\n", reg_val);
+ /*
+ * With HDCP enabled on capable hardware, check if HW
+ * or SW keys should be used.
+ */
+ if (!hdcp_disabled && (reg_val >= HDCP_SEL_MIN_SEC_VERSION)) {
+ reg_val = hdmi_qfprom_read(hdmi,
+ QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+ QFPROM_RAW_VERSION_4);
+
+ /* BIT(23) clear => HW keys unavailable, fall back to SW */
+ if (!(reg_val & BIT(23)))
+ sde_hdmi->hdcp1_use_sw_keys = true;
+ }
+ }
+
+ /* Deep color is only supported on v4+ cores */
+ if (sde_hdmi->hdmi_tx_major_version >= HDMI_TX_VERSION_4)
+ sde_hdmi->dc_feature_supported = true;
+
+ SDE_DEBUG("%s: Features <HDMI:%s, HDCP:%s, Deep Color:%s>\n", __func__,
+ hdmi_disabled ? "OFF" : "ON",
+ hdcp_disabled ? "OFF" : "ON",
+ sde_hdmi->dc_feature_supported ? "ON" : "OFF");
+
+ if (hdmi_disabled) {
+ DEV_ERR("%s: HDMI disabled\n", __func__);
+ ret = -ENODEV;
+ goto end;
+ }
+
+ sde_hdmi->hdcp14_present = !hdcp_disabled;
+
+ end:
+ return ret;
+} /* hdmi_tx_check_capability */
+
+/*
+ * _sde_hdmi_init_hdcp - initialize the HDCP 1.x (if fused present) and
+ * HDCP 2.2 feature modules for this display.
+ * @hdmi_ctrl: sde_hdmi display context.
+ * Returns 0 on success, -EINVAL if either module fails to initialize.
+ */
+static int _sde_hdmi_init_hdcp(struct sde_hdmi *hdmi_ctrl)
+{
+	struct sde_hdcp_init_data hdcp_init_data;
+	void *hdcp_data;
+	int rc = 0;
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("sde_hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+	hdcp_init_data.phy_addr = hdmi->mmio_phy_addr;
+	hdcp_init_data.core_io = &hdmi_ctrl->io[HDMI_TX_CORE_IO];
+	hdcp_init_data.qfprom_io = &hdmi_ctrl->io[HDMI_TX_QFPROM_IO];
+	hdcp_init_data.hdcp_io = &hdmi_ctrl->io[HDMI_TX_HDCP_IO];
+	hdcp_init_data.mutex = &hdmi_ctrl->hdcp_mutex;
+	hdcp_init_data.workq = hdmi->workq;
+	hdcp_init_data.notify_status = sde_hdmi_tx_hdcp_cb;
+	hdcp_init_data.avmute_sink = sde_hdmi_tx_set_avmute;
+	hdcp_init_data.cb_data = (void *)hdmi_ctrl;
+	hdcp_init_data.hdmi_tx_ver = hdmi_ctrl->hdmi_tx_major_version;
+	hdcp_init_data.sec_access = true;
+	hdcp_init_data.client_id = HDCP_CLIENT_HDMI;
+	hdcp_init_data.ddc_ctrl = &hdmi_ctrl->ddc_ctrl;
+
+	if (hdmi_ctrl->hdcp14_present) {
+		hdcp_data = sde_hdcp_1x_init(&hdcp_init_data);
+
+		if (IS_ERR_OR_NULL(hdcp_data)) {
+			DEV_ERR("%s: hdcp 1.4 init failed\n", __func__);
+			rc = -EINVAL;
+			/*
+			 * Fix: do not kfree() here — hdcp_data is either
+			 * NULL or an ERR_PTR encoded error, and freeing an
+			 * ERR_PTR corrupts the allocator.
+			 */
+			goto end;
+		}
+		hdmi_ctrl->hdcp_feat_data[SDE_HDCP_1x] = hdcp_data;
+		SDE_HDMI_DEBUG("%s: HDCP 1.4 initialized\n", __func__);
+	}
+
+	hdcp_data = sde_hdmi_hdcp2p2_init(&hdcp_init_data);
+
+	if (IS_ERR_OR_NULL(hdcp_data)) {
+		DEV_ERR("%s: hdcp 2.2 init failed\n", __func__);
+		rc = -EINVAL;
+		goto end;
+	}
+	hdmi_ctrl->hdcp_feat_data[SDE_HDCP_2P2] = hdcp_data;
+	SDE_HDMI_DEBUG("%s: HDCP 2.2 initialized\n", __func__);
+
+end:
+	return rc;
+}
+
+/*
+ * sde_hdmi_set_top_ctl - push the DT-configured display topology control
+ * (force-mixer / force-tiling) into the connector's topology property.
+ * Returns 0 on success, -EINVAL on a NULL display.
+ */
+int sde_hdmi_set_top_ctl(struct drm_connector *connector,
+		struct drm_display_mode *adj_mode, void *display)
+{
+	struct sde_hdmi *sde_hdmi = display;
+
+	if (!sde_hdmi) {
+		SDE_ERROR("sde_hdmi is NULL\n");
+		return -EINVAL;
+	}
+
+	/* A zero topology means "no override requested" */
+	if (!sde_hdmi->display_topology)
+		return 0;
+
+	SDE_DEBUG("%s, set display topology %d\n",
+		__func__, sde_hdmi->display_topology);
+
+	msm_property_set_property(sde_connector_get_propinfo(connector),
+		sde_connector_get_property_values(connector->state),
+		CONNECTOR_PROP_TOPOLOGY_CONTROL,
+		sde_hdmi->display_topology);
+
+	return 0;
+}
+
+/*
+ * sde_hdmi_connector_post_init - finish connector bring-up: attach the
+ * connector to the controller, set up hotplug handling (or disable it for
+ * non-pluggable panels), then read TX version/capabilities and init HDCP.
+ * Returns 0 on success or the HPD-enable error code.
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ int rc = 0;
+ struct sde_hdmi *sde_hdmi = (struct sde_hdmi *)display;
+ struct hdmi *hdmi;
+
+ if (!sde_hdmi) {
+ SDE_ERROR("sde_hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ hdmi = sde_hdmi->ctrl.ctrl;
+ if (!hdmi) {
+ SDE_ERROR("hdmi is NULL\n");
+ return -EINVAL;
+ }
+
+ if (info)
+ sde_kms_info_add_keystr(info,
+ "display type",
+ sde_hdmi->display_type);
+
+ hdmi->connector = connector;
+ INIT_WORK(&sde_hdmi->hpd_work, _sde_hdmi_hotplug_work);
+
+ if (sde_hdmi->non_pluggable) {
+ /* Disable HPD interrupt */
+ hdmi_write(hdmi, REG_HDMI_HPD_CTRL, 0);
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_CTRL, 0);
+ hdmi_write(hdmi, REG_HDMI_HPD_INT_STATUS, 0);
+ } else {
+ /* Enable HPD detection if non_pluggable flag is not defined */
+ rc = _sde_hdmi_hpd_enable(sde_hdmi);
+ if (rc)
+ SDE_ERROR("failed to enable HPD: %d\n", rc);
+ }
+
+ /* Capability probing depends on the TX version read just above */
+ _sde_hdmi_get_tx_version(sde_hdmi);
+
+ sde_hdmi_tx_check_capability(sde_hdmi);
+
+ _sde_hdmi_init_hdcp(sde_hdmi);
+
+ return rc;
+}
+
+/*
+ * sde_hdmi_start_hdcp - kick off HDCP authentication on the sink if HDCP
+ * is enabled; raises AV mute first when encryption is already set.
+ * @connector: DRM connector backed by an sde_hdmi display.
+ * Returns 0 if HDCP is disabled or auth started, negative error otherwise.
+ */
+int sde_hdmi_start_hdcp(struct drm_connector *connector)
+{
+	int rc;
+	struct sde_connector *c_conn;
+	struct sde_hdmi *display;
+	struct hdmi *hdmi;
+
+	/* Fix: guard every pointer on the path before dereferencing it */
+	if (!connector) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	display = (struct sde_hdmi *)c_conn->display;
+	if (!display) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	if (!hdmi) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!sde_hdmi_tx_is_hdcp_enabled(display))
+		return 0;
+
+	/* Mute A/V while re-authenticating so no unencrypted frames leak */
+	if (sde_hdmi_tx_is_encryption_set(display))
+		sde_hdmi_config_avmute(hdmi, true);
+
+	rc = display->hdcp_ops->authenticate(display->hdcp_data);
+	if (rc)
+		SDE_ERROR("%s: hdcp auth failed. rc=%d\n", __func__, rc);
+
+	return rc;
+}
+
+/*
+ * sde_hdmi_connector_detect - DRM detect hook; reports connected state
+ * from the cached display info and copies physical dimensions onto the
+ * connector.
+ * Non-hot-pluggable displays always report connected.
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ enum drm_connector_status status = connector_status_unknown;
+ struct msm_display_info info;
+ int rc;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%p or display=%p is NULL\n",
+ connector, display);
+ return status;
+ }
+
+ /* get display dsi_info */
+ memset(&info, 0x0, sizeof(info));
+ rc = sde_hdmi_get_info(&info, display);
+ if (rc) {
+ SDE_ERROR("failed to get display info, rc=%d\n", rc);
+ return connector_status_disconnected;
+ }
+
+ /* Hot-pluggable panels report the cached HPD state; others are fixed */
+ if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+ status = (info.is_connected ? connector_status_connected :
+ connector_status_disconnected);
+ else
+ status = connector_status_connected;
+
+ connector->display_info.width_mm = info.width_mm;
+ connector->display_info.height_mm = info.height_mm;
+
+ return status;
+}
+
+/*
+ * sde_hdmi_pre_kickoff - per-frame hook; sends or clears the HDR
+ * infoframe and updates colorimetry based on the HDR state transition.
+ * @connector: DRM connector.
+ * @display: sde_hdmi display context.
+ * @params: kickoff parameters carrying the HDR control block.
+ * Returns 0 on success, -EINVAL on missing arguments.
+ */
+int sde_hdmi_pre_kickoff(struct drm_connector *connector,
+		void *display,
+		struct msm_display_kickoff_params *params)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+	struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
+	u8 hdr_op;
+
+	if (!connector || !display || !params ||
+		!params->hdr_ctrl) {
+		pr_err("Invalid params\n");
+		return -EINVAL;
+	}
+
+	hdr_ctrl = params->hdr_ctrl;
+	/*
+	 * Fix: dropped the old "if (!hdr_meta)" test — hdr_meta was the
+	 * address of an embedded member, which can never be NULL, so the
+	 * check was dead code.
+	 */
+	hdr_meta = &hdr_ctrl->hdr_meta;
+
+	hdr_op = sde_hdmi_hdr_get_ops(hdmi_display->curr_hdr_state,
+		hdr_ctrl->hdr_state);
+
+	if (hdr_op == HDR_SEND_INFO) {
+		if (connector->hdr_supported)
+			sde_hdmi_panel_set_hdr_infoframe(display,
+				&hdr_ctrl->hdr_meta);
+		/* Non-zero EOTF means HDR content: advertise BT.2020 */
+		if (hdr_meta->eotf)
+			sde_hdmi_update_colorimetry(hdmi_display,
+				true);
+		else
+			sde_hdmi_update_colorimetry(hdmi_display,
+				false);
+	} else if (hdr_op == HDR_CLEAR_INFO)
+		sde_hdmi_clear_hdr_infoframe(display);
+
+	hdmi_display->curr_hdr_state = hdr_ctrl->hdr_state;
+
+	return 0;
+}
+
+/*
+ * sde_hdmi_mode_needs_full_range - decide whether the current mode should
+ * use full quantization range: true only for non-CEA modes or VICs up to
+ * 640x480 (VIC 1), which default to full range.
+ */
+bool sde_hdmi_mode_needs_full_range(void *display)
+{
+	struct sde_hdmi *hdmi_display = display;
+	struct drm_display_mode *mode;
+	u32 saved_flags;
+	u32 vic;
+
+	if (!hdmi_display) {
+		SDE_ERROR("invalid input\n");
+		return false;
+	}
+
+	mode = &hdmi_display->mode;
+	/*
+	 * The upstream matcher also compares mode flags, so temporarily
+	 * strip our private RGB/YUV format bits and restore them after.
+	 */
+	saved_flags = mode->flags;
+	mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+	vic = drm_match_cea_mode(mode);
+	mode->flags = saved_flags;
+
+	return vic <= SDE_HDMI_VIC_640x480;
+}
+
+/*
+ * sde_hdmi_get_csc_type - pick the RGB->YUV CSC matrix for the current
+ * state: BT.2020 limited for active HDR, BT.601 full-range for modes that
+ * need full range or QS-capable sinks, BT.601 limited otherwise (also the
+ * fallback on any invalid input).
+ */
+enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn,
+ void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct sde_connector_state *c_state;
+ struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+ struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
+
+ if (!hdmi_display || !conn) {
+ SDE_ERROR("invalid input\n");
+ goto error;
+ }
+
+ c_state = to_sde_connector_state(conn->state);
+
+ if (!c_state) {
+ SDE_ERROR("invalid input\n");
+ goto error;
+ }
+
+ hdr_ctrl = &c_state->hdr_ctrl;
+ hdr_meta = &hdr_ctrl->hdr_meta;
+
+ /* Active HDR (non-zero EOTF) forces BT.2020 limited range */
+ if ((hdr_ctrl->hdr_state == HDR_ENABLE)
+ && (hdr_meta->eotf != 0))
+ return SDE_CSC_RGB2YUV_2020L;
+ else if (sde_hdmi_mode_needs_full_range(hdmi_display)
+ || conn->yuv_qs)
+ return SDE_CSC_RGB2YUV_601FR;
+
+error:
+ return SDE_CSC_RGB2YUV_601L;
+}
+
+/*
+ * sde_hdmi_connector_get_modes - DRM get_modes hook. Non-pluggable panels
+ * expose the DT-parsed mode list; pluggable ones report modes from the
+ * EDID read at hotplug time.
+ * Returns the number of modes added.
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector, void *display)
+{
+ struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+ struct drm_display_mode *mode, *m;
+ int ret = 0;
+
+ if (!connector || !display) {
+ SDE_ERROR("connector=%p or display=%p is NULL\n",
+ connector, display);
+ return 0;
+ }
+
+ if (hdmi_display->non_pluggable) {
+ /* Duplicate each DT mode; the probed list owns the copies */
+ list_for_each_entry(mode, &hdmi_display->mode_list, head) {
+ m = drm_mode_duplicate(connector->dev, mode);
+ if (!m) {
+ SDE_ERROR("failed to add hdmi mode %dx%d\n",
+ mode->hdisplay, mode->vdisplay);
+ break;
+ }
+ drm_mode_probed_add(connector, m);
+ }
+ ret = hdmi_display->num_of_modes;
+ } else {
+ /* pluggable case assumes EDID is read when HPD */
+ ret = _sde_edid_update_modes(connector,
+ hdmi_display->edid_ctrl);
+ }
+
+ return ret;
+}
+
+/*
+ * sde_hdmi_mode_valid - DRM mode_valid hook. Rejects modes whose pixel
+ * clock the PLL cannot produce exactly and modes carrying no RGB/YUV
+ * format flag.
+ * Returns MODE_OK, MODE_CLOCK_RANGE or MODE_BAD.
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	struct sde_hdmi *hdmi_display = (struct sde_hdmi *)display;
+	struct hdmi *hdmi;
+	struct msm_drm_private *priv;
+	struct msm_kms *kms;
+	long actual, requested;
+
+	if (!connector || !display || !mode) {
+		SDE_ERROR("connector=%p or display=%p or mode=%p is NULL\n",
+			connector, display, mode);
+		/*
+		 * Fix: was "return 0", i.e. MODE_OK — invalid arguments must
+		 * not validate the mode.
+		 */
+		return MODE_BAD;
+	}
+
+	hdmi = hdmi_display->ctrl.ctrl;
+	priv = connector->dev->dev_private;
+	kms = priv->kms;
+	requested = 1000 * mode->clock;	/* mode->clock is in kHz */
+	actual = kms->funcs->round_pixclk(kms,
+		requested, hdmi->encoder);
+
+	SDE_HDMI_DEBUG("requested=%ld, actual=%ld", requested, actual);
+
+	if (actual != requested)
+		return MODE_CLOCK_RANGE;
+
+	/* if no format flags are present remove the mode */
+	if (!(mode->flags & SDE_DRM_MODE_FLAG_FMT_MASK)) {
+		SDE_HDMI_DEBUG("removing following mode from list\n");
+		drm_mode_debug_printmodeline(mode);
+		return MODE_BAD;
+	}
+
+	return MODE_OK;
+}
+
+/*
+ * sde_hdmi_dev_init - device-level init placeholder; only validates the
+ * display pointer today.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+	if (display)
+		return 0;
+
+	SDE_ERROR("Invalid params\n");
+	return -EINVAL;
+}
+
+/*
+ * sde_hdmi_dev_deinit - release the HDCP 1.x and 2.2 feature modules
+ * created by _sde_hdmi_init_hdcp, if present.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+	void *feat;
+
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	feat = display->hdcp_feat_data[SDE_HDCP_1x];
+	if (feat)
+		sde_hdcp_1x_deinit(feat);
+
+	feat = display->hdcp_feat_data[SDE_HDCP_2P2];
+	if (feat)
+		sde_hdmi_hdcp2p2_deinit(feat);
+
+	return 0;
+}
+
+/*
+ * _sde_hdmi_cec_init - acquire the CEC notifier for this platform device.
+ * Returns 0 on success, -ENOMEM if the notifier cannot be obtained.
+ */
+static int _sde_hdmi_cec_init(struct sde_hdmi *display)
+{
+	display->notifier = cec_notifier_get(&display->pdev->dev);
+	if (display->notifier)
+		return 0;
+
+	SDE_ERROR("CEC notifier get failed\n");
+	return -ENOMEM;
+}
+
+/*
+ * _sde_hdmi_cec_deinit - invalidate the CEC physical address and drop the
+ * notifier reference taken in _sde_hdmi_cec_init.
+ */
+static void _sde_hdmi_cec_deinit(struct sde_hdmi *display)
+{
+	struct cec_notifier *notifier = display->notifier;
+
+	cec_notifier_set_phys_addr(notifier, CEC_PHYS_ADDR_INVALID);
+	cec_notifier_put(notifier);
+}
+
+/*
+ * sde_hdmi_bind - component bind callback; wires the sde_hdmi instance to
+ * the DRM device: debugfs, external-display interface, CEC, EDID parser,
+ * register maps, DDC and the HDCP callback worker.
+ * Unwinds in reverse order via the goto chain on failure.
+ * Returns 0 on success or a negative error code.
+ */
+static int sde_hdmi_bind(struct device *dev, struct device *master, void *data)
+{
+ int rc = 0;
+ struct sde_hdmi_ctrl *display_ctrl = NULL;
+ struct sde_hdmi *display = NULL;
+ struct drm_device *drm = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct platform_device *pdev = to_platform_device(dev);
+
+ SDE_HDMI_DEBUG(" %s +\n", __func__);
+ if (!dev || !pdev || !master) {
+ pr_err("invalid param(s), dev %pK, pdev %pK, master %pK\n",
+ dev, pdev, master);
+ return -EINVAL;
+ }
+
+ drm = dev_get_drvdata(master);
+ display = platform_get_drvdata(pdev);
+ if (!drm || !display) {
+ pr_err("invalid param(s), drm %pK, display %pK\n",
+ drm, display);
+ return -EINVAL;
+ }
+
+ priv = drm->dev_private;
+ mutex_lock(&display->display_lock);
+
+ rc = _sde_hdmi_debugfs_init(display);
+ if (rc) {
+ SDE_ERROR("[%s]Debugfs init failed, rc=%d\n",
+ display->name, rc);
+ goto debug_error;
+ }
+
+ rc = _sde_hdmi_ext_disp_init(display);
+ if (rc) {
+ SDE_ERROR("[%s]Ext Disp init failed, rc=%d\n",
+ display->name, rc);
+ goto ext_error;
+ }
+
+ rc = _sde_hdmi_cec_init(display);
+ if (rc) {
+ SDE_ERROR("[%s]CEC init failed, rc=%d\n",
+ display->name, rc);
+ goto ext_error;
+ }
+
+ display->edid_ctrl = sde_edid_init();
+ if (!display->edid_ctrl) {
+ SDE_ERROR("[%s]sde edid init failed\n",
+ display->name);
+ rc = -ENOMEM;
+ goto cec_error;
+ }
+
+ /* Late-bound: the hdmi controller is created by the master driver */
+ display_ctrl = &display->ctrl;
+ display_ctrl->ctrl = priv->hdmi;
+ display->drm_dev = drm;
+
+ _sde_hdmi_map_regs(display, priv->hdmi);
+ _sde_hdmi_init_ddc(display, priv->hdmi);
+
+ display->enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+
+ INIT_DELAYED_WORK(&display->hdcp_cb_work,
+ sde_hdmi_tx_hdcp_cb_work);
+ mutex_init(&display->hdcp_mutex);
+ mutex_unlock(&display->display_lock);
+ return rc;
+
+ /* Error unwind: each label undoes the steps that succeeded before it */
+cec_error:
+ (void)_sde_hdmi_cec_deinit(display);
+ext_error:
+ (void)_sde_hdmi_debugfs_deinit(display);
+debug_error:
+ mutex_unlock(&display->display_lock);
+ return rc;
+}
+
+
+/*
+ * sde_hdmi_unbind - component unbind callback; tears down debugfs, the
+ * EDID parser and CEC, and detaches the display from the DRM device.
+ */
+static void sde_hdmi_unbind(struct device *dev, struct device *master,
+		void *data)
+{
+	struct sde_hdmi *display;
+
+	if (!dev) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	display = platform_get_drvdata(to_platform_device(dev));
+	if (!display) {
+		SDE_ERROR("Invalid display device\n");
+		return;
+	}
+
+	mutex_lock(&display->display_lock);
+
+	_sde_hdmi_debugfs_deinit(display);
+	sde_edid_deinit((void **)&display->edid_ctrl);
+	_sde_hdmi_cec_deinit(display);
+	display->drm_dev = NULL;
+
+	mutex_unlock(&display->display_lock);
+}
+
+/* Component framework hooks: bind/unbind against the master DRM device */
+static const struct component_ops sde_hdmi_comp_ops = {
+ .bind = sde_hdmi_bind,
+ .unbind = sde_hdmi_unbind,
+};
+
+/*
+ * _sde_hdmi_parse_dt_modes - build a drm_display_mode list from the
+ * "qcom,customize-modes" device-tree node (child node or phandle).
+ * @np: parent device node.
+ * @head: list to append parsed modes to (caller frees entries).
+ * @num_of_modes: optional out-param for the number of parsed modes.
+ * Nodes that fail to parse are skipped; returns the last rc seen.
+ */
+static int _sde_hdmi_parse_dt_modes(struct device_node *np,
+					struct list_head *head,
+					u32 *num_of_modes)
+{
+	int rc = 0;
+	struct drm_display_mode *mode;
+	u32 mode_count = 0;
+	struct device_node *node = NULL;
+	struct device_node *root_node = NULL;
+	const char *name;
+	u32 h_front_porch, h_pulse_width, h_back_porch;
+	u32 v_front_porch, v_pulse_width, v_back_porch;
+	bool h_active_high, v_active_high;
+	u32 flags;
+
+	root_node = of_get_child_by_name(np, "qcom,customize-modes");
+	if (!root_node) {
+		root_node = of_parse_phandle(np, "qcom,customize-modes", 0);
+		if (!root_node) {
+			DRM_INFO("No entry present for qcom,customize-modes");
+			goto end;
+		}
+	}
+	for_each_child_of_node(root_node, node) {
+		rc = 0;
+		/*
+		 * Fix: reset flags per mode. It was previously initialized
+		 * once before the loop, so sync-polarity bits from earlier
+		 * modes leaked into every later mode (e.g. PHSYNC and
+		 * NHSYNC both set).
+		 */
+		flags = 0;
+		mode = kzalloc(sizeof(*mode), GFP_KERNEL);
+		if (!mode) {
+			SDE_ERROR("Out of memory\n");
+			rc = -ENOMEM;
+			continue;
+		}
+
+		rc = of_property_read_string(node, "qcom,mode-name",
+						&name);
+		if (rc) {
+			SDE_ERROR("failed to read qcom,mode-name, rc=%d\n", rc);
+			goto fail;
+		}
+		strlcpy(mode->name, name, DRM_DISPLAY_MODE_LEN);
+
+		rc = of_property_read_u32(node, "qcom,mode-h-active",
+						&mode->hdisplay);
+		if (rc) {
+			SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+						&h_front_porch);
+		if (rc) {
+			SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+						&h_pulse_width);
+		if (rc) {
+			SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+						&h_back_porch);
+		if (rc) {
+			SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		h_active_high = of_property_read_bool(node,
+						"qcom,mode-h-active-high");
+
+		rc = of_property_read_u32(node, "qcom,mode-v-active",
+						&mode->vdisplay);
+		if (rc) {
+			SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+						&v_front_porch);
+		if (rc) {
+			SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+						&v_pulse_width);
+		if (rc) {
+			SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+						&v_back_porch);
+		if (rc) {
+			SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+			goto fail;
+		}
+
+		v_active_high = of_property_read_bool(node,
+						"qcom,mode-v-active-high");
+
+		rc = of_property_read_u32(node, "qcom,mode-refresh-rate",
+						&mode->vrefresh);
+		if (rc) {
+			SDE_ERROR("failed to read refresh-rate, rc=%d\n", rc);
+			goto fail;
+		}
+
+		rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+						&mode->clock);
+		if (rc) {
+			SDE_ERROR("failed to read clock, rc=%d\n", rc);
+			goto fail;
+		}
+
+		/* Derive sync/total timings from active size + porches */
+		mode->hsync_start = mode->hdisplay + h_front_porch;
+		mode->hsync_end = mode->hsync_start + h_pulse_width;
+		mode->htotal = mode->hsync_end + h_back_porch;
+		mode->vsync_start = mode->vdisplay + v_front_porch;
+		mode->vsync_end = mode->vsync_start + v_pulse_width;
+		mode->vtotal = mode->vsync_end + v_back_porch;
+		if (h_active_high)
+			flags |= DRM_MODE_FLAG_PHSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NHSYNC;
+		if (v_active_high)
+			flags |= DRM_MODE_FLAG_PVSYNC;
+		else
+			flags |= DRM_MODE_FLAG_NVSYNC;
+
+		flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
+		mode->flags = flags;
+
+		if (!rc) {
+			mode_count++;
+			list_add_tail(&mode->head, head);
+		}
+
+		SDE_DEBUG("mode[%d] h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+			mode_count - 1, mode->hdisplay, mode->hsync_start,
+			mode->hsync_end, mode->htotal, mode->vdisplay,
+			mode->vsync_start, mode->vsync_end, mode->vtotal,
+			mode->vrefresh, mode->flags, mode->clock);
+fail:
+		if (rc) {
+			kfree(mode);
+			continue;
+		}
+	}
+
+	if (num_of_modes)
+		*num_of_modes = mode_count;
+
+end:
+	return rc;
+}
+
+/*
+ * _sde_hdmi_parse_dt - read the per-display device-tree configuration:
+ * label, display type, pluggability, topology-control override, DDC skip
+ * flag and the optional custom mode list.
+ * Returns 0 on success or the mode-parsing error code.
+ */
+static int _sde_hdmi_parse_dt(struct device_node *node,
+ struct sde_hdmi *display)
+{
+ int rc = 0;
+
+ const char *name;
+ u32 top = 0;
+
+ display->name = of_get_property(node, "label", NULL);
+
+ display->display_type = of_get_property(node,
+ "qcom,display-type", NULL);
+ if (!display->display_type)
+ display->display_type = "unknown";
+
+ display->non_pluggable = of_property_read_bool(node,
+ "qcom,non-pluggable");
+
+ /* Topology control is optional; absence is only logged */
+ rc = of_property_read_string(node, "qcom,display-topology-control",
+ &name);
+ if (rc) {
+ SDE_ERROR("unable to get qcom,display-topology-control,rc=%d\n",
+ rc);
+ } else {
+ SDE_DEBUG("%s qcom,display-topology-control = %s\n",
+ __func__, name);
+
+ if (!strcmp(name, "force-mixer"))
+ top = BIT(SDE_RM_TOPCTL_FORCE_MIXER);
+ else if (!strcmp(name, "force-tiling"))
+ top = BIT(SDE_RM_TOPCTL_FORCE_TILING);
+
+ display->display_topology = top;
+ }
+
+ display->skip_ddc = of_property_read_bool(node,
+ "qcom,skip_ddc");
+
+ rc = _sde_hdmi_parse_dt_modes(node, &display->mode_list,
+ &display->num_of_modes);
+ if (rc)
+ SDE_ERROR("parse_dt_modes failed rc=%d\n", rc);
+
+ return rc;
+}
+
+/*
+ * _sde_hdmi_dev_probe - platform probe: allocate the display context,
+ * parse DT, register it on the global list and add the component.
+ * Returns 0 on success or a negative error code.
+ */
+static int _sde_hdmi_dev_probe(struct platform_device *pdev)
+{
+	int rc;
+	struct sde_hdmi *display;
+	int ret = 0;
+
+
+	SDE_DEBUG("\n");
+
+	if (!pdev || !pdev->dev.of_node) {
+		SDE_ERROR("pdev not found\n");
+		return -ENODEV;
+	}
+
+	display = devm_kzalloc(&pdev->dev, sizeof(*display), GFP_KERNEL);
+	if (!display)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&display->mode_list);
+	rc = _sde_hdmi_parse_dt(pdev->dev.of_node, display);
+	if (rc)
+		SDE_ERROR("parse dt failed, rc=%d\n", rc);
+
+	mutex_init(&display->display_lock);
+	display->pdev = pdev;
+	platform_set_drvdata(pdev, display);
+	mutex_lock(&sde_hdmi_list_lock);
+	list_add(&display->list, &sde_hdmi_list);
+	mutex_unlock(&sde_hdmi_list_lock);
+	if (!sde_hdmi_dev_init(display)) {
+		ret = component_add(&pdev->dev, &sde_hdmi_comp_ops);
+		if (ret) {
+			pr_err("component add failed\n");
+			goto out;
+		}
+	}
+	return 0;
+
+out:
+	/*
+	 * Fix: the old path returned "rc" (the parse result, usually 0), so
+	 * a component_add failure reported success and leaked the list
+	 * entry. Unwind the registration and propagate ret instead.
+	 */
+	mutex_lock(&sde_hdmi_list_lock);
+	list_del(&display->list);
+	mutex_unlock(&sde_hdmi_list_lock);
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, display);
+	return ret;
+}
+
+/*
+ * _sde_hdmi_dev_remove - platform remove: unlink the display from the
+ * global list, free the parsed mode list and release the context.
+ */
+static int _sde_hdmi_dev_remove(struct platform_device *pdev)
+{
+	struct sde_hdmi *display;
+	struct sde_hdmi *pos, *tmp;
+	struct drm_display_mode *mode, *n;
+
+	if (!pdev) {
+		SDE_ERROR("Invalid device\n");
+		return -EINVAL;
+	}
+
+	display = platform_get_drvdata(pdev);
+	/* Fix: drvdata was dereferenced without a NULL check */
+	if (!display) {
+		SDE_ERROR("Invalid display device\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&sde_hdmi_list_lock);
+	list_for_each_entry_safe(pos, tmp, &sde_hdmi_list, list) {
+		if (pos == display) {
+			list_del(&display->list);
+			break;
+		}
+	}
+	mutex_unlock(&sde_hdmi_list_lock);
+
+	/* Modes were kzalloc'd by _sde_hdmi_parse_dt_modes */
+	list_for_each_entry_safe(mode, n, &display->mode_list, head) {
+		list_del(&mode->head);
+		kfree(mode);
+	}
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, display);
+	return 0;
+}
+
+/* Platform driver matching "sde_hdmi" DT-compatible devices */
+static struct platform_driver sde_hdmi_driver = {
+ .probe = _sde_hdmi_dev_probe,
+ .remove = _sde_hdmi_dev_remove,
+ .driver = {
+ .name = "sde_hdmi",
+ .of_match_table = sde_hdmi_dt_match,
+ },
+};
+
+/*
+ * sde_hdmi_irqdomain_map - irq_domain map callback; installs a dummy
+ * level-triggered chip and hangs the display off the virq's chip data.
+ */
+static int sde_hdmi_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_hdmi *display;
+
+	if (!domain || !domain->host_data) {
+		pr_err("invalid parameters domain\n");
+		return -EINVAL;
+	}
+
+	display = domain->host_data;
+	irq_set_chip_and_handler(irq, &dummy_irq_chip, handle_level_irq);
+
+	return irq_set_chip_data(irq, display);
+}
+
+/* One-cell IRQ domain for sub-blocks (e.g. CEC) hanging off HDMI TX */
+static const struct irq_domain_ops sde_hdmi_irqdomain_ops = {
+ .map = sde_hdmi_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * sde_hdmi_drm_init - attach the display to DRM: create the bridge, hook
+ * up the HDMI interrupt and its sub-IRQ domain, and handle continuous
+ * splash hand-off.
+ * @display: sde_hdmi display context (must already be bound).
+ * @enc: DRM encoder to attach the bridge to.
+ * Returns 0 on success or a negative error code.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display, struct drm_encoder *enc)
+{
+	int rc = 0;
+	struct msm_drm_private *priv = NULL;
+	struct hdmi *hdmi;
+	struct platform_device *pdev;
+
+	DBG("");
+	if (!display || !display->drm_dev || !enc) {
+		SDE_ERROR("display=%p or enc=%p or drm_dev is NULL\n",
+			display, enc);
+		return -EINVAL;
+	}
+
+	mutex_lock(&display->display_lock);
+	priv = display->drm_dev->dev_private;
+	hdmi = display->ctrl.ctrl;
+
+	if (!priv || !hdmi) {
+		SDE_ERROR("priv=%p or hdmi=%p is NULL\n",
+			priv, hdmi);
+		mutex_unlock(&display->display_lock);
+		return -EINVAL;
+	}
+
+	pdev = hdmi->pdev;
+	hdmi->dev = display->drm_dev;
+	hdmi->encoder = enc;
+
+	hdmi_audio_infoframe_init(&hdmi->audio.infoframe);
+
+	hdmi->bridge = sde_hdmi_bridge_init(hdmi, display);
+	if (IS_ERR(hdmi->bridge)) {
+		rc = PTR_ERR(hdmi->bridge);
+		SDE_ERROR("failed to create HDMI bridge: %d\n", rc);
+		hdmi->bridge = NULL;
+		goto error;
+	}
+
+	/*
+	 * Fix: irq_of_parse_and_map() returns an unsigned virq and 0 on
+	 * failure — the old "< 0" test could never fire.
+	 */
+	hdmi->irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (!hdmi->irq) {
+		rc = -EINVAL;
+		SDE_ERROR("failed to get irq: %d\n", rc);
+		goto error;
+	}
+
+	rc = devm_request_irq(&pdev->dev, hdmi->irq,
+		_sde_hdmi_irq, IRQF_TRIGGER_HIGH,
+		"sde_hdmi_isr", display);
+	if (rc < 0) {
+		SDE_ERROR("failed to request IRQ%u: %d\n",
+			hdmi->irq, rc);
+		goto error;
+	}
+
+	display->irq_domain = irq_domain_add_linear(pdev->dev.of_node, 8,
+		&sde_hdmi_irqdomain_ops, display);
+	if (!display->irq_domain) {
+		SDE_ERROR("failed to create IRQ domain\n");
+		/* Fix: rc was left 0 here, returning success on failure */
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	enc->bridge = hdmi->bridge;
+	priv->bridges[priv->num_bridges++] = hdmi->bridge;
+
+	/*
+	 * After initialising HDMI bridge, we need to check
+	 * whether the early display is enabled for HDMI.
+	 * If yes, we need to increase refcount of hdmi power
+	 * clocks. This can skip the clock disabling operation in
+	 * clock_late_init when finding clk.count == 1.
+	 */
+	if (display->cont_splash_enabled) {
+		sde_hdmi_bridge_power_on(hdmi->bridge);
+		hdmi->power_on = true;
+	}
+
+	mutex_unlock(&display->display_lock);
+	return 0;
+
+error:
+	/* bridge is normally destroyed by drm: */
+	if (hdmi->bridge) {
+		hdmi_bridge_destroy(hdmi->bridge);
+		hdmi->bridge = NULL;
+	}
+	mutex_unlock(&display->display_lock);
+	return rc;
+}
+
+/*
+ * sde_hdmi_drm_deinit - undo sde_hdmi_drm_init; currently only removes
+ * the sub-IRQ domain (the bridge is destroyed by DRM itself).
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+	if (!display) {
+		SDE_ERROR("Invalid params\n");
+		return -EINVAL;
+	}
+
+	if (display->irq_domain)
+		irq_domain_remove(display->irq_domain);
+
+	return 0;
+}
+
+/* Module entry: register the sde_hdmi platform driver */
+static int __init sde_hdmi_register(void)
+{
+	DBG("");
+	return platform_driver_register(&sde_hdmi_driver);
+}
+
+/* Module exit: drop the sde_hdmi platform driver registration */
+static void __exit sde_hdmi_unregister(void)
+{
+	platform_driver_unregister(&sde_hdmi_driver);
+}
+
+module_init(sde_hdmi_register);
+module_exit(sde_hdmi_unregister);
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
new file mode 100644
index 000000000000..082b3328a40d
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi.h
@@ -0,0 +1,699 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_H_
+#define _SDE_HDMI_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/msm_ext_display.h>
+#include <linux/hdcp_qseecom.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <media/cec-notifier.h>
+#include "hdmi.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_edid_parser.h"
+#include "sde_hdmi_util.h"
+#include "sde_hdcp.h"
+
+#ifndef MIN
+#define MIN(x, y) (((x) < (y)) ? (x) : (y))
+#endif
+#ifdef HDMI_DEBUG_ENABLE
+#define SDE_HDMI_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
+#else
+#define SDE_HDMI_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args)
+#endif
+
+/* HW Revisions for different SDE targets */
+#define SDE_GET_MAJOR_VER(rev)((rev) >> 28)
+#define SDE_GET_MINOR_VER(rev)(((rev) >> 16) & 0xFFF)
+
+/**
+ * struct sde_hdmi_info - defines hdmi display properties
+ * @display_type: Display type as defined by device tree.
+ * @is_hot_pluggable: Can panel be hot plugged.
+ * @is_connected: Is panel connected.
+ * @is_edid_supported: Does panel support reading EDID information.
+ * @width_mm: Physical width of panel in millimeters.
+ * @height_mm: Physical height of panel in millimeters.
+ */
+struct sde_hdmi_info {
+ const char *display_type;
+
+ /* HPD */
+ bool is_hot_pluggable;
+ bool is_connected;
+ bool is_edid_supported;
+
+ /* Physical properties */
+ u32 width_mm;
+ u32 height_mm;
+};
+
+/**
+ * struct sde_hdmi_ctrl - hdmi ctrl/phy information for the display
+ * @ctrl: Handle to the HDMI controller device.
+ * @ctrl_of_node: pHandle to the HDMI controller device.
+ * @hdmi_ctrl_idx: HDMI controller instance id.
+ */
+struct sde_hdmi_ctrl {
+ /* controller info */
+ struct hdmi *ctrl;
+ struct device_node *ctrl_of_node;
+ u32 hdmi_ctrl_idx;
+};
+
+/*
+ * Register/IO resource regions mapped by the HDMI TX driver;
+ * used to index the io[] array in struct sde_hdmi.
+ */
+enum hdmi_tx_io_type {
+ HDMI_TX_CORE_IO,
+ HDMI_TX_QFPROM_IO,
+ HDMI_TX_HDCP_IO,
+ HDMI_TX_MAX_IO
+};
+
+/*
+ * HDCP feature variants supported by the driver. NOTE(review):
+ * presumably these index hdcp_feat_data[2] in struct sde_hdmi
+ * (same cardinality) — confirm against the HDCP init code.
+ */
+enum hdmi_tx_feature_type {
+ SDE_HDCP_1x,
+ SDE_HDCP_2P2
+};
+
+/**
+ * struct sde_hdmi - hdmi display information
+ * @pdev: Pointer to platform device.
+ * @drm_dev: DRM device associated with the display.
+ * @name: Name of the display.
+ * @display_type: Display type as defined in device tree.
+ * @list: List pointer.
+ * @display_lock: Mutex for sde_hdmi interface.
+ * @ctrl: Controller information for HDMI display.
+ * @non_pluggable: If HDMI display is non pluggable
+ * @display_topology: user requested display topology
+ * @num_of_modes: Number of modes supported by display if non pluggable.
+ * @mode_list: Mode list if non pluggable.
+ * @mode: Current display mode.
+ * @connected: If HDMI display is connected.
+ * @is_tpg_enabled: TPG state.
+ * @hdmi_tx_version: HDMI TX version
+ * @hdmi_tx_major_version: HDMI TX major version
+ * @max_pclk_khz: Max pixel clock supported
+ * @hdcp1_use_sw_keys: If HDCP1 engine uses SW keys
+ * @hdcp14_present: If the sink supports HDCP 1.4
+ * @hdcp22_present: If the sink supports HDCP 2.2
+ * @hdcp_status: Current HDCP status
+ * @sink_hdcp_ver: HDCP version of the sink
+ * @enc_lvl: Current encryption level
+ * @curr_hdr_state: Current HDR state of the HDMI connector
+ * @auth_state: Current authentication state of HDCP
+ * @sink_hdcp22_support: If the sink supports HDCP 2.2
+ * @src_hdcp22_support: If the source supports HDCP 2.2
+ * @hdcp_data: Call back data registered by the client with HDCP lib
+ * @hdcp_feat_data: Handle to HDCP feature data
+ * @hdcp_ops: Function ops registered by the client with the HDCP lib
+ * @ddc_ctrl: Handle to HDMI DDC Controller
+ * @hpd_work: HPD work structure.
+ * @codec_ready: If audio codec is ready.
+ * @client_notify_pending: If there is client notification pending.
+ * @irq_domain: IRQ domain structure.
+ * @notifier: CEC notifider to convey physical address information.
+ * @pll_update_enable: if it's allowed to update HDMI PLL ppm.
+ * @dc_enable: If deep color is enabled. Only DC_30 so far.
+ * @dc_feature_supported: If deep color feature is supported.
+ * @bt2020_colorimetry: If BT2020 colorimetry is supported by sink
+ * @hdcp_cb_work: Callback function for HDCP
+ * @io: Handle to IO base addresses for HDMI
+ * @root: Debug fs root entry.
+ */
+struct sde_hdmi {
+ struct platform_device *pdev;
+ struct drm_device *drm_dev;
+
+ const char *name;
+ const char *display_type;
+ struct list_head list;
+ struct mutex display_lock;
+ /* serializes HDCP state/callback-data access (not in kerneldoc above) */
+ struct mutex hdcp_mutex;
+ struct sde_hdmi_ctrl ctrl;
+
+ /* external display (audio) interface — see msm_ext_display.h */
+ struct platform_device *ext_pdev;
+ struct msm_ext_disp_init_data ext_audio_data;
+ /* parsed EDID state for the attached sink */
+ struct sde_edid_ctrl *edid_ctrl;
+
+ bool non_pluggable;
+ u32 display_topology;
+ /* skip DDC transactions — presumably for non-pluggable panels; confirm */
+ bool skip_ddc;
+ u32 num_of_modes;
+ struct list_head mode_list;
+ struct drm_display_mode mode;
+ bool connected;
+ bool is_tpg_enabled;
+ u32 hdmi_tx_version;
+ u32 hdmi_tx_major_version;
+ u32 max_pclk_khz;
+ bool hdcp1_use_sw_keys;
+ u32 hdcp14_present;
+ u32 hdcp22_present;
+ u8 hdcp_status;
+ u8 sink_hdcp_ver;
+ u32 enc_lvl;
+ u8 curr_hdr_state;
+ bool auth_state;
+ bool sink_hdcp22_support;
+ bool src_hdcp22_support;
+
+ /* final HDCP callback data, chosen based on the negotiated HDCP version */
+ void *hdcp_data;
+ /* per-feature HDCP init data (indexed by enum hdmi_tx_feature_type) */
+ void *hdcp_feat_data[2];
+ struct sde_hdcp_ops *hdcp_ops;
+ struct sde_hdmi_tx_ddc_ctrl ddc_ctrl;
+ struct work_struct hpd_work;
+ bool codec_ready;
+ bool client_notify_pending;
+
+ struct irq_domain *irq_domain;
+ struct cec_notifier *notifier;
+ bool pll_update_enable;
+ bool dc_enable;
+ bool dc_feature_supported;
+ bool bt2020_colorimetry;
+
+ struct delayed_work hdcp_cb_work;
+ struct dss_io_data io[HDMI_TX_MAX_IO];
+ /* DEBUG FS */
+ struct dentry *root;
+
+ /*
+ * True when the bootloader already lit up this display ("early
+ * splash"); drm_init then takes extra clock refcounts so
+ * clock_late_init does not turn the pipe off (see sde_hdmi_drm_init).
+ */
+ bool cont_splash_enabled;
+};
+
+/**
+ * hdmi_tx_scdc_access_type() - hdmi 2.0 DDC functionalities.
+ */
+enum hdmi_tx_scdc_access_type {
+ HDMI_TX_SCDC_SCRAMBLING_STATUS,
+ HDMI_TX_SCDC_SCRAMBLING_ENABLE,
+ HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+ HDMI_TX_SCDC_CLOCK_DET_STATUS,
+ HDMI_TX_SCDC_CH0_LOCK_STATUS,
+ HDMI_TX_SCDC_CH1_LOCK_STATUS,
+ HDMI_TX_SCDC_CH2_LOCK_STATUS,
+ HDMI_TX_SCDC_CH0_ERROR_COUNT,
+ HDMI_TX_SCDC_CH1_ERROR_COUNT,
+ HDMI_TX_SCDC_CH2_ERROR_COUNT,
+ HDMI_TX_SCDC_READ_ENABLE,
+ HDMI_TX_SCDC_MAX,
+};
+
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_MHZ_TO_HZ 1000000
+#define HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO 2
+#define HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO 1
+
+#define HDMI_GEN_PKT_CTRL_CLR_MASK 0x3f0007
+
+/* for AVI program */
+#define HDMI_AVI_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_AVI_INFOFRAME_SIZE)
+#define HDMI_VS_INFOFRAME_BUFFER_SIZE (HDMI_INFOFRAME_HEADER_SIZE + 6)
+
+#define LEFT_SHIFT_BYTE(x) ((x) << 8)
+#define LEFT_SHIFT_WORD(x) ((x) << 16)
+#define LEFT_SHIFT_24BITS(x) ((x) << 24)
+
+/* Maximum pixel clock rates for hdmi tx */
+#define HDMI_DEFAULT_MAX_PCLK_RATE 148500
+#define HDMI_TX_3_MAX_PCLK_RATE 297000
+#define HDMI_TX_4_MAX_PCLK_RATE 600000
+/**
+ * hdmi_tx_ddc_timer_type() - hdmi DDC timer functionalities.
+ */
+enum hdmi_tx_ddc_timer_type {
+ HDMI_TX_DDC_TIMER_HDCP2P2_RD_MSG,
+ HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS,
+ HDMI_TX_DDC_TIMER_UPDATE_FLAGS,
+ HDMI_TX_DDC_TIMER_STATUS_FLAGS,
+ HDMI_TX_DDC_TIMER_CED,
+ HDMI_TX_DDC_TIMER_MAX,
+ };
+
+#ifdef CONFIG_DRM_SDE_HDMI
+/**
+ * sde_hdmi_get_num_of_displays() - returns number of display devices
+ * supported.
+ *
+ * Return: number of displays.
+ */
+u32 sde_hdmi_get_num_of_displays(void);
+
+/**
+ * sde_hdmi_get_displays() - returns the display list that's available.
+ * @display_array: Pointer to display list
+ * @max_display_count: Number of maximum displays in the list
+ *
+ * Return: number of available displays.
+ */
+int sde_hdmi_get_displays(void **display_array, u32 max_display_count);
+
+/**
+ * sde_hdmi_connector_pre_deinit()- perform additional deinitialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_set_top_ctl()- set display topology control property
+ * @connector: Pointer to drm connector structure
+ * @adj_mode: adjusted mode
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_set_top_ctl(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode, void *display);
+
+/**
+ * sde_hdmi_connector_post_init()- perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to sde connector info structure
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * sde_hdmi_connector_detect()- determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ *
+ * Return: error code
+ */
+enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display);
+
+/**
+ * sde_hdmi_core_enable()- turn on clk and pwr for hdmi core
+ * @sde_hdmi: Pointer to sde_hdmi structure
+ *
+ * Return: error code
+ */
+int sde_hdmi_core_enable(struct sde_hdmi *sde_hdmi);
+
+/**
+ * sde_hdmi_core_disable()- turn off clk and pwr for hdmi core
+ * @sde_hdmi: Pointer to sde_hdmi structure
+ *
+ * Return: none
+ */
+void sde_hdmi_core_disable(struct sde_hdmi *sde_hdmi);
+
+/**
+ * sde_hdmi_connector_get_modes - add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+
+ * Returns: Number of modes added
+ */
+int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * sde_hdmi_mode_valid - determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ *
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status sde_hdmi_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+/**
+ * sde_hdmi_dev_init() - Initializes the display device
+ * @display: Handle to the display.
+ *
+ * Initialization will acquire references to the resources required for the
+ * display hardware to function.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_init(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_dev_deinit() - Desinitializes the display device
+ * @display: Handle to the display.
+ *
+ * All the resources acquired during device init will be released.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_dev_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_drm_init() - initializes DRM objects for the display device.
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ * display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc);
+
+/**
+ * sde_hdmi_drm_deinit() - destroys DRM objects assosciated with the display
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_drm_deinit(struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_get_info() - returns the display properties
+ * @display: Handle to the display.
+ * @info: Pointer to the structure where info is stored.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display);
+
+/**
+ * sde_hdmi_set_property() - set the connector properties
+ * @connector: Handle to the connector.
+ * @state: Handle to the connector state.
+ * @property_index: property index.
+ * @value: property value.
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display);
+/**
+ * sde_hdmi_bridge_power_on -- A wrapper of _sde_hdmi_bridge_power_on.
+ * @bridge: Handle to the drm bridge.
+ *
+ * Return: void.
+ */
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge);
+
+/**
+ * sde_hdmi_get_property() - get the connector properties
+ * @connector: Handle to the connector.
+ * @state: Handle to the connector state.
+ * @property_index: property index.
+ * @value: property value.
+ * @display: Handle to the display.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_get_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t *value,
+ void *display);
+
+/**
+ * sde_hdmi_bridge_init() - init sde hdmi bridge
+ * @hdmi: Handle to the hdmi.
+ * @display: Handle to the sde_hdmi
+ *
+ * Return: struct drm_bridge *.
+ */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi,
+ struct sde_hdmi *display);
+
+/**
+ * sde_hdmi_set_mode() - Set HDMI mode API.
+ * @hdmi: Handle to the hdmi.
+ * @power_on: Power on/off request.
+ *
+ * Return: void.
+ */
+void sde_hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+
+/**
+ * sde_hdmi_scdc_read() - hdmi 2.0 ddc read API.
+ * @hdmi: Handle to the hdmi.
+ * @data_type: DDC data type, refer to enum hdmi_tx_scdc_access_type.
+ * @val: Read back value.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_scdc_read(struct hdmi *hdmi, u32 data_type, u32 *val);
+
+/**
+ * sde_hdmi_scdc_write() - hdmi 2.0 ddc write API.
+ * @hdmi: Handle to the hdmi.
+ * @data_type: DDC data type, refer to enum hdmi_tx_scdc_access_type.
+ * @val: Value write through DDC.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_scdc_write(struct hdmi *hdmi, u32 data_type, u32 val);
+
+/**
+ * sde_hdmi_audio_on() - enable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ * @params: audio setup parameters from codec.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params);
+
+/**
+ * sde_hdmi_audio_off() - disable hdmi audio.
+ * @hdmi: Handle to the hdmi.
+ *
+ * Return: void.
+ */
+void sde_hdmi_audio_off(struct hdmi *hdmi);
+
+/**
+ * sde_hdmi_config_avmute() - mute hdmi.
+ * @hdmi: Handle to the hdmi.
+ * @set: enable/disable avmute.
+ *
+ * Return: error code.
+ */
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set);
+
+/**
+ * sde_hdmi_notify_clients() - notify hdmi clients of the connection status.
+ * @display: Handle to sde_hdmi.
+ * @connected: connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_notify_clients(struct sde_hdmi *display, bool connected);
+
+/**
+ * sde_hdmi_ack_state() - acknowledge the connection status.
+ * @connector: Handle to the drm_connector.
+ * @status: connection status.
+ *
+ * Return: void.
+ */
+void sde_hdmi_ack_state(struct drm_connector *connector,
+ enum drm_connector_status status);
+
+bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl);
+bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl);
+bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl);
+bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl);
+int sde_hdmi_start_hdcp(struct drm_connector *connector);
+void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl);
+
+
+/*
+ * sde_hdmi_pre_kickoff - program kickoff-time features
+ * @display: Pointer to private display structure
+ * @params: Parameters for kickoff-time programming
+ * Returns: Zero on success
+ */
+int sde_hdmi_pre_kickoff(struct drm_connector *connector,
+ void *display,
+ struct msm_display_kickoff_params *params);
+
+/*
+ * sde_hdmi_mode_needs_full_range - does mode need full range
+ * quantization
+ * @display: Pointer to private display structure
+ * Returns: true or false based on mode
+ */
+bool sde_hdmi_mode_needs_full_range(void *display);
+
+/*
+ * sde_hdmi_get_csc_type - returns the CSC type to be
+ * used based on state of HDR playback
+ * @conn: Pointer to DRM connector
+ * @display: Pointer to private display structure
+ * Returns: true or false based on mode
+ */
+enum sde_csc_type sde_hdmi_get_csc_type(struct drm_connector *conn,
+ void *display);
+#else /*#ifdef CONFIG_DRM_SDE_HDMI*/
+
+static inline u32 sde_hdmi_get_num_of_displays(void)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_displays(void **display_array,
+ u32 max_display_count)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_pre_deinit(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_set_top_ctl(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode, void *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_connector_status
+sde_hdmi_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display)
+{
+ return connector_status_disconnected;
+}
+
+static inline int sde_hdmi_connector_get_modes(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline enum drm_mode_status sde_hdmi_mode_valid(
+ struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ return MODE_OK;
+}
+
+static inline int sde_hdmi_dev_init(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_dev_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+/*
+ * CONFIG_DRM_SDE_HDMI=n stubs. These live in a header, so they must be
+ * static inline — the originals were plain external definitions, which
+ * produce multiple-definition link errors in every TU that includes this
+ * header. The first stub was also misnamed (missing the sde_ prefix),
+ * leaving sde_hdmi_tx_is_hdcp_enabled() undefined in disabled builds.
+ */
+static inline bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl)
+{
+ return false;
+}
+
+static inline bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl)
+{
+ return false;
+}
+
+static inline bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl)
+{
+ return false;
+}
+
+static inline bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl)
+{
+ return false;
+}
+
+static inline int sde_hdmi_drm_init(struct sde_hdmi *display,
+ struct drm_encoder *enc)
+{
+ return 0;
+}
+
+/* Header stubs must be static inline to avoid multiple-definition errors. */
+static inline int sde_hdmi_start_hdcp(struct drm_connector *connector)
+{
+ return 0;
+}
+
+static inline void sde_hdmi_hdcp_off(struct sde_hdmi *hdmi_ctrl)
+{
+}
+
+static inline int sde_hdmi_drm_deinit(struct sde_hdmi *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_get_info(struct msm_display_info *info,
+ void *display)
+{
+ return 0;
+}
+
+static inline int sde_hdmi_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display)
+{
+ return 0;
+}
+
+static inline bool sde_hdmi_mode_needs_full_range(void *display)
+{
+ return false;
+}
+
+/* Header stub must be static inline to avoid multiple-definition errors. */
+static inline enum sde_csc_type sde_hdmi_get_csc_type(
+ struct drm_connector *conn, void *display)
+{
+ return 0;
+}
+
+#endif /*#else of CONFIG_DRM_SDE_HDMI*/
+#endif /* _SDE_HDMI_H_ */
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
new file mode 100644
index 000000000000..d6213dc0a4aa
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_audio.c
@@ -0,0 +1,357 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+#define HDMI_AUDIO_INFO_FRAME_PACKET_HEADER 0x84
+#define HDMI_AUDIO_INFO_FRAME_PACKET_VERSION 0x1
+#define HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH 0x0A
+
+#define HDMI_ACR_N_MULTIPLIER 128
+#define DEFAULT_AUDIO_SAMPLE_RATE_HZ 48000
+
+/* Supported HDMI Audio channels */
+enum hdmi_audio_channels {
+ AUDIO_CHANNEL_2 = 2,
+ AUDIO_CHANNEL_3,
+ AUDIO_CHANNEL_4,
+ AUDIO_CHANNEL_5,
+ AUDIO_CHANNEL_6,
+ AUDIO_CHANNEL_7,
+ AUDIO_CHANNEL_8,
+};
+
+/* parameters for clock regeneration */
+struct hdmi_audio_acr {
+ u32 n;
+ u32 cts;
+};
+
+enum hdmi_audio_sample_rates {
+ AUDIO_SAMPLE_RATE_32KHZ,
+ AUDIO_SAMPLE_RATE_44_1KHZ,
+ AUDIO_SAMPLE_RATE_48KHZ,
+ AUDIO_SAMPLE_RATE_88_2KHZ,
+ AUDIO_SAMPLE_RATE_96KHZ,
+ AUDIO_SAMPLE_RATE_176_4KHZ,
+ AUDIO_SAMPLE_RATE_192KHZ,
+ AUDIO_SAMPLE_RATE_MAX
+};
+
+struct sde_hdmi_audio {
+ struct hdmi *hdmi;
+ struct msm_ext_disp_audio_setup_params params;
+ u32 pclk;
+};
+
+/*
+ * Translate a sample rate in Hz into its enum hdmi_audio_sample_rates
+ * value, in place. Unrecognized rates are left untouched and logged.
+ */
+static void _sde_hdmi_audio_get_audio_sample_rate(u32 *sample_rate_hz)
+{
+ static const struct {
+ u32 hz;
+ u32 code;
+ } rate_map[] = {
+ { 32000, AUDIO_SAMPLE_RATE_32KHZ },
+ { 44100, AUDIO_SAMPLE_RATE_44_1KHZ },
+ { 48000, AUDIO_SAMPLE_RATE_48KHZ },
+ { 88200, AUDIO_SAMPLE_RATE_88_2KHZ },
+ { 96000, AUDIO_SAMPLE_RATE_96KHZ },
+ { 176400, AUDIO_SAMPLE_RATE_176_4KHZ },
+ { 192000, AUDIO_SAMPLE_RATE_192KHZ },
+ };
+ u32 rate = *sample_rate_hz;
+ u32 i;
+
+ for (i = 0; i < sizeof(rate_map) / sizeof(rate_map[0]); i++) {
+ if (rate_map[i].hz == rate) {
+ *sample_rate_hz = rate_map[i].code;
+ return;
+ }
+ }
+
+ SDE_ERROR("%d unchanged\n", rate);
+}
+
+/*
+ * Compute the Audio Clock Regeneration N/CTS pair for the given pixel
+ * clock (kHz) and audio sample rate fs (Hz), using the HDMI relation
+ * N/CTS = 128*fs/pclk, then scale toward the recommended 128*fs/N = 1000.
+ */
+static void _sde_hdmi_audio_get_acr_param(u32 pclk, u32 fs,
+ struct hdmi_audio_acr *acr)
+{
+ u32 div, mul;
+
+ if (!acr) {
+ SDE_ERROR("invalid data\n");
+ return;
+ }
+
+ /*
+ * fs == 0 would reduce acr->n to 0 below and the mul computation
+ * divides by acr->n; pclk == 0 yields a meaningless CTS. Reject
+ * both up front instead of dividing by zero.
+ */
+ if (!pclk || !fs) {
+ SDE_ERROR("invalid pclk %u or fs %u\n", pclk, fs);
+ acr->n = 0;
+ acr->cts = 0;
+ return;
+ }
+
+ /*
+ * as per HDMI specification, N/CTS = (128*fs)/pclk.
+ * get the ratio using this formula.
+ */
+ acr->n = HDMI_ACR_N_MULTIPLIER * fs;
+ acr->cts = pclk;
+
+ /* get the greatest common divisor for the ratio */
+ div = gcd(acr->n, acr->cts);
+
+ /* get the n and cts values wrt N/CTS formula */
+ acr->n /= div;
+ acr->cts /= div;
+
+ /*
+ * as per HDMI specification, 300 <= 128*fs/N <= 1500
+ * with a target of 128*fs/N = 1000. To get closest
+ * value without truncating fractional values, find
+ * the corresponding multiplier (ceiling division).
+ */
+ mul = ((HDMI_ACR_N_MULTIPLIER * fs / HDMI_KHZ_TO_HZ)
+ + (acr->n - 1)) / acr->n;
+
+ acr->n *= mul;
+ acr->cts *= mul;
+}
+
+/*
+ * Program the ACR (Audio Clock Regeneration) N/CTS registers and packet
+ * control for the current pixel clock and sample rate, then enable ACR
+ * packet transmission.
+ */
+static void _sde_hdmi_audio_acr_enable(struct sde_hdmi_audio *audio)
+{
+ struct hdmi_audio_acr acr;
+ struct msm_ext_disp_audio_setup_params *params;
+ u32 pclk, layout, multiplier = 1, sample_rate;
+ u32 acr_pkt_ctl, aud_pkt_ctl2, acr_reg_cts, acr_reg_n;
+ struct hdmi *hdmi;
+
+ hdmi = audio->hdmi;
+ params = &audio->params;
+ pclk = audio->pclk;
+ sample_rate = params->sample_rate_hz;
+
+ /* derive N/CTS from pclk/fs, then map fs (Hz) to its enum code */
+ _sde_hdmi_audio_get_acr_param(pclk, sample_rate, &acr);
+ _sde_hdmi_audio_get_audio_sample_rate(&sample_rate);
+
+ /* layout 0 = 2-channel, layout 1 = multi-channel */
+ layout = (params->num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+
+ SDE_DEBUG("n=%u, cts=%u, layout=%u\n", acr.n, acr.cts, layout);
+
+ /* AUDIO_PRIORITY | SOURCE */
+ acr_pkt_ctl = BIT(31) | BIT(8);
+
+ /*
+ * Select the ACR register pair and the SELECT field (bits 5:4)
+ * for the base rate family (32k / 44.1k / 48k). Higher rates
+ * reuse their base family's registers with N pre-divided here
+ * and the N_MULTIPLE field set below. The CTS value occupies
+ * the register's upper bits, hence the << 12.
+ */
+ switch (sample_rate) {
+ case AUDIO_SAMPLE_RATE_44_1KHZ:
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_48KHZ:
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_192KHZ:
+ /* 4x the 48kHz base rate */
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_176_4KHZ:
+ /* 4x the 44.1kHz base rate */
+ multiplier = 4;
+ acr.n >>= 2;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ case AUDIO_SAMPLE_RATE_96KHZ:
+ /* 2x the 48kHz base rate */
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x3 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_48_0;
+ acr_reg_n = HDMI_ACR_48_1;
+ break;
+ case AUDIO_SAMPLE_RATE_88_2KHZ:
+ /* 2x the 44.1kHz base rate */
+ multiplier = 2;
+ acr.n >>= 1;
+
+ acr_pkt_ctl |= 0x2 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_44_0;
+ acr_reg_n = HDMI_ACR_44_1;
+ break;
+ default:
+ /* 32kHz family, also the fallback for unknown rates */
+ multiplier = 1;
+
+ acr_pkt_ctl |= 0x1 << 4;
+ acr.cts <<= 12;
+
+ acr_reg_cts = HDMI_ACR_32_0;
+ acr_reg_n = HDMI_ACR_32_1;
+ break;
+ }
+
+ aud_pkt_ctl2 = BIT(0) | (layout << 1);
+
+ /* N_MULTIPLE(multiplier) */
+ acr_pkt_ctl &= ~(7 << 16);
+ acr_pkt_ctl |= (multiplier & 0x7) << 16;
+
+ /* SEND | CONT */
+ acr_pkt_ctl |= BIT(0) | BIT(1);
+
+ /* write CTS and N before enabling packet transmission */
+ hdmi_write(hdmi, acr_reg_cts, acr.cts);
+ hdmi_write(hdmi, acr_reg_n, acr.n);
+ hdmi_write(hdmi, HDMI_ACR_PKT_CTRL, acr_pkt_ctl);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pkt_ctl2);
+}
+
+/* Enable or disable ACR packet generation. */
+static void _sde_hdmi_audio_acr_setup(struct sde_hdmi_audio *audio, bool on)
+{
+ if (!on) {
+ hdmi_write(audio->hdmi, HDMI_ACR_PKT_CTRL, 0);
+ return;
+ }
+
+ _sde_hdmi_audio_acr_enable(audio);
+}
+
+/*
+ * Build and program the Audio InfoFrame registers, or disable InfoFrame
+ * transmission when @enabled is false. On disable, the info/debug
+ * registers are written with zeros and only the transmission-enable
+ * bits are cleared in HDMI_INFOFRAME_CTRL0.
+ */
+static void _sde_hdmi_audio_infoframe_setup(struct sde_hdmi_audio *audio,
+ bool enabled)
+{
+ struct hdmi *hdmi = audio->hdmi;
+ u32 channels, channel_allocation, level_shift, down_mix, layout;
+ u32 hdmi_debug_reg = 0, audio_info_0_reg = 0, audio_info_1_reg = 0;
+ u32 audio_info_ctrl_reg, aud_pck_ctrl_2_reg;
+ u32 check_sum, sample_present;
+
+ /* read-modify-write: clear the InfoFrame transmit-enable bits */
+ audio_info_ctrl_reg = hdmi_read(hdmi, HDMI_INFOFRAME_CTRL0);
+ audio_info_ctrl_reg &= ~0xF0;
+
+ if (!enabled)
+ goto end;
+
+ /* InfoFrame channel-count field is (channels - 1) */
+ channels = audio->params.num_of_channels - 1;
+ channel_allocation = audio->params.channel_allocation;
+ level_shift = audio->params.level_shift;
+ down_mix = audio->params.down_mix;
+ sample_present = audio->params.sample_present;
+
+ /* layout 0 = 2-channel, layout 1 = multi-channel */
+ layout = (audio->params.num_of_channels == AUDIO_CHANNEL_2) ? 0 : 1;
+ aud_pck_ctrl_2_reg = BIT(0) | (layout << 1);
+ hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL2, aud_pck_ctrl_2_reg);
+
+ audio_info_1_reg |= channel_allocation & 0xFF;
+ audio_info_1_reg |= ((level_shift & 0xF) << 11);
+ audio_info_1_reg |= ((down_mix & 0x1) << 15);
+
+ /*
+ * InfoFrame checksum: header + version + length + payload bytes,
+ * two's-complemented so the full frame sums to 0 mod 256.
+ */
+ check_sum = 0;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_HEADER;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_VERSION;
+ check_sum += HDMI_AUDIO_INFO_FRAME_PACKET_LENGTH;
+ check_sum += channels;
+ check_sum += channel_allocation;
+ check_sum += (level_shift & 0xF) << 3 | (down_mix & 0x1) << 7;
+ check_sum &= 0xFF;
+ check_sum = (u8) (256 - check_sum);
+
+ audio_info_0_reg |= check_sum & 0xFF;
+ audio_info_0_reg |= ((channels & 0x7) << 8);
+
+ /* Enable Audio InfoFrame Transmission */
+ audio_info_ctrl_reg |= 0xF0;
+
+ if (layout) {
+ /* Set the Layout bit */
+ hdmi_debug_reg |= BIT(4);
+
+ /* Set the Sample Present bits */
+ hdmi_debug_reg |= sample_present & 0xF;
+ }
+end:
+ hdmi_write(hdmi, HDMI_DEBUG, hdmi_debug_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO0, audio_info_0_reg);
+ hdmi_write(hdmi, HDMI_AUDIO_INFO1, audio_info_1_reg);
+ hdmi_write(hdmi, HDMI_INFOFRAME_CTRL0, audio_info_ctrl_reg);
+}
+
+/*
+ * Enable HDMI audio: program ACR and the Audio InfoFrame from @params.
+ * Falls back to 48kHz stereo when the codec supplies no channel count.
+ * Returns 0 on success, -ENODEV on invalid arguments.
+ */
+int sde_hdmi_audio_on(struct hdmi *hdmi,
+ struct msm_ext_disp_audio_setup_params *params)
+{
+ struct sde_hdmi_audio audio;
+ int rc = 0;
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ /* params was dereferenced unchecked before; reject NULL explicitly */
+ if (!params) {
+ SDE_ERROR("invalid audio params\n");
+ rc = -ENODEV;
+ goto end;
+ }
+
+ audio.pclk = hdmi->pixclock;
+ audio.params = *params;
+ audio.hdmi = hdmi;
+
+ if (!audio.params.num_of_channels) {
+ audio.params.sample_rate_hz = DEFAULT_AUDIO_SAMPLE_RATE_HZ;
+ audio.params.num_of_channels = AUDIO_CHANNEL_2;
+ }
+
+ _sde_hdmi_audio_acr_setup(&audio, true);
+ _sde_hdmi_audio_infoframe_setup(&audio, true);
+
+ SDE_DEBUG("HDMI Audio: Enabled\n");
+end:
+ return rc;
+}
+
+/* Disable HDMI audio: stop InfoFrame transmission and ACR packets. */
+void sde_hdmi_audio_off(struct hdmi *hdmi)
+{
+ /*
+ * Zero-initialize so the disable helpers never see garbage params
+ * (they do not read them today, but this keeps that safe). The
+ * dead 'rc' local of the original void function is dropped.
+ */
+ struct sde_hdmi_audio audio = {0};
+
+ if (!hdmi) {
+ SDE_ERROR("invalid HDMI Ctrl\n");
+ return;
+ }
+
+ audio.hdmi = hdmi;
+
+ _sde_hdmi_audio_infoframe_setup(&audio, false);
+ _sde_hdmi_audio_acr_setup(&audio, false);
+
+ SDE_DEBUG("HDMI Audio: Disabled\n");
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
new file mode 100644
index 000000000000..b874d8dbf454
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_bridge.c
@@ -0,0 +1,1088 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "sde_hdmi.h"
+#include "hdmi.h"
+
+/*
+ * Add these register definitions to support the latest chipsets. These
+ * are derived from hdmi.xml.h and are going to be replaced by a chipset
+ * based mask approach.
+ */
+/*
+ * Timing register field packers.  Each helper shifts a raw timing value
+ * into its register field and truncates it with a 13-bit mask (0x1fff),
+ * widened from the older hdmi.xml.h masks to support larger resolutions
+ * on newer chipsets.  The *__SHIFT constants come from hdmi.xml.h.
+ */
+#define SDE_HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_ACTIVE_HSYNC_START(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) &
+		SDE_HDMI_ACTIVE_HSYNC_START__MASK;
+}
+#define SDE_HDMI_ACTIVE_HSYNC_END__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_ACTIVE_HSYNC_END(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_HSYNC_END__SHIFT) &
+		SDE_HDMI_ACTIVE_HSYNC_END__MASK;
+}
+
+#define SDE_HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_ACTIVE_VSYNC_START(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) &
+		SDE_HDMI_ACTIVE_VSYNC_START__MASK;
+}
+#define SDE_HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_ACTIVE_VSYNC_END(uint32_t val)
+{
+	return ((val) << HDMI_ACTIVE_VSYNC_END__SHIFT) &
+		SDE_HDMI_ACTIVE_VSYNC_END__MASK;
+}
+
+#define SDE_HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
+{
+	return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) &
+		SDE_HDMI_VSYNC_ACTIVE_F2_START__MASK;
+}
+#define SDE_HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
+{
+	return ((val) << HDMI_VSYNC_ACTIVE_F2_END__SHIFT) &
+		SDE_HDMI_VSYNC_ACTIVE_F2_END__MASK;
+}
+
+#define SDE_HDMI_TOTAL_H_TOTAL__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_TOTAL_H_TOTAL(uint32_t val)
+{
+	return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) &
+		SDE_HDMI_TOTAL_H_TOTAL__MASK;
+}
+
+#define SDE_HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000
+static inline uint32_t SDE_HDMI_TOTAL_V_TOTAL(uint32_t val)
+{
+	return ((val) << HDMI_TOTAL_V_TOTAL__SHIFT) &
+		SDE_HDMI_TOTAL_V_TOTAL__MASK;
+}
+
+#define SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff
+static inline uint32_t SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
+{
+	return ((val) << HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT) &
+		SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK;
+}
+
+/* Bridge wrapper tying the DRM bridge object to the msm HDMI core state */
+struct sde_hdmi_bridge {
+	struct drm_bridge base;    /* embedded bridge; recovered via container_of */
+	struct hdmi *hdmi;         /* msm HDMI controller handle */
+	struct sde_hdmi *display;  /* SDE HDMI display context */
+};
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+#define HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ 340000
+#define HDMI_TX_SCRAMBLER_TIMEOUT_MSEC 200
+
+
+#define HDMI_SPD_INFOFRAME_BUFFER_SIZE \
+ (HDMI_INFOFRAME_HEADER_SIZE + HDMI_SPD_INFOFRAME_SIZE)
+#define HDMI_DEFAULT_VENDOR_NAME "unknown"
+#define HDMI_DEFAULT_PRODUCT_NAME "msm"
+#define HDMI_AVI_IFRAME_LINE_NUMBER 1
+#define HDMI_VENDOR_IFRAME_LINE_NUMBER 3
+
+/*
+ * Intentionally empty: the bridge is devm_kzalloc'd in sde_hdmi_bridge_init()
+ * so its memory is released automatically with the device.
+ */
+void _sde_hdmi_bridge_destroy(struct drm_bridge *bridge)
+{
+}
+
+/* Reset the connector's cached HDR EDID capabilities to "unsupported" */
+static void sde_hdmi_clear_hdr_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->hdr_eotf = SDE_HDMI_HDR_EOTF_NONE;
+	connector->hdr_metadata_type_one = false;
+	connector->hdr_max_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+	connector->hdr_avg_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+	connector->hdr_min_luminance = SDE_HDMI_HDR_LUMINANCE_NONE;
+	connector->hdr_supported = false;
+}
+
+/* Reset the connector's cached colorimetry data-block info */
+static void sde_hdmi_clear_colorimetry(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->color_enc_fmt = 0;
+}
+
+/* Reset the connector fields parsed from the HDMI (1.x) VSDB EDID block */
+static void sde_hdmi_clear_vsdb_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->max_tmds_clock = 0;
+	connector->latency_present[0] = false;
+	connector->latency_present[1] = false;
+	/*
+	 * NOTE(review): video/audio_latency look like numeric latency values
+	 * but are cleared with 'false' (0) — harmless, though '0' would be
+	 * clearer; confirm field types in the connector definition.
+	 */
+	connector->video_latency[0] = false;
+	connector->video_latency[1] = false;
+	connector->audio_latency[0] = false;
+	connector->audio_latency[1] = false;
+}
+
+/* Reset the connector fields parsed from the HDMI-Forum VSDB EDID block */
+static void sde_hdmi_clear_hf_vsdb_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->max_tmds_char = 0;
+	connector->scdc_present = false;
+	connector->rr_capable = false;
+	connector->supports_scramble = false;
+	connector->flags_3d = 0;
+}
+
+/* Reset the connector fields parsed from the Video Capability Data Block */
+static void sde_hdmi_clear_vcdb_info(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct drm_connector *connector = hdmi->connector;
+
+	connector->pt_scan_info = 0;
+	connector->it_scan_info = 0;
+	connector->ce_scan_info = 0;
+	connector->rgb_qs = false;
+	connector->yuv_qs = false;
+}
+
+/* Clear all cached vendor-specific data block (VSDB) info on the connector */
+static void sde_hdmi_clear_vsdbs(struct drm_bridge *bridge)
+{
+	/* Clear fields of HDMI VSDB */
+	sde_hdmi_clear_vsdb_info(bridge);
+	/* Clear fields of HDMI forum VSDB */
+	sde_hdmi_clear_hf_vsdb_info(bridge);
+}
+
+/*
+ * _sde_hdmi_bridge_power_on() - enable HDMI core, regulators and clocks
+ * @bridge: DRM bridge handle
+ *
+ * Resources are acquired in order (core -> regulators -> clocks) and, on
+ * failure, released in strict reverse order.  The previous unwind paths
+ * disabled ALL clocks/regulators — including ones that were never enabled
+ * after a partial failure — and disabled the HDMI core even when
+ * sde_hdmi_core_enable() itself failed (or was skipped because the bridge
+ * was already powered).  Return: 0 on success, negative errno on failure.
+ */
+static int _sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
+	int i, ret = 0;
+	struct sde_hdmi *display = sde_hdmi_bridge->display;
+	bool core_enabled = false;
+
+	if ((display->non_pluggable) && (!hdmi->power_on)) {
+		ret = sde_hdmi_core_enable(display);
+		if (ret) {
+			SDE_ERROR("failed to enable HDMI core (%d)\n", ret);
+			goto exit;
+		}
+		core_enabled = true;
+	}
+
+	for (i = 0; i < config->pwr_reg_cnt; i++) {
+		ret = regulator_enable(hdmi->pwr_regs[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable pwr regulator: %s (%d)\n",
+				config->pwr_reg_names[i], ret);
+			goto err_regulator_enable;
+		}
+	}
+
+	if (config->pwr_clk_cnt > 0 && hdmi->pixclock) {
+		DRM_DEBUG("pixclock: %lu", hdmi->pixclock);
+		ret = clk_set_rate(hdmi->pwr_clks[0], hdmi->pixclock);
+		/* a wrong rate is survivable; enabling the clock is not */
+		if (ret) {
+			pr_warn("failed to set pixclock: %s %ld (%d)\n",
+				config->pwr_clk_names[0],
+				hdmi->pixclock, ret);
+		}
+	}
+
+	for (i = 0; i < config->pwr_clk_cnt; i++) {
+		ret = clk_prepare_enable(hdmi->pwr_clks[i]);
+		if (ret) {
+			SDE_ERROR("failed to enable pwr clk: %s (%d)\n",
+				config->pwr_clk_names[i], ret);
+			goto err_prepare_enable;
+		}
+	}
+	return 0;
+
+err_prepare_enable:
+	/* unwind only the clocks that were successfully enabled */
+	while (--i >= 0)
+		clk_disable_unprepare(hdmi->pwr_clks[i]);
+	i = config->pwr_reg_cnt;
+err_regulator_enable:
+	/* unwind only the regulators that were successfully enabled */
+	while (--i >= 0)
+		regulator_disable(hdmi->pwr_regs[i]);
+	if (core_enabled)
+		sde_hdmi_core_disable(display);
+exit:
+	return ret;
+}
+
+/*
+ * _sde_hdmi_bridge_power_off() - disable clocks, regulators and HDMI core
+ * @bridge: DRM bridge handle
+ *
+ * Releases resources in reverse order of _sde_hdmi_bridge_power_on().
+ * Return: 0, or the last regulator_disable() error (best effort: all
+ * regulators are still attempted).
+ */
+static int _sde_hdmi_bridge_power_off(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	const struct hdmi_platform_config *config = hdmi->config;
+	struct sde_hdmi *display = sde_hdmi_bridge->display;
+	int i, ret = 0;
+
+	/* Wait for vsync */
+	msleep(20);
+
+	for (i = 0; i < config->pwr_clk_cnt; i++)
+		clk_disable_unprepare(hdmi->pwr_clks[i]);
+
+	for (i = 0; i < config->pwr_reg_cnt; i++) {
+		ret = regulator_disable(hdmi->pwr_regs[i]);
+		if (ret)
+			SDE_ERROR("failed to disable pwr regulator: %s (%d)\n",
+				config->pwr_reg_names[i], ret);
+	}
+
+	if (display->non_pluggable)
+		sde_hdmi_core_disable(display);
+
+	return ret;
+}
+
+/*
+ * _sde_hdmi_bridge_ddc_clear_irq() - ack DDC interrupts and wait for the
+ * DDC block to become free
+ * @hdmi: HDMI controller handle
+ * @what: caller tag used in the timeout error message
+ *
+ * Polls the DDC hardware-status register (up to 100 * 100us) until neither
+ * software nor hardware owns the bus.  Return: 0 when free, -ETIMEDOUT.
+ */
+static int _sde_hdmi_bridge_ddc_clear_irq(struct hdmi *hdmi,
+	char *what)
+{
+	u32 ddc_int_ctrl, ddc_status, in_use, timeout;
+	u32 sw_done_mask = BIT(2);
+	u32 sw_done_ack = BIT(1);
+	u32 in_use_by_sw = BIT(0);
+	u32 in_use_by_hw = BIT(1);
+
+	/* clear and enable interrupts */
+	ddc_int_ctrl = sw_done_mask | sw_done_ack;
+
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL, ddc_int_ctrl);
+
+	/* wait until DDC HW is free */
+	timeout = 100;
+	do {
+		ddc_status = hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS);
+		in_use = ddc_status & (in_use_by_sw | in_use_by_hw);
+		if (in_use) {
+			SDE_DEBUG("ddc is in use by %s, timeout(%d)\n",
+				ddc_status & in_use_by_sw ? "sw" : "hw",
+				timeout);
+			udelay(100);
+		}
+	} while (in_use && --timeout);
+
+	if (!timeout) {
+		SDE_ERROR("%s: timedout\n", what);
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
+
+/*
+ * _sde_hdmi_bridge_scrambler_ddc_check_status() - read and clear the
+ * scrambler-status DDC error bits
+ * @hdmi: HDMI controller handle
+ *
+ * Each error bit has a companion write-1-to-clear ack bit one position up;
+ * errors are accumulated and all acks are written back in one go.
+ * Return: 0 or the last error seen (-ECONNABORTED/-ETIMEDOUT/-EIO).
+ */
+static int _sde_hdmi_bridge_scrambler_ddc_check_status(struct hdmi *hdmi)
+{
+	int rc = 0;
+	u32 reg_val;
+
+	/* check for errors and clear status */
+	reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS);
+	if (reg_val & BIT(4)) {
+		SDE_ERROR("ddc aborted\n");
+		reg_val |= BIT(5);
+		rc = -ECONNABORTED;
+	}
+
+	if (reg_val & BIT(8)) {
+		SDE_ERROR("timed out\n");
+		reg_val |= BIT(9);
+		rc = -ETIMEDOUT;
+	}
+
+	if (reg_val & BIT(12)) {
+		SDE_ERROR("NACK0\n");
+		reg_val |= BIT(13);
+		rc = -EIO;
+	}
+	if (reg_val & BIT(14)) {
+		SDE_ERROR("NACK1\n");
+		reg_val |= BIT(15);
+		rc = -EIO;
+	}
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS, reg_val);
+
+	return rc;
+}
+
+/*
+ * _sde_hdmi_bridge_scrambler_status_timer_setup() - arm the hardware DDC
+ * poller that verifies the sink sets its scrambler-status bit
+ * @hdmi:          HDMI controller handle
+ * @timeout_hsync: poll timeout expressed in hsync lines
+ *
+ * Programs the timeout, enables the status/failure interrupts, kicks the
+ * hardware DDC engine, waits the HDMI 2.0 mandated 200ms, then collects
+ * and clears any DDC errors.  Register write order follows the hardware
+ * programming sequence — do not reorder.  Return: DDC status check result.
+ */
+static int _sde_hdmi_bridge_scrambler_status_timer_setup(struct hdmi *hdmi,
+	u32 timeout_hsync)
+{
+	u32 reg_val;
+	int rc;
+	struct sde_connector *c_conn;
+	struct drm_connector *connector = NULL;
+	struct sde_hdmi *display;
+
+	if (!hdmi) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+	connector = hdmi->connector;
+	c_conn = to_sde_connector(hdmi->connector);
+	display = (struct sde_hdmi *)c_conn->display;
+
+	_sde_hdmi_bridge_ddc_clear_irq(hdmi, "scrambler");
+
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL,
+		timeout_hsync);
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2,
+		timeout_hsync);
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL5);
+	reg_val |= BIT(10);
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL5, reg_val);
+
+	reg_val = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL2);
+	/* Trigger interrupt if scrambler status is 0 or DDC failure */
+	reg_val |= BIT(10);
+	reg_val &= ~(BIT(15) | BIT(16));
+	reg_val |= BIT(16);
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val);
+
+	/* Enable DDC access */
+	reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL);
+
+	reg_val &= ~(BIT(8) | BIT(9));
+	reg_val |= BIT(8);
+	hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val);
+
+	/* WAIT for 200ms as per HDMI 2.0 standard for sink to respond */
+	msleep(200);
+
+	/* clear the scrambler status */
+	rc = _sde_hdmi_bridge_scrambler_ddc_check_status(hdmi);
+	if (rc)
+		SDE_ERROR("scrambling ddc error %d\n", rc);
+
+	_sde_hdmi_scrambler_ddc_disable((void *)display);
+
+	return rc;
+}
+
+/*
+ * _sde_hdmi_bridge_setup_ddc_timers() - dispatch a DDC timer setup request
+ * @hdmi:            HDMI controller handle
+ * @type:            one of the HDMI_TX_DDC_TIMER_* identifiers
+ * @to_in_num_lines: timeout expressed in hsync lines
+ *
+ * Only the scrambler-status timer is implemented today.
+ * Return: 0 on success, -EINVAL for out-of-range or unhandled types.
+ */
+static int _sde_hdmi_bridge_setup_ddc_timers(struct hdmi *hdmi,
+	u32 type, u32 to_in_num_lines)
+{
+	/* reject identifiers beyond the known range */
+	if (type >= HDMI_TX_DDC_TIMER_MAX) {
+		SDE_ERROR("Invalid timer type %d\n", type);
+		return -EINVAL;
+	}
+
+	/* valid identifier, but no handler wired up yet */
+	if (type != HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS) {
+		SDE_ERROR("%d type not supported\n", type);
+		return -EINVAL;
+	}
+
+	_sde_hdmi_bridge_scrambler_status_timer_setup(hdmi, to_in_num_lines);
+
+	return 0;
+}
+
+/*
+ * _sde_hdmi_bridge_setup_scrambler() - enable or disable TMDS scrambling
+ * @hdmi: HDMI controller handle
+ * @mode: display mode being set
+ *
+ * Scrambling is mandatory above 340MHz TMDS (HDMI 2.0) and optional below
+ * when the sink advertises support.  The sequence (SCDC clock-ratio write,
+ * controller SCRAMBLER_EN, SCDC scrambling-enable, then the hardware
+ * status poller) follows the spec ordering — do not reorder.
+ * Return: 0 on success or when the TX is too old to scramble; negative
+ * errno on SCDC/DDC failure.
+ */
+static int _sde_hdmi_bridge_setup_scrambler(struct hdmi *hdmi,
+	struct drm_display_mode *mode)
+{
+	int rc = 0;
+	int timeout_hsync;
+	u32 reg_val = 0;
+	u32 tmds_clock_ratio = 0;
+	bool scrambler_on = false;
+	struct sde_connector *c_conn;
+	struct drm_connector *connector = NULL;
+	struct sde_hdmi *display;
+
+	if (!hdmi || !mode) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+	connector = hdmi->connector;
+	c_conn = to_sde_connector(hdmi->connector);
+	display = (struct sde_hdmi *)c_conn->display;
+
+	/* Read HDMI version (major version lives in bits [31:28]) */
+	reg_val = hdmi_read(hdmi, REG_HDMI_VERSION);
+	reg_val = (reg_val & 0xF0000000) >> 28;
+	/* Scrambling is supported from HDMI TX 4.0 */
+	if (reg_val < HDMI_TX_SCRAMBLER_MIN_TX_VERSION) {
+		DRM_INFO("scrambling not supported by tx\n");
+		return 0;
+	}
+
+	/* use actual clock instead of mode clock */
+	if (hdmi->pixclock >
+		HDMI_TX_SCRAMBLER_THRESHOLD_RATE_KHZ * HDMI_KHZ_TO_HZ) {
+		scrambler_on = true;
+		tmds_clock_ratio = 1;
+	} else {
+		tmds_clock_ratio = 0;
+		scrambler_on = connector->supports_scramble;
+	}
+
+	DRM_INFO("scrambler %s\n", scrambler_on ? "on" : "off");
+
+	if (scrambler_on) {
+		rc = sde_hdmi_scdc_write(hdmi,
+			HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+			tmds_clock_ratio);
+		if (rc) {
+			SDE_ERROR("TMDS CLK RATIO ERR\n");
+			return rc;
+		}
+
+		reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+		reg_val |= BIT(28); /* Set SCRAMBLER_EN bit */
+
+		hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+
+		rc = sde_hdmi_scdc_write(hdmi,
+			HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x1);
+		if (rc) {
+			SDE_ERROR("failed to enable scrambling\n");
+			return rc;
+		}
+
+		/*
+		 * Setup hardware to periodically check for scrambler
+		 * status bit on the sink. Sink should set this bit
+		 * with in 200ms after scrambler is enabled.
+		 */
+		timeout_hsync = _sde_hdmi_get_timeout_in_hysnc(
+			(void *)display,
+			HDMI_TX_SCRAMBLER_TIMEOUT_MSEC);
+
+		if (timeout_hsync <= 0) {
+			SDE_ERROR("err in timeout hsync calc\n");
+			timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+		}
+		SDE_DEBUG("timeout for scrambling en: %d hsyncs\n",
+			timeout_hsync);
+
+		rc = _sde_hdmi_bridge_setup_ddc_timers(hdmi,
+			HDMI_TX_DDC_TIMER_SCRAMBLER_STATUS, timeout_hsync);
+	} else {
+		/* reset tmds clock ratio */
+		rc = sde_hdmi_scdc_write(hdmi,
+			HDMI_TX_SCDC_TMDS_BIT_CLOCK_RATIO_UPDATE,
+			tmds_clock_ratio);
+		/* scdc write can fail if sink doesn't support SCDC */
+		if (rc && connector->scdc_present)
+			SDE_ERROR("SCDC present, TMDS clk ratio err\n");
+
+		sde_hdmi_scdc_write(hdmi, HDMI_TX_SCDC_SCRAMBLING_ENABLE, 0x0);
+		reg_val = hdmi_read(hdmi, REG_HDMI_CTRL);
+		reg_val &= ~BIT(28); /* Unset SCRAMBLER_EN bit */
+		hdmi_write(hdmi, REG_HDMI_CTRL, reg_val);
+	}
+	return rc;
+}
+
+/*
+ * _sde_hdmi_bridge_setup_deep_color() - program 30-bit deep color on/off
+ * @hdmi: HDMI controller handle
+ *
+ * Driven by display->dc_enable (set in mode_set from the chosen format).
+ * Both paths are read-modify-write on HDMI_CTRL then VBI_PKT_CTRL — keep
+ * that order.
+ */
+static void _sde_hdmi_bridge_setup_deep_color(struct hdmi *hdmi)
+{
+	struct drm_connector *connector = hdmi->connector;
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+	u32 hdmi_ctrl_reg, vbi_pkt_reg;
+
+	SDE_DEBUG("Deep Color: %s\n", display->dc_enable ? "On" : "Off");
+
+	if (display->dc_enable) {
+		hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+		/* GC CD override */
+		hdmi_ctrl_reg |= BIT(27);
+
+		/* enable deep color for RGB888/YUV444/YUV420 30 bits */
+		hdmi_ctrl_reg |= BIT(24);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+		/* Enable GC_CONT and GC_SEND in General Control Packet
+		 * (GCP) register so that deep color data is
+		 * transmitted to the sink on every frame, allowing
+		 * the sink to decode the data correctly.
+		 *
+		 * GC_CONT: 0x1 - Send GCP on every frame
+		 * GC_SEND: 0x1 - Enable GCP Transmission
+		 */
+		vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+		vbi_pkt_reg |= BIT(5) | BIT(4);
+		hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+	} else {
+		hdmi_ctrl_reg = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+		/* disable GC CD override */
+		hdmi_ctrl_reg &= ~BIT(27);
+		/* disable deep color for RGB888/YUV444/YUV420 30 bits */
+		hdmi_ctrl_reg &= ~BIT(24);
+		hdmi_write(hdmi, REG_HDMI_CTRL, hdmi_ctrl_reg);
+
+		/* disable the GC packet sending */
+		vbi_pkt_reg = hdmi_read(hdmi, REG_HDMI_VBI_PKT_CTRL);
+		vbi_pkt_reg &= ~(BIT(5) | BIT(4));
+		hdmi_write(hdmi, REG_HDMI_VBI_PKT_CTRL, vbi_pkt_reg);
+	}
+}
+
+/*
+ * _sde_hdmi_bridge_pre_enable() - power the link up before CRTC enable
+ * @bridge: DRM bridge handle
+ *
+ * Powers the bridge, configures scrambling (unless DDC is skipped), powers
+ * the PHY, turns the controller on, kicks HDCP, and notifies audio clients
+ * (deferred until the codec is ready).
+ */
+static void _sde_hdmi_bridge_pre_enable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct hdmi_phy *phy = hdmi->phy;
+	struct sde_hdmi *display = sde_hdmi_bridge->display;
+
+	DRM_DEBUG("power up");
+
+	if (!hdmi->power_on) {
+		if (_sde_hdmi_bridge_power_on(bridge)) {
+			DEV_ERR("failed to power on bridge\n");
+			return;
+		}
+		hdmi->power_on = true;
+	}
+	if (!display->skip_ddc)
+		_sde_hdmi_bridge_setup_scrambler(hdmi, &display->mode);
+
+	if (phy)
+		phy->funcs->powerup(phy, hdmi->pixclock);
+
+	sde_hdmi_set_mode(hdmi, true);
+
+	if (hdmi->hdcp_ctrl && hdmi->is_hdcp_supported)
+		hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
+
+	mutex_lock(&display->display_lock);
+	if (display->codec_ready)
+		sde_hdmi_notify_clients(display, display->connected);
+	else
+		display->client_notify_pending = true;
+	mutex_unlock(&display->display_lock);
+}
+
+/*
+ * sde_hdmi_update_hdcp_info() - probe source/sink HDCP capability and pick
+ * the HDCP implementation to use
+ * @connector: DRM connector backing the HDMI display
+ *
+ * Prefers HDCP 2.2 when both source and sink support it, falling back to
+ * HDCP 1.x; caches the chosen ops/data pointers on the display.
+ */
+static void sde_hdmi_update_hdcp_info(struct drm_connector *connector)
+{
+	void *fd = NULL;
+	struct sde_hdcp_ops *ops = NULL;
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	if (!display) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	if (display->skip_ddc) {
+		/* no DDC means the sink can never be queried for HDCP 2.2 */
+		display->sink_hdcp22_support = false;
+		display->hdcp22_present = false;
+	} else {
+		/* check first if hdcp2p2 is supported */
+		fd = display->hdcp_feat_data[SDE_HDCP_2P2];
+		if (fd)
+			ops = sde_hdmi_hdcp2p2_start(fd);
+
+		/* If ops is true, sink supports hdcp */
+		if (ops)
+			display->sink_hdcp22_support = true;
+
+		if (ops && ops->feature_supported)
+			display->hdcp22_present = ops->feature_supported(fd);
+		else
+			display->hdcp22_present = false;
+	}
+	/* if hdcp22_present is true, src supports hdcp 2p2 */
+	if (display->hdcp22_present)
+		display->src_hdcp22_support = true;
+
+	if (!display->hdcp22_present) {
+		if (display->hdcp1_use_sw_keys) {
+			display->hdcp14_present =
+				hdcp1_check_if_supported_load_app();
+		}
+		if (display->hdcp14_present) {
+			fd = display->hdcp_feat_data[SDE_HDCP_1x];
+			if (fd)
+				ops = sde_hdcp_1x_start(fd);
+		}
+	}
+
+	if (display->sink_hdcp22_support)
+		display->sink_hdcp_ver = SDE_HDMI_HDCP_22;
+	else
+		display->sink_hdcp_ver = SDE_HDMI_HDCP_14;
+
+	/* update internal data about hdcp */
+	display->hdcp_data = fd;
+	display->hdcp_ops = ops;
+}
+
+/*
+ * _sde_hdmi_bridge_enable() - finish bridge enable: refresh HDCP state,
+ * start authentication, and reset HDR tracking
+ * @bridge: DRM bridge handle
+ */
+static void _sde_hdmi_bridge_enable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct sde_hdmi *display = sde_hdmi_bridge->display;
+
+	/* need to update hdcp info here to ensure right HDCP support*/
+	sde_hdmi_update_hdcp_info(hdmi->connector);
+
+	/* start HDCP authentication */
+	sde_hdmi_start_hdcp(hdmi->connector);
+
+	/* reset HDR state */
+	display->curr_hdr_state = HDR_DISABLE;
+}
+
+/*
+ * _sde_hdmi_bridge_disable() - clear HDR/HDCP/EDID state on bridge disable
+ * @bridge: DRM bridge handle
+ *
+ * Fix: the original dereferenced 'bridge' (via to_hdmi_bridge) and locked
+ * display->display_lock BEFORE its NULL check, so the check could never
+ * fire — it now runs first, before any dereference.
+ */
+static void _sde_hdmi_bridge_disable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge;
+	struct hdmi *hdmi;
+	struct sde_hdmi *display;
+	struct sde_connector_state *c_state;
+
+	if (!bridge) {
+		SDE_ERROR("Invalid params\n");
+		return;
+	}
+
+	sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	hdmi = sde_hdmi_bridge->hdmi;
+	display = sde_hdmi_bridge->display;
+
+	mutex_lock(&display->display_lock);
+
+	/* forget the sink's HDR capabilities */
+	hdmi->connector->hdr_eotf = 0;
+	hdmi->connector->hdr_metadata_type_one = 0;
+	hdmi->connector->hdr_max_luminance = 0;
+	hdmi->connector->hdr_avg_luminance = 0;
+	hdmi->connector->hdr_min_luminance = 0;
+
+	/* reset any pending HDR metadata on the connector state */
+	c_state = to_sde_connector_state(hdmi->connector->state);
+	memset(&c_state->hdr_ctrl.hdr_meta,
+		0, sizeof(c_state->hdr_ctrl.hdr_meta));
+	c_state->hdr_ctrl.hdr_state = HDR_DISABLE;
+
+	display->pll_update_enable = false;
+	display->sink_hdcp_ver = SDE_HDMI_HDCP_NONE;
+	display->sink_hdcp22_support = false;
+
+	if (sde_hdmi_tx_is_hdcp_enabled(display))
+		sde_hdmi_hdcp_off(display);
+
+	sde_hdmi_clear_hdr_info(bridge);
+	/* Clear HDMI VSDB blocks info */
+	sde_hdmi_clear_vsdbs(bridge);
+	/* Clear HDMI VCDB block info */
+	sde_hdmi_clear_vcdb_info(bridge);
+	/* Clear HDMI colorimetry data block info */
+	sde_hdmi_clear_colorimetry(bridge);
+
+	mutex_unlock(&display->display_lock);
+}
+
+/*
+ * _sde_hdmi_bridge_post_disable() - final power-down after CRTC disable
+ * @bridge: DRM bridge handle
+ *
+ * Notifies clients, stops audio, turns the controller and PHY off, resets
+ * the controller, and drops power — then, for pluggable sinks, re-enables
+ * just enough of the controller to keep HPD detection alive.
+ */
+static void _sde_hdmi_bridge_post_disable(struct drm_bridge *bridge)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct hdmi_phy *phy = hdmi->phy;
+	struct sde_hdmi *display = sde_hdmi_bridge->display;
+
+	sde_hdmi_notify_clients(display, display->connected);
+
+	sde_hdmi_audio_off(hdmi);
+
+	DRM_DEBUG("power down");
+	sde_hdmi_set_mode(hdmi, false);
+
+	if (phy)
+		phy->funcs->powerdown(phy);
+
+	/* HDMI teardown sequence */
+	sde_hdmi_ctrl_reset(hdmi);
+
+	if (hdmi->power_on) {
+		_sde_hdmi_bridge_power_off(bridge);
+		hdmi->power_on = false;
+	}
+
+	if (!display->non_pluggable) {
+		/* Powering-on the controller for HPD */
+		sde_hdmi_ctrl_cfg(hdmi, 1);
+	}
+}
+
+/*
+ * _sde_hdmi_bridge_set_avi_infoframe() - build and program the AVI
+ * infoframe registers
+ * @hdmi: HDMI controller handle
+ * @mode: display mode the frame describes
+ *
+ * Packs the frame with the DRM core helper, then scatters the bytes into
+ * the four HDMI_AVI_INFO registers in the byte order the hardware expects
+ * (checksum first, version byte last) and enables per-frame transmission.
+ */
+static void _sde_hdmi_bridge_set_avi_infoframe(struct hdmi *hdmi,
+	struct drm_display_mode *mode)
+{
+	u8 avi_iframe[HDMI_AVI_INFOFRAME_BUFFER_SIZE] = {0};
+	u8 *avi_frame = &avi_iframe[HDMI_INFOFRAME_HEADER_SIZE];
+	u8 checksum;
+	u32 reg_val;
+	u32 mode_fmt_flags = 0;
+	struct hdmi_avi_infoframe info;
+	struct drm_connector *connector;
+
+	if (!hdmi || !mode) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	connector = hdmi->connector;
+
+	if (!connector) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/* Cache the format flags before clearing */
+	mode_fmt_flags = mode->flags;
+	/**
+	 * Clear the RGB/YUV format flags before calling upstream API
+	 * as the API also compares the flags and then returns a mode
+	 */
+	mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+	drm_hdmi_avi_infoframe_from_display_mode(&info, mode);
+	/* Restore the format flags */
+	mode->flags = mode_fmt_flags;
+
+	if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420) {
+		info.colorspace = HDMI_COLORSPACE_YUV420;
+		/**
+		 * If sink supports quantization select,
+		 * override to full range
+		 */
+		if (connector->yuv_qs)
+			info.ycc_quantization_range =
+				HDMI_YCC_QUANTIZATION_RANGE_FULL;
+	}
+
+	hdmi_avi_infoframe_pack(&info, avi_iframe, sizeof(avi_iframe));
+	/* pack() stores the computed checksum in the last header byte */
+	checksum = avi_iframe[HDMI_INFOFRAME_HEADER_SIZE - 1];
+
+	reg_val = checksum |
+		LEFT_SHIFT_BYTE(avi_frame[0]) |
+		LEFT_SHIFT_WORD(avi_frame[1]) |
+		LEFT_SHIFT_24BITS(avi_frame[2]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(0), reg_val);
+
+	reg_val = avi_frame[3] |
+		LEFT_SHIFT_BYTE(avi_frame[4]) |
+		LEFT_SHIFT_WORD(avi_frame[5]) |
+		LEFT_SHIFT_24BITS(avi_frame[6]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(1), reg_val);
+
+	reg_val = avi_frame[7] |
+		LEFT_SHIFT_BYTE(avi_frame[8]) |
+		LEFT_SHIFT_WORD(avi_frame[9]) |
+		LEFT_SHIFT_24BITS(avi_frame[10]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(2), reg_val);
+
+	reg_val = avi_frame[11] |
+		LEFT_SHIFT_BYTE(avi_frame[12]) |
+		LEFT_SHIFT_24BITS(avi_iframe[1]);
+	hdmi_write(hdmi, REG_HDMI_AVI_INFO(3), reg_val);
+
+	/* AVI InfoFrame enable (every frame) */
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+		hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(1) | BIT(0));
+
+	/* program the scanline on which the frame is sent */
+	reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F;
+	reg_val |= HDMI_AVI_IFRAME_LINE_NUMBER;
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/*
+ * _sde_hdmi_bridge_set_vs_infoframe() - program the HDMI vendor-specific
+ * infoframe (skipped when the mode needs none, e.g. non-4K/3D modes)
+ * @hdmi: HDMI controller handle
+ * @mode: display mode the frame describes
+ */
+static void _sde_hdmi_bridge_set_vs_infoframe(struct hdmi *hdmi,
+	const struct drm_display_mode *mode)
+{
+	u8 vs_iframe[HDMI_VS_INFOFRAME_BUFFER_SIZE] = {0};
+	u32 reg_val;
+	struct hdmi_vendor_infoframe info;
+	int rc = 0;
+
+	/* a negative return means this mode carries no vendor payload */
+	rc = drm_hdmi_vendor_infoframe_from_display_mode(&info, mode);
+	if (rc < 0) {
+		SDE_DEBUG("don't send vendor infoframe\n");
+		return;
+	}
+	hdmi_vendor_infoframe_pack(&info, vs_iframe, sizeof(vs_iframe));
+
+	reg_val = (info.s3d_struct << 24) | (info.vic << 16) |
+		(vs_iframe[3] << 8) | (vs_iframe[7] << 5) |
+		vs_iframe[2];
+	hdmi_write(hdmi, REG_HDMI_VENSPEC_INFO0, reg_val);
+
+	/* vendor specific info-frame enable (every frame) */
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL0,
+		hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL0) | BIT(13) | BIT(12));
+
+	reg_val = hdmi_read(hdmi, REG_HDMI_INFOFRAME_CTRL1);
+	reg_val &= ~0x3F000000;
+	reg_val |= (HDMI_VENDOR_IFRAME_LINE_NUMBER << 24);
+	hdmi_write(hdmi, REG_HDMI_INFOFRAME_CTRL1, reg_val);
+}
+
+/*
+ * _sde_hdmi_bridge_set_spd_infoframe() - program the Source Product
+ * Description infoframe via the GENERIC1 packet registers
+ * @hdmi: HDMI controller handle
+ * @mode: display mode (currently unused by the packing)
+ *
+ * Uses placeholder vendor/product names until platform data supplies them.
+ */
+static void _sde_hdmi_bridge_set_spd_infoframe(struct hdmi *hdmi,
+	const struct drm_display_mode *mode)
+{
+	u8 spd_iframe[HDMI_SPD_INFOFRAME_BUFFER_SIZE] = {0};
+	u32 packet_payload, packet_control, packet_header;
+	struct hdmi_spd_infoframe info;
+	int i;
+
+	/* Need to query vendor and product name from platform setup */
+	hdmi_spd_infoframe_init(&info, HDMI_DEFAULT_VENDOR_NAME,
+		HDMI_DEFAULT_PRODUCT_NAME);
+	hdmi_spd_infoframe_pack(&info, spd_iframe, sizeof(spd_iframe));
+
+	/* header bytes are masked to 7 bits per the packet register layout */
+	packet_header = spd_iframe[0]
+		| LEFT_SHIFT_BYTE(spd_iframe[1] & 0x7f)
+		| LEFT_SHIFT_WORD(spd_iframe[2] & 0x7f);
+	hdmi_write(hdmi, REG_HDMI_GENERIC1_HDR, packet_header);
+
+	for (i = 0; i < MAX_REG_HDMI_GENERIC1_INDEX; i++) {
+		packet_payload = spd_iframe[3 + i * 4]
+			| LEFT_SHIFT_BYTE(spd_iframe[4 + i * 4] & 0x7f)
+			| LEFT_SHIFT_WORD(spd_iframe[5 + i * 4] & 0x7f)
+			| LEFT_SHIFT_24BITS(spd_iframe[6 + i * 4] & 0x7f);
+		hdmi_write(hdmi, REG_HDMI_GENERIC1(i), packet_payload);
+	}
+
+	/* trailing two bytes that do not fill a whole register */
+	packet_payload = (spd_iframe[27] & 0x7f)
+		| LEFT_SHIFT_BYTE(spd_iframe[28] & 0x7f);
+	hdmi_write(hdmi, REG_HDMI_GENERIC1(MAX_REG_HDMI_GENERIC1_INDEX),
+		packet_payload);
+
+	/*
+	 * GENERIC1_LINE | GENERIC1_CONT | GENERIC1_SEND
+	 * Setup HDMI TX generic packet control
+	 * Enable this packet to transmit every frame
+	 * Enable HDMI TX engine to transmit Generic packet 1
+	 */
+	packet_control = hdmi_read(hdmi, REG_HDMI_GEN_PKT_CTRL);
+	packet_control |= ((0x1 << 24) | (1 << 5) | (1 << 4));
+	hdmi_write(hdmi, REG_HDMI_GEN_PKT_CTRL, packet_control);
+}
+
+/* Cache the current timing on the display so later stages can use it */
+static inline void _sde_hdmi_save_mode(struct hdmi *hdmi,
+	struct drm_display_mode *mode)
+{
+	struct sde_connector *conn = to_sde_connector(hdmi->connector);
+	struct sde_hdmi *disp = (struct sde_hdmi *)conn->display;
+
+	drm_mode_copy(&disp->mode, mode);
+}
+
+/*
+ * _sde_hdmi_choose_best_format() - pick the best color format flags for a
+ * mode given sink capabilities
+ * @hdmi: HDMI controller handle
+ * @mode: candidate display mode
+ *
+ * Return: MSM_MODE_FLAG_* bits; defaults to plain RGB444 (with an error
+ * log for pluggable sinks) when nothing matches.
+ */
+static u32 _sde_hdmi_choose_best_format(struct hdmi *hdmi,
+	struct drm_display_mode *mode)
+{
+	/*
+	 * choose priority:
+	 * 1. DC + RGB
+	 * 2. DC + YUV
+	 * 3. RGB
+	 * 4. YUV
+	 */
+	int dc_format;
+	struct drm_connector *connector = hdmi->connector;
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+	dc_format = sde_hdmi_sink_dc_support(connector, mode);
+	if (dc_format & MSM_MODE_FLAG_RGB444_DC_ENABLE)
+		return (MSM_MODE_FLAG_COLOR_FORMAT_RGB444
+			| MSM_MODE_FLAG_RGB444_DC_ENABLE);
+	else if (dc_format & MSM_MODE_FLAG_YUV420_DC_ENABLE)
+		return (MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420
+			| MSM_MODE_FLAG_YUV420_DC_ENABLE);
+	else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB)
+		return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+	else if (mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV)
+		return MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420;
+
+	if (display && !display->non_pluggable)
+		SDE_ERROR("Can't get available best display format\n");
+
+	return MSM_MODE_FLAG_COLOR_FORMAT_RGB444;
+}
+
+/*
+ * _sde_hdmi_bridge_mode_set() - program timing, polarity and infoframes
+ * for the adjusted mode
+ * @bridge:        DRM bridge handle
+ * @mode:          requested mode (unused; adjusted_mode is authoritative)
+ * @adjusted_mode: mode after mode_fixup, carrying format private flags
+ *
+ * Horizontal timings are halved (div=1) for YUV420 since two pixels share
+ * one TMDS clock in that format.
+ */
+static void _sde_hdmi_bridge_mode_set(struct drm_bridge *bridge,
+		 struct drm_display_mode *mode,
+		 struct drm_display_mode *adjusted_mode)
+{
+	struct sde_hdmi_bridge *sde_hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = sde_hdmi_bridge->hdmi;
+	struct sde_hdmi *display = sde_hdmi_bridge->display;
+	int hstart, hend, vstart, vend;
+	uint32_t frame_ctrl;
+	u32 div = 0;
+
+	mode = adjusted_mode;
+
+	display->dc_enable = mode->private_flags &
+		(MSM_MODE_FLAG_RGB444_DC_ENABLE |
+		 MSM_MODE_FLAG_YUV420_DC_ENABLE);
+	/* compute pixclock as per color format and bit depth */
+	hdmi->pixclock = sde_hdmi_calc_pixclk(
+		mode->clock * HDMI_KHZ_TO_HZ,
+		mode->private_flags,
+		display->dc_enable);
+	SDE_DEBUG("Actual PCLK: %lu, Mode PCLK: %d\n",
+		hdmi->pixclock, mode->clock);
+
+	/* YUV420: half the horizontal timing values */
+	if (mode->private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+		div = 1;
+
+	hstart = (mode->htotal - mode->hsync_start) >> div;
+	hend = (mode->htotal - mode->hsync_start + mode->hdisplay) >> div;
+
+	vstart = mode->vtotal - mode->vsync_start - 1;
+	vend = mode->vtotal - mode->vsync_start + mode->vdisplay - 1;
+
+	SDE_DEBUG(
+		"htotal=%d, vtotal=%d, hstart=%d, hend=%d, vstart=%d, vend=%d",
+		mode->htotal, mode->vtotal, hstart, hend, vstart, vend);
+
+	hdmi_write(hdmi, REG_HDMI_TOTAL,
+		SDE_HDMI_TOTAL_H_TOTAL((mode->htotal >> div) - 1) |
+		SDE_HDMI_TOTAL_V_TOTAL(mode->vtotal - 1));
+
+	hdmi_write(hdmi, REG_HDMI_ACTIVE_HSYNC,
+		SDE_HDMI_ACTIVE_HSYNC_START(hstart) |
+		SDE_HDMI_ACTIVE_HSYNC_END(hend));
+	hdmi_write(hdmi, REG_HDMI_ACTIVE_VSYNC,
+		SDE_HDMI_ACTIVE_VSYNC_START(vstart) |
+		SDE_HDMI_ACTIVE_VSYNC_END(vend));
+
+	/* field-2 registers are only meaningful for interlaced modes */
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE) {
+		hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+			SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(mode->vtotal));
+		hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+			SDE_HDMI_VSYNC_ACTIVE_F2_START(vstart + 1) |
+			SDE_HDMI_VSYNC_ACTIVE_F2_END(vend + 1));
+	} else {
+		hdmi_write(hdmi, REG_HDMI_VSYNC_TOTAL_F2,
+			SDE_HDMI_VSYNC_TOTAL_F2_V_TOTAL(0));
+		hdmi_write(hdmi, REG_HDMI_VSYNC_ACTIVE_F2,
+			SDE_HDMI_VSYNC_ACTIVE_F2_START(0) |
+			SDE_HDMI_VSYNC_ACTIVE_F2_END(0));
+	}
+
+	frame_ctrl = 0;
+	if (mode->flags & DRM_MODE_FLAG_NHSYNC)
+		frame_ctrl |= HDMI_FRAME_CTRL_HSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_NVSYNC)
+		frame_ctrl |= HDMI_FRAME_CTRL_VSYNC_LOW;
+	if (mode->flags & DRM_MODE_FLAG_INTERLACE)
+		frame_ctrl |= HDMI_FRAME_CTRL_INTERLACED_EN;
+	DRM_DEBUG("frame_ctrl=%08x\n", frame_ctrl);
+	hdmi_write(hdmi, REG_HDMI_FRAME_CTRL, frame_ctrl);
+
+	/*
+	 * Setup info frame
+	 * Current drm_edid driver doesn't have all CEA formats defined in
+	 * latest CEA-861(CTA-861) spec. So, don't check if mode is CEA mode
+	 * in here. Once core framework is updated, the check needs to be
+	 * added back.
+	 */
+	if (hdmi->hdmi_mode) {
+		_sde_hdmi_bridge_set_avi_infoframe(hdmi, mode);
+		_sde_hdmi_bridge_set_vs_infoframe(hdmi, mode);
+		_sde_hdmi_bridge_set_spd_infoframe(hdmi, mode);
+		DRM_DEBUG("hdmi setup info frame\n");
+	}
+
+	_sde_hdmi_save_mode(hdmi, mode);
+	_sde_hdmi_bridge_setup_deep_color(hdmi);
+}
+
+/*
+ * _sde_hdmi_bridge_mode_fixup() - stamp the chosen color format into the
+ * adjusted mode's private flags
+ * @bridge:        DRM bridge handle
+ * @mode:          requested mode (unused)
+ * @adjusted_mode: mode to annotate
+ *
+ * Return: always true (the mode itself is never rejected here).
+ */
+static bool _sde_hdmi_bridge_mode_fixup(struct drm_bridge *bridge,
+	const struct drm_display_mode *mode,
+	struct drm_display_mode *adjusted_mode)
+{
+	struct sde_hdmi_bridge *hdmi_bridge = to_hdmi_bridge(bridge);
+	struct hdmi *hdmi = hdmi_bridge->hdmi;
+	u32 fmt_flags;
+
+	/* drop stale flags before the format helper inspects the mode */
+	adjusted_mode->private_flags = 0;
+
+	fmt_flags = _sde_hdmi_choose_best_format(hdmi, adjusted_mode);
+	adjusted_mode->private_flags = fmt_flags;
+	SDE_DEBUG("Adjusted mode private flags: 0x%x\n",
+		adjusted_mode->private_flags);
+
+	return true;
+}
+
+/* Public wrapper so other SDE HDMI files can power the bridge directly */
+void sde_hdmi_bridge_power_on(struct drm_bridge *bridge)
+{
+	_sde_hdmi_bridge_power_on(bridge);
+}
+
+static const struct drm_bridge_funcs _sde_hdmi_bridge_funcs = {
+ .pre_enable = _sde_hdmi_bridge_pre_enable,
+ .enable = _sde_hdmi_bridge_enable,
+ .disable = _sde_hdmi_bridge_disable,
+ .post_disable = _sde_hdmi_bridge_post_disable,
+ .mode_set = _sde_hdmi_bridge_mode_set,
+ .mode_fixup = _sde_hdmi_bridge_mode_fixup,
+};
+
+
+/* initialize bridge */
+/*
+ * sde_hdmi_bridge_init() - allocate and attach the SDE HDMI DRM bridge
+ * @hdmi:    msm HDMI controller handle
+ * @display: SDE HDMI display context
+ *
+ * The bridge is devm-allocated, so no explicit free is needed on the
+ * failure path (destroy is a no-op).  Return: the bridge, or ERR_PTR.
+ */
+struct drm_bridge *sde_hdmi_bridge_init(struct hdmi *hdmi,
+	struct sde_hdmi *display)
+{
+	struct drm_bridge *bridge = NULL;
+	struct sde_hdmi_bridge *sde_hdmi_bridge;
+	int ret;
+
+	sde_hdmi_bridge = devm_kzalloc(hdmi->dev->dev,
+		sizeof(*sde_hdmi_bridge), GFP_KERNEL);
+	if (!sde_hdmi_bridge) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	sde_hdmi_bridge->hdmi = hdmi;
+	sde_hdmi_bridge->display = display;
+
+	bridge = &sde_hdmi_bridge->base;
+	bridge->funcs = &_sde_hdmi_bridge_funcs;
+
+	ret = drm_bridge_attach(hdmi->dev, bridge);
+	if (ret)
+		goto fail;
+
+	return bridge;
+
+fail:
+	if (bridge)
+		_sde_hdmi_bridge_destroy(bridge);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
new file mode 100644
index 000000000000..fbb8bd1a3ca4
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_hdcp2p2.c
@@ -0,0 +1,1053 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/types.h>
+#include <linux/kthread.h>
+
+#include <linux/hdcp_qseecom.h>
+#include "sde_hdcp.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+#include "sde_hdmi_util.h"
+
+/*
+ * Defined addresses and offsets of standard HDCP 2.2 sink registers
+ * for DDC, as defined in HDCP 2.2 spec section 2.14 table 2.7
+ */
+#define HDCP_SINK_DDC_SLAVE_ADDR 0x74 /* Sink DDC slave address */
+#define HDCP_SINK_DDC_HDCP2_VERSION 0x50 /* Does sink support HDCP2.2 */
+#define HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE 0x60 /* HDCP Tx writes here */
+#define HDCP_SINK_DDC_HDCP2_RXSTATUS 0x70 /* RxStatus, 2 bytes */
+#define HDCP_SINK_DDC_HDCP2_READ_MESSAGE 0x80 /* HDCP Tx reads here */
+
+#define HDCP2P2_DEFAULT_TIMEOUT 500
+
+/*
+ * HDCP 2.2 encryption requires the data encryption block that is present in
+ * HDMI controller version 4.0.0 and above
+ */
+#define MIN_HDMI_TX_MAJOR_VERSION 4
+
+/* Physical connection state of the HDCP 2.2 sink */
+enum sde_hdmi_hdcp2p2_sink_status {
+	SINK_DISCONNECTED,
+	SINK_CONNECTED
+};
+
+/* Outcome reported by the trustzone library for an authentication run */
+enum sde_hdmi_auth_status {
+	HDMI_HDCP_AUTH_STATUS_FAILURE,
+	HDMI_HDCP_AUTH_STATUS_SUCCESS
+};
+
+/*
+ * Per-display HDCP 2.2 controller state.  All work items run on the
+ * dedicated kthread worker below; wakeup requests from the trustzone
+ * library are serialized by wakeup_mutex and translate into queued work.
+ */
+struct sde_hdmi_hdcp2p2_ctrl {
+	atomic_t auth_state;
+	enum sde_hdmi_hdcp2p2_sink_status sink_status; /* Is sink connected */
+	struct sde_hdcp_init_data init_data; /* Feature data from HDMI drv */
+	struct mutex mutex; /* mutex to protect access to ctrl */
+	struct mutex msg_lock; /* mutex to protect access to msg buffer */
+	struct mutex wakeup_mutex; /* mutex to protect access to wakeup call*/
+	struct sde_hdcp_ops *ops;
+	void *lib_ctx; /* Handle to HDCP 2.2 Trustzone library */
+	struct hdcp_txmtr_ops *lib; /* Ops for driver to call into TZ */
+
+	enum hdmi_hdcp_wakeup_cmd wakeup_cmd; /* last cmd from library */
+	enum sde_hdmi_auth_status auth_status; /* last auth result */
+	char *send_msg_buf; /* outgoing message cached for send_msg work */
+	uint32_t send_msg_len; /* length of send_msg_buf in bytes */
+	uint32_t timeout; /* DDC timeout for the current transaction, ms */
+	uint32_t timeout_left; /* time remaining after last DDC transfer */
+
+	/* worker thread and the work items queued on it */
+	struct task_struct *thread;
+	struct kthread_worker worker;
+	struct kthread_work status;
+	struct kthread_work auth;
+	struct kthread_work send_msg;
+	struct kthread_work recv_msg;
+	struct kthread_work link;
+	struct kthread_work poll;
+};
+
+static int sde_hdmi_hdcp2p2_auth(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static void sde_hdmi_hdcp2p2_send_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static void sde_hdmi_hdcp2p2_recv_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static void sde_hdmi_hdcp2p2_auth_status(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+static int sde_hdmi_hdcp2p2_link_check(struct sde_hdmi_hdcp2p2_ctrl *ctrl);
+
+/*
+ * A wakeup is acceptable when it starts a new authentication, or when a
+ * session is already active (any state other than inactive).
+ */
+static bool sde_hdcp2p2_is_valid_state(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	return (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_AUTHENTICATE) ||
+		(atomic_read(&ctrl->auth_state) != HDCP_STATE_INACTIVE);
+}
+
+/*
+ * Cache the outgoing HDCP message carried in @data into the controller's
+ * private buffer so the send_msg work item can forward it to the sink.
+ *
+ * Returns 0 on success (or when there is nothing to copy), -ENOMEM on
+ * allocation failure.
+ *
+ * Fix: the original kzfree()d the old buffer and, when the replacement
+ * allocation failed, left ctrl->send_msg_buf dangling with a non-zero
+ * send_msg_len — a later send_msg work item would then memcpy from freed
+ * memory.  Clear the pointer and the length on the failure path.
+ */
+static int sde_hdmi_hdcp2p2_copy_buf(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	struct hdmi_hdcp_wakeup_data *data)
+{
+	int rc = 0;
+
+	mutex_lock(&ctrl->msg_lock);
+
+	if (!data->send_msg_len)
+		goto unlock;
+
+	ctrl->send_msg_len = data->send_msg_len;
+
+	kzfree(ctrl->send_msg_buf);
+	/* don't leave a dangling pointer if the allocation below fails */
+	ctrl->send_msg_buf = NULL;
+
+	ctrl->send_msg_buf = kzalloc(data->send_msg_len, GFP_KERNEL);
+	if (!ctrl->send_msg_buf) {
+		ctrl->send_msg_len = 0;
+		rc = -ENOMEM;
+		goto unlock;
+	}
+
+	memcpy(ctrl->send_msg_buf, data->send_msg_buf, ctrl->send_msg_len);
+
+unlock:
+	mutex_unlock(&ctrl->msg_lock);
+
+	return rc;
+}
+
+/*
+ * Wakeup entry point called by the HDCP 2.2 trustzone library (and by
+ * this driver itself) to hand a command to the HDCP worker thread.
+ *
+ * Records the command, copies any outgoing message into the controller
+ * buffer, and queues the matching kthread work item.  Serialized by
+ * ctrl->wakeup_mutex.
+ *
+ * NOTE(review): always returns 0, even when the state is invalid or the
+ * message copy fails — callers cannot observe those errors.  Confirm
+ * this is intentional before relying on the return value.
+ */
+static int sde_hdmi_hdcp2p2_wakeup(struct hdmi_hdcp_wakeup_data *data)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+
+	if (!data) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	ctrl = data->context;
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&ctrl->wakeup_mutex);
+
+	SDE_HDCP_DEBUG("cmd: %s, timeout %dms\n",
+		hdmi_hdcp_cmd_to_str(data->cmd),
+		data->timeout);
+
+	ctrl->wakeup_cmd = data->cmd;
+
+	/* presumably doubled to leave headroom for DDC retries — TODO confirm */
+	if (data->timeout)
+		ctrl->timeout = data->timeout * 2;
+	else
+		ctrl->timeout = HDCP2P2_DEFAULT_TIMEOUT;
+
+	if (!sde_hdcp2p2_is_valid_state(ctrl)) {
+		SDE_ERROR("invalid state\n");
+		goto exit;
+	}
+
+	if (sde_hdmi_hdcp2p2_copy_buf(ctrl, data))
+		goto exit;
+
+	/* latch the auth result before the status work runs */
+	if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS)
+		ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_SUCCESS;
+	else if (ctrl->wakeup_cmd == HDMI_HDCP_WKUP_CMD_STATUS_FAILED)
+		ctrl->auth_status = HDMI_HDCP_AUTH_STATUS_FAILURE;
+
+	/* dispatch the command to the worker thread */
+	switch (ctrl->wakeup_cmd) {
+	case HDMI_HDCP_WKUP_CMD_SEND_MESSAGE:
+		queue_kthread_work(&ctrl->worker, &ctrl->send_msg);
+		break;
+	case HDMI_HDCP_WKUP_CMD_RECV_MESSAGE:
+		queue_kthread_work(&ctrl->worker, &ctrl->recv_msg);
+		break;
+	case HDMI_HDCP_WKUP_CMD_STATUS_SUCCESS:
+	case HDMI_HDCP_WKUP_CMD_STATUS_FAILED:
+		queue_kthread_work(&ctrl->worker, &ctrl->status);
+		break;
+	case HDMI_HDCP_WKUP_CMD_LINK_POLL:
+		queue_kthread_work(&ctrl->worker, &ctrl->poll);
+		break;
+	case HDMI_HDCP_WKUP_CMD_AUTHENTICATE:
+		queue_kthread_work(&ctrl->worker, &ctrl->auth);
+		break;
+	default:
+		SDE_ERROR("invalid wakeup command %d\n", ctrl->wakeup_cmd);
+	}
+exit:
+	mutex_unlock(&ctrl->wakeup_mutex);
+	return 0;
+}
+
+/*
+ * Forward a wakeup command to the HDCP 2.2 trustzone library.  Commands
+ * that cannot be delivered (missing ctrl/lib/wakeup op, no data, or an
+ * INVALID command) are silently ignored and report success.
+ */
+static int sde_hdmi_hdcp2p2_wakeup_lib(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	struct hdcp_lib_wakeup_data *data)
+{
+	int rc;
+
+	if (!ctrl || !ctrl->lib || !ctrl->lib->wakeup ||
+		!data || (data->cmd == HDCP_LIB_WKUP_CMD_INVALID))
+		return 0;
+
+	rc = ctrl->lib->wakeup(data);
+	if (rc)
+		SDE_ERROR("error sending %s to lib\n",
+			hdcp_lib_cmd_to_str(data->cmd));
+
+	return rc;
+}
+
+/* Drop the session back to its initial state: sink gone, HDCP inactive. */
+static void sde_hdmi_hdcp2p2_reset(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	ctrl->sink_status = SINK_DISCONNECTED;
+	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+}
+
+/*
+ * sde_hdcp_ops .off callback: tear down the HDCP 2.2 session.
+ *
+ * Resets state, drains the worker, and pushes an AUTHENTICATE wakeup —
+ * with the state now inactive, the auth work sends a STOP to the
+ * trustzone library (see sde_hdmi_hdcp2p2_auth).  Finally disables the
+ * hardware DDC path.
+ */
+static void sde_hdmi_hdcp2p2_off(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+	struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+
+	ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	sde_hdmi_hdcp2p2_reset(ctrl);
+
+	flush_kthread_worker(&ctrl->worker);
+
+	cdata.context = input;
+	sde_hdmi_hdcp2p2_wakeup(&cdata);
+
+	/* There could be up to one frame delay
+	 * between the time encryption disable is
+	 * requested till the time we get encryption
+	 * disabled interrupt
+	 */
+	msleep(20);
+	sde_hdmi_hdcp2p2_ddc_disable((void *)ctrl->init_data.cb_data);
+}
+
+/*
+ * sde_hdcp_ops .authenticate callback: kick off HDCP 2.2 authentication.
+ *
+ * Enables the relevant HDCP interrupts, quiesces the DDC bus, marks the
+ * session as authenticating, and queues the auth work via a wakeup.
+ * Always returns 0; failures surface later through the status work.
+ */
+static int sde_hdmi_hdcp2p2_authenticate(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = input;
+	struct hdmi_hdcp_wakeup_data cdata = {HDMI_HDCP_WKUP_CMD_AUTHENTICATE};
+	u32 regval;
+	int rc = 0;
+
+	/* Enable authentication success interrupt */
+	/* NOTE(review): BIT(1)|BIT(2) in HDMI_HDCP_INT_CTRL2 — presumably the
+	 * encryption-ready/encryption-fail interrupt enables; confirm against
+	 * the HDMI TX register spec.
+	 */
+	regval = DSS_REG_R(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2);
+	regval |= BIT(1) | BIT(2);
+
+	DSS_REG_W(ctrl->init_data.core_io, HDMI_HDCP_INT_CTRL2, regval);
+
+	flush_kthread_worker(&ctrl->worker);
+
+	ctrl->sink_status = SINK_CONNECTED;
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATING);
+
+	/* make sure ddc is idle before starting hdcp 2.2 authentication */
+	_sde_hdmi_scrambler_ddc_disable((void *)ctrl->init_data.cb_data);
+	sde_hdmi_hdcp2p2_ddc_disable((void *)ctrl->init_data.cb_data);
+
+	cdata.context = input;
+	sde_hdmi_hdcp2p2_wakeup(&cdata);
+
+	return rc;
+}
+
+/*
+ * sde_hdcp_ops .reauthenticate callback: restart authentication from a
+ * clean slate by resetting the session and re-running .authenticate.
+ */
+static int sde_hdmi_hdcp2p2_reauthenticate(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = input;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	sde_hdmi_hdcp2p2_reset(ctrl);
+
+	return sde_hdmi_hdcp2p2_authenticate(input);
+}
+
+/*
+ * hdcp_client_ops .notify_lvl_change callback: the library reports a new
+ * minimum encryption level (0 = none, 1 = HDCP 1.x, 2 = HDCP 2.2).
+ *
+ * Notifies the HDMI client of the mapped level (unknown levels are not
+ * forwarded), then asks the library to re-query the stream type.
+ */
+static void sde_hdmi_hdcp2p2_min_level_change(void *client_ctx,
+int min_enc_lvl)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl =
+		(struct sde_hdmi_hdcp2p2_ctrl *)client_ctx;
+	struct hdcp_lib_wakeup_data cdata = {
+		HDCP_LIB_WKUP_CMD_QUERY_STREAM_TYPE};
+	bool enc_notify = true;
+	enum sde_hdcp_states enc_lvl;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	switch (min_enc_lvl) {
+	case 0:
+		enc_lvl = HDCP_STATE_AUTH_ENC_NONE;
+		break;
+	case 1:
+		enc_lvl = HDCP_STATE_AUTH_ENC_1X;
+		break;
+	case 2:
+		enc_lvl = HDCP_STATE_AUTH_ENC_2P2;
+		break;
+	default:
+		/* enc_lvl stays unset but is guarded by enc_notify below */
+		enc_notify = false;
+	}
+
+	SDE_HDCP_DEBUG("enc level changed %d\n", min_enc_lvl);
+
+	/* notify the client first about the new level */
+	if (enc_notify && ctrl->init_data.notify_status)
+		ctrl->init_data.notify_status(ctrl->init_data.cb_data, enc_lvl);
+
+	cdata.context = ctrl->lib_ctx;
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+/*
+ * hdcp_client_ops .mute_sink callback: ask the HDMI client to send an
+ * avmute packet to the sink, if the client registered a handler.
+ */
+static void sde_hdmi_hdcp2p2_mute_sink(void *client_ctx)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = client_ctx;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/* call into client to send avmute to the sink */
+	if (ctrl->init_data.avmute_sink)
+		ctrl->init_data.avmute_sink(ctrl->init_data.cb_data);
+}
+
+/*
+ * Mark the session failed, stop HDCP DDC traffic, and report
+ * HDCP_STATE_AUTH_FAIL to the HDMI client (which may reauthenticate).
+ * notify_status is validated as non-NULL at init time.
+ */
+static void sde_hdmi_hdcp2p2_auth_failed(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+
+	sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+	/* notify hdmi tx about HDCP failure */
+	ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+		HDCP_STATE_AUTH_FAIL);
+}
+
+/*
+ * Like sde_hdmi_hdcp2p2_auth_failed(), but reports
+ * HDCP_STATE_AUTH_FAIL_NOREAUTH so the client does not retry.
+ */
+static void sde_hdmi_hdcp2p2_fail_noreauth(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	/* latch the failure and stop HDCP traffic on the DDC bus */
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTH_FAIL);
+	sde_hdmi_hdcp2p2_ddc_disable(ctrl->init_data.cb_data);
+
+	/* tell the HDMI client not to attempt reauthentication */
+	ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+		HDCP_STATE_AUTH_FAIL_NOREAUTH);
+}
+
+/*
+ * hdcp_client_ops .srm_cb callback: the system renewability message
+ * flagged this sink as revoked.  Stop the library session and fail the
+ * link permanently (no reauthentication).
+ */
+static void sde_hdmi_hdcp2p2_srm_cb(void *client_ctx)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = client_ctx;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_STOP};
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+	sde_hdmi_hdcp2p2_fail_noreauth(ctrl);
+}
+
+/*
+ * Read an HDCP 2.2 message of @size bytes from the sink's
+ * HDCP2_READ_MESSAGE DDC offset into @buf, within @timeout ms.
+ * Updates ctrl->timeout_left with the time remaining after the
+ * transfer.  Returns 0 on success or a negative errno.
+ *
+ * Fix: the original dereferenced init_data.ddc_ctrl before any check and
+ * then tested `!ddc_data`, which can never be true since it is the
+ * address of an embedded member.  Validate ddc_ctrl itself instead.
+ */
+static int sde_hdmi_hdcp2p2_ddc_rd_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	u8 *buf, int size, u32 timeout)
+{
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	int rc;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		SDE_ERROR("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		return -EINVAL;
+	}
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+	ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data->offset = HDCP_SINK_DDC_HDCP2_READ_MESSAGE;
+	ddc_data->data_buf = buf;
+	ddc_data->data_len = size;
+	ddc_data->request_len = size;
+	ddc_data->retry = 0;
+	ddc_data->hard_timeout = timeout;
+	ddc_data->what = "HDCP2ReadMessage";
+
+	rc = sde_hdmi_ddc_read(ctrl->init_data.cb_data);
+	if (rc)
+		SDE_ERROR("Cannot read HDCP message register\n");
+
+	ctrl->timeout_left = ddc_data->timeout_left;
+
+	return rc;
+}
+
+/*
+ * Write an HDCP 2.2 message of @size bytes from @buf to the sink's
+ * HDCP2_WRITE_MESSAGE DDC offset, bounded by ctrl->timeout.  Updates
+ * ctrl->timeout_left.  Returns 0 on success or a negative errno.
+ *
+ * Fix: as in the read path, the original dereferenced ddc_ctrl before
+ * checking it and tested the always-non-NULL `&ddc_ctrl->ddc_data`;
+ * validate ddc_ctrl itself.
+ */
+static int sde_hdmi_hdcp2p2_ddc_wt_message(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+	u8 *buf, size_t size)
+{
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	int rc;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		SDE_ERROR("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+	ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data->offset = HDCP_SINK_DDC_HDCP2_WRITE_MESSAGE;
+	ddc_data->data_buf = buf;
+	ddc_data->data_len = size;
+	ddc_data->hard_timeout = ctrl->timeout;
+	ddc_data->what = "HDCP2WriteMessage";
+
+	rc = sde_hdmi_ddc_write((void *)ctrl->init_data.cb_data);
+	if (rc)
+		SDE_ERROR("Cannot write HDCP message register\n");
+
+	ctrl->timeout_left = ddc_data->timeout_left;
+
+	return rc;
+}
+
+/*
+ * Read the one-byte HDCP2Version register (DDC offset 0x50) from the
+ * sink into @hdcp2version.  Returns 0 on success or a negative errno.
+ *
+ * Fixes: validate ddc_ctrl before dereferencing it (the old `!ddc_data`
+ * check on an embedded-member address was dead code), and add the
+ * missing newline to the error log message.
+ */
+static int sde_hdmi_hdcp2p2_read_version(struct sde_hdmi_hdcp2p2_ctrl *ctrl,
+		u8 *hdcp2version)
+{
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	int rc;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		SDE_ERROR("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	memset(ddc_data, 0, sizeof(*ddc_data));
+	ddc_data->dev_addr = HDCP_SINK_DDC_SLAVE_ADDR;
+	ddc_data->offset = HDCP_SINK_DDC_HDCP2_VERSION;
+	ddc_data->data_buf = hdcp2version;
+	ddc_data->data_len = 1;
+	ddc_data->request_len = 1;
+	ddc_data->retry = 1;
+	ddc_data->what = "HDCP2Version";
+
+	rc = sde_hdmi_ddc_read((void *)ctrl->init_data.cb_data);
+	if (rc) {
+		SDE_ERROR("Cannot read HDCP2Version register\n");
+		return rc;
+	}
+
+	SDE_HDCP_DEBUG("Read HDCP2Version as %u\n", *hdcp2version);
+	return rc;
+}
+
+/*
+ * sde_hdcp_ops .feature_supported callback: defer to the trustzone
+ * library's feature_supported op.  Any missing piece (ctrl, lib, or the
+ * op itself) means the feature is reported as unsupported.
+ */
+static bool sde_hdmi_hdcp2p2_feature_supported(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = input;
+	struct hdcp_txmtr_ops *lib;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return false;
+	}
+
+	lib = ctrl->lib;
+	if (!lib) {
+		SDE_ERROR("invalid lib ops data\n");
+		return false;
+	}
+
+	if (!lib->feature_supported)
+		return false;
+
+	return lib->feature_supported(ctrl->lib_ctx);
+}
+
+/*
+ * Forward the message cached by sde_hdmi_hdcp2p2_copy_buf() to the sink
+ * over DDC, then report send success/failure back to the trustzone
+ * library.
+ *
+ * The cached buffer is copied to a local allocation under msg_lock so
+ * the DDC write can run without holding the lock.
+ */
+static void sde_hdmi_hdcp2p2_send_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	int rc = 0;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	uint32_t msglen;
+	char *msg = NULL;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		goto exit;
+	}
+
+	/* snapshot the cached message under the lock */
+	mutex_lock(&ctrl->msg_lock);
+	msglen = ctrl->send_msg_len;
+
+	if (!msglen) {
+		mutex_unlock(&ctrl->msg_lock);
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	msg = kzalloc(msglen, GFP_KERNEL);
+	if (!msg) {
+		mutex_unlock(&ctrl->msg_lock);
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	memcpy(msg, ctrl->send_msg_buf, msglen);
+	mutex_unlock(&ctrl->msg_lock);
+
+	/* Forward the message to the sink */
+	rc = sde_hdmi_hdcp2p2_ddc_wt_message(ctrl,
+		msg, (size_t)msglen);
+	if (rc) {
+		SDE_ERROR("Error sending msg to sink %d\n", rc);
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_FAILED;
+	} else {
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_SEND_SUCCESS;
+		cdata.timeout = ctrl->timeout_left;
+	}
+exit:
+	kfree(msg);
+
+	/* cdata.cmd stays INVALID on early exits: wakeup_lib ignores it */
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+}
+
+/* kthread_work trampoline for the send_msg work item */
+static void sde_hdmi_hdcp2p2_send_msg_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *hdcp_ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, send_msg);
+
+	sde_hdmi_hdcp2p2_send_msg(hdcp_ctrl);
+}
+
+/*
+ * Callback fired by the rxstatus polling machinery when the sink raises
+ * a link event; queues the link work unless the session is already down.
+ */
+static void sde_hdmi_hdcp2p2_link_cb(void *data)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = data;
+
+	if (!ctrl) {
+		SDE_HDCP_DEBUG("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE)
+		return;
+
+	queue_kthread_work(&ctrl->worker, &ctrl->link);
+}
+
+/*
+ * Wait for the sink to publish a message (via RxStatus over DDC), read
+ * it, and hand the bytes to the trustzone library.
+ *
+ * Converts ctrl->timeout into hsync ticks for the hardware rxstatus
+ * poll, honors a sink-initiated reauth request by failing the receive,
+ * and maps errors to MSG_RECV_TIMEOUT / MSG_RECV_FAILED for the lib.
+ */
+static void sde_hdmi_hdcp2p2_recv_msg(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	int timeout_hsync = 0, rc = 0;
+	char *recvd_msg_buf = NULL;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		goto exit;
+	}
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		pr_err("invalid ddc ctrl\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+	memset(ddc_data, 0, sizeof(*ddc_data));
+
+	/* express the ms timeout in horizontal-sync ticks for the HW poll */
+	timeout_hsync = _sde_hdmi_get_timeout_in_hysnc(
+		(void *)ctrl->init_data.cb_data, ctrl->timeout);
+
+	if (timeout_hsync <= 0) {
+		SDE_ERROR("err in timeout hsync calc\n");
+		timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+	}
+
+	SDE_HDCP_DEBUG("timeout for rxstatus %dms, %d hsync\n",
+		ctrl->timeout, timeout_hsync);
+
+	ddc_data->intr_mask = RXSTATUS_MESSAGE_SIZE | RXSTATUS_REAUTH_REQ;
+	ddc_data->timeout_ms = ctrl->timeout;
+	ddc_data->timeout_hsync = timeout_hsync;
+	ddc_data->periodic_timer_hsync = timeout_hsync / 20;
+	ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+	ddc_data->wait = true;
+
+	rc = sde_hdmi_hdcp2p2_read_rxstatus(ctrl->init_data.cb_data);
+	if (rc) {
+		SDE_ERROR("error reading rxstatus %d\n", rc);
+		goto exit;
+	}
+
+	/* sink asked for reauthentication: abort this receive */
+	if (ddc_data->reauth_req) {
+		ddc_data->reauth_req = false;
+
+		SDE_HDCP_DEBUG("reauth triggered by sink\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	ctrl->timeout_left = ddc_data->timeout_left;
+
+	SDE_HDCP_DEBUG("timeout left after rxstatus %dms, msg size %d\n",
+		ctrl->timeout_left, ddc_data->message_size);
+
+	if (!ddc_data->message_size) {
+		SDE_ERROR("recvd invalid message size\n");
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+	if (!recvd_msg_buf) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	rc = sde_hdmi_hdcp2p2_ddc_rd_message(ctrl, recvd_msg_buf,
+		ddc_data->message_size, ctrl->timeout_left);
+	if (rc) {
+		SDE_ERROR("error reading message %d\n", rc);
+		goto exit;
+	}
+
+	cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+	cdata.recvd_msg_buf = recvd_msg_buf;
+	cdata.recvd_msg_len = ddc_data->message_size;
+	cdata.timeout = ctrl->timeout_left;
+exit:
+	/* map the failure mode to the library's receive verdict */
+	if (rc == -ETIMEDOUT)
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_TIMEOUT;
+	else if (rc)
+		cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_FAILED;
+
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	kfree(recvd_msg_buf);
+}
+
+/* kthread_work trampoline for the recv_msg work item */
+static void sde_hdmi_hdcp2p2_recv_msg_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *hdcp_ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, recv_msg);
+
+	sde_hdmi_hdcp2p2_recv_msg(hdcp_ctrl);
+}
+
+/*
+ * Arm periodic rxstatus link monitoring: configure the DDC engine to
+ * poll RxStatus (ready / message-size / reauth-request bits) roughly
+ * every half second and invoke sde_hdmi_hdcp2p2_link_cb() on events.
+ */
+static int sde_hdmi_hdcp2p2_link_check(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	int timeout_hsync;
+
+	if (!ddc_ctrl)
+		return -EINVAL;
+
+	sde_hdmi_ddc_config(ctrl->init_data.cb_data);
+
+	ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+	memset(ddc_data, 0, sizeof(*ddc_data));
+
+	/* express half a second as a horizontal-sync tick count */
+	timeout_hsync = _sde_hdmi_get_timeout_in_hysnc(
+		(void *)ctrl->init_data.cb_data,
+		jiffies_to_msecs(HZ / 2));
+	if (timeout_hsync <= 0) {
+		SDE_ERROR("err in timeout hsync calc\n");
+		timeout_hsync = HDMI_DEFAULT_TIMEOUT_HSYNC;
+	}
+	SDE_HDCP_DEBUG("timeout for rxstatus %d hsyncs\n", timeout_hsync);
+
+	ddc_data->intr_mask = RXSTATUS_READY | RXSTATUS_MESSAGE_SIZE |
+		RXSTATUS_REAUTH_REQ;
+	ddc_data->timeout_hsync = timeout_hsync;
+	ddc_data->periodic_timer_hsync = timeout_hsync;
+	ddc_data->read_method = HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER;
+	ddc_data->link_cb = sde_hdmi_hdcp2p2_link_cb;
+	ddc_data->link_data = ctrl;
+
+	return sde_hdmi_hdcp2p2_read_rxstatus((void *)ctrl->init_data.cb_data);
+}
+
+/* kthread_work trampoline for the link-poll work item */
+static void sde_hdmi_hdcp2p2_poll_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *hdcp_ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, poll);
+
+	sde_hdmi_hdcp2p2_link_check(hdcp_ctrl);
+}
+
+/*
+ * Report the latched authentication result to the HDMI client and move
+ * the session into the AUTHENTICATED or failed state accordingly.
+ */
+static void sde_hdmi_hdcp2p2_auth_status(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	if (atomic_read(&ctrl->auth_state) == HDCP_STATE_INACTIVE) {
+		SDE_ERROR("hdcp is off\n");
+		return;
+	}
+
+	if (ctrl->auth_status != HDMI_HDCP_AUTH_STATUS_SUCCESS) {
+		sde_hdmi_hdcp2p2_auth_failed(ctrl);
+		return;
+	}
+
+	ctrl->init_data.notify_status(ctrl->init_data.cb_data,
+		HDCP_STATE_AUTHENTICATED);
+	atomic_set(&ctrl->auth_state, HDCP_STATE_AUTHENTICATED);
+}
+
+/* kthread_work trampoline for the auth-status work item */
+static void sde_hdmi_hdcp2p2_auth_status_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *hdcp_ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, status);
+
+	sde_hdmi_hdcp2p2_auth_status(hdcp_ctrl);
+}
+
+/*
+ * Link-maintenance work: runs when the rxstatus poll reports an event.
+ *
+ * A sink reauth request stops the library session; a ready flag with a
+ * pending message (e.g. a topology change) reads the message and passes
+ * it to the library.  Any error ends with sde_hdmi_hdcp2p2_auth_failed().
+ */
+static void sde_hdmi_hdcp2p2_link_work(struct kthread_work *work)
+{
+	int rc = 0;
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, link);
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	char *recvd_msg_buf = NULL;
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *ddc_data;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+
+	ddc_ctrl = ctrl->init_data.ddc_ctrl;
+	if (!ddc_ctrl) {
+		rc = -EINVAL;
+		cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+		goto exit;
+	}
+
+	ddc_data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+
+	/* sink demanded reauthentication: stop the library session */
+	if (ddc_data->reauth_req) {
+		SDE_HDCP_DEBUG("reauth triggered by sink\n");
+
+		ddc_data->reauth_req = false;
+		rc = -ENOLINK;
+		cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+		goto exit;
+	}
+
+	/* sink has a pending message (e.g. repeater topology change) */
+	if (ddc_data->ready && ddc_data->message_size) {
+		SDE_HDCP_DEBUG("topology changed. rxstatus msg size %d\n",
+			ddc_data->message_size);
+
+		ddc_data->ready = false;
+
+		recvd_msg_buf = kzalloc(ddc_data->message_size, GFP_KERNEL);
+		if (!recvd_msg_buf) {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+			goto exit;
+		}
+
+		rc = sde_hdmi_hdcp2p2_ddc_rd_message(ctrl, recvd_msg_buf,
+			ddc_data->message_size, HDCP2P2_DEFAULT_TIMEOUT);
+		if (rc) {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+			SDE_ERROR("error reading message %d\n", rc);
+		} else {
+			cdata.cmd = HDCP_LIB_WKUP_CMD_MSG_RECV_SUCCESS;
+			cdata.recvd_msg_buf = recvd_msg_buf;
+			cdata.recvd_msg_len = ddc_data->message_size;
+		}
+
+		ddc_data->message_size = 0;
+	}
+exit:
+	/* note: kzalloc failure above reaches here with rc still 0, so the
+	 * link is not failed in that case — only the library is stopped
+	 */
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	kfree(recvd_msg_buf);
+
+	if (rc) {
+		sde_hdmi_hdcp2p2_auth_failed(ctrl);
+		return;
+	}
+}
+
+/*
+ * Auth work body: tell the trustzone library to START when the session
+ * is in the authenticating state, otherwise to STOP (this is how
+ * sde_hdmi_hdcp2p2_off() shuts the library down).  A wakeup failure is
+ * reported as an authentication failure.
+ */
+static int sde_hdmi_hdcp2p2_auth(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+	int rc;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	cdata.context = ctrl->lib_ctx;
+	cdata.cmd =
+		(atomic_read(&ctrl->auth_state) == HDCP_STATE_AUTHENTICATING) ?
+			HDCP_LIB_WKUP_CMD_START : HDCP_LIB_WKUP_CMD_STOP;
+
+	rc = sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+	if (rc)
+		sde_hdmi_hdcp2p2_auth_failed(ctrl);
+
+	return rc;
+}
+
+/* kthread_work trampoline for the auth work item */
+static void sde_hdmi_hdcp2p2_auth_work(struct kthread_work *work)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *hdcp_ctrl = container_of(work,
+		struct sde_hdmi_hdcp2p2_ctrl, auth);
+
+	sde_hdmi_hdcp2p2_auth(hdcp_ctrl);
+}
+
+/*
+ * Tear down the HDCP 2.2 controller created by sde_hdmi_hdcp2p2_init().
+ *
+ * Fixes vs. the original:
+ *  - flush the kthread worker before kthread_stop() so queued work items
+ *    are drained rather than abandoned;
+ *  - guard kthread_stop() against a NULL thread (init NULLs it on the
+ *    kthread_run failure path);
+ *  - free the cached send_msg_buf, which was otherwise leaked.
+ */
+void sde_hdmi_hdcp2p2_deinit(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+	struct hdcp_lib_wakeup_data cdata = {HDCP_LIB_WKUP_CMD_INVALID};
+
+	ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return;
+	}
+
+	cdata.cmd = HDCP_LIB_WKUP_CMD_STOP;
+	cdata.context = ctrl->lib_ctx;
+	sde_hdmi_hdcp2p2_wakeup_lib(ctrl, &cdata);
+
+	/* drain pending work before stopping the worker thread */
+	flush_kthread_worker(&ctrl->worker);
+	if (ctrl->thread)
+		kthread_stop(ctrl->thread);
+
+	kzfree(ctrl->send_msg_buf);
+
+	mutex_destroy(&ctrl->mutex);
+	mutex_destroy(&ctrl->msg_lock);
+	mutex_destroy(&ctrl->wakeup_mutex);
+	kfree(ctrl);
+}
+
+/*
+ * Create the HDCP 2.2 controller: validate the init data, register with
+ * the trustzone HDCP library, and spin up the dedicated worker thread.
+ * Returns the opaque controller handle or an ERR_PTR.
+ *
+ * Fix: the error path left the three initialized mutexes undestroyed;
+ * destroy them before freeing the controller.
+ * NOTE(review): a failed kthread_run also leaves the controller
+ * registered with the HDCP library — confirm whether a deregister call
+ * is available and required here.
+ */
+void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data)
+{
+	int rc;
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+	static struct sde_hdcp_ops ops = {
+		.reauthenticate = sde_hdmi_hdcp2p2_reauthenticate,
+		.authenticate = sde_hdmi_hdcp2p2_authenticate,
+		.feature_supported = sde_hdmi_hdcp2p2_feature_supported,
+		.off = sde_hdmi_hdcp2p2_off
+	};
+
+	static struct hdcp_client_ops client_ops = {
+		.wakeup = sde_hdmi_hdcp2p2_wakeup,
+		.notify_lvl_change = sde_hdmi_hdcp2p2_min_level_change,
+		.srm_cb = sde_hdmi_hdcp2p2_srm_cb,
+		.mute_sink = sde_hdmi_hdcp2p2_mute_sink,
+	};
+
+	static struct hdcp_txmtr_ops txmtr_ops;
+	struct hdcp_register_data register_data;
+
+	SDE_HDCP_DEBUG("HDCP2P2 feature initialization\n");
+
+	if (!init_data || !init_data->core_io || !init_data->mutex ||
+		!init_data->ddc_ctrl || !init_data->notify_status ||
+		!init_data->workq || !init_data->cb_data) {
+		SDE_ERROR("invalid input\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* the data-encryption block only exists on HDMI TX v4.0.0+ */
+	if (init_data->hdmi_tx_ver < MIN_HDMI_TX_MAJOR_VERSION) {
+		SDE_ERROR("HDMI Tx does not support HDCP 2.2\n");
+		return ERR_PTR(-ENODEV);
+	}
+
+	ctrl = kzalloc(sizeof(*ctrl), GFP_KERNEL);
+	if (!ctrl)
+		return ERR_PTR(-ENOMEM);
+
+	ctrl->init_data = *init_data;
+	ctrl->lib = &txmtr_ops;
+
+	ctrl->sink_status = SINK_DISCONNECTED;
+
+	atomic_set(&ctrl->auth_state, HDCP_STATE_INACTIVE);
+
+	ctrl->ops = &ops;
+	mutex_init(&ctrl->mutex);
+	mutex_init(&ctrl->msg_lock);
+	mutex_init(&ctrl->wakeup_mutex);
+
+	register_data.hdcp_ctx = &ctrl->lib_ctx;
+	register_data.client_ops = &client_ops;
+	register_data.txmtr_ops = &txmtr_ops;
+	register_data.device_type = HDCP_TXMTR_HDMI;
+	register_data.client_ctx = ctrl;
+
+	rc = hdcp_library_register(&register_data);
+	if (rc) {
+		SDE_ERROR("Unable to register with HDCP 2.2 library\n");
+		goto error;
+	}
+
+	init_kthread_worker(&ctrl->worker);
+
+	init_kthread_work(&ctrl->auth, sde_hdmi_hdcp2p2_auth_work);
+	init_kthread_work(&ctrl->send_msg, sde_hdmi_hdcp2p2_send_msg_work);
+	init_kthread_work(&ctrl->recv_msg, sde_hdmi_hdcp2p2_recv_msg_work);
+	init_kthread_work(&ctrl->status, sde_hdmi_hdcp2p2_auth_status_work);
+	init_kthread_work(&ctrl->link, sde_hdmi_hdcp2p2_link_work);
+	init_kthread_work(&ctrl->poll, sde_hdmi_hdcp2p2_poll_work);
+
+	ctrl->thread = kthread_run(kthread_worker_fn,
+		&ctrl->worker, "hdmi_hdcp2p2");
+
+	if (IS_ERR(ctrl->thread)) {
+		SDE_ERROR("unable to start hdcp2p2 thread\n");
+		rc = PTR_ERR(ctrl->thread);
+		ctrl->thread = NULL;
+		goto error;
+	}
+
+	return ctrl;
+error:
+	/* both error sites are reached after the mutexes were initialized */
+	mutex_destroy(&ctrl->mutex);
+	mutex_destroy(&ctrl->msg_lock);
+	mutex_destroy(&ctrl->wakeup_mutex);
+	kfree(ctrl);
+	return ERR_PTR(rc);
+}
+
+/*
+ * Query the sink's HDCP2Version DDC register and report whether it
+ * advertises HDCP 2.2 support (bit 2 of the version byte).
+ */
+static bool sde_hdmi_hdcp2p2_supported(struct sde_hdmi_hdcp2p2_ctrl *ctrl)
+{
+	u8 hdcp2version = 0;
+
+	if (sde_hdmi_hdcp2p2_read_version(ctrl, &hdcp2version))
+		goto not_capable;
+
+	if (hdcp2version & BIT(2)) {
+		SDE_HDCP_DEBUG("Sink is HDCP 2.2 capable\n");
+		return true;
+	}
+
+not_capable:
+	SDE_HDCP_DEBUG("Sink is not HDCP 2.2 capable\n");
+	return false;
+}
+
+/*
+ * Entry point for the HDMI driver: probe the sink's HDCP 2.2 capability
+ * and, if supported, hand back the ops table to drive this module.
+ * Returns NULL when the sink is not capable.
+ *
+ * Fix: add the NULL-input guard every other public entry point in this
+ * file has — the original dereferenced ctrl->ops unconditionally.
+ */
+struct sde_hdcp_ops *sde_hdmi_hdcp2p2_start(void *input)
+{
+	struct sde_hdmi_hdcp2p2_ctrl *ctrl;
+
+	ctrl = (struct sde_hdmi_hdcp2p2_ctrl *)input;
+
+	if (!ctrl) {
+		SDE_ERROR("invalid input\n");
+		return NULL;
+	}
+
+	SDE_HDCP_DEBUG("Checking sink capability\n");
+	if (sde_hdmi_hdcp2p2_supported(ctrl))
+		return ctrl->ops;
+
+	return NULL;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
new file mode 100644
index 000000000000..f1bff0f08051
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_regs.h
@@ -0,0 +1,300 @@
+/* Copyright (c) 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HDMI_REGS_H
+#define _SDE_HDMI_REGS_H
+
+/* HDMI_TX Registers */
+#define HDMI_CTRL (0x00000000)
+#define HDMI_TEST_PATTERN (0x00000010)
+#define HDMI_RANDOM_PATTERN (0x00000014)
+#define HDMI_PKT_BLK_CTRL (0x00000018)
+#define HDMI_STATUS (0x0000001C)
+#define HDMI_AUDIO_PKT_CTRL (0x00000020)
+#define HDMI_ACR_PKT_CTRL (0x00000024)
+#define HDMI_VBI_PKT_CTRL (0x00000028)
+#define HDMI_INFOFRAME_CTRL0 (0x0000002C)
+#define HDMI_INFOFRAME_CTRL1 (0x00000030)
+#define HDMI_GEN_PKT_CTRL (0x00000034)
+#define HDMI_ACP (0x0000003C)
+#define HDMI_GC (0x00000040)
+#define HDMI_AUDIO_PKT_CTRL2 (0x00000044)
+#define HDMI_ISRC1_0 (0x00000048)
+#define HDMI_ISRC1_1 (0x0000004C)
+#define HDMI_ISRC1_2 (0x00000050)
+#define HDMI_ISRC1_3 (0x00000054)
+#define HDMI_ISRC1_4 (0x00000058)
+#define HDMI_ISRC2_0 (0x0000005C)
+#define HDMI_ISRC2_1 (0x00000060)
+#define HDMI_ISRC2_2 (0x00000064)
+#define HDMI_ISRC2_3 (0x00000068)
+#define HDMI_AVI_INFO0 (0x0000006C)
+#define HDMI_AVI_INFO1 (0x00000070)
+#define HDMI_AVI_INFO2 (0x00000074)
+#define HDMI_AVI_INFO3 (0x00000078)
+#define HDMI_MPEG_INFO0 (0x0000007C)
+#define HDMI_MPEG_INFO1 (0x00000080)
+#define HDMI_GENERIC0_HDR (0x00000084)
+#define HDMI_GENERIC0_0 (0x00000088)
+#define HDMI_GENERIC0_1 (0x0000008C)
+#define HDMI_GENERIC0_2 (0x00000090)
+#define HDMI_GENERIC0_3 (0x00000094)
+#define HDMI_GENERIC0_4 (0x00000098)
+#define HDMI_GENERIC0_5 (0x0000009C)
+#define HDMI_GENERIC0_6 (0x000000A0)
+#define HDMI_GENERIC1_HDR (0x000000A4)
+#define HDMI_GENERIC1_0 (0x000000A8)
+#define HDMI_GENERIC1_1 (0x000000AC)
+#define HDMI_GENERIC1_2 (0x000000B0)
+#define HDMI_GENERIC1_3 (0x000000B4)
+#define HDMI_GENERIC1_4 (0x000000B8)
+#define HDMI_GENERIC1_5 (0x000000BC)
+#define HDMI_GENERIC1_6 (0x000000C0)
+#define HDMI_ACR_32_0 (0x000000C4)
+#define HDMI_ACR_32_1 (0x000000C8)
+#define HDMI_ACR_44_0 (0x000000CC)
+#define HDMI_ACR_44_1 (0x000000D0)
+#define HDMI_ACR_48_0 (0x000000D4)
+#define HDMI_ACR_48_1 (0x000000D8)
+#define HDMI_ACR_STATUS_0 (0x000000DC)
+#define HDMI_ACR_STATUS_1 (0x000000E0)
+#define HDMI_AUDIO_INFO0 (0x000000E4)
+#define HDMI_AUDIO_INFO1 (0x000000E8)
+#define HDMI_CS_60958_0 (0x000000EC)
+#define HDMI_CS_60958_1 (0x000000F0)
+#define HDMI_RAMP_CTRL0 (0x000000F8)
+#define HDMI_RAMP_CTRL1 (0x000000FC)
+#define HDMI_RAMP_CTRL2 (0x00000100)
+#define HDMI_RAMP_CTRL3 (0x00000104)
+#define HDMI_CS_60958_2 (0x00000108)
+#define HDMI_HDCP_CTRL2 (0x0000010C)
+#define HDMI_HDCP_CTRL (0x00000110)
+#define HDMI_HDCP_DEBUG_CTRL (0x00000114)
+#define HDMI_HDCP_INT_CTRL (0x00000118)
+#define HDMI_HDCP_LINK0_STATUS (0x0000011C)
+#define HDMI_HDCP_DDC_CTRL_0 (0x00000120)
+#define HDMI_HDCP_DDC_CTRL_1 (0x00000124)
+#define HDMI_HDCP_DDC_STATUS (0x00000128)
+#define HDMI_HDCP_ENTROPY_CTRL0 (0x0000012C)
+#define HDMI_HDCP_RESET (0x00000130)
+#define HDMI_HDCP_RCVPORT_DATA0 (0x00000134)
+#define HDMI_HDCP_RCVPORT_DATA1 (0x00000138)
+#define HDMI_HDCP_RCVPORT_DATA2_0 (0x0000013C)
+#define HDMI_HDCP_RCVPORT_DATA2_1 (0x00000140)
+#define HDMI_HDCP_RCVPORT_DATA3 (0x00000144)
+#define HDMI_HDCP_RCVPORT_DATA4 (0x00000148)
+#define HDMI_HDCP_RCVPORT_DATA5 (0x0000014C)
+#define HDMI_HDCP_RCVPORT_DATA6 (0x00000150)
+#define HDMI_HDCP_RCVPORT_DATA7 (0x00000154)
+#define HDMI_HDCP_RCVPORT_DATA8 (0x00000158)
+#define HDMI_HDCP_RCVPORT_DATA9 (0x0000015C)
+#define HDMI_HDCP_RCVPORT_DATA10 (0x00000160)
+#define HDMI_HDCP_RCVPORT_DATA11 (0x00000164)
+#define HDMI_HDCP_RCVPORT_DATA12 (0x00000168)
+#define HDMI_VENSPEC_INFO0 (0x0000016C)
+#define HDMI_VENSPEC_INFO1 (0x00000170)
+#define HDMI_VENSPEC_INFO2 (0x00000174)
+#define HDMI_VENSPEC_INFO3 (0x00000178)
+#define HDMI_VENSPEC_INFO4 (0x0000017C)
+#define HDMI_VENSPEC_INFO5 (0x00000180)
+#define HDMI_VENSPEC_INFO6 (0x00000184)
+#define HDMI_HDCP_DEBUG (0x00000194)
+#define HDMI_TMDS_CTRL_CHAR (0x0000019C)
+#define HDMI_TMDS_CTRL_SEL (0x000001A4)
+#define HDMI_TMDS_SYNCCHAR01 (0x000001A8)
+#define HDMI_TMDS_SYNCCHAR23 (0x000001AC)
+#define HDMI_TMDS_DEBUG (0x000001B4)
+#define HDMI_TMDS_CTL_BITS (0x000001B8)
+#define HDMI_TMDS_DCBAL_CTRL (0x000001BC)
+#define HDMI_TMDS_DCBAL_CHAR (0x000001C0)
+#define HDMI_TMDS_CTL01_GEN (0x000001C8)
+#define HDMI_TMDS_CTL23_GEN (0x000001CC)
+#define HDMI_AUDIO_CFG (0x000001D0)
+#define HDMI_DEBUG (0x00000204)
+#define HDMI_USEC_REFTIMER (0x00000208)
+#define HDMI_DDC_CTRL (0x0000020C)
+#define HDMI_DDC_ARBITRATION (0x00000210)
+#define HDMI_DDC_INT_CTRL (0x00000214)
+#define HDMI_DDC_SW_STATUS (0x00000218)
+#define HDMI_DDC_HW_STATUS (0x0000021C)
+#define HDMI_DDC_SPEED (0x00000220)
+#define HDMI_DDC_SETUP (0x00000224)
+#define HDMI_DDC_TRANS0 (0x00000228)
+#define HDMI_DDC_TRANS1 (0x0000022C)
+#define HDMI_DDC_TRANS2 (0x00000230)
+#define HDMI_DDC_TRANS3 (0x00000234)
+#define HDMI_DDC_DATA (0x00000238)
+#define HDMI_HDCP_SHA_CTRL (0x0000023C)
+#define HDMI_HDCP_SHA_STATUS (0x00000240)
+#define HDMI_HDCP_SHA_DATA (0x00000244)
+#define HDMI_HDCP_SHA_DBG_M0_0 (0x00000248)
+#define HDMI_HDCP_SHA_DBG_M0_1 (0x0000024C)
+#define HDMI_HPD_INT_STATUS (0x00000250)
+#define HDMI_HPD_INT_CTRL (0x00000254)
+#define HDMI_HPD_CTRL (0x00000258)
+#define HDMI_HDCP_ENTROPY_CTRL1 (0x0000025C)
+#define HDMI_HDCP_SW_UPPER_AN (0x00000260)
+#define HDMI_HDCP_SW_LOWER_AN (0x00000264)
+#define HDMI_CRC_CTRL (0x00000268)
+#define HDMI_VID_CRC (0x0000026C)
+#define HDMI_AUD_CRC (0x00000270)
+#define HDMI_VBI_CRC (0x00000274)
+#define HDMI_DDC_REF (0x0000027C)
+#define HDMI_HDCP_SW_UPPER_AKSV (0x00000284)
+#define HDMI_HDCP_SW_LOWER_AKSV (0x00000288)
+#define HDMI_CEC_CTRL (0x0000028C)
+#define HDMI_CEC_WR_DATA (0x00000290)
+#define HDMI_CEC_RETRANSMIT (0x00000294)
+#define HDMI_CEC_STATUS (0x00000298)
+#define HDMI_CEC_INT (0x0000029C)
+#define HDMI_CEC_ADDR (0x000002A0)
+#define HDMI_CEC_TIME (0x000002A4)
+#define HDMI_CEC_REFTIMER (0x000002A8)
+#define HDMI_CEC_RD_DATA (0x000002AC)
+#define HDMI_CEC_RD_FILTER (0x000002B0)
+#define HDMI_ACTIVE_H (0x000002B4)
+#define HDMI_ACTIVE_V (0x000002B8)
+#define HDMI_ACTIVE_V_F2 (0x000002BC)
+#define HDMI_TOTAL (0x000002C0)
+#define HDMI_V_TOTAL_F2 (0x000002C4)
+#define HDMI_FRAME_CTRL (0x000002C8)
+#define HDMI_AUD_INT (0x000002CC)
+#define HDMI_DEBUG_BUS_CTRL (0x000002D0)
+#define HDMI_PHY_CTRL (0x000002D4)
+#define HDMI_CEC_WR_RANGE (0x000002DC)
+#define HDMI_CEC_RD_RANGE (0x000002E0)
+#define HDMI_VERSION (0x000002E4)
+#define HDMI_BIST_ENABLE (0x000002F4)
+#define HDMI_TIMING_ENGINE_EN (0x000002F8)
+#define HDMI_INTF_CONFIG (0x000002FC)
+#define HDMI_HSYNC_CTL (0x00000300)
+#define HDMI_VSYNC_PERIOD_F0 (0x00000304)
+#define HDMI_VSYNC_PERIOD_F1 (0x00000308)
+#define HDMI_VSYNC_PULSE_WIDTH_F0 (0x0000030C)
+#define HDMI_VSYNC_PULSE_WIDTH_F1 (0x00000310)
+#define HDMI_DISPLAY_V_START_F0 (0x00000314)
+#define HDMI_DISPLAY_V_START_F1 (0x00000318)
+#define HDMI_DISPLAY_V_END_F0 (0x0000031C)
+#define HDMI_DISPLAY_V_END_F1 (0x00000320)
+#define HDMI_ACTIVE_V_START_F0 (0x00000324)
+#define HDMI_ACTIVE_V_START_F1 (0x00000328)
+#define HDMI_ACTIVE_V_END_F0 (0x0000032C)
+#define HDMI_ACTIVE_V_END_F1 (0x00000330)
+#define HDMI_DISPLAY_HCTL (0x00000334)
+#define HDMI_ACTIVE_HCTL (0x00000338)
+#define HDMI_HSYNC_SKEW (0x0000033C)
+#define HDMI_POLARITY_CTL (0x00000340)
+#define HDMI_TPG_MAIN_CONTROL (0x00000344)
+#define HDMI_TPG_VIDEO_CONFIG (0x00000348)
+#define HDMI_TPG_COMPONENT_LIMITS (0x0000034C)
+#define HDMI_TPG_RECTANGLE (0x00000350)
+#define HDMI_TPG_INITIAL_VALUE (0x00000354)
+#define HDMI_TPG_BLK_WHT_PATTERN_FRAMES (0x00000358)
+#define HDMI_TPG_RGB_MAPPING (0x0000035C)
+#define HDMI_CEC_COMPL_CTL (0x00000360)
+#define HDMI_CEC_RD_START_RANGE (0x00000364)
+#define HDMI_CEC_RD_TOTAL_RANGE (0x00000368)
+#define HDMI_CEC_RD_ERR_RESP_LO (0x0000036C)
+#define HDMI_CEC_WR_CHECK_CONFIG (0x00000370)
+#define HDMI_INTERNAL_TIMING_MODE (0x00000374)
+#define HDMI_CTRL_SW_RESET (0x00000378)
+#define HDMI_CTRL_AUDIO_RESET (0x0000037C)
+#define HDMI_SCRATCH (0x00000380)
+#define HDMI_CLK_CTRL (0x00000384)
+#define HDMI_CLK_ACTIVE (0x00000388)
+#define HDMI_VBI_CFG (0x0000038C)
+#define HDMI_DDC_INT_CTRL0 (0x00000430)
+#define HDMI_DDC_INT_CTRL1 (0x00000434)
+#define HDMI_DDC_INT_CTRL2 (0x00000438)
+#define HDMI_DDC_INT_CTRL3 (0x0000043C)
+#define HDMI_DDC_INT_CTRL4 (0x00000440)
+#define HDMI_DDC_INT_CTRL5 (0x00000444)
+#define HDMI_HDCP2P2_DDC_CTRL (0x0000044C)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL (0x00000450)
+#define HDMI_HDCP2P2_DDC_TIMER_CTRL2 (0x00000454)
+#define HDMI_HDCP2P2_DDC_STATUS (0x00000458)
+#define HDMI_SCRAMBLER_STATUS_DDC_CTRL (0x00000464)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL (0x00000468)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 (0x0000046C)
+#define HDMI_SCRAMBLER_STATUS_DDC_STATUS (0x00000470)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS (0x00000474)
+#define HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 (0x00000478)
+#define HDMI_HW_DDC_CTRL (0x000004CC)
+#define HDMI_HDCP2P2_DDC_SW_TRIGGER (0x000004D0)
+#define HDMI_HDCP_STATUS (0x00000500)
+#define HDMI_HDCP_INT_CTRL2 (0x00000504)
+
+/* HDMI PHY Registers */
+#define HDMI_PHY_ANA_CFG0 (0x00000000)
+#define HDMI_PHY_ANA_CFG1 (0x00000004)
+#define HDMI_PHY_PD_CTRL0 (0x00000010)
+#define HDMI_PHY_PD_CTRL1 (0x00000014)
+#define HDMI_PHY_BIST_CFG0 (0x00000034)
+#define HDMI_PHY_BIST_PATN0 (0x0000003C)
+#define HDMI_PHY_BIST_PATN1 (0x00000040)
+#define HDMI_PHY_BIST_PATN2 (0x00000044)
+#define HDMI_PHY_BIST_PATN3 (0x00000048)
+
+/* QFPROM Registers for HDMI/HDCP */
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_LSB (0x000000F8)
+#define QFPROM_RAW_FEAT_CONFIG_ROW0_MSB (0x000000FC)
+#define QFPROM_RAW_VERSION_4 (0x000000A8)
+#define SEC_CTRL_HW_VERSION (0x00006000)
+#define HDCP_KSV_LSB (0x000060D8)
+#define HDCP_KSV_MSB (0x000060DC)
+#define HDCP_KSV_VERSION_4_OFFSET (0x00000014)
+
+/* SEC_CTRL version that supports HDCP SEL */
+#define HDCP_SEL_MIN_SEC_VERSION (0x50010000)
+
+#define LPASS_LPAIF_RDDMA_CTL0 (0xFE152000)
+#define LPASS_LPAIF_RDDMA_PER_CNT0 (0x00000014)
+
+/* TX major version that supports scrambling */
+#define HDMI_TX_SCRAMBLER_MIN_TX_VERSION 0x04
+
+/* TX major versions */
+#define HDMI_TX_VERSION_4 4
+#define HDMI_TX_VERSION_3 3
+
+/* HDMI SCDC register offsets */
+#define HDMI_SCDC_UPDATE_0 0x10
+#define HDMI_SCDC_UPDATE_1 0x11
+#define HDMI_SCDC_TMDS_CONFIG 0x20
+#define HDMI_SCDC_SCRAMBLER_STATUS 0x21
+#define HDMI_SCDC_CONFIG_0 0x30
+#define HDMI_SCDC_STATUS_FLAGS_0 0x40
+#define HDMI_SCDC_STATUS_FLAGS_1 0x41
+#define HDMI_SCDC_ERR_DET_0_L 0x50
+#define HDMI_SCDC_ERR_DET_0_H 0x51
+#define HDMI_SCDC_ERR_DET_1_L 0x52
+#define HDMI_SCDC_ERR_DET_1_H 0x53
+#define HDMI_SCDC_ERR_DET_2_L 0x54
+#define HDMI_SCDC_ERR_DET_2_H 0x55
+#define HDMI_SCDC_ERR_DET_CHECKSUM 0x56
+
+/* HDCP secure registers directly accessible to HLOS since HDMI controller
+ * major version 4.0
+ */
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0 (0x00000004)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1 (0x00000008)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7 (0x0000000C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8 (0x00000010)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9 (0x00000014)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10 (0x00000018)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11 (0x0000001C)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12 (0x00000020)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL (0x00000024)
+#define HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA (0x00000028)
+
+#endif /* _SDE_HDMI_REGS_H */
+
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
new file mode 100644
index 000000000000..2928e2ec5b83
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.c
@@ -0,0 +1,1142 @@
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/slab.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <linux/iopoll.h>
+#include <linux/types.h>
+#include <linux/switch.h>
+#include <linux/gcd.h>
+
+#include "drm_edid.h"
+#include "sde_kms.h"
+#include "sde_hdmi.h"
+#include "sde_hdmi_regs.h"
+#include "hdmi.h"
+
+#define HDMI_SEC_TO_MS 1000
+#define HDMI_MS_TO_US 1000
+#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US)
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_BUSY_WAIT_DELAY_US 100
+
+/*
+ * sde_hdmi_hdcp2p2_ddc_clear_status - ack any pending HDCP2P2 DDC errors
+ * @display: sde_hdmi display handle
+ *
+ * Reads HDMI_HDCP2P2_DDC_STATUS and, for each error condition flagged
+ * (abort, timeout, NACK0, NACK1), sets the adjacent write-to-ack bit,
+ * then writes the register back to clear the hardware status.
+ */
+static void sde_hdmi_hdcp2p2_ddc_clear_status(struct sde_hdmi *display)
+{
+	u32 reg_val;
+	struct hdmi *hdmi;
+
+	if (!display) {
+		pr_err("invalid ddc ctrl\n");
+		return;
+	}
+	hdmi = display->ctrl.ctrl;
+	/* check for errors and clear status */
+	reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_STATUS);
+
+	/* BIT(4) = aborted; BIT(5) is its ack */
+	if (reg_val & BIT(4)) {
+		pr_debug("ddc aborted\n");
+		reg_val |= BIT(5);
+	}
+
+	/* BIT(8) = timeout; BIT(9) is its ack */
+	if (reg_val & BIT(8)) {
+		pr_debug("timed out\n");
+		reg_val |= BIT(9);
+	}
+
+	/* BIT(12) = NACK0; BIT(13) is its ack */
+	if (reg_val & BIT(12)) {
+		pr_debug("NACK0\n");
+		reg_val |= BIT(13);
+	}
+
+	/* BIT(14) = NACK1; BIT(15) is its ack */
+	if (reg_val & BIT(14)) {
+		pr_debug("NACK1\n");
+		reg_val |= BIT(15);
+	}
+
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_STATUS, reg_val);
+}
+
+/*
+ * sde_hdmi_hdr_sname - map an HDR state enum to a printable name
+ * @hdr_state: state to translate
+ *
+ * Returns a static string; unknown values map to "HDR_INVALID_STATE".
+ */
+static const char *sde_hdmi_hdr_sname(enum sde_hdmi_hdr_state hdr_state)
+{
+	switch (hdr_state) {
+	case HDR_DISABLE: return "HDR_DISABLE";
+	case HDR_ENABLE: return "HDR_ENABLE";
+	case HDR_RESET: return "HDR_RESET";
+	default: return "HDR_INVALID_STATE";
+	}
+}
+
+/*
+ * sde_hdmi_infoframe_checksum - compute an HDMI infoframe checksum
+ * @ptr: buffer holding header plus payload
+ * @size: number of bytes to sum
+ *
+ * Returns 256 - (sum of bytes), truncated to u8, so that all bytes of
+ * the frame including the checksum sum to zero modulo 256.
+ */
+static u8 sde_hdmi_infoframe_checksum(u8 *ptr, size_t size)
+{
+	u8 csum = 0;
+	size_t i;
+
+	/* compute checksum */
+	for (i = 0; i < size; i++)
+		csum += ptr[i];
+
+	return 256 - csum;
+}
+
+/*
+ * sde_hdmi_hdr_set_chksum - build a DRM (HDR) infoframe and return its
+ * checksum
+ * @hdr_meta: HDR metadata (EOTF, primaries, white point, luminance levels)
+ *
+ * Serializes the metadata into a temporary Dynamic Range and Mastering
+ * infoframe (type 0x87, version 1, 26-byte payload) and computes the
+ * frame checksum over header plus payload. Returns the checksum, or 0
+ * if the temporary buffer allocation fails.
+ *
+ * NOTE(review): chksum is a u32 but the function returns u8 — the value
+ * from sde_hdmi_infoframe_checksum() already fits in a byte, so the
+ * truncation is benign, but the types should probably agree.
+ */
+u8 sde_hdmi_hdr_set_chksum(struct drm_msm_ext_panel_hdr_metadata *hdr_meta)
+{
+	u8 *buff;
+	u8 *ptr;
+	u32 length;
+	u32 size;
+	u32 chksum = 0;
+	u32 const type_code = 0x87;
+	u32 const version = 0x01;
+	u32 const descriptor_id = 0x00;
+
+	/* length of metadata is 26 bytes */
+	length = 0x1a;
+	/* add 4 bytes for the header */
+	size = length + HDMI_INFOFRAME_HEADER_SIZE;
+
+	buff = kzalloc(size, GFP_KERNEL);
+
+	if (!buff) {
+		SDE_ERROR("invalid buff\n");
+		goto err_alloc;
+	}
+
+	/* keep the base pointer for checksum and kfree */
+	ptr = buff;
+
+	buff[0] = type_code;
+	buff[1] = version;
+	buff[2] = length;
+	buff[3] = 0;	/* checksum placeholder, included in the sum as 0 */
+	/* start infoframe payload */
+	buff += HDMI_INFOFRAME_HEADER_SIZE;
+
+	buff[0] = hdr_meta->eotf;
+	buff[1] = descriptor_id;
+
+	/* display primaries, little-endian 16-bit each */
+	buff[2] = hdr_meta->display_primaries_x[0] & 0xff;
+	buff[3] = hdr_meta->display_primaries_x[0] >> 8;
+
+	buff[4] = hdr_meta->display_primaries_x[1] & 0xff;
+	buff[5] = hdr_meta->display_primaries_x[1] >> 8;
+
+	buff[6] = hdr_meta->display_primaries_x[2] & 0xff;
+	buff[7] = hdr_meta->display_primaries_x[2] >> 8;
+
+	buff[8] = hdr_meta->display_primaries_y[0] & 0xff;
+	buff[9] = hdr_meta->display_primaries_y[0] >> 8;
+
+	buff[10] = hdr_meta->display_primaries_y[1] & 0xff;
+	buff[11] = hdr_meta->display_primaries_y[1] >> 8;
+
+	buff[12] = hdr_meta->display_primaries_y[2] & 0xff;
+	buff[13] = hdr_meta->display_primaries_y[2] >> 8;
+
+	buff[14] = hdr_meta->white_point_x & 0xff;
+	buff[15] = hdr_meta->white_point_x >> 8;
+	buff[16] = hdr_meta->white_point_y & 0xff;
+	buff[17] = hdr_meta->white_point_y >> 8;
+
+	buff[18] = hdr_meta->max_luminance & 0xff;
+	buff[19] = hdr_meta->max_luminance >> 8;
+
+	buff[20] = hdr_meta->min_luminance & 0xff;
+	buff[21] = hdr_meta->min_luminance >> 8;
+
+	buff[22] = hdr_meta->max_content_light_level & 0xff;
+	buff[23] = hdr_meta->max_content_light_level >> 8;
+
+	buff[24] = hdr_meta->max_average_light_level & 0xff;
+	buff[25] = hdr_meta->max_average_light_level >> 8;
+
+	chksum = sde_hdmi_infoframe_checksum(ptr, size);
+
+	kfree(ptr);
+
+err_alloc:
+	return chksum;
+}
+
+/**
+ * sde_hdmi_dump_regs - utility to dump HDMI regs
+ * @hdmi_display: Pointer to private display handle
+ *
+ * Dumps the whole HDMI core register space to the kernel log, four
+ * 32-bit words per line. Bails out if the display handle, controller,
+ * power state, or connection state is not valid/ready.
+ *
+ * Return : void
+ */
+
+void sde_hdmi_dump_regs(void *hdmi_display)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi;
+	int i;
+	u32 addr_off = 0;
+	u32 len = 0;
+
+	if (!display) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!hdmi->power_on || !display->connected) {
+		SDE_ERROR("HDMI display is not ready\n");
+		return;
+	}
+
+	len = hdmi->mmio_len;
+
+	/* round up to whole 16-byte rows, then convert to a row count */
+	if (len % 16)
+		len += 16;
+	len /= 16;
+
+	pr_info("HDMI CORE regs\n");
+	for (i = 0; i < len; i++) {
+		u32 x0, x4, x8, xc;
+
+		x0 = hdmi_read(hdmi, addr_off+0x0);
+		x4 = hdmi_read(hdmi, addr_off+0x4);
+		x8 = hdmi_read(hdmi, addr_off+0x8);
+		xc = hdmi_read(hdmi, addr_off+0xc);
+
+		pr_info("%08x : %08x %08x %08x %08x\n", addr_off, x0, x4, x8,
+			xc);
+
+		addr_off += 16;
+	}
+}
+
+/*
+ * sde_hdmi_ddc_hdcp2p2_isr - service HDCP 2.2 DDC/encryption interrupts
+ * @hdmi_display: sde_hdmi display handle
+ *
+ * Reads the HDCP2P2-related interrupt registers, acks every asserted
+ * condition (encryption ready/not-ready, RxStatus message size, ready,
+ * reauth request, DDC fail/done/read-req, DDC request timeout), records
+ * the outcome in the controller's sde_hdcp2p2_ddc_data, and either
+ * completes the rx_status_done waiter or invokes the registered link
+ * callback when new RxStatus work is pending. Finally clears any DDC
+ * error status. Returns 0, or -EINVAL if a new message/reauth has no
+ * handler or the hdmi handle is missing.
+ *
+ * NOTE(review): display is dereferenced (ddc_ctrl, data) before any NULL
+ * check — confirm all callers guarantee a non-NULL hdmi_display.
+ */
+int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display)
+{
+	struct sde_hdmi_tx_hdcp2p2_ddc_data *data;
+	u32 intr0, intr2, intr5;
+	u32 msg_size;
+	int rc = 0;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi;
+
+	ddc_ctrl = &display->ddc_ctrl;
+	data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	intr0 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL0);
+	intr2 = hdmi_read(hdmi, HDMI_HDCP_INT_CTRL2);
+	intr5 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr0: 0x%x, intr2: 0x%x, intr5: 0x%x\n",
+		intr0, intr2, intr5);
+
+	/* check if encryption is enabled */
+	if (intr2 & BIT(0)) {
+		/*
+		 * ack encryption ready interrupt.
+		 * disable encryption ready interrupt.
+		 * enable encryption not ready interrupt.
+		 */
+		intr2 &= ~BIT(2);
+		intr2 |= BIT(1) | BIT(6);
+
+		pr_info("HDCP 2.2 Encryption enabled\n");
+		data->encryption_ready = true;
+	}
+
+	/* check if encryption is disabled */
+	if (intr2 & BIT(4)) {
+		/*
+		 * ack encryption not ready interrupt.
+		 * disable encryption not ready interrupt.
+		 * enable encryption ready interrupt.
+		 */
+		intr2 &= ~BIT(6);
+		intr2 |= BIT(5) | BIT(2);
+
+		pr_info("HDCP 2.2 Encryption disabled\n");
+		data->encryption_ready = false;
+	}
+
+	hdmi_write(hdmi, HDMI_HDCP_INT_CTRL2, intr2);
+
+	/* get the message size bits 29:20 */
+	msg_size = (intr0 & (0x3FF << 20)) >> 20;
+
+	if (msg_size) {
+		/* ack and disable message size interrupt */
+		intr0 |= BIT(30);
+		intr0 &= ~BIT(31);
+
+		data->message_size = msg_size;
+	}
+
+	/* check and disable ready interrupt */
+	if (intr0 & BIT(16)) {
+		/* ack ready/not ready interrupt */
+		intr0 |= BIT(17);
+		intr0 &= ~BIT(18);
+		pr_debug("got ready interrupt\n");
+		data->ready = true;
+	}
+
+	/* check for reauth req interrupt */
+	if (intr0 & BIT(12)) {
+		/* ack and disable reauth req interrupt */
+		intr0 |= BIT(13);
+		intr0 &= ~BIT(14);
+		pr_err("got reauth interrupt\n");
+		data->reauth_req = true;
+	}
+
+	/* check for ddc fail interrupt */
+	if (intr0 & BIT(8)) {
+		/* ack ddc fail interrupt */
+		intr0 |= BIT(9);
+		pr_err("got ddc fail interrupt\n");
+		data->ddc_max_retries_fail = true;
+	}
+
+	/* check for ddc done interrupt */
+	if (intr0 & BIT(4)) {
+		/* ack ddc done interrupt */
+		intr0 |= BIT(5);
+		pr_debug("got ddc done interrupt\n");
+		data->ddc_done = true;
+	}
+
+	/* check for ddc read req interrupt */
+	if (intr0 & BIT(0)) {
+		/* ack read req interrupt */
+		intr0 |= BIT(1);
+
+		data->ddc_read_req = true;
+	}
+
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, intr0);
+
+	if (intr5 & BIT(0)) {
+		pr_err("RXSTATUS_DDC_REQ_TIMEOUT\n");
+
+		/* ack and disable timeout interrupt */
+		intr5 |= BIT(1);
+		intr5 &= ~BIT(2);
+
+		data->ddc_timeout = true;
+	}
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, intr5);
+
+	/* hand off new RxStatus work: waiter first, then link callback */
+	if (data->message_size || data->ready || data->reauth_req) {
+		if (data->wait) {
+			complete(&ddc_ctrl->rx_status_done);
+		} else if (data->link_cb && data->link_data) {
+			data->link_cb(data->link_data);
+		} else {
+			pr_err("new msg/reauth not handled\n");
+			rc = -EINVAL;
+		}
+	}
+
+	sde_hdmi_hdcp2p2_ddc_clear_status(display);
+
+	return rc;
+}
+
+/*
+ * sde_hdmi_ddc_scrambling_isr - service scrambler-status DDC interrupts
+ * @hdmi_display: sde_hdmi display handle
+ *
+ * Acks the scrambler-status-not-set, DDC-failed, and DDC-request-timeout
+ * interrupts; if any of them fired, disables the HW scrambler status
+ * polling via _sde_hdmi_scrambler_ddc_disable(). Returns 0 on success,
+ * -EINVAL when the hdmi handle is missing.
+ */
+int sde_hdmi_ddc_scrambling_isr(void *hdmi_display)
+{
+
+	bool scrambler_timer_off = false;
+	u32 intr2, intr5;
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct hdmi *hdmi;
+
+
+	hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	intr2 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL2);
+	intr5 = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5);
+
+	pr_debug("intr2: 0x%x, intr5: 0x%x\n", intr2, intr5);
+
+	if (intr2 & BIT(12)) {
+		pr_err("SCRAMBLER_STATUS_NOT\n");
+
+		/* BIT(14) acks the status-not-set interrupt */
+		intr2 |= BIT(14);
+		scrambler_timer_off = true;
+	}
+
+	if (intr2 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_FAILED\n");
+
+		/* BIT(9) acks the DDC-failed interrupt */
+		intr2 |= BIT(9);
+
+		scrambler_timer_off = true;
+	}
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL2, intr2);
+
+	if (intr5 & BIT(8)) {
+		pr_err("SCRAMBLER_STATUS_DDC_REQ_TIMEOUT\n");
+		/* ack the timeout and mask further timeout interrupts */
+		intr5 |= BIT(9);
+		intr5 &= ~BIT(10);
+		scrambler_timer_off = true;
+	}
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, intr5);
+
+	if (scrambler_timer_off)
+		_sde_hdmi_scrambler_ddc_disable((void *)display);
+
+	return 0;
+}
+
+/*
+ * sde_hdmi_ddc_read_retry - perform a DDC read with bounded retries
+ * @display: sde_hdmi display handle
+ *
+ * Issues hdmi_ddc_read() with the parameters stored in the controller's
+ * ddc_data, retrying up to ddc_data->retry times on failure. When a
+ * hard_timeout (ms) is configured, arms the controller's busy-wait
+ * budget before each attempt, records the remaining budget in
+ * timeout_left, and converts an exhausted budget into -ETIMEDOUT.
+ * Returns 0 on success or the last error code.
+ *
+ * NOTE(review): ddc_data points at an embedded member of ddc_ctrl, so
+ * the "!ddc_data" check below can never fire.
+ */
+static int sde_hdmi_ddc_read_retry(struct sde_hdmi *display)
+{
+	int status;
+	int busy_wait_us;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct hdmi *hdmi;
+
+	if (!display) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	ddc_ctrl = &display->ddc_ctrl;
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		SDE_ERROR("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		SDE_ERROR("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		if (ddc_data->hard_timeout) {
+			HDMI_UTIL_DEBUG("using hard_timeout %dms\n",
+				ddc_data->hard_timeout);
+
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			hdmi->use_hard_timeout = true;
+			hdmi->busy_wait_us = busy_wait_us;
+		}
+
+		/* Calling upstream ddc read method */
+		status = hdmi_ddc_read(hdmi, ddc_data->dev_addr,
+				ddc_data->offset,
+				ddc_data->data_buf, ddc_data->request_len,
+				false);
+
+		if (ddc_data->hard_timeout)
+			ddc_data->timeout_left = hdmi->timeout_count;
+
+
+		if (ddc_data->hard_timeout && !hdmi->timeout_count) {
+			HDMI_UTIL_DEBUG("%s: timedout\n", ddc_data->what);
+			status = -ETIMEDOUT;
+		}
+
+	} while (status && ddc_data->retry--);
+
+	if (status) {
+		HDMI_UTIL_ERROR("%s: failed status = %d\n",
+			ddc_data->what, status);
+		goto error;
+	}
+
+	HDMI_UTIL_DEBUG("%s: success\n", ddc_data->what);
+
+error:
+	return status;
+} /* sde_hdmi_ddc_read_retry */
+
+/*
+ * sde_hdmi_ddc_read - DDC read entry point with 32-byte alignment retry
+ * @cb_data: sde_hdmi display handle
+ *
+ * Runs sde_hdmi_ddc_read_retry() once; if it fails and retry_align is
+ * set, restores the retry budget, rounds request_len up to the next
+ * multiple of 32 bytes, and tries the whole sequence again. Returns 0
+ * on success or the last error code.
+ */
+int sde_hdmi_ddc_read(void *cb_data)
+{
+	int rc = 0;
+	int retry;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	struct sde_hdmi *display = (struct sde_hdmi *)cb_data;
+
+	if (!display) {
+		SDE_ERROR("invalid ddc ctrl\n");
+		return -EINVAL;
+	}
+
+	ddc_ctrl = &display->ddc_ctrl;
+	ddc_data = &ddc_ctrl->ddc_data;
+	retry = ddc_data->retry;
+
+	rc = sde_hdmi_ddc_read_retry(display);
+	if (!rc)
+		return rc;
+
+	if (ddc_data->retry_align) {
+		/* restore the retry budget consumed by the first pass */
+		ddc_data->retry = retry;
+
+		/* round the request length up to a 32-byte boundary */
+		ddc_data->request_len = 32 * ((ddc_data->data_len + 31) / 32);
+		rc = sde_hdmi_ddc_read_retry(display);
+	}
+
+	return rc;
+} /* hdmi_ddc_read */
+
+/*
+ * sde_hdmi_ddc_write - perform a DDC write with bounded retries
+ * @cb_data: sde_hdmi display handle
+ *
+ * Issues hdmi_ddc_write() with the parameters stored in the controller's
+ * ddc_data, retrying up to ddc_data->retry times on failure. When a
+ * hard_timeout (ms) is configured, arms the controller's busy-wait
+ * budget before each attempt, records the remaining budget in
+ * timeout_left, and converts an exhausted budget into -ETIMEDOUT.
+ * Returns 0 on success or the last error code.
+ *
+ * NOTE(review): ddc_data points at an embedded member of ddc_ctrl, so
+ * the "!ddc_data" check below can never fire.
+ */
+int sde_hdmi_ddc_write(void *cb_data)
+{
+	int status;
+	struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+	struct sde_hdmi_tx_ddc_data *ddc_data;
+	int busy_wait_us;
+	struct hdmi *hdmi;
+	struct sde_hdmi *display = (struct sde_hdmi *)cb_data;
+
+	if (!display) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	hdmi = display->ctrl.ctrl;
+	ddc_ctrl = &display->ddc_ctrl;
+
+	ddc_data = &ddc_ctrl->ddc_data;
+
+	if (!ddc_data) {
+		SDE_ERROR("invalid input\n");
+		return -EINVAL;
+	}
+
+	if (!ddc_data->data_buf) {
+		status = -EINVAL;
+		SDE_ERROR("%s: invalid buf\n", ddc_data->what);
+		goto error;
+	}
+
+	if (ddc_data->retry < 0) {
+		SDE_ERROR("invalid no. of retries %d\n", ddc_data->retry);
+		status = -EINVAL;
+		goto error;
+	}
+
+	do {
+		if (ddc_data->hard_timeout) {
+			busy_wait_us = ddc_data->hard_timeout * HDMI_MS_TO_US;
+			hdmi->use_hard_timeout = true;
+			hdmi->busy_wait_us = busy_wait_us;
+		}
+
+		status = hdmi_ddc_write(hdmi,
+			ddc_data->dev_addr, ddc_data->offset,
+			ddc_data->data_buf, ddc_data->data_len,
+			false);
+
+		if (ddc_data->hard_timeout)
+			ddc_data->timeout_left = hdmi->timeout_count;
+
+		if (ddc_data->hard_timeout && !hdmi->timeout_count) {
+			/* fixed typo and matched the read path's wording */
+			HDMI_UTIL_ERROR("%s: timedout\n", ddc_data->what);
+			status = -ETIMEDOUT;
+		}
+
+	} while (status && ddc_data->retry--);
+
+	if (status) {
+		HDMI_UTIL_ERROR("%s: failed status = %d\n",
+			ddc_data->what, status);
+		goto error;
+	}
+
+	HDMI_UTIL_DEBUG("%s: success\n", ddc_data->what);
+error:
+	return status;
+} /* hdmi_ddc_write */
+
+/*
+ * sde_hdmi_tx_is_hdcp_enabled - is any HDCP version active on this TX?
+ * @hdmi_ctrl: sde_hdmi display handle
+ *
+ * True when the sink presents HDCP 1.4 or 2.2 support and an hdcp ops
+ * table has been installed; false otherwise (including NULL input).
+ */
+bool sde_hdmi_tx_is_hdcp_enabled(struct sde_hdmi *hdmi_ctrl)
+{
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	return (hdmi_ctrl->hdcp14_present || hdmi_ctrl->hdcp22_present) &&
+		hdmi_ctrl->hdcp_ops;
+}
+
+/*
+ * sde_hdmi_tx_is_encryption_set - check whether HDCP encryption is active
+ * @hdmi_ctrl: sde_hdmi display handle
+ *
+ * For TX major version <= 3 the legacy HDMI_HDCP_CTRL2/HDMI_CTRL bits
+ * are consulted; newer controllers report via HDMI_HDCP_STATUS. Returns
+ * true when encryption appears enabled (and, conservatively, on NULL
+ * input), false when all checked bits are clear.
+ */
+bool sde_hdmi_tx_is_encryption_set(struct sde_hdmi *hdmi_ctrl)
+{
+	bool enc_en = true;
+	u32 reg_val;
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		goto end;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	/* Check if encryption was enabled */
+	if (hdmi_ctrl->hdmi_tx_major_version <= HDMI_TX_VERSION_3) {
+		reg_val = hdmi_read(hdmi, HDMI_HDCP_CTRL2);
+		if ((reg_val & BIT(0)) && (reg_val & BIT(1)))
+			goto end;
+
+		if (hdmi_read(hdmi, HDMI_CTRL) & BIT(2))
+			goto end;
+	} else {
+		/* any nonzero HDCP status counts as encryption enabled */
+		reg_val = hdmi_read(hdmi, HDMI_HDCP_STATUS);
+		if (reg_val)
+			goto end;
+	}
+
+	return false;
+
+end:
+	return enc_en;
+} /* sde_hdmi_tx_is_encryption_set */
+
+/*
+ * sde_hdmi_tx_is_stream_shareable - can the stream be shared at the
+ * current encryption level?
+ * @hdmi_ctrl: sde_hdmi display handle
+ *
+ * ENC_NONE is always shareable; ENC_1X requires HDCP enabled and an
+ * authenticated link; ENC_2P2 additionally requires an HDCP 2.2 capable
+ * sink. Any other level (or NULL input) is not shareable.
+ */
+bool sde_hdmi_tx_is_stream_shareable(struct sde_hdmi *hdmi_ctrl)
+{
+	bool ret;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	switch (hdmi_ctrl->enc_lvl) {
+	case HDCP_STATE_AUTH_ENC_NONE:
+		ret = true;
+		break;
+	case HDCP_STATE_AUTH_ENC_1X:
+		ret = sde_hdmi_tx_is_hdcp_enabled(hdmi_ctrl) &&
+			hdmi_ctrl->auth_state;
+		break;
+	case HDCP_STATE_AUTH_ENC_2P2:
+		ret = hdmi_ctrl->hdcp22_present &&
+			hdmi_ctrl->auth_state;
+		break;
+	default:
+		ret = false;
+	}
+
+	return ret;
+}
+
+/*
+ * sde_hdmi_tx_is_panel_on - is a sink connected and the TX powered?
+ * @hdmi_ctrl: sde_hdmi display handle
+ *
+ * Returns true only when both the connection and power_on flags are set;
+ * false for NULL input.
+ */
+bool sde_hdmi_tx_is_panel_on(struct sde_hdmi *hdmi_ctrl)
+{
+	struct hdmi *hdmi;
+
+	if (!hdmi_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return false;
+	}
+
+	hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	return hdmi_ctrl->connected && hdmi->power_on;
+}
+
+/*
+ * sde_hdmi_config_avmute - set or clear the AVMUTE flag in the GC packet
+ * @hdmi: hdmi controller handle
+ * @set: true to assert AVMUTE, false to clear it
+ *
+ * Toggles BIT(0) of HDMI_GC only when the state actually changes, then
+ * enables General Control packet transmission so the sink observes the
+ * new AVMUTE state. Returns 0 on success, -ENODEV for a NULL handle.
+ */
+int sde_hdmi_config_avmute(struct hdmi *hdmi, bool set)
+{
+	u32 av_mute_status;
+	bool av_pkt_en = false;
+
+	if (!hdmi) {
+		SDE_ERROR("invalid HDMI Ctrl\n");
+		return -ENODEV;
+	}
+
+	av_mute_status = hdmi_read(hdmi, HDMI_GC);
+
+	if (set) {
+		if (!(av_mute_status & BIT(0))) {
+			hdmi_write(hdmi, HDMI_GC, av_mute_status | BIT(0));
+			av_pkt_en = true;
+		}
+	} else {
+		if (av_mute_status & BIT(0)) {
+			hdmi_write(hdmi, HDMI_GC, av_mute_status & ~BIT(0));
+			av_pkt_en = true;
+		}
+	}
+
+	/*
+	 * Enable AV Mute transmission here. Both GC packet bits must be
+	 * set; the previous "BIT(4) & BIT(5)" evaluated to 0 and never
+	 * enabled transmission.
+	 */
+	if (av_pkt_en)
+		hdmi_write(hdmi, HDMI_VBI_PKT_CTRL,
+			hdmi_read(hdmi, HDMI_VBI_PKT_CTRL) | BIT(4) | BIT(5));
+
+	pr_info("AVMUTE %s\n", set ? "set" : "cleared");
+
+	return 0;
+}
+
+/*
+ * _sde_hdmi_get_timeout_in_hysnc - convert a ms timeout to hsync counts
+ * @hdmi_display: sde_hdmi display handle
+ * @timeout_ms: timeout in milliseconds
+ *
+ * Returns the number of horizontal-sync lines corresponding to
+ * timeout_ms for the current mode (mode.clock is in kHz, so
+ * timeout_ms * clock yields pixels, divided by htotal gives lines).
+ *
+ * NOTE(review): hdmi_display is dereferenced without a NULL check —
+ * confirm callers guarantee a valid handle.
+ */
+int _sde_hdmi_get_timeout_in_hysnc(void *hdmi_display, u32 timeout_ms)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	struct drm_display_mode mode = display->mode;
+	/*
+	 * pixel clock = h_total * v_total * fps
+	 * 1 sec = pixel clock number of pixels are transmitted.
+	 * time taken by one line (h_total) = 1s / (v_total * fps).
+	 * lines for give time = (time_ms * 1000) / (1000000 / (v_total * fps))
+	 *		       = (time_ms * clock) / h_total
+	 */
+
+	return (timeout_ms * mode.clock / mode.htotal);
+}
+
+/*
+ * sde_hdmi_hdcp2p2_ddc_reset - ack pending HDCP2P2 DDC irqs and pulse
+ * the DDC timer reset
+ * @hdmi_ctrl: sde_hdmi display handle
+ */
+static void sde_hdmi_hdcp2p2_ddc_reset(struct sde_hdmi *hdmi_ctrl)
+{
+	u32 reg_val;
+	struct hdmi *hdmi = hdmi_ctrl->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	/*
+	 * Clear acks for DDC_REQ, DDC_DONE, DDC_FAILED, RXSTATUS_READY,
+	 * RXSTATUS_MSG_SIZE
+	 */
+	reg_val = BIT(30) | BIT(17) | BIT(13) | BIT(9) | BIT(5) | BIT(1);
+	hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, reg_val);
+	/* Reset DDC timers: assert BIT(0), then deassert it */
+	reg_val = BIT(0) | hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL);
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+	reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+}
+
+/*
+ * sde_hdmi_hdcp2p2_ddc_disable - stop HW polling of the HDCP2P2 RxStatus
+ * @hdmi_display: sde_hdmi display handle
+ *
+ * Resets the HDCP2P2 DDC state machine, then clears the HW DDC access
+ * bits so hardware no longer reads the sink's RxStatus register.
+ */
+void sde_hdmi_hdcp2p2_ddc_disable(void *hdmi_display)
+{
+	struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+	u32 reg_val;
+	struct hdmi *hdmi = display->ctrl.ctrl;
+
+	if (!hdmi) {
+		pr_err("Invalid parameters\n");
+		return;
+	}
+
+	sde_hdmi_hdcp2p2_ddc_reset(display);
+
+	/* Disable HW DDC access to RxStatus register */
+	reg_val = hdmi_read(hdmi, HDMI_HW_DDC_CTRL);
+	reg_val &= ~(BIT(1) | BIT(0));
+
+	hdmi_write(hdmi, HDMI_HW_DDC_CTRL, reg_val);
+}
+
+/*
+ * _sde_hdmi_scrambler_ddc_reset - quiesce the scrambler-status DDC path
+ * @hdmi: hdmi controller handle
+ *
+ * Acks/masks the scrambler DDC interrupts, then pulses BIT(0) of the
+ * scrambler-status DDC control register to reset its timers.
+ */
+static void _sde_hdmi_scrambler_ddc_reset(struct hdmi *hdmi)
+{
+	u32 reg_val;
+
+	/* clear ack and disable interrupts */
+	reg_val = BIT(14) | BIT(9) | BIT(5) | BIT(1);
+	hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL2, reg_val);
+
+	/* Reset DDC timers */
+	reg_val = BIT(0) | hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+
+	reg_val = hdmi_read(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL, reg_val);
+}
+
+/*
+ * sde_hdmi_ctrl_cfg - enable or disable the HDMI controller core
+ * @hdmi: hdmi controller handle
+ * @power_on: true to set HDMI_CTRL_ENABLE, false to clear it
+ *
+ * The read-modify-write of REG_HDMI_CTRL is done under reg_lock since
+ * other contexts also touch this register.
+ */
+void sde_hdmi_ctrl_cfg(struct hdmi *hdmi, bool power_on)
+{
+	uint32_t ctrl = 0;
+	unsigned long flags;
+
+	spin_lock_irqsave(&hdmi->reg_lock, flags);
+	ctrl = hdmi_read(hdmi, REG_HDMI_CTRL);
+
+	if (power_on)
+		ctrl |= HDMI_CTRL_ENABLE;
+	else
+		ctrl &= ~HDMI_CTRL_ENABLE;
+
+	hdmi_write(hdmi, REG_HDMI_CTRL, ctrl);
+	spin_unlock_irqrestore(&hdmi->reg_lock, flags);
+
+	HDMI_UTIL_DEBUG("HDMI Core: %s, HDMI_CTRL=0x%08x\n",
+			power_on ? "Enable" : "Disable", ctrl);
+}
+
+/*
+ * sde_hdmi_clear_pkt_send - stop all packet transmission to the sink
+ * @hdmi: hdmi controller handle
+ *
+ * Clears the send-enable bits for audio samples, VBI control packets,
+ * infoframes, and general control packets. Used while the controller is
+ * held in software reset (see sde_hdmi_ctrl_reset()).
+ */
+static void sde_hdmi_clear_pkt_send(struct hdmi *hdmi)
+{
+	uint32_t reg_val;
+
+	/* Clear audio sample send */
+	reg_val = hdmi_read(hdmi, HDMI_AUDIO_PKT_CTRL);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_AUDIO_PKT_CTRL, reg_val);
+
+	/* Clear sending VBI ctrl packets */
+	reg_val = hdmi_read(hdmi, HDMI_VBI_PKT_CTRL);
+	reg_val &= ~(BIT(4) | BIT(8) | BIT(12));
+	hdmi_write(hdmi, HDMI_VBI_PKT_CTRL, reg_val);
+
+	/* Clear sending infoframe packets */
+	reg_val = hdmi_read(hdmi, HDMI_INFOFRAME_CTRL0);
+	reg_val &= ~(BIT(0) | BIT(4) | BIT(8) | BIT(12)
+		| BIT(15) | BIT(19));
+	hdmi_write(hdmi, HDMI_INFOFRAME_CTRL0, reg_val);
+
+	/* Clear sending general ctrl packets */
+	reg_val = hdmi_read(hdmi, HDMI_GEN_PKT_CTRL);
+	reg_val &= ~(BIT(0) | BIT(4));
+	hdmi_write(hdmi, HDMI_GEN_PKT_CTRL, reg_val);
+}
+
+/*
+ * sde_hdmi_ctrl_reset - software-reset the HDMI controller
+ * @hdmi: hdmi controller handle
+ *
+ * Asserts the CTRL SW reset, disables the core and the audio engine,
+ * stops all packet transmission, then de-asserts the reset, leaving the
+ * controller in a known quiescent state.
+ */
+void sde_hdmi_ctrl_reset(struct hdmi *hdmi)
+{
+	uint32_t reg_val;
+
+	/* Assert HDMI CTRL SW reset */
+	reg_val = hdmi_read(hdmi, HDMI_CTRL_SW_RESET);
+	reg_val |= BIT(0);
+	hdmi_write(hdmi, HDMI_CTRL_SW_RESET, reg_val);
+
+	/* disable the controller and put to known state */
+	sde_hdmi_ctrl_cfg(hdmi, 0);
+
+	/* disable the audio engine */
+	reg_val = hdmi_read(hdmi, HDMI_AUDIO_CFG);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_AUDIO_CFG, reg_val);
+
+	/* clear sending packets to sink */
+	sde_hdmi_clear_pkt_send(hdmi);
+
+	/* De-assert HDMI CTRL SW reset */
+	reg_val = hdmi_read(hdmi, HDMI_CTRL_SW_RESET);
+	reg_val &= ~BIT(0);
+	hdmi_write(hdmi, HDMI_CTRL_SW_RESET, reg_val);
+}
+
+void _sde_hdmi_scrambler_ddc_disable(void *hdmi_display)
+{
+ struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+ u32 reg_val;
+
+ struct hdmi *hdmi = display->ctrl.ctrl;
+
+ if (!hdmi) {
+ pr_err("Invalid parameters\n");
+ return;
+ }
+
+ _sde_hdmi_scrambler_ddc_reset(hdmi);
+ /* Disable HW DDC access to RxStatus register */
+ reg_val = hdmi_read(hdmi, REG_HDMI_HW_DDC_CTRL);
+ reg_val &= ~(BIT(8) | BIT(9));
+ hdmi_write(hdmi, REG_HDMI_HW_DDC_CTRL, reg_val);
+}
+
+void sde_hdmi_ddc_config(void *hdmi_display)
+{
+ struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ uint32_t ddc_speed;
+
+ if (!hdmi) {
+ pr_err("Invalid parameters\n");
+ return;
+ }
+
+ ddc_speed = hdmi_read(hdmi, REG_HDMI_DDC_SPEED);
+ ddc_speed |= HDMI_DDC_SPEED_THRESHOLD(2);
+ ddc_speed |= HDMI_DDC_SPEED_PRESCALE(12);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+ ddc_speed);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+ HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+ /* enable reference timer for 19us */
+ hdmi_write(hdmi, REG_HDMI_DDC_REF,
+ HDMI_DDC_REF_REFTIMER_ENABLE |
+ HDMI_DDC_REF_REFTIMER(19));
+}
+
+int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display)
+{
+ u32 reg_val;
+ u32 intr_en_mask;
+ u32 timeout;
+ u32 timer;
+ int rc = 0;
+ int busy_wait_us;
+ struct sde_hdmi_tx_hdcp2p2_ddc_data *data;
+ struct sde_hdmi *display = (struct sde_hdmi *)hdmi_display;
+ struct hdmi *hdmi = display->ctrl.ctrl;
+ struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+ u32 rem;
+
+ if (!hdmi) {
+ pr_err("Invalid ddc data\n");
+ return -EINVAL;
+ }
+
+ ddc_ctrl = &display->ddc_ctrl;
+ data = &ddc_ctrl->sde_hdcp2p2_ddc_data;
+ if (!data) {
+ pr_err("Invalid ddc data\n");
+ return -EINVAL;
+ }
+
+ rc = ddc_clear_irq(hdmi);
+ if (rc) {
+ pr_err("DDC clear irq failed\n");
+ return rc;
+ }
+ intr_en_mask = data->intr_mask;
+ intr_en_mask |= BIT(HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK);
+
+ /* Disable short read for now, sinks don't support it */
+ reg_val = hdmi_read(hdmi, HDMI_HDCP2P2_DDC_CTRL);
+ reg_val |= BIT(4);
+ hdmi_write(hdmi, HDMI_HDCP2P2_DDC_CTRL, reg_val);
+ /*
+ * Setup the DDC timers for HDMI_HDCP2P2_DDC_TIMER_CTRL1 and
+ * HDMI_HDCP2P2_DDC_TIMER_CTRL2.
+ * Following are the timers:
+ * 1. DDC_REQUEST_TIMER: Timeout in hsyncs in which to wait for the
+ * HDCP 2.2 sink to respond to an RxStatus request
+ * 2. DDC_URGENT_TIMER: Time period in hsyncs to issue an urgent flag
+ * when an RxStatus DDC request is made but not accepted by I2C
+ * engine
+ * 3. DDC_TIMEOUT_TIMER: Timeout in hsyncs which starts counting when
+ * a request is made and stops when it is accepted by DDC arbiter
+ */
+
+ timeout = data->timeout_hsync;
+ timer = data->periodic_timer_hsync;
+
+ hdmi_write(hdmi, HDMI_HDCP2P2_DDC_TIMER_CTRL, timer);
+ /* Set both urgent and hw-timeout fields to the same value */
+ hdmi_write(hdmi, HDMI_HDCP2P2_DDC_TIMER_CTRL2,
+ (timeout << 16 | timeout));
+ /* enable interrupts */
+ reg_val = intr_en_mask;
+ /* Clear interrupt status bits */
+ reg_val |= intr_en_mask >> 1;
+
+ hdmi_write(hdmi, HDMI_DDC_INT_CTRL0, reg_val);
+ reg_val = hdmi_read(hdmi, HDMI_DDC_INT_CTRL5);
+ /* clear and enable RxStatus read timeout */
+ reg_val |= BIT(2) | BIT(1);
+
+ hdmi_write(hdmi, HDMI_DDC_INT_CTRL5, reg_val);
+ /*
+ * Enable hardware DDC access to RxStatus register
+ *
+ * HDMI_HW_DDC_CTRL:Bits 1:0 (RXSTATUS_DDC_ENABLE) read like this:
+ *
+ * 0 = disable HW controlled DDC access to RxStatus
+ * 1 = automatic on when HDCP 2.2 is authenticated and loop based on
+ * request timer (i.e. the hardware will loop automatically)
+ * 2 = force on and loop based on request timer (hardware will loop)
+ * 3 = enable by sw trigger and loop until interrupt is generated for
+ * RxStatus.reauth_req, RxStatus.ready or RxStatus.message_Size.
+ *
+ * Depending on the value of ddc_data::poll_sink, we make the decision
+ * to use either SW_TRIGGER(3) (poll_sink = false) which means that the
+ * hardware will poll sink and generate interrupt when sink responds,
+ * or use AUTOMATIC_LOOP(1) (poll_sink = true) which will poll the sink
+ * based on request timer
+ */
+
+ reg_val = hdmi_read(hdmi, HDMI_HW_DDC_CTRL);
+ reg_val &= ~(BIT(1) | BIT(0));
+
+ busy_wait_us = data->timeout_ms * HDMI_MS_TO_US;
+
+ /* read method: HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER */
+ reg_val |= BIT(1) | BIT(0);
+ hdmi_write(hdmi, HDMI_HW_DDC_CTRL, reg_val);
+
+ hdmi_write(hdmi, HDMI_HDCP2P2_DDC_SW_TRIGGER, 1);
+ if (data->wait) {
+ reinit_completion(&ddc_ctrl->rx_status_done);
+ rem = wait_for_completion_timeout(&ddc_ctrl->rx_status_done,
+ HZ);
+ data->timeout_left = jiffies_to_msecs(rem);
+
+ if (!data->timeout_left) {
+ pr_err("sw ddc rxstatus timeout\n");
+ rc = -ETIMEDOUT;
+ }
+ sde_hdmi_hdcp2p2_ddc_disable((void *)display);
+ }
+ return rc;
+}
+
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+ u32 out_format, bool dc_enable)
+{
+ u32 rate_ratio = HDMI_RGB_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+ if (out_format & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+ rate_ratio = HDMI_YUV420_24BPP_PCLK_TMDS_CH_RATE_RATIO;
+
+ pixel_freq /= rate_ratio;
+
+ if (dc_enable)
+ pixel_freq += pixel_freq >> 2;
+
+ return pixel_freq;
+
+}
+
+bool sde_hdmi_validate_pixclk(struct drm_connector *connector,
+ unsigned long pclk)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+ unsigned long max_pclk = display->max_pclk_khz * HDMI_KHZ_TO_HZ;
+
+ if (connector->max_tmds_char)
+ max_pclk = MIN(max_pclk,
+ connector->max_tmds_char * HDMI_MHZ_TO_HZ);
+ else if (connector->max_tmds_clock)
+ max_pclk = MIN(max_pclk,
+ connector->max_tmds_clock * HDMI_MHZ_TO_HZ);
+
+ SDE_DEBUG("MAX PCLK = %ld, PCLK = %ld\n", max_pclk, pclk);
+
+ return pclk < max_pclk;
+}
+
+static bool sde_hdmi_check_dc_clock(struct drm_connector *connector,
+ struct drm_display_mode *mode, u32 format)
+{
+ struct sde_connector *c_conn = to_sde_connector(connector);
+ struct sde_hdmi *display = (struct sde_hdmi *)c_conn->display;
+
+ u32 tmds_clk_with_dc = sde_hdmi_calc_pixclk(
+ mode->clock * HDMI_KHZ_TO_HZ,
+ format,
+ true);
+
+ return (display->dc_feature_supported &&
+ sde_hdmi_validate_pixclk(connector, tmds_clk_with_dc));
+}
+
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ int dc_format = 0;
+
+ if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV) &&
+ (connector->display_info.edid_hdmi_dc_modes
+ & DRM_EDID_YCBCR420_DC_30))
+ if (sde_hdmi_check_dc_clock(connector, mode,
+ MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420))
+ dc_format |= MSM_MODE_FLAG_YUV420_DC_ENABLE;
+
+ if ((mode->flags & DRM_MODE_FLAG_SUPPORTS_RGB) &&
+ (connector->display_info.edid_hdmi_dc_modes
+ & DRM_EDID_HDMI_DC_30))
+ if (sde_hdmi_check_dc_clock(connector, mode,
+ MSM_MODE_FLAG_COLOR_FORMAT_RGB444))
+ dc_format |= MSM_MODE_FLAG_RGB444_DC_ENABLE;
+
+ return dc_format;
+}
+
+u8 sde_hdmi_hdr_get_ops(u8 curr_state,
+ u8 new_state)
+{
+
+ /** There could be 4 valid state transitions:
+ * 1. HDR_DISABLE -> HDR_ENABLE
+ *
+ * In this transition, we shall start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 2. HDR_ENABLE -> HDR_RESET
+ *
+ * In this transition, we will keep sending
+ * HDR metadata but with EOTF and metadata as 0
+ *
+ * 3. HDR_RESET -> HDR_ENABLE
+ *
+ * In this transition, we will start sending
+ * HDR metadata with metadata from the HDR clip
+ *
+ * 4. HDR_RESET -> HDR_DISABLE
+ *
+ * In this transition, we will stop sending
+ * metadata to the sink and clear PKT_CTRL register
+ * bits.
+ */
+
+ if ((curr_state == HDR_DISABLE)
+ && (new_state == HDR_ENABLE)) {
+ HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+ sde_hdmi_hdr_sname(curr_state),
+ sde_hdmi_hdr_sname(new_state));
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_ENABLE)
+ && (new_state == HDR_RESET)) {
+ HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+ sde_hdmi_hdr_sname(curr_state),
+ sde_hdmi_hdr_sname(new_state));
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_ENABLE)) {
+ HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+ sde_hdmi_hdr_sname(curr_state),
+ sde_hdmi_hdr_sname(new_state));
+ return HDR_SEND_INFO;
+ } else if ((curr_state == HDR_RESET)
+ && (new_state == HDR_DISABLE)) {
+ HDMI_UTIL_DEBUG("State changed %s ---> %s\n",
+ sde_hdmi_hdr_sname(curr_state),
+ sde_hdmi_hdr_sname(new_state));
+ return HDR_CLEAR_INFO;
+ }
+
+ HDMI_UTIL_DEBUG("Unsupported OR no state change\n");
+ return HDR_UNSUPPORTED_OP;
+}
+
diff --git a/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
new file mode 100644
index 000000000000..985d8c3e605c
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi-staging/sde_hdmi_util.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_HDMI_UTIL_H_
+#define _SDE_HDMI_UTIL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/msm_ext_display.h>
+
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include "hdmi.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "msm_drv.h"
+#include "sde_hdmi_regs.h"
+
+#ifdef HDMI_UTIL_DEBUG_ENABLE
+#define HDMI_UTIL_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
+#else
+#define HDMI_UTIL_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args)
+#endif
+
+#define HDMI_UTIL_ERROR(fmt, args...) SDE_ERROR(fmt, ##args)
+
+/*
+ * Offsets in HDMI_DDC_INT_CTRL0 register
+ *
+ * The HDMI_DDC_INT_CTRL0 register is intended for HDCP 2.2 RxStatus
+ * register manipulation. It reads like this:
+ *
+ * Bit 31: RXSTATUS_MESSAGE_SIZE_MASK (1 = generate interrupt when size > 0)
+ * Bit 30: RXSTATUS_MESSAGE_SIZE_ACK (1 = Acknowledge message size intr)
+ * Bits 29-20: RXSTATUS_MESSAGE_SIZE (Actual size of message available)
+ * Bits 19-18: RXSTATUS_READY_MASK (1 = generate interrupt when ready = 1
+ * 2 = generate interrupt when ready = 0)
+ * Bit 17: RXSTATUS_READY_ACK (1 = Acknowledge ready bit interrupt)
+ * Bit 16: RXSTATUS_READY (1 = Rxstatus ready bit read is 1)
+ * Bit 15: RXSTATUS_READY_NOT (1 = Rxstatus ready bit read is 0)
+ * Bit 14: RXSTATUS_REAUTH_REQ_MASK (1 = generate interrupt when reauth is
+ * requested by sink)
+ * Bit 13: RXSTATUS_REAUTH_REQ_ACK (1 = Acknowledge Reauth req interrupt)
+ * Bit 12: RXSTATUS_REAUTH_REQ (1 = Rxstatus reauth req bit read is 1)
+ * Bit 10: RXSTATUS_DDC_FAILED_MASK (1 = generate interrupt when DDC
+ * transaction fails)
+ * Bit 9: RXSTATUS_DDC_FAILED_ACK (1 = Acknowledge ddc failure interrupt)
+ * Bit 8: RXSTATUS_DDC_FAILED (1 = DDC transaction failed)
+ * Bit 6: RXSTATUS_DDC_DONE_MASK (1 = generate interrupt when DDC
+ * transaction completes)
+ * Bit 5: RXSTATUS_DDC_DONE_ACK (1 = Acknowledge ddc done interrupt)
+ * Bit 4: RXSTATUS_DDC_DONE (1 = DDC transaction is done)
+ * Bit 2: RXSTATUS_DDC_REQ_MASK (1 = generate interrupt when DDC Read
+ * request for RXstatus is made)
+ * Bit 1: RXSTATUS_DDC_REQ_ACK (1 = Acknowledge Rxstatus read interrupt)
+ * Bit 0: RXSTATUS_DDC_REQ (1 = RXStatus DDC read request is made)
+ *
+ */
+
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_SHIFT 20
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_MASK 0x3ff00000
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_ACK_SHIFT 30
+#define HDCP2P2_RXSTATUS_MESSAGE_SIZE_INTR_SHIFT 31
+
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_SHIFT 12
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_MASK 1
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_ACK_SHIFT 13
+#define HDCP2P2_RXSTATUS_REAUTH_REQ_INTR_SHIFT 14
+
+#define HDCP2P2_RXSTATUS_READY_SHIFT 16
+#define HDCP2P2_RXSTATUS_READY_MASK 1
+#define HDCP2P2_RXSTATUS_READY_ACK_SHIFT 17
+#define HDCP2P2_RXSTATUS_READY_INTR_SHIFT 18
+#define HDCP2P2_RXSTATUS_READY_INTR_MASK 18
+
+#define HDCP2P2_RXSTATUS_DDC_FAILED_SHIFT 8
+#define HDCP2P2_RXSTATUS_DDC_FAILED_ACKSHIFT 9
+#define HDCP2P2_RXSTATUS_DDC_FAILED_INTR_MASK 10
+#define HDCP2P2_RXSTATUS_DDC_DONE 6
+
+/* default hsyncs for 4k@60 for 200ms */
+#define HDMI_DEFAULT_TIMEOUT_HSYNC 28571
+
+#define HDMI_GET_MSB(x) (((x) >> 8) & 0xff)
+#define HDMI_GET_LSB(x) ((x) & 0xff)
+
+#define SDE_HDMI_VIC_640x480 0x1
+#define SDE_HDMI_YCC_QUANT_MASK (0x3 << 14)
+#define SDE_HDMI_COLORIMETRY_MASK (0x3 << 22)
+
+#define SDE_HDMI_DEFAULT_COLORIMETRY 0x0
+#define SDE_HDMI_USE_EXTENDED_COLORIMETRY 0x3
+#define SDE_HDMI_BT2020_COLORIMETRY 0x6
+
+#define SDE_HDMI_HDCP_22 0x22
+#define SDE_HDMI_HDCP_14 0x14
+#define SDE_HDMI_HDCP_NONE 0x0
+
+#define SDE_HDMI_HDR_LUMINANCE_NONE 0x0
+#define SDE_HDMI_HDR_EOTF_NONE 0x0
+
+/*
+ * Bits 1:0 in HDMI_HW_DDC_CTRL that dictate how the HDCP 2.2 RxStatus will be
+ * read by the hardware
+ */
+#define HDCP2P2_RXSTATUS_HW_DDC_DISABLE 0
+#define HDCP2P2_RXSTATUS_HW_DDC_AUTOMATIC_LOOP 1
+#define HDCP2P2_RXSTATUS_HW_DDC_FORCE_LOOP 2
+#define HDCP2P2_RXSTATUS_HW_DDC_SW_TRIGGER 3
+
+struct sde_hdmi_tx_ddc_data {
+ char *what;
+ u8 *data_buf;
+ u32 data_len;
+ u32 dev_addr;
+ u32 offset;
+ u32 request_len;
+ u32 retry_align;
+ u32 hard_timeout;
+ u32 timeout_left;
+ int retry;
+};
+
+enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask {
+ RXSTATUS_MESSAGE_SIZE = BIT(31),
+ RXSTATUS_READY = BIT(18),
+ RXSTATUS_REAUTH_REQ = BIT(14),
+};
+
+enum sde_hdmi_hdr_state {
+ HDR_DISABLE = DRM_MSM_HDR_DISABLE,
+ HDR_ENABLE = DRM_MSM_HDR_ENABLE,
+ HDR_RESET = DRM_MSM_HDR_RESET
+};
+
+enum sde_hdmi_hdr_op {
+ HDR_UNSUPPORTED_OP,
+ HDR_SEND_INFO,
+ HDR_CLEAR_INFO
+};
+
+struct sde_hdmi_tx_hdcp2p2_ddc_data {
+ enum sde_hdmi_tx_hdcp2p2_rxstatus_intr_mask intr_mask;
+ u32 timeout_ms;
+ u32 timeout_hsync;
+ u32 periodic_timer_hsync;
+ u32 timeout_left;
+ u32 read_method;
+ u32 message_size;
+ bool encryption_ready;
+ bool ready;
+ bool reauth_req;
+ bool ddc_max_retries_fail;
+ bool ddc_done;
+ bool ddc_read_req;
+ bool ddc_timeout;
+ bool wait;
+ int irq_wait_count;
+ void (*link_cb)(void *data);
+ void *link_data;
+};
+
+struct sde_hdmi_tx_ddc_ctrl {
+ struct completion rx_status_done;
+ struct dss_io_data *io;
+ struct sde_hdmi_tx_ddc_data ddc_data;
+ struct sde_hdmi_tx_hdcp2p2_ddc_data sde_hdcp2p2_ddc_data;
+};
+
+/* DDC */
+int sde_hdmi_ddc_write(void *cb_data);
+int sde_hdmi_ddc_read(void *cb_data);
+int sde_hdmi_ddc_scrambling_isr(void *hdmi_display);
+int _sde_hdmi_get_timeout_in_hysnc(void *hdmi_display, u32 timeout_ms);
+void _sde_hdmi_scrambler_ddc_disable(void *hdmi_display);
+void sde_hdmi_hdcp2p2_ddc_disable(void *hdmi_display);
+int sde_hdmi_hdcp2p2_read_rxstatus(void *hdmi_display);
+void sde_hdmi_ddc_config(void *hdmi_display);
+int sde_hdmi_ddc_hdcp2p2_isr(void *hdmi_display);
+void sde_hdmi_dump_regs(void *hdmi_display);
+unsigned long sde_hdmi_calc_pixclk(unsigned long pixel_freq,
+ u32 out_format, bool dc_enable);
+bool sde_hdmi_validate_pixclk(struct drm_connector *connector,
+ unsigned long pclk);
+int sde_hdmi_sink_dc_support(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+u8 sde_hdmi_hdr_get_ops(u8 curr_state,
+ u8 new_state);
+void sde_hdmi_ctrl_reset(struct hdmi *hdmi);
+void sde_hdmi_ctrl_cfg(struct hdmi *hdmi, bool power_on);
+u8 sde_hdmi_hdr_set_chksum(struct drm_msm_ext_panel_hdr_metadata *hdr_meta);
+
+#endif /* _SDE_HDMI_UTIL_H_ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.c b/drivers/gpu/drm/msm/hdmi/hdmi.c
index 1f4a95eeb348..9dbd86eff816 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -56,7 +56,7 @@ static irqreturn_t hdmi_irq(int irq, void *dev_id)
/* Process HDCP: */
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_irq(hdmi->hdcp_ctrl);
+ hdmi_hdcp_ctrl_irq(hdmi->hdcp_ctrl);
/* TODO audio.. */
@@ -75,7 +75,8 @@ static void hdmi_destroy(struct hdmi *hdmi)
flush_workqueue(hdmi->workq);
destroy_workqueue(hdmi->workq);
}
- hdmi_hdcp_destroy(hdmi);
+
+ hdmi_hdcp_ctrl_destroy(hdmi);
if (phy)
phy->funcs->destroy(phy);
@@ -94,7 +95,7 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
struct hdmi_platform_config *config = pdev->dev.platform_data;
struct hdmi *hdmi = NULL;
struct resource *res;
- int i, ret;
+ int i, ret = 0;
hdmi = devm_kzalloc(&pdev->dev, sizeof(*hdmi), GFP_KERNEL);
if (!hdmi) {
@@ -118,9 +119,19 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
}
}
+ res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, config->mmio_name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find ctrl resource\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ hdmi->mmio_len = (u32)resource_size(res);
hdmi->mmio = msm_ioremap(pdev, config->mmio_name, "HDMI");
if (IS_ERR(hdmi->mmio)) {
ret = PTR_ERR(hdmi->mmio);
+ dev_info(&pdev->dev, "can't map hdmi resource\n");
+ hdmi->mmio = NULL;
goto fail;
}
@@ -129,13 +140,39 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
config->mmio_name);
hdmi->mmio_phy_addr = res->start;
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ config->qfprom_mmio_name);
+
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find qfprom resource\n");
+ ret = -ENOMEM;
+ goto fail;
+ }
+ hdmi->qfprom_mmio_len = (u32)resource_size(res);
+
hdmi->qfprom_mmio = msm_ioremap(pdev,
config->qfprom_mmio_name, "HDMI_QFPROM");
+
if (IS_ERR(hdmi->qfprom_mmio)) {
- dev_info(&pdev->dev, "can't find qfprom resource\n");
+ dev_info(&pdev->dev, "can't map qfprom resource\n");
hdmi->qfprom_mmio = NULL;
}
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM,
+ config->hdcp_mmio_name);
+ if (!res) {
+ dev_err(&pdev->dev, "failed to find hdcp resource: %d\n", ret);
+ ret = -ENOMEM;
+ goto fail;
+ }
+ hdmi->hdcp_mmio_len = (u32)resource_size(res);
+ hdmi->hdcp_mmio = msm_ioremap(pdev,
+ config->hdcp_mmio_name, "HDMI_HDCP");
+ if (IS_ERR(hdmi->hdcp_mmio)) {
+ dev_info(&pdev->dev, "can't map hdcp resource\n");
+ hdmi->hdcp_mmio = NULL;
+ }
+
hdmi->hpd_regs = devm_kzalloc(&pdev->dev, sizeof(hdmi->hpd_regs[0]) *
config->hpd_reg_cnt, GFP_KERNEL);
if (!hdmi->hpd_regs) {
@@ -228,11 +265,16 @@ static struct hdmi *hdmi_init(struct platform_device *pdev)
goto fail;
}
- hdmi->hdcp_ctrl = hdmi_hdcp_init(hdmi);
+ hdmi->hdcp_ctrl = hdmi_hdcp_ctrl_init(hdmi);
if (IS_ERR(hdmi->hdcp_ctrl)) {
dev_warn(&pdev->dev, "failed to init hdcp: disabled\n");
hdmi->hdcp_ctrl = NULL;
}
+ /* Set to false for now to avoid ifdefs;
+ * this flag will be removed once HDCP SW
+ * support is added to the HDMI DRM driver.
+ */
+ hdmi->is_hdcp_supported = false;
return hdmi;
@@ -382,13 +424,40 @@ static struct hdmi_platform_config hdmi_tx_8994_config = {
static struct hdmi_platform_config hdmi_tx_8996_config = {
.phy_init = NULL,
HDMI_CFG(pwr_reg, none),
- HDMI_CFG(hpd_reg, none),
+ HDMI_CFG(hpd_reg, 8x74),
HDMI_CFG(pwr_clk, 8x74),
HDMI_CFG(hpd_clk, 8x74),
.hpd_freq = hpd_clk_freq_8x74,
};
+/* TODO */
+static const char *pwr_reg_names_8x98[] = {"core-vdda", "core-vcc"};
+/* TODO */
+static const char *hpd_reg_names_8x98[] = {"hpd-gdsc", "hpd-5v"};
+
+static const char *pwr_clk_names_8x98[] = {"core_extp_clk",
+ "hpd_alt_iface_clk"};
+
+static const char *hpd_clk_names_8x98[] = {"hpd_iface_clk",
+ "hpd_core_clk",
+ "hpd_mdp_core_clk",
+ "mnoc_clk",
+ "hpd_misc_ahb_clk",
+ "hpd_bus_clk"};
+
+static unsigned long hpd_clk_freq_8x98[] = {0, 19200000, 0, 0, 0, 0};
+
+static struct hdmi_platform_config hdmi_tx_8998_config = {
+ .phy_init = NULL,
+ HDMI_CFG(pwr_reg, 8x98),
+ HDMI_CFG(hpd_reg, 8x98),
+ HDMI_CFG(pwr_clk, 8x98),
+ HDMI_CFG(hpd_clk, 8x98),
+ .hpd_freq = hpd_clk_freq_8x98,
+};
+
static const struct of_device_id dt_match[] = {
+ { .compatible = "qcom,hdmi-tx-8998", .data = &hdmi_tx_8998_config },
{ .compatible = "qcom,hdmi-tx-8996", .data = &hdmi_tx_8996_config },
{ .compatible = "qcom,hdmi-tx-8994", .data = &hdmi_tx_8994_config },
{ .compatible = "qcom,hdmi-tx-8084", .data = &hdmi_tx_8084_config },
@@ -424,7 +493,6 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
#ifdef CONFIG_OF
struct device_node *of_node = dev->of_node;
const struct of_device_id *match;
-
match = of_match_node(dt_match, of_node);
if (match && match->data) {
hdmi_cfg = (struct hdmi_platform_config *)match->data;
@@ -436,18 +504,20 @@ static int hdmi_bind(struct device *dev, struct device *master, void *data)
hdmi_cfg->mmio_name = "core_physical";
hdmi_cfg->qfprom_mmio_name = "qfprom_physical";
+ hdmi_cfg->hdcp_mmio_name = "hdcp_physical";
hdmi_cfg->ddc_clk_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-clk");
hdmi_cfg->ddc_data_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-ddc-data");
hdmi_cfg->hpd_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd");
hdmi_cfg->mux_en_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-en");
hdmi_cfg->mux_sel_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-sel");
hdmi_cfg->mux_lpm_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-mux-lpm");
-
+ hdmi_cfg->hpd5v_gpio = get_gpio(dev, of_node, "qcom,hdmi-tx-hpd5v");
#else
static struct hdmi_platform_config config = {};
static const char *hpd_clk_names[] = {
"core_clk", "master_iface_clk", "slave_iface_clk",
};
+
if (cpu_is_apq8064()) {
static const char *hpd_reg_names[] = {"8921_hdmi_mvs"};
config.phy_init = hdmi_phy_8960_init;
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.h b/drivers/gpu/drm/msm/hdmi/hdmi.h
index d0e663192d01..9a0733bf81ff 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.h
@@ -27,6 +27,11 @@
#include "msm_drv.h"
#include "hdmi.xml.h"
+#define HDMI_SEC_TO_MS 1000
+#define HDMI_MS_TO_US 1000
+#define HDMI_SEC_TO_US (HDMI_SEC_TO_MS * HDMI_MS_TO_US)
+#define HDMI_KHZ_TO_HZ 1000
+#define HDMI_BUSY_WAIT_DELAY_US 100
struct hdmi_phy;
struct hdmi_platform_config;
@@ -45,15 +50,23 @@ struct hdmi {
const struct hdmi_platform_config *config;
+ /* hpd state: */
+ bool hpd_off;
+
/* audio state: */
struct hdmi_audio audio;
/* video state: */
bool power_on;
unsigned long int pixclock;
+ unsigned long int actual_pixclock;
void __iomem *mmio;
void __iomem *qfprom_mmio;
+ void __iomem *hdcp_mmio;
+ u32 mmio_len;
+ u32 qfprom_mmio_len;
+ u32 hdcp_mmio_len;
phys_addr_t mmio_phy_addr;
struct regulator **hpd_regs;
@@ -70,12 +83,16 @@ struct hdmi {
struct drm_encoder *encoder;
bool hdmi_mode; /* are we in hdmi mode? */
-
+ bool is_hdcp_supported;
int irq;
+ void (*ddc_sw_done_cb)(void *data);
+ void *sw_done_cb_data;
struct workqueue_struct *workq;
struct hdmi_hdcp_ctrl *hdcp_ctrl;
-
+ bool use_hard_timeout;
+ int busy_wait_us;
+ u32 timeout_count;
/*
* spinlock to protect registers shared by different execution
* REG_HDMI_CTRL
@@ -91,7 +108,7 @@ struct hdmi_platform_config {
struct hdmi_phy *(*phy_init)(struct hdmi *hdmi);
const char *mmio_name;
const char *qfprom_mmio_name;
-
+ const char *hdcp_mmio_name;
/* regulators that need to be on for hpd: */
const char **hpd_reg_names;
int hpd_reg_cnt;
@@ -110,12 +127,26 @@ struct hdmi_platform_config {
int pwr_clk_cnt;
/* gpio's: */
- int ddc_clk_gpio, ddc_data_gpio, hpd_gpio, mux_en_gpio, mux_sel_gpio;
+ int ddc_clk_gpio, ddc_data_gpio;
+ int hpd_gpio, mux_en_gpio;
+ int mux_sel_gpio, hpd5v_gpio;
int mux_lpm_gpio;
};
+struct hdmi_i2c_adapter {
+ struct i2c_adapter base;
+ struct hdmi *hdmi;
+ bool sw_done;
+ wait_queue_head_t ddc_event;
+};
+
void hdmi_set_mode(struct hdmi *hdmi, bool power_on);
+#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
+
+int ddc_clear_irq(struct hdmi *hdmi);
+void init_ddc(struct hdmi *hdmi);
+
static inline void hdmi_write(struct hdmi *hdmi, u32 reg, u32 data)
{
msm_writel(data, hdmi->mmio + reg);
@@ -185,12 +216,19 @@ void hdmi_i2c_destroy(struct i2c_adapter *i2c);
struct i2c_adapter *hdmi_i2c_init(struct hdmi *hdmi);
/*
+ * DDC utility functions
+ */
+int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len, bool self_retry);
+int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len, bool self_retry);
+/*
* hdcp
*/
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi);
-void hdmi_hdcp_destroy(struct hdmi *hdmi);
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi);
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi);
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl);
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl);
#endif /* __HDMI_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index 10c45700aefe..ea485a2ec2cd 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
-Copyright (C) 2013-2015 by the following authors:
+Copyright (C) 2013-2016 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
@@ -110,6 +111,8 @@ static inline uint32_t HDMI_ACR_PKT_CTRL_N_MULTIPLIER(uint32_t val)
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_SOURCE 0x00000040
#define HDMI_INFOFRAME_CTRL0_AUDIO_INFO_UPDATE 0x00000080
+#define REG_HDMI_INFOFRAME_CTRL1 0x00000030
+
#define REG_HDMI_GEN_PKT_CTRL 0x00000034
#define HDMI_GEN_PKT_CTRL_GENERIC0_SEND 0x00000001
#define HDMI_GEN_PKT_CTRL_GENERIC0_CONT 0x00000002
@@ -149,6 +152,7 @@ static inline uint32_t REG_HDMI_GENERIC0(uint32_t i0) { return 0x00000088 + 0x4*
#define REG_HDMI_GENERIC1_HDR 0x000000a4
+#define MAX_REG_HDMI_GENERIC1_INDEX 6
static inline uint32_t REG_HDMI_GENERIC1(uint32_t i0) { return 0x000000a8 + 0x4*i0; }
static inline uint32_t REG_HDMI_ACR(enum hdmi_acr_cts i0) { return 0x000000c4 + 0x8*i0; }
@@ -462,13 +466,13 @@ static inline uint32_t HDMI_DDC_REF_REFTIMER(uint32_t val)
#define REG_HDMI_CEC_RD_FILTER 0x000002b0
#define REG_HDMI_ACTIVE_HSYNC 0x000002b4
-#define HDMI_ACTIVE_HSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_HSYNC_START__MASK 0x00001fff
#define HDMI_ACTIVE_HSYNC_START__SHIFT 0
static inline uint32_t HDMI_ACTIVE_HSYNC_START(uint32_t val)
{
return ((val) << HDMI_ACTIVE_HSYNC_START__SHIFT) & HDMI_ACTIVE_HSYNC_START__MASK;
}
-#define HDMI_ACTIVE_HSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_HSYNC_END__MASK 0x1fff0000
#define HDMI_ACTIVE_HSYNC_END__SHIFT 16
static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
{
@@ -476,13 +480,13 @@ static inline uint32_t HDMI_ACTIVE_HSYNC_END(uint32_t val)
}
#define REG_HDMI_ACTIVE_VSYNC 0x000002b8
-#define HDMI_ACTIVE_VSYNC_START__MASK 0x00000fff
+#define HDMI_ACTIVE_VSYNC_START__MASK 0x00001fff
#define HDMI_ACTIVE_VSYNC_START__SHIFT 0
static inline uint32_t HDMI_ACTIVE_VSYNC_START(uint32_t val)
{
return ((val) << HDMI_ACTIVE_VSYNC_START__SHIFT) & HDMI_ACTIVE_VSYNC_START__MASK;
}
-#define HDMI_ACTIVE_VSYNC_END__MASK 0x0fff0000
+#define HDMI_ACTIVE_VSYNC_END__MASK 0x1fff0000
#define HDMI_ACTIVE_VSYNC_END__SHIFT 16
static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
{
@@ -490,13 +494,13 @@ static inline uint32_t HDMI_ACTIVE_VSYNC_END(uint32_t val)
}
#define REG_HDMI_VSYNC_ACTIVE_F2 0x000002bc
-#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00000fff
+#define HDMI_VSYNC_ACTIVE_F2_START__MASK 0x00001fff
#define HDMI_VSYNC_ACTIVE_F2_START__SHIFT 0
static inline uint32_t HDMI_VSYNC_ACTIVE_F2_START(uint32_t val)
{
return ((val) << HDMI_VSYNC_ACTIVE_F2_START__SHIFT) & HDMI_VSYNC_ACTIVE_F2_START__MASK;
}
-#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x0fff0000
+#define HDMI_VSYNC_ACTIVE_F2_END__MASK 0x1fff0000
#define HDMI_VSYNC_ACTIVE_F2_END__SHIFT 16
static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
{
@@ -504,13 +508,13 @@ static inline uint32_t HDMI_VSYNC_ACTIVE_F2_END(uint32_t val)
}
#define REG_HDMI_TOTAL 0x000002c0
-#define HDMI_TOTAL_H_TOTAL__MASK 0x00000fff
+#define HDMI_TOTAL_H_TOTAL__MASK 0x00001fff
#define HDMI_TOTAL_H_TOTAL__SHIFT 0
static inline uint32_t HDMI_TOTAL_H_TOTAL(uint32_t val)
{
return ((val) << HDMI_TOTAL_H_TOTAL__SHIFT) & HDMI_TOTAL_H_TOTAL__MASK;
}
-#define HDMI_TOTAL_V_TOTAL__MASK 0x0fff0000
+#define HDMI_TOTAL_V_TOTAL__MASK 0x1fff0000
#define HDMI_TOTAL_V_TOTAL__SHIFT 16
static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
{
@@ -518,7 +522,7 @@ static inline uint32_t HDMI_TOTAL_V_TOTAL(uint32_t val)
}
#define REG_HDMI_VSYNC_TOTAL_F2 0x000002c4
-#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00000fff
+#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__MASK 0x00001fff
#define HDMI_VSYNC_TOTAL_F2_V_TOTAL__SHIFT 0
static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
{
@@ -559,6 +563,20 @@ static inline uint32_t HDMI_VSYNC_TOTAL_F2_V_TOTAL(uint32_t val)
#define REG_HDMI_CEC_WR_CHECK_CONFIG 0x00000370
+#define REG_HDMI_DDC_INT_CTRL0 0x00000430
+#define REG_HDMI_DDC_INT_CTRL1 0x00000434
+#define REG_HDMI_DDC_INT_CTRL2 0x00000438
+#define REG_HDMI_DDC_INT_CTRL3 0x0000043C
+#define REG_HDMI_DDC_INT_CTRL4 0x00000440
+#define REG_HDMI_DDC_INT_CTRL5 0x00000444
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_CTRL 0x00000464
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL 0x00000468
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_CTRL2 0x0000046C
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_STATUS 0x00000470
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS 0x00000474
+#define REG_HDMI_SCRAMBLER_STATUS_DDC_TIMER_STATUS2 0x00000478
+#define REG_HDMI_HW_DDC_CTRL 0x000004CC
+
#define REG_HDMI_8x60_PHY_REG0 0x00000300
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__MASK 0x0000001c
#define HDMI_8x60_PHY_REG0_DESER_DEL_CTRL__SHIFT 2
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
index 92b69ae8caf9..5b6a90abd108 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_bridge.c
@@ -106,7 +106,7 @@ static void hdmi_bridge_pre_enable(struct drm_bridge *bridge)
hdmi_set_mode(hdmi, true);
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_on(hdmi->hdcp_ctrl);
+ hdmi_hdcp_ctrl_on(hdmi->hdcp_ctrl);
}
static void hdmi_bridge_enable(struct drm_bridge *bridge)
@@ -124,7 +124,7 @@ static void hdmi_bridge_post_disable(struct drm_bridge *bridge)
struct hdmi_phy *phy = hdmi->phy;
if (hdmi->hdcp_ctrl)
- hdmi_hdcp_off(hdmi->hdcp_ctrl);
+ hdmi_hdcp_ctrl_off(hdmi->hdcp_ctrl);
DBG("power down");
hdmi_set_mode(hdmi, false);
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
index 1dc9c34eb0df..66be37bea4f6 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_hdcp.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2010-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2010-2017, The Linux Foundation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@@ -14,6 +14,7 @@
#include "hdmi.h"
#include <linux/qcom_scm.h>
+#ifdef CONFIG_DRM_MSM_HDCP
#define HDCP_REG_ENABLE 0x01
#define HDCP_REG_DISABLE 0x00
#define HDCP_PORT_ADDR 0x74
@@ -84,84 +85,6 @@ struct hdmi_hdcp_ctrl {
bool max_dev_exceeded;
};
-static int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
- u8 *data, u16 data_len)
-{
- int rc;
- int retry = 5;
- struct i2c_msg msgs[] = {
- {
- .addr = addr >> 1,
- .flags = 0,
- .len = 1,
- .buf = &offset,
- }, {
- .addr = addr >> 1,
- .flags = I2C_M_RD,
- .len = data_len,
- .buf = data,
- }
- };
-
- DBG("Start DDC read");
-retry:
- rc = i2c_transfer(hdmi->i2c, msgs, 2);
-
- retry--;
- if (rc == 2)
- rc = 0;
- else if (retry > 0)
- goto retry;
- else
- rc = -EIO;
-
- DBG("End DDC read %d", rc);
-
- return rc;
-}
-
-#define HDCP_DDC_WRITE_MAX_BYTE_NUM 32
-
-static int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
- u8 *data, u16 data_len)
-{
- int rc;
- int retry = 10;
- u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
- struct i2c_msg msgs[] = {
- {
- .addr = addr >> 1,
- .flags = 0,
- .len = 1,
- }
- };
-
- DBG("Start DDC write");
- if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
- pr_err("%s: write size too big\n", __func__);
- return -ERANGE;
- }
-
- buf[0] = offset;
- memcpy(&buf[1], data, data_len);
- msgs[0].buf = buf;
- msgs[0].len = data_len + 1;
-retry:
- rc = i2c_transfer(hdmi->i2c, msgs, 1);
-
- retry--;
- if (rc == 1)
- rc = 0;
- else if (retry > 0)
- goto retry;
- else
- rc = -EIO;
-
- DBG("End DDC write %d", rc);
-
- return rc;
-}
-
static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
u32 *pdata, u32 count)
{
@@ -202,7 +125,7 @@ static int hdmi_hdcp_scm_wr(struct hdmi_hdcp_ctrl *hdcp_ctrl, u32 *preg,
return ret;
}
-void hdmi_hdcp_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val, hdcp_int_status;
@@ -1310,7 +1233,7 @@ end:
}
}
-void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
u32 reg_val;
@@ -1335,7 +1258,7 @@ void hdmi_hdcp_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
queue_work(hdmi->workq, &hdcp_ctrl->hdcp_auth_work);
}
-void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
{
struct hdmi *hdmi = hdcp_ctrl->hdmi;
unsigned long flags;
@@ -1399,7 +1322,7 @@ void hdmi_hdcp_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
DBG("HDCP: Off");
}
-struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi)
{
struct hdmi_hdcp_ctrl *hdcp_ctrl = NULL;
@@ -1428,10 +1351,33 @@ struct hdmi_hdcp_ctrl *hdmi_hdcp_init(struct hdmi *hdmi)
return hdcp_ctrl;
}
-void hdmi_hdcp_destroy(struct hdmi *hdmi)
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi)
{
if (hdmi && hdmi->hdcp_ctrl) {
kfree(hdmi->hdcp_ctrl);
hdmi->hdcp_ctrl = NULL;
}
}
+
+#else
+struct hdmi_hdcp_ctrl *hdmi_hdcp_ctrl_init(struct hdmi *hdmi)
+{
+ return NULL;
+}
+
+void hdmi_hdcp_ctrl_destroy(struct hdmi *hdmi)
+{
+}
+
+void hdmi_hdcp_ctrl_on(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+
+void hdmi_hdcp_ctrl_off(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+
+void hdmi_hdcp_ctrl_irq(struct hdmi_hdcp_ctrl *hdcp_ctrl)
+{
+}
+#endif
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
index f4ab7f70fed1..c65cc908b882 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_i2c.c
@@ -17,66 +17,16 @@
#include "hdmi.h"
-struct hdmi_i2c_adapter {
- struct i2c_adapter base;
- struct hdmi *hdmi;
- bool sw_done;
- wait_queue_head_t ddc_event;
-};
-#define to_hdmi_i2c_adapter(x) container_of(x, struct hdmi_i2c_adapter, base)
-
-static void init_ddc(struct hdmi_i2c_adapter *hdmi_i2c)
-{
- struct hdmi *hdmi = hdmi_i2c->hdmi;
-
- hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
- HDMI_DDC_CTRL_SW_STATUS_RESET);
- hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
- HDMI_DDC_CTRL_SOFT_RESET);
-
- hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
- HDMI_DDC_SPEED_THRESHOLD(2) |
- HDMI_DDC_SPEED_PRESCALE(10));
-
- hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
- HDMI_DDC_SETUP_TIMEOUT(0xff));
+#define MAX_TRANSACTIONS 4
- /* enable reference timer for 27us */
- hdmi_write(hdmi, REG_HDMI_DDC_REF,
- HDMI_DDC_REF_REFTIMER_ENABLE |
- HDMI_DDC_REF_REFTIMER(27));
-}
+#define SDE_DDC_TXN_CNT_MASK 0x07ff0000
+#define SDE_DDC_TXN_CNT_SHIFT 16
-static int ddc_clear_irq(struct hdmi_i2c_adapter *hdmi_i2c)
+static inline uint32_t SDE_HDMI_I2C_TRANSACTION_REG_CNT(uint32_t val)
{
- struct hdmi *hdmi = hdmi_i2c->hdmi;
- struct drm_device *dev = hdmi->dev;
- uint32_t retry = 0xffff;
- uint32_t ddc_int_ctrl;
-
- do {
- --retry;
-
- hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
- HDMI_DDC_INT_CTRL_SW_DONE_ACK |
- HDMI_DDC_INT_CTRL_SW_DONE_MASK);
-
- ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
-
- } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
-
- if (!retry) {
- dev_err(dev->dev, "timeout waiting for DDC\n");
- return -ETIMEDOUT;
- }
-
- hdmi_i2c->sw_done = false;
-
- return 0;
+ return ((val) << SDE_DDC_TXN_CNT_SHIFT) & SDE_DDC_TXN_CNT_MASK;
}
-#define MAX_TRANSACTIONS 4
-
static bool sw_done(struct hdmi_i2c_adapter *hdmi_i2c)
{
struct hdmi *hdmi = hdmi_i2c->hdmi;
@@ -115,12 +65,13 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
WARN_ON(!(hdmi_read(hdmi, REG_HDMI_CTRL) & HDMI_CTRL_ENABLE));
+
if (num == 0)
return num;
- init_ddc(hdmi_i2c);
+ init_ddc(hdmi);
- ret = ddc_clear_irq(hdmi_i2c);
+ ret = ddc_clear_irq(hdmi);
if (ret)
return ret;
@@ -155,7 +106,7 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
}
}
- i2c_trans = HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
+ i2c_trans = SDE_HDMI_I2C_TRANSACTION_REG_CNT(p->len) |
HDMI_I2C_TRANSACTION_REG_RW(
(p->flags & I2C_M_RD) ? DDC_READ : DDC_WRITE) |
HDMI_I2C_TRANSACTION_REG_START;
@@ -177,9 +128,13 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
ret = -ETIMEDOUT;
dev_warn(dev->dev, "DDC timeout: %d\n", ret);
DBG("sw_status=%08x, hw_status=%08x, int_ctrl=%08x",
- hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
- hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
- hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
+ hdmi_read(hdmi, REG_HDMI_DDC_SW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_HW_STATUS),
+ hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL));
+ if (hdmi->use_hard_timeout) {
+ hdmi->use_hard_timeout = false;
+ hdmi->timeout_count = 0;
+ }
return ret;
}
@@ -213,6 +168,10 @@ static int hdmi_i2c_xfer(struct i2c_adapter *i2c,
}
}
+ if (hdmi->use_hard_timeout) {
+ hdmi->use_hard_timeout = false;
+ hdmi->timeout_count = jiffies_to_msecs(ret);
+ }
return i;
}
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi_util.c b/drivers/gpu/drm/msm/hdmi/hdmi_util.c
new file mode 100644
index 000000000000..a8142e5c0fbb
--- /dev/null
+++ b/drivers/gpu/drm/msm/hdmi/hdmi_util.c
@@ -0,0 +1,149 @@
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/of_irq.h>
+#include "hdmi.h"
+
+void init_ddc(struct hdmi *hdmi)
+{
+
+ uint32_t ddc_speed;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SW_STATUS_RESET);
+ hdmi_write(hdmi, REG_HDMI_DDC_CTRL,
+ HDMI_DDC_CTRL_SOFT_RESET);
+
+ ddc_speed = hdmi_read(hdmi, REG_HDMI_DDC_SPEED);
+ ddc_speed |= HDMI_DDC_SPEED_THRESHOLD(2);
+ ddc_speed |= HDMI_DDC_SPEED_PRESCALE(12);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SPEED,
+ ddc_speed);
+
+ hdmi_write(hdmi, REG_HDMI_DDC_SETUP,
+ HDMI_DDC_SETUP_TIMEOUT(0xff));
+
+ /* enable reference timer for 19us */
+ hdmi_write(hdmi, REG_HDMI_DDC_REF,
+ HDMI_DDC_REF_REFTIMER_ENABLE |
+ HDMI_DDC_REF_REFTIMER(19));
+}
+
+int ddc_clear_irq(struct hdmi *hdmi)
+{
+ struct hdmi_i2c_adapter *hdmi_i2c = to_hdmi_i2c_adapter(hdmi->i2c);
+ struct drm_device *dev = hdmi->dev;
+ uint32_t retry = 0xffff;
+ uint32_t ddc_int_ctrl;
+
+ do {
+ --retry;
+
+ hdmi_write(hdmi, REG_HDMI_DDC_INT_CTRL,
+ HDMI_DDC_INT_CTRL_SW_DONE_ACK |
+ HDMI_DDC_INT_CTRL_SW_DONE_MASK);
+
+ ddc_int_ctrl = hdmi_read(hdmi, REG_HDMI_DDC_INT_CTRL);
+
+ } while ((ddc_int_ctrl & HDMI_DDC_INT_CTRL_SW_DONE_INT) && retry);
+
+ if (!retry) {
+ dev_err(dev->dev, "timeout waiting for DDC\n");
+ return -ETIMEDOUT;
+ }
+
+ hdmi_i2c->sw_done = false;
+
+ return 0;
+}
+
+int hdmi_ddc_read(struct hdmi *hdmi, u16 addr, u8 offset,
+u8 *data, u16 data_len, bool self_retry)
+{
+ int rc;
+ int retry = 10;
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ .buf = &offset,
+ }, {
+ .addr = addr >> 1,
+ .flags = I2C_M_RD,
+ .len = data_len,
+ .buf = data,
+ }
+ };
+
+ DBG("Start DDC read");
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 2);
+ retry--;
+
+ if (rc == 2)
+ rc = 0;
+ else if (self_retry && (retry > 0))
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC read %d", rc);
+
+ return rc;
+}
+
+#define HDCP_DDC_WRITE_MAX_BYTE_NUM 1024
+
+int hdmi_ddc_write(struct hdmi *hdmi, u16 addr, u8 offset,
+ u8 *data, u16 data_len, bool self_retry)
+{
+ int rc;
+ int retry = 10;
+ u8 buf[HDCP_DDC_WRITE_MAX_BYTE_NUM];
+ struct i2c_msg msgs[] = {
+ {
+ .addr = addr >> 1,
+ .flags = 0,
+ .len = 1,
+ }
+ };
+
+ pr_debug("TESTING ! REMOVE RETRY Start DDC write");
+ if (data_len > (HDCP_DDC_WRITE_MAX_BYTE_NUM - 1)) {
+ pr_err("%s: write size too big\n", __func__);
+ return -ERANGE;
+ }
+
+ buf[0] = offset;
+ memcpy(&buf[1], data, data_len);
+ msgs[0].buf = buf;
+ msgs[0].len = data_len + 1;
+retry:
+ rc = i2c_transfer(hdmi->i2c, msgs, 1);
+ retry--;
+ if (rc == 1)
+ rc = 0;
+ else if (self_retry && (retry > 0))
+ goto retry;
+ else
+ rc = -EIO;
+
+ DBG("End DDC write %d", rc);
+
+ return rc;
+}
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index dbd9cc4daf2e..6eab7d0cf6b5 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
index d5d94575fa1b..6688e79cc88e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
index 6ac9aa165768..e5f42fe983c1 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_crtc.c
@@ -40,7 +40,7 @@ struct mdp4_crtc {
uint32_t x, y;
/* next cursor to scan-out: */
- uint32_t next_iova;
+ uint64_t next_iova;
struct drm_gem_object *next_bo;
/* current cursor being scanned out: */
@@ -133,7 +133,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp4_crtc, unref_cursor_work);
struct mdp4_kms *mdp4_kms = get_kms(&mdp4_crtc->base);
- msm_gem_put_iova(val, mdp4_kms->id);
+ msm_gem_put_iova(val, mdp4_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -387,25 +387,28 @@ static void update_cursor(struct drm_crtc *crtc)
if (mdp4_crtc->cursor.stale) {
struct drm_gem_object *next_bo = mdp4_crtc->cursor.next_bo;
struct drm_gem_object *prev_bo = mdp4_crtc->cursor.scanout_bo;
- uint32_t iova = mdp4_crtc->cursor.next_iova;
+ uint64_t iova = mdp4_crtc->cursor.next_iova;
if (next_bo) {
/* take a obj ref + iova ref when we start scanning out: */
drm_gem_object_reference(next_bo);
- msm_gem_get_iova_locked(next_bo, mdp4_kms->id, &iova);
+ msm_gem_get_iova(next_bo, mdp4_kms->aspace,
+ &iova);
/* enable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_SIZE(dma),
MDP4_DMA_CURSOR_SIZE_WIDTH(mdp4_crtc->cursor.width) |
MDP4_DMA_CURSOR_SIZE_HEIGHT(mdp4_crtc->cursor.height));
- mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma), iova);
+ /* FIXME: Make sure iova < 32 bits */
+ mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
+ lower_32_bits(iova));
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BLEND_CONFIG(dma),
MDP4_DMA_CURSOR_BLEND_CONFIG_FORMAT(CURSOR_ARGB) |
MDP4_DMA_CURSOR_BLEND_CONFIG_CURSOR_EN);
} else {
/* disable cursor: */
mdp4_write(mdp4_kms, REG_MDP4_DMA_CURSOR_BASE(dma),
- mdp4_kms->blank_cursor_iova);
+ lower_32_bits(mdp4_kms->blank_cursor_iova));
}
/* and drop the iova ref + obj rev when done scanning out: */
@@ -432,7 +435,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct drm_gem_object *cursor_bo, *old_bo;
unsigned long flags;
- uint32_t iova;
+ uint64_t iova;
int ret;
if ((width > CURSOR_WIDTH) || (height > CURSOR_HEIGHT)) {
@@ -449,7 +452,7 @@ static int mdp4_crtc_cursor_set(struct drm_crtc *crtc,
}
if (cursor_bo) {
- ret = msm_gem_get_iova(cursor_bo, mdp4_kms->id, &iova);
+ ret = msm_gem_get_iova(cursor_bo, mdp4_kms->aspace, &iova);
if (ret)
goto fail;
} else {
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
index 077f7521a971..40509434a913 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.c
@@ -17,6 +17,7 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp4_kms.h"
@@ -177,18 +178,33 @@ static void mdp4_preclose(struct msm_kms *kms, struct drm_file *file)
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
struct msm_drm_private *priv = mdp4_kms->dev->dev_private;
unsigned i;
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
for (i = 0; i < priv->num_crtcs; i++)
mdp4_crtc_cancel_pending_flip(priv->crtcs[i], file);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_destroy(aspace);
+ }
}
static void mdp4_destroy(struct msm_kms *kms)
{
+ struct device *dev = mdp4_kms->dev->dev;
+ struct msm_gem_address_space *aspace = mdp4_kms->aspace;
+
struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms));
if (mdp4_kms->blank_cursor_iova)
- msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id);
+ msm_gem_put_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace);
if (mdp4_kms->blank_cursor_bo)
drm_gem_object_unreference_unlocked(mdp4_kms->blank_cursor_bo);
+
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
+ }
+
kfree(mdp4_kms);
}
@@ -398,17 +414,13 @@ fail:
return ret;
}
-static const char *iommu_ports[] = {
- "mdp_port0_cb0", "mdp_port1_cb0",
-};
-
struct msm_kms *mdp4_kms_init(struct drm_device *dev)
{
struct platform_device *pdev = dev->platformdev;
struct mdp4_platform_config *config = mdp4_get_config(pdev);
struct mdp4_kms *mdp4_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
int ret;
mdp4_kms = kzalloc(sizeof(*mdp4_kms), GFP_KERNEL);
@@ -497,26 +509,25 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
mdelay(16);
if (config->iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->iommu);
- if (IS_ERR(mmu)) {
- ret = PTR_ERR(mmu);
+ config->iommu->geometry.aperture_start = 0x1000;
+ config->iommu->geometry.aperture_end = 0xffffffff;
+
+ aspace = msm_gem_address_space_create(&pdev->dev,
+ config->iommu, MSM_IOMMU_DOMAIN_DEFAULT, "mdp4");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+
+ mdp4_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret)
goto fail;
} else {
dev_info(dev->dev, "no iommu, fallback to phys "
"contig buffers for scanout\n");
- mmu = NULL;
- }
-
- mdp4_kms->id = msm_register_mmu(dev, mmu);
- if (mdp4_kms->id < 0) {
- ret = mdp4_kms->id;
- dev_err(dev->dev, "failed to register mdp4 iommu: %d\n", ret);
- goto fail;
+ aspace = NULL;
}
ret = modeset_init(mdp4_kms);
@@ -525,9 +536,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- mutex_lock(&dev->struct_mutex);
mdp4_kms->blank_cursor_bo = msm_gem_new(dev, SZ_16K, MSM_BO_WC);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(mdp4_kms->blank_cursor_bo)) {
ret = PTR_ERR(mdp4_kms->blank_cursor_bo);
dev_err(dev->dev, "could not allocate blank-cursor bo: %d\n", ret);
@@ -535,7 +544,7 @@ struct msm_kms *mdp4_kms_init(struct drm_device *dev)
goto fail;
}
- ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->id,
+ ret = msm_gem_get_iova(mdp4_kms->blank_cursor_bo, mdp4_kms->aspace,
&mdp4_kms->blank_cursor_iova);
if (ret) {
dev_err(dev->dev, "could not pin blank-cursor bo: %d\n", ret);
@@ -561,7 +570,8 @@ static struct mdp4_platform_config *mdp4_get_config(struct platform_device *dev)
#ifdef CONFIG_OF
/* TODO */
config.max_clk = 266667000;
- config.iommu = iommu_domain_alloc(&platform_bus_type);
+ config.iommu = iommu_domain_alloc(msm_iommu_get_bus(&dev->dev));
+
#else
if (cpu_is_apq8064())
config.max_clk = 266667000;
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
index 8a7f6e1e2bca..5cf03e58a4f4 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_kms.h
@@ -33,8 +33,6 @@ struct mdp4_kms {
int rev;
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
-
void __iomem *mmio;
struct regulator *dsi_pll_vdda;
@@ -45,12 +43,13 @@ struct mdp4_kms {
struct clk *pclk;
struct clk *lut_clk;
struct clk *axi_clk;
+ struct msm_gem_address_space *aspace;
struct mdp_irq error_handler;
/* empty/blank cursor bo to use when cursor is "disabled" */
struct drm_gem_object *blank_cursor_bo;
- uint32_t blank_cursor_iova;
+ uint64_t blank_cursor_iova;
};
#define to_mdp4_kms(x) container_of(x, struct mdp4_kms, base)
diff --git a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
index 30d57e74c42f..bc1ece2a5b7e 100644
--- a/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp4/mdp4_plane.c
@@ -109,7 +109,7 @@ static int mdp4_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp4_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp4_kms->id);
+ return msm_framebuffer_prepare(fb, mdp4_kms->aspace);
}
static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
@@ -123,7 +123,7 @@ static void mdp4_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp4_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp4_kms->id);
+ msm_framebuffer_cleanup(fb, mdp4_kms->aspace);
}
@@ -172,13 +172,13 @@ static void mdp4_plane_set_scanout(struct drm_plane *plane,
MDP4_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP0_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 0));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP1_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 1));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP2_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 2));
mdp4_write(mdp4_kms, REG_MDP4_PIPE_SRCP3_BASE(pipe),
- msm_framebuffer_iova(fb, mdp4_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp4_kms->aspace, 3));
plane->fb = fb;
}
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
index c37da9c61e29..b275ce11b24b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
index 7f9f4ac88029..65e085fd2b6a 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_crtc.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014-2015, 2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -171,7 +171,7 @@ static void unref_cursor_worker(struct drm_flip_work *work, void *val)
container_of(work, struct mdp5_crtc, unref_cursor_work);
struct mdp5_kms *mdp5_kms = get_kms(&mdp5_crtc->base);
- msm_gem_put_iova(val, mdp5_kms->id);
+ msm_gem_put_iova(val, mdp5_kms->aspace);
drm_gem_object_unreference_unlocked(val);
}
@@ -242,7 +242,7 @@ static void blend_setup(struct drm_crtc *crtc)
/* The reset for blending */
for (i = STAGE0; i <= STAGE_MAX; i++) {
- if (!pstates[i])
+ if (!pstates[i] || !pstates[i]->base.fb)
continue;
format = to_mdp_format(
@@ -509,7 +509,8 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
struct drm_device *dev = crtc->dev;
struct mdp5_kms *mdp5_kms = get_kms(crtc);
struct drm_gem_object *cursor_bo, *old_bo = NULL;
- uint32_t blendcfg, cursor_addr, stride;
+ uint32_t blendcfg, stride;
+ uint64_t cursor_addr;
int ret, bpp, lm;
unsigned int depth;
enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
@@ -536,7 +537,7 @@ static int mdp5_crtc_cursor_set(struct drm_crtc *crtc,
if (!cursor_bo)
return -ENOENT;
- ret = msm_gem_get_iova(cursor_bo, mdp5_kms->id, &cursor_addr);
+ ret = msm_gem_get_iova(cursor_bo, mdp5_kms->aspace, &cursor_addr);
if (ret)
return -EINVAL;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
index b532faa8026d..4dbf456504b7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014, 2016-2017 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -18,13 +18,10 @@
#include "msm_drv.h"
+#include "msm_gem.h"
#include "msm_mmu.h"
#include "mdp5_kms.h"
-static const char *iommu_ports[] = {
- "mdp_0",
-};
-
static int mdp5_hw_init(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
@@ -130,13 +127,13 @@ static void mdp5_preclose(struct msm_kms *kms, struct drm_file *file)
static void mdp5_destroy(struct msm_kms *kms)
{
struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms));
- struct msm_mmu *mmu = mdp5_kms->mmu;
+ struct msm_gem_address_space *aspace = mdp5_kms->aspace;
mdp5_irq_domain_fini(mdp5_kms);
- if (mmu) {
- mmu->funcs->detach(mmu, iommu_ports, ARRAY_SIZE(iommu_ports));
- mmu->funcs->destroy(mmu);
+ if (aspace) {
+ aspace->mmu->funcs->detach(aspace->mmu);
+ msm_gem_address_space_put(aspace);
}
if (mdp5_kms->ctlm)
@@ -474,7 +471,7 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
struct mdp5_cfg *config;
struct mdp5_kms *mdp5_kms;
struct msm_kms *kms = NULL;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
uint32_t major, minor;
int i, ret;
@@ -595,33 +592,33 @@ struct msm_kms *mdp5_kms_init(struct drm_device *dev)
mdelay(16);
if (config->platform.iommu) {
- mmu = msm_iommu_new(&pdev->dev, config->platform.iommu);
+ struct msm_mmu *mmu = msm_smmu_new(&pdev->dev,
+ MSM_SMMU_DOMAIN_UNSECURE);
if (IS_ERR(mmu)) {
ret = PTR_ERR(mmu);
dev_err(dev->dev, "failed to init iommu: %d\n", ret);
iommu_domain_free(config->platform.iommu);
+ }
+
+ aspace = msm_gem_smmu_address_space_create(&pdev->dev,
+ mmu, "mdp5");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
goto fail;
}
- ret = mmu->funcs->attach(mmu, iommu_ports,
- ARRAY_SIZE(iommu_ports));
+ mdp5_kms->aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
if (ret) {
- dev_err(dev->dev, "failed to attach iommu: %d\n", ret);
- mmu->funcs->destroy(mmu);
+ dev_err(&pdev->dev, "failed to attach iommu: %d\n",
+ ret);
goto fail;
}
} else {
- dev_info(dev->dev, "no iommu, fallback to phys "
- "contig buffers for scanout\n");
- mmu = NULL;
- }
- mdp5_kms->mmu = mmu;
-
- mdp5_kms->id = msm_register_mmu(dev, mmu);
- if (mdp5_kms->id < 0) {
- ret = mdp5_kms->id;
- dev_err(dev->dev, "failed to register mdp5 iommu: %d\n", ret);
- goto fail;
+ dev_info(&pdev->dev,
+ "no iommu, fallback to phys contig buffers for scanout\n");
+ aspace = NULL;
}
ret = modeset_init(mdp5_kms);
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
index 84f65d415598..c1aa86d21416 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_kms.h
@@ -36,8 +36,7 @@ struct mdp5_kms {
/* mapper-id used to request GEM buffer mapped for scanout: */
- int id;
- struct msm_mmu *mmu;
+ struct msm_gem_address_space *aspace;
struct mdp5_smp *smp;
struct mdp5_ctl_manager *ctlm;
diff --git a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
index 81cd49045ffc..d751625bbfd7 100644
--- a/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
+++ b/drivers/gpu/drm/msm/mdp/mdp5/mdp5_plane.c
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2014-2015 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2014-2015, 2018 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -193,7 +193,8 @@ static void mdp5_plane_reset(struct drm_plane *plane)
kfree(to_mdp5_plane_state(plane->state));
mdp5_state = kzalloc(sizeof(*mdp5_state), GFP_KERNEL);
-
+ if (!mdp5_state)
+ return;
/* assign default blend parameters */
mdp5_state->alpha = 255;
mdp5_state->premultiplied = 0;
@@ -218,8 +219,10 @@ mdp5_plane_duplicate_state(struct drm_plane *plane)
mdp5_state = kmemdup(to_mdp5_plane_state(plane->state),
sizeof(*mdp5_state), GFP_KERNEL);
+ if (!mdp5_state)
+ return NULL;
- if (mdp5_state && mdp5_state->base.fb)
+ if (mdp5_state->base.fb)
drm_framebuffer_reference(mdp5_state->base.fb);
mdp5_state->mode_changed = false;
@@ -260,7 +263,7 @@ static int mdp5_plane_prepare_fb(struct drm_plane *plane,
return 0;
DBG("%s: prepare: FB[%u]", mdp5_plane->name, fb->base.id);
- return msm_framebuffer_prepare(fb, mdp5_kms->id);
+ return msm_framebuffer_prepare(fb, mdp5_kms->aspace);
}
static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
@@ -274,7 +277,7 @@ static void mdp5_plane_cleanup_fb(struct drm_plane *plane,
return;
DBG("%s: cleanup: FB[%u]", mdp5_plane->name, fb->base.id);
- msm_framebuffer_cleanup(fb, mdp5_kms->id);
+ msm_framebuffer_cleanup(fb, mdp5_kms->aspace);
}
static int mdp5_plane_atomic_check(struct drm_plane *plane,
@@ -400,13 +403,13 @@ static void set_scanout_locked(struct drm_plane *plane,
MDP5_PIPE_SRC_STRIDE_B_P3(fb->pitches[3]));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC0_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 0));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 0));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC1_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 1));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 1));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC2_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 2));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 2));
mdp5_write(mdp5_kms, REG_MDP5_PIPE_SRC3_ADDR(pipe),
- msm_framebuffer_iova(fb, mdp5_kms->id, 3));
+ msm_framebuffer_iova(fb, mdp5_kms->aspace, 3));
plane->fb = fb;
}
@@ -684,14 +687,21 @@ static int mdp5_plane_mode_set(struct drm_plane *plane,
bool vflip, hflip;
unsigned long flags;
int ret;
+ const struct msm_format *msm_fmt;
+ msm_fmt = msm_framebuffer_format(fb);
nplanes = drm_format_num_planes(fb->pixel_format);
/* bad formats should already be rejected: */
if (WARN_ON(nplanes > pipe2nclients(pipe)))
return -EINVAL;
- format = to_mdp_format(msm_framebuffer_format(fb));
+ if (!msm_fmt) {
+ pr_err("invalid format");
+ return -EINVAL;
+ }
+
+ format = to_mdp_format(msm_fmt);
pix_format = format->base.pixel_format;
/* src values are in Q16 fixed point, convert to integer: */
diff --git a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
index 0aec1ac1f6d0..452e3518f98b 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_common.xml.h
@@ -9,7 +9,7 @@ git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2015-05-20 20:03:14)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1453 bytes, from 2015-05-20 20:03:07)
+- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-02-10 17:07:21)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2015-09-18 12:07:28)
- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37194 bytes, from 2015-09-18 12:07:28)
@@ -17,11 +17,12 @@ The rules-ng-ng source files this header was generated from are:
- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2015-10-22 16:35:02)
- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2015-05-20 20:03:14)
- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2015-05-20 20:03:07)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 29154 bytes, from 2015-08-10 21:25:43)
+- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41472 bytes, from 2016-01-22 18:18:18)
- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2015-05-20 20:03:14)
Copyright (C) 2013-2015 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
diff --git a/drivers/gpu/drm/msm/mdp/mdp_format.c b/drivers/gpu/drm/msm/mdp/mdp_format.c
index 1c2caffc97e4..d2fa72815833 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_format.c
+++ b/drivers/gpu/drm/msm/mdp/mdp_format.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2014,2016 The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -165,7 +165,11 @@ uint32_t mdp_get_formats(uint32_t *pixel_formats, uint32_t max_formats,
return i;
}
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format)
+const struct msm_format *mdp_get_format(
+ struct msm_kms *kms,
+ uint32_t format,
+ const uint64_t *modifiers,
+ uint32_t modifiers_len)
{
int i;
for (i = 0; i < ARRAY_SIZE(formats); i++) {
diff --git a/drivers/gpu/drm/msm/mdp/mdp_kms.h b/drivers/gpu/drm/msm/mdp/mdp_kms.h
index 303130320748..0d0723d32a03 100644
--- a/drivers/gpu/drm/msm/mdp/mdp_kms.h
+++ b/drivers/gpu/drm/msm/mdp/mdp_kms.h
@@ -98,7 +98,9 @@ struct mdp_format {
#define MDP_FORMAT_IS_YUV(mdp_format) ((mdp_format)->is_yuv)
uint32_t mdp_get_formats(uint32_t *formats, uint32_t max_formats, bool rgb_only);
-const struct msm_format *mdp_get_format(struct msm_kms *kms, uint32_t format);
+const struct msm_format *mdp_get_format(struct msm_kms *kms,
+ uint32_t format, const uint64_t *modifiers,
+ uint32_t modifiers_len);
/* MDP capabilities */
#define MDP_CAP_SMP BIT(0) /* Shared Memory Pool */
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index 221eaea651d4..8467be8c3f0b 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2014 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -18,6 +19,7 @@
#include "msm_drv.h"
#include "msm_kms.h"
#include "msm_gem.h"
+#include "sde_trace.h"
struct msm_commit {
struct drm_device *dev;
@@ -25,23 +27,26 @@ struct msm_commit {
uint32_t fence;
struct msm_fence_cb fence_cb;
uint32_t crtc_mask;
+ uint32_t plane_mask;
+ struct kthread_work commit_work;
};
-static void fence_cb(struct msm_fence_cb *cb);
-
/* block until specified crtcs are no longer pending update, and
* atomically mark them as pending update
*/
-static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask,
+ uint32_t plane_mask)
{
int ret;
spin_lock(&priv->pending_crtcs_event.lock);
ret = wait_event_interruptible_locked(priv->pending_crtcs_event,
- !(priv->pending_crtcs & crtc_mask));
+ !(priv->pending_crtcs & crtc_mask) &&
+ !(priv->pending_planes & plane_mask));
if (ret == 0) {
DBG("start: %08x", crtc_mask);
priv->pending_crtcs |= crtc_mask;
+ priv->pending_planes |= plane_mask;
}
spin_unlock(&priv->pending_crtcs_event.lock);
@@ -50,42 +55,28 @@ static int start_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
/* clear specified crtcs (no longer pending update)
*/
-static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask)
+static void end_atomic(struct msm_drm_private *priv, uint32_t crtc_mask,
+ uint32_t plane_mask)
{
spin_lock(&priv->pending_crtcs_event.lock);
DBG("end: %08x", crtc_mask);
priv->pending_crtcs &= ~crtc_mask;
+ priv->pending_planes &= ~plane_mask;
wake_up_all_locked(&priv->pending_crtcs_event);
spin_unlock(&priv->pending_crtcs_event.lock);
}
-static struct msm_commit *commit_init(struct drm_atomic_state *state)
+static void commit_destroy(struct msm_commit *commit)
{
- struct msm_commit *c = kzalloc(sizeof(*c), GFP_KERNEL);
-
- if (!c)
- return NULL;
-
- c->dev = state->dev;
- c->state = state;
-
- /* TODO we might need a way to indicate to run the cb on a
- * different wq so wait_for_vblanks() doesn't block retiring
- * bo's..
- */
- INIT_FENCE_CB(&c->fence_cb, fence_cb);
-
- return c;
+ end_atomic(commit->dev->dev_private, commit->crtc_mask,
+ commit->plane_mask);
+ kfree(commit);
}
-static void commit_destroy(struct msm_commit *c)
-{
- end_atomic(c->dev->dev_private, c->crtc_mask);
- kfree(c);
-}
-
-static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
- struct drm_atomic_state *old_state)
+static void msm_atomic_wait_for_commit_done(
+ struct drm_device *dev,
+ struct drm_atomic_state *old_state,
+ int modeset_flags)
{
struct drm_crtc *crtc;
struct msm_drm_private *priv = old_state->dev->dev_private;
@@ -94,12 +85,16 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
int i;
for (i = 0; i < ncrtcs; i++) {
+ int private_flags;
+
crtc = old_state->crtcs[i];
- if (!crtc)
+ if (!crtc || !crtc->state || !crtc->state->enable)
continue;
- if (!crtc->state->enable)
+ /* If specified, only wait if requested flag is true */
+ private_flags = crtc->state->adjusted_mode.private_flags;
+ if (modeset_flags && !(modeset_flags & private_flags))
continue;
/* Legacy cursor ioctls are completely unsynced, and userspace
@@ -110,29 +105,324 @@ static void msm_atomic_wait_for_commit_done(struct drm_device *dev,
if (drm_crtc_vblank_get(crtc))
continue;
- kms->funcs->wait_for_crtc_commit_done(kms, crtc);
+ if (kms->funcs->wait_for_crtc_commit_done)
+ kms->funcs->wait_for_crtc_commit_done(kms, crtc);
drm_crtc_vblank_put(crtc);
}
}
+static void
+msm_disable_outputs(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ SDE_ATRACE_BEGIN("msm_disable");
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+ struct drm_crtc_state *old_crtc_state;
+ unsigned int crtc_idx;
+
+ /*
+ * Shut down everything that's in the changeset and currently
+ * still on. So need to check the old, saved state.
+ */
+ if (!old_conn_state->crtc)
+ continue;
+
+ crtc_idx = drm_crtc_index(old_conn_state->crtc);
+ old_crtc_state = old_state->crtc_states[crtc_idx];
+
+ if (!old_crtc_state->active ||
+ !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+ continue;
+
+ encoder = old_conn_state->best_encoder;
+
+ /* We shouldn't get this far if we didn't previously have
+ * an encoder.. but WARN_ON() rather than explode.
+ */
+ if (WARN_ON(!encoder))
+ continue;
+
+ if (msm_is_mode_seamless(
+ &connector->encoder->crtc->state->mode))
+ continue;
+
+ funcs = encoder->helper_private;
+
+ DRM_DEBUG_ATOMIC("disabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call disable hooks twice.
+ */
+ drm_bridge_disable(encoder->bridge);
+
+ /* Right function depends upon target state. */
+ if (connector->state->crtc && funcs->prepare)
+ funcs->prepare(encoder);
+ else if (funcs->disable)
+ funcs->disable(encoder);
+ else
+ funcs->dpms(encoder, DRM_MODE_DPMS_OFF);
+
+ drm_bridge_post_disable(encoder->bridge);
+ }
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
+
+ /* Shut down everything that needs a full modeset. */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state))
+ continue;
+
+ if (!old_crtc_state->active)
+ continue;
+
+ if (msm_is_mode_seamless(&crtc->state->mode))
+ continue;
+
+ funcs = crtc->helper_private;
+
+ DRM_DEBUG_ATOMIC("disabling [CRTC:%d]\n",
+ crtc->base.id);
+
+ /* Right function depends upon target state. */
+ if (crtc->state->enable && funcs->prepare)
+ funcs->prepare(crtc);
+ else if (funcs->disable)
+ funcs->disable(crtc);
+ else
+ funcs->dpms(crtc, DRM_MODE_DPMS_OFF);
+ }
+ SDE_ATRACE_END("msm_disable");
+}
+
+static void
+msm_crtc_set_mode(struct drm_device *dev, struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ int i;
+
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
+
+ if (!crtc->state->mode_changed)
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable && funcs->mode_set_nofb) {
+ DRM_DEBUG_ATOMIC("modeset on [CRTC:%d]\n",
+ crtc->base.id);
+
+ funcs->mode_set_nofb(crtc);
+ }
+ }
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
+ struct drm_crtc_state *new_crtc_state;
+ struct drm_encoder *encoder;
+ struct drm_display_mode *mode, *adjusted_mode;
+
+ if (!connector->state->best_encoder)
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+ new_crtc_state = connector->state->crtc->state;
+ mode = &new_crtc_state->mode;
+ adjusted_mode = &new_crtc_state->adjusted_mode;
+
+ if (!new_crtc_state->mode_changed)
+ continue;
+
+ DRM_DEBUG_ATOMIC("modeset on [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call mode_set hooks twice.
+ */
+ if (funcs->mode_set)
+ funcs->mode_set(encoder, mode, adjusted_mode);
+
+ drm_bridge_mode_set(encoder->bridge, mode, adjusted_mode);
+ }
+}
+
+/**
+ * msm_atomic_helper_commit_modeset_disables - modeset commit to disable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function shuts down all the outputs that need to be shut down and
+ * prepares them (if required) with the new mode.
+ *
+ * For compatibility with legacy crtc helpers this should be called before
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_disables(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ msm_disable_outputs(dev, old_state);
+
+ drm_atomic_helper_update_legacy_modeset_state(dev, old_state);
+
+ msm_crtc_set_mode(dev, old_state);
+}
+
+/**
+ * msm_atomic_helper_commit_modeset_enables - modeset commit to enable outputs
+ * @dev: DRM device
+ * @old_state: atomic state object with old state structures
+ *
+ * This function enables all the outputs with the new configuration which had to
+ * be turned off for the update.
+ *
+ * For compatibility with legacy crtc helpers this should be called after
+ * drm_atomic_helper_commit_planes(), which is what the default commit function
+ * does. But drivers with different needs can group the modeset commits together
+ * and do the plane commits at the end. This is useful for drivers doing runtime
+ * PM since planes updates then only happen when the CRTC is actually enabled.
+ */
+static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
+ struct drm_atomic_state *old_state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ struct drm_connector *connector;
+ struct drm_connector_state *old_conn_state;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_kms *kms = priv->kms;
+ int bridge_enable_count = 0;
+ int i;
+
+ SDE_ATRACE_BEGIN("msm_enable");
+ for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+ const struct drm_crtc_helper_funcs *funcs;
+
+ /* Need to filter out CRTCs where only planes change. */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state))
+ continue;
+
+ if (!crtc->state->active)
+ continue;
+
+ if (msm_is_mode_seamless(&crtc->state->mode))
+ continue;
+
+ funcs = crtc->helper_private;
+
+ if (crtc->state->enable) {
+ DRM_DEBUG_ATOMIC("enabling [CRTC:%d]\n",
+ crtc->base.id);
+
+ if (funcs->enable)
+ funcs->enable(crtc);
+ else
+ funcs->commit(crtc);
+ }
+ }
+
+ /* ensure bridge/encoder updates happen on same vblank */
+ msm_atomic_wait_for_commit_done(dev, old_state,
+ MSM_MODE_FLAG_VBLANK_PRE_MODESET);
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ const struct drm_encoder_helper_funcs *funcs;
+ struct drm_encoder *encoder;
+
+ if (!connector->state->best_encoder)
+ continue;
+
+ if (!connector->state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(
+ connector->state->crtc->state))
+ continue;
+
+ encoder = connector->state->best_encoder;
+ funcs = encoder->helper_private;
+
+ DRM_DEBUG_ATOMIC("enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ /*
+ * Each encoder has at most one connector (since we always steal
+ * it away), so we won't call enable hooks twice.
+ */
+ drm_bridge_pre_enable(encoder->bridge);
+ ++bridge_enable_count;
+
+ if (funcs->enable)
+ funcs->enable(encoder);
+ else
+ funcs->commit(encoder);
+ }
+
+ if (kms->funcs->commit) {
+ DRM_DEBUG_ATOMIC("triggering commit\n");
+ kms->funcs->commit(kms, old_state);
+ }
+
+ /* If no bridges were pre_enabled, skip iterating over them again */
+ if (bridge_enable_count == 0) {
+ SDE_ATRACE_END("msm_enable");
+ return;
+ }
+
+ for_each_connector_in_state(old_state, connector, old_conn_state, i) {
+ struct drm_encoder *encoder;
+
+ if (!connector->state->best_encoder)
+ continue;
+
+ if (!connector->state->crtc->state->active ||
+ !drm_atomic_crtc_needs_modeset(
+ connector->state->crtc->state))
+ continue;
+
+ encoder = connector->state->best_encoder;
+
+ DRM_DEBUG_ATOMIC("bridge enable enabling [ENCODER:%d:%s]\n",
+ encoder->base.id, encoder->name);
+
+ drm_bridge_enable(encoder->bridge);
+ }
+ SDE_ATRACE_END("msm_enable");
+}
+
/* The (potentially) asynchronous part of the commit. At this point
* nothing can fail short of armageddon.
*/
-static void complete_commit(struct msm_commit *c)
+static void complete_commit(struct msm_commit *commit)
{
- struct drm_atomic_state *state = c->state;
+ struct drm_atomic_state *state = commit->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
kms->funcs->prepare_commit(kms, state);
- drm_atomic_helper_commit_modeset_disables(dev, state);
+ msm_atomic_helper_commit_modeset_disables(dev, state);
drm_atomic_helper_commit_planes(dev, state, false);
- drm_atomic_helper_commit_modeset_enables(dev, state);
+ msm_atomic_helper_commit_modeset_enables(dev, state);
/* NOTE: _wait_for_vblanks() only waits for vblank on
* enabled CRTCs. So we end up faulting when disabling
@@ -147,7 +437,7 @@ static void complete_commit(struct msm_commit *c)
* not be critical path)
*/
- msm_atomic_wait_for_commit_done(dev, state);
+ msm_atomic_wait_for_commit_done(dev, state, 0);
drm_atomic_helper_cleanup_planes(dev, state);
@@ -155,38 +445,109 @@ static void complete_commit(struct msm_commit *c)
drm_atomic_state_free(state);
- commit_destroy(c);
+ commit_destroy(commit);
}
+static int msm_atomic_commit_dispatch(struct drm_device *dev,
+ struct drm_atomic_state *state, struct msm_commit *commit);
+
static void fence_cb(struct msm_fence_cb *cb)
{
- struct msm_commit *c =
+ struct msm_commit *commit =
container_of(cb, struct msm_commit, fence_cb);
- complete_commit(c);
+ int ret = -EINVAL;
+
+ ret = msm_atomic_commit_dispatch(commit->dev, commit->state, commit);
+ if (ret) {
+ DRM_ERROR("%s: atomic commit failed\n", __func__);
+ drm_atomic_state_free(commit->state);
+ commit_destroy(commit);
+ }
}
-static void add_fb(struct msm_commit *c, struct drm_framebuffer *fb)
+static void _msm_drm_commit_work_cb(struct kthread_work *work)
{
- struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
- c->fence = max(c->fence, msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+ struct msm_commit *commit = NULL;
+
+ if (!work) {
+ DRM_ERROR("%s: Invalid commit work data!\n", __func__);
+ return;
+ }
+
+ commit = container_of(work, struct msm_commit, commit_work);
+
+ SDE_ATRACE_BEGIN("complete_commit");
+ complete_commit(commit);
+ SDE_ATRACE_END("complete_commit");
}
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state)
+static struct msm_commit *commit_init(struct drm_atomic_state *state)
{
- int ret;
+ struct msm_commit *commit = kzalloc(sizeof(*commit), GFP_KERNEL);
- /*
- * msm ->atomic_check can update ->mode_changed for pixel format
- * changes, hence must be run before we check the modeset changes.
+ if (!commit) {
+ DRM_ERROR("invalid commit\n");
+ return ERR_PTR(-ENOMEM);
+ }
+
+ commit->dev = state->dev;
+ commit->state = state;
+
+ /* TODO we might need a way to indicate to run the cb on a
+ * different wq so wait_for_vblanks() doesn't block retiring
+ * bo's..
*/
- ret = drm_atomic_helper_check_planes(dev, state);
- if (ret)
- return ret;
+ INIT_FENCE_CB(&commit->fence_cb, fence_cb);
+ init_kthread_work(&commit->commit_work, _msm_drm_commit_work_cb);
- ret = drm_atomic_helper_check_modeset(dev, state);
- if (ret)
- return ret;
+ return commit;
+}
+
+static void commit_set_fence(struct msm_commit *commit,
+ struct drm_framebuffer *fb)
+{
+ struct drm_gem_object *obj = msm_framebuffer_bo(fb, 0);
+ commit->fence = max(commit->fence,
+ msm_gem_fence(to_msm_bo(obj), MSM_PREP_READ));
+}
+
+/* Start display thread function */
+static int msm_atomic_commit_dispatch(struct drm_device *dev,
+ struct drm_atomic_state *state, struct msm_commit *commit)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_crtc *crtc = NULL;
+ struct drm_crtc_state *crtc_state = NULL;
+ int ret = -EINVAL, i = 0, j = 0;
+
+ for_each_crtc_in_state(state, crtc, crtc_state, i) {
+ for (j = 0; j < priv->num_crtcs; j++) {
+ if (priv->disp_thread[j].crtc_id ==
+ crtc->base.id) {
+ if (priv->disp_thread[j].thread) {
+ queue_kthread_work(
+ &priv->disp_thread[j].worker,
+ &commit->commit_work);
+ /* only return zero if work is
+ * queued successfully.
+ */
+ ret = 0;
+ } else {
+ DRM_ERROR(" Error for crtc_id: %d\n",
+ priv->disp_thread[j].crtc_id);
+ }
+ break;
+ }
+ }
+ /*
+ * TODO: handle cases where there will be more than
+ * one crtc per commit cycle. Remove this check then.
+ * Current assumption is there will be only one crtc
+ * per commit cycle.
+ */
+ if (j < priv->num_crtcs)
+ break;
+ }
return ret;
}
@@ -197,9 +558,8 @@ int msm_atomic_check(struct drm_device *dev,
* @state: the driver state object
* @async: asynchronous commit
*
- * This function commits a with drm_atomic_helper_check() pre-validated state
- * object. This can still fail when e.g. the framebuffer reservation fails. For
- * now this doesn't implement asynchronous commits.
+ * This function commits with drm_atomic_helper_check() pre-validated state
+ * object. This can still fail when e.g. the framebuffer reservation fails.
*
* RETURNS
* Zero for success or -errno.
@@ -207,19 +567,29 @@ int msm_atomic_check(struct drm_device *dev,
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async)
{
+ struct msm_drm_private *priv = dev->dev_private;
int nplanes = dev->mode_config.num_total_plane;
int ncrtcs = dev->mode_config.num_crtc;
ktime_t timeout;
- struct msm_commit *c;
+ struct msm_commit *commit;
int i, ret;
+ if (!priv || priv->shutdown_in_progress) {
+ DRM_ERROR("priv is null or shutdwon is in-progress\n");
+ return -EINVAL;
+ }
+
+ SDE_ATRACE_BEGIN("atomic_commit");
ret = drm_atomic_helper_prepare_planes(dev, state);
- if (ret)
+ if (ret) {
+ SDE_ATRACE_END("atomic_commit");
return ret;
+ }
- c = commit_init(state);
- if (!c) {
- ret = -ENOMEM;
+ commit = commit_init(state);
+ if (IS_ERR_OR_NULL(commit)) {
+ ret = PTR_ERR(commit);
+ DRM_ERROR("commit_init failed: %d\n", ret);
goto error;
}
@@ -230,7 +600,7 @@ int msm_atomic_commit(struct drm_device *dev,
struct drm_crtc *crtc = state->crtcs[i];
if (!crtc)
continue;
- c->crtc_mask |= (1 << drm_crtc_index(crtc));
+ commit->crtc_mask |= (1 << i);
}
/*
@@ -244,16 +614,20 @@ int msm_atomic_commit(struct drm_device *dev,
continue;
if ((plane->state->fb != new_state->fb) && new_state->fb)
- add_fb(c, new_state->fb);
+ commit_set_fence(commit, new_state->fb);
+
+ commit->plane_mask |= (1 << i);
}
/*
* Wait for pending updates on any of the same crtc's and then
* mark our set of crtc's as busy:
*/
- ret = start_atomic(dev->dev_private, c->crtc_mask);
+ ret = start_atomic(dev->dev_private, commit->crtc_mask,
+ commit->plane_mask);
if (ret) {
- kfree(c);
+ DRM_ERROR("start_atomic failed: %d\n", ret);
+ commit_destroy(commit);
goto error;
}
@@ -266,6 +640,16 @@ int msm_atomic_commit(struct drm_device *dev,
drm_atomic_helper_swap_state(dev, state);
/*
+ * Provide the driver a chance to prepare for output fences. This is
+ * done after the point of no return, but before asynchronous commits
+ * are dispatched to work queues, so that the fence preparation is
+ * finished before the .atomic_commit returns.
+ */
+ if (priv && priv->kms && priv->kms->funcs &&
+ priv->kms->funcs->prepare_fence)
+ priv->kms->funcs->prepare_fence(priv->kms, state);
+
+ /*
* Everything below can be run asynchronously without the need to grab
* any modeset locks at all under one conditions: It must be guaranteed
* that the asynchronous work has either been cancelled (if the driver
@@ -282,20 +666,23 @@ int msm_atomic_commit(struct drm_device *dev,
*/
if (async) {
- msm_queue_fence_cb(dev, &c->fence_cb, c->fence);
+ msm_queue_fence_cb(dev, &commit->fence_cb, commit->fence);
+ SDE_ATRACE_END("atomic_commit");
return 0;
}
timeout = ktime_add_ms(ktime_get(), 1000);
/* uninterruptible wait */
- msm_wait_fence(dev, c->fence, &timeout, false);
+ msm_wait_fence(dev, commit->fence, &timeout, false);
- complete_commit(c);
+ complete_commit(commit);
+ SDE_ATRACE_END("atomic_commit");
return 0;
error:
drm_atomic_helper_cleanup_planes(dev, state);
+ SDE_ATRACE_END("atomic_commit");
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 24d45fc7716c..80dc06b4a884 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -14,38 +15,112 @@
* You should have received a copy of the GNU General Public License along with
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+/*
+ * Copyright (c) 2016 Intel Corporation
+ *
+ * Permission to use, copy, modify, distribute, and sell this software and its
+ * documentation for any purpose is hereby granted without fee, provided that
+ * the above copyright notice appear in all copies and that both that copyright
+ * notice and this permission notice appear in supporting documentation, and
+ * that the name of the copyright holders not be used in advertising or
+ * publicity pertaining to distribution of the software without specific,
+ * written prior permission. The copyright holders make no representations
+ * about the suitability of this software for any purpose. It is provided "as
+ * is" without express or implied warranty.
+ *
+ * THE COPYRIGHT HOLDERS DISCLAIM ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+ * INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS, IN NO
+ * EVENT SHALL THE COPYRIGHT HOLDERS BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+ * CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
+ * DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
+ * TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE
+ * OF THIS SOFTWARE.
+ */
+#include <linux/of_address.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
+#include "sde_wb.h"
+
+#define TEARDOWN_DEADLOCK_RETRY_MAX 5
+#include "msm_gem.h"
+#include "msm_mmu.h"
+
+static struct completion wait_display_completion;
+static bool msm_drm_probed;
+
+static void msm_drm_helper_hotplug_event(struct drm_device *dev)
+{
+ struct drm_connector *connector;
+ char *event_string;
+ char const *connector_name;
+ char *envp[2];
+
+ if (!dev) {
+ DRM_ERROR("hotplug_event failed, invalid input\n");
+ return;
+ }
+
+ if (!dev->mode_config.poll_enabled)
+ return;
+
+ event_string = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!event_string) {
+ DRM_ERROR("failed to allocate event string\n");
+ return;
+ }
+
+ mutex_lock(&dev->mode_config.mutex);
+ drm_for_each_connector(connector, dev) {
+ /* Only handle HPD capable connectors. */
+ if (!(connector->polled & DRM_CONNECTOR_POLL_HPD))
+ continue;
+
+ connector->status = connector->funcs->detect(connector, false);
+
+ if (connector->name)
+ connector_name = connector->name;
+ else
+ connector_name = "unknown";
+
+ snprintf(event_string, SZ_4K, "name=%s status=%s\n",
+ connector_name,
+ drm_get_connector_status_name(connector->status));
+ DRM_DEBUG("generating hotplug event [%s]\n", event_string);
+ envp[0] = event_string;
+ envp[1] = NULL;
+ kobject_uevent_env(&dev->primary->kdev->kobj, KOBJ_CHANGE,
+ envp);
+ }
+ mutex_unlock(&dev->mode_config.mutex);
+ kfree(event_string);
+}
static void msm_fb_output_poll_changed(struct drm_device *dev)
{
- struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_private *priv = NULL;
+
+ if (!dev) {
+ DRM_ERROR("output_poll_changed failed, invalid input\n");
+ return;
+ }
+
+ priv = dev->dev_private;
+
if (priv->fbdev)
drm_fb_helper_hotplug_event(priv->fbdev);
+ else
+ msm_drm_helper_hotplug_event(dev);
}
static const struct drm_mode_config_funcs mode_config_funcs = {
.fb_create = msm_framebuffer_create,
.output_poll_changed = msm_fb_output_poll_changed,
- .atomic_check = msm_atomic_check,
+ .atomic_check = drm_atomic_helper_check,
.atomic_commit = msm_atomic_commit,
};
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu)
-{
- struct msm_drm_private *priv = dev->dev_private;
- int idx = priv->num_mmus++;
-
- if (WARN_ON(idx >= ARRAY_SIZE(priv->mmus)))
- return -EINVAL;
-
- priv->mmus[idx] = mmu;
-
- return idx;
-}
-
#ifdef CONFIG_DRM_MSM_REGISTER_LOGGING
static bool reglog = false;
MODULE_PARM_DESC(reglog, "Enable register read/write logging");
@@ -94,15 +169,21 @@ void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
}
if (reglog)
- printk(KERN_DEBUG "IO:region %s %p %08lx\n", dbgname, ptr, size);
+ dev_dbg(&pdev->dev, "IO:region %s %pK %08lx\n",
+ dbgname, ptr, size);
return ptr;
}
+void msm_iounmap(struct platform_device *pdev, void __iomem *addr)
+{
+ devm_iounmap(&pdev->dev, addr);
+}
+
void msm_writel(u32 data, void __iomem *addr)
{
if (reglog)
- printk(KERN_DEBUG "IO:W %p %08x\n", addr, data);
+ pr_debug("IO:W %pK %08x\n", addr, data);
writel(data, addr);
}
@@ -110,7 +191,7 @@ u32 msm_readl(const void __iomem *addr)
{
u32 val = readl(addr);
if (reglog)
- printk(KERN_ERR "IO:R %p %08x\n", addr, val);
+ pr_err("IO:R %pK %08x\n", addr, val);
return val;
}
@@ -120,7 +201,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -129,12 +210,16 @@ static void vblank_ctrl_worker(struct work_struct *work)
struct msm_kms *kms = priv->kms;
struct vblank_event *vbl_ev, *tmp;
unsigned long flags;
+ LIST_HEAD(tmp_head);
spin_lock_irqsave(&vbl_ctrl->lock, flags);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_add_tail(&vbl_ev->node, &tmp_head);
+ }
+ spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
+ list_for_each_entry_safe(vbl_ev, tmp, &tmp_head, node) {
if (vbl_ev->enable)
kms->funcs->enable_vblank(kms,
priv->crtcs[vbl_ev->crtc_id]);
@@ -143,11 +228,7 @@ static void vblank_ctrl_worker(struct work_struct *work)
priv->crtcs[vbl_ev->crtc_id]);
kfree(vbl_ev);
-
- spin_lock_irqsave(&vbl_ctrl->lock, flags);
}
-
- spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
}
static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
@@ -168,7 +249,7 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- queue_work(priv->wq, &vbl_ctrl->work);
+ queue_kthread_work(&priv->disp_thread[crtc_id].worker, &vbl_ctrl->work);
return 0;
}
@@ -180,21 +261,32 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
static int msm_unload(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = dev->platformdev;
struct msm_kms *kms = priv->kms;
struct msm_gpu *gpu = priv->gpu;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
+ int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- cancel_work_sync(&vbl_ctrl->work);
+ flush_kthread_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
+ /* clean up display commit worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ flush_kthread_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+ }
+
drm_kms_helper_poll_fini(dev);
drm_mode_config_cleanup(dev);
drm_vblank_cleanup(dev);
@@ -213,6 +305,10 @@ static int msm_unload(struct drm_device *dev)
if (gpu) {
mutex_lock(&dev->struct_mutex);
+ /*
+ * XXX what do we do here?
+ * pm_runtime_enable(&pdev->dev);
+ */
gpu->funcs->pm_suspend(gpu);
mutex_unlock(&dev->struct_mutex);
gpu->funcs->destroy(gpu);
@@ -226,6 +322,11 @@ static int msm_unload(struct drm_device *dev)
priv->vram.paddr, &attrs);
}
+ sde_dbg_destroy();
+
+ sde_power_client_destroy(&priv->phandle, priv->pclient);
+ sde_power_resource_deinit(pdev, &priv->phandle);
+
component_unbind_all(dev->dev, dev);
dev->dev_private = NULL;
@@ -235,26 +336,26 @@ static int msm_unload(struct drm_device *dev)
return 0;
}
+#define KMS_MDP4 0
+#define KMS_SDE 1
+
static int get_mdp_ver(struct platform_device *pdev)
{
#ifdef CONFIG_OF
static const struct of_device_id match_types[] = { {
- .compatible = "qcom,mdss_mdp",
- .data = (void *)5,
- }, {
- /* end node */
- } };
+ .compatible = "qcom,sde-kms",
+ .data = (void *)KMS_SDE,
+ },
+ {} };
struct device *dev = &pdev->dev;
const struct of_device_id *match;
match = of_match_node(match_types, dev->of_node);
if (match)
return (int)(unsigned long)match->data;
#endif
- return 4;
+ return KMS_MDP4;
}
-#include <linux/of_address.h>
-
static int msm_init_vram(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -307,6 +408,7 @@ static int msm_init_vram(struct drm_device *dev)
priv->vram.size = size;
drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1);
+ spin_lock_init(&priv->vram.lock);
dma_set_attr(DMA_ATTR_NO_KERNEL_MAPPING, &attrs);
dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
@@ -330,12 +432,39 @@ static int msm_init_vram(struct drm_device *dev)
return ret;
}
+#ifdef CONFIG_OF
+static int msm_component_bind_all(struct device *dev,
+ struct drm_device *drm_dev)
+{
+ int ret;
+
+ ret = component_bind_all(dev, drm_dev);
+ if (ret)
+ DRM_ERROR("component_bind_all failed: %d\n", ret);
+
+ return ret;
+}
+#else
+static int msm_component_bind_all(struct device *dev,
+ struct drm_device *drm_dev)
+{
+ return 0;
+}
+#endif
+
+static int msm_power_enable_wrapper(void *handle, void *client, bool enable)
+{
+ return sde_power_resource_enable(handle, client, enable);
+}
+
static int msm_load(struct drm_device *dev, unsigned long flags)
{
struct platform_device *pdev = dev->platformdev;
struct msm_drm_private *priv;
struct msm_kms *kms;
- int ret;
+ struct sde_dbg_power_ctrl dbg_power_ctrl = { NULL };
+ int ret, i;
+ struct sched_param param;
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
@@ -345,22 +474,38 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
dev->dev_private = priv;
- priv->wq = alloc_ordered_workqueue("msm", 0);
+ priv->wq = alloc_ordered_workqueue("msm_drm", 0);
init_waitqueue_head(&priv->fence_event);
init_waitqueue_head(&priv->pending_crtcs_event);
+ INIT_LIST_HEAD(&priv->client_event_list);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->fence_cbs);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ init_kthread_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
+ hash_init(priv->mn_hash);
+ mutex_init(&priv->mn_lock);
drm_mode_config_init(dev);
platform_set_drvdata(pdev, dev);
+ ret = sde_power_resource_init(pdev, &priv->phandle);
+ if (ret) {
+ pr_err("sde power resource init failed\n");
+ goto fail;
+ }
+
+ priv->pclient = sde_power_client_create(&priv->phandle, "sde");
+ if (IS_ERR_OR_NULL(priv->pclient)) {
+ pr_err("sde power client create failed\n");
+ ret = -EINVAL;
+ goto fail;
+ }
+
/* Bind all our sub-components: */
- ret = component_bind_all(dev->dev, dev);
+ ret = msm_component_bind_all(dev->dev, dev);
if (ret)
return ret;
@@ -368,12 +513,22 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail;
+ dbg_power_ctrl.handle = &priv->phandle;
+ dbg_power_ctrl.client = priv->pclient;
+ dbg_power_ctrl.enable_fn = msm_power_enable_wrapper;
+ ret = sde_dbg_init(dev->primary->debugfs_root, &pdev->dev,
+ &dbg_power_ctrl);
+ if (ret) {
+ dev_err(dev->dev, "failed to init sde dbg: %d\n", ret);
+ goto fail;
+ }
+
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(dev);
break;
- case 5:
- kms = mdp5_kms_init(dev);
+ case KMS_SDE:
+ kms = sde_kms_init(dev);
break;
default:
kms = ERR_PTR(-ENODEV);
@@ -387,21 +542,55 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
* and (for example) use dmabuf/prime to share buffers with
* imx drm driver on iMX5
*/
+ priv->kms = NULL;
dev_err(dev->dev, "failed to load kms\n");
ret = PTR_ERR(kms);
goto fail;
}
priv->kms = kms;
+ pm_runtime_enable(dev->dev);
- if (kms) {
- pm_runtime_enable(dev->dev);
+ if (kms && kms->funcs && kms->funcs->hw_init) {
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev->dev, "kms hw init failed: %d\n", ret);
goto fail;
}
}
+	/**
+	 * this priority was found during empirical testing to have appropriate
+	 * realtime scheduling to process display updates and interact with
+	 * other real time and normal priority tasks
+	 */
+	param.sched_priority = 16;
+	/* initialize commit thread structure */
+	for (i = 0; i < priv->num_crtcs; i++) {
+		priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+		init_kthread_worker(&priv->disp_thread[i].worker);
+		priv->disp_thread[i].dev = dev;
+		priv->disp_thread[i].thread =
+			kthread_run(kthread_worker_fn,
+				&priv->disp_thread[i].worker,
+				"crtc_commit:%d",
+				priv->disp_thread[i].crtc_id);
+		if (IS_ERR(priv->disp_thread[i].thread)) {
+			dev_err(dev->dev, "failed to create kthread\n");
+			priv->disp_thread[i].thread = NULL;
+			/* clean up previously created threads if any */
+			for (i -= 1; i >= 0; i--) {
+				kthread_stop(priv->disp_thread[i].thread);
+				priv->disp_thread[i].thread = NULL;
+			}
+			goto fail;
+		}
+
+		/* thread is known-valid here, safe to tune its priority */
+		ret = sched_setscheduler(priv->disp_thread[i].thread,
+			SCHED_FIFO, &param);
+		if (ret)
+			pr_warn("display thread priority update failed: %d\n",
+				ret);
+	}
dev->mode_config.funcs = &mode_config_funcs;
@@ -430,6 +619,15 @@ static int msm_load(struct drm_device *dev, unsigned long flags)
if (ret)
goto fail;
+ /* perform subdriver post initialization */
+ if (kms && kms->funcs && kms->funcs->postinit) {
+ ret = kms->funcs->postinit(kms);
+ if (ret) {
+ dev_err(dev->dev, "kms post init failed: %d\n", ret);
+ goto fail;
+ }
+ }
+
drm_kms_helper_poll_init(dev);
return 0;
@@ -439,6 +637,11 @@ fail:
return ret;
}
+#ifdef CONFIG_QCOM_KGSL
+static void load_gpu(struct drm_device *dev)
+{
+}
+#else
static void load_gpu(struct drm_device *dev)
{
static DEFINE_MUTEX(init_lock);
@@ -451,47 +654,231 @@ static void load_gpu(struct drm_device *dev)
mutex_unlock(&init_lock);
}
+#endif
-static int msm_open(struct drm_device *dev, struct drm_file *file)
+static struct msm_file_private *setup_pagetable(struct msm_drm_private *priv)
{
struct msm_file_private *ctx;
+ if (!priv || !priv->gpu)
+ return NULL;
+
+ ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
+ if (!ctx)
+ return ERR_PTR(-ENOMEM);
+
+ ctx->aspace = msm_gem_address_space_create_instance(
+ priv->gpu->aspace->mmu, "gpu", 0x100000000ULL,
+ TASK_SIZE_64 - 1);
+
+ if (IS_ERR(ctx->aspace)) {
+ int ret = PTR_ERR(ctx->aspace);
+
+ /*
+ * If dynamic domains are not supported, everybody uses the
+ * same pagetable
+ */
+ if (ret != -EOPNOTSUPP) {
+ kfree(ctx);
+ return ERR_PTR(ret);
+ }
+
+ ctx->aspace = priv->gpu->aspace;
+ }
+
+ ctx->aspace->mmu->funcs->attach(ctx->aspace->mmu, NULL, 0);
+ return ctx;
+}
+
+static int msm_open(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_file_private *ctx = NULL;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+
+ if (!dev || !dev->dev_private)
+ return -ENODEV;
+
+ priv = dev->dev_private;
/* For now, load gpu on open.. to avoid the requirement of having
* firmware in the initrd.
*/
load_gpu(dev);
- ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
- if (!ctx)
- return -ENOMEM;
+ ctx = setup_pagetable(priv);
+ if (IS_ERR(ctx))
+ return PTR_ERR(ctx);
+
+ if (ctx) {
+ INIT_LIST_HEAD(&ctx->counters);
+ msm_submitqueue_init(ctx);
+ }
file->driver_priv = ctx;
+ kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->postopen)
+ kms->funcs->postopen(kms, file);
+
return 0;
}
static void msm_preclose(struct drm_device *dev, struct drm_file *file)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_file_private *ctx = file->driver_priv;
struct msm_kms *kms = priv->kms;
- if (kms)
+ if (kms && kms->funcs && kms->funcs->preclose)
kms->funcs->preclose(kms, file);
+}
- mutex_lock(&dev->struct_mutex);
- if (ctx == priv->lastctx)
- priv->lastctx = NULL;
- mutex_unlock(&dev->struct_mutex);
+static void msm_postclose(struct drm_device *dev, struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_kms *kms = priv->kms;
+
+ if (kms && kms->funcs && kms->funcs->postclose)
+ kms->funcs->postclose(kms, file);
+
+ if (!ctx)
+ return;
+
+ msm_submitqueue_close(ctx);
+
+ if (priv->gpu) {
+ msm_gpu_cleanup_counters(priv->gpu, ctx);
+
+ if (ctx->aspace && ctx->aspace != priv->gpu->aspace) {
+ ctx->aspace->mmu->funcs->detach(ctx->aspace->mmu);
+ msm_gem_address_space_put(ctx->aspace);
+ }
+ }
kfree(ctx);
}
+static int msm_disable_all_modes_commit(
+ struct drm_device *dev,
+ struct drm_atomic_state *state)
+{
+ struct drm_plane *plane;
+ struct drm_crtc *crtc;
+ unsigned plane_mask;
+ int ret;
+
+ plane_mask = 0;
+ drm_for_each_plane(plane, dev) {
+ struct drm_plane_state *plane_state;
+
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
+ }
+
+ plane_state->rotation = BIT(DRM_ROTATE_0);
+
+ plane->old_fb = plane->fb;
+ plane_mask |= 1 << drm_plane_index(plane);
+
+ /* disable non-primary: */
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY)
+ continue;
+
+ DRM_DEBUG("disabling plane %d\n", plane->base.id);
+
+ ret = __drm_atomic_helper_disable_plane(plane, plane_state);
+ if (ret != 0)
+ DRM_ERROR("error %d disabling plane %d\n", ret,
+ plane->base.id);
+ }
+
+ drm_for_each_crtc(crtc, dev) {
+ struct drm_mode_set mode_set;
+
+ memset(&mode_set, 0, sizeof(struct drm_mode_set));
+ mode_set.crtc = crtc;
+
+ DRM_DEBUG("disabling crtc %d\n", crtc->base.id);
+
+ ret = __drm_atomic_helper_set_config(&mode_set, state);
+ if (ret != 0)
+ DRM_ERROR("error %d disabling crtc %d\n", ret,
+ crtc->base.id);
+ }
+
+ DRM_DEBUG("committing disables\n");
+ ret = drm_atomic_commit(state);
+
+fail:
+ drm_atomic_clean_old_fb(dev, plane_mask, ret);
+ DRM_DEBUG("disables result %d\n", ret);
+ return ret;
+}
+
+/**
+ * msm_clear_all_modes - disables all planes and crtcs via an atomic commit
+ * based on restore_fbdev_mode_atomic in drm_fb_helper.c
+ * @dev: device pointer
+ * @Return: 0 on success, otherwise -error
+ */
+static int msm_disable_all_modes(struct drm_device *dev)
+{
+ struct drm_atomic_state *state;
+ int ret, i;
+
+ state = drm_atomic_state_alloc(dev);
+ if (!state)
+ return -ENOMEM;
+
+ state->acquire_ctx = dev->mode_config.acquire_ctx;
+
+ for (i = 0; i < TEARDOWN_DEADLOCK_RETRY_MAX; i++) {
+ ret = msm_disable_all_modes_commit(dev, state);
+ if (ret != -EDEADLK)
+ break;
+ drm_atomic_state_clear(state);
+ drm_atomic_legacy_backoff(state);
+ }
+
+ /* on successful atomic commit state ownership transfers to framework */
+ if (ret != 0)
+ drm_atomic_state_free(state);
+
+ return ret;
+}
+
static void msm_lastclose(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- if (priv->fbdev)
+ struct msm_kms *kms = priv->kms;
+ int i;
+
+ /*
+ * clean up vblank disable immediately as this is the last close.
+ */
+ for (i = 0; i < dev->num_crtcs; i++) {
+ struct drm_vblank_crtc *vblank = &dev->vblank[i];
+ struct timer_list *disable_timer = &vblank->disable_timer;
+
+ if (del_timer_sync(disable_timer))
+ disable_timer->function(disable_timer->data);
+ }
+
+ /* wait for pending vblank requests to be executed by worker thread */
+ flush_workqueue(priv->wq);
+
+ if (priv->fbdev) {
drm_fb_helper_restore_fbdev_mode_unlocked(priv->fbdev);
+ } else {
+ drm_modeset_lock_all(dev);
+ msm_disable_all_modes(dev);
+ drm_modeset_unlock_all(dev);
+ if (kms && kms->funcs && kms->funcs->lastclose)
+ kms->funcs->lastclose(kms);
+ }
}
static irqreturn_t msm_irq(int irq, void *arg)
@@ -533,7 +920,7 @@ static int msm_enable_vblank(struct drm_device *dev, unsigned int pipe)
struct msm_kms *kms = priv->kms;
if (!kms)
return -ENXIO;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ DBG("dev=%pK, crtc=%u", dev, pipe);
return vblank_ctrl_queue_work(priv, pipe, true);
}
@@ -543,7 +930,7 @@ static void msm_disable_vblank(struct drm_device *dev, unsigned int pipe)
struct msm_kms *kms = priv->kms;
if (!kms)
return;
- DBG("dev=%p, crtc=%u", dev, pipe);
+ DBG("dev=%pK, crtc=%u", dev, pipe);
vblank_ctrl_queue_work(priv, pipe, false);
}
@@ -559,12 +946,21 @@ static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
if (gpu) {
seq_printf(m, "%s Status:\n", gpu->name);
+ pm_runtime_get_sync(&gpu->pdev->dev);
gpu->funcs->show(gpu, m);
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
return 0;
}
+static int msm_snapshot_show(struct drm_device *dev, struct seq_file *m)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ return msm_snapshot_write(priv->gpu, m);
+}
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -629,11 +1025,22 @@ static int show_locked(struct seq_file *m, void *arg)
return ret;
}
+static int show_unlocked(struct seq_file *m, void *arg)
+{
+ struct drm_info_node *node = (struct drm_info_node *) m->private;
+ struct drm_device *dev = node->minor->dev;
+ int (*show)(struct drm_device *dev, struct seq_file *m) =
+ node->info_ent->data;
+
+ return show(dev, m);
+}
+
static struct drm_info_list msm_debugfs_list[] = {
{"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
+ { "snapshot", show_unlocked, 0, msm_snapshot_show },
};
static int late_init_minor(struct drm_minor *minor)
@@ -707,14 +1114,23 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout , bool interruptible)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int index = FENCE_RING(fence);
+ uint32_t submitted;
int ret;
- if (!priv->gpu)
- return 0;
+ if (!gpu)
+ return -ENXIO;
+
+	if (index >= MSM_GPU_MAX_RINGS || index >= gpu->nr_rings ||
+			!gpu->rb[index])
+		return -EINVAL;
- if (fence > priv->gpu->submitted_fence) {
+ submitted = gpu->funcs->submitted_fence(gpu, gpu->rb[index]);
+
+ if (fence > submitted) {
DRM_ERROR("waiting on invalid fence: %u (of %u)\n",
- fence, priv->gpu->submitted_fence);
+ fence, submitted);
return -EINVAL;
}
@@ -744,7 +1160,7 @@ int msm_wait_fence(struct drm_device *dev, uint32_t fence,
if (ret == 0) {
DBG("timeout waiting for fence: %u (completed: %u)",
- fence, priv->completed_fence);
+ fence, priv->completed_fence[index]);
ret = -ETIMEDOUT;
} else if (ret != -ERESTARTSYS) {
ret = 0;
@@ -758,12 +1174,13 @@ int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ int index = FENCE_RING(fence);
int ret = 0;
mutex_lock(&dev->struct_mutex);
if (!list_empty(&cb->work.entry)) {
ret = -EINVAL;
- } else if (fence > priv->completed_fence) {
+ } else if (fence > priv->completed_fence[index]) {
cb->fence = fence;
list_add_tail(&cb->work.entry, &priv->fence_cbs);
} else {
@@ -778,21 +1195,21 @@ int msm_queue_fence_cb(struct drm_device *dev,
void msm_update_fence(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
+ struct msm_fence_cb *cb, *tmp;
+ int index = FENCE_RING(fence);
- mutex_lock(&dev->struct_mutex);
- priv->completed_fence = max(fence, priv->completed_fence);
-
- while (!list_empty(&priv->fence_cbs)) {
- struct msm_fence_cb *cb;
-
- cb = list_first_entry(&priv->fence_cbs,
- struct msm_fence_cb, work.entry);
+ if (index >= MSM_GPU_MAX_RINGS)
+ return;
- if (cb->fence > priv->completed_fence)
- break;
+ mutex_lock(&dev->struct_mutex);
+ priv->completed_fence[index] = max(fence, priv->completed_fence[index]);
- list_del_init(&cb->work.entry);
- queue_work(priv->wq, &cb->work);
+ list_for_each_entry_safe(cb, tmp, &priv->fence_cbs, work.entry) {
+ if (COMPARE_FENCE_LTE(cb->fence,
+ priv->completed_fence[index])) {
+ list_del_init(&cb->work.entry);
+ queue_work(priv->wq, &cb->work);
+ }
}
mutex_unlock(&dev->struct_mutex);
@@ -845,6 +1262,20 @@ static int msm_ioctl_gem_new(struct drm_device *dev, void *data,
args->flags, &args->handle);
}
+static int msm_ioctl_gem_svm_new(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_gem_svm_new *args = data;
+
+ if (args->flags & ~MSM_BO_FLAGS) {
+ DRM_ERROR("invalid flags: %08x\n", args->flags);
+ return -EINVAL;
+ }
+
+ return msm_gem_svm_new_handle(dev, file, args->hostptr, args->size,
+ args->flags, &args->handle);
+}
+
static inline ktime_t to_ktime(struct drm_msm_timespec timeout)
{
return ktime_set(timeout.tv_sec, timeout.tv_nsec);
@@ -897,17 +1328,49 @@ static int msm_ioctl_gem_info(struct drm_device *dev, void *data,
{
struct drm_msm_gem_info *args = data;
struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ struct msm_file_private *ctx = file->driver_priv;
int ret = 0;
- if (args->pad)
+ if (args->flags & ~MSM_INFO_FLAGS)
return -EINVAL;
obj = drm_gem_object_lookup(dev, file, args->handle);
if (!obj)
return -ENOENT;
- args->offset = msm_gem_mmap_offset(obj);
+ msm_obj = to_msm_bo(obj);
+ if (args->flags & MSM_INFO_IOVA) {
+ struct msm_gem_address_space *aspace = NULL;
+ struct msm_drm_private *priv = dev->dev_private;
+ uint64_t iova;
+
+ if (msm_obj->flags & MSM_BO_SECURE && priv->gpu)
+ aspace = priv->gpu->secure_aspace;
+ else if (ctx)
+ aspace = ctx->aspace;
+
+ if (!aspace) {
+ ret = -EINVAL;
+ goto out;
+ }
+
+ ret = msm_gem_get_iova(obj, aspace, &iova);
+ if (!ret)
+ args->offset = iova;
+ } else {
+ if (msm_obj->flags & MSM_BO_SVM) {
+ /*
+ * Offset for an SVM object is not needed as they are
+ * already mmap'ed before the SVM ioctl is invoked.
+ */
+ ret = -EACCES;
+ goto out;
+ }
+ args->offset = msm_gem_mmap_offset(obj);
+ }
+out:
drm_gem_object_unreference_unlocked(obj);
return ret;
@@ -917,16 +1380,543 @@ static int msm_ioctl_wait_fence(struct drm_device *dev, void *data,
struct drm_file *file)
{
struct drm_msm_wait_fence *args = data;
- ktime_t timeout = to_ktime(args->timeout);
+ ktime_t timeout;
+
if (args->pad) {
DRM_ERROR("invalid pad: %08x\n", args->pad);
return -EINVAL;
}
+ /*
+ * Special case - if the user passes a timeout of 0.0 just return the
+ * current fence status (0 for retired, -EBUSY for active) with no
+ * accompanying kernel logs. This can be a poor man's way of
+ * determining the status of a fence.
+ */
+ if (args->timeout.tv_sec == 0 && args->timeout.tv_nsec == 0)
+ return msm_wait_fence(dev, args->fence, NULL, true);
+
+ timeout = to_ktime(args->timeout);
return msm_wait_fence(dev, args->fence, &timeout, true);
}
+static int msm_event_supported(struct drm_device *dev,
+ struct drm_msm_event_req *req)
+{
+ int ret = -EINVAL;
+ struct drm_mode_object *arg_obj;
+ struct drm_crtc *crtc;
+
+ arg_obj = drm_mode_object_find(dev, req->object_id, req->object_type);
+ if (!arg_obj)
+ return -ENOENT;
+
+ if (arg_obj->type == DRM_MODE_OBJECT_CRTC) {
+ crtc = obj_to_crtc(arg_obj);
+ req->index = drm_crtc_index(crtc);
+ }
+
+ switch (req->event) {
+ case DRM_EVENT_VBLANK:
+ case DRM_EVENT_HISTOGRAM:
+ case DRM_EVENT_AD:
+ if (arg_obj->type == DRM_MODE_OBJECT_CRTC)
+ ret = 0;
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static void msm_vblank_read_cb(struct drm_pending_event *e)
+{
+ struct drm_pending_vblank_event *vblank;
+ struct msm_drm_private *priv;
+ struct drm_file *file_priv;
+ struct drm_device *dev;
+ struct msm_drm_event *v;
+ int ret = 0;
+ bool need_vblank = false;
+
+ if (!e) {
+ DRM_ERROR("invalid pending event payload\n");
+ return;
+ }
+
+ vblank = container_of(e, struct drm_pending_vblank_event, base);
+ file_priv = vblank->base.file_priv;
+ dev = (file_priv && file_priv->minor) ? file_priv->minor->dev : NULL;
+ priv = (dev) ? dev->dev_private : NULL;
+ if (!priv) {
+ DRM_ERROR("invalid msm private\n");
+ return;
+ }
+
+ list_for_each_entry(v, &priv->client_event_list, base.link) {
+ if (v->base.file_priv != file_priv ||
+ (v->event.type != DRM_EVENT_VBLANK &&
+ v->event.type != DRM_EVENT_AD))
+ continue;
+ need_vblank = true;
+ /**
+ * User-space client requests for N vsyncs when event
+ * requested is DRM_EVENT_AD. Once the count reaches zero,
+ * notify stop requesting for additional vsync's.
+ */
+ if (v->event.type == DRM_EVENT_AD) {
+ if (vblank->event.user_data)
+ vblank->event.user_data--;
+ need_vblank = (vblank->event.user_data) ? true : false;
+ }
+ break;
+ }
+
+ if (!need_vblank) {
+ kfree(vblank);
+ } else {
+ ret = drm_vblank_get(dev, vblank->pipe);
+ if (!ret) {
+ list_add(&vblank->base.link, &dev->vblank_event_list);
+ } else {
+ DRM_ERROR("vblank enable failed ret %d\n", ret);
+ kfree(vblank);
+ }
+ }
+}
+
+static int msm_enable_vblank_event(struct drm_device *dev,
+	struct drm_msm_event_req *req, struct drm_file *file)
+{
+	struct drm_pending_vblank_event *e;
+	int ret = 0;
+	unsigned long flags;
+	struct drm_vblank_crtc *vblank;
+
+	if (WARN_ON(req->index >= dev->num_crtcs))
+		return -EINVAL;
+
+	vblank = &dev->vblank[req->index];
+	e = kzalloc(sizeof(*e), GFP_KERNEL);
+	if (!e)
+		return -ENOMEM;
+
+	e->pipe = req->index;
+	e->base.pid = current->pid;
+	e->event.base.type = DRM_EVENT_VBLANK;
+	e->event.base.length = sizeof(e->event);
+	e->event.user_data = req->client_context;
+	e->base.event = &e->event.base;
+	e->base.file_priv = file;
+	e->base.destroy = msm_vblank_read_cb;
+	ret = drm_vblank_get(dev, e->pipe);
+	if (ret) {
+		DRM_ERROR("failed to enable the vblank\n");
+		goto free;
+	}
+
+	spin_lock_irqsave(&dev->event_lock, flags);
+	if (!vblank->enabled) {
+		ret = -EINVAL;
+		goto err_unlock;
+	}
+	if (file->event_space < sizeof(e->event)) {
+		ret = -EBUSY;
+		goto err_unlock;
+	}
+	file->event_space -= sizeof(e->event);
+	list_add_tail(&e->base.link, &dev->vblank_event_list);
+err_unlock:
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+	/* balance the drm_vblank_get() above on failure */
+	if (ret)
+		drm_vblank_put(dev, e->pipe);
+free:
+	if (ret)
+		kfree(e);
+	return ret;
+}
+
+static int msm_enable_event(struct drm_device *dev,
+ struct drm_msm_event_req *req, struct drm_file *file)
+{
+ int ret = -EINVAL;
+
+ switch (req->event) {
+ case DRM_EVENT_AD:
+ case DRM_EVENT_VBLANK:
+ ret = msm_enable_vblank_event(dev, req, file);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+static int msm_disable_vblank_event(struct drm_device *dev,
+ struct drm_msm_event_req *req,
+ struct drm_file *file)
+{
+ struct drm_pending_vblank_event *e, *t;
+
+ list_for_each_entry_safe(e, t, &dev->vblank_event_list, base.link) {
+ if (e->pipe != req->index || file != e->base.file_priv)
+ continue;
+ list_del(&e->base.link);
+ drm_vblank_put(dev, req->index);
+ kfree(e);
+ }
+ return 0;
+}
+
+static int msm_disable_event(struct drm_device *dev,
+ struct drm_msm_event_req *req,
+ struct drm_file *file)
+{
+ int ret = -EINVAL;
+
+ switch (req->event) {
+ case DRM_EVENT_AD:
+ case DRM_EVENT_VBLANK:
+ ret = msm_disable_vblank_event(dev, req, file);
+ break;
+ default:
+ break;
+ }
+ return ret;
+}
+
+
+static int msm_ioctl_register_event(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_event_req *req_event = data;
+ struct msm_drm_event *client;
+ struct msm_drm_event *v;
+ unsigned long flag = 0;
+ bool dup_request = false;
+ int ret = 0;
+
+ if (msm_event_supported(dev, req_event)) {
+ DRM_ERROR("unsupported event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_for_each_entry(v, &priv->client_event_list, base.link) {
+ if (v->base.file_priv != file)
+ continue;
+ if (v->event.type == req_event->event &&
+ v->info.object_id == req_event->object_id) {
+ DRM_ERROR("duplicate request for event %x obj id %d\n",
+ v->event.type, v->info.object_id);
+ dup_request = true;
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ if (dup_request)
+ return -EINVAL;
+
+ client = kzalloc(sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->base.file_priv = file;
+ client->base.pid = current->pid;
+ client->base.event = &client->event;
+ client->base.destroy = (void (*) (struct drm_pending_event *)) kfree;
+ client->event.type = req_event->event;
+ memcpy(&client->info, req_event, sizeof(client->info));
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_add_tail(&client->base.link, &priv->client_event_list);
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ ret = msm_enable_event(dev, req_event, file);
+ if (ret) {
+ DRM_ERROR("failed to enable event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ spin_lock_irqsave(&dev->event_lock, flag);
+ list_del(&client->base.link);
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+ kfree(client);
+ }
+ return ret;
+}
+
+static int msm_ioctl_deregister_event(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct drm_msm_event_req *req_event = data;
+ struct msm_drm_event *client = NULL;
+ struct msm_drm_event *v, *vt;
+ unsigned long flag = 0;
+
+ if (msm_event_supported(dev, req_event)) {
+ DRM_ERROR("unsupported event %x object %x object id %d\n",
+ req_event->event, req_event->object_type,
+ req_event->object_id);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flag);
+ msm_disable_event(dev, req_event, file);
+ list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) {
+ if (v->event.type == req_event->event &&
+ v->info.object_id == req_event->object_id &&
+ v->base.file_priv == file) {
+ client = v;
+ list_del(&client->base.link);
+ client->base.destroy(&client->base);
+ break;
+ }
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flag);
+
+ return 0;
+}
+
+static int msm_ioctl_gem_sync(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+
+ struct drm_msm_gem_sync *arg = data;
+ int i;
+
+ for (i = 0; i < arg->nr_ops; i++) {
+ struct drm_msm_gem_syncop syncop;
+ struct drm_gem_object *obj;
+ int ret;
+ void __user *ptr =
+ (void __user *)(uintptr_t)
+ (arg->ops + (i * sizeof(syncop)));
+
+ ret = copy_from_user(&syncop, ptr, sizeof(syncop));
+ if (ret)
+ return -EFAULT;
+
+ obj = drm_gem_object_lookup(dev, file, syncop.handle);
+ if (!obj)
+ return -ENOENT;
+
+ msm_gem_sync(obj, syncop.op);
+
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ return 0;
+}
+
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+ struct drm_event *event, u8 *payload)
+{
+ struct drm_device *dev = NULL;
+ struct msm_drm_private *priv = NULL;
+ unsigned long flags;
+ struct msm_drm_event *notify, *v;
+ int len = 0;
+
+ if (!crtc || !event || !event->length || !payload) {
+ DRM_ERROR("err param crtc %pK event %pK len %d payload %pK\n",
+ crtc, event, ((event) ? (event->length) : -1),
+ payload);
+ return;
+ }
+ dev = crtc->dev;
+ priv = (dev) ? dev->dev_private : NULL;
+ if (!dev || !priv) {
+ DRM_ERROR("invalid dev %pK priv %pK\n", dev, priv);
+ return;
+ }
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry(v, &priv->client_event_list, base.link) {
+ if (v->event.type != event->type ||
+ crtc->base.id != v->info.object_id)
+ continue;
+ len = event->length + sizeof(struct drm_msm_event_resp);
+ if (v->base.file_priv->event_space < len) {
+ DRM_ERROR("Insufficient space to notify\n");
+ continue;
+ }
+ notify = kzalloc(len, GFP_ATOMIC);
+ if (!notify)
+ continue;
+ notify->base.file_priv = v->base.file_priv;
+ notify->base.event = &notify->event;
+ notify->base.pid = v->base.pid;
+ notify->base.destroy =
+ (void (*)(struct drm_pending_event *)) kfree;
+ notify->event.type = v->event.type;
+ notify->event.length = len;
+ list_add(&notify->base.link,
+ &notify->base.file_priv->event_list);
+ notify->base.file_priv->event_space -= len;
+ memcpy(&notify->info, &v->info, sizeof(notify->info));
+ memcpy(notify->data, payload, event->length);
+ wake_up_interruptible(&notify->base.file_priv->event_wait);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+static int msm_ioctl_counter_get(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (priv->gpu)
+ return msm_gpu_counter_get(priv->gpu, data, ctx);
+
+ return -ENODEV;
+}
+
+static int msm_ioctl_counter_put(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (priv->gpu)
+ return msm_gpu_counter_put(priv->gpu, data, ctx);
+
+ return -ENODEV;
+}
+
+static int msm_ioctl_counter_read(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+
+ if (priv->gpu)
+ return msm_gpu_counter_read(priv->gpu, data);
+
+ return -ENODEV;
+}
+
+
+static int msm_ioctl_submitqueue_new(struct drm_device *dev, void *data,
+		struct drm_file *file)
+{
+	struct drm_msm_submitqueue *args = data;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct msm_gpu *gpu = priv->gpu;
+	/* no GPU probed (or CONFIG_QCOM_KGSL): nothing to create a queue on */
+	if (!gpu)
+		return -ENODEV;
+	if (args->flags & ~MSM_SUBMITQUEUE_FLAGS)
+		return -EINVAL;
+	if ((gpu->nr_rings > 1) &&
+	    (!file->is_master && args->prio == 0)) {
+		DRM_ERROR("Only DRM master can set highest priority ringbuffer\n");
+		return -EPERM;
+	}
+
+	if (args->flags & MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT &&
+	    !capable(CAP_SYS_ADMIN)) {
+		DRM_ERROR(
+			"Only CAP_SYS_ADMIN processes can bypass the timer\n");
+		return -EPERM;
+	}
+
+	return msm_submitqueue_create(file->driver_priv, args->prio,
+		args->flags, &args->id);
+}
+
+static int msm_ioctl_submitqueue_query(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_submitqueue_query *args = data;
+ void __user *ptr = (void __user *)(uintptr_t) args->data;
+
+ return msm_submitqueue_query(file->driver_priv, args->id,
+ args->param, ptr, args->len);
+}
+
+static int msm_ioctl_submitqueue_close(struct drm_device *dev, void *data,
+ struct drm_file *file)
+{
+ struct drm_msm_submitqueue *args = data;
+
+ return msm_submitqueue_remove(file->driver_priv, args->id);
+}
+
+int msm_release(struct inode *inode, struct file *filp)
+{
+ struct drm_file *file_priv = filp->private_data;
+ struct drm_minor *minor = file_priv->minor;
+ struct drm_device *dev = minor->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_drm_event *v, *vt;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ list_for_each_entry_safe(v, vt, &priv->client_event_list, base.link) {
+ if (v->base.file_priv != file_priv)
+ continue;
+ list_del(&v->base.link);
+ msm_disable_event(dev, &v->info, file_priv);
+ v->base.destroy(&v->base);
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+
+ return drm_release(inode, filp);
+}
+
+/**
+ * msm_ioctl_rmfb2 - remove an FB from the configuration
+ * @dev: drm device for the ioctl
+ * @data: data pointer for the ioctl
+ * @file_priv: drm file for the ioctl call
+ *
+ * Remove the FB specified by the user.
+ *
+ * Called by the user via ioctl.
+ *
+ * Returns:
+ * Zero on success, negative errno on failure.
+ */
+static int msm_ioctl_rmfb2(struct drm_device *dev, void *data,
+ struct drm_file *file_priv)
+{
+ struct drm_framebuffer *fb = NULL;
+ struct drm_framebuffer *fbl = NULL;
+ uint32_t *id = data;
+ int found = 0;
+
+ if (!drm_core_check_feature(dev, DRIVER_MODESET))
+ return -EINVAL;
+
+ fb = drm_framebuffer_lookup(dev, *id);
+ if (!fb)
+ return -ENOENT;
+
+ /* drop extra ref from traversing drm_framebuffer_lookup */
+ drm_framebuffer_unreference(fb);
+
+ mutex_lock(&file_priv->fbs_lock);
+ list_for_each_entry(fbl, &file_priv->fbs, filp_head)
+ if (fb == fbl)
+ found = 1;
+ if (!found) {
+ mutex_unlock(&file_priv->fbs_lock);
+ return -ENOENT;
+ }
+
+ list_del_init(&fb->filp_head);
+ mutex_unlock(&file_priv->fbs_lock);
+
+ drm_framebuffer_unreference(fb);
+
+ return 0;
+}
+
static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GET_PARAM, msm_ioctl_get_param, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_NEW, msm_ioctl_gem_new, DRM_AUTH|DRM_RENDER_ALLOW),
@@ -935,6 +1925,29 @@ static const struct drm_ioctl_desc msm_ioctls[] = {
DRM_IOCTL_DEF_DRV(MSM_GEM_CPU_FINI, msm_ioctl_gem_cpu_fini, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_GEM_SUBMIT, msm_ioctl_gem_submit, DRM_AUTH|DRM_RENDER_ALLOW),
DRM_IOCTL_DEF_DRV(MSM_WAIT_FENCE, msm_ioctl_wait_fence, DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(SDE_WB_CONFIG, sde_wb_config, DRM_UNLOCKED|DRM_AUTH),
+ DRM_IOCTL_DEF_DRV(MSM_REGISTER_EVENT, msm_ioctl_register_event,
+ DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_DEREGISTER_EVENT, msm_ioctl_deregister_event,
+ DRM_UNLOCKED|DRM_CONTROL_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_COUNTER_GET, msm_ioctl_counter_get,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_COUNTER_PUT, msm_ioctl_counter_put,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_COUNTER_READ, msm_ioctl_counter_read,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SYNC, msm_ioctl_gem_sync,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_GEM_SVM_NEW, msm_ioctl_gem_svm_new,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query,
+ DRM_AUTH|DRM_RENDER_ALLOW),
+ DRM_IOCTL_DEF_DRV(MSM_RMFB2, msm_ioctl_rmfb2,
+ DRM_CONTROL_ALLOW|DRM_UNLOCKED),
};
static const struct vm_operations_struct vm_ops = {
@@ -946,7 +1959,7 @@ static const struct vm_operations_struct vm_ops = {
static const struct file_operations fops = {
.owner = THIS_MODULE,
.open = drm_open,
- .release = drm_release,
+ .release = msm_release,
.unlocked_ioctl = drm_ioctl,
#ifdef CONFIG_COMPAT
.compat_ioctl = drm_compat_ioctl,
@@ -968,6 +1981,7 @@ static struct drm_driver msm_driver = {
.unload = msm_unload,
.open = msm_open,
.preclose = msm_preclose,
+ .postclose = msm_postclose,
.lastclose = msm_lastclose,
.set_busid = drm_platform_set_busid,
.irq_handler = msm_irq,
@@ -999,9 +2013,9 @@ static struct drm_driver msm_driver = {
.debugfs_cleanup = msm_debugfs_cleanup,
#endif
.ioctls = msm_ioctls,
- .num_ioctls = DRM_MSM_NUM_IOCTLS,
+ .num_ioctls = ARRAY_SIZE(msm_ioctls),
.fops = &fops,
- .name = "msm",
+ .name = "msm_drm",
.desc = "MSM Snapdragon DRM",
.date = "20130625",
.major = 1,
@@ -1011,8 +2025,75 @@ static struct drm_driver msm_driver = {
#ifdef CONFIG_PM_SLEEP
static int msm_pm_suspend(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_connector *conn;
+ struct drm_atomic_state *state;
+ struct drm_crtc_state *crtc_state;
+ struct msm_drm_private *priv;
+ int ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+ SDE_EVT32(0);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_lock_all(ddev);
+ ctx = ddev->mode_config.acquire_ctx;
+
+ /* save current state for resume */
+ if (priv->suspend_state)
+ drm_atomic_state_free(priv->suspend_state);
+ priv->suspend_state = drm_atomic_helper_duplicate_state(ddev, ctx);
+ if (IS_ERR_OR_NULL(priv->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ priv->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = ctx;
+ drm_for_each_connector(conn, ddev) {
+
+ if (!conn->state || !conn->state->crtc ||
+ conn->dpms != DRM_MODE_DPMS_ON)
+ continue;
+ /* force CRTC to be inactive */
+ crtc_state = drm_atomic_get_crtc_state(state,
+ conn->state->crtc);
+ if (IS_ERR_OR_NULL(crtc_state)) {
+ DRM_ERROR("failed to get crtc %d state\n",
+ conn->state->crtc->base.id);
+ drm_atomic_state_free(state);
+ goto unlock;
+ }
+ crtc_state->active = false;
+ }
+
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_free(state);
+ }
+
+unlock:
+ drm_modeset_unlock_all(ddev);
+
+ /* disable hot-plug polling */
drm_kms_helper_poll_disable(ddev);
return 0;
@@ -1020,16 +2101,207 @@ static int msm_pm_suspend(struct device *dev)
static int msm_pm_resume(struct device *dev)
{
- struct drm_device *ddev = dev_get_drvdata(dev);
+ struct drm_device *ddev;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+
+ SDE_EVT32(priv->suspend_state != NULL);
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ if (priv->suspend_state) {
+ priv->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(priv->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_free(priv->suspend_state);
+ }
+ priv->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+
+ /* enable hot-plug polling */
drm_kms_helper_poll_enable(ddev);
return 0;
}
+
+static int msm_pm_freeze(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_crtc *crtc;
+ struct drm_modeset_acquire_ctx *ctx;
+ struct drm_atomic_state *state;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int early_display = 0;
+ int ret = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+
+ kms = priv->kms;
+ if (kms && kms->funcs && kms->funcs->early_display_status)
+ early_display = kms->funcs->early_display_status(kms);
+
+ SDE_EVT32(0);
+
+ if (early_display) {
+ /* acquire modeset lock(s) */
+ drm_modeset_lock_all(ddev);
+ ctx = ddev->mode_config.acquire_ctx;
+
+ /* save current state for restore */
+ if (priv->suspend_state)
+ drm_atomic_state_free(priv->suspend_state);
+
+ priv->suspend_state =
+ drm_atomic_helper_duplicate_state(ddev, ctx);
+
+ if (IS_ERR_OR_NULL(priv->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ priv->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic null state to idle CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate null atomic state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = ctx;
+
+ /* commit the null state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to commit null state, %d\n", ret);
+ drm_atomic_state_free(state);
+ }
+
+ drm_for_each_crtc(crtc, ddev)
+ drm_crtc_vblank_off(crtc);
+
+unlock:
+ drm_modeset_unlock_all(ddev);
+ } else {
+ ret = msm_pm_suspend(dev);
+ if (ret)
+ return ret;
+ }
+ return 0;
+}
+
+static int msm_pm_restore(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv;
+ struct msm_kms *kms;
+ int early_display = 0;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev->dev_private)
+ return -EINVAL;
+
+ priv = ddev->dev_private;
+
+ kms = priv->kms;
+ if (kms && kms->funcs && kms->funcs->early_display_status)
+ early_display = kms->funcs->early_display_status(kms);
+
+
+ SDE_EVT32(priv->suspend_state != NULL);
+
+ if (early_display) {
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ drm_for_each_crtc(crtc, ddev)
+ drm_crtc_vblank_on(crtc);
+
+ if (priv->suspend_state) {
+ priv->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+
+ ret = drm_atomic_commit(priv->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_free(priv->suspend_state);
+ }
+
+ priv->suspend_state = NULL;
+ }
+
+ drm_modeset_unlock_all(ddev);
+ } else {
+ ret = msm_pm_resume(dev);
+ if (ret)
+ return ret;
+ }
+
+ return 0;
+}
+
+static int msm_pm_thaw(struct device *dev)
+{
+ msm_pm_restore(dev);
+
+ return 0;
+}
#endif
static const struct dev_pm_ops msm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(msm_pm_suspend, msm_pm_resume)
+ .suspend = msm_pm_suspend,
+ .resume = msm_pm_resume,
+ .freeze = msm_pm_freeze,
+ .restore = msm_pm_restore,
+ .thaw = msm_pm_thaw,
+};
+
+static int msm_drm_bind(struct device *dev)
+{
+ int ret;
+
+ ret = drm_platform_init(&msm_driver, to_platform_device(dev));
+ if (ret)
+ DRM_ERROR("drm_platform_init failed: %d\n", ret);
+
+ return ret;
+}
+
+static void msm_drm_unbind(struct device *dev)
+{
+ drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+}
+
+static const struct component_master_ops msm_drm_ops = {
+ .bind = msm_drm_bind,
+ .unbind = msm_drm_unbind,
};
/*
@@ -1063,27 +2335,31 @@ static int add_components(struct device *dev, struct component_match **matchptr,
return 0;
}
-#else
-static int compare_dev(struct device *dev, void *data)
+
+static int msm_add_master_component(struct device *dev,
+ struct component_match *match)
{
- return dev == data;
+ int ret;
+
+ ret = component_master_add_with_match(dev, &msm_drm_ops, match);
+ if (ret)
+ DRM_ERROR("component add match failed: %d\n", ret);
+
+ return ret;
}
-#endif
-static int msm_drm_bind(struct device *dev)
+#else
+static int compare_dev(struct device *dev, void *data)
{
- return drm_platform_init(&msm_driver, to_platform_device(dev));
+ return dev == data;
}
-static void msm_drm_unbind(struct device *dev)
+static int msm_add_master_component(struct device *dev,
+ struct component_match *match)
{
- drm_put_dev(platform_get_drvdata(to_platform_device(dev)));
+ return 0;
}
-
-static const struct component_master_ops msm_drm_ops = {
- .bind = msm_drm_bind,
- .unbind = msm_drm_unbind,
-};
+#endif
/*
* Platform driver:
@@ -1091,10 +2367,16 @@ static const struct component_master_ops msm_drm_ops = {
static int msm_pdev_probe(struct platform_device *pdev)
{
+ int ret;
struct component_match *match = NULL;
+
+ msm_drm_probed = true;
+
#ifdef CONFIG_OF
add_components(&pdev->dev, &match, "connectors");
+#ifndef CONFIG_QCOM_KGSL
add_components(&pdev->dev, &match, "gpus");
+#endif
#else
/* For non-DT case, it kinda sucks. We don't actually have a way
* to know whether or not we are waiting for certain devices (or if
@@ -1121,15 +2403,23 @@ static int msm_pdev_probe(struct platform_device *pdev)
component_match_add(&pdev->dev, &match, compare_dev, dev);
}
#endif
+ /* on all devices that I am aware of, iommu's which can map
+ * any address the cpu can see are used:
+ */
+ ret = dma_set_mask_and_coherent(&pdev->dev, ~0);
+ if (ret)
+ return ret;
+
+ ret = msm_add_master_component(&pdev->dev, match);
+ complete(&wait_display_completion);
- pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
- return component_master_add_with_match(&pdev->dev, &msm_drm_ops, match);
+ return ret;
}
static int msm_pdev_remove(struct platform_device *pdev)
{
+ msm_drm_unbind(&pdev->dev);
component_master_del(&pdev->dev, &msm_drm_ops);
-
return 0;
}
@@ -1138,31 +2428,81 @@ static const struct platform_device_id msm_id[] = {
{ }
};
+static void msm_pdev_shutdown(struct platform_device *pdev)
+{
+ struct drm_device *ddev = platform_get_drvdata(pdev);
+ struct msm_drm_private *priv = NULL;
+
+ if (!ddev) {
+ DRM_ERROR("invalid drm device node\n");
+ return;
+ }
+
+ priv = ddev->dev_private;
+ if (!priv) {
+ DRM_ERROR("invalid msm drm private node\n");
+ return;
+ }
+
+ msm_lastclose(ddev);
+
+ /* set this after lastclose to allow kickoff from lastclose */
+ priv->shutdown_in_progress = true;
+}
+
static const struct of_device_id dt_match[] = {
{ .compatible = "qcom,mdp" }, /* mdp4 */
- { .compatible = "qcom,mdss_mdp" }, /* mdp5 */
+ { .compatible = "qcom,sde-kms" }, /* sde */
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
+static int find_match(struct device *dev, void *data)
+{
+ struct device_driver *drv = data;
+
+ return drv->bus->match(dev, drv);
+}
+
+static bool find_device(struct platform_driver *pdrv)
+{
+ struct device_driver *drv = &pdrv->driver;
+
+ return bus_for_each_dev(drv->bus, NULL, drv, find_match);
+}
+
static struct platform_driver msm_platform_driver = {
.probe = msm_pdev_probe,
.remove = msm_pdev_remove,
+ .shutdown = msm_pdev_shutdown,
.driver = {
- .name = "msm",
+ .name = "msm_drm",
.of_match_table = dt_match,
.pm = &msm_pm_ops,
+ .probe_type = PROBE_PREFER_ASYNCHRONOUS,
},
.id_table = msm_id,
};
+#ifdef CONFIG_QCOM_KGSL
+void __init adreno_register(void)
+{
+}
+
+void __exit adreno_unregister(void)
+{
+}
+#endif
+
static int __init msm_drm_register(void)
{
DBG("init");
+ msm_smmu_driver_init();
msm_dsi_register();
msm_edp_register();
hdmi_register();
adreno_register();
+ init_completion(&wait_display_completion);
return platform_driver_register(&msm_platform_driver);
}
@@ -1174,10 +2514,25 @@ static void __exit msm_drm_unregister(void)
adreno_unregister();
msm_edp_unregister();
msm_dsi_unregister();
+ msm_smmu_driver_cleanup();
+}
+
+static int __init msm_drm_late_register(void)
+{
+ struct platform_driver *pdrv;
+
+ pdrv = &msm_platform_driver;
+ if (msm_drm_probed || find_device(pdrv)) {
+ pr_debug("wait for display probe completion\n");
+ wait_for_completion(&wait_display_completion);
+ }
+ return 0;
}
module_init(msm_drm_register);
module_exit(msm_drm_unregister);
+/* init level 7 */
+late_initcall(msm_drm_late_register);
MODULE_AUTHOR("Rob Clark <robdclark@gmail.com");
MODULE_DESCRIPTION("MSM DRM Driver");
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index 026e156e519c..a9c28feb11ce 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -31,7 +32,11 @@
#include <linux/iommu.h>
#include <linux/types.h>
#include <linux/of_graph.h>
+#include <linux/of_device.h>
+#include <linux/sde_io_util.h>
+#include <linux/hashtable.h>
#include <asm/sizes.h>
+#include <linux/kthread.h>
#ifndef CONFIG_OF
#include <mach/board.h>
@@ -48,40 +53,233 @@
#include <drm/msm_drm.h>
#include <drm/drm_gem.h>
+#include "sde_power_handle.h"
+
+#define GET_MAJOR_REV(rev) ((rev) >> 28)
+#define GET_MINOR_REV(rev) (((rev) >> 16) & 0xFFF)
+#define GET_STEP_REV(rev) ((rev) & 0xFFFF)
+
struct msm_kms;
struct msm_gpu;
struct msm_mmu;
struct msm_rd_state;
struct msm_perf_state;
struct msm_gem_submit;
+struct msm_gem_address_space;
+struct msm_gem_vma;
-#define NUM_DOMAINS 2 /* one for KMS, then one per gpu core (?) */
+#define NUM_DOMAINS 4 /* one for KMS, then one per gpu core (?) */
+#define MAX_CRTCS 8
+#define MAX_PLANES 12
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
struct msm_file_private {
- /* currently we don't do anything useful with this.. but when
- * per-context address spaces are supported we'd keep track of
- * the context's page-tables here.
- */
- int dummy;
+ struct msm_gem_address_space *aspace;
+ struct list_head counters;
+ rwlock_t queuelock;
+ struct list_head submitqueues;
+ int queueid;
};
enum msm_mdp_plane_property {
- PLANE_PROP_ZPOS,
+ /* blob properties, always put these first */
+ PLANE_PROP_SCALER_V1,
+ PLANE_PROP_SCALER_V2,
+ PLANE_PROP_CSC_V1,
+ PLANE_PROP_INFO,
+ PLANE_PROP_SCALER_LUT_ED,
+ PLANE_PROP_SCALER_LUT_CIR,
+ PLANE_PROP_SCALER_LUT_SEP,
+ PLANE_PROP_SKIN_COLOR,
+ PLANE_PROP_SKY_COLOR,
+ PLANE_PROP_FOLIAGE_COLOR,
+
+ /* # of blob properties */
+ PLANE_PROP_BLOBCOUNT,
+
+ /* range properties */
+ PLANE_PROP_ZPOS = PLANE_PROP_BLOBCOUNT,
PLANE_PROP_ALPHA,
- PLANE_PROP_PREMULTIPLIED,
- PLANE_PROP_MAX_NUM
+ PLANE_PROP_COLOR_FILL,
+ PLANE_PROP_H_DECIMATE,
+ PLANE_PROP_V_DECIMATE,
+ PLANE_PROP_INPUT_FENCE,
+ PLANE_PROP_HUE_ADJUST,
+ PLANE_PROP_SATURATION_ADJUST,
+ PLANE_PROP_VALUE_ADJUST,
+ PLANE_PROP_CONTRAST_ADJUST,
+
+ /* enum/bitmask properties */
+ PLANE_PROP_ROTATION,
+ PLANE_PROP_BLEND_OP,
+ PLANE_PROP_SRC_CONFIG,
+ PLANE_PROP_FB_TRANSLATION_MODE,
+
+ /* total # of properties */
+ PLANE_PROP_COUNT
+};
+
+enum msm_mdp_crtc_property {
+ CRTC_PROP_INFO,
+
+ /* # of blob properties */
+ CRTC_PROP_BLOBCOUNT,
+
+ /* range properties */
+ CRTC_PROP_INPUT_FENCE_TIMEOUT = CRTC_PROP_BLOBCOUNT,
+ CRTC_PROP_OUTPUT_FENCE,
+ CRTC_PROP_OUTPUT_FENCE_OFFSET,
+ CRTC_PROP_CORE_CLK,
+ CRTC_PROP_CORE_AB,
+ CRTC_PROP_CORE_IB,
+ CRTC_PROP_SECURITY_LEVEL,
+
+ /* total # of properties */
+ CRTC_PROP_COUNT
+};
+
+enum msm_mdp_conn_property {
+ /* blob properties, always put these first */
+ CONNECTOR_PROP_SDE_INFO,
+ CONNECTOR_PROP_HDR_INFO,
+ CONNECTOR_PROP_HDR_CONTROL,
+
+ /* # of blob properties */
+ CONNECTOR_PROP_BLOBCOUNT,
+
+ /* range properties */
+ CONNECTOR_PROP_OUT_FB = CONNECTOR_PROP_BLOBCOUNT,
+ CONNECTOR_PROP_RETIRE_FENCE,
+ CONNECTOR_PROP_DST_X,
+ CONNECTOR_PROP_DST_Y,
+ CONNECTOR_PROP_DST_W,
+ CONNECTOR_PROP_DST_H,
+ CONNECTOR_PROP_PLL_DELTA,
+ CONNECTOR_PROP_PLL_ENABLE,
+ CONNECTOR_PROP_HDCP_VERSION,
+
+ /* enum/bitmask properties */
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ CONNECTOR_PROP_TOPOLOGY_CONTROL,
+ CONNECTOR_PROP_LP,
+ CONNECTOR_PROP_HPD_OFF,
+
+ /* total # of properties */
+ CONNECTOR_PROP_COUNT
};
struct msm_vblank_ctrl {
- struct work_struct work;
+ struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_compression - compression method used for pixel stream
+ * @MSM_DISPLAY_COMPRESS_NONE: Pixel data is not compressed
+ * @MSM_DISPLAY_COMPRESS_DSC: DSC compression is used
+ * @MSM_DISPLAY_COMPRESS_FBC: FBC compression is used
+ */
+enum msm_display_compression {
+ MSM_DISPLAY_COMPRESS_NONE,
+ MSM_DISPLAY_COMPRESS_DSC,
+ MSM_DISPLAY_COMPRESS_FBC,
+};
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID: EDID supported
+ * @MSM_DISPLAY_CAP_SHARED: Display is shared
+ */
+enum msm_display_caps {
+ MSM_DISPLAY_CAP_VID_MODE = BIT(0),
+ MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
+ MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
+ MSM_DISPLAY_CAP_EDID = BIT(3),
+ MSM_DISPLAY_CAP_SHARED = BIT(4),
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @capabilities: Bitmask of display flags
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_connected: Set to true if display is connected
+ * @width_mm: Physical width
+ * @height_mm: Physical height
+ * @max_width: Max width of display. In case of hot pluggable display
+ * this is max width supported by controller
+ * @max_height: Max height of display. In case of hot pluggable display
+ * this is max height supported by controller
+ * @compression: Compression supported by the display
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t capabilities;
+
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+
+ bool is_connected;
+
+ unsigned int width_mm;
+ unsigned int height_mm;
+
+ uint32_t max_width;
+ uint32_t max_height;
+
+ enum msm_display_compression compression;
+};
+
+/**
+ * struct - msm_display_kickoff_params - info for display features at kickoff
+ * @hdr_ctrl: HDR control info passed from userspace
+ */
+struct msm_display_kickoff_params {
+ struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+};
+
+/**
+ * struct msm_drm_event - defines custom event notification struct
+ * @base: base object required for event notification by DRM framework.
+ * @event: event object required for event notification by DRM framework.
+ * @info: contains information of DRM object for which events has been
+ * requested.
+ * @data: memory location which contains response payload for event.
+ */
+struct msm_drm_event {
+ struct drm_pending_event base;
+ struct drm_event event;
+ struct drm_msm_event_req info;
+ u8 data[];
+};
+
+/* Commit thread specific structure */
+struct msm_drm_commit {
+ struct drm_device *dev;
+ struct task_struct *thread;
+ unsigned int crtc_id;
+ struct kthread_worker worker;
+};
+
+#define MSM_GPU_MAX_RINGS 4
+
struct msm_drm_private {
struct msm_kms *kms;
+ struct sde_power_handle phandle;
+ struct sde_power_client *pclient;
+
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
@@ -101,11 +299,11 @@ struct msm_drm_private {
/* when we have more than one 'msm_gpu' these need to be an array: */
struct msm_gpu *gpu;
- struct msm_file_private *lastctx;
struct drm_fb_helper *fbdev;
- uint32_t next_fence, completed_fence;
+ uint32_t completed_fence[MSM_GPU_MAX_RINGS];
+
wait_queue_head_t fence_event;
struct msm_rd_state *rd;
@@ -121,29 +319,46 @@ struct msm_drm_private {
/* crtcs pending async atomic updates: */
uint32_t pending_crtcs;
+ uint32_t pending_planes;
wait_queue_head_t pending_crtcs_event;
- /* registered MMUs: */
- unsigned int num_mmus;
- struct msm_mmu *mmus[NUM_DOMAINS];
+ /* Registered address spaces.. currently this is fixed per # of
+ * iommu's. Ie. one for display block and one for gpu block.
+ * Eventually, to do per-process gpu pagetables, we'll want one
+ * of these per-process.
+ */
+ unsigned int num_aspaces;
+ struct msm_gem_address_space *aspace[NUM_DOMAINS];
unsigned int num_planes;
- struct drm_plane *planes[8];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_commit disp_thread[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
+
+ /* hash to store mm_struct to msm_mmu_notifier mappings */
+ DECLARE_HASHTABLE(mn_hash, 7);
+ /* protects mn_hash and the msm_mmu_notifier for the process */
+ struct mutex mn_lock;
/* Properties */
- struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
+ struct drm_property *plane_property[PLANE_PROP_COUNT];
+ struct drm_property *crtc_property[CRTC_PROP_COUNT];
+ struct drm_property *conn_property[CONNECTOR_PROP_COUNT];
+
+ /* Color processing properties for the crtc */
+ struct drm_property **cp_property;
/* VRAM carveout, used when no IOMMU: */
struct {
@@ -153,15 +368,50 @@ struct msm_drm_private {
* and position mm_node->start is in # of pages:
*/
struct drm_mm mm;
+ spinlock_t lock; /* Protects drm_mm node allocation/removal */
} vram;
struct msm_vblank_ctrl vblank_ctrl;
+
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+
+ /* list of clients waiting for events */
+ struct list_head client_event_list;
+
+ /* update the flag when msm driver receives shutdown notification */
+ bool shutdown_in_progress;
};
struct msm_format {
uint32_t pixel_format;
};
+/*
+ * Some GPU targets can support multiple ringbuffers and preempt between them.
+ * In order to do this without massive API changes we will steal two bits from
+ * the top of the fence and use them to identify the ringbuffer, (0x00000001 for
+ * ring 0, 0x40000001 for ring 1, 0x80000001 for ring 2, etc). If you are going
+ * to do a fence comparison you have to make sure you are only comparing
+ * against fences from the same ring, but since fences within a ringbuffer are
+ * still contiguous you can still use straight comparisons (i.e. 0x40000001 is
+ * older than 0x40000002). Mathematically there will be 0x3FFFFFFF timestamps
+ * per ring or ~103 days of 120 interrupts per second (two interrupts per frame
+ * at 60 FPS).
+ */
+#define FENCE_RING(_fence) ((_fence >> 30) & 3)
+#define FENCE(_ring, _fence) ((((_ring) & 3) << 30) | ((_fence) & 0x3FFFFFFF))
+
+static inline bool COMPARE_FENCE_LTE(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a <= b);
+}
+
+static inline bool COMPARE_FENCE_LT(uint32_t a, uint32_t b)
+{
+ return ((FENCE_RING(a) == FENCE_RING(b)) && a < b);
+}
+
/* callback from wq once fence has passed: */
struct msm_fence_cb {
struct work_struct work;
@@ -176,19 +426,52 @@ void __msm_fence_worker(struct work_struct *work);
(_cb)->func = _func; \
} while (0)
-int msm_atomic_check(struct drm_device *dev,
- struct drm_atomic_state *state);
+static inline bool msm_is_suspend_state(struct drm_device *dev)
+{
+ if (!dev || !dev->dev_private)
+ return false;
+
+ return ((struct msm_drm_private *)dev->dev_private)->suspend_state !=
+ NULL;
+}
+
int msm_atomic_commit(struct drm_device *dev,
struct drm_atomic_state *state, bool async);
-int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
-
int msm_wait_fence(struct drm_device *dev, uint32_t fence,
ktime_t *timeout, bool interruptible);
int msm_queue_fence_cb(struct drm_device *dev,
struct msm_fence_cb *cb, uint32_t fence);
void msm_update_fence(struct drm_device *dev, uint32_t fence);
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, bool invalidated);
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags);
+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *domain,
+ uint64_t hostptr, uint64_t size);
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma);
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace);
+
+/* For GPU and legacy display */
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ int type, const char *name);
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end);
+
+/* For SDE display */
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name);
+
+void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -197,13 +480,14 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova);
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova);
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id);
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova);
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
struct page **msm_gem_get_pages(struct drm_gem_object *obj);
void msm_gem_put_pages(struct drm_gem_object *obj);
-void msm_gem_put_iova(struct drm_gem_object *obj, int id);
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace);
int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
int msm_gem_dumb_map_offset(struct drm_file *file, struct drm_device *dev,
@@ -217,7 +501,6 @@ struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg);
int msm_gem_prime_pin(struct drm_gem_object *obj);
void msm_gem_prime_unpin(struct drm_gem_object *obj);
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj);
void *msm_gem_vaddr(struct drm_gem_object *obj);
int msm_gem_queue_inactive_cb(struct drm_gem_object *obj,
struct msm_fence_cb *cb);
@@ -232,12 +515,29 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
uint32_t size, uint32_t flags, uint32_t *handle);
struct drm_gem_object *msm_gem_new(struct drm_device *dev,
uint32_t size, uint32_t flags);
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+ uint32_t size, uint32_t flags);
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
- uint32_t size, struct sg_table *sgt);
-
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id);
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id);
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane);
+ uint32_t size, struct sg_table *sgt, u32 flags);
+void msm_gem_sync(struct drm_gem_object *obj, u32 op);
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint64_t hostptr, uint64_t size,
+ uint32_t flags, uint32_t *handle);
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+ struct drm_file *file, uint64_t hostptr,
+ uint64_t size, uint32_t flags);
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova);
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace);
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane);
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane);
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb);
struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
@@ -247,6 +547,19 @@ struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
struct drm_fb_helper *msm_fbdev_init(struct drm_device *dev);
+struct msm_gpu_submitqueue;
+int msm_submitqueue_init(struct msm_file_private *ctx);
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id);
+int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio,
+ u32 flags, u32 *id);
+int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param,
+ void __user *data, u32 len);
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id);
+void msm_submitqueue_close(struct msm_file_private *ctx);
+
+void msm_submitqueue_destroy(struct kref *kref);
+
struct hdmi;
int hdmi_modeset_init(struct hdmi *hdmi, struct drm_device *dev,
struct drm_encoder *encoder);
@@ -265,6 +578,15 @@ enum msm_dsi_encoder_id {
MSM_DSI_CMD_ENCODER_ID = 1,
MSM_DSI_ENCODER_NUM = 2
};
+
+/**
+ * msm_send_crtc_notification - notify user-space clients of crtc events.
+ * @crtc: crtc that is generating the event.
+ * @event: event that needs to be notified.
+ * @payload: payload for the event.
+ */
+void msm_send_crtc_notification(struct drm_crtc *crtc,
+ struct drm_event *event, u8 *payload);
#ifdef CONFIG_DRM_MSM_DSI
void __init msm_dsi_register(void);
void __exit msm_dsi_unregister(void);
@@ -302,6 +624,7 @@ static inline void msm_rd_dump_submit(struct msm_gem_submit *submit) {}
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
+void msm_iounmap(struct platform_device *dev, void __iomem *addr);
void msm_writel(u32 data, void __iomem *addr);
u32 msm_readl(const void __iomem *addr);
@@ -311,7 +634,8 @@ u32 msm_readl(const void __iomem *addr);
static inline bool fence_completed(struct drm_device *dev, uint32_t fence)
{
struct msm_drm_private *priv = dev->dev_private;
- return priv->completed_fence >= fence;
+
+ return priv->completed_fence[FENCE_RING(fence)] >= fence;
}
static inline int align_pitch(int width, int bpp)
@@ -332,5 +656,4 @@ static inline int align_pitch(int width, int bpp)
/* for conditionally setting boolean flag(s): */
#define COND(bool, val) ((bool) ? (val) : 0)
-
#endif /* __MSM_DRV_H__ */
diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c
index 121713281417..8d6d83bf6540 100644
--- a/drivers/gpu/drm/msm/msm_fb.c
+++ b/drivers/gpu/drm/msm/msm_fb.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -33,17 +34,33 @@ static int msm_framebuffer_create_handle(struct drm_framebuffer *fb,
struct drm_file *file_priv,
unsigned int *handle)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+
return drm_gem_handle_create(file_priv,
msm_fb->planes[0], handle);
}
static void msm_framebuffer_destroy(struct drm_framebuffer *fb)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
- DBG("destroy: FB ID: %d (%p)", fb->base.id, fb);
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
+
+ DBG("destroy: FB ID: %d (%pK)", fb->base.id, fb);
drm_framebuffer_cleanup(fb);
@@ -72,9 +89,16 @@ static const struct drm_framebuffer_funcs msm_framebuffer_funcs = {
#ifdef CONFIG_DEBUG_FS
void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
seq_printf(m, "fb: %dx%d@%4.4s (%2d, ID:%d)\n",
fb->width, fb->height, (char *)&fb->pixel_format,
fb->refcount.refcount.counter, fb->base.id);
@@ -92,15 +116,23 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m)
* should be fine, since only the scanout (mdpN) side of things needs
* this, the gpu doesn't care about fb's.
*/
-int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
+int msm_framebuffer_prepare(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int ret, i, n = drm_format_num_planes(fb->pixel_format);
- uint32_t iova;
+ struct msm_framebuffer *msm_fb;
+ int ret, i, n;
+ uint64_t iova;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++) {
- ret = msm_gem_get_iova(msm_fb->planes[i], id, &iova);
- DBG("FB[%u]: iova[%d]: %08x (%d)", fb->base.id, i, iova, ret);
+ ret = msm_gem_get_iova(msm_fb->planes[i], aspace, &iova);
+ DBG("FB[%u]: iova[%d]: %08llx (%d)", fb->base.id, i, iova, ret);
if (ret)
return ret;
}
@@ -108,33 +140,62 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, int id)
return 0;
}
-void msm_framebuffer_cleanup(struct drm_framebuffer *fb, int id)
+void msm_framebuffer_cleanup(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- int i, n = drm_format_num_planes(fb->pixel_format);
+ struct msm_framebuffer *msm_fb;
+ int i, n;
+
+ if (fb == NULL) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
+ n = drm_format_num_planes(fb->pixel_format);
for (i = 0; i < n; i++)
- msm_gem_put_iova(msm_fb->planes[i], id);
+ msm_gem_put_iova(msm_fb->planes[i], aspace);
}
-uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int id, int plane)
+/* FIXME: Leave this as a uint32_t and just return the lower 32 bits? */
+uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb,
+ struct msm_gem_address_space *aspace, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+ uint64_t iova;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return -EINVAL;
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
if (!msm_fb->planes[plane])
return 0;
- return msm_gem_iova(msm_fb->planes[plane], id) + fb->offsets[plane];
+
+ iova = msm_gem_iova(msm_fb->planes[plane], aspace) + fb->offsets[plane];
+
+ /* FIXME: Make sure it is < 32 bits */
+ return lower_32_bits(iova);
}
struct drm_gem_object *msm_framebuffer_bo(struct drm_framebuffer *fb, int plane)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
+ struct msm_framebuffer *msm_fb;
+
+ if (!fb) {
+ DRM_ERROR("from:%pS null fb\n", __builtin_return_address(0));
+ return ERR_PTR(-EINVAL);
+ }
+
+ msm_fb = to_msm_framebuffer(fb);
return msm_fb->planes[plane];
}
const struct msm_format *msm_framebuffer_format(struct drm_framebuffer *fb)
{
- struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb);
- return msm_fb->format;
+ return fb ? (to_msm_framebuffer(fb))->format : NULL;
}
struct drm_framebuffer *msm_framebuffer_create(struct drm_device *dev,
@@ -175,18 +236,20 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
struct msm_framebuffer *msm_fb = NULL;
struct drm_framebuffer *fb;
const struct msm_format *format;
- int ret, i, n;
+ int ret, i, num_planes;
unsigned int hsub, vsub;
+ bool is_modified = false;
- DBG("create framebuffer: dev=%p, mode_cmd=%p (%dx%d@%4.4s)",
+ DBG("create framebuffer: dev=%pK, mode_cmd=%pK (%dx%d@%4.4s)",
dev, mode_cmd, mode_cmd->width, mode_cmd->height,
(char *)&mode_cmd->pixel_format);
- n = drm_format_num_planes(mode_cmd->pixel_format);
+ num_planes = drm_format_num_planes(mode_cmd->pixel_format);
hsub = drm_format_horz_chroma_subsampling(mode_cmd->pixel_format);
vsub = drm_format_vert_chroma_subsampling(mode_cmd->pixel_format);
- format = kms->funcs->get_format(kms, mode_cmd->pixel_format);
+ format = kms->funcs->get_format(kms, mode_cmd->pixel_format,
+ mode_cmd->modifier, num_planes);
if (!format) {
dev_err(dev->dev, "unsupported pixel format: %4.4s\n",
(char *)&mode_cmd->pixel_format);
@@ -204,27 +267,53 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
msm_fb->format = format;
- if (n > ARRAY_SIZE(msm_fb->planes)) {
+ if (mode_cmd->flags & DRM_MODE_FB_MODIFIERS) {
+ for (i = 0; i < ARRAY_SIZE(mode_cmd->modifier); i++) {
+ if (mode_cmd->modifier[i]) {
+ is_modified = true;
+ break;
+ }
+ }
+ }
+
+ if (num_planes > ARRAY_SIZE(msm_fb->planes)) {
ret = -EINVAL;
goto fail;
}
- for (i = 0; i < n; i++) {
- unsigned int width = mode_cmd->width / (i ? hsub : 1);
- unsigned int height = mode_cmd->height / (i ? vsub : 1);
- unsigned int min_size;
-
- min_size = (height - 1) * mode_cmd->pitches[i]
- + width * drm_format_plane_cpp(mode_cmd->pixel_format, i)
- + mode_cmd->offsets[i];
-
- if (bos[i]->size < min_size) {
+ if (is_modified) {
+ if (!kms->funcs->check_modified_format) {
+ dev_err(dev->dev, "can't check modified fb format\n");
ret = -EINVAL;
goto fail;
+ } else {
+ ret = kms->funcs->check_modified_format(
+ kms, msm_fb->format, mode_cmd, bos);
+ if (ret)
+ goto fail;
+ }
+ } else {
+ for (i = 0; i < num_planes; i++) {
+ unsigned int width = mode_cmd->width / (i ? hsub : 1);
+ unsigned int height = mode_cmd->height / (i ? vsub : 1);
+ unsigned int min_size;
+ unsigned int cpp;
+
+ cpp = drm_format_plane_cpp(mode_cmd->pixel_format, i);
+
+ min_size = (height - 1) * mode_cmd->pitches[i]
+ + width * cpp
+ + mode_cmd->offsets[i];
+
+ if (bos[i]->size < min_size) {
+ ret = -EINVAL;
+ goto fail;
+ }
}
+ }
+ for (i = 0; i < num_planes; i++)
msm_fb->planes[i] = bos[i];
- }
drm_helper_mode_fill_fb_struct(fb, mode_cmd);
@@ -234,7 +323,7 @@ struct drm_framebuffer *msm_framebuffer_init(struct drm_device *dev,
goto fail;
}
- DBG("create: FB ID: %d (%p)", fb->base.id, fb);
+ DBG("create: FB ID: %d (%pK)", fb->base.id, fb);
return fb;
diff --git a/drivers/gpu/drm/msm/msm_fbdev.c b/drivers/gpu/drm/msm/msm_fbdev.c
index 3f6ec077b51d..c71e662d0da1 100644
--- a/drivers/gpu/drm/msm/msm_fbdev.c
+++ b/drivers/gpu/drm/msm/msm_fbdev.c
@@ -85,7 +85,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
struct drm_framebuffer *fb = NULL;
struct fb_info *fbi = NULL;
struct drm_mode_fb_cmd2 mode_cmd = {0};
- uint32_t paddr;
+ uint64_t paddr;
int ret, size;
DBG("create fbdev: %dx%d@%d (%dx%d)", sizes->surface_width,
@@ -104,10 +104,8 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
/* allocate backing bo */
size = mode_cmd.pitches[0] * mode_cmd.height;
DBG("allocating %d bytes for fb %d", size, dev->primary->index);
- mutex_lock(&dev->struct_mutex);
fbdev->bo = msm_gem_new(dev, size, MSM_BO_SCANOUT |
MSM_BO_WC | MSM_BO_STOLEN);
- mutex_unlock(&dev->struct_mutex);
if (IS_ERR(fbdev->bo)) {
ret = PTR_ERR(fbdev->bo);
fbdev->bo = NULL;
@@ -133,7 +131,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
* in panic (ie. lock-safe, etc) we could avoid pinning the
* buffer now:
*/
- ret = msm_gem_get_iova_locked(fbdev->bo, 0, &paddr);
+ ret = msm_gem_get_iova(fbdev->bo, 0, &paddr);
if (ret) {
dev_err(dev->dev, "failed to get buffer obj iova: %d\n", ret);
goto fail_unlock;
@@ -146,7 +144,7 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
goto fail_unlock;
}
- DBG("fbi=%p, dev=%p", fbi, dev);
+ DBG("fbi=%pK, dev=%pK", fbi, dev);
fbdev->fb = fb;
helper->fb = fb;
@@ -160,14 +158,15 @@ static int msm_fbdev_create(struct drm_fb_helper *helper,
drm_fb_helper_fill_fix(fbi, fb->pitches[0], fb->depth);
drm_fb_helper_fill_var(fbi, helper, sizes->fb_width, sizes->fb_height);
- dev->mode_config.fb_base = paddr;
+ /* FIXME: Verify paddr < 32 bits? */
+ dev->mode_config.fb_base = lower_32_bits(paddr);
- fbi->screen_base = msm_gem_vaddr_locked(fbdev->bo);
+ fbi->screen_base = msm_gem_vaddr(fbdev->bo);
fbi->screen_size = fbdev->bo->size;
- fbi->fix.smem_start = paddr;
+ fbi->fix.smem_start = lower_32_bits(paddr);
fbi->fix.smem_len = fbdev->bo->size;
- DBG("par=%p, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
+ DBG("par=%pK, %dx%d", fbi->par, fbi->var.xres, fbi->var.yres);
DBG("allocated %dx%d fb", fbdev->fb->width, fbdev->fb->height);
mutex_unlock(&dev->struct_mutex);
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index 245070950e87..ec8fe4bebb3a 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -18,12 +18,159 @@
#include <linux/spinlock.h>
#include <linux/shmem_fs.h>
#include <linux/dma-buf.h>
+#include <soc/qcom/secure_buffer.h>
#include "msm_drv.h"
#include "msm_gem.h"
#include "msm_gpu.h"
#include "msm_mmu.h"
+static void msm_gem_mn_free(struct kref *refcount)
+{
+ struct msm_mmu_notifier *msm_mn = container_of(refcount,
+ struct msm_mmu_notifier, refcount);
+
+ mmu_notifier_unregister(&msm_mn->mn, msm_mn->mm);
+ hash_del(&msm_mn->node);
+
+ kfree(msm_mn);
+}
+
+static int msm_gem_mn_get(struct msm_mmu_notifier *msm_mn)
+{
+ if (msm_mn)
+ return kref_get_unless_zero(&msm_mn->refcount);
+ return 0;
+}
+
+static void msm_gem_mn_put(struct msm_mmu_notifier *msm_mn)
+{
+ if (msm_mn) {
+ struct msm_drm_private *msm_dev = msm_mn->msm_dev;
+
+ mutex_lock(&msm_dev->mn_lock);
+ kref_put(&msm_mn->refcount, msm_gem_mn_free);
+ mutex_unlock(&msm_dev->mn_lock);
+ }
+}
+
+void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long start, unsigned long end);
+
+static const struct mmu_notifier_ops msm_mn_ops = {
+ .invalidate_range_start = msm_mn_invalidate_range_start,
+};
+
+static struct msm_mmu_notifier *
+msm_gem_mn_find(struct msm_drm_private *msm_dev, struct mm_struct *mm,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_mmu_notifier *msm_mn;
+ int ret = 0;
+
+ mutex_lock(&msm_dev->mn_lock);
+ hash_for_each_possible(msm_dev->mn_hash, msm_mn, node,
+ (unsigned long) mm) {
+ if (msm_mn->mm == mm) {
+ if (!msm_gem_mn_get(msm_mn)) {
+ ret = -EINVAL;
+ goto fail;
+ }
+ mutex_unlock(&msm_dev->mn_lock);
+ return msm_mn;
+ }
+ }
+
+ msm_mn = kzalloc(sizeof(*msm_mn), GFP_KERNEL);
+ if (!msm_mn) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ msm_mn->mm = current->mm;
+ msm_mn->mn.ops = &msm_mn_ops;
+ ret = mmu_notifier_register(&msm_mn->mn, msm_mn->mm);
+ if (ret) {
+ kfree(msm_mn);
+ goto fail;
+ }
+
+ msm_mn->svm_tree = RB_ROOT;
+ spin_lock_init(&msm_mn->svm_tree_lock);
+ kref_init(&msm_mn->refcount);
+ msm_mn->msm_dev = msm_dev;
+
+ /* Insert the msm_mn into the hash */
+ hash_add(msm_dev->mn_hash, &msm_mn->node, (unsigned long) msm_mn->mm);
+ mutex_unlock(&msm_dev->mn_lock);
+
+ return msm_mn;
+
+fail:
+ mutex_unlock(&msm_dev->mn_lock);
+ return ERR_PTR(ret);
+}
+
+static int msm_gem_mn_register(struct msm_gem_svm_object *msm_svm_obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct drm_gem_object *obj = &msm_svm_obj->msm_obj_base.base;
+ struct msm_drm_private *msm_dev = obj->dev->dev_private;
+ struct msm_mmu_notifier *msm_mn;
+
+ msm_svm_obj->mm = current->mm;
+ msm_svm_obj->svm_node.start = msm_svm_obj->hostptr;
+ msm_svm_obj->svm_node.last = msm_svm_obj->hostptr + obj->size - 1;
+
+ msm_mn = msm_gem_mn_find(msm_dev, msm_svm_obj->mm, aspace);
+ if (IS_ERR(msm_mn))
+ return PTR_ERR(msm_mn);
+
+ msm_svm_obj->msm_mn = msm_mn;
+
+ spin_lock(&msm_mn->svm_tree_lock);
+ interval_tree_insert(&msm_svm_obj->svm_node, &msm_mn->svm_tree);
+ spin_unlock(&msm_mn->svm_tree_lock);
+
+ return 0;
+}
+
+static void msm_gem_mn_unregister(struct msm_gem_svm_object *msm_svm_obj)
+{
+ struct msm_mmu_notifier *msm_mn = msm_svm_obj->msm_mn;
+
+ /* invalid: bo already unregistered */
+ if (!msm_mn || msm_svm_obj->invalid)
+ return;
+
+ spin_lock(&msm_mn->svm_tree_lock);
+ interval_tree_remove(&msm_svm_obj->svm_node, &msm_mn->svm_tree);
+ spin_unlock(&msm_mn->svm_tree_lock);
+}
+
+static int protect_pages(struct msm_gem_object *msm_obj)
+{
+ int perm = PERM_READ | PERM_WRITE;
+ int src = VMID_HLOS;
+ int dst = VMID_CP_PIXEL;
+
+ return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
+static int unprotect_pages(struct msm_gem_object *msm_obj)
+{
+ int perm = PERM_READ | PERM_WRITE | PERM_EXEC;
+ int src = VMID_CP_PIXEL;
+ int dst = VMID_HLOS;
+
+ return hyp_assign_table(msm_obj->sgt, &src, 1, &dst, &perm, 1);
+}
+
+static void *get_dmabuf_ptr(struct drm_gem_object *obj)
+{
+ return (obj && obj->import_attach) ? obj->import_attach->dmabuf : NULL;
+}
+
static dma_addr_t physaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -39,8 +186,7 @@ static bool use_pages(struct drm_gem_object *obj)
}
/* allocate pages from VRAM carveout, used when no IOMMU: */
-static struct page **get_pages_vram(struct drm_gem_object *obj,
- int npages)
+static struct page **get_pages_vram(struct drm_gem_object *obj, int npages)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct msm_drm_private *priv = obj->dev->dev_private;
@@ -52,8 +198,10 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
if (!p)
return ERR_PTR(-ENOMEM);
+ spin_lock(&priv->vram.lock);
ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node,
npages, 0, DRM_MM_SEARCH_DEFAULT);
+ spin_unlock(&priv->vram.lock);
if (ret) {
drm_free_large(p);
return ERR_PTR(ret);
@@ -68,7 +216,6 @@ static struct page **get_pages_vram(struct drm_gem_object *obj,
return p;
}
-/* called with dev->struct_mutex held */
static struct page **get_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
@@ -95,46 +242,75 @@ static struct page **get_pages(struct drm_gem_object *obj)
if (IS_ERR(msm_obj->sgt)) {
void *ptr = ERR_CAST(msm_obj->sgt);
- dev_err(dev->dev, "failed to allocate sgt\n");
msm_obj->sgt = NULL;
return ptr;
}
- /* For non-cached buffers, ensure the new pages are clean
- * because display controller, GPU, etc. are not coherent:
+ /*
+ * Make sure to flush the CPU cache for newly allocated memory
+ * so we don't get ourselves into trouble with a dirty cache
*/
if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_map_sg(dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+
+ /* Secure the pages if we need to */
+ if (use_pages(obj) && msm_obj->flags & MSM_BO_SECURE) {
+ int ret = protect_pages(msm_obj);
+
+ if (ret)
+ return ERR_PTR(ret);
+
+ /*
+ * Set a flag to indicate the pages are locked by us and
+ * need to be unlocked when the pages get freed
+ */
+ msm_obj->flags |= MSM_BO_LOCKED;
+ }
}
return msm_obj->pages;
}
+static void put_pages_vram(struct drm_gem_object *obj)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_drm_private *priv = obj->dev->dev_private;
+
+ spin_lock(&priv->vram.lock);
+ drm_mm_remove_node(msm_obj->vram_node);
+ spin_unlock(&priv->vram.lock);
+
+ drm_free_large(msm_obj->pages);
+}
+
static void put_pages(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
if (msm_obj->pages) {
if (msm_obj->sgt) {
- /* For non-cached buffers, ensure the new
- * pages are clean because display controller,
- * GPU, etc. are not coherent:
- */
- if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
- dma_unmap_sg(obj->dev->dev, msm_obj->sgt->sgl,
- msm_obj->sgt->nents,
- DMA_BIDIRECTIONAL);
+ if (msm_obj->flags & MSM_BO_LOCKED) {
+ unprotect_pages(msm_obj);
+ msm_obj->flags &= ~MSM_BO_LOCKED;
+ }
sg_free_table(msm_obj->sgt);
kfree(msm_obj->sgt);
}
- if (use_pages(obj))
- drm_gem_put_pages(obj, msm_obj->pages, true, false);
- else {
- drm_mm_remove_node(msm_obj->vram_node);
- drm_free_large(msm_obj->pages);
+ if (use_pages(obj)) {
+ if (msm_obj->flags & MSM_BO_SVM) {
+ int npages = obj->size >> PAGE_SHIFT;
+
+ release_pages(msm_obj->pages, npages, 0);
+ kfree(msm_obj->pages);
+ } else {
+ drm_gem_put_pages(obj, msm_obj->pages,
+ true, false);
+ }
+ } else {
+ put_pages_vram(obj);
}
msm_obj->pages = NULL;
@@ -143,11 +319,12 @@ static void put_pages(struct drm_gem_object *obj)
struct page **msm_gem_get_pages(struct drm_gem_object *obj)
{
- struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **p;
- mutex_lock(&dev->struct_mutex);
+
+ mutex_lock(&msm_obj->lock);
p = get_pages(obj);
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&msm_obj->lock);
return p;
}
@@ -161,6 +338,12 @@ int msm_gem_mmap_obj(struct drm_gem_object *obj,
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ /* We can't mmap secure objects or SVM objects */
+ if (msm_obj->flags & (MSM_BO_SECURE | MSM_BO_SVM)) {
+ drm_gem_vm_close(vma);
+ return -EACCES;
+ }
+
vma->vm_flags &= ~VM_PFNMAP;
vma->vm_flags |= VM_MIXEDMAP;
@@ -201,16 +384,17 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct drm_gem_object *obj = vma->vm_private_data;
- struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
int ret;
- /* Make sure we don't parallel update on a fault, nor move or remove
- * something from beneath our feet
+ /*
+ * vm_ops.open and close get and put a reference on obj.
+ * So, we don't need to hold one here.
*/
- ret = mutex_lock_interruptible(&dev->struct_mutex);
+ ret = mutex_lock_interruptible(&msm_obj->lock);
if (ret)
goto out;
@@ -227,13 +411,13 @@ int msm_gem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
pfn = page_to_pfn(pages[pgoff]);
- VERB("Inserting %p pfn %lx, pa %lx", vmf->virtual_address,
+ VERB("Inserting %pK pfn %lx, pa %lx", vmf->virtual_address,
pfn, pfn << PAGE_SHIFT);
ret = vm_insert_mixed(vma, (unsigned long)vmf->virtual_address, pfn);
out_unlock:
- mutex_unlock(&dev->struct_mutex);
+ mutex_unlock(&msm_obj->lock);
out:
switch (ret) {
case -EAGAIN:
@@ -257,9 +441,10 @@ out:
static uint64_t mmap_offset(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
/* Make it mmapable */
ret = drm_gem_create_mmap_offset(obj);
@@ -275,85 +460,156 @@ static uint64_t mmap_offset(struct drm_gem_object *obj)
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj)
{
uint64_t offset;
- mutex_lock(&obj->dev->struct_mutex);
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ mutex_lock(&msm_obj->lock);
offset = mmap_offset(obj);
- mutex_unlock(&obj->dev->struct_mutex);
+ mutex_unlock(&msm_obj->lock);
return offset;
}
-/* should be called under struct_mutex.. although it can be called
- * from atomic context without struct_mutex to acquire an extra
- * iova ref if you know one is already held.
- *
- * That means when I do eventually need to add support for unpinning
- * the refcnt counter needs to be atomic_t.
- */
-int msm_gem_get_iova_locked(struct drm_gem_object *obj, int id,
- uint32_t *iova)
+static void obj_remove_domain(struct msm_gem_vma *domain)
+{
+ if (domain) {
+ list_del(&domain->list);
+ kfree(domain);
+ }
+}
+
+/* Called with msm_obj->lock locked */
+static void
+put_iova(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret = 0;
+ struct msm_gem_svm_object *msm_svm_obj;
+ struct msm_gem_vma *domain, *tmp;
+ bool invalid = false;
- if (!msm_obj->domain[id].iova) {
- struct msm_drm_private *priv = obj->dev->dev_private;
- struct page **pages = get_pages(obj);
+ WARN_ON(!mutex_is_locked(&msm_obj->lock));
- if (IS_ERR(pages))
- return PTR_ERR(pages);
+ if (msm_obj->flags & MSM_BO_SVM) {
+ msm_svm_obj = to_msm_svm_obj(msm_obj);
+ invalid = msm_svm_obj->invalid;
+ }
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
if (iommu_present(&platform_bus_type)) {
- struct msm_mmu *mmu = priv->mmus[id];
- uint32_t offset;
-
- if (WARN_ON(!mmu))
- return -EINVAL;
-
- offset = (uint32_t)mmap_offset(obj);
- ret = mmu->funcs->map(mmu, offset, msm_obj->sgt,
- obj->size, IOMMU_READ | IOMMU_WRITE);
- msm_obj->domain[id].iova = offset;
- } else {
- msm_obj->domain[id].iova = physaddr(obj);
+ msm_gem_unmap_vma(domain->aspace, domain,
+ msm_obj->sgt, get_dmabuf_ptr(obj), invalid);
}
+
+ obj_remove_domain(domain);
}
+}
- if (!ret)
- *iova = msm_obj->domain[id].iova;
+static struct msm_gem_vma *obj_add_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain = kzalloc(sizeof(*domain), GFP_KERNEL);
- return ret;
+ if (!domain)
+ return ERR_PTR(-ENOMEM);
+
+ domain->aspace = aspace;
+
+ list_add_tail(&domain->list, &msm_obj->domains);
+
+ return domain;
}
-/* get iova, taking a reference. Should have a matching put */
-int msm_gem_get_iova(struct drm_gem_object *obj, int id, uint32_t *iova)
+static struct msm_gem_vma *obj_get_domain(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int ret;
+ struct msm_gem_vma *domain;
- /* this is safe right now because we don't unmap until the
- * bo is deleted:
- */
- if (msm_obj->domain[id].iova) {
- *iova = msm_obj->domain[id].iova;
+ list_for_each_entry(domain, &msm_obj->domains, list) {
+ if (domain->aspace == aspace)
+ return domain;
+ }
+
+ return NULL;
+}
+
+#ifndef IOMMU_PRIV
+#define IOMMU_PRIV 0
+#endif
+
+/* A reference to obj must be held before calling this function. */
+int msm_gem_get_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace, uint64_t *iova)
+{
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct page **pages;
+ struct msm_gem_vma *domain;
+ int ret = 0;
+
+ mutex_lock(&msm_obj->lock);
+
+ if (!iommu_present(&platform_bus_type)) {
+ pages = get_pages(obj);
+
+ if (IS_ERR(pages)) {
+ mutex_unlock(&msm_obj->lock);
+ return PTR_ERR(pages);
+ }
+
+ *iova = (uint64_t) physaddr(obj);
+ mutex_unlock(&msm_obj->lock);
return 0;
}
- mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_get_iova_locked(obj, id, iova);
- mutex_unlock(&obj->dev->struct_mutex);
+ domain = obj_get_domain(obj, aspace);
+
+ if (!domain) {
+ domain = obj_add_domain(obj, aspace);
+ if (IS_ERR(domain)) {
+ mutex_unlock(&msm_obj->lock);
+ return PTR_ERR(domain);
+ }
+
+ pages = get_pages(obj);
+ if (IS_ERR(pages)) {
+ obj_remove_domain(domain);
+ mutex_unlock(&msm_obj->lock);
+ return PTR_ERR(pages);
+ }
+
+ ret = msm_gem_map_vma(aspace, domain, msm_obj->sgt,
+ get_dmabuf_ptr(obj), msm_obj->flags);
+ }
+
+ if (!ret)
+ *iova = domain->iova;
+ else
+ obj_remove_domain(domain);
+
+ mutex_unlock(&msm_obj->lock);
return ret;
}
/* get iova without taking a reference, used in places where you have
* already done a 'msm_gem_get_iova()'.
*/
-uint32_t msm_gem_iova(struct drm_gem_object *obj, int id)
+uint64_t msm_gem_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!msm_obj->domain[id].iova);
- return msm_obj->domain[id].iova;
+ struct msm_gem_vma *domain;
+ uint64_t iova;
+
+ mutex_lock(&msm_obj->lock);
+ domain = obj_get_domain(obj, aspace);
+ WARN_ON(!domain);
+ iova = domain ? domain->iova : 0;
+ mutex_unlock(&msm_obj->lock);
+
+ return iova;
}
-void msm_gem_put_iova(struct drm_gem_object *obj, int id)
+void msm_gem_put_iova(struct drm_gem_object *obj,
+ struct msm_gem_address_space *aspace)
{
// XXX TODO ..
// NOTE: probably don't need a _locked() version.. we wouldn't
@@ -393,27 +649,31 @@ fail:
return ret;
}
-void *msm_gem_vaddr_locked(struct drm_gem_object *obj)
+void *msm_gem_vaddr(struct drm_gem_object *obj)
{
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- WARN_ON(!mutex_is_locked(&obj->dev->struct_mutex));
- if (!msm_obj->vaddr) {
+
+ mutex_lock(&msm_obj->lock);
+
+ if (msm_obj->vaddr) {
+ mutex_unlock(&msm_obj->lock);
+ return msm_obj->vaddr;
+ }
+
+ if (obj->import_attach) {
+ msm_obj->vaddr = dma_buf_vmap(obj->import_attach->dmabuf);
+ } else {
struct page **pages = get_pages(obj);
- if (IS_ERR(pages))
+ if (IS_ERR(pages)) {
+ mutex_unlock(&msm_obj->lock);
return ERR_CAST(pages);
+ }
msm_obj->vaddr = vmap(pages, obj->size >> PAGE_SHIFT,
VM_MAP, pgprot_writecombine(PAGE_KERNEL));
}
- return msm_obj->vaddr;
-}
+ mutex_unlock(&msm_obj->lock);
-void *msm_gem_vaddr(struct drm_gem_object *obj)
-{
- void *ret;
- mutex_lock(&obj->dev->struct_mutex);
- ret = msm_gem_vaddr_locked(obj);
- mutex_unlock(&obj->dev->struct_mutex);
- return ret;
+ return msm_obj->vaddr;
}
/* setup callback for when bo is no longer busy..
@@ -482,19 +742,46 @@ int msm_gem_cpu_fini(struct drm_gem_object *obj)
return 0;
}
+void msm_gem_sync(struct drm_gem_object *obj, u32 op)
+{
+ struct drm_device *dev = obj->dev;
+ struct msm_gem_object *msm_obj = to_msm_bo(obj);
+
+ if (msm_obj->flags & (MSM_BO_WC|MSM_BO_UNCACHED))
+ return;
+
+ switch (op) {
+ case MSM_GEM_SYNC_TO_CPU:
+ dma_sync_sg_for_cpu(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ break;
+ case MSM_GEM_SYNC_TO_DEV:
+ dma_sync_sg_for_device(dev->dev, msm_obj->sgt->sgl,
+ msm_obj->sgt->nents, DMA_BIDIRECTIONAL);
+ break;
+ }
+}
+
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m)
{
struct drm_device *dev = obj->dev;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
+ struct msm_gem_vma *domain;
uint64_t off = drm_vma_node_start(&obj->vma_node);
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %p %zu\n",
+ seq_printf(m, "%08x: %c(r=%u,w=%u) %2d (%2d) %08llx %pK\t",
msm_obj->flags, is_active(msm_obj) ? 'A' : 'I',
msm_obj->read_fence, msm_obj->write_fence,
obj->name, obj->refcount.refcount.counter,
- off, msm_obj->vaddr, obj->size);
+ off, msm_obj->vaddr);
+
+ /* FIXME: we need to print the address space here too */
+ list_for_each_entry(domain, &msm_obj->domains, list)
+ seq_printf(m, " %08llx", domain->iova);
+
+ seq_puts(m, "\n");
}
void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
@@ -518,29 +805,33 @@ void msm_gem_describe_objects(struct list_head *list, struct seq_file *m)
void msm_gem_free_object(struct drm_gem_object *obj)
{
struct drm_device *dev = obj->dev;
- struct msm_drm_private *priv = obj->dev->dev_private;
struct msm_gem_object *msm_obj = to_msm_bo(obj);
- int id;
+ struct msm_gem_svm_object *msm_svm_obj = NULL;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
/* object should not be on active list: */
WARN_ON(is_active(msm_obj));
+ if (msm_obj->flags & MSM_BO_SVM)
+ msm_svm_obj = to_msm_svm_obj(msm_obj);
+
list_del(&msm_obj->mm_list);
- for (id = 0; id < ARRAY_SIZE(msm_obj->domain); id++) {
- struct msm_mmu *mmu = priv->mmus[id];
- if (mmu && msm_obj->domain[id].iova) {
- uint32_t offset = msm_obj->domain[id].iova;
- mmu->funcs->unmap(mmu, offset, msm_obj->sgt, obj->size);
- }
+ /* Unregister SVM object from mmu notifications */
+ if (msm_obj->flags & MSM_BO_SVM) {
+ msm_gem_mn_unregister(msm_svm_obj);
+ msm_gem_mn_put(msm_svm_obj->msm_mn);
+ msm_svm_obj->msm_mn = NULL;
}
+ mutex_lock(&msm_obj->lock);
+ put_iova(obj);
+
if (obj->import_attach) {
if (msm_obj->vaddr)
- dma_buf_vunmap(obj->import_attach->dmabuf, msm_obj->vaddr);
-
+ dma_buf_vunmap(obj->import_attach->dmabuf,
+ msm_obj->vaddr);
/* Don't drop the pages for imported dmabuf, as they are not
* ours, just free the array we allocated:
*/
@@ -557,8 +848,12 @@ void msm_gem_free_object(struct drm_gem_object *obj)
reservation_object_fini(msm_obj->resv);
drm_gem_object_release(obj);
+ mutex_unlock(&msm_obj->lock);
- kfree(msm_obj);
+ if (msm_obj->flags & MSM_BO_SVM)
+ kfree(msm_svm_obj);
+ else
+ kfree(msm_obj);
}
/* convenience method to construct a GEM buffer object, and userspace handle */
@@ -568,13 +863,28 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
struct drm_gem_object *obj;
int ret;
- ret = mutex_lock_interruptible(&dev->struct_mutex);
- if (ret)
- return ret;
-
obj = msm_gem_new(dev, size, flags);
- mutex_unlock(&dev->struct_mutex);
+ if (IS_ERR(obj))
+ return PTR_ERR(obj);
+
+ ret = drm_gem_handle_create(file, obj, handle);
+
+ /* drop reference from allocate - handle holds it now */
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ret;
+}
+
+/* convenience method to construct an SVM buffer object, and userspace handle */
+int msm_gem_svm_new_handle(struct drm_device *dev, struct drm_file *file,
+ uint64_t hostptr, uint64_t size,
+ uint32_t flags, uint32_t *handle)
+{
+ struct drm_gem_object *obj;
+ int ret;
+
+ obj = msm_gem_svm_new(dev, file, hostptr, size, flags);
if (IS_ERR(obj))
return PTR_ERR(obj);
@@ -587,13 +897,11 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file,
return ret;
}
-static int msm_gem_new_impl(struct drm_device *dev,
+static int msm_gem_obj_init(struct drm_device *dev,
uint32_t size, uint32_t flags,
- struct drm_gem_object **obj)
+ struct msm_gem_object *msm_obj, bool struct_mutex_locked)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_gem_object *msm_obj;
- unsigned sz;
bool use_vram = false;
switch (flags & MSM_BO_CACHE_MASK) {
@@ -615,43 +923,72 @@ static int msm_gem_new_impl(struct drm_device *dev,
if (WARN_ON(use_vram && !priv->vram.size))
return -EINVAL;
- sz = sizeof(*msm_obj);
- if (use_vram)
- sz += sizeof(struct drm_mm_node);
+ mutex_init(&msm_obj->lock);
- msm_obj = kzalloc(sz, GFP_KERNEL);
- if (!msm_obj)
- return -ENOMEM;
+ if (use_vram) {
+ struct msm_gem_vma *domain = obj_add_domain(&msm_obj->base, 0);
- if (use_vram)
- msm_obj->vram_node = (void *)&msm_obj[1];
+ if (!IS_ERR(domain))
+ msm_obj->vram_node = &domain->node;
+ }
msm_obj->flags = flags;
msm_obj->resv = &msm_obj->_resv;
reservation_object_init(msm_obj->resv);
+ INIT_LIST_HEAD(&msm_obj->mm_list);
INIT_LIST_HEAD(&msm_obj->submit_entry);
- list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ INIT_LIST_HEAD(&msm_obj->domains);
- *obj = &msm_obj->base;
+ if (struct_mutex_locked) {
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ } else {
+ mutex_lock(&dev->struct_mutex);
+ list_add_tail(&msm_obj->mm_list, &priv->inactive_list);
+ mutex_unlock(&dev->struct_mutex);
+ }
return 0;
}
-struct drm_gem_object *msm_gem_new(struct drm_device *dev,
- uint32_t size, uint32_t flags)
+static struct drm_gem_object *msm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags, bool struct_mutex_locked)
{
- struct drm_gem_object *obj = NULL;
+ struct msm_gem_object *msm_obj;
int ret;
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
+ msm_obj = kzalloc(sizeof(*msm_obj), GFP_KERNEL);
+ if (!msm_obj)
+ return ERR_PTR(-ENOMEM);
+
+ ret = msm_gem_obj_init(dev, size, flags, msm_obj, struct_mutex_locked);
+ if (ret) {
+ kfree(msm_obj);
+ return ERR_PTR(ret);
+ }
+
+ return &msm_obj->base;
+}
+
+static struct drm_gem_object *_msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags, bool struct_mutex_locked)
+{
+ struct drm_gem_object *obj;
+ int ret;
size = PAGE_ALIGN(size);
- ret = msm_gem_new_impl(dev, size, flags, &obj);
- if (ret)
- goto fail;
+ /*
+ * Disallow zero sized objects as they make the underlying
+ * infrastructure grumpy
+ */
+ if (!size)
+ return ERR_PTR(-EINVAL);
+
+ obj = msm_gem_new_impl(dev, size, flags, struct_mutex_locked);
+ if (IS_ERR(obj))
+ return obj;
if (use_pages(obj)) {
ret = drm_gem_object_init(dev, obj, size);
@@ -664,14 +1001,166 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev,
return obj;
fail:
- if (obj)
- drm_gem_object_unreference(obj);
+ drm_gem_object_unreference_unlocked(obj);
+
+ return ERR_PTR(ret);
+}
+
+struct drm_gem_object *msm_gem_new_locked(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ return _msm_gem_new(dev, size, flags, true);
+}
+
+struct drm_gem_object *msm_gem_new(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ return _msm_gem_new(dev, size, flags, false);
+}
+
+static struct drm_gem_object *msm_svm_gem_new_impl(struct drm_device *dev,
+ uint32_t size, uint32_t flags)
+{
+ struct msm_gem_svm_object *msm_svm_obj;
+ struct msm_gem_object *msm_obj;
+ int ret;
+
+ msm_svm_obj = kzalloc(sizeof(*msm_svm_obj), GFP_KERNEL);
+ if (!msm_svm_obj)
+ return ERR_PTR(-ENOMEM);
+
+ msm_obj = &msm_svm_obj->msm_obj_base;
+
+ ret = msm_gem_obj_init(dev, size, flags | MSM_BO_SVM, msm_obj, false);
+ if (ret) {
+ kfree(msm_svm_obj);
+ return ERR_PTR(ret);
+ }
+
+ return &msm_obj->base;
+}
+
+/* convenience method to construct an SVM GEM bo, and userspace handle */
+struct drm_gem_object *msm_gem_svm_new(struct drm_device *dev,
+ struct drm_file *file, uint64_t hostptr,
+ uint64_t size, uint32_t flags)
+{
+ struct drm_gem_object *obj;
+ struct msm_file_private *ctx = file->driver_priv;
+ struct msm_gem_address_space *aspace;
+ struct msm_gem_object *msm_obj;
+ struct msm_gem_svm_object *msm_svm_obj;
+ struct msm_gem_vma *domain = NULL;
+ struct page **p;
+ int npages;
+ int num_pinned = 0;
+ int write;
+ int ret;
+
+ if (!ctx)
+ return ERR_PTR(-ENODEV);
+
+ /* if we don't have IOMMU, don't bother pretending we can import: */
+ if (!iommu_present(&platform_bus_type)) {
+ dev_err_once(dev->dev, "cannot import without IOMMU\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* hostptr and size must be page-aligned */
+ if (offset_in_page(hostptr | size))
+ return ERR_PTR(-EINVAL);
+
+ /* Only CPU cached SVM objects are allowed */
+ if ((flags & MSM_BO_CACHE_MASK) != MSM_BO_CACHED)
+ return ERR_PTR(-EINVAL);
+
+ /* Allocate and initialize a new msm_gem_object */
+ obj = msm_svm_gem_new_impl(dev, size, flags);
+ if (IS_ERR(obj))
+ return obj;
+
+ drm_gem_private_object_init(dev, obj, size);
+
+ msm_obj = to_msm_bo(obj);
+ aspace = ctx->aspace;
+ domain = obj_add_domain(&msm_obj->base, aspace);
+ if (IS_ERR(domain)) {
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_CAST(domain);
+ }
+
+ /* Reserve iova if not already in use, else fail */
+ ret = msm_gem_reserve_iova(aspace, domain, hostptr, size);
+ if (ret) {
+ obj_remove_domain(domain);
+ drm_gem_object_unreference_unlocked(obj);
+ return ERR_PTR(ret);
+ }
+
+ msm_svm_obj = to_msm_svm_obj(msm_obj);
+ msm_svm_obj->hostptr = hostptr;
+ msm_svm_obj->invalid = false;
+
+ ret = msm_gem_mn_register(msm_svm_obj, aspace);
+ if (ret)
+ goto fail;
+
+ /*
+ * Get physical pages and map into smmu in the ioctl itself.
+ * The driver handles iova allocation, physical page allocation and
+ * SMMU map all in one go. If we break this, then we have to maintain
+ * state to tell if physical pages allocation/map needs to happen.
+ * For SVM, iova reservation needs to happen in the ioctl itself,
+ * so do the rest right here as well.
+ */
+ npages = size >> PAGE_SHIFT;
+ p = kcalloc(npages, sizeof(struct page *), GFP_KERNEL);
+ if (!p) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ write = (msm_obj->flags & MSM_BO_GPU_READONLY) ? 0 : 1;
+ /* This may hold mm->mmap_sem */
+ num_pinned = get_user_pages_fast(hostptr, npages, write, p);
+ if (num_pinned != npages) {
+ ret = -EINVAL;
+ goto free_pages;
+ }
+
+ msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+ if (IS_ERR(msm_obj->sgt)) {
+ ret = PTR_ERR(msm_obj->sgt);
+ goto free_pages;
+ }
+
+ msm_obj->pages = p;
+
+ ret = aspace->mmu->funcs->map(aspace->mmu, domain->iova,
+ msm_obj->sgt, msm_obj->flags, get_dmabuf_ptr(obj));
+ if (ret)
+ goto free_pages;
+
+ kref_get(&aspace->kref);
+
+ return obj;
+
+free_pages:
+ release_pages(p, num_pinned, 0);
+ kfree(p);
+
+fail:
+ if (domain)
+ msm_gem_release_iova(aspace, domain);
+
+ obj_remove_domain(domain);
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
struct drm_gem_object *msm_gem_import(struct drm_device *dev,
- uint32_t size, struct sg_table *sgt)
+ uint32_t size, struct sg_table *sgt, u32 flags)
{
struct msm_gem_object *msm_obj;
struct drm_gem_object *obj;
@@ -685,31 +1174,167 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev,
size = PAGE_ALIGN(size);
- ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj);
- if (ret)
- goto fail;
+ obj = msm_gem_new_impl(dev, size, MSM_BO_WC, false);
+ if (IS_ERR(obj))
+ return obj;
drm_gem_private_object_init(dev, obj, size);
npages = size / PAGE_SIZE;
msm_obj = to_msm_bo(obj);
+ mutex_lock(&msm_obj->lock);
msm_obj->sgt = sgt;
msm_obj->pages = drm_malloc_ab(npages, sizeof(struct page *));
if (!msm_obj->pages) {
+ mutex_unlock(&msm_obj->lock);
ret = -ENOMEM;
goto fail;
}
- ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages, NULL, npages);
- if (ret)
+ /* OR the passed in flags */
+ msm_obj->flags |= flags;
+
+ ret = drm_prime_sg_to_page_addr_arrays(sgt, msm_obj->pages,
+ NULL, npages);
+ if (ret) {
+ mutex_unlock(&msm_obj->lock);
goto fail;
+ }
+
+ mutex_unlock(&msm_obj->lock);
return obj;
fail:
- if (obj)
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_unreference_unlocked(obj);
return ERR_PTR(ret);
}
+
+/* Timeout in ms, long enough so we are sure the GPU is hung */
+#define SVM_OBJ_WAIT_TIMEOUT 10000
+static void invalidate_svm_object(struct msm_gem_svm_object *msm_svm_obj)
+{
+ struct msm_gem_object *msm_obj = &msm_svm_obj->msm_obj_base;
+ struct drm_device *dev = msm_obj->base.dev;
+ struct msm_gem_vma *domain, *tmp;
+ uint32_t fence;
+ int ret;
+
+ if (is_active(msm_obj)) {
+ ktime_t timeout = ktime_add_ms(ktime_get(),
+ SVM_OBJ_WAIT_TIMEOUT);
+
+ /* Get the most recent fence that touches the object */
+ fence = msm_gem_fence(msm_obj, MSM_PREP_READ | MSM_PREP_WRITE);
+
+ /* Wait for the fence to retire */
+ ret = msm_wait_fence(dev, fence, &timeout, true);
+ if (ret)
+ /* The GPU could be hung! Not much we can do */
+ dev_err(dev->dev, "drm: Error (%d) waiting for svm object: 0x%llx",
+ ret, msm_svm_obj->hostptr);
+ }
+
+ /* GPU is done, unmap object from SMMU */
+ mutex_lock(&msm_obj->lock);
+ list_for_each_entry_safe(domain, tmp, &msm_obj->domains, list) {
+ struct msm_gem_address_space *aspace = domain->aspace;
+
+ if (domain->iova)
+ aspace->mmu->funcs->unmap(aspace->mmu,
+ domain->iova, msm_obj->sgt,
+ get_dmabuf_ptr(&msm_obj->base));
+ }
+ /* Let go of the physical pages */
+ put_pages(&msm_obj->base);
+ mutex_unlock(&msm_obj->lock);
+}
+
+void msm_mn_invalidate_range_start(struct mmu_notifier *mn,
+ struct mm_struct *mm, unsigned long start, unsigned long end)
+{
+ struct msm_mmu_notifier *msm_mn =
+ container_of(mn, struct msm_mmu_notifier, mn);
+ struct interval_tree_node *itn = NULL;
+ struct msm_gem_svm_object *msm_svm_obj;
+ struct drm_gem_object *obj;
+ LIST_HEAD(inv_list);
+
+ if (!msm_gem_mn_get(msm_mn))
+ return;
+
+ spin_lock(&msm_mn->svm_tree_lock);
+ itn = interval_tree_iter_first(&msm_mn->svm_tree, start, end - 1);
+ while (itn) {
+ msm_svm_obj = container_of(itn,
+ struct msm_gem_svm_object, svm_node);
+ obj = &msm_svm_obj->msm_obj_base.base;
+
+ if (kref_get_unless_zero(&obj->refcount))
+ list_add(&msm_svm_obj->lnode, &inv_list);
+
+ itn = interval_tree_iter_next(itn, start, end - 1);
+ }
+ spin_unlock(&msm_mn->svm_tree_lock);
+
+ list_for_each_entry(msm_svm_obj, &inv_list, lnode) {
+ obj = &msm_svm_obj->msm_obj_base.base;
+ /* Unregister SVM object from mmu notifications */
+ msm_gem_mn_unregister(msm_svm_obj);
+ msm_svm_obj->invalid = true;
+ invalidate_svm_object(msm_svm_obj);
+ drm_gem_object_unreference_unlocked(obj);
+ }
+
+ msm_gem_mn_put(msm_mn);
+}
+
+/*
+ * Helper function to consolidate in-kernel buffer allocations that usually need
+ * to allocate a buffer object, iova and a virtual address all in one shot
+ */
+static void *_msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova, bool locked)
+{
+ void *vaddr;
+ struct drm_gem_object *obj = _msm_gem_new(dev, size, flags, locked);
+ int ret;
+
+ if (IS_ERR(obj))
+ return ERR_CAST(obj);
+
+ ret = msm_gem_get_iova(obj, aspace, iova);
+ if (ret) {
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(ret);
+ }
+
+ vaddr = msm_gem_vaddr(obj);
+ if (!vaddr) {
+ msm_gem_put_iova(obj, aspace);
+ drm_gem_object_unreference(obj);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ *bo = obj;
+ return vaddr;
+}
+
+void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+ false);
+}
+
+void *msm_gem_kernel_new_locked(struct drm_device *dev, uint32_t size,
+ uint32_t flags, struct msm_gem_address_space *aspace,
+ struct drm_gem_object **bo, uint64_t *iova)
+{
+ return _msm_gem_kernel_new(dev, size, flags, aspace, bo, iova,
+ true);
+}
diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h
index 6fc59bfeedeb..0cd458fd184b 100644
--- a/drivers/gpu/drm/msm/msm_gem.h
+++ b/drivers/gpu/drm/msm/msm_gem.h
@@ -18,11 +18,33 @@
#ifndef __MSM_GEM_H__
#define __MSM_GEM_H__
+#include <linux/kref.h>
#include <linux/reservation.h>
+#include <linux/mmu_notifier.h>
+#include <linux/interval_tree.h>
#include "msm_drv.h"
/* Additional internal-use only BO flags: */
#define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */
+#define MSM_BO_LOCKED 0x20000000 /* Pages have been securely locked */
+#define MSM_BO_SVM 0x40000000 /* bo is SVM */
+
+struct msm_gem_address_space {
+ const char *name;
+ struct msm_mmu *mmu;
+ struct kref kref;
+ struct drm_mm mm;
+ spinlock_t lock; /* Protects drm_mm node allocation/removal */
+ u64 va_len;
+};
+
+struct msm_gem_vma {
+ /* Node used by the GPU address space, but not the SDE address space */
+ struct drm_mm_node node;
+ struct msm_gem_address_space *aspace;
+ uint64_t iova;
+ struct list_head list;
+};
struct msm_gem_object {
struct drm_gem_object base;
@@ -52,10 +74,7 @@ struct msm_gem_object {
struct sg_table *sgt;
void *vaddr;
- struct {
- // XXX
- uint32_t iova;
- } domain[NUM_DOMAINS];
+ struct list_head domains;
/* normally (resv == &_resv) except for imported bo's */
struct reservation_object *resv;
@@ -65,9 +84,36 @@ struct msm_gem_object {
* an IOMMU. Also used for stolen/splashscreen buffer.
*/
struct drm_mm_node *vram_node;
+ struct mutex lock; /* Protects resources associated with bo */
};
#define to_msm_bo(x) container_of(x, struct msm_gem_object, base)
+struct msm_mmu_notifier {
+ struct mmu_notifier mn;
+ struct mm_struct *mm; /* mm_struct owning the mmu notifier mn */
+ struct hlist_node node;
+ struct rb_root svm_tree; /* interval tree holding all svm bos */
+ spinlock_t svm_tree_lock; /* Protects svm_tree*/
+ struct msm_drm_private *msm_dev;
+ struct kref refcount;
+};
+
+struct msm_gem_svm_object {
+ struct msm_gem_object msm_obj_base;
+ uint64_t hostptr;
+ struct mm_struct *mm; /* mm_struct the svm bo belongs to */
+ struct interval_tree_node svm_node;
+ struct msm_mmu_notifier *msm_mn;
+ struct list_head lnode;
+ /* bo has been unmapped on CPU, cannot be part of GPU submits */
+ bool invalid;
+};
+
+#define to_msm_svm_obj(x) \
+ ((struct msm_gem_svm_object *) \
+ container_of(x, struct msm_gem_svm_object, msm_obj_base))
+
+
static inline bool is_active(struct msm_gem_object *msm_obj)
{
return msm_obj->gpu != NULL;
@@ -86,7 +132,8 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
return fence;
}
-#define MAX_CMDS 4
+/* Internal submit flags */
+#define SUBMIT_FLAG_SKIP_HANGCHECK 0x00000001
/* Created per submit-ioctl, to track bo's and cmdstream bufs, etc,
* associated with the cmdstream submission for synchronization (and
@@ -95,24 +142,30 @@ static inline uint32_t msm_gem_fence(struct msm_gem_object *msm_obj,
*/
struct msm_gem_submit {
struct drm_device *dev;
- struct msm_gpu *gpu;
- struct list_head node; /* node in gpu submit_list */
+ struct msm_gem_address_space *aspace;
+ struct list_head node; /* node in ring submit list */
struct list_head bo_list;
struct ww_acquire_ctx ticket;
uint32_t fence;
- bool valid;
+ int ring;
+ u32 flags;
+ uint64_t profile_buf_iova;
+ struct drm_msm_gem_submit_profile_buffer *profile_buf;
+ bool secure;
+ struct msm_gpu_submitqueue *queue;
+ int tick_index;
unsigned int nr_cmds;
unsigned int nr_bos;
struct {
uint32_t type;
uint32_t size; /* in dwords */
- uint32_t iova;
+ uint64_t iova;
uint32_t idx; /* cmdstream buffer idx in bos[] */
- } cmd[MAX_CMDS];
+ } *cmd; /* array of size nr_cmds */
struct {
uint32_t flags;
struct msm_gem_object *obj;
- uint32_t iova;
+ uint64_t iova;
} bos[0];
};
diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c
index 1fbddc5c7281..9f3c097d011b 100644
--- a/drivers/gpu/drm/msm/msm_gem_prime.c
+++ b/drivers/gpu/drm/msm/msm_gem_prime.c
@@ -19,6 +19,7 @@
#include "msm_gem.h"
#include <linux/dma-buf.h>
+#include <linux/ion.h>
struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj)
{
@@ -55,7 +56,16 @@ int msm_gem_prime_mmap(struct drm_gem_object *obj, struct vm_area_struct *vma)
struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev,
struct dma_buf_attachment *attach, struct sg_table *sg)
{
- return msm_gem_import(dev, attach->dmabuf->size, sg);
+ u32 flags = 0;
+
+ /*
+ * Check to see if this is a secure buffer by way of Ion and set the
+ * appropriate flag if so.
+ */
+ if (ion_dma_buf_is_secure(attach->dmabuf))
+ flags |= MSM_BO_SECURE;
+
+ return msm_gem_import(dev, attach->dmabuf->size, sg, flags);
}
int msm_gem_prime_pin(struct drm_gem_object *obj)
diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c
index 2422be9a6828..af36b95beadb 100644
--- a/drivers/gpu/drm/msm/msm_gem_submit.c
+++ b/drivers/gpu/drm/msm/msm_gem_submit.c
@@ -18,6 +18,7 @@
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_gem.h"
+#include "msm_trace.h"
/*
* Cmdstream submission:
@@ -29,10 +30,13 @@
#define BO_PINNED 0x2000
static struct msm_gem_submit *submit_create(struct drm_device *dev,
- struct msm_gpu *gpu, uint32_t nr)
+ struct msm_gem_address_space *aspace,
+ uint32_t nr_bos, uint32_t nr_cmds,
+ struct msm_gpu_submitqueue *queue)
{
struct msm_gem_submit *submit;
- uint64_t sz = sizeof(*submit) + ((u64)nr * sizeof(submit->bos[0]));
+ uint64_t sz = sizeof(*submit) + ((u64)nr_bos * sizeof(submit->bos[0])) +
+ ((u64)nr_cmds * sizeof(submit->cmd[0]));
if (sz > SIZE_MAX)
return NULL;
@@ -40,12 +44,24 @@ static struct msm_gem_submit *submit_create(struct drm_device *dev,
submit = kmalloc(sz, GFP_TEMPORARY | __GFP_NOWARN | __GFP_NORETRY);
if (submit) {
submit->dev = dev;
- submit->gpu = gpu;
+ submit->aspace = aspace;
+ submit->queue = queue;
/* initially, until copy_from_user() and bo lookup succeeds: */
submit->nr_bos = 0;
submit->nr_cmds = 0;
+ submit->profile_buf = NULL;
+ submit->profile_buf_iova = 0;
+ submit->cmd = (void *)&submit->bos[nr_bos];
+
+ submit->secure = false;
+
+ /*
+ * Initalize node so we can safely list_del() on it if
+ * we fail in the submit path
+ */
+ INIT_LIST_HEAD(&submit->node);
INIT_LIST_HEAD(&submit->bo_list);
ww_acquire_init(&submit->ticket, &reservation_ww_class);
}
@@ -61,7 +77,18 @@ copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
return -EFAULT;
}
-static int submit_lookup_objects(struct msm_gem_submit *submit,
+void msm_gem_submit_free(struct msm_gem_submit *submit)
+{
+ if (!submit)
+ return;
+
+ msm_submitqueue_put(submit->queue);
+ list_del(&submit->node);
+ kfree(submit);
+}
+
+static int submit_lookup_objects(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit,
struct drm_msm_gem_submit *args, struct drm_file *file)
{
unsigned i;
@@ -77,13 +104,16 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
void __user *userptr =
u64_to_user_ptr(args->bos + (i * sizeof(submit_bo)));
- ret = copy_from_user_inatomic(&submit_bo, userptr, sizeof(submit_bo));
- if (unlikely(ret)) {
+ if (copy_from_user_inatomic(&submit_bo, userptr,
+ sizeof(submit_bo))) {
pagefault_enable();
spin_unlock(&file->table_lock);
- ret = copy_from_user(&submit_bo, userptr, sizeof(submit_bo));
- if (ret)
+ if (copy_from_user(&submit_bo, userptr,
+ sizeof(submit_bo))) {
+ ret = -EFAULT;
goto out;
+ }
+
spin_lock(&file->table_lock);
pagefault_disable();
}
@@ -111,6 +141,20 @@ static int submit_lookup_objects(struct msm_gem_submit *submit,
msm_obj = to_msm_bo(obj);
+ /*
+ * If the buffer is marked as secure make sure that we can
+ * handle secure buffers and then mark the submission as secure
+ */
+ if (msm_obj->flags & MSM_BO_SECURE) {
+ if (!gpu->secure_aspace) {
+ DRM_ERROR("Cannot handle secure buffers\n");
+ ret = -EINVAL;
+ goto out_unlock;
+ }
+
+ submit->secure = true;
+ }
+
if (!list_empty(&msm_obj->submit_entry)) {
DRM_ERROR("handle %u at index %u already on submit list\n",
submit_bo.handle, i);
@@ -135,12 +179,17 @@ out:
return ret;
}
-static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
+static void submit_unlock_unpin_bo(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, int i)
{
struct msm_gem_object *msm_obj = submit->bos[i].obj;
+ struct msm_gem_address_space *aspace;
+
+ aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+ gpu->secure_aspace : submit->aspace;
if (submit->bos[i].flags & BO_PINNED)
- msm_gem_put_iova(&msm_obj->base, submit->gpu->id);
+ msm_gem_put_iova(&msm_obj->base, aspace);
if (submit->bos[i].flags & BO_LOCKED)
ww_mutex_unlock(&msm_obj->resv->lock);
@@ -152,16 +201,14 @@ static void submit_unlock_unpin_bo(struct msm_gem_submit *submit, int i)
}
/* This is where we make sure all the bo's are reserved and pin'd: */
-static int submit_validate_objects(struct msm_gem_submit *submit)
+static int submit_validate_objects(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit)
{
int contended, slow_locked = -1, i, ret = 0;
retry:
- submit->valid = true;
-
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- uint32_t iova;
if (slow_locked == i)
slow_locked = -1;
@@ -176,28 +223,17 @@ retry:
submit->bos[i].flags |= BO_LOCKED;
}
-
- /* if locking succeeded, pin bo: */
- ret = msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
-
- /* this would break the logic in the fail path.. there is no
- * reason for this to happen, but just to be on the safe side
- * let's notice if this starts happening in the future:
+ /*
+ * An invalid SVM object is part of
+ * this submit's buffer list, fail.
*/
- WARN_ON(ret == -EDEADLK);
-
- if (ret)
- goto fail;
-
- submit->bos[i].flags |= BO_PINNED;
-
- if (iova == submit->bos[i].iova) {
- submit->bos[i].flags |= BO_VALID;
- } else {
- submit->bos[i].iova = iova;
- submit->bos[i].flags &= ~BO_VALID;
- submit->valid = false;
+ if (msm_obj->flags & MSM_BO_SVM) {
+ struct msm_gem_svm_object *msm_svm_obj =
+ to_msm_svm_obj(msm_obj);
+ if (msm_svm_obj->invalid) {
+ ret = -EINVAL;
+ goto fail;
+ }
}
}
@@ -207,10 +243,10 @@ retry:
fail:
for (; i >= 0; i--)
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(gpu, submit, i);
if (slow_locked > 0)
- submit_unlock_unpin_bo(submit, slow_locked);
+ submit_unlock_unpin_bo(gpu, submit, slow_locked);
if (ret == -EDEADLK) {
struct msm_gem_object *msm_obj = submit->bos[contended].obj;
@@ -227,9 +263,14 @@ fail:
return ret;
}
-static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
- struct msm_gem_object **obj, uint32_t *iova, bool *valid)
+static int submit_bo(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, uint32_t idx,
+ struct msm_gem_object **obj, uint64_t *iova, bool *valid)
{
+ struct msm_gem_object *msm_obj;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
if (idx >= submit->nr_bos) {
DRM_ERROR("invalid buffer index: %u (out of %u)\n",
idx, submit->nr_bos);
@@ -238,6 +279,39 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
if (obj)
*obj = submit->bos[idx].obj;
+
+ /* Only map and pin if the caller needs either the iova or valid */
+ if (!iova && !valid)
+ return 0;
+
+ if (!(submit->bos[idx].flags & BO_PINNED)) {
+ uint64_t buf_iova;
+
+ msm_obj = submit->bos[idx].obj;
+ aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+ gpu->secure_aspace : submit->aspace;
+
+ ret = msm_gem_get_iova(&msm_obj->base, aspace, &buf_iova);
+
+ /* this would break the logic in the fail path.. there is no
+ * reason for this to happen, but just to be on the safe side
+ * let's notice if this starts happening in the future:
+ */
+ WARN_ON(ret == -EDEADLK);
+
+ if (ret)
+ return ret;
+
+ submit->bos[idx].flags |= BO_PINNED;
+
+ if (buf_iova == submit->bos[idx].iova) {
+ submit->bos[idx].flags |= BO_VALID;
+ } else {
+ submit->bos[idx].iova = buf_iova;
+ submit->bos[idx].flags &= ~BO_VALID;
+ }
+ }
+
if (iova)
*iova = submit->bos[idx].iova;
if (valid)
@@ -247,8 +321,10 @@ static int submit_bo(struct msm_gem_submit *submit, uint32_t idx,
}
/* process the reloc's and patch up the cmdstream as needed: */
-static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *obj,
- uint32_t offset, uint32_t nr_relocs, uint64_t relocs)
+static int submit_reloc(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit,
+ struct msm_gem_object *obj, uint32_t offset,
+ uint32_t nr_relocs, uint64_t relocs)
{
uint32_t i, last_offset = 0;
uint32_t *ptr;
@@ -259,10 +335,22 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return -EINVAL;
}
+ if (obj->flags & MSM_BO_SECURE) {
+ DRM_ERROR("cannot do relocs on a secure buffer\n");
+ return -EINVAL;
+ }
+
+ if (nr_relocs == 0)
+ return 0;
+
/* For now, just map the entire thing. Eventually we probably
* to do it page-by-page, w/ kmap() if not vmap()d..
*/
- ptr = msm_gem_vaddr_locked(&obj->base);
+ ptr = msm_gem_vaddr(&obj->base);
+ if (!ptr) {
+ DRM_ERROR("Invalid format");
+ return -EINVAL;
+ }
if (IS_ERR(ptr)) {
ret = PTR_ERR(ptr);
@@ -274,11 +362,12 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
struct drm_msm_gem_submit_reloc submit_reloc;
void __user *userptr =
u64_to_user_ptr(relocs + (i * sizeof(submit_reloc)));
- uint32_t iova, off;
+ uint64_t iova;
+ uint32_t off;
bool valid;
- ret = copy_from_user(&submit_reloc, userptr, sizeof(submit_reloc));
- if (ret)
+ if (copy_from_user(&submit_reloc, userptr,
+ sizeof(submit_reloc)))
return -EFAULT;
if (submit_reloc.submit_offset % 4) {
@@ -296,7 +385,8 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return -EINVAL;
}
- ret = submit_bo(submit, submit_reloc.reloc_idx, NULL, &iova, &valid);
+ ret = submit_bo(gpu, submit, submit_reloc.reloc_idx,
+ NULL, &iova, &valid);
if (ret)
return ret;
@@ -318,13 +408,17 @@ static int submit_reloc(struct msm_gem_submit *submit, struct msm_gem_object *ob
return 0;
}
-static void submit_cleanup(struct msm_gem_submit *submit, bool fail)
+static void submit_cleanup(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ bool fail)
{
unsigned i;
+ if (!submit)
+ return;
+
for (i = 0; i < submit->nr_bos; i++) {
struct msm_gem_object *msm_obj = submit->bos[i].obj;
- submit_unlock_unpin_bo(submit, i);
+ submit_unlock_unpin_bo(gpu, submit, i);
list_del_init(&msm_obj->submit_entry);
drm_gem_object_unreference(&msm_obj->base);
}
@@ -339,6 +433,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_msm_gem_submit *args = data;
struct msm_file_private *ctx = file->driver_priv;
struct msm_gem_submit *submit;
+ struct msm_gpu_submitqueue *queue;
struct msm_gpu *gpu;
unsigned i;
int ret;
@@ -346,27 +441,31 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
/* for now, we just have 3d pipe.. eventually this would need to
* be more clever to dispatch to appropriate gpu module:
*/
- if (args->pipe != MSM_PIPE_3D0)
+ if (MSM_PIPE_ID(args->flags) != MSM_PIPE_3D0)
return -EINVAL;
gpu = priv->gpu;
+ if (!gpu || !ctx)
+ return -ENXIO;
- if (args->nr_cmds > MAX_CMDS)
- return -EINVAL;
+ queue = msm_submitqueue_get(ctx, args->queueid);
+ if (!queue)
+ return -ENOENT;
mutex_lock(&dev->struct_mutex);
- submit = submit_create(dev, gpu, args->nr_bos);
+ submit = submit_create(dev, ctx->aspace, args->nr_bos, args->nr_cmds,
+ queue);
if (!submit) {
ret = -ENOMEM;
goto out;
}
- ret = submit_lookup_objects(submit, args, file);
+ ret = submit_lookup_objects(gpu, submit, args, file);
if (ret)
goto out;
- ret = submit_validate_objects(submit);
+ ret = submit_validate_objects(gpu, submit);
if (ret)
goto out;
@@ -375,7 +474,8 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
void __user *userptr =
u64_to_user_ptr(args->cmds + (i * sizeof(submit_cmd)));
struct msm_gem_object *msm_obj;
- uint32_t iova;
+ uint64_t iova;
+ size_t size;
ret = copy_from_user(&submit_cmd, userptr, sizeof(submit_cmd));
if (ret) {
@@ -388,6 +488,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
case MSM_SUBMIT_CMD_BUF:
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ case MSM_SUBMIT_CMD_PROFILE_BUF:
break;
default:
DRM_ERROR("invalid type: %08x\n", submit_cmd.type);
@@ -395,7 +496,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- ret = submit_bo(submit, submit_cmd.submit_idx,
+ ret = submit_bo(gpu, submit, submit_cmd.submit_idx,
&msm_obj, &iova, NULL);
if (ret)
goto out;
@@ -407,9 +508,12 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
goto out;
}
- if ((submit_cmd.size + submit_cmd.submit_offset) >=
- msm_obj->base.size) {
- DRM_ERROR("invalid cmdstream size: %u\n", submit_cmd.size);
+ size = submit_cmd.size + submit_cmd.submit_offset;
+
+ if (!submit_cmd.size || (size < submit_cmd.size) ||
+ (size > msm_obj->base.size)) {
+ DRM_ERROR("invalid cmdstream offset/size: %u/%u\n",
+ submit_cmd.submit_offset, submit_cmd.size);
ret = -EINVAL;
goto out;
}
@@ -419,24 +523,32 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
submit->cmd[i].iova = iova + submit_cmd.submit_offset;
submit->cmd[i].idx = submit_cmd.submit_idx;
- if (submit->valid)
- continue;
+ if (submit_cmd.type == MSM_SUBMIT_CMD_PROFILE_BUF) {
+ submit->profile_buf_iova = submit->cmd[i].iova;
+ submit->profile_buf = msm_gem_vaddr(&msm_obj->base)
+ + submit_cmd.submit_offset;
+ }
- ret = submit_reloc(submit, msm_obj, submit_cmd.submit_offset,
- submit_cmd.nr_relocs, submit_cmd.relocs);
+ ret = submit_reloc(gpu, submit, msm_obj,
+ submit_cmd.submit_offset, submit_cmd.nr_relocs,
+ submit_cmd.relocs);
if (ret)
goto out;
}
submit->nr_cmds = i;
- ret = msm_gpu_submit(gpu, submit, ctx);
+ /* Clamp the user submitted ring to the range of available rings */
+ submit->ring = clamp_t(uint32_t, queue->prio, 0, gpu->nr_rings - 1);
+
+ ret = msm_gpu_submit(gpu, submit);
args->fence = submit->fence;
out:
- if (submit)
- submit_cleanup(submit, !!ret);
+ submit_cleanup(gpu, submit, !!ret);
+ if (ret)
+ msm_gem_submit_free(submit);
mutex_unlock(&dev->struct_mutex);
return ret;
}
diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c
new file mode 100644
index 000000000000..a5a768a63858
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_gem_vma.c
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2016 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include "msm_drv.h"
+#include "msm_gem.h"
+#include "msm_iommu.h"
+
+static void
+msm_gem_address_space_destroy(struct kref *kref)
+{
+ struct msm_gem_address_space *aspace = container_of(kref,
+ struct msm_gem_address_space, kref);
+
+ if (aspace->va_len)
+ drm_mm_takedown(&aspace->mm);
+
+ aspace->mmu->funcs->destroy(aspace->mmu);
+
+ kfree(aspace);
+}
+
+void msm_gem_address_space_put(struct msm_gem_address_space *aspace)
+{
+ if (aspace)
+ kref_put(&aspace->kref, msm_gem_address_space_destroy);
+}
+
+static struct msm_gem_address_space *
+msm_gem_address_space_new(struct msm_mmu *mmu, const char *name,
+ uint64_t start, uint64_t end)
+{
+ struct msm_gem_address_space *aspace;
+
+ if (!mmu)
+ return ERR_PTR(-EINVAL);
+
+ aspace = kzalloc(sizeof(*aspace), GFP_KERNEL);
+ if (!aspace)
+ return ERR_PTR(-ENOMEM);
+
+ spin_lock_init(&aspace->lock);
+ aspace->name = name;
+ aspace->mmu = mmu;
+
+ aspace->va_len = end - start;
+
+ if (aspace->va_len)
+ drm_mm_init(&aspace->mm, (start >> PAGE_SHIFT),
+ (aspace->va_len >> PAGE_SHIFT));
+
+ kref_init(&aspace->kref);
+
+ return aspace;
+}
+
+static int allocate_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ u64 *iova)
+{
+ struct scatterlist *sg;
+ size_t size = 0;
+ int ret, i;
+
+ if (!aspace->va_len)
+ return 0;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ size += sg->length + sg->offset;
+
+ spin_lock(&aspace->lock);
+
+ if (WARN_ON(drm_mm_node_allocated(&vma->node))) {
+ spin_unlock(&aspace->lock);
+ return 0;
+ }
+ ret = drm_mm_insert_node(&aspace->mm, &vma->node,
+ size >> PAGE_SHIFT, 0, DRM_MM_SEARCH_BOTTOM_UP);
+
+ spin_unlock(&aspace->lock);
+
+ if (!ret && iova)
+ *iova = vma->node.start << PAGE_SHIFT;
+
+ return ret;
+}
+
+int msm_gem_reserve_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma,
+ uint64_t hostptr, uint64_t size)
+{
+ struct drm_mm *mm = &aspace->mm;
+ uint64_t start = hostptr >> PAGE_SHIFT;
+ uint64_t last = (hostptr + size - 1) >> PAGE_SHIFT;
+ int ret;
+
+ spin_lock(&aspace->lock);
+
+ if (drm_mm_interval_first(mm, start, last)) {
+ /* iova already in use, fail */
+ spin_unlock(&aspace->lock);
+ return -EADDRINUSE;
+ }
+
+ vma->node.start = hostptr >> PAGE_SHIFT;
+ vma->node.size = size >> PAGE_SHIFT;
+ vma->node.color = 0;
+
+ ret = drm_mm_reserve_node(mm, &vma->node);
+ if (!ret)
+ vma->iova = hostptr;
+
+ spin_unlock(&aspace->lock);
+
+ return ret;
+}
+
+void msm_gem_release_iova(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma)
+{
+ spin_lock(&aspace->lock);
+ if (drm_mm_node_allocated(&vma->node))
+ drm_mm_remove_node(&vma->node);
+ spin_unlock(&aspace->lock);
+}
+
+int msm_gem_map_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, unsigned int flags)
+{
+ u64 iova = 0;
+ int ret;
+
+ if (!aspace)
+ return -EINVAL;
+
+ ret = allocate_iova(aspace, vma, sgt, &iova);
+ if (ret)
+ return ret;
+
+ ret = aspace->mmu->funcs->map(aspace->mmu, iova, sgt,
+ flags, priv);
+
+ if (ret) {
+ msm_gem_release_iova(aspace, vma);
+ return ret;
+ }
+
+ vma->iova = sg_dma_address(sgt->sgl);
+ kref_get(&aspace->kref);
+
+ return 0;
+}
+
+void msm_gem_unmap_vma(struct msm_gem_address_space *aspace,
+ struct msm_gem_vma *vma, struct sg_table *sgt,
+ void *priv, bool invalidated)
+{
+ if (!aspace || !vma->iova)
+ return;
+
+ if (!invalidated)
+ aspace->mmu->funcs->unmap(aspace->mmu, vma->iova, sgt, priv);
+
+ msm_gem_release_iova(aspace, vma);
+
+ vma->iova = 0;
+
+ msm_gem_address_space_put(aspace);
+}
+
+struct msm_gem_address_space *
+msm_gem_smmu_address_space_create(struct device *dev, struct msm_mmu *mmu,
+ const char *name)
+{
+ return msm_gem_address_space_new(mmu, name, 0, 0);
+}
+
+struct msm_gem_address_space *
+msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
+ int type, const char *name)
+{
+ struct msm_mmu *mmu = msm_iommu_new(dev, type, domain);
+
+ if (IS_ERR(mmu))
+ return (struct msm_gem_address_space *) mmu;
+
+ return msm_gem_address_space_new(mmu, name,
+ domain->geometry.aperture_start,
+ domain->geometry.aperture_end);
+}
+
+/* Create a new dynamic instance */
+struct msm_gem_address_space *
+msm_gem_address_space_create_instance(struct msm_mmu *parent, const char *name,
+ uint64_t start, uint64_t end)
+{
+ struct msm_mmu *child = msm_iommu_new_dynamic(parent);
+
+ if (IS_ERR(child))
+ return (struct msm_gem_address_space *) child;
+
+ return msm_gem_address_space_new(child, name, start, end);
+}
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 6b02ada6579a..50dd710aa510 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -18,7 +18,7 @@
#include "msm_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
-
+#include "msm_trace.h"
/*
* Power Management:
@@ -90,21 +90,20 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->active_level];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
- clk_prepare(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
- if (rate_clk && gpu->fast_rate)
- clk_set_rate(rate_clk, gpu->fast_rate);
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 19200000);
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ clk_prepare(gpu->grp_clks[i]);
+
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_enable(gpu->grp_clks[i]);
@@ -113,24 +112,23 @@ static int enable_clk(struct msm_gpu *gpu)
static int disable_clk(struct msm_gpu *gpu)
{
- struct clk *rate_clk = NULL;
+ uint32_t rate = gpu->gpufreq[gpu->nr_pwrlevels - 1];
int i;
- /* NOTE: kgsl_pwrctrl_clk() ignores grp_clks[0].. */
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--) {
- if (gpu->grp_clks[i]) {
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
clk_disable(gpu->grp_clks[i]);
- rate_clk = gpu->grp_clks[i];
- }
- }
- if (rate_clk && gpu->slow_rate)
- clk_set_rate(rate_clk, gpu->slow_rate);
-
- for (i = ARRAY_SIZE(gpu->grp_clks) - 1; i > 0; i--)
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
if (gpu->grp_clks[i])
clk_unprepare(gpu->grp_clks[i]);
+ if (gpu->core_clk)
+ clk_set_rate(gpu->core_clk, rate);
+
+ if (gpu->rbbmtimer_clk)
+ clk_set_rate(gpu->rbbmtimer_clk, 0);
+
return 0;
}
@@ -138,8 +136,9 @@ static int enable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_prepare_enable(gpu->ebi1_clk);
- if (gpu->bus_freq)
- bs_set(gpu, gpu->bus_freq);
+
+ if (gpu->busfreq[gpu->active_level])
+ bs_set(gpu, gpu->busfreq[gpu->active_level]);
return 0;
}
@@ -147,25 +146,17 @@ static int disable_axi(struct msm_gpu *gpu)
{
if (gpu->ebi1_clk)
clk_disable_unprepare(gpu->ebi1_clk);
- if (gpu->bus_freq)
+
+ if (gpu->busfreq[gpu->active_level])
bs_set(gpu, 0);
return 0;
}
int msm_gpu_pm_resume(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (gpu->active_cnt++ > 0)
- return 0;
-
- if (WARN_ON(gpu->active_cnt <= 0))
- return -EINVAL;
+ DBG("%s", gpu->name);
ret = enable_pwrrail(gpu);
if (ret)
@@ -179,23 +170,22 @@ int msm_gpu_pm_resume(struct msm_gpu *gpu)
if (ret)
return ret;
+ if (gpu->aspace && gpu->aspace->mmu)
+ msm_mmu_enable(gpu->aspace->mmu);
+
+ gpu->needs_hw_init = true;
+
return 0;
}
int msm_gpu_pm_suspend(struct msm_gpu *gpu)
{
- struct drm_device *dev = gpu->dev;
int ret;
- DBG("%s: active_cnt=%d", gpu->name, gpu->active_cnt);
-
- WARN_ON(!mutex_is_locked(&dev->struct_mutex));
-
- if (--gpu->active_cnt > 0)
- return 0;
+ DBG("%s", gpu->name);
- if (WARN_ON(gpu->active_cnt < 0))
- return -EINVAL;
+ if (gpu->aspace && gpu->aspace->mmu)
+ msm_mmu_disable(gpu->aspace->mmu);
ret = disable_axi(gpu);
if (ret)
@@ -212,82 +202,71 @@ int msm_gpu_pm_suspend(struct msm_gpu *gpu)
return 0;
}
-/*
- * Inactivity detection (for suspend):
- */
-
-static void inactive_worker(struct work_struct *work)
+int msm_gpu_hw_init(struct msm_gpu *gpu)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, inactive_work);
- struct drm_device *dev = gpu->dev;
+ int ret;
- if (gpu->inactive)
- return;
+ if (!gpu->needs_hw_init)
+ return 0;
- DBG("%s: inactive!\n", gpu->name);
- mutex_lock(&dev->struct_mutex);
- if (!(msm_gpu_active(gpu) || gpu->inactive)) {
- disable_axi(gpu);
- disable_clk(gpu);
- gpu->inactive = true;
- }
- mutex_unlock(&dev->struct_mutex);
+ disable_irq(gpu->irq);
+ ret = gpu->funcs->hw_init(gpu);
+ if (!ret)
+ gpu->needs_hw_init = false;
+ enable_irq(gpu->irq);
+
+ return ret;
}
-static void inactive_handler(unsigned long data)
+static void retire_guilty_submit(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring)
{
- struct msm_gpu *gpu = (struct msm_gpu *)data;
- struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_gem_submit *submit = list_first_entry_or_null(&ring->submits,
+ struct msm_gem_submit, node);
- queue_work(priv->wq, &gpu->inactive_work);
-}
+ if (!submit)
+ return;
-/* cancel inactive timer and make sure we are awake: */
-static void inactive_cancel(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- del_timer(&gpu->inactive_timer);
- if (gpu->inactive) {
- enable_clk(gpu);
- enable_axi(gpu);
- gpu->inactive = false;
- }
-}
+ submit->queue->faults++;
-static void inactive_start(struct msm_gpu *gpu)
-{
- DBG("%s", gpu->name);
- mod_timer(&gpu->inactive_timer,
- round_jiffies_up(jiffies + DRM_MSM_INACTIVE_JIFFIES));
+ msm_gem_submit_free(submit);
}
/*
* Hangcheck detection for locked gpu:
*/
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence);
+static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ uint32_t fence);
static void recover_worker(struct work_struct *work)
{
struct msm_gpu *gpu = container_of(work, struct msm_gpu, recover_work);
struct drm_device *dev = gpu->dev;
- dev_err(dev->dev, "%s: hangcheck recover!\n", gpu->name);
-
mutex_lock(&dev->struct_mutex);
if (msm_gpu_active(gpu)) {
struct msm_gem_submit *submit;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ /* Retire all events that have already passed */
+ FOR_EACH_RING(gpu, ring, i)
+ retire_submits(gpu, ring, ring->memptrs->fence);
- /* retire completed submits, plus the one that hung: */
- retire_submits(gpu, fence + 1);
+ retire_guilty_submit(gpu, gpu->funcs->active_ring(gpu));
- inactive_cancel(gpu);
+ /* Recover the GPU */
gpu->funcs->recover(gpu);
+ /* Decrement the device usage count for the guilty submit */
+ pm_runtime_put_sync_autosuspend(&gpu->pdev->dev);
- /* replay the remaining submits after the one that hung: */
- list_for_each_entry(submit, &gpu->submit_list, node) {
- gpu->funcs->submit(gpu, submit, NULL);
+ /* Replay the remaining on all rings, highest priority first */
+ for (i = 0; i < gpu->nr_rings; i++) {
+ struct msm_ringbuffer *ring = gpu->rb[i];
+
+ list_for_each_entry(submit, &ring->submits, node)
+ gpu->funcs->submit(gpu, submit);
}
}
mutex_unlock(&dev->struct_mutex);
@@ -307,25 +286,43 @@ static void hangcheck_handler(unsigned long data)
struct msm_gpu *gpu = (struct msm_gpu *)data;
struct drm_device *dev = gpu->dev;
struct msm_drm_private *priv = dev->dev_private;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+ uint32_t fence = ring->memptrs->fence;
+ uint32_t submitted = gpu->funcs->submitted_fence(gpu, ring);
- if (fence != gpu->hangcheck_fence) {
+ if (fence != ring->hangcheck_fence) {
/* some progress has been made.. ya! */
- gpu->hangcheck_fence = fence;
- } else if (fence < gpu->submitted_fence) {
- /* no progress and not done.. hung! */
- gpu->hangcheck_fence = fence;
- dev_err(dev->dev, "%s: hangcheck detected gpu lockup!\n",
- gpu->name);
+ ring->hangcheck_fence = fence;
+ } else if (fence < submitted) {
+ struct msm_gem_submit *submit;
+
+ ring->hangcheck_fence = fence;
+
+ /*
+ * No progress done, but see if the current submit is
+ * intentionally skipping the hangcheck
+ */
+ submit = list_first_entry_or_null(&ring->submits,
+ struct msm_gem_submit, node);
+
+ if (!submit || (submit->queue->flags &
+ MSM_SUBMITQUEUE_BYPASS_QOS_TIMEOUT))
+ goto out;
+
+ /* no progress and not done and not special .. hung! */
+ dev_err(dev->dev, "%s: hangcheck detected gpu lockup rb %d!\n",
+ gpu->name, ring->id);
dev_err(dev->dev, "%s: completed fence: %u\n",
gpu->name, fence);
dev_err(dev->dev, "%s: submitted fence: %u\n",
- gpu->name, gpu->submitted_fence);
+ gpu->name, submitted);
+
queue_work(priv->wq, &gpu->recover_work);
}
+out:
/* if still more pending work, reset the hangcheck timer: */
- if (gpu->submitted_fence > gpu->hangcheck_fence)
+ if (submitted > ring->hangcheck_fence)
hangcheck_timer_reset(gpu);
/* workaround for missing irq: */
@@ -385,6 +382,8 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
{
unsigned long flags;
+ pm_runtime_get_sync(&gpu->pdev->dev);
+
spin_lock_irqsave(&gpu->perf_lock, flags);
/* we could dynamically enable/disable perfcntr registers too.. */
gpu->last_sample.active = msm_gpu_active(gpu);
@@ -398,6 +397,7 @@ void msm_gpu_perfcntr_start(struct msm_gpu *gpu)
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu)
{
gpu->perfcntr_active = false;
+ pm_runtime_put_sync(&gpu->pdev->dev);
}
/* returns -errno or # of cntrs sampled */
@@ -431,60 +431,74 @@ out:
* Cmdstream submission/retirement:
*/
-static void retire_submits(struct msm_gpu *gpu, uint32_t fence)
+static void retire_submits(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ uint32_t fence)
{
struct drm_device *dev = gpu->dev;
+ struct msm_gem_submit *submit, *tmp;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- while (!list_empty(&gpu->submit_list)) {
- struct msm_gem_submit *submit;
-
- submit = list_first_entry(&gpu->submit_list,
- struct msm_gem_submit, node);
+ list_for_each_entry_safe(submit, tmp, &ring->submits, node) {
+ struct msm_memptr_ticks *ticks;
- if (submit->fence <= fence) {
- list_del(&submit->node);
- kfree(submit);
- } else {
+ if (submit->fence > fence)
break;
- }
+
+ ticks = &(ring->memptrs->ticks[submit->tick_index]);
+
+ /* Add memory barrier to ensure the timer ticks are posted */
+ rmb();
+
+ trace_msm_retired(submit, ticks->started, ticks->retired);
+
+ pm_runtime_mark_last_busy(&gpu->pdev->dev);
+ pm_runtime_put_autosuspend(&gpu->pdev->dev);
+ msm_gem_submit_free(submit);
}
}
-static void retire_worker(struct work_struct *work)
+static bool _fence_signaled(struct msm_gem_object *obj, uint32_t fence)
{
- struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
- struct drm_device *dev = gpu->dev;
- uint32_t fence = gpu->funcs->last_fence(gpu);
+ if (obj->write_fence & 0x3FFFFFFF)
+ return COMPARE_FENCE_LTE(obj->write_fence, fence);
- msm_update_fence(gpu->dev, fence);
-
- mutex_lock(&dev->struct_mutex);
-
- retire_submits(gpu, fence);
+ return COMPARE_FENCE_LTE(obj->read_fence, fence);
+}
- while (!list_empty(&gpu->active_list)) {
- struct msm_gem_object *obj;
+static void _retire_ring(struct msm_gpu *gpu, struct msm_ringbuffer *ring,
+ uint32_t fence)
+{
+ struct msm_gem_object *obj, *tmp;
- obj = list_first_entry(&gpu->active_list,
- struct msm_gem_object, mm_list);
+ retire_submits(gpu, ring, fence);
- if ((obj->read_fence <= fence) &&
- (obj->write_fence <= fence)) {
- /* move to inactive: */
+ list_for_each_entry_safe(obj, tmp, &gpu->active_list, mm_list) {
+ if (_fence_signaled(obj, fence)) {
msm_gem_move_to_inactive(&obj->base);
- msm_gem_put_iova(&obj->base, gpu->id);
+ msm_gem_put_iova(&obj->base, gpu->aspace);
drm_gem_object_unreference(&obj->base);
- } else {
- break;
}
}
+}
- mutex_unlock(&dev->struct_mutex);
+static void retire_worker(struct work_struct *work)
+{
+ struct msm_gpu *gpu = container_of(work, struct msm_gpu, retire_work);
+ struct drm_device *dev = gpu->dev;
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (!ring)
+ continue;
- if (!msm_gpu_active(gpu))
- inactive_start(gpu);
+ msm_update_fence(gpu->dev, ring->memptrs->fence);
+
+ mutex_lock(&dev->struct_mutex);
+ _retire_ring(gpu, ring, ring->memptrs->fence);
+ mutex_unlock(&dev->struct_mutex);
+ }
}
/* call from irq handler to schedule work to retire bo's */
@@ -496,26 +510,29 @@ void msm_gpu_retire(struct msm_gpu *gpu)
}
/* add bo's to gpu's ring, and kick gpu: */
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx)
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit)
{
struct drm_device *dev = gpu->dev;
- struct msm_drm_private *priv = dev->dev_private;
- int i, ret;
+ struct msm_ringbuffer *ring = gpu->rb[submit->ring];
+ int i;
WARN_ON(!mutex_is_locked(&dev->struct_mutex));
- submit->fence = ++priv->next_fence;
+ submit->fence = FENCE(submit->ring, ++ring->seqno);
- gpu->submitted_fence = submit->fence;
+ pm_runtime_get_sync(&gpu->pdev->dev);
- inactive_cancel(gpu);
+ msm_gpu_hw_init(gpu);
- list_add_tail(&submit->node, &gpu->submit_list);
+ list_add_tail(&submit->node, &ring->submits);
- msm_rd_dump_submit(submit);
+ ring->submitted_fence = submit->fence;
+
+ submit->tick_index = ring->tick_index;
+ ring->tick_index = (ring->tick_index + 1) %
+ ARRAY_SIZE(ring->memptrs->ticks);
- gpu->submitted_fence = submit->fence;
+ trace_msm_queued(submit);
update_sw_cntrs(gpu);
@@ -528,27 +545,147 @@ int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
WARN_ON(is_active(msm_obj) && (msm_obj->gpu != gpu));
if (!is_active(msm_obj)) {
- uint32_t iova;
+ struct msm_gem_address_space *aspace;
+ uint64_t iova;
+
+ aspace = (msm_obj->flags & MSM_BO_SECURE) ?
+ gpu->secure_aspace : submit->aspace;
/* ring takes a reference to the bo and iova: */
drm_gem_object_reference(&msm_obj->base);
- msm_gem_get_iova_locked(&msm_obj->base,
- submit->gpu->id, &iova);
+ msm_gem_get_iova(&msm_obj->base, aspace, &iova);
+
+ submit->bos[i].iova = iova;
}
if (submit->bos[i].flags & MSM_SUBMIT_BO_READ)
msm_gem_move_to_active(&msm_obj->base, gpu, false, submit->fence);
-
- if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
+ else if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE)
msm_gem_move_to_active(&msm_obj->base, gpu, true, submit->fence);
}
- ret = gpu->funcs->submit(gpu, submit, ctx);
- priv->lastctx = ctx;
+ msm_rd_dump_submit(submit);
+
+ gpu->funcs->submit(gpu, submit);
hangcheck_timer_reset(gpu);
- return ret;
+ return 0;
+}
+
+struct msm_context_counter {
+ u32 groupid;
+ int counterid;
+ struct list_head node;
+};
+
+int msm_gpu_counter_get(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx)
+{
+ struct msm_context_counter *entry;
+ int counterid;
+ u32 lo = 0, hi = 0;
+
+ if (!ctx || !gpu->funcs->get_counter)
+ return -ENODEV;
+
+ counterid = gpu->funcs->get_counter(gpu, data->groupid, data->countable,
+ &lo, &hi);
+
+ if (counterid < 0)
+ return counterid;
+
+ /*
+ * Check to see if the counter in question is already held by this
+ * process. If it is, put it back and return an error.
+ */
+ list_for_each_entry(entry, &ctx->counters, node) {
+ if (entry->groupid == data->groupid &&
+ entry->counterid == counterid) {
+ gpu->funcs->put_counter(gpu, data->groupid, counterid);
+ return -EBUSY;
+ }
+ }
+
+ entry = kmalloc(sizeof(*entry), GFP_KERNEL);
+ if (!entry) {
+ gpu->funcs->put_counter(gpu, data->groupid, counterid);
+ return -ENOMEM;
+ }
+
+ entry->groupid = data->groupid;
+ entry->counterid = counterid;
+ list_add_tail(&entry->node, &ctx->counters);
+
+ data->counterid = counterid;
+ data->counter_lo = lo;
+ data->counter_hi = hi;
+
+ return 0;
+}
+
+int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx)
+{
+ struct msm_context_counter *entry;
+
+ if (!gpu || !ctx)
+ return -ENODEV;
+
+ list_for_each_entry(entry, &ctx->counters, node) {
+ if (entry->groupid == data->groupid &&
+ entry->counterid == data->counterid) {
+ gpu->funcs->put_counter(gpu, data->groupid,
+ data->counterid);
+
+ list_del(&entry->node);
+ kfree(entry);
+
+ return 0;
+ }
+ }
+
+ return -EINVAL;
+}
+
+void msm_gpu_cleanup_counters(struct msm_gpu *gpu,
+ struct msm_file_private *ctx)
+{
+ struct msm_context_counter *entry, *tmp;
+
+ if (!ctx)
+ return;
+
+ list_for_each_entry_safe(entry, tmp, &ctx->counters, node) {
+ gpu->funcs->put_counter(gpu, entry->groupid, entry->counterid);
+ list_del(&entry->node);
+ kfree(entry);
+ }
+}
+
+u64 msm_gpu_counter_read(struct msm_gpu *gpu, struct drm_msm_counter_read *data)
+{
+ int i;
+
+ if (!gpu->funcs->read_counter)
+ return 0;
+
+ for (i = 0; i < data->nr_ops; i++) {
+ struct drm_msm_counter_read_op op;
+ void __user *ptr = (void __user *)(uintptr_t)
+ (data->ops + (i * sizeof(op)));
+
+ if (copy_from_user(&op, ptr, sizeof(op)))
+ return -EFAULT;
+
+ op.value = gpu->funcs->read_counter(gpu, op.groupid,
+ op.counterid);
+
+ if (copy_to_user(ptr, &op, sizeof(op)))
+ return -EFAULT;
+ }
+
+ return 0;
}
/*
@@ -561,17 +698,114 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static const char *clk_names[] = {
- "src_clk", "core_clk", "iface_clk", "mem_clk", "mem_iface_clk",
- "alt_mem_iface_clk",
-};
+static struct clk *get_clock(struct device *dev, const char *name)
+{
+ struct clk *clk = devm_clk_get(dev, name);
+
+ DBG("clks[%s]: %p", name, clk);
+
+ return IS_ERR(clk) ? NULL : clk;
+}
+
+static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
+{
+ struct device *dev = &pdev->dev;
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
+ if (gpu->nr_clocks < 1) {
+ gpu->nr_clocks = 0;
+ return 0;
+ }
+
+ gpu->grp_clks = devm_kcalloc(dev, gpu->nr_clocks, sizeof(struct clk *),
+ GFP_KERNEL);
+ if (!gpu->grp_clks)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ gpu->grp_clks[i] = get_clock(dev, name);
+
+ /* Remember the key clocks that we need to control later */
+ if (!strcmp(name, "core_clk"))
+ gpu->core_clk = gpu->grp_clks[i];
+ else if (!strcmp(name, "rbbmtimer_clk"))
+ gpu->rbbmtimer_clk = gpu->grp_clks[i];
+
+ ++i;
+ }
+
+ return 0;
+}
+
+static struct msm_gem_address_space *
+msm_gpu_create_address_space(struct msm_gpu *gpu, struct device *dev,
+ int type, u64 start, u64 end, const char *name)
+{
+ struct msm_gem_address_space *aspace;
+ struct iommu_domain *iommu;
+
+ /*
+ * If start == end then assume we don't want an address space; this is
+ * mainly for targets to opt out of secure
+ */
+ if (start == end)
+ return NULL;
+
+ iommu = iommu_domain_alloc(&platform_bus_type);
+ if (!iommu) {
+ dev_info(gpu->dev->dev,
+ "%s: no IOMMU, fallback to VRAM carveout!\n",
+ gpu->name);
+ return NULL;
+ }
+
+ iommu->geometry.aperture_start = start;
+ iommu->geometry.aperture_end = end;
+
+ dev_info(gpu->dev->dev, "%s: using IOMMU '%s'\n", gpu->name, name);
+
+ aspace = msm_gem_address_space_create(dev, iommu, type, name);
+ if (IS_ERR(aspace)) {
+ dev_err(gpu->dev->dev, "%s: failed to init IOMMU '%s': %ld\n",
+ gpu->name, name, PTR_ERR(aspace));
+
+ iommu_domain_free(iommu);
+ return NULL;
+ }
+
+ if (aspace->mmu) {
+ int ret = aspace->mmu->funcs->attach(aspace->mmu, NULL, 0);
+
+ if (ret) {
+ dev_err(gpu->dev->dev,
+ "%s: failed to attach IOMMU '%s': %d\n",
+ gpu->name, name, ret);
+ msm_gem_address_space_put(aspace);
+ aspace = ERR_PTR(ret);
+ }
+ }
+
+ return aspace;
+}
+
+static void msm_gpu_destroy_address_space(struct msm_gem_address_space *aspace)
+{
+ if (!IS_ERR_OR_NULL(aspace) && aspace->mmu)
+ aspace->mmu->funcs->detach(aspace->mmu);
+
+ msm_gem_address_space_put(aspace);
+}
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz)
+ const char *name, struct msm_gpu_config *config)
{
- struct iommu_domain *iommu;
- int i, ret;
+ int i, ret, nr_rings;
+ void *memptrs;
+ uint64_t memptrs_iova;
if (WARN_ON(gpu->num_perfcntrs > ARRAY_SIZE(gpu->last_cntrs)))
gpu->num_perfcntrs = ARRAY_SIZE(gpu->last_cntrs);
@@ -579,33 +813,27 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
gpu->dev = drm;
gpu->funcs = funcs;
gpu->name = name;
- gpu->inactive = true;
INIT_LIST_HEAD(&gpu->active_list);
INIT_WORK(&gpu->retire_work, retire_worker);
- INIT_WORK(&gpu->inactive_work, inactive_worker);
INIT_WORK(&gpu->recover_work, recover_worker);
- INIT_LIST_HEAD(&gpu->submit_list);
- setup_timer(&gpu->inactive_timer, inactive_handler,
- (unsigned long)gpu);
setup_timer(&gpu->hangcheck_timer, hangcheck_handler,
(unsigned long)gpu);
spin_lock_init(&gpu->perf_lock);
- BUG_ON(ARRAY_SIZE(clk_names) != ARRAY_SIZE(gpu->grp_clks));
/* Map registers: */
- gpu->mmio = msm_ioremap(pdev, ioname, name);
+ gpu->mmio = msm_ioremap(pdev, config->ioname, name);
if (IS_ERR(gpu->mmio)) {
ret = PTR_ERR(gpu->mmio);
goto fail;
}
/* Get Interrupt: */
- gpu->irq = platform_get_irq_byname(pdev, irqname);
+ gpu->irq = platform_get_irq_byname(pdev, config->irqname);
if (gpu->irq < 0) {
ret = gpu->irq;
dev_err(drm->dev, "failed to get irq: %d\n", ret);
@@ -615,17 +843,14 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
ret = devm_request_irq(&pdev->dev, gpu->irq, irq_handler,
IRQF_TRIGGER_HIGH, gpu->name, gpu);
if (ret) {
+ gpu->irq = ret;
dev_err(drm->dev, "failed to request IRQ%u: %d\n", gpu->irq, ret);
goto fail;
}
- /* Acquire clocks: */
- for (i = 0; i < ARRAY_SIZE(clk_names); i++) {
- gpu->grp_clks[i] = devm_clk_get(&pdev->dev, clk_names[i]);
- DBG("grp_clks[%s]: %p", clk_names[i], gpu->grp_clks[i]);
- if (IS_ERR(gpu->grp_clks[i]))
- gpu->grp_clks[i] = NULL;
- }
+ ret = get_clocks(pdev, gpu);
+ if (ret)
+ goto fail;
gpu->ebi1_clk = devm_clk_get(&pdev->dev, "bus_clk");
DBG("ebi1_clk: %p", gpu->ebi1_clk);
@@ -643,61 +868,126 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
if (IS_ERR(gpu->gpu_cx))
gpu->gpu_cx = NULL;
- /* Setup IOMMU.. eventually we will (I think) do this once per context
- * and have separate page tables per context. For now, to keep things
- * simple and to get something working, just use a single address space:
- */
- iommu = iommu_domain_alloc(&platform_bus_type);
- if (iommu) {
- dev_info(drm->dev, "%s: using IOMMU\n", name);
- gpu->mmu = msm_iommu_new(&pdev->dev, iommu);
- if (IS_ERR(gpu->mmu)) {
- ret = PTR_ERR(gpu->mmu);
- dev_err(drm->dev, "failed to init iommu: %d\n", ret);
- gpu->mmu = NULL;
- iommu_domain_free(iommu);
- goto fail;
- }
+ gpu->aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+ MSM_IOMMU_DOMAIN_USER, config->va_start, config->va_end,
+ "gpu");
+
+ gpu->secure_aspace = msm_gpu_create_address_space(gpu, &pdev->dev,
+ MSM_IOMMU_DOMAIN_SECURE, config->secure_va_start,
+ config->secure_va_end, "gpu_secure");
+
+ nr_rings = config->nr_rings;
- } else {
- dev_info(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name);
+ if (nr_rings > ARRAY_SIZE(gpu->rb)) {
+ WARN(1, "Only creating %lu ringbuffers\n", ARRAY_SIZE(gpu->rb));
+ nr_rings = ARRAY_SIZE(gpu->rb);
}
- gpu->id = msm_register_mmu(drm, gpu->mmu);
+ /* Allocate one buffer to hold all the memptr records for the rings */
+ memptrs = msm_gem_kernel_new(drm, sizeof(struct msm_memptrs) * nr_rings,
+ MSM_BO_UNCACHED, gpu->aspace, &gpu->memptrs_bo, &memptrs_iova);
- /* Create ringbuffer: */
- mutex_lock(&drm->struct_mutex);
- gpu->rb = msm_ringbuffer_new(gpu, ringsz);
- mutex_unlock(&drm->struct_mutex);
- if (IS_ERR(gpu->rb)) {
- ret = PTR_ERR(gpu->rb);
- gpu->rb = NULL;
- dev_err(drm->dev, "could not create ringbuffer: %d\n", ret);
+ if (IS_ERR(memptrs)) {
+ ret = PTR_ERR(memptrs);
goto fail;
}
+ /* Create ringbuffer(s): */
+ for (i = 0; i < nr_rings; i++) {
+ gpu->rb[i] = msm_ringbuffer_new(gpu, i, memptrs, memptrs_iova);
+ if (IS_ERR(gpu->rb[i])) {
+ ret = PTR_ERR(gpu->rb[i]);
+ gpu->rb[i] = NULL;
+ dev_err(drm->dev,
+ "could not create ringbuffer %d: %d\n", i, ret);
+ goto fail;
+ }
+
+ memptrs += sizeof(struct msm_memptrs);
+ memptrs_iova += sizeof(struct msm_memptrs);
+ }
+
+ gpu->nr_rings = nr_rings;
+
+#ifdef CONFIG_SMP
+ gpu->pm_qos_req_dma.type = PM_QOS_REQ_AFFINE_IRQ;
+ gpu->pm_qos_req_dma.irq = gpu->irq;
+#endif
+
+ pm_qos_add_request(&gpu->pm_qos_req_dma, PM_QOS_CPU_DMA_LATENCY,
+ PM_QOS_DEFAULT_VALUE);
+ gpu->pdev = pdev;
+ platform_set_drvdata(pdev, gpu);
+
bs_init(gpu);
+ gpu->snapshot = msm_snapshot_new(gpu);
+ if (IS_ERR(gpu->snapshot))
+ gpu->snapshot = NULL;
+
return 0;
fail:
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
+ msm_ringbuffer_destroy(gpu->rb[i]);
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
+ }
+
+ msm_gpu_destroy_address_space(gpu->aspace);
+ msm_gpu_destroy_address_space(gpu->secure_aspace);
+
return ret;
}
void msm_gpu_cleanup(struct msm_gpu *gpu)
{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ int i;
+
DBG("%s", gpu->name);
WARN_ON(!list_empty(&gpu->active_list));
+ if (gpu->irq >= 0) {
+ disable_irq(gpu->irq);
+ devm_free_irq(&pdev->dev, gpu->irq, gpu);
+ }
+
bs_fini(gpu);
- if (gpu->rb) {
- if (gpu->rb_iova)
- msm_gem_put_iova(gpu->rb->bo, gpu->id);
- msm_ringbuffer_destroy(gpu->rb);
+ for (i = 0; i < ARRAY_SIZE(gpu->rb); i++)
+ msm_ringbuffer_destroy(gpu->rb[i]);
+
+ if (gpu->memptrs_bo) {
+ msm_gem_put_iova(gpu->memptrs_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(gpu->memptrs_bo);
}
- if (gpu->mmu)
- gpu->mmu->funcs->destroy(gpu->mmu);
+ msm_snapshot_destroy(gpu, gpu->snapshot);
+
+ msm_gpu_destroy_address_space(gpu->aspace);
+ msm_gpu_destroy_address_space(gpu->secure_aspace);
+
+ if (gpu->gpu_reg)
+ devm_regulator_put(gpu->gpu_reg);
+
+ if (gpu->gpu_cx)
+ devm_regulator_put(gpu->gpu_cx);
+
+ if (gpu->ebi1_clk)
+ devm_clk_put(&pdev->dev, gpu->ebi1_clk);
+
+ for (i = gpu->nr_clocks - 1; i >= 0; i--)
+ if (gpu->grp_clks[i])
+ devm_clk_put(&pdev->dev, gpu->grp_clks[i]);
+
+ devm_kfree(&pdev->dev, gpu->grp_clks);
+
+ if (gpu->mmio)
+ devm_iounmap(&pdev->dev, gpu->mmio);
}
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index 2bbe85a3d6f6..deb12aed5b28 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -19,14 +19,29 @@
#define __MSM_GPU_H__
#include <linux/clk.h>
+#include <linux/pm_qos.h>
#include <linux/regulator/consumer.h>
#include "msm_drv.h"
#include "msm_ringbuffer.h"
+#include "msm_snapshot.h"
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+#define MSM_GPU_DEFAULT_IONAME "kgsl_3d0_reg_memory"
+#define MSM_GPU_DEFAULT_IRQNAME "kgsl_3d0_irq"
+
+struct msm_gpu_config {
+ const char *ioname;
+ const char *irqname;
+ int nr_rings;
+ uint64_t va_start;
+ uint64_t va_end;
+ uint64_t secure_va_start;
+ uint64_t secure_va_end;
+};
+
/* So far, with hardware that I've seen to date, we can have:
* + zero, one, or two z180 2d cores
* + a3xx or a2xx 3d core, which share a common CP (the firmware
@@ -46,23 +61,29 @@ struct msm_gpu_funcs {
int (*hw_init)(struct msm_gpu *gpu);
int (*pm_suspend)(struct msm_gpu *gpu);
int (*pm_resume)(struct msm_gpu *gpu);
- int (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
- void (*flush)(struct msm_gpu *gpu);
- void (*idle)(struct msm_gpu *gpu);
+ void (*submit)(struct msm_gpu *gpu, struct msm_gem_submit *submit);
+ void (*flush)(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
irqreturn_t (*irq)(struct msm_gpu *irq);
- uint32_t (*last_fence)(struct msm_gpu *gpu);
+ uint32_t (*submitted_fence)(struct msm_gpu *gpu,
+ struct msm_ringbuffer *ring);
+ struct msm_ringbuffer *(*active_ring)(struct msm_gpu *gpu);
void (*recover)(struct msm_gpu *gpu);
void (*destroy)(struct msm_gpu *gpu);
#ifdef CONFIG_DEBUG_FS
/* show GPU status in debugfs: */
void (*show)(struct msm_gpu *gpu, struct seq_file *m);
#endif
+ int (*snapshot)(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+ int (*get_counter)(struct msm_gpu *gpu, u32 groupid, u32 countable,
+ u32 *lo, u32 *hi);
+ void (*put_counter)(struct msm_gpu *gpu, u32 groupid, int counterid);
+ u64 (*read_counter)(struct msm_gpu *gpu, u32 groupid, int counterid);
};
struct msm_gpu {
const char *name;
struct drm_device *dev;
+ struct platform_device *pdev;
const struct msm_gpu_funcs *funcs;
/* performance counters (hw & sw): */
@@ -77,17 +98,14 @@ struct msm_gpu {
const struct msm_gpu_perfcntr *perfcntrs;
uint32_t num_perfcntrs;
- struct msm_ringbuffer *rb;
- uint32_t rb_iova;
+ struct msm_ringbuffer *rb[MSM_GPU_MAX_RINGS];
+ int nr_rings;
/* list of GEM active objects: */
struct list_head active_list;
- uint32_t submitted_fence;
-
- /* is gpu powered/active? */
- int active_cnt;
- bool inactive;
+ /* does gpu need hw_init? */
+ bool needs_hw_init;
/* worker for handling active-list retiring: */
struct work_struct retire_work;
@@ -95,13 +113,23 @@ struct msm_gpu {
void __iomem *mmio;
int irq;
- struct msm_mmu *mmu;
- int id;
+ struct msm_gem_address_space *aspace;
+ struct msm_gem_address_space *secure_aspace;
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk *ebi1_clk, *grp_clks[6];
- uint32_t fast_rate, slow_rate, bus_freq;
+ struct clk **grp_clks;
+ struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
+ int nr_clocks;
+
+ uint32_t gpufreq[10];
+ uint32_t busfreq[10];
+ uint32_t nr_pwrlevels;
+ uint32_t active_level;
+
+ struct pm_qos_request pm_qos_req_dma;
+
+ struct drm_gem_object *memptrs_bo;
#ifdef DOWNSTREAM_CONFIG_MSM_BUS_SCALING
struct msm_bus_scale_pdata *bus_scale_table;
@@ -111,21 +139,53 @@ struct msm_gpu {
/* Hang and Inactivity Detection:
*/
#define DRM_MSM_INACTIVE_PERIOD 66 /* in ms (roughly four frames) */
-#define DRM_MSM_INACTIVE_JIFFIES msecs_to_jiffies(DRM_MSM_INACTIVE_PERIOD)
- struct timer_list inactive_timer;
- struct work_struct inactive_work;
+
#define DRM_MSM_HANGCHECK_PERIOD 500 /* in ms */
#define DRM_MSM_HANGCHECK_JIFFIES msecs_to_jiffies(DRM_MSM_HANGCHECK_PERIOD)
struct timer_list hangcheck_timer;
- uint32_t hangcheck_fence;
struct work_struct recover_work;
+ struct msm_snapshot *snapshot;
+};
- struct list_head submit_list;
+struct msm_gpu_submitqueue {
+ int id;
+ u32 flags;
+ u32 prio;
+ int faults;
+ struct list_head node;
+ struct kref ref;
};
+/* It turns out that all targets use the same ringbuffer size. */
+#define MSM_GPU_RINGBUFFER_SZ SZ_32K
+#define MSM_GPU_RINGBUFFER_BLKSIZE 32
+
+#define MSM_GPU_RB_CNTL_DEFAULT \
+ (AXXX_CP_RB_CNTL_BUFSZ(ilog2(MSM_GPU_RINGBUFFER_SZ / 8)) | \
+ AXXX_CP_RB_CNTL_BLKSZ(ilog2(MSM_GPU_RINGBUFFER_BLKSIZE / 8)))
+
+static inline struct msm_ringbuffer *__get_ring(struct msm_gpu *gpu, int index)
+{
+ return (index < ARRAY_SIZE(gpu->rb) ? gpu->rb[index] : NULL);
+}
+
+#define FOR_EACH_RING(gpu, ring, index) \
+ for (index = 0, ring = (gpu)->rb[0]; \
+ index < (gpu)->nr_rings && index < ARRAY_SIZE((gpu)->rb); \
+ index++, ring = __get_ring(gpu, index))
+
static inline bool msm_gpu_active(struct msm_gpu *gpu)
{
- return gpu->submitted_fence > gpu->funcs->last_fence(gpu);
+ struct msm_ringbuffer *ring;
+ int i;
+
+ FOR_EACH_RING(gpu, ring, i) {
+ if (gpu->funcs->submitted_fence(gpu, ring) >
+ ring->memptrs->fence)
+ return true;
+ }
+
+ return false;
}
/* Perf-Counters:
@@ -151,25 +211,84 @@ static inline u32 gpu_read(struct msm_gpu *gpu, u32 reg)
return msm_readl(gpu->mmio + (reg << 2));
}
+static inline void gpu_rmw(struct msm_gpu *gpu, u32 reg, u32 mask, u32 or)
+{
+ uint32_t val = gpu_read(gpu, reg);
+
+ val &= ~mask;
+ gpu_write(gpu, reg, val | or);
+}
+
+static inline u64 gpu_read64(struct msm_gpu *gpu, u32 lo, u32 hi)
+{
+ u64 val;
+
+ /*
+ * Why not a readq here? Two reasons: 1) many of the LO registers are
+ * not quad word aligned and 2) the GPU hardware designers have a bit
+ * of a history of putting registers where they fit, especially in
+ * spins. The longer a GPU family goes the higher the chance that
+ * we'll get burned. We could do a series of validity checks if we
+ * wanted to, but really is a readq() that much better? Nah.
+ */
+
+ /*
+ * For some lo/hi registers (like perfcounters), the hi value is latched
+ * when the lo is read, so make sure to read the lo first to trigger
+ * that
+ */
+ val = (u64) msm_readl(gpu->mmio + (lo << 2));
+ val |= ((u64) msm_readl(gpu->mmio + (hi << 2)) << 32);
+
+ return val;
+}
+
+static inline void gpu_write64(struct msm_gpu *gpu, u32 lo, u32 hi, u64 val)
+{
+ /* Why not a writeq here? Read the screed above */
+ msm_writel(lower_32_bits(val), gpu->mmio + (lo << 2));
+ msm_writel(upper_32_bits(val), gpu->mmio + (hi << 2));
+}
+
int msm_gpu_pm_suspend(struct msm_gpu *gpu);
int msm_gpu_pm_resume(struct msm_gpu *gpu);
+int msm_gpu_hw_init(struct msm_gpu *gpu);
+
void msm_gpu_perfcntr_start(struct msm_gpu *gpu);
void msm_gpu_perfcntr_stop(struct msm_gpu *gpu);
int msm_gpu_perfcntr_sample(struct msm_gpu *gpu, uint32_t *activetime,
uint32_t *totaltime, uint32_t ncntrs, uint32_t *cntrs);
void msm_gpu_retire(struct msm_gpu *gpu);
-int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
- struct msm_file_private *ctx);
+int msm_gpu_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit);
int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs,
- const char *name, const char *ioname, const char *irqname, int ringsz);
+ const char *name, struct msm_gpu_config *config);
+
void msm_gpu_cleanup(struct msm_gpu *gpu);
struct msm_gpu *adreno_load_gpu(struct drm_device *dev);
void __init adreno_register(void);
void __exit adreno_unregister(void);
+int msm_gpu_counter_get(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx);
+
+int msm_gpu_counter_put(struct msm_gpu *gpu, struct drm_msm_counter *data,
+ struct msm_file_private *ctx);
+
+void msm_gpu_cleanup_counters(struct msm_gpu *gpu,
+ struct msm_file_private *ctx);
+
+u64 msm_gpu_counter_read(struct msm_gpu *gpu,
+ struct drm_msm_counter_read *data);
+
+static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
+{
+ if (queue)
+ kref_put(&queue->ref, msm_submitqueue_destroy);
+}
+
#endif /* __MSM_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c
index 7ac2f1997e4a..4586b62401fb 100644
--- a/drivers/gpu/drm/msm/msm_iommu.c
+++ b/drivers/gpu/drm/msm/msm_iommu.c
@@ -15,98 +15,206 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of_platform.h>
+#include <linux/of_address.h>
+#include <soc/qcom/secure_buffer.h>
#include "msm_drv.h"
-#include "msm_mmu.h"
-
-struct msm_iommu {
- struct msm_mmu base;
- struct iommu_domain *domain;
-};
-#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+#include "msm_iommu.h"
static int msm_fault_handler(struct iommu_domain *iommu, struct device *dev,
unsigned long iova, int flags, void *arg)
{
- pr_warn_ratelimited("*** fault: iova=%08lx, flags=%d\n", iova, flags);
+ pr_warn_ratelimited("*** fault: iova=%16llX, flags=%d\n", (u64) iova, flags);
return 0;
}
-static int msm_iommu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+static void iommu_get_clocks(struct msm_iommu *iommu, struct device *dev)
+{
+ struct property *prop;
+ const char *name;
+ int i = 0;
+
+ iommu->nr_clocks =
+ of_property_count_strings(dev->of_node, "clock-names");
+
+ if (iommu->nr_clocks < 0)
+ return;
+
+ if (WARN_ON(iommu->nr_clocks > ARRAY_SIZE(iommu->clocks)))
+ iommu->nr_clocks = ARRAY_SIZE(iommu->clocks);
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ if (i == iommu->nr_clocks)
+ break;
+
+ iommu->clocks[i++] = clk_get(dev, name);
+ }
+}
+
+
+static void msm_iommu_clocks_enable(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ if (!iommu->nr_clocks)
+ iommu_get_clocks(iommu, mmu->dev->parent);
+
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_prepare_enable(iommu->clocks[i]);
+ }
+}
+
+static void msm_iommu_clocks_disable(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int i;
+
+ for (i = 0; i < iommu->nr_clocks; i++) {
+ if (iommu->clocks[i])
+ clk_disable_unprepare(iommu->clocks[i]);
+ }
+}
+
+static int msm_iommu_attach(struct msm_mmu *mmu, const char **names,
+ int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
+
return iommu_attach_device(iommu->domain, mmu->dev);
}
-static void msm_iommu_detach(struct msm_mmu *mmu, const char **names, int cnt)
+static int msm_iommu_attach_user(struct msm_mmu *mmu, const char **names,
+ int cnt)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
- iommu_detach_device(iommu->domain, mmu->dev);
+ int ret, val = 1;
+
+ /* Hope springs eternal */
+ iommu->allow_dynamic = !iommu_domain_set_attr(iommu->domain,
+ DOMAIN_ATTR_ENABLE_TTBR1, &val) ? true : false;
+
+ /* Mark the GPU as I/O coherent if it is supported */
+ iommu->is_coherent = of_dma_is_coherent(mmu->dev->of_node);
+
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
+ if (ret)
+ return ret;
+
+ /*
+ * Get the context bank for the base domain; this will be shared with
+ * the children.
+ */
+ iommu->cb = -1;
+ if (iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &iommu->cb))
+ iommu->allow_dynamic = false;
+
+ return 0;
}
-static int msm_iommu_map(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len, int prot)
+static int msm_iommu_attach_dynamic(struct msm_mmu *mmu, const char **names,
+ int cnt)
{
+ static unsigned int procid;
struct msm_iommu *iommu = to_msm_iommu(mmu);
- struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- unsigned int da = iova;
- unsigned int i, j;
int ret;
+ unsigned int id;
- if (!domain || !sgt)
- return -EINVAL;
-
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- u32 pa = sg_phys(sg) - sg->offset;
- size_t bytes = sg->length + sg->offset;
+ /* Assign a unique procid for the domain to cut down on TLB churn */
+ id = ++procid;
- VERB("map[%d]: %08x %08x(%zx)", i, iova, pa, bytes);
+ iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_PROCID, &id);
- ret = iommu_map(domain, da, pa, bytes, prot);
- if (ret)
- goto fail;
+ ret = iommu_attach_device(iommu->domain, mmu->dev);
+ if (ret)
+ return ret;
- da += bytes;
- }
+ /*
+ * Get the TTBR0 and the CONTEXTIDR - these will be used by the GPU to
+ * switch the pagetable on its own.
+ */
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_TTBR0,
+ &iommu->ttbr0);
+ iommu_domain_get_attr(iommu->domain, DOMAIN_ATTR_CONTEXTIDR,
+ &iommu->contextidr);
return 0;
+}
+
+static int msm_iommu_attach_secure(struct msm_mmu *mmu, const char **names,
+ int cnt)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ int ret, vmid = VMID_CP_PIXEL;
-fail:
- da = iova;
+ ret = iommu_domain_set_attr(iommu->domain, DOMAIN_ATTR_SECURE_VMID,
+ &vmid);
+ if (ret)
+ return ret;
- for_each_sg(sgt->sgl, sg, i, j) {
- size_t bytes = sg->length + sg->offset;
- iommu_unmap(domain, da, bytes);
- da += bytes;
- }
- return ret;
+ return iommu_attach_device(iommu->domain, mmu->dev);
}
-static int msm_iommu_unmap(struct msm_mmu *mmu, uint32_t iova,
- struct sg_table *sgt, unsigned len)
+static void msm_iommu_detach(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ iommu_detach_device(iommu->domain, mmu->dev);
+}
+
+static void msm_iommu_detach_dynamic(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ iommu_detach_device(iommu->domain, mmu->dev);
+}
+
+static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, u32 flags, void *priv)
{
struct msm_iommu *iommu = to_msm_iommu(mmu);
struct iommu_domain *domain = iommu->domain;
- struct scatterlist *sg;
- unsigned int da = iova;
- int i;
+ int ret;
+ u32 prot = IOMMU_READ;
- for_each_sg(sgt->sgl, sg, sgt->nents, i) {
- size_t bytes = sg->length + sg->offset;
- size_t unmapped;
+ if (!domain || !sgt)
+ return -EINVAL;
- unmapped = iommu_unmap(domain, da, bytes);
- if (unmapped < bytes)
- return unmapped;
+ if (!(flags & MSM_BO_GPU_READONLY))
+ prot |= IOMMU_WRITE;
- VERB("unmap[%d]: %08x(%zx)", i, iova, bytes);
+ if (flags & MSM_BO_PRIVILEGED)
+ prot |= IOMMU_PRIV;
- BUG_ON(!PAGE_ALIGNED(bytes));
+ if ((flags & MSM_BO_CACHED) && msm_iommu_coherent(mmu))
+ prot |= IOMMU_CACHE;
- da += bytes;
- }
+ /* iommu_map_sg returns the number of bytes mapped */
+ ret = iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, prot);
+ if (ret)
+ sgt->sgl->dma_address = iova;
- return 0;
+ return ret ? 0 : -ENOMEM;
+}
+
+static void msm_iommu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, void *priv)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+ struct iommu_domain *domain = iommu->domain;
+ struct scatterlist *sg;
+ size_t len = 0;
+ int ret, i;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ len += sg->length;
+
+ ret = iommu_unmap(domain, iova, len);
+ if (ret != len)
+ dev_warn(mmu->dev, "could not unmap iova %llx\n", iova);
+
+ sgt->sgl->dma_address = 0;
}
static void msm_iommu_destroy(struct msm_mmu *mmu)
@@ -116,7 +224,31 @@ static void msm_iommu_destroy(struct msm_mmu *mmu)
kfree(iommu);
}
-static const struct msm_mmu_funcs funcs = {
+static struct device *find_context_bank(const char *name)
+{
+ struct device_node *node = of_find_node_by_name(NULL, name);
+ struct platform_device *pdev, *parent;
+
+ if (!node)
+ return ERR_PTR(-ENODEV);
+
+ if (!of_find_property(node, "iommus", NULL))
+ return ERR_PTR(-ENODEV);
+
+ /* Get the parent device */
+ parent = of_find_device_by_node(node->parent);
+ if (!parent)
+ return ERR_PTR(-ENODEV);
+ /* Populate the sub nodes */
+ of_platform_populate(parent->dev.of_node, NULL, NULL, &parent->dev);
+
+ /* Get the context bank device */
+ pdev = of_find_device_by_node(node);
+
+ return pdev ? &pdev->dev : ERR_PTR(-ENODEV);
+}
+
+static const struct msm_mmu_funcs default_funcs = {
.attach = msm_iommu_attach,
.detach = msm_iommu_detach,
.map = msm_iommu_map,
@@ -124,7 +256,52 @@ static const struct msm_mmu_funcs funcs = {
.destroy = msm_iommu_destroy,
};
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
+static const struct msm_mmu_funcs user_funcs = {
+ .attach = msm_iommu_attach_user,
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+ .enable = msm_iommu_clocks_enable,
+ .disable = msm_iommu_clocks_disable,
+};
+
+static const struct msm_mmu_funcs secure_funcs = {
+ .attach = msm_iommu_attach_secure,
+ .detach = msm_iommu_detach,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+static const struct msm_mmu_funcs dynamic_funcs = {
+ .attach = msm_iommu_attach_dynamic,
+ .detach = msm_iommu_detach_dynamic,
+ .map = msm_iommu_map,
+ .unmap = msm_iommu_unmap,
+ .destroy = msm_iommu_destroy,
+};
+
+static const struct {
+ const char *cbname;
+ const struct msm_mmu_funcs *funcs;
+} msm_iommu_domains[] = {
+ [MSM_IOMMU_DOMAIN_DEFAULT] = {
+ .cbname = NULL,
+ .funcs = &default_funcs,
+ },
+ [MSM_IOMMU_DOMAIN_USER] = {
+ .cbname = "gfx3d_user",
+ .funcs = &user_funcs,
+ },
+ [MSM_IOMMU_DOMAIN_SECURE] = {
+ .cbname = "gfx3d_secure",
+ .funcs = &secure_funcs
+ },
+};
+
+static struct msm_mmu *iommu_create(struct device *dev,
+ struct iommu_domain *domain, const struct msm_mmu_funcs *funcs)
{
struct msm_iommu *iommu;
@@ -133,8 +310,73 @@ struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain)
return ERR_PTR(-ENOMEM);
iommu->domain = domain;
- msm_mmu_init(&iommu->base, dev, &funcs);
+ msm_mmu_init(&iommu->base, dev, funcs);
iommu_set_fault_handler(domain, msm_fault_handler, dev);
return &iommu->base;
}
+
+struct msm_mmu *msm_iommu_new(struct device *parent,
+ enum msm_iommu_domain_type type, struct iommu_domain *domain)
+{
+ struct device *dev = parent;
+
+ if (type >= ARRAY_SIZE(msm_iommu_domains) ||
+ !msm_iommu_domains[type].funcs)
+ return ERR_PTR(-ENODEV);
+
+ if (msm_iommu_domains[type].cbname) {
+ dev = find_context_bank(msm_iommu_domains[type].cbname);
+ if (IS_ERR(dev))
+ return ERR_CAST(dev);
+ }
+
+ return iommu_create(dev, domain, msm_iommu_domains[type].funcs);
+}
+
+/*
+ * Given a base domain that is attached to a IOMMU device try to create a
+ * dynamic domain that is also attached to the same device but allocates a new
+ * pagetable. This is used to allow multiple pagetables to be attached to the
+ * same device.
+ */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *base)
+{
+ struct msm_iommu *base_iommu = to_msm_iommu(base);
+ struct iommu_domain *domain;
+ struct msm_mmu *mmu;
+ int ret, val = 1;
+ struct msm_iommu *child_iommu;
+
+ /* Don't continue if the base domain didn't have the support we need */
+ if (!base || base_iommu->allow_dynamic == false)
+ return ERR_PTR(-EOPNOTSUPP);
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return ERR_PTR(-ENODEV);
+
+ mmu = iommu_create(base->dev, domain, &dynamic_funcs);
+
+ if (IS_ERR(mmu)) {
+ if (domain)
+ iommu_domain_free(domain);
+ return mmu;
+ }
+
+ ret = iommu_domain_set_attr(domain, DOMAIN_ATTR_DYNAMIC, &val);
+ if (ret) {
+ msm_iommu_destroy(mmu);
+ return ERR_PTR(ret);
+ }
+
+ /* Set the context bank to match the base domain */
+ iommu_domain_set_attr(domain, DOMAIN_ATTR_CONTEXT_BANK,
+ &base_iommu->cb);
+
+ /* Mark the dynamic domain as I/O coherent if the base domain is */
+ child_iommu = to_msm_iommu(mmu);
+ child_iommu->is_coherent = base_iommu->is_coherent;
+
+ return mmu;
+}
diff --git a/drivers/gpu/drm/msm/msm_iommu.h b/drivers/gpu/drm/msm/msm_iommu.h
new file mode 100644
index 000000000000..3a67b60ad81d
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_iommu.h
@@ -0,0 +1,46 @@
+/* Copyright (c) 2016-2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_IOMMU_H_
+#define _MSM_IOMMU_H_
+
+#include "msm_mmu.h"
+
+struct msm_iommu {
+ struct msm_mmu base;
+ struct iommu_domain *domain;
+ int cb;
+ phys_addr_t ttbr0;
+ uint32_t contextidr;
+ bool allow_dynamic;
+
+ struct clk *clocks[5];
+ int nr_clocks;
+
+ bool is_coherent;
+};
+#define to_msm_iommu(x) container_of(x, struct msm_iommu, base)
+
+static inline bool msm_iommu_allow_dynamic(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return iommu->allow_dynamic;
+}
+
+static inline bool msm_iommu_coherent(struct msm_mmu *mmu)
+{
+ struct msm_iommu *iommu = to_msm_iommu(mmu);
+
+ return iommu->is_coherent;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index 9bcabaada179..6e3df60aac55 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2017, 2019, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -25,6 +26,33 @@
#define MAX_PLANE 4
+/**
+ * Device Private DRM Mode Flags
+ * drm_mode->private_flags
+ */
+/* Connector has interpreted seamless transition request as dynamic fps */
+#define MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS (1<<0)
+/* Transition to new mode requires a wait-for-vblank before the modeset */
+#define MSM_MODE_FLAG_VBLANK_PRE_MODESET (1<<1)
+/*
+ * Some flags are set in the bridge and then consumed in the encoder, so they
+ * are best carried in private_flags. DRM_MODE_FLAG_SUPPORTS_RGB/YUV are
+ * flags indicating the color formats supported by the SINK, as read from its
+ * EDID, whereas the flags defined here indicate the best color/bit-depth
+ * format we chose for the display. The preference order is: RGB+RGB_DC,
+ * YUV+YUV_DC, RGB, YUV. The RGB and YUV formats cannot be set at the same
+ * time. Likewise, RGB_DC is only set when the RGB format is set, and the
+ * same applies for YUV_DC.
+ */
+/* Enable RGB444 30 bit deep color */
+#define MSM_MODE_FLAG_RGB444_DC_ENABLE (1<<2)
+/* Enable YUV420 30 bit deep color */
+#define MSM_MODE_FLAG_YUV420_DC_ENABLE (1<<3)
+/* Choose RGB444 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_RGB444 (1<<4)
+/* Choose YUV420 format to display */
+#define MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420 (1<<5)
+
/* As there are different display controller blocks depending on the
* snapdragon version, the kms support is split out and the appropriate
* implementation is loaded at runtime. The kms module is responsible
@@ -33,6 +61,7 @@
struct msm_kms_funcs {
/* hw initialization: */
int (*hw_init)(struct msm_kms *kms);
+ int (*postinit)(struct msm_kms *kms);
/* irq handling: */
void (*irq_preinstall)(struct msm_kms *kms);
int (*irq_postinstall)(struct msm_kms *kms);
@@ -41,21 +70,39 @@ struct msm_kms_funcs {
int (*enable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* modeset, bracketing atomic_commit(): */
- void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
- void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*prepare_fence)(struct msm_kms *kms,
+ struct drm_atomic_state *state);
+ void (*prepare_commit)(struct msm_kms *kms,
+ struct drm_atomic_state *state);
+ void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*complete_commit)(struct msm_kms *kms,
+ struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
struct drm_crtc *crtc);
+ /* get msm_format w/ optional format modifiers from drm_mode_fb_cmd2 */
+ const struct msm_format *(*get_format)(struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
/* misc: */
- const struct msm_format *(*get_format)(struct msm_kms *kms, uint32_t format);
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
int (*set_split_display)(struct msm_kms *kms,
struct drm_encoder *encoder,
struct drm_encoder *slave_encoder,
bool is_cmd_mode);
+ void (*postopen)(struct msm_kms *kms, struct drm_file *file);
+ bool (*early_display_status)(struct msm_kms *kms);
/* cleanup: */
void (*preclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*postclose)(struct msm_kms *kms, struct drm_file *file);
+ void (*lastclose)(struct msm_kms *kms);
void (*destroy)(struct msm_kms *kms);
};
@@ -74,7 +121,33 @@ static inline void msm_kms_init(struct msm_kms *kms,
kms->funcs = funcs;
}
+#ifdef CONFIG_DRM_MSM_MDP4
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
+#else
+static inline
+struct msm_kms *mdp4_kms_init(struct drm_device *dev) { return NULL; };
+#endif
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
+struct msm_kms *sde_kms_init(struct drm_device *dev);
+
+/**
+ * Mode Set Utility Functions
+ */
+static inline bool msm_is_mode_seamless(const struct drm_display_mode *mode)
+{
+ return (mode->flags & DRM_MODE_FLAG_SEAMLESS);
+}
+
+static inline bool msm_is_mode_dynamic_fps(const struct drm_display_mode *mode)
+{
+ return ((mode->flags & DRM_MODE_FLAG_SEAMLESS) &&
+ (mode->private_flags & MSM_MODE_FLAG_SEAMLESS_DYNAMIC_FPS));
+}
+
+static inline bool msm_needs_vblank_pre_modeset(
+ const struct drm_display_mode *mode)
+{
+ return (mode->private_flags & MSM_MODE_FLAG_VBLANK_PRE_MODESET);
+}
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h
index 7cd88d9dc155..03f8a55255c9 100644
--- a/drivers/gpu/drm/msm/msm_mmu.h
+++ b/drivers/gpu/drm/msm/msm_mmu.h
@@ -20,14 +20,38 @@
#include <linux/iommu.h>
+struct msm_mmu;
+
+enum msm_mmu_domain_type {
+ MSM_SMMU_DOMAIN_UNSECURE,
+ MSM_SMMU_DOMAIN_NRT_UNSECURE,
+ MSM_SMMU_DOMAIN_SECURE,
+ MSM_SMMU_DOMAIN_NRT_SECURE,
+ MSM_SMMU_DOMAIN_MAX,
+};
+
+enum msm_iommu_domain_type {
+ MSM_IOMMU_DOMAIN_DEFAULT,
+ MSM_IOMMU_DOMAIN_USER,
+ MSM_IOMMU_DOMAIN_SECURE,
+};
+
struct msm_mmu_funcs {
int (*attach)(struct msm_mmu *mmu, const char **names, int cnt);
- void (*detach)(struct msm_mmu *mmu, const char **names, int cnt);
- int (*map)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned len, int prot);
- int (*unmap)(struct msm_mmu *mmu, uint32_t iova, struct sg_table *sgt,
- unsigned len);
+ void (*detach)(struct msm_mmu *mmu);
+ int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+ u32 flags, void *priv);
+ void (*unmap)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt,
+ void *priv);
void (*destroy)(struct msm_mmu *mmu);
+ void (*enable)(struct msm_mmu *mmu);
+ void (*disable)(struct msm_mmu *mmu);
+ int (*early_splash_map)(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, u32 flags);
+ void (*early_splash_unmap)(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt);
+ int (*set_property)(struct msm_mmu *mmu,
+ enum iommu_attr attr, void *data);
};
struct msm_mmu {
@@ -42,7 +66,35 @@ static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev,
mmu->funcs = funcs;
}
-struct msm_mmu *msm_iommu_new(struct device *dev, struct iommu_domain *domain);
-struct msm_mmu *msm_gpummu_new(struct device *dev, struct msm_gpu *gpu);
+/* Create a new SDE mmu device */
+struct msm_mmu *msm_smmu_new(struct device *dev,
+ enum msm_mmu_domain_type domain);
+
+/* Create a new legacy MDP4 or GPU mmu device */
+struct msm_mmu *msm_iommu_new(struct device *parent,
+ enum msm_iommu_domain_type type, struct iommu_domain *domain);
+
+/* Create a new dynamic domain for GPU */
+struct msm_mmu *msm_iommu_new_dynamic(struct msm_mmu *orig);
+
+static inline void msm_mmu_enable(struct msm_mmu *mmu)
+{
+ if (mmu->funcs->enable)
+ mmu->funcs->enable(mmu);
+}
+
+static inline void msm_mmu_disable(struct msm_mmu *mmu)
+{
+ if (mmu->funcs->disable)
+ mmu->funcs->disable(mmu);
+}
+
+/* SDE smmu driver initialize and cleanup functions */
+int __init msm_smmu_driver_init(void);
+void __exit msm_smmu_driver_cleanup(void);
+
+/* register custom fault handler for a specific domain */
+void msm_smmu_register_fault_handler(struct msm_mmu *mmu,
+ iommu_fault_handler_t handler);
#endif /* __MSM_MMU_H__ */
diff --git a/drivers/gpu/drm/msm/msm_prop.c b/drivers/gpu/drm/msm/msm_prop.c
new file mode 100644
index 000000000000..02ed2b7a062f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_prop.c
@@ -0,0 +1,734 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_prop.h"
+
+void msm_property_init(struct msm_property_info *info,
+ struct drm_mode_object *base,
+ struct drm_device *dev,
+ struct drm_property **property_array,
+ struct msm_property_data *property_data,
+ uint32_t property_count,
+ uint32_t blob_count,
+ uint32_t state_size)
+{
+ int i;
+
+ /* prevent access if any of these are NULL */
+ if (!base || !dev || !property_array || !property_data) {
+ property_count = 0;
+ blob_count = 0;
+
+ DRM_ERROR("invalid arguments, forcing zero properties\n");
+ return;
+ }
+
+ /* can't have more blob properties than total properties */
+ if (blob_count > property_count) {
+ blob_count = property_count;
+
+ DBG("Capping number of blob properties to %d", blob_count);
+ }
+
+ if (!info) {
+ DRM_ERROR("info pointer is NULL\n");
+ } else {
+ info->base = base;
+ info->dev = dev;
+ info->property_array = property_array;
+ info->property_data = property_data;
+ info->property_count = property_count;
+ info->blob_count = blob_count;
+ info->install_request = 0;
+ info->install_count = 0;
+ info->recent_idx = 0;
+ info->is_active = false;
+ info->state_size = state_size;
+ info->state_cache_size = 0;
+ mutex_init(&info->property_lock);
+
+ memset(property_data,
+ 0,
+ sizeof(struct msm_property_data) *
+ property_count);
+ INIT_LIST_HEAD(&info->dirty_list);
+
+ for (i = 0; i < property_count; ++i)
+ INIT_LIST_HEAD(&property_data[i].dirty_node);
+ }
+}
+
+void msm_property_destroy(struct msm_property_info *info)
+{
+ if (!info)
+ return;
+
+ /* reset dirty list */
+ INIT_LIST_HEAD(&info->dirty_list);
+
+ /* free state cache */
+ while (info->state_cache_size > 0)
+ kfree(info->state_cache[--(info->state_cache_size)]);
+
+ mutex_destroy(&info->property_lock);
+}
+
+int msm_property_pop_dirty(struct msm_property_info *info)
+{
+ struct list_head *item;
+ int rc = 0;
+
+ if (!info) {
+ DRM_ERROR("invalid info\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&info->property_lock);
+ if (list_empty(&info->dirty_list)) {
+ rc = -EAGAIN;
+ } else {
+ item = info->dirty_list.next;
+ list_del_init(item);
+ rc = container_of(item, struct msm_property_data, dirty_node)
+ - info->property_data;
+ DRM_DEBUG_KMS("property %d dirty\n", rc);
+ }
+ mutex_unlock(&info->property_lock);
+
+ return rc;
+}
+
+/**
+ * _msm_property_set_dirty_no_lock - flag given property as being dirty
+ * This function doesn't mutex protect the
+ * dirty linked list.
+ * @info: Pointer to property info container struct
+ * @property_idx: Property index
+ */
+static void _msm_property_set_dirty_no_lock(
+ struct msm_property_info *info,
+ uint32_t property_idx)
+{
+ if (!info || property_idx >= info->property_count) {
+ DRM_ERROR("invalid argument(s), info %pK, idx %u\n",
+ info, property_idx);
+ return;
+ }
+
+ /* avoid re-inserting if already dirty */
+ if (!list_empty(&info->property_data[property_idx].dirty_node)) {
+ DRM_DEBUG_KMS("property %u already dirty\n", property_idx);
+ return;
+ }
+
+ list_add_tail(&info->property_data[property_idx].dirty_node,
+ &info->dirty_list);
+}
+
+/**
+ * _msm_property_install_integer - install standard drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether or not to filter 'dirty' status on unchanged values
+ */
+static void _msm_property_install_integer(struct msm_property_info *info,
+ const char *name, int flags, uint64_t min, uint64_t max,
+ uint64_t init, uint32_t property_idx, bool force_dirty)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (!name || (property_idx >= info->property_count)) {
+ DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ *prop = drm_property_create_range(info->dev,
+ flags, name, min, max);
+ if (*prop == 0)
+ DRM_ERROR("create %s property failed\n", name);
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = init;
+ info->property_data[property_idx].force_dirty = force_dirty;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, init);
+ ++info->install_count;
+ }
+ }
+}
+
+/**
+ * _msm_property_install_integer - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ * @force_dirty: Whether or not to filter 'dirty' status on unchanged values
+ */
+static void _msm_property_install_signed_integer(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx, bool force_dirty)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (!name || (property_idx >= info->property_count)) {
+ DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ *prop = drm_property_create_signed_range(info->dev,
+ flags, name, min, max);
+ if (*prop == 0)
+ DRM_ERROR("create %s property failed\n", name);
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = I642U64(init);
+ info->property_data[property_idx].force_dirty = force_dirty;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, init);
+ ++info->install_count;
+ }
+ }
+}
+
+void msm_property_install_range(struct msm_property_info *info,
+ const char *name, int flags, uint64_t min, uint64_t max,
+ uint64_t init, uint32_t property_idx)
+{
+ _msm_property_install_integer(info, name, flags,
+ min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_range(struct msm_property_info *info,
+ const char *name, int flags, uint64_t min, uint64_t max,
+ uint64_t init, uint32_t property_idx)
+{
+ _msm_property_install_integer(info, name, flags,
+ min, max, init, property_idx, true);
+}
+
+void msm_property_install_signed_range(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx)
+{
+ _msm_property_install_signed_integer(info, name, flags,
+ min, max, init, property_idx, false);
+}
+
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+ const char *name, int flags, int64_t min, int64_t max,
+ int64_t init, uint32_t property_idx)
+{
+ _msm_property_install_signed_integer(info, name, flags,
+ min, max, init, property_idx, true);
+}
+
+void msm_property_install_rotation(struct msm_property_info *info,
+ unsigned int supported_rotations, uint32_t property_idx)
+{
+ struct drm_property **prop;
+
+ if (!info)
+ return;
+
+ ++info->install_request;
+
+ if (property_idx >= info->property_count) {
+ DRM_ERROR("invalid property index %d\n", property_idx);
+ } else {
+ prop = &info->property_array[property_idx];
+ /*
+ * Properties need to be attached to each drm object that
+ * uses them, but only need to be created once
+ */
+ if (*prop == 0) {
+ *prop = drm_mode_create_rotation_property(info->dev,
+ supported_rotations);
+ if (*prop == 0)
+ DRM_ERROR("create rotation property failed\n");
+ }
+
+ /* save init value for later */
+ info->property_data[property_idx].default_value = 0;
+ info->property_data[property_idx].force_dirty = false;
+
+ /* always attach property, if created */
+ if (*prop) {
+ drm_object_attach_property(info->base, *prop, 0);
+ ++info->install_count;
+ }
+ }
+}
+
+void msm_property_install_enum(struct msm_property_info *info,
+		const char *name, int flags, int is_bitmask,
+		const struct drm_prop_enum_list *values, int num_values,
+		uint32_t property_idx, uint64_t default_value)
+{
+	struct drm_property **prop;
+
+	if (!info)
+		return;
+
+	/* count every request so install_get_status can detect failures */
+	++info->install_request;
+
+	if (!name || !values || !num_values ||
+			(property_idx >= info->property_count)) {
+		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+	} else {
+		prop = &info->property_array[property_idx];
+		/*
+		 * Properties need to be attached to each drm object that
+		 * uses them, but only need to be created once
+		 */
+		if (!*prop) {
+			/* 'bitmask' is a special type of 'enum' */
+			if (is_bitmask)
+				*prop = drm_property_create_bitmask(info->dev,
+						DRM_MODE_PROP_BITMASK | flags,
+						name, values, num_values, -1);
+			else
+				*prop = drm_property_create_enum(info->dev,
+						DRM_MODE_PROP_ENUM | flags,
+						name, values, num_values);
+			if (!*prop)
+				DRM_ERROR("create %s property failed\n", name);
+		}
+
+		/* save init value for later */
+		info->property_data[property_idx].default_value = default_value;
+		info->property_data[property_idx].force_dirty = false;
+
+		/* select first defined value for enums */
+		if (!is_bitmask)
+			info->property_data[property_idx].default_value =
+				values->type;
+
+		/* always attach property, if created */
+		if (*prop) {
+			drm_object_attach_property(info->base, *prop,
+					info->property_data
+					[property_idx].default_value);
+			++info->install_count;
+		}
+	}
+}
+
+void msm_property_install_blob(struct msm_property_info *info,
+		const char *name, int flags, uint32_t property_idx)
+{
+	struct drm_property **prop;
+
+	if (!info)
+		return;
+
+	/* count every request so install_get_status can detect failures */
+	++info->install_request;
+
+	if (!name || (property_idx >= info->blob_count)) {
+		DRM_ERROR("invalid argument(s), %s\n", name ? name : "null");
+	} else {
+		prop = &info->property_array[property_idx];
+		/*
+		 * Properties need to be attached to each drm object that
+		 * uses them, but only need to be created once
+		 */
+		if (!*prop) {
+			/* use 'create' for blob property place holder */
+			*prop = drm_property_create(info->dev,
+					DRM_MODE_PROP_BLOB | flags, name, 0);
+			if (!*prop)
+				DRM_ERROR("create %s property failed\n", name);
+		}
+
+		/* save init value for later */
+		info->property_data[property_idx].default_value = 0;
+		/* blobs are always flagged dirty on incoming atomic sets */
+		info->property_data[property_idx].force_dirty = true;
+
+		/* always attach property, if created */
+		if (*prop) {
+			drm_object_attach_property(info->base, *prop, -1);
+			++info->install_count;
+		}
+	}
+}
+
+int msm_property_install_get_status(struct msm_property_info *info)
+{
+	/* success only if every install request completed successfully */
+	if (!info || (info->install_request != info->install_count))
+		return -ENOMEM;
+
+	return 0;
+}
+
+int msm_property_index(struct msm_property_info *info,
+ struct drm_property *property)
+{
+ uint32_t count;
+ int32_t idx;
+ int rc = -EINVAL;
+
+ if (!info || !property) {
+ DRM_ERROR("invalid argument(s)\n");
+ } else {
+ /*
+ * Linear search, but start from last found index. This will
+ * help if any single property is accessed multiple times in a
+ * row. Ideally, we could keep a list of properties sorted in
+ * the order of most recent access, but that may be overkill
+ * for now.
+ */
+ mutex_lock(&info->property_lock);
+ idx = info->recent_idx;
+ count = info->property_count;
+ /* visit each slot at most once, walking backwards w/ wraparound */
+ while (count) {
+ --count;
+
+ /* stop searching on match */
+ if (info->property_array[idx] == property) {
+ /* remember the hit to speed up the next lookup */
+ info->recent_idx = idx;
+ rc = idx;
+ break;
+ }
+
+ /* move to next valid index */
+ if (--idx < 0)
+ idx = info->property_count - 1;
+ }
+ mutex_unlock(&info->property_lock);
+ }
+
+ return rc;
+}
+
+int msm_property_atomic_set(struct msm_property_info *info,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs,
+ struct drm_property *property, uint64_t val)
+{
+ struct drm_property_blob *blob;
+ int property_idx, rc = -EINVAL;
+
+ /* map drm property to local index; also validates info/property */
+ property_idx = msm_property_index(info, property);
+ if (!info || (property_idx == -EINVAL) || !property_values) {
+ DRM_DEBUG("Invalid argument(s)\n");
+ } else {
+ /* extra handling for incoming properties */
+ mutex_lock(&info->property_lock);
+ if ((property->flags & DRM_MODE_PROP_BLOB) &&
+ (property_idx < info->blob_count) &&
+ property_blobs) {
+ /* DRM lookup also takes a reference */
+ blob = drm_property_lookup_blob(info->dev,
+ (uint32_t)val);
+ if (!blob) {
+ /* unknown blob id: store 0, don't fail the set */
+ DRM_ERROR("blob not found\n");
+ val = 0;
+ } else {
+ DBG("Blob %u saved", blob->base.id);
+ val = blob->base.id;
+
+ /* save blob - need to clear previous ref */
+ if (property_blobs[property_idx])
+ drm_property_unreference_blob(
+ property_blobs[property_idx]);
+ property_blobs[property_idx] = blob;
+ }
+ }
+
+ /* update value and flag as dirty */
+ if (property_values[property_idx] != val ||
+ info->property_data[property_idx].force_dirty) {
+ property_values[property_idx] = val;
+ _msm_property_set_dirty_no_lock(info, property_idx);
+
+ DBG("%s - %lld", property->name, val);
+ }
+ mutex_unlock(&info->property_lock);
+ rc = 0;
+ }
+
+ return rc;
+}
+
+int msm_property_atomic_get(struct msm_property_info *info,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs,
+		struct drm_property *property, uint64_t *val)
+{
+	int idx;
+
+	/* map drm property to local index; also validates info/property */
+	idx = msm_property_index(info, property);
+	if (!info || (idx == -EINVAL) || !property_values || !val) {
+		DRM_DEBUG("Invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	/* return the locally cached value for the property */
+	mutex_lock(&info->property_lock);
+	*val = property_values[idx];
+	mutex_unlock(&info->property_lock);
+
+	return 0;
+}
+
+void *msm_property_alloc_state(struct msm_property_info *info)
+{
+ void *state = NULL;
+
+ if (!info) {
+ DRM_ERROR("invalid property info\n");
+ return NULL;
+ }
+
+ /* first try to reuse a previously freed state object from the cache */
+ mutex_lock(&info->property_lock);
+ if (info->state_cache_size)
+ state = info->state_cache[--(info->state_cache_size)];
+ mutex_unlock(&info->property_lock);
+
+ /*
+ * Cache miss: fall back to a fresh allocation. Note that cached
+ * objects come back with stale contents - callers presumably reset
+ * them via msm_property_reset_state - TODO confirm.
+ */
+ if (!state && info->state_size)
+ state = kmalloc(info->state_size, GFP_KERNEL);
+
+ if (!state)
+ DRM_ERROR("failed to allocate state\n");
+
+ return state;
+}
+
+/**
+ * _msm_property_free_state - helper function for freeing local state objects
+ * @info: Pointer to property info container struct
+ * @st: Pointer to state object
+ */
+static void _msm_property_free_state(struct msm_property_info *info, void *st)
+{
+	if (!info || !st)
+		return;
+
+	/* stash the object in the reuse cache if there is room, else free */
+	mutex_lock(&info->property_lock);
+	if (info->state_cache_size >= MSM_PROP_STATE_CACHE_SIZE)
+		kfree(st);
+	else
+		info->state_cache[info->state_cache_size++] = st;
+	mutex_unlock(&info->property_lock);
+}
+
+void msm_property_reset_state(struct msm_property_info *info, void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs)
+{
+	uint32_t i;
+
+	if (!info) {
+		DRM_ERROR("invalid property info\n");
+		return;
+	}
+
+	if (state)
+		memset(state, 0, info->state_size);
+
+	/*
+	 * Assign default property values. This helper is mostly used
+	 * to initialize newly created state objects.
+	 */
+	if (property_values)
+		for (i = 0; i < info->property_count; ++i)
+			property_values[i] =
+				info->property_data[i].default_value;
+
+	/* blob pointers start out empty; no references are held yet */
+	if (property_blobs)
+		for (i = 0; i < info->blob_count; ++i)
+			property_blobs[i] = NULL;
+}
+
+void msm_property_duplicate_state(struct msm_property_info *info,
+ void *old_state, void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs)
+{
+ uint32_t i;
+
+ if (!info || !old_state || !state) {
+ DRM_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ /*
+ * Bulk-copy the whole driver state. NOTE(review): property_values is
+ * unused here, presumably because the value array lives inside the
+ * copied state object - confirm against callers.
+ */
+ memcpy(state, old_state, info->state_size);
+
+ if (property_blobs) {
+ /* add ref count for blobs */
+ for (i = 0; i < info->blob_count; ++i)
+ if (property_blobs[i])
+ drm_property_reference_blob(property_blobs[i]);
+ }
+}
+
+void msm_property_destroy_state(struct msm_property_info *info, void *state,
+		uint64_t *property_values,
+		struct drm_property_blob **property_blobs)
+{
+	uint32_t i;
+
+	if (!info || !state) {
+		DRM_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	/* drop the reference taken on each blob when the state was created */
+	if (property_blobs) {
+		for (i = 0; i < info->blob_count; ++i) {
+			if (!property_blobs[i])
+				continue;
+			drm_property_unreference_blob(property_blobs[i]);
+		}
+	}
+
+	/* return the state object to the reuse cache (or free it) */
+	_msm_property_free_state(info, state);
+}
+
+void *msm_property_get_blob(struct msm_property_info *info,
+		struct drm_property_blob **property_blobs,
+		size_t *byte_len,
+		uint32_t property_idx)
+{
+	struct drm_property_blob *blob;
+	size_t len = 0;
+	void *rc = NULL;
+
+	if (!info || !property_blobs || (property_idx >= info->blob_count)) {
+		DRM_ERROR("invalid argument(s)\n");
+	} else {
+		blob = property_blobs[property_idx];
+		if (blob) {
+			len = blob->length;
+			rc = &blob->data;
+		}
+	}
+
+	/* report the blob length, if the caller asked for it */
+	if (byte_len)
+		*byte_len = len;
+
+	return rc;
+}
+
+int msm_property_set_blob(struct msm_property_info *info,
+ struct drm_property_blob **blob_reference,
+ void *blob_data,
+ size_t byte_len,
+ uint32_t property_idx)
+{
+ struct drm_property_blob *blob = NULL;
+ int rc = -EINVAL;
+
+ if (!info || !blob_reference || (property_idx >= info->blob_count)) {
+ DRM_ERROR("invalid argument(s)\n");
+ } else {
+ /* create blob */
+ if (blob_data && byte_len) {
+ blob = drm_property_create_blob(info->dev,
+ byte_len,
+ blob_data);
+ if (IS_ERR_OR_NULL(blob)) {
+ rc = PTR_ERR(blob);
+ DRM_ERROR("failed to create blob, %d\n", rc);
+ goto exit;
+ }
+ }
+
+ /* update drm object; a NULL blob clears the property to id 0 */
+ rc = drm_object_property_set_value(info->base,
+ info->property_array[property_idx],
+ blob ? blob->base.id : 0);
+ if (rc) {
+ /* drop the just-created blob so it is not leaked */
+ DRM_ERROR("failed to set blob to property\n");
+ if (blob)
+ drm_property_unreference_blob(blob);
+ goto exit;
+ }
+
+ /* update local reference, releasing any previously held blob */
+ if (*blob_reference)
+ drm_property_unreference_blob(*blob_reference);
+ *blob_reference = blob;
+ }
+
+exit:
+ return rc;
+}
+
+int msm_property_set_property(struct msm_property_info *info,
+ uint64_t *property_values,
+ uint32_t property_idx,
+ uint64_t val)
+{
+ int rc = -EINVAL;
+
+ /* indices below blob_count are blob properties; rejected here */
+ if (!info || (property_idx >= info->property_count) ||
+ property_idx < info->blob_count || !property_values) {
+ DRM_ERROR("invalid argument(s)\n");
+ } else {
+ struct drm_property *drm_prop;
+
+ mutex_lock(&info->property_lock);
+
+ /* update cached value */
+ /* NOTE(review): property_values was already checked above */
+ if (property_values)
+ property_values[property_idx] = val;
+
+ /* update the new default value for immutables */
+ drm_prop = info->property_array[property_idx];
+ if (drm_prop->flags & DRM_MODE_PROP_IMMUTABLE)
+ info->property_data[property_idx].default_value = val;
+
+ mutex_unlock(&info->property_lock);
+
+ /* update drm object (outside the lock) */
+ rc = drm_object_property_set_value(info->base, drm_prop, val);
+ if (rc)
+ DRM_ERROR("failed set property value, idx %d rc %d\n",
+ property_idx, rc);
+
+ }
+
+ return rc;
+}
+
diff --git a/drivers/gpu/drm/msm/msm_prop.h b/drivers/gpu/drm/msm/msm_prop.h
new file mode 100644
index 000000000000..6e600c4fd02f
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_prop.h
@@ -0,0 +1,432 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _MSM_PROP_H_
+#define _MSM_PROP_H_
+
+#include <linux/list.h>
+#include "msm_drv.h"
+
+#define MSM_PROP_STATE_CACHE_SIZE 2
+
+/**
+ * struct msm_property_data - opaque structure for tracking per
+ * drm-object per property stuff
+ * @default_value: Default property value for this drm object
+ * @dirty_node: Linked list node to track if property is dirty or not
+ * @force_dirty: Always dirty property on incoming sets, rather than checking
+ * for modified values
+ */
+struct msm_property_data {
+ uint64_t default_value;
+ struct list_head dirty_node;
+ bool force_dirty;
+};
+
+/**
+ * struct msm_property_info: Structure for property/state helper functions
+ * @base: Pointer to base drm object (plane/crtc/etc.)
+ * @dev: Pointer to drm device object
+ * @property_array: Pointer to array for storing created property objects
+ * @property_data: Pointer to array for storing private property data
+ * @property_count: Total number of properties
+ * @blob_count: Total number of blob properties, should be <= count
+ * @install_request: Total number of property 'install' requests
+ * @install_count: Total number of successful 'install' requests
+ * @recent_idx: Index of property most recently accessed by set/get
+ * @dirty_list: List of all properties that have been 'atomic_set' but not
+ * yet cleared with 'msm_property_pop_dirty'
+ * @is_active: Whether or not drm component properties are 'active'
+ * @state_cache: Cache of local states, to prevent alloc/free thrashing
+ * @state_size: Size of local state structures
+ * @state_cache_size: Number of state structures currently stored in state_cache
+ * @property_lock: Mutex to protect local variables
+ */
+struct msm_property_info {
+ struct drm_mode_object *base;
+ struct drm_device *dev;
+
+ struct drm_property **property_array;
+ struct msm_property_data *property_data;
+ uint32_t property_count;
+ uint32_t blob_count;
+ uint32_t install_request;
+ uint32_t install_count;
+
+ int32_t recent_idx;
+
+ struct list_head dirty_list;
+ bool is_active;
+
+ void *state_cache[MSM_PROP_STATE_CACHE_SIZE];
+ uint32_t state_size;
+ int32_t state_cache_size;
+ struct mutex property_lock;
+};
+
+/**
+ * msm_property_get_default - query default value of a property
+ * @info: Pointer to property info container struct
+ * @property_idx: Property index
+ * Returns: Default value for specified property
+ */
+static inline
+uint64_t msm_property_get_default(struct msm_property_info *info,
+		uint32_t property_idx)
+{
+	uint64_t value;
+
+	if (!info)
+		return 0;
+
+	/* out-of-range indices report a default of zero */
+	mutex_lock(&info->property_lock);
+	value = (property_idx < info->property_count) ?
+			info->property_data[property_idx].default_value : 0;
+	mutex_unlock(&info->property_lock);
+
+	return value;
+}
+
+/**
+ * msm_property_set_is_active - set overall 'active' status for all properties
+ * @info: Pointer to property info container struct
+ * @is_active: New 'is active' status
+ */
+static inline
+void msm_property_set_is_active(struct msm_property_info *info, bool is_active)
+{
+	if (!info)
+		return;
+
+	mutex_lock(&info->property_lock);
+	info->is_active = is_active;
+	mutex_unlock(&info->property_lock);
+}
+
+/**
+ * msm_property_get_is_active - query property 'is active' status
+ * @info: Pointer to property info container struct
+ * Returns: Current 'is active' status
+ */
+static inline
+bool msm_property_get_is_active(struct msm_property_info *info)
+{
+	bool active = false;
+
+	if (info) {
+		mutex_lock(&info->property_lock);
+		active = info->is_active;
+		mutex_unlock(&info->property_lock);
+	}
+
+	return active;
+}
+
+/**
+ * msm_property_pop_dirty - determine next dirty property and clear
+ * its dirty flag
+ * @info: Pointer to property info container struct
+ * Returns: Valid msm property index on success,
+ * -EAGAIN if no dirty properties are available
+ * Property indices returned from this function are similar
+ * to those returned by the msm_property_index function.
+ */
+int msm_property_pop_dirty(struct msm_property_info *info);
+
+/**
+ * msm_property_init - initialize property info structure
+ * @info: Pointer to property info container struct
+ * @base: Pointer to base drm object (plane/crtc/etc.)
+ * @dev: Pointer to drm device object
+ * @property_array: Pointer to array for storing created property objects
+ * @property_data: Pointer to array for storing private property data
+ * @property_count: Total number of properties
+ * @blob_count: Total number of blob properties, should be <= count
+ * @state_size: Size of local state object
+ */
+void msm_property_init(struct msm_property_info *info,
+ struct drm_mode_object *base,
+ struct drm_device *dev,
+ struct drm_property **property_array,
+ struct msm_property_data *property_data,
+ uint32_t property_count,
+ uint32_t blob_count,
+ uint32_t state_size);
+
+/**
+ * msm_property_destroy - destroy helper info structure
+ *
+ * @info: Pointer to property info container struct
+ */
+void msm_property_destroy(struct msm_property_info *info);
+
+/**
+ * msm_property_install_range - install standard drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ uint64_t min,
+ uint64_t max,
+ uint64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_volatile_range - install drm range property
+ * This function is similar to msm_property_install_range, but assumes
+ * that the property is meant for holding user pointers or descriptors
+ * that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ uint64_t min,
+ uint64_t max,
+ uint64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_signed_range - install signed drm range property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_signed_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int64_t min,
+ int64_t max,
+ int64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_volatile_signed_range - install signed range property
+ * This function is similar to msm_property_install_range, but assumes
+ * that the property is meant for holding user pointers or descriptors
+ * that may reference volatile data without having an updated value.
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @min: Min property value
+ * @max: Max property value
+ * @init: Default Property value
+ * @property_idx: Property index
+ */
+void msm_property_install_volatile_signed_range(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int64_t min,
+ int64_t max,
+ int64_t init,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_rotation - install standard drm rotation property
+ * @info: Pointer to property info container struct
+ * @supported_rotations: Bitmask of supported rotation values (see
+ * drm_mode_create_rotation_property for more details)
+ * @property_idx: Property index
+ */
+void msm_property_install_rotation(struct msm_property_info *info,
+ unsigned int supported_rotations,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_enum - install standard drm enum/bitmask property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Other property type flags, e.g. DRM_MODE_PROP_IMMUTABLE
+ * @is_bitmask: Set to non-zero to create a bitmask property, rather than an
+ * enumeration one
+ * @values: Array of allowable enumeration/bitmask values
+ * @num_values: Size of values array
+ * @property_idx: Property index
+ * @default_value: Default value of current property
+ */
+void msm_property_install_enum(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ int is_bitmask,
+ const struct drm_prop_enum_list *values,
+ int num_values,
+ uint32_t property_idx,
+ uint64_t default_value);
+
+/**
+ * msm_property_install_blob - install standard drm blob property
+ * @info: Pointer to property info container struct
+ * @name: Property name
+ * @flags: Extra flags for property creation
+ * @property_idx: Property index
+ */
+void msm_property_install_blob(struct msm_property_info *info,
+ const char *name,
+ int flags,
+ uint32_t property_idx);
+
+/**
+ * msm_property_install_get_status - query overall status of property additions
+ * @info: Pointer to property info container struct
+ * Returns: Zero if previous property install calls were all successful
+ */
+int msm_property_install_get_status(struct msm_property_info *info);
+
+/**
+ * msm_property_index - determine property index from drm_property ptr
+ * @info: Pointer to property info container struct
+ * @property: Incoming property pointer
+ * Returns: Valid property index, or -EINVAL on error
+ */
+int msm_property_index(struct msm_property_info *info,
+ struct drm_property *property);
+
+/**
+ * msm_property_atomic_set - helper function for atomic property set callback
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ * @property: Incoming property pointer
+ * @val: Incoming property value
+ * Returns: Zero on success
+ */
+int msm_property_atomic_set(struct msm_property_info *info,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs,
+ struct drm_property *property,
+ uint64_t val);
+
+/**
+ * msm_property_atomic_get - helper function for atomic property get callback
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ * @property: Incoming property pointer
+ * @val: Pointer to variable for receiving property value
+ * Returns: Zero on success
+ */
+int msm_property_atomic_get(struct msm_property_info *info,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs,
+ struct drm_property *property,
+ uint64_t *val);
+
+/**
+ * msm_property_alloc_state - helper function for allocating local state objects
+ * @info: Pointer to property info container struct
+ */
+void *msm_property_alloc_state(struct msm_property_info *info);
+
+/**
+ * msm_property_reset_state - helper function for state reset callback
+ * @info: Pointer to property info container struct
+ * @state: Pointer to local state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_reset_state(struct msm_property_info *info,
+ void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_duplicate_state - helper function for duplicate state cb
+ * @info: Pointer to property info container struct
+ * @old_state: Pointer to original state structure
+ * @state: Pointer to newly created state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_duplicate_state(struct msm_property_info *info,
+ void *old_state,
+ void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_destroy_state - helper function for destroy state cb
+ * @info: Pointer to property info container struct
+ * @state: Pointer to local state structure
+ * @property_values: Pointer to property values cache array
+ * @property_blobs: Pointer to property blobs cache array
+ */
+void msm_property_destroy_state(struct msm_property_info *info,
+ void *state,
+ uint64_t *property_values,
+ struct drm_property_blob **property_blobs);
+
+/**
+ * msm_property_get_blob - obtain cached data pointer for drm blob property
+ * @info: Pointer to property info container struct
+ * @property_blobs: Pointer to property blobs cache array
+ * @byte_len: Optional pointer to variable for accepting blob size
+ * @property_idx: Property index
+ * Returns: Pointer to blob data
+ */
+void *msm_property_get_blob(struct msm_property_info *info,
+ struct drm_property_blob **property_blobs,
+ size_t *byte_len,
+ uint32_t property_idx);
+
+/**
+ * msm_property_set_blob - update blob property on a drm object
+ * This function updates the blob property value of the given drm object. Its
+ * intended use is to update blob properties that have been created with the
+ * DRM_MODE_PROP_IMMUTABLE flag set.
+ * @info: Pointer to property info container struct
+ * @blob_reference: Reference to a pointer that holds the created data blob
+ * @blob_data: Pointer to blob data
+ * @byte_len: Length of blob data, in bytes
+ * @property_idx: Property index
+ * Returns: Zero on success
+ */
+int msm_property_set_blob(struct msm_property_info *info,
+ struct drm_property_blob **blob_reference,
+ void *blob_data,
+ size_t byte_len,
+ uint32_t property_idx);
+
+/**
+ * msm_property_set_property - update property on a drm object
+ * This function updates the property value of the given drm object. Its
+ * intended use is to update properties that have been created with the
+ * DRM_MODE_PROP_IMMUTABLE flag set.
+ * Note: This function cannot be called on a blob.
+ * @info: Pointer to property info container struct
+ * @property_values: Pointer to property values cache array
+ * @property_idx: Property index
+ * @val: value of the property to set
+ * Returns: Zero on success
+ */
+int msm_property_set_property(struct msm_property_info *info,
+ uint64_t *property_values,
+ uint32_t property_idx,
+ uint64_t val);
+
+#endif /* _MSM_PROP_H_ */
+
diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c
index 909a52b21ebe..e7e1dc293a5d 100644
--- a/drivers/gpu/drm/msm/msm_rd.c
+++ b/drivers/gpu/drm/msm/msm_rd.c
@@ -27,6 +27,11 @@
* This bypasses drm_debugfs_create_files() mainly because we need to use
* our own fops for a bit more control. In particular, we don't want to
* do anything if userspace doesn't have the debugfs file open.
+ *
+ * The module-param "rd_full", which defaults to false, enables snapshotting
+ * all (non-written) buffers in the submit, rather than just cmdstream bo's.
+ * This is useful to capture the contents of (for example) vbo's or textures,
+ * or shader programs (if not emitted inline in cmdstream).
*/
#ifdef CONFIG_DEBUG_FS
@@ -40,6 +45,10 @@
#include "msm_gpu.h"
#include "msm_gem.h"
+static bool rd_full = false;
+MODULE_PARM_DESC(rd_full, "If true, $debugfs/.../rd will snapshot all buffer contents");
+module_param_named(rd_full, rd_full, bool, 0600);
+
enum rd_sect_type {
RD_NONE,
RD_TEST, /* ascii text */
@@ -282,6 +291,36 @@ void msm_rd_debugfs_cleanup(struct drm_minor *minor)
kfree(rd);
}
+static void snapshot_buf(struct msm_rd_state *rd,
+ struct msm_gem_submit *submit, int idx,
+ uint64_t iova, uint32_t size)
+{
+ struct msm_gem_object *obj = submit->bos[idx].obj;
+ uint64_t offset = 0;
+
+ /* iova==0 means "snapshot the whole bo"; otherwise only the
+ * [iova, iova+size) sub-range of the bo is dumped
+ */
+ if (iova) {
+ offset = iova - submit->bos[idx].iova;
+ } else {
+ iova = submit->bos[idx].iova;
+ size = obj->base.size;
+ }
+
+ /* Always write the RD_GPUADDR so we know how big the buffer is */
+ rd_write_section(rd, RD_GPUADDR,
+ (uint64_t[2]) { iova, size }, 16);
+
+ /* But only dump contents for buffers marked as read and not secure */
+ if (submit->bos[idx].flags & MSM_SUBMIT_BO_READ &&
+ !(obj->flags & MSM_BO_SECURE)) {
+ const char *buf = msm_gem_vaddr(&obj->base);
+
+ /* mapping may fail; skip contents (GPUADDR already written) */
+ if (IS_ERR_OR_NULL(buf))
+ return;
+
+ rd_write_section(rd, RD_BUFFER_CONTENTS, buf + offset, size);
+ }
+}
+
/* called under struct_mutex */
void msm_rd_dump_submit(struct msm_gem_submit *submit)
{
@@ -305,24 +344,20 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4));
- /* could be nice to have an option (module-param?) to snapshot
- * all the bo's associated with the submit. Handy to see vtx
- * buffers, etc. For now just the cmdstream bo's is enough.
- */
+ if (rd_full) {
+ for (i = 0; i < submit->nr_bos; i++)
+ snapshot_buf(rd, submit, i, 0, 0);
+ }
for (i = 0; i < submit->nr_cmds; i++) {
- uint32_t idx = submit->cmd[i].idx;
- uint32_t iova = submit->cmd[i].iova;
+ uint64_t iova = submit->cmd[i].iova;
uint32_t szd = submit->cmd[i].size; /* in dwords */
- struct msm_gem_object *obj = submit->bos[idx].obj;
- const char *buf = msm_gem_vaddr_locked(&obj->base);
- buf += iova - submit->bos[idx].iova;
-
- rd_write_section(rd, RD_GPUADDR,
- (uint32_t[2]){ iova, szd * 4 }, 8);
- rd_write_section(rd, RD_BUFFER_CONTENTS,
- buf, szd * 4);
+ /* snapshot cmdstream bo's (if we haven't already): */
+ if (!rd_full) {
+ snapshot_buf(rd, submit, submit->cmd[i].idx,
+ submit->cmd[i].iova, szd * 4);
+ }
switch (submit->cmd[i].type) {
case MSM_SUBMIT_CMD_IB_TARGET_BUF:
@@ -334,7 +369,7 @@ void msm_rd_dump_submit(struct msm_gem_submit *submit)
case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
case MSM_SUBMIT_CMD_BUF:
rd_write_section(rd, RD_CMDSTREAM_ADDR,
- (uint32_t[2]){ iova, szd }, 8);
+ (uint64_t[2]) { iova, szd }, 16);
break;
}
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.c b/drivers/gpu/drm/msm/msm_ringbuffer.c
index ae317271cf81..2a5843e6f81b 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.c
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.c
@@ -18,13 +18,14 @@
#include "msm_ringbuffer.h"
#include "msm_gpu.h"
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ struct msm_memptrs *memptrs, uint64_t memptrs_iova)
{
struct msm_ringbuffer *ring;
int ret;
- if (WARN_ON(!is_power_of_2(size)))
- return ERR_PTR(-EINVAL);
+ /* We assume everywhere that MSM_GPU_RINGBUFFER_SZ is a power of 2 */
+ BUILD_BUG_ON(!is_power_of_2(MSM_GPU_RINGBUFFER_SZ));
ring = kzalloc(sizeof(*ring), GFP_KERNEL);
if (!ring) {
@@ -33,18 +34,26 @@ struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size)
}
ring->gpu = gpu;
- ring->bo = msm_gem_new(gpu->dev, size, MSM_BO_WC);
+ ring->id = id;
+ ring->bo = msm_gem_new(gpu->dev, MSM_GPU_RINGBUFFER_SZ,
+ MSM_BO_WC);
if (IS_ERR(ring->bo)) {
ret = PTR_ERR(ring->bo);
ring->bo = NULL;
goto fail;
}
- ring->start = msm_gem_vaddr_locked(ring->bo);
- ring->end = ring->start + (size / 4);
+ ring->memptrs = memptrs;
+ ring->memptrs_iova = memptrs_iova;
+
+
+ ring->start = msm_gem_vaddr(ring->bo);
+ ring->end = ring->start + (MSM_GPU_RINGBUFFER_SZ >> 2);
+ ring->next = ring->start;
ring->cur = ring->start;
- ring->size = size;
+ INIT_LIST_HEAD(&ring->submits);
+ spin_lock_init(&ring->lock);
return ring;
@@ -56,7 +65,10 @@ fail:
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring)
{
- if (ring->bo)
+ if (ring && ring->bo) {
+ msm_gem_put_iova(ring->bo, ring->gpu->aspace);
drm_gem_object_unreference_unlocked(ring->bo);
+ }
+
kfree(ring);
}
diff --git a/drivers/gpu/drm/msm/msm_ringbuffer.h b/drivers/gpu/drm/msm/msm_ringbuffer.h
index 6e0e1049fa4f..e9678d57fffd 100644
--- a/drivers/gpu/drm/msm/msm_ringbuffer.h
+++ b/drivers/gpu/drm/msm/msm_ringbuffer.h
@@ -20,14 +20,46 @@
#include "msm_drv.h"
+#define rbmemptr(ring, member) \
+ ((ring)->memptrs_iova + offsetof(struct msm_memptrs, member))
+
+struct msm_memptr_ticks {
+ uint64_t started;
+ uint64_t retired;
+};
+
+struct msm_memptrs {
+ volatile uint32_t rptr;
+ volatile uint32_t fence;
+ volatile uint64_t ttbr0;
+ volatile unsigned int contextidr;
+ struct msm_memptr_ticks ticks[128];
+};
+
+#define RING_TICKS_IOVA(ring, index, field) \
+ ((ring)->memptrs_iova + offsetof(struct msm_memptrs, ticks) + \
+ ((index) * sizeof(struct msm_memptr_ticks)) + \
+ offsetof(struct msm_memptr_ticks, field))
+
struct msm_ringbuffer {
struct msm_gpu *gpu;
- int size;
+ int id;
struct drm_gem_object *bo;
- uint32_t *start, *end, *cur;
+ uint32_t *start, *end, *cur, *next;
+ uint64_t iova;
+ uint32_t seqno;
+ uint32_t submitted_fence;
+ spinlock_t lock;
+ struct list_head submits;
+ uint32_t hangcheck_fence;
+
+ struct msm_memptrs *memptrs;
+ uint64_t memptrs_iova;
+ int tick_index;
};
-struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int size);
+struct msm_ringbuffer *msm_ringbuffer_new(struct msm_gpu *gpu, int id,
+ struct msm_memptrs *memptrs, uint64_t memptrs_iova);
void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
/* ringbuffer helpers (the parts that are same for a3xx/a2xx/z180..) */
@@ -35,9 +67,13 @@ void msm_ringbuffer_destroy(struct msm_ringbuffer *ring);
static inline void
OUT_RING(struct msm_ringbuffer *ring, uint32_t data)
{
- if (ring->cur == ring->end)
- ring->cur = ring->start;
- *(ring->cur++) = data;
+ /*
+ * ring->next points to the current command being written - it won't be
+ * committed as ring->cur until the flush
+ */
+ if (ring->next == ring->end)
+ ring->next = ring->start;
+ *(ring->next++) = data;
}
#endif /* __MSM_RINGBUFFER_H__ */
diff --git a/drivers/gpu/drm/msm/msm_smmu.c b/drivers/gpu/drm/msm/msm_smmu.c
new file mode 100644
index 000000000000..eed3cfcb99ee
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_smmu.c
@@ -0,0 +1,496 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/pm_runtime.h>
+#include <linux/msm_dma_iommu_mapping.h>
+
+#include <asm/dma-iommu.h>
+#include <soc/qcom/secure_buffer.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#ifndef SZ_4G
+#define SZ_4G (((size_t) SZ_1G) * 4)
+#endif
+
+struct msm_smmu_client {
+ struct device *dev;
+ struct dma_iommu_mapping *mmu_mapping;
+ bool domain_attached;
+};
+
+struct msm_smmu {
+ struct msm_mmu base;
+ struct device *client_dev;
+ struct msm_smmu_client *client;
+};
+
+struct msm_smmu_domain {
+ const char *label;
+ size_t va_start;
+ size_t va_size;
+ bool secure;
+};
+
+#define to_msm_smmu(x) container_of(x, struct msm_smmu, base)
+#define msm_smmu_to_client(smmu) (smmu->client)
+
+
+static int msm_smmu_fault_handler(struct iommu_domain *iommu,
+ struct device *dev, unsigned long iova, int flags, void *arg)
+{
+
+ dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%pK\n", __func__,
+ iova, flags, iommu);
+ return 0;
+}
+
+
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+ const struct msm_smmu_domain *domain);
+
+static int msm_smmu_attach(struct msm_mmu *mmu, const char **names, int cnt)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int rc = 0;
+
+ if (!client) {
+ pr_err("undefined smmu client\n");
+ return -EINVAL;
+ }
+
+ /* domain attach only once */
+ if (client->domain_attached)
+ return 0;
+
+ rc = arm_iommu_attach_device(client->dev,
+ client->mmu_mapping);
+ if (rc) {
+ dev_err(client->dev, "iommu attach dev failed (%d)\n",
+ rc);
+ return rc;
+ }
+
+ client->domain_attached = true;
+
+ dev_dbg(client->dev, "iommu domain attached\n");
+
+ return 0;
+}
+
+static void msm_smmu_detach(struct msm_mmu *mmu)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ if (!client) {
+ pr_err("undefined smmu client\n");
+ return;
+ }
+
+ if (!client->domain_attached)
+ return;
+
+ arm_iommu_detach_device(client->dev);
+ client->domain_attached = false;
+ dev_dbg(client->dev, "iommu domain detached\n");
+}
+
+static int msm_smmu_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, u32 flags, void *priv)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ int ret;
+
+ if (!client || !sgt)
+ return -EINVAL;
+
+ if (priv)
+ ret = msm_dma_map_sg_lazy(client->dev, sgt->sgl,
+ sgt->nents, DMA_BIDIRECTIONAL, priv);
+ else
+ ret = dma_map_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL);
+
+ return (ret != sgt->nents) ? -ENOMEM : 0;
+}
+
+static void msm_smmu_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, void *priv)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ if (priv)
+ msm_dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL, priv);
+ else
+ dma_unmap_sg(client->dev, sgt->sgl, sgt->nents,
+ DMA_BIDIRECTIONAL);
+}
+
+static int msm_smmu_early_splash_map(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt, u32 flags)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+
+ if (!client || !sgt)
+ return -EINVAL;
+
+ if (!client->mmu_mapping || !client->mmu_mapping->domain)
+ return -EINVAL;
+
+ domain = client->mmu_mapping->domain;
+
+ return iommu_map_sg(domain, iova, sgt->sgl, sgt->nents, flags);
+}
+
+static void msm_smmu_early_splash_unmap(struct msm_mmu *mmu, uint64_t iova,
+ struct sg_table *sgt)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ struct scatterlist *sg;
+ size_t len = 0;
+ int unmapped, i = 0;
+
+ if (!client || !client->mmu_mapping || !client->mmu_mapping->domain)
+ return;
+
+ domain = client->mmu_mapping->domain;
+
+ for_each_sg(sgt->sgl, sg, sgt->nents, i)
+ len += sg->length;
+
+ unmapped = iommu_unmap(domain, iova, len);
+ if (unmapped < len)
+ DRM_ERROR("could not unmap iova@%llx\n", iova);
+}
+
+static void msm_smmu_destroy(struct msm_mmu *mmu)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct platform_device *pdev = to_platform_device(smmu->client_dev);
+
+ if (smmu->client_dev)
+ platform_device_unregister(pdev);
+ kfree(smmu);
+}
+
+/* user can call this API to set the attribute of smmu */
+static int msm_smmu_set_property(struct msm_mmu *mmu,
+ enum iommu_attr attr, void *data)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+ struct iommu_domain *domain;
+ int ret = 0;
+
+ if (!client)
+ return -EINVAL;
+
+ domain = client->mmu_mapping->domain;
+ if (!domain)
+ return -EINVAL;
+
+ ret = iommu_domain_set_attr(domain, attr, data);
+ if (ret)
+ DRM_ERROR("set domain attribute failed\n");
+
+ return ret;
+}
+
+static const struct msm_mmu_funcs funcs = {
+ .attach = msm_smmu_attach,
+ .detach = msm_smmu_detach,
+ .map = msm_smmu_map,
+ .unmap = msm_smmu_unmap,
+ .destroy = msm_smmu_destroy,
+ .early_splash_map = msm_smmu_early_splash_map,
+ .early_splash_unmap = msm_smmu_early_splash_unmap,
+ .set_property = msm_smmu_set_property,
+};
+
+static struct msm_smmu_domain msm_smmu_domains[MSM_SMMU_DOMAIN_MAX] = {
+ [MSM_SMMU_DOMAIN_UNSECURE] = {
+ .label = "mdp_ns",
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
+ .secure = false,
+ },
+ [MSM_SMMU_DOMAIN_SECURE] = {
+ .label = "mdp_s",
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
+ .secure = true,
+ },
+ [MSM_SMMU_DOMAIN_NRT_UNSECURE] = {
+ .label = "rot_ns",
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
+ .secure = false,
+ },
+ [MSM_SMMU_DOMAIN_NRT_SECURE] = {
+ .label = "rot_s",
+ .va_start = SZ_128K,
+ .va_size = SZ_4G - SZ_128K,
+ .secure = true,
+ },
+};
+
+static const struct of_device_id msm_smmu_dt_match[] = {
+ { .compatible = "qcom,smmu_sde_unsec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_UNSECURE] },
+ { .compatible = "qcom,smmu_sde_sec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_SECURE] },
+ { .compatible = "qcom,smmu_sde_nrt_unsec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_UNSECURE] },
+ { .compatible = "qcom,smmu_sde_nrt_sec",
+ .data = &msm_smmu_domains[MSM_SMMU_DOMAIN_NRT_SECURE] },
+ {}
+};
+MODULE_DEVICE_TABLE(of, msm_smmu_dt_match);
+
+static struct device *msm_smmu_device_create(struct device *dev,
+ enum msm_mmu_domain_type domain,
+ struct msm_smmu *smmu)
+{
+ struct device_node *child;
+ struct platform_device *pdev;
+ int i;
+ const char *compat = NULL;
+
+ for (i = 0; i < ARRAY_SIZE(msm_smmu_dt_match); i++) {
+ if (msm_smmu_dt_match[i].data == &msm_smmu_domains[domain]) {
+ compat = msm_smmu_dt_match[i].compatible;
+ break;
+ }
+ }
+
+ if (!compat) {
+ DRM_ERROR("unable to find matching domain for %d\n", domain);
+ return ERR_PTR(-ENOENT);
+ }
+ DRM_INFO("found domain %d compat: %s\n", domain, compat);
+
+ if (domain == MSM_SMMU_DOMAIN_UNSECURE) {
+ int rc;
+
+ smmu->client = devm_kzalloc(dev,
+ sizeof(struct msm_smmu_client), GFP_KERNEL);
+ if (!smmu->client)
+ return ERR_PTR(-ENOMEM);
+
+ smmu->client->dev = dev;
+
+ rc = _msm_smmu_create_mapping(msm_smmu_to_client(smmu),
+ msm_smmu_dt_match[i].data);
+ if (rc) {
+ devm_kfree(dev, smmu->client);
+ smmu->client = NULL;
+ return ERR_PTR(rc);
+ }
+
+ return NULL;
+ }
+
+ child = of_find_compatible_node(dev->of_node, NULL, compat);
+ if (!child) {
+ DRM_ERROR("unable to find compatible node for %s\n", compat);
+ return ERR_PTR(-ENODEV);
+ }
+
+ pdev = of_platform_device_create(child, NULL, dev);
+ if (!pdev) {
+ DRM_ERROR("unable to create smmu platform dev for domain %d\n",
+ domain);
+ return ERR_PTR(-ENODEV);
+ }
+
+ smmu->client = platform_get_drvdata(pdev);
+
+ return &pdev->dev;
+}
+
+void msm_smmu_register_fault_handler(struct msm_mmu *mmu,
+ iommu_fault_handler_t handler)
+{
+ struct msm_smmu *smmu = to_msm_smmu(mmu);
+ struct msm_smmu_client *client = msm_smmu_to_client(smmu);
+
+ if (client)
+ iommu_set_fault_handler(client->mmu_mapping->domain,
+ handler, client->dev);
+
+}
+
+struct msm_mmu *msm_smmu_new(struct device *dev,
+ enum msm_mmu_domain_type domain)
+{
+ struct msm_smmu *smmu;
+ struct device *client_dev;
+ struct msm_smmu_client *client;
+
+ smmu = kzalloc(sizeof(*smmu), GFP_KERNEL);
+ if (!smmu)
+ return ERR_PTR(-ENOMEM);
+
+ client_dev = msm_smmu_device_create(dev, domain, smmu);
+ if (IS_ERR(client_dev)) {
+ kfree(smmu);
+ return (void *)client_dev ? : ERR_PTR(-ENODEV);
+ }
+
+ smmu->client_dev = client_dev;
+ msm_mmu_init(&smmu->base, dev, &funcs);
+
+ client = msm_smmu_to_client(smmu);
+ if (client)
+ iommu_set_fault_handler(client->mmu_mapping->domain,
+ msm_smmu_fault_handler, dev);
+
+ return &smmu->base;
+}
+
+static int _msm_smmu_create_mapping(struct msm_smmu_client *client,
+ const struct msm_smmu_domain *domain)
+{
+ int rc;
+
+ client->mmu_mapping = arm_iommu_create_mapping(&platform_bus_type,
+ domain->va_start, domain->va_size);
+ if (IS_ERR(client->mmu_mapping)) {
+ dev_err(client->dev,
+ "iommu create mapping failed for domain=%s\n",
+ domain->label);
+ return PTR_ERR(client->mmu_mapping);
+ }
+
+ if (domain->secure) {
+ int secure_vmid = VMID_CP_PIXEL;
+
+ rc = iommu_domain_set_attr(client->mmu_mapping->domain,
+ DOMAIN_ATTR_SECURE_VMID, &secure_vmid);
+ if (rc) {
+ dev_err(client->dev, "couldn't set secure pix vmid\n");
+ goto error;
+ }
+ }
+
+ DRM_INFO("Created domain %s [%zx,%zx] secure=%d\n",
+ domain->label, domain->va_start, domain->va_size,
+ domain->secure);
+
+ return 0;
+
+error:
+ arm_iommu_release_mapping(client->mmu_mapping);
+ return rc;
+}
+
+/**
+ * msm_smmu_probe()
+ * @pdev: platform device
+ *
+ * Each smmu context acts as a separate device and the context banks are
+ * configured with a VA range.
+ * Registers the clks as each context bank has its own clks, for which voting
+ * has to be done every time before using that context bank.
+ */
+static int msm_smmu_probe(struct platform_device *pdev)
+{
+ const struct of_device_id *match;
+ struct msm_smmu_client *client;
+ const struct msm_smmu_domain *domain;
+ int rc;
+
+ match = of_match_device(msm_smmu_dt_match, &pdev->dev);
+ if (!match || !match->data) {
+ dev_err(&pdev->dev, "probe failed as match data is invalid\n");
+ return -EINVAL;
+ }
+
+ domain = match->data;
+ if (!domain) {
+ dev_err(&pdev->dev, "no matching device found\n");
+ return -EINVAL;
+ }
+
+ DRM_INFO("probing device %s\n", match->compatible);
+
+ client = devm_kzalloc(&pdev->dev, sizeof(*client), GFP_KERNEL);
+ if (!client)
+ return -ENOMEM;
+
+ client->dev = &pdev->dev;
+
+ rc = _msm_smmu_create_mapping(client, domain);
+ platform_set_drvdata(pdev, client);
+
+ return rc;
+}
+
+static int msm_smmu_remove(struct platform_device *pdev)
+{
+ struct msm_smmu_client *client;
+
+ client = platform_get_drvdata(pdev);
+ if (client->domain_attached) {
+ arm_iommu_detach_device(client->dev);
+ client->domain_attached = false;
+ }
+ arm_iommu_release_mapping(client->mmu_mapping);
+
+ return 0;
+}
+
+static struct platform_driver msm_smmu_driver = {
+ .probe = msm_smmu_probe,
+ .remove = msm_smmu_remove,
+ .driver = {
+ .name = "msmdrm_smmu",
+ .of_match_table = msm_smmu_dt_match,
+ },
+};
+
+int __init msm_smmu_driver_init(void)
+{
+ int ret;
+
+ ret = platform_driver_register(&msm_smmu_driver);
+ if (ret)
+ pr_err("mdss_smmu_register_driver() failed!\n");
+
+ return ret;
+}
+
+void __exit msm_smmu_driver_cleanup(void)
+{
+ platform_driver_unregister(&msm_smmu_driver);
+}
+
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("MSM SMMU driver");
diff --git a/drivers/gpu/drm/msm/msm_snapshot.c b/drivers/gpu/drm/msm/msm_snapshot.c
new file mode 100644
index 000000000000..30f3e5c64ebd
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.c
@@ -0,0 +1,105 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "msm_gpu.h"
+#include "msm_gem.h"
+#include "msm_snapshot_api.h"
+
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+
+ if (!snapshot)
+ return;
+
+ dma_free_coherent(&pdev->dev, SZ_1M, snapshot->ptr,
+ snapshot->physaddr);
+
+ kfree(snapshot);
+}
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu)
+{
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct msm_snapshot *snapshot;
+
+ snapshot = kzalloc(sizeof(*snapshot), GFP_KERNEL);
+ if (!snapshot)
+ return ERR_PTR(-ENOMEM);
+
+ snapshot->ptr = dma_alloc_coherent(&pdev->dev, SZ_1M,
+ &snapshot->physaddr, GFP_KERNEL);
+
+ if (!snapshot->ptr) {
+ kfree(snapshot);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ return snapshot;
+}
+
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot)
+{
+ int ret;
+ struct msm_snapshot_header header;
+ uint64_t val;
+
+ if (!snapshot)
+ return -ENOMEM;
+
+ /*
+ * For now, blow away the snapshot and take a new one - the most
+ * interesting hang is the last one we saw
+ */
+ seq_buf_init(&snapshot->buf, snapshot->ptr, SZ_1M);
+
+ header.magic = SNAPSHOT_MAGIC;
+ gpu->funcs->get_param(gpu, MSM_PARAM_GPU_ID, &val);
+ header.gpuid = lower_32_bits(val);
+
+ gpu->funcs->get_param(gpu, MSM_PARAM_CHIP_ID, &val);
+ header.chipid = lower_32_bits(val);
+
+ seq_buf_putmem(&snapshot->buf, &header, sizeof(header));
+
+ ret = gpu->funcs->snapshot(gpu, snapshot);
+
+ if (!ret) {
+ struct msm_snapshot_section_header end;
+
+ end.magic = SNAPSHOT_SECTION_MAGIC;
+ end.id = SNAPSHOT_SECTION_END;
+ end.size = sizeof(end);
+
+ seq_buf_putmem(&snapshot->buf, &end, sizeof(end));
+
+ dev_info(gpu->dev->dev, "GPU snapshot created [0x%pa (%d bytes)]\n",
+ &snapshot->physaddr, seq_buf_used(&snapshot->buf));
+ }
+
+ return ret;
+}
+
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m)
+{
+ if (gpu && gpu->snapshot)
+ seq_write(m, gpu->snapshot->ptr,
+ seq_buf_used(&gpu->snapshot->buf));
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/msm_snapshot.h b/drivers/gpu/drm/msm/msm_snapshot.h
new file mode 100644
index 000000000000..fd560b2129f1
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_H_
+#define MSM_SNAPSHOT_H_
+
+#include <linux/string.h>
+#include <linux/seq_buf.h>
+#include "msm_snapshot_api.h"
+
+struct msm_snapshot {
+ void *ptr;
+ struct seq_buf buf;
+ phys_addr_t physaddr;
+ uint32_t index;
+ uint32_t remain;
+ unsigned long timestamp;
+ void *priv;
+};
+
+/* Write a uint32_t value to the next position in the snapshot buffer */
+static inline void SNAPSHOT_WRITE_U32(struct msm_snapshot *snapshot,
+ uint32_t value)
+{
+ seq_buf_putmem(&snapshot->buf, &value, sizeof(value));
+}
+
+/* Copy a block of memory to the next position in the snapshot buffer */
+static inline void SNAPSHOT_MEMCPY(struct msm_snapshot *snapshot, void *src,
+ uint32_t size)
+{
+ if (size)
+ seq_buf_putmem(&snapshot->buf, src, size);
+}
+
+static inline bool _snapshot_header(struct msm_snapshot *snapshot,
+ struct msm_snapshot_section_header *header,
+ u32 headsz, u32 datasz, u32 id)
+{
+ u32 size = headsz + datasz;
+
+ if (seq_buf_buffer_left(&snapshot->buf) <= size)
+ return false;
+
+ /* Write the section header */
+ header->magic = SNAPSHOT_SECTION_MAGIC;
+ header->id = id;
+ header->size = headsz + datasz;
+
+ /* Copy the section header into the snapshot buffer */
+ seq_buf_putmem(&snapshot->buf, header, headsz);
+
+ /* The caller will fill in the data from here */
+ return true;
+}
+
+/* SNAPSHOT_HEADER
+ * _snapshot: pointer to struct msm_snapshot
+ * _header: Local variable containing the sub-section header
+ * _id: Section ID to write
+ * _dwords: Size of the data section (in dwords)
+ */
+#define SNAPSHOT_HEADER(_snapshot, _header, _id, _dwords) \
+ _snapshot_header((_snapshot), \
+ (struct msm_snapshot_section_header *) &(_header), \
+ sizeof(_header), (_dwords) << 2, (_id))
+
+struct msm_gpu;
+
+struct msm_snapshot *msm_snapshot_new(struct msm_gpu *gpu);
+void msm_snapshot_destroy(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_gpu_snapshot(struct msm_gpu *gpu, struct msm_snapshot *snapshot);
+int msm_snapshot_write(struct msm_gpu *gpu, struct seq_file *m);
+
+#endif
+
diff --git a/drivers/gpu/drm/msm/msm_snapshot_api.h b/drivers/gpu/drm/msm/msm_snapshot_api.h
new file mode 100644
index 000000000000..7ad6f0498423
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_snapshot_api.h
@@ -0,0 +1,134 @@
+/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef MSM_SNAPSHOT_API_H_
+#define MSM_SNAPSHOT_API_H_
+
+#include <linux/types.h>
+
+/* High word is the magic, low word is the snapshot header version */
+#define SNAPSHOT_MAGIC 0x504D0002
+
+struct msm_snapshot_header {
+ __u32 magic;
+ __u32 gpuid;
+ __u32 chipid;
+} __packed;
+
+#define SNAPSHOT_SECTION_MAGIC 0xABCD
+
+struct msm_snapshot_section_header {
+ __u16 magic;
+ __u16 id;
+ __u32 size;
+} __packed;
+
+/* Section identifiers */
+#define SNAPSHOT_SECTION_OS 0x0101
+#define SNAPSHOT_SECTION_REGS_V2 0x0202
+#define SNAPSHOT_SECTION_RB_V2 0x0302
+#define SNAPSHOT_SECTION_IB_V2 0x0402
+#define SNAPSHOT_SECTION_INDEXED_REGS 0x0501
+#define SNAPSHOT_SECTION_DEBUG 0x0901
+#define SNAPSHOT_SECTION_DEBUGBUS 0x0A01
+#define SNAPSHOT_SECTION_GPU_OBJECT_V2 0x0B02
+#define SNAPSHOT_SECTION_MEMLIST_V2 0x0E02
+#define SNAPSHOT_SECTION_SHADER 0x1201
+#define SNAPSHOT_SECTION_END 0xFFFF
+
+#define SNAPSHOT_OS_LINUX_V3 0x00000202
+
+struct msm_snapshot_linux {
+ struct msm_snapshot_section_header header;
+ int osid;
+ __u32 seconds;
+ __u32 power_flags;
+ __u32 power_level;
+ __u32 power_interval_timeout;
+ __u32 grpclk;
+ __u32 busclk;
+ __u64 ptbase;
+ __u32 pid;
+ __u32 current_context;
+ __u32 ctxtcount;
+ unsigned char release[32];
+ unsigned char version[32];
+ unsigned char comm[16];
+} __packed;
+
+struct msm_snapshot_ringbuffer {
+ struct msm_snapshot_section_header header;
+ int start;
+ int end;
+ int rbsize;
+ int wptr;
+ int rptr;
+ int count;
+ __u32 timestamp_queued;
+ __u32 timestamp_retired;
+ __u64 gpuaddr;
+ __u32 id;
+} __packed;
+
+struct msm_snapshot_regs {
+ struct msm_snapshot_section_header header;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_indexed_regs {
+ struct msm_snapshot_section_header header;
+ __u32 index_reg;
+ __u32 data_reg;
+ __u32 start;
+ __u32 count;
+} __packed;
+
+#define SNAPSHOT_DEBUG_CP_MEQ 7
+#define SNAPSHOT_DEBUG_CP_PM4_RAM 8
+#define SNAPSHOT_DEBUG_CP_PFP_RAM 9
+#define SNAPSHOT_DEBUG_CP_ROQ 10
+#define SNAPSHOT_DEBUG_SHADER_MEMORY 11
+#define SNAPSHOT_DEBUG_CP_MERCIU 12
+
+struct msm_snapshot_debug {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 size;
+} __packed;
+
+struct msm_snapshot_debugbus {
+ struct msm_snapshot_section_header header;
+ __u32 id;
+ __u32 count;
+} __packed;
+
+struct msm_snapshot_shader {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u32 index;
+ __u32 size;
+} __packed;
+
+#define SNAPSHOT_GPU_OBJECT_SHADER 1
+#define SNAPSHOT_GPU_OBJECT_IB 2
+#define SNAPSHOT_GPU_OBJECT_GENERIC 3
+#define SNAPSHOT_GPU_OBJECT_DRAW 4
+#define SNAPSHOT_GPU_OBJECT_GLOBAL 5
+
+struct msm_snapshot_gpu_object {
+ struct msm_snapshot_section_header header;
+ __u32 type;
+ __u64 gpuaddr;
+ __u64 pt_base;
+ __u64 size;
+} __packed;
+#endif
diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c
new file mode 100644
index 000000000000..f79e74071c79
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_submitqueue.c
@@ -0,0 +1,151 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/kref.h>
+#include "msm_gpu.h"
+
+void msm_submitqueue_destroy(struct kref *kref)
+{
+ struct msm_gpu_submitqueue *queue = container_of(kref,
+ struct msm_gpu_submitqueue, ref);
+
+ kfree(queue);
+}
+
+struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_file_private *ctx,
+ u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ if (!ctx)
+ return NULL;
+
+ read_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ kref_get(&entry->ref);
+ read_unlock(&ctx->queuelock);
+
+ return entry;
+ }
+ }
+
+ read_unlock(&ctx->queuelock);
+ return NULL;
+}
+
+void msm_submitqueue_close(struct msm_file_private *ctx)
+{
+ struct msm_gpu_submitqueue *entry, *tmp;
+
+ /*
+ * No lock needed in close and there won't
+ * be any more user ioctls coming our way
+ */
+
+ list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node)
+ msm_submitqueue_put(entry);
+}
+
+int msm_submitqueue_create(struct msm_file_private *ctx, u32 prio, u32 flags,
+ u32 *id)
+{
+ struct msm_gpu_submitqueue *queue = kzalloc(sizeof(*queue), GFP_KERNEL);
+
+ if (!queue)
+ return -ENOMEM;
+
+ kref_init(&queue->ref);
+ queue->flags = flags;
+ queue->prio = prio;
+
+ write_lock(&ctx->queuelock);
+
+ queue->id = ctx->queueid++;
+
+ if (id)
+ *id = queue->id;
+
+ list_add_tail(&queue->node, &ctx->submitqueues);
+
+ write_unlock(&ctx->queuelock);
+
+ return 0;
+}
+
+int msm_submitqueue_init(struct msm_file_private *ctx)
+{
+ INIT_LIST_HEAD(&ctx->submitqueues);
+
+ rwlock_init(&ctx->queuelock);
+
+ /*
+ * Add the "default" submitqueue with id 0
+ * "low" priority (2) and no flags
+ */
+
+ return msm_submitqueue_create(ctx, 2, 0, NULL);
+}
+
+int msm_submitqueue_query(struct msm_file_private *ctx, u32 id, u32 param,
+ void __user *data, u32 len)
+{
+ struct msm_gpu_submitqueue *queue = msm_submitqueue_get(ctx, id);
+ int ret = 0;
+
+ if (!queue)
+ return -ENOENT;
+
+ if (param == MSM_SUBMITQUEUE_PARAM_FAULTS) {
+ u32 size = min_t(u32, len, sizeof(queue->faults));
+
+ if (copy_to_user(data, &queue->faults, size))
+ ret = -EFAULT;
+ } else {
+ ret = -EINVAL;
+ }
+
+ msm_submitqueue_put(queue);
+
+ return ret;
+}
+
+int msm_submitqueue_remove(struct msm_file_private *ctx, u32 id)
+{
+ struct msm_gpu_submitqueue *entry;
+
+ /*
+ * id 0 is the "default" queue and can't be destroyed
+ * by the user
+ */
+
+ if (!id)
+ return -ENOENT;
+
+ write_lock(&ctx->queuelock);
+
+ list_for_each_entry(entry, &ctx->submitqueues, node) {
+ if (entry->id == id) {
+ list_del(&entry->node);
+ write_unlock(&ctx->queuelock);
+
+ msm_submitqueue_put(entry);
+ return 0;
+ }
+ }
+
+ write_unlock(&ctx->queuelock);
+ return -ENOENT;
+}
+
diff --git a/drivers/gpu/drm/msm/msm_trace.h b/drivers/gpu/drm/msm/msm_trace.h
new file mode 100644
index 000000000000..68c7ff78ffc2
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_trace.h
@@ -0,0 +1,98 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#if !defined(_MSM_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _MSM_TRACE_H_
+
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM msm_drm
+#define TRACE_INCLUDE_FILE msm_trace
+
+TRACE_EVENT(msm_queued,
+ TP_PROTO(struct msm_gem_submit *submit),
+ TP_ARGS(submit),
+ TP_STRUCT__entry(
+ __field(uint32_t, queue_id)
+ __field(uint32_t, fence_id)
+ __field(int, ring)
+ ),
+ TP_fast_assign(
+ __entry->queue_id = submit->queue->id;
+ __entry->fence_id = submit->fence;
+ __entry->ring = submit->ring;
+ ),
+ TP_printk(
+ "queue=%u fence=%u ring=%d",
+ __entry->queue_id, __entry->fence_id, __entry->ring
+ )
+);
+
+TRACE_EVENT(msm_submitted,
+ TP_PROTO(struct msm_gem_submit *submit, uint64_t ticks, uint64_t nsecs),
+ TP_ARGS(submit, ticks, nsecs),
+ TP_STRUCT__entry(
+ __field(uint32_t, queue_id)
+ __field(uint32_t, fence_id)
+ __field(int, ring)
+ __field(uint64_t, ticks)
+ __field(uint64_t, nsecs)
+ ),
+ TP_fast_assign(
+ __entry->queue_id = submit->queue->id;
+ __entry->fence_id = submit->fence;
+ __entry->ring = submit->ring;
+ __entry->ticks = ticks;
+ __entry->nsecs = nsecs;
+ ),
+ TP_printk(
+ "queue=%u fence=%u ring=%d ticks=%lld nsecs=%llu",
+ __entry->queue_id, __entry->fence_id, __entry->ring,
+ __entry->ticks, __entry->nsecs
+ )
+);
+
+TRACE_EVENT(msm_retired,
+ TP_PROTO(struct msm_gem_submit *submit, uint64_t start_ticks,
+ uint64_t retire_ticks),
+ TP_ARGS(submit, start_ticks, retire_ticks),
+ TP_STRUCT__entry(
+ __field(uint32_t, queue_id)
+ __field(uint32_t, fence_id)
+ __field(int, ring)
+ __field(uint64_t, start_ticks)
+ __field(uint64_t, retire_ticks)
+ ),
+ TP_fast_assign(
+ __entry->queue_id = submit->queue->id;
+ __entry->fence_id = submit->fence;
+ __entry->ring = submit->ring;
+ __entry->start_ticks = start_ticks;
+ __entry->retire_ticks = retire_ticks;
+ ),
+ TP_printk(
+ "queue=%u fence=%u ring=%d started=%lld retired=%lld",
+ __entry->queue_id, __entry->fence_id, __entry->ring,
+ __entry->start_ticks, __entry->retire_ticks
+ )
+);
+
+
+#endif
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
+
diff --git a/drivers/gpu/drm/msm/msm_trace_points.c b/drivers/gpu/drm/msm/msm_trace_points.c
new file mode 100644
index 000000000000..41d9a975ac92
--- /dev/null
+++ b/drivers/gpu/drm/msm/msm_trace_points.c
@@ -0,0 +1,18 @@
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "msm_gem.h"
+#include "msm_gpu.h"
+
+#define CREATE_TRACE_POINTS
+#include "msm_trace.h"
diff --git a/drivers/gpu/drm/msm/sde/sde_backlight.c b/drivers/gpu/drm/msm/sde/sde_backlight.c
new file mode 100644
index 000000000000..78df28a0016b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_backlight.c
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_connector.h"
+#include <linux/backlight.h>
+#include "dsi_drm.h"
+
+#define SDE_BRIGHT_TO_BL(out, v, bl_max, max_bright) do {\
+ out = (2 * (v) * (bl_max) + max_bright);\
+ do_div(out, 2 * max_bright);\
+} while (0)
+
+static int sde_backlight_device_update_status(struct backlight_device *bd)
+{
+ int brightness;
+ struct drm_connector *connector;
+ struct dsi_display *display;
+ struct sde_connector *c_conn;
+ int bl_lvl;
+
+ brightness = bd->props.brightness;
+
+ if ((bd->props.power != FB_BLANK_UNBLANK) ||
+ (bd->props.state & BL_CORE_FBBLANK) ||
+ (bd->props.state & BL_CORE_SUSPENDED))
+ brightness = 0;
+
+ connector = bl_get_data(bd);
+ c_conn = to_sde_connector(connector);
+ display = (struct dsi_display *) c_conn->display;
+ if (brightness > display->panel[0]->bl_config.bl_max_level)
+ brightness = display->panel[0]->bl_config.bl_max_level;
+
+ /* This maps UI brightness into driver backlight level with
+ * rounding
+ */
+ SDE_BRIGHT_TO_BL(bl_lvl, brightness,
+ display->panel[0]->bl_config.bl_max_level,
+ display->panel[0]->bl_config.brightness_max_level);
+
+ if (!bl_lvl && brightness)
+ bl_lvl = 1;
+
+ if (c_conn->ops.set_backlight)
+ c_conn->ops.set_backlight(c_conn->display, bl_lvl);
+
+ return 0;
+}
+
+static int sde_backlight_device_get_brightness(struct backlight_device *bd)
+{
+ return 0;
+}
+
+static const struct backlight_ops sde_backlight_device_ops = {
+ .update_status = sde_backlight_device_update_status,
+ .get_brightness = sde_backlight_device_get_brightness,
+};
+
+int sde_backlight_setup(struct drm_connector *connector)
+{
+ struct sde_connector *c_conn;
+ struct backlight_device *bd;
+ struct backlight_properties props;
+ struct dsi_display *display;
+ struct dsi_backlight_config *bl_config;
+
+ if (!connector)
+ return -EINVAL;
+
+ c_conn = to_sde_connector(connector);
+ memset(&props, 0, sizeof(props));
+ props.type = BACKLIGHT_RAW;
+ props.power = FB_BLANK_UNBLANK;
+
+ switch (c_conn->connector_type) {
+ case DRM_MODE_CONNECTOR_DSI:
+ display = (struct dsi_display *) c_conn->display;
+ bl_config = &display->panel[0]->bl_config;
+ props.max_brightness = bl_config->brightness_max_level;
+ props.brightness = bl_config->brightness_max_level;
+ bd = backlight_device_register("sde-backlight",
+ connector->kdev,
+ connector,
+ &sde_backlight_device_ops, &props);
+ if (IS_ERR(bd)) {
+ pr_err("Failed to register backlight: %ld\n",
+ PTR_ERR(bd));
+ return -ENODEV;
+ }
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_backlight.h b/drivers/gpu/drm/msm/sde/sde_backlight.h
new file mode 100644
index 000000000000..1ea130592302
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_backlight.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_BACKLIGHT_H_
+#define _SDE_BACKLIGHT_H_
+
+int sde_backlight_setup(struct drm_connector *connector);
+
+#endif /* _SDE_BACKLIGHT_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.c b/drivers/gpu/drm/msm/sde/sde_color_processing.c
new file mode 100644
index 000000000000..8a086dc68328
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.c
@@ -0,0 +1,986 @@
+/* Copyright (c) 2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <drm/msm_drm_pp.h>
+#include "sde_color_processing.h"
+#include "sde_kms.h"
+#include "sde_crtc.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_lm.h"
+
+/**
+ * struct sde_cp_node - per-crtc bookkeeping for one color processing property
+ */
+struct sde_cp_node {
+	u32 property_id;	/* drm property id this node tracks */
+	u32 prop_flags;		/* DRM_MODE_PROP_* flags of the property */
+	u32 feature;		/* SDE_CP_CRTC_* feature id */
+	void *blob_ptr;		/* payload blob (drm blob or local blob) */
+	uint64_t prop_val;	/* last value set for RANGE properties */
+	const struct sde_pp_blk *pp_blk;
+	struct list_head feature_list;	/* entry in sde_crtc->feature_list */
+	struct list_head active_list;	/* entry in sde_crtc->active_list */
+	struct list_head dirty_list;	/* entry in sde_crtc->dirty_list */
+	bool is_dspp_feature;	/* true for DSPP features, false for LM */
+};
+
+/**
+ * struct sde_cp_prop_attach - argument bundle for sde_cp_crtc_prop_attach()
+ */
+struct sde_cp_prop_attach {
+	struct drm_crtc *crtc;
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node;
+	u32 feature;
+	uint64_t val;
+};
+
+static void dspp_pcc_install_property(struct drm_crtc *crtc);
+
+static void dspp_hsic_install_property(struct drm_crtc *crtc);
+
+static void dspp_ad_install_property(struct drm_crtc *crtc);
+
+static void dspp_vlut_install_property(struct drm_crtc *crtc);
+
+typedef void (*dspp_prop_install_func_t)(struct drm_crtc *crtc);
+
+static dspp_prop_install_func_t dspp_prop_install_func[SDE_DSPP_MAX];
+
+#define setup_dspp_prop_install_funcs(func) \
+do { \
+ func[SDE_DSPP_PCC] = dspp_pcc_install_property; \
+ func[SDE_DSPP_HSIC] = dspp_hsic_install_property; \
+ func[SDE_DSPP_AD] = dspp_ad_install_property; \
+ func[SDE_DSPP_VLUT] = dspp_vlut_install_property; \
+} while (0)
+
+typedef void (*lm_prop_install_func_t)(struct drm_crtc *crtc);
+
+static lm_prop_install_func_t lm_prop_install_func[SDE_MIXER_MAX];
+
+static void lm_gc_install_property(struct drm_crtc *crtc);
+
+#define setup_lm_prop_install_funcs(func) \
+ (func[SDE_MIXER_GC] = lm_gc_install_property)
+
+enum {
+ /* Append new DSPP features before SDE_CP_CRTC_DSPP_MAX */
+ /* DSPP Features start */
+ SDE_CP_CRTC_DSPP_IGC,
+ SDE_CP_CRTC_DSPP_PCC,
+ SDE_CP_CRTC_DSPP_GC,
+ SDE_CP_CRTC_DSPP_HSIC,
+ SDE_CP_CRTC_DSPP_SAT,
+ SDE_CP_CRTC_DSPP_VAL,
+ SDE_CP_CRTC_DSPP_CONT,
+ SDE_CP_CRTC_DSPP_MEMCOLOR,
+ SDE_CP_CRTC_DSPP_SIXZONE,
+ SDE_CP_CRTC_DSPP_GAMUT,
+ SDE_CP_CRTC_DSPP_DITHER,
+ SDE_CP_CRTC_DSPP_HIST,
+ SDE_CP_CRTC_DSPP_AD,
+ SDE_CP_CRTC_DSPP_VLUT,
+ SDE_CP_CRTC_DSPP_MAX,
+ /* DSPP features end */
+
+ /* Append new LM features before SDE_CP_CRTC_MAX_FEATURES */
+ /* LM feature start*/
+ SDE_CP_CRTC_LM_GC,
+ /* LM feature end*/
+
+ SDE_CP_CRTC_MAX_FEATURES,
+};
+
+#define INIT_PROP_ATTACH(p, crtc, prop, node, feature, val) \
+ do { \
+ (p)->crtc = crtc; \
+ (p)->prop = prop; \
+ (p)->prop_node = node; \
+ (p)->feature = feature; \
+ (p)->val = val; \
+ } while (0)
+
+/**
+ * sde_cp_get_hw_payload - resolve the hardware payload for a property node
+ * @prop_node: node whose blob/value is translated into a hw config
+ * @hw_cfg: output config (zeroed first); payload/len filled when available
+ * @feature_enabled: output flag, true when the feature should be enabled
+ *
+ * BLOB properties use the attached drm blob directly.  RANGE properties use
+ * the raw value, unless a local blob exists, in which case a non-zero value
+ * selects the local blob payload and zero disables the feature.
+ */
+static void sde_cp_get_hw_payload(struct sde_cp_node *prop_node,
+				  struct sde_hw_cp_cfg *hw_cfg,
+				  bool *feature_enabled)
+{
+
+	struct drm_property_blob *blob = NULL;
+
+	memset(hw_cfg, 0, sizeof(*hw_cfg));
+	*feature_enabled = false;
+
+	blob = prop_node->blob_ptr;
+	if (prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+		if (blob) {
+			hw_cfg->len = blob->length;
+			hw_cfg->payload = blob->data;
+			*feature_enabled = true;
+		}
+	} else if (prop_node->prop_flags & DRM_MODE_PROP_RANGE) {
+		/* Check if local blob is Set */
+		if (!blob) {
+			hw_cfg->len = sizeof(prop_node->prop_val);
+			if (prop_node->prop_val)
+				hw_cfg->payload = &prop_node->prop_val;
+		} else {
+			hw_cfg->len = (prop_node->prop_val) ? blob->length :
+					0;
+			hw_cfg->payload = (prop_node->prop_val) ? blob->data
+						: NULL;
+		}
+		if (prop_node->prop_val)
+			*feature_enabled = true;
+	} else {
+		DRM_ERROR("property type is not supported\n");
+	}
+}
+
+/* Drop the reference held on a BLOB property's payload; -EINVAL if none. */
+static int sde_cp_disable_crtc_blob_property(struct sde_cp_node *prop_node)
+{
+	struct drm_property_blob *blob = prop_node->blob_ptr;
+
+	if (!blob)
+		return -EINVAL;
+	drm_property_unreference_blob(blob);
+	prop_node->blob_ptr = NULL;
+	return 0;
+}
+
+/**
+ * sde_cp_create_local_blob - back a RANGE property with a driver-owned blob
+ * @crtc: crtc whose feature list is searched
+ * @feature: SDE_CP_CRTC_* feature id the blob is created for
+ * @len: size in bytes of the blob payload
+ *
+ * Only valid for features registered as non-BLOB (RANGE) properties; the
+ * blob later receives the payload copied from the user pointer carried in
+ * the property value (see sde_cp_handle_range_property()).
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int sde_cp_create_local_blob(struct drm_crtc *crtc, u32 feature, int len)
+{
+	int ret = -EINVAL;
+	bool found = false;
+	struct sde_cp_node *prop_node = NULL;
+	struct drm_property_blob *blob_ptr;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (prop_node->feature == feature) {
+			found = true;
+			break;
+		}
+	}
+
+	/*
+	 * Fix: when the feature is not found, list_for_each_entry() leaves
+	 * prop_node pointing at the list head container, so prop_node must
+	 * not be dereferenced for the error message in that case.
+	 */
+	if (!found || prop_node->prop_flags & DRM_MODE_PROP_BLOB) {
+		DRM_ERROR("local blob create failed prop found %d flags %d\n",
+			found, found ? prop_node->prop_flags : 0);
+		return ret;
+	}
+
+	blob_ptr = drm_property_create_blob(crtc->dev, len, NULL);
+	ret = (IS_ERR_OR_NULL(blob_ptr)) ? PTR_ERR(blob_ptr) : 0;
+	if (!ret)
+		prop_node->blob_ptr = blob_ptr;
+
+	return ret;
+}
+
+/* Release a locally-created blob (only RANGE-backed nodes own one). */
+static void sde_cp_destroy_local_blob(struct sde_cp_node *prop_node)
+{
+	if (!(prop_node->prop_flags & DRM_MODE_PROP_BLOB) &&
+		prop_node->blob_ptr)
+		drm_property_unreference_blob(prop_node->blob_ptr);
+}
+
+/**
+ * sde_cp_handle_range_property - store a RANGE property value
+ * @prop_node: node being updated
+ * @val: raw property value; when a local blob exists this is treated as a
+ *       userspace pointer whose contents are copied into the blob
+ *
+ * Without a local blob the value is stored as-is.  A zero value disables
+ * the feature.  Returns 0 on success or -EFAULT if the copy from the user
+ * pointer fails.
+ */
+static int sde_cp_handle_range_property(struct sde_cp_node *prop_node,
+					uint64_t val)
+{
+	int ret = 0;
+	struct drm_property_blob *blob_ptr = prop_node->blob_ptr;
+
+	if (!blob_ptr) {
+		prop_node->prop_val = val;
+		return 0;
+	}
+
+	if (!val) {
+		prop_node->prop_val = 0;
+		return 0;
+	}
+
+	/* val carries a user pointer for blob-backed range properties */
+	ret = copy_from_user(blob_ptr->data, (void *)val, blob_ptr->length);
+	if (ret) {
+		DRM_ERROR("failed to get the property info ret %d", ret);
+		ret = -EFAULT;
+	} else {
+		prop_node->prop_val = val;
+	}
+
+	return ret;
+}
+
+/* Disable a feature by clearing its blob or zeroing its range value. */
+static int sde_cp_disable_crtc_property(struct drm_crtc *crtc,
+					 struct drm_property *property,
+					 struct sde_cp_node *prop_node)
+{
+	int ret = -EINVAL;
+
+	if (property->flags & DRM_MODE_PROP_BLOB)
+		ret = sde_cp_disable_crtc_blob_property(prop_node);
+	else if (property->flags & DRM_MODE_PROP_RANGE)
+		ret = sde_cp_handle_range_property(prop_node, 0);
+	return ret;
+}
+
+/**
+ * sde_cp_enable_crtc_blob_property - attach a drm blob payload to a node
+ * @crtc: crtc the property belongs to
+ * @prop_node: node receiving the blob reference
+ * @val: blob id supplied by userspace
+ *
+ * Looks up the blob by id and swaps it in, releasing any previous blob.
+ * Returns 0 on success, -EINVAL for an unknown blob id.
+ */
+static int sde_cp_enable_crtc_blob_property(struct drm_crtc *crtc,
+					       struct sde_cp_node *prop_node,
+					       uint64_t val)
+{
+	struct drm_property_blob *blob = NULL;
+
+	/**
+	 * For non-blob based properties add support to create a blob
+	 * using the val and store the blob_ptr in prop_node.
+	 */
+	blob = drm_property_lookup_blob(crtc->dev, val);
+	if (!blob) {
+		DRM_ERROR("invalid blob id %lld\n", val);
+		return -EINVAL;
+	}
+	/* Release reference to existing payload of the property */
+	if (prop_node->blob_ptr)
+		drm_property_unreference_blob(prop_node->blob_ptr);
+
+	prop_node->blob_ptr = blob;
+	return 0;
+}
+
+/* Enable a feature, dispatching on the property's BLOB/RANGE flag. */
+static int sde_cp_enable_crtc_property(struct drm_crtc *crtc,
+					struct drm_property *property,
+					struct sde_cp_node *prop_node,
+					uint64_t val)
+{
+	int ret = -EINVAL;
+
+	if (property->flags & DRM_MODE_PROP_BLOB)
+		ret = sde_cp_enable_crtc_blob_property(crtc, prop_node, val);
+	else if (property->flags & DRM_MODE_PROP_RANGE)
+		ret = sde_cp_handle_range_property(prop_node, val);
+	return ret;
+}
+
+/* Fetch the sde_kms instance backing a crtc's drm device. */
+static struct sde_kms *get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv = crtc->dev->dev_private;
+
+	return to_sde_kms(priv->kms);
+}
+
+/**
+ * sde_cp_crtc_prop_attach - attach a property to a crtc and track it
+ * @prop_attach: bundled crtc/property/node/feature/value arguments
+ *
+ * Attaches the drm property to the crtc object, initializes the node's
+ * list heads, records the property id/flags/feature, classifies the node
+ * as DSPP or LM, and links it onto the crtc's feature list.
+ */
+static void sde_cp_crtc_prop_attach(struct sde_cp_prop_attach *prop_attach)
+{
+
+	struct sde_crtc *sde_crtc = to_sde_crtc(prop_attach->crtc);
+
+	drm_object_attach_property(&prop_attach->crtc->base,
+				   prop_attach->prop, prop_attach->val);
+
+	INIT_LIST_HEAD(&prop_attach->prop_node->active_list);
+	INIT_LIST_HEAD(&prop_attach->prop_node->dirty_list);
+
+	prop_attach->prop_node->property_id = prop_attach->prop->base.id;
+	prop_attach->prop_node->prop_flags = prop_attach->prop->flags;
+	prop_attach->prop_node->feature = prop_attach->feature;
+
+	/* features below SDE_CP_CRTC_DSPP_MAX belong to the DSPP block */
+	if (prop_attach->feature < SDE_CP_CRTC_DSPP_MAX)
+		prop_attach->prop_node->is_dspp_feature = true;
+	else
+		prop_attach->prop_node->is_dspp_feature = false;
+
+	list_add(&prop_attach->prop_node->feature_list,
+			&sde_crtc->feature_list);
+}
+
+/* Initialize the color processing bookkeeping lists of a crtc. */
+void sde_cp_crtc_init(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("invalid crtc %pK\n", crtc);
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	INIT_LIST_HEAD(&sde_crtc->active_list);
+	INIT_LIST_HEAD(&sde_crtc->dirty_list);
+	INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+/**
+ * sde_cp_crtc_install_immutable_property - expose a read-only 0..1 property
+ * @crtc: crtc to attach the property to
+ * @name: property name
+ * @feature: SDE_CP_CRTC_* feature id
+ *
+ * The drm property is created once and cached in priv->cp_property so
+ * later crtcs reuse it; each crtc still gets its own tracking node.
+ */
+static void sde_cp_crtc_install_immutable_property(struct drm_crtc *crtc,
+						   char *name,
+						   u32 feature)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	struct sde_cp_prop_attach prop_attach;
+	uint64_t val = 0;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_range(crtc->dev,
+				DRM_MODE_PROP_IMMUTABLE, name, 0, 1);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+/**
+ * sde_cp_crtc_install_range_property - expose a RANGE property on a crtc
+ * @crtc: crtc to attach the property to
+ * @name: property name
+ * @feature: SDE_CP_CRTC_* feature id
+ * @min: minimum accepted value
+ * @max: maximum accepted value
+ * @val: initial value
+ *
+ * The drm property is created once and cached in priv->cp_property; each
+ * crtc gets its own tracking node.
+ */
+static void sde_cp_crtc_install_range_property(struct drm_crtc *crtc,
+					     char *name,
+					     u32 feature,
+					     uint64_t min, uint64_t max,
+					     uint64_t val)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+			  SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create_range(crtc->dev, 0, name, min, max);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+/**
+ * sde_cp_crtc_create_blob_property - expose a BLOB property on a crtc
+ * @crtc: crtc to attach the property to
+ * @name: property name
+ * @feature: SDE_CP_CRTC_* feature id
+ *
+ * The drm property is created once and cached in priv->cp_property; each
+ * crtc gets its own tracking node.
+ */
+static void sde_cp_crtc_create_blob_property(struct drm_crtc *crtc, char *name,
+					     u32 feature)
+{
+	struct drm_property *prop;
+	struct sde_cp_node *prop_node = NULL;
+	struct msm_drm_private *priv;
+	uint64_t val = 0;
+	struct sde_cp_prop_attach prop_attach;
+
+	if (feature >=  SDE_CP_CRTC_MAX_FEATURES) {
+		DRM_ERROR("invalid feature %d max %d\n", feature,
+		       SDE_CP_CRTC_MAX_FEATURES);
+		return;
+	}
+
+	prop_node = kzalloc(sizeof(*prop_node), GFP_KERNEL);
+	if (!prop_node)
+		return;
+
+	priv = crtc->dev->dev_private;
+	prop = priv->cp_property[feature];
+
+	if (!prop) {
+		prop = drm_property_create(crtc->dev,
+					   DRM_MODE_PROP_BLOB, name, 0);
+		if (!prop) {
+			DRM_ERROR("property create failed: %s\n", name);
+			kfree(prop_node);
+			return;
+		}
+		priv->cp_property[feature] = prop;
+	}
+
+	INIT_PROP_ATTACH(&prop_attach, crtc, prop, prop_node,
+				feature, val);
+
+	sde_cp_crtc_prop_attach(&prop_attach);
+}
+
+/**
+ * sde_cp_crtc_setfeature - program one dirty feature into crtc hardware
+ * @prop_node: feature node carrying the payload to program
+ * @sde_crtc: crtc whose mixer/dspp blocks are programmed
+ *
+ * Resolves the payload via sde_cp_get_hw_payload() and invokes the matching
+ * dspp/mixer setup op on every mixer of the crtc.  On success the node is
+ * added to (or removed from) the active list depending on whether the
+ * feature is enabled, and is always removed from the dirty list.  On any
+ * failure the lists are left untouched so the feature stays dirty.
+ */
+static void sde_cp_crtc_setfeature(struct sde_cp_node *prop_node,
+				   struct sde_crtc *sde_crtc)
+{
+	struct sde_hw_cp_cfg hw_cfg;
+	struct sde_hw_mixer *hw_lm;
+	struct sde_hw_dspp *hw_dspp;
+	struct drm_msm_pa_hsic *hsic_cfg;
+	u32 num_mixers = sde_crtc->num_mixers;
+	int i = 0;
+	bool feature_enabled = false;
+	int ret = 0;
+
+	sde_cp_get_hw_payload(prop_node, &hw_cfg, &feature_enabled);
+
+	for (i = 0; i < num_mixers && !ret; i++) {
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_dspp = sde_crtc->mixers[i].hw_dspp;
+
+		switch (prop_node->feature) {
+		case SDE_CP_CRTC_DSPP_VLUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_vlut) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_vlut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_PCC:
+			if (!hw_dspp || !hw_dspp->ops.setup_pcc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_pcc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_IGC:
+			if (!hw_dspp || !hw_dspp->ops.setup_igc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_igc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GC:
+			if (!hw_dspp || !hw_dspp->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gc(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_HSIC:
+			if (!hw_dspp || !hw_dspp->ops.setup_pa_hsic) {
+				ret = -EINVAL;
+				continue;
+			}
+			if (hw_cfg.payload && (hw_cfg.len ==
+					sizeof(struct drm_msm_pa_hsic))) {
+				/* hw_cfg is valid, check for feature flag */
+				hsic_cfg = (struct drm_msm_pa_hsic *)
+						hw_cfg.payload;
+				if ((hsic_cfg->flags &
+					PA_HSIC_LEFT_DISPLAY_ONLY) && (i > 0)) {
+					/* skip right side programming */
+					continue;
+				} else if ((hsic_cfg->flags &
+						PA_HSIC_RIGHT_DISPLAY_ONLY)
+						&& (i == 0)) {
+					/* skip left side programming */
+					continue;
+				}
+			}
+			hw_dspp->ops.setup_pa_hsic(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_MEMCOLOR:
+			/*
+			 * Fix: the original was missing braces here, so the
+			 * "continue" executed unconditionally and the
+			 * setup_pa_memcolor() call below was unreachable --
+			 * memcolor was never programmed even when valid.
+			 */
+			if (!hw_dspp || !hw_dspp->ops.setup_pa_memcolor) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_pa_memcolor(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_SIXZONE:
+			if (!hw_dspp || !hw_dspp->ops.setup_sixzone) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_sixzone(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_DSPP_GAMUT:
+			if (!hw_dspp || !hw_dspp->ops.setup_gamut) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_dspp->ops.setup_gamut(hw_dspp, &hw_cfg);
+			break;
+		case SDE_CP_CRTC_LM_GC:
+			if (!hw_lm || !hw_lm->ops.setup_gc) {
+				ret = -EINVAL;
+				continue;
+			}
+			hw_lm->ops.setup_gc(hw_lm, &hw_cfg);
+			break;
+		default:
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	if (ret) {
+		DRM_ERROR("failed to %s feature %d\n",
+			((feature_enabled) ? "enable" : "disable"),
+			prop_node->feature);
+		return;
+	}
+
+	if (feature_enabled) {
+		DRM_DEBUG_DRIVER("Add feature to active list %d\n",
+				 prop_node->property_id);
+		list_add_tail(&prop_node->active_list, &sde_crtc->active_list);
+	} else {
+		DRM_DEBUG_DRIVER("remove feature from active list %d\n",
+				 prop_node->property_id);
+		list_del_init(&prop_node->active_list);
+	}
+	/* Programming of feature done remove from dirty list */
+	list_del_init(&prop_node->dirty_list);
+}
+
+/**
+ * sde_cp_crtc_apply_properties - program all dirty features and flush
+ * @crtc: crtc whose dirty feature list is processed
+ *
+ * Walks the dirty list, programs each feature via sde_cp_crtc_setfeature(),
+ * then updates the ctl pending-flush mask for the dspp and/or mixer paths.
+ *
+ * NOTE(review): the two update_pending_flush() calls below are indented as
+ * if conditional but execute unconditionally (no braces on the ifs).  With
+ * flush_mask starting at 0 the extra calls look harmless, but confirm the
+ * intended bracing against the hw_ctl contract.
+ */
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	bool set_dspp_flush = false, set_lm_flush = false;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+	struct sde_hw_ctl *ctl;
+	uint32_t flush_mask = 0;
+	u32 num_mixers = 0, i = 0;
+
+	if (!crtc || !crtc->dev) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n", crtc,
+			  (crtc ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	num_mixers = sde_crtc->num_mixers;
+	if (!num_mixers) {
+		DRM_DEBUG_DRIVER("no mixers for this crtc\n");
+		return;
+	}
+
+	/* Check if dirty list is empty for early return */
+	if (list_empty(&sde_crtc->dirty_list)) {
+		DRM_DEBUG_DRIVER("Dirty list is empty\n");
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->dirty_list,
+				dirty_list) {
+		sde_cp_crtc_setfeature(prop_node, sde_crtc);
+		/* Set the flush flag to true */
+		if (prop_node->is_dspp_feature)
+			set_dspp_flush = true;
+		else
+			set_lm_flush = true;
+	}
+
+	for (i = 0; i < num_mixers; i++) {
+		ctl = sde_crtc->mixers[i].hw_ctl;
+		if (!ctl)
+			continue;
+		if (set_dspp_flush && ctl->ops.get_bitmask_dspp
+				&& sde_crtc->mixers[i].hw_dspp)
+			ctl->ops.get_bitmask_dspp(ctl,
+					&flush_mask,
+					sde_crtc->mixers[i].hw_dspp->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+		if (set_lm_flush && ctl->ops.get_bitmask_mixer
+				&& sde_crtc->mixers[i].hw_lm)
+			flush_mask = ctl->ops.get_bitmask_mixer(ctl,
+					sde_crtc->mixers[i].hw_lm->idx);
+			ctl->ops.update_pending_flush(ctl, flush_mask);
+	}
+}
+
+/**
+ * sde_cp_crtc_install_properties - create/attach all CP properties on a crtc
+ * @crtc: crtc to install the properties on
+ *
+ * Safe to call repeatedly (atomic_check and commit): returns early once the
+ * crtc's feature list is non-empty.  The drm properties themselves are
+ * global and cached in priv->cp_property; the capability masks come from
+ * the first DSPP and first mixer in the catalog.
+ */
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc)
+{
+	struct sde_kms *kms = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	unsigned long features = 0;
+	int i = 0;
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		DRM_ERROR("invalid crtc %pK dev %pK\n",
+		       crtc, ((crtc) ? crtc->dev : NULL));
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	kms = get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		DRM_ERROR("invalid sde kms %pK catalog %pK sde_crtc %pK\n",
+		 kms, ((kms) ? kms->catalog : NULL), sde_crtc);
+		return;
+	}
+
+	/**
+	 * Function can be called during the atomic_check with test_only flag
+	 * and actual commit. Allocate properties only if feature list is
+	 * empty during the atomic_check with test_only flag.
+	 */
+	if (!list_empty(&sde_crtc->feature_list))
+		return;
+
+	catalog = kms->catalog;
+	priv = crtc->dev->dev_private;
+	/**
+	 * DSPP/LM properties are global to all the CRTCS.
+	 * Properties are created for first CRTC and re-used for later
+	 * crtcs.
+	 */
+	if (!priv->cp_property) {
+		priv->cp_property = kzalloc((sizeof(priv->cp_property) *
+				SDE_CP_CRTC_MAX_FEATURES), GFP_KERNEL);
+		setup_dspp_prop_install_funcs(dspp_prop_install_func);
+		setup_lm_prop_install_funcs(lm_prop_install_func);
+	}
+	if (!priv->cp_property)
+		return;
+
+	if (!catalog->dspp_count)
+		goto lm_property;
+
+	/* Check for all the DSPP properties and attach it to CRTC */
+	features = catalog->dspp[0].features;
+	for (i = 0; i < SDE_DSPP_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (dspp_prop_install_func[i])
+			dspp_prop_install_func[i](crtc);
+	}
+
+lm_property:
+	if (!catalog->mixer_count)
+		return;
+
+	/* Check for all the LM properties and attach it to CRTC */
+	features = catalog->mixer[0].features;
+	for (i = 0; i < SDE_MIXER_MAX; i++) {
+		if (!test_bit(i, &features))
+			continue;
+		if (lm_prop_install_func[i])
+			lm_prop_install_func[i](crtc);
+	}
+}
+
+/**
+ * sde_cp_crtc_set_property - atomic set handler for CP properties
+ * @crtc: crtc the property is set on
+ * @property: drm property being set
+ * @val: new value (0 disables the feature)
+ *
+ * Validates that mixers/dspp blocks are attached for the feature class,
+ * stores the payload, and marks the node dirty so the next commit applies
+ * it.  Unknown properties return 0 so other handlers can claim them.
+ */
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+				struct drm_property *property,
+				uint64_t val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	int ret = 0, i = 0, dspp_cnt, lm_cnt;
+	u8 found = 0;
+
+	if (!crtc || !property) {
+		DRM_ERROR("invalid crtc %pK property %pK\n", crtc, property);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			found = 1;
+			break;
+		}
+	}
+
+	if (!found)
+		return 0;
+	/**
+	 * sde_crtc is virtual ensure that hardware has been attached to the
+	 * crtc. Check LM and dspp counts based on whether feature is a
+	 * dspp/lm feature.
+	 */
+	if (!sde_crtc->num_mixers ||
+	    sde_crtc->num_mixers > ARRAY_SIZE(sde_crtc->mixers)) {
+		DRM_ERROR("Invalid mixer config act cnt %d max cnt %ld\n",
+			sde_crtc->num_mixers, ARRAY_SIZE(sde_crtc->mixers));
+		return -EINVAL;
+	}
+
+	dspp_cnt = 0;
+	lm_cnt = 0;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (sde_crtc->mixers[i].hw_dspp)
+			dspp_cnt++;
+		if (sde_crtc->mixers[i].hw_lm)
+			lm_cnt++;
+	}
+
+	if (prop_node->is_dspp_feature && dspp_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid dspp cnt %d mixer cnt %d\n", dspp_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	} else if (lm_cnt < sde_crtc->num_mixers) {
+		DRM_ERROR("invalid lm cnt %d mixer cnt %d\n", lm_cnt,
+			sde_crtc->num_mixers);
+		return -EINVAL;
+	}
+	/* remove the property from dirty list */
+	list_del_init(&prop_node->dirty_list);
+
+	if (!val)
+		ret = sde_cp_disable_crtc_property(crtc, property, prop_node);
+	else
+		ret = sde_cp_enable_crtc_property(crtc, property,
+						  prop_node, val);
+
+	if (!ret) {
+		/* remove the property from active list */
+		list_del_init(&prop_node->active_list);
+		/* Mark the feature as dirty */
+		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+	}
+	return ret;
+}
+
+/**
+ * sde_cp_crtc_get_property - atomic get handler for CP properties
+ * @crtc: crtc the property is read from
+ * @property: drm property being read
+ * @val: output value; 0 when the property is not tracked by this crtc
+ *
+ * Always returns 0; unsupported properties simply report a value of 0.
+ */
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+			     struct drm_property *property, uint64_t *val)
+{
+	struct sde_cp_node *prop_node = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+
+	if (!crtc || !property || !val) {
+		DRM_ERROR("invalid crtc %pK property %pK val %pK\n",
+			  crtc, property, val);
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return -EINVAL;
+	}
+	/* Return 0 if property is not supported */
+	*val = 0;
+	list_for_each_entry(prop_node, &sde_crtc->feature_list, feature_list) {
+		if (property->base.id == prop_node->property_id) {
+			*val = prop_node->prop_val;
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ * sde_cp_crtc_destroy_properties - free all CP tracking state of a crtc
+ * @crtc: crtc being torn down
+ *
+ * Releases blob references, unlinks every node from all three lists, frees
+ * the nodes, and reinitializes the list heads.  The global drm properties
+ * in priv->cp_property are intentionally left alone.
+ */
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("invalid crtc %pK\n", crtc);
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("invalid sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->feature_list,
+				 feature_list) {
+		if (prop_node->prop_flags & DRM_MODE_PROP_BLOB
+		    && prop_node->blob_ptr)
+			drm_property_unreference_blob(prop_node->blob_ptr);
+
+		list_del_init(&prop_node->active_list);
+		list_del_init(&prop_node->dirty_list);
+		list_del_init(&prop_node->feature_list);
+		sde_cp_destroy_local_blob(prop_node);
+		kfree(prop_node);
+	}
+
+	INIT_LIST_HEAD(&sde_crtc->active_list);
+	INIT_LIST_HEAD(&sde_crtc->dirty_list);
+	INIT_LIST_HEAD(&sde_crtc->feature_list);
+}
+
+/*
+ * Move every active feature back onto the dirty list so it is reprogrammed
+ * on the first commit after resume.
+ */
+void sde_cp_crtc_suspend(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = NULL;
+	struct sde_cp_node *prop_node = NULL, *n = NULL;
+
+	if (!crtc) {
+		DRM_ERROR("crtc %pK\n", crtc);
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	if (!sde_crtc) {
+		DRM_ERROR("sde_crtc %pK\n", sde_crtc);
+		return;
+	}
+
+	list_for_each_entry_safe(prop_node, n, &sde_crtc->active_list,
+				 active_list) {
+		list_add_tail(&prop_node->dirty_list, &sde_crtc->dirty_list);
+		list_del_init(&prop_node->active_list);
+	}
+}
+
+/* Nothing to do on resume; suspend already marked features dirty. */
+void sde_cp_crtc_resume(struct drm_crtc *crtc)
+{
+	/* placeholder for operations needed during resume */
+}
+
+/* Install the versioned PCC blob property when the DSPP supports it. */
+static void dspp_pcc_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+
+	version = catalog->dspp[0].sblk->pcc.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_PCC_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_create_blob_property(crtc, feature_name,
+					SDE_CP_CRTC_DSPP_PCC);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+/* Install the versioned PA HSIC blob property when supported. */
+static void dspp_hsic_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->hsic.version >> 16;
+	switch (version) {
+	case 1:
+		snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+			"SDE_DSPP_PA_HSIC_V", version);
+		sde_cp_crtc_create_blob_property(crtc, feature_name,
+			SDE_CP_CRTC_DSPP_HSIC);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+/*
+ * Install the VLUT property.  It is a RANGE property carrying a user
+ * pointer, so a local blob sized for drm_msm_pa_vlut backs the payload.
+ */
+static void dspp_vlut_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->vlut.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_VLUT_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_install_range_property(crtc, feature_name,
+			SDE_CP_CRTC_DSPP_VLUT, 0, U64_MAX, 0);
+		sde_cp_create_local_blob(crtc,
+			SDE_CP_CRTC_DSPP_VLUT,
+			sizeof(struct drm_msm_pa_vlut));
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+/* Install the AD capability as an immutable property (v3 only). */
+static void dspp_ad_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->dspp[0].sblk->ad.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		"SDE_DSPP_AD_V", version);
+	switch (version) {
+	case 3:
+		sde_cp_crtc_install_immutable_property(crtc,
+			feature_name, SDE_CP_CRTC_DSPP_AD);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
+
+/* Install the layer-mixer GC blob property when the mixer supports it. */
+static void lm_gc_install_property(struct drm_crtc *crtc)
+{
+	char feature_name[256];
+	struct sde_kms *kms = NULL;
+	struct sde_mdss_cfg *catalog = NULL;
+	u32 version;
+
+	kms = get_kms(crtc);
+	catalog = kms->catalog;
+	version = catalog->mixer[0].sblk->gc.version >> 16;
+	snprintf(feature_name, ARRAY_SIZE(feature_name), "%s%d",
+		 "SDE_LM_GC_V", version);
+	switch (version) {
+	case 1:
+		sde_cp_crtc_create_blob_property(crtc, feature_name,
+			SDE_CP_CRTC_LM_GC);
+		break;
+	default:
+		DRM_ERROR("version %d not supported\n", version);
+		break;
+	}
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_color_processing.h b/drivers/gpu/drm/msm/sde/sde_color_processing.h
new file mode 100644
index 000000000000..bf954ec6a8e7
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_color_processing.h
@@ -0,0 +1,95 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_COLOR_PROCESSING_H
+#define _SDE_COLOR_PROCESSING_H
+#include <drm/drm_crtc.h>
+
+/*
+ * PA MEMORY COLOR types
+ * @MEMCOLOR_SKIN Skin memory color type
+ * @MEMCOLOR_SKY Sky memory color type
+ * @MEMCOLOR_FOLIAGE Foliage memory color type
+ */
+enum sde_memcolor_type {
+ MEMCOLOR_SKIN = 0,
+ MEMCOLOR_SKY,
+ MEMCOLOR_FOLIAGE
+};
+
+/**
+ * sde_cp_crtc_init(): Initialize color processing lists for a crtc.
+ * Should be called during crtc initialization.
+ * @crtc: Pointer to sde_crtc.
+ */
+void sde_cp_crtc_init(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_install_properties(): Installs the color processing
+ * properties for a crtc.
+ * Should be called during crtc initialization.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_install_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_destroy_properties: Destroys color processing
+ * properties for a crtc.
+ * should be called during crtc de-initialization.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_destroy_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_set_property: Set a color processing property
+ * for a crtc.
+ * Should be during atomic set property.
+ * @crtc: Pointer to crtc.
+ * @property: Property that needs to enabled/disabled.
+ * @val: Value of property.
+ */
+int sde_cp_crtc_set_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t val);
+
+/**
+ * sde_cp_crtc_apply_properties: Enable/disable properties
+ * for a crtc.
+ * Should be called during atomic commit call.
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_apply_properties(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_get_property: Get value of color processing property
+ * for a crtc.
+ * Should be during atomic get property.
+ * @crtc: Pointer to crtc.
+ * @property: Property that needs to enabled/disabled.
+ * @val: Value of property.
+ *
+ */
+int sde_cp_crtc_get_property(struct drm_crtc *crtc,
+ struct drm_property *property, uint64_t *val);
+
+/**
+ * sde_cp_crtc_suspend: Suspend the crtc features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_suspend(struct drm_crtc *crtc);
+
+/**
+ * sde_cp_crtc_resume: Resume the crtc features
+ * @crtc: Pointer to crtc.
+ */
+void sde_cp_crtc_resume(struct drm_crtc *crtc);
+#endif /*_SDE_COLOR_PROCESSING_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.c b/drivers/gpu/drm/msm/sde/sde_connector.c
new file mode 100644
index 000000000000..49275e72a75d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_connector.c
@@ -0,0 +1,1032 @@
+/* Copyright (c) 2016-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/suspend.h>
+
+#include "msm_drv.h"
+
+#include "sde_kms.h"
+#include "sde_connector.h"
+#include "sde_backlight.h"
+#include "sde_splash.h"
+
+/* log helpers that prefix messages with the connector id, or -1 when NULL */
+#define SDE_DEBUG_CONN(c, fmt, ...) SDE_DEBUG("conn%d " fmt,\
+		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CONN(c, fmt, ...) SDE_ERROR("conn%d " fmt,\
+		(c) ? (c)->base.base.id : -1, ##__VA_ARGS__)
+
+/* value/name pairs for the immutable "topology_name" enum property */
+static const struct drm_prop_enum_list e_topology_name[] = {
+	{SDE_RM_TOPOLOGY_UNKNOWN,	"sde_unknown"},
+	{SDE_RM_TOPOLOGY_SINGLEPIPE,	"sde_singlepipe"},
+	{SDE_RM_TOPOLOGY_DUALPIPE,	"sde_dualpipe"},
+	{SDE_RM_TOPOLOGY_PPSPLIT,	"sde_ppsplit"},
+	{SDE_RM_TOPOLOGY_DUALPIPEMERGE,	"sde_dualpipemerge"}
+};
+/* bit names for the "topology_control" bitmask property */
+static const struct drm_prop_enum_list e_topology_control[] = {
+	{SDE_RM_TOPCTL_RESERVE_LOCK,	"reserve_lock"},
+	{SDE_RM_TOPCTL_RESERVE_CLEAR,	"reserve_clear"},
+	{SDE_RM_TOPCTL_DSPP,		"dspp"},
+	{SDE_RM_TOPCTL_FORCE_TILING,	"force_tiling"},
+	{SDE_RM_TOPCTL_PPSPLIT,		"ppsplit"},
+	{SDE_RM_TOPCTL_FORCE_MIXER,	"force_mixer"}
+};
+
+/* panel power states exposed through the "LP" enum property */
+static const struct drm_prop_enum_list e_power_mode[] = {
+	{SDE_MODE_DPMS_ON,	"ON"},
+	{SDE_MODE_DPMS_LP1,	"LP1"},
+	{SDE_MODE_DPMS_LP2,	"LP2"},
+	{SDE_MODE_DPMS_OFF,	"OFF"},
+};
+
+/* hpd states exposed through the "HPD_OFF" enum property */
+static const struct drm_prop_enum_list hpd_clock_state[] = {
+	{SDE_MODE_HPD_ON,	"ON"},
+	{SDE_MODE_HPD_OFF,	"OFF"},
+};
+
+/**
+ * sde_connector_get_info - fetch display information from the backing display
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to msm display info structure to populate
+ *
+ * Return: Zero on success, -EINVAL on invalid arguments or when the
+ * connector has no display or get_info callback.
+ */
+int sde_connector_get_info(struct drm_connector *connector,
+		struct msm_display_info *info)
+{
+	struct sde_connector *sde_conn;
+
+	if (!connector || !info) {
+		SDE_ERROR("invalid argument(s), conn %pK, info %pK\n",
+				connector, info);
+		return -EINVAL;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	if (!sde_conn->display || !sde_conn->ops.get_info) {
+		SDE_ERROR("display info not supported for %pK\n",
+				sde_conn->display);
+		return -EINVAL;
+	}
+
+	return sde_conn->ops.get_info(info, sde_conn->display);
+}
+
+/**
+ * sde_connector_pre_kickoff - forward hdr control to the display backend
+ * @connector: Pointer to drm connector structure
+ *
+ * Packs the current connector state's hdr control into kickoff params and
+ * invokes the backend's pre_kickoff hook, when one is provided.
+ * Return: Zero on success or when no hook exists, negative errno otherwise.
+ */
+int sde_connector_pre_kickoff(struct drm_connector *connector)
+{
+	struct sde_connector *sde_conn;
+	struct sde_connector_state *sde_state;
+	struct msm_display_kickoff_params params;
+
+	if (!connector) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	sde_state = to_sde_connector_state(connector->state);
+	if (!sde_conn->display) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	if (!sde_conn->ops.pre_kickoff)
+		return 0;
+
+	params.hdr_ctrl = &sde_state->hdr_ctrl;
+
+	return sde_conn->ops.pre_kickoff(connector, sde_conn->display,
+			&params);
+}
+
+/**
+ * sde_connector_get_csc_type - query the CSC type to use for this connector
+ * @conn: Pointer to drm connector structure
+ *
+ * Return: CSC type reported by the display backend, or SDE_CSC_RGB2YUV_601L
+ * when the backend provides no callback.
+ * NOTE(review): returns -EINVAL through an enum return type on invalid
+ * arguments; callers must treat negative values as errors — confirm no
+ * caller indexes a table with this return unchecked.
+ */
+enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn)
+{
+	struct sde_connector *c_conn;
+
+	if (!conn) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(conn);
+
+	if (!c_conn->display) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	/* default to limited-range 601 conversion when not overridden */
+	if (!c_conn->ops.get_csc_type)
+		return SDE_CSC_RGB2YUV_601L;
+
+	return c_conn->ops.get_csc_type(conn, c_conn->display);
+}
+
+/**
+ * sde_connector_mode_needs_full_range - query full-range need of current mode
+ * @connector: Pointer to drm connector structure
+ *
+ * Return: true when the display backend reports the active mode requires
+ * full-range quantization, false otherwise or on invalid input.
+ */
+bool sde_connector_mode_needs_full_range(struct drm_connector *connector)
+{
+	struct sde_connector *sde_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid argument\n");
+		return false;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	if (!sde_conn->display) {
+		SDE_ERROR("invalid argument\n");
+		return false;
+	}
+
+	return sde_conn->ops.mode_needs_full_range ?
+		sde_conn->ops.mode_needs_full_range(sde_conn->display) : false;
+}
+
+/**
+ * sde_connector_destroy - destroy a connector and release its resources
+ * @connector: Pointer to drm connector structure
+ *
+ * Tears down in reverse of sde_connector_init(): backend pre_deinit,
+ * property blobs and property helper, drm registration, lock, retire
+ * fence, base connector, and finally the containing allocation.
+ */
+static void sde_connector_destroy(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	/* let the display backend release anything it created in post_init */
+	if (c_conn->ops.pre_deinit)
+		c_conn->ops.pre_deinit(connector, c_conn->display);
+
+	if (c_conn->blob_caps)
+		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
+	msm_property_destroy(&c_conn->property_info);
+
+	drm_connector_unregister(connector);
+	mutex_destroy(&c_conn->lock);
+	sde_fence_deinit(&c_conn->retire_fence);
+	drm_connector_cleanup(connector);
+	kfree(c_conn);
+}
+
+/**
+ * _sde_connector_destroy_fb - clean up connector state's out_fb buffer
+ * @c_conn: Pointer to sde connector structure, may be NULL
+ * @c_state: Pointer to sde connector state structure
+ *
+ * Drops the framebuffer's iommu mapping and drm reference, then resets the
+ * OUT_FB property value to its default (or ~0 when no connector is
+ * available to look the default up from).
+ */
+static void _sde_connector_destroy_fb(struct sde_connector *c_conn,
+		struct sde_connector_state *c_state)
+{
+	if (!c_state || !c_state->out_fb) {
+		SDE_ERROR("invalid state %pK\n", c_state);
+		return;
+	}
+
+	msm_framebuffer_cleanup(c_state->out_fb, c_state->aspace);
+	drm_framebuffer_unreference(c_state->out_fb);
+	c_state->out_fb = NULL;
+
+	if (c_conn) {
+		c_state->property_values[CONNECTOR_PROP_OUT_FB] =
+			msm_property_get_default(&c_conn->property_info,
+					CONNECTOR_PROP_OUT_FB);
+	} else {
+		/* no property info reachable; fall back to a sentinel */
+		c_state->property_values[CONNECTOR_PROP_OUT_FB] = ~0;
+	}
+}
+
+/**
+ * sde_connector_atomic_destroy_state - release an atomic connector state
+ * @connector: Pointer to drm connector structure, may be NULL (see below)
+ * @state: Pointer to the drm connector state being destroyed
+ *
+ * Releases the state's framebuffer (if any) and frees the state either
+ * through the property helper or, when no connector is available, with a
+ * plain kfree.
+ */
+static void sde_connector_atomic_destroy_state(struct drm_connector *connector,
+		struct drm_connector_state *state)
+{
+	struct sde_connector *c_conn = NULL;
+	struct sde_connector_state *c_state = NULL;
+
+	if (!state) {
+		SDE_ERROR("invalid state\n");
+		return;
+	}
+
+	/*
+	 * The base DRM framework currently always passes in a NULL
+	 * connector pointer. This is not correct, but attempt to
+	 * handle that case as much as possible.
+	 */
+	if (connector)
+		c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(state);
+
+	if (c_state->out_fb)
+		_sde_connector_destroy_fb(c_conn, c_state);
+
+	if (!c_conn) {
+		/* cannot reach the property helper; free the raw allocation */
+		kfree(c_state);
+	} else {
+		/* destroy value helper */
+		msm_property_destroy_state(&c_conn->property_info, c_state,
+				c_state->property_values, 0);
+	}
+}
+
+/**
+ * sde_connector_atomic_reset - reset connector to a default atomic state
+ * @connector: Pointer to drm connector structure
+ *
+ * Frees any existing state (including its framebuffer reference) and
+ * installs a freshly allocated state with all properties at defaults.
+ */
+static void sde_connector_atomic_reset(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(connector);
+
+	if (connector->state) {
+		sde_connector_atomic_destroy_state(connector, connector->state);
+		connector->state = NULL;
+	}
+
+	c_state = msm_property_alloc_state(&c_conn->property_info);
+	if (!c_state) {
+		SDE_ERROR("state alloc failed\n");
+		return;
+	}
+
+	/* reset value helper, zero out state structure and reset properties */
+	msm_property_reset_state(&c_conn->property_info, c_state,
+			c_state->property_values, 0);
+
+	c_state->base.connector = connector;
+	connector->state = &c_state->base;
+}
+
+/**
+ * sde_connector_atomic_duplicate_state - duplicate connector state for atomic
+ * @connector: Pointer to drm connector structure
+ *
+ * Copies the current property values into a newly allocated state and takes
+ * an extra reference plus iommu mapping on any output framebuffer so the
+ * copy owns its fb independently.
+ * Return: Pointer to the base of the new state, or NULL on failure.
+ */
+static struct drm_connector_state *
+sde_connector_atomic_duplicate_state(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state, *c_oldstate;
+	int rc;
+
+	if (!connector || !connector->state) {
+		SDE_ERROR("invalid connector %pK\n", connector);
+		return NULL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_oldstate = to_sde_connector_state(connector->state);
+	c_state = msm_property_alloc_state(&c_conn->property_info);
+	if (!c_state) {
+		SDE_ERROR("state alloc failed\n");
+		return NULL;
+	}
+
+	/* duplicate value helper */
+	msm_property_duplicate_state(&c_conn->property_info,
+			c_oldstate, c_state, c_state->property_values, 0);
+
+	/* additional handling for drm framebuffer objects */
+	if (c_state->out_fb) {
+		drm_framebuffer_reference(c_state->out_fb);
+		rc = msm_framebuffer_prepare(c_state->out_fb,
+				c_state->aspace);
+		if (rc)
+			/* duplication still returned; only the mapping failed */
+			SDE_ERROR("failed to prepare fb, %d\n", rc);
+	}
+
+	return &c_state->base;
+}
+
+/**
+ * _sde_connector_set_hdr_info - cache hdr control data from userspace
+ * @c_conn: Pointer to sde connector structure
+ * @c_state: Pointer to sde connector state structure
+ * @usr_ptr: Userspace pointer to drm_msm_ext_panel_hdr_ctrl, or NULL to clear
+ *
+ * Copies the hdr control payload into the connector state and logs its
+ * contents. A NULL @usr_ptr clears the cached control data.
+ * Return: Zero on success, -ENOTSUPP when the sink lacks HDR support,
+ * -EFAULT on copy failure, -EINVAL on bad arguments.
+ */
+static int _sde_connector_set_hdr_info(
+	struct sde_connector *c_conn,
+	struct sde_connector_state *c_state,
+	void *usr_ptr)
+{
+	struct drm_connector *connector;
+	struct drm_msm_ext_panel_hdr_ctrl *hdr_ctrl;
+	struct drm_msm_ext_panel_hdr_metadata *hdr_meta;
+	int i;
+
+	if (!c_conn || !c_state) {
+		SDE_ERROR_CONN(c_conn, "invalid args\n");
+		return -EINVAL;
+	}
+
+	connector = &c_conn->base;
+
+	if (!connector->hdr_supported) {
+		SDE_ERROR_CONN(c_conn, "sink doesn't support HDR\n");
+		return -ENOTSUPP;
+	}
+
+	memset(&c_state->hdr_ctrl, 0, sizeof(c_state->hdr_ctrl));
+
+	if (!usr_ptr) {
+		SDE_DEBUG_CONN(c_conn, "hdr control cleared\n");
+		return 0;
+	}
+
+	if (copy_from_user(&c_state->hdr_ctrl,
+		(void __user *)usr_ptr,
+		sizeof(*hdr_ctrl))) {
+		SDE_ERROR_CONN(c_conn, "failed to copy hdr control\n");
+		return -EFAULT;
+	}
+
+	hdr_ctrl = &c_state->hdr_ctrl;
+
+	/* fixed label: this prints the control state, not hdr_supported */
+	SDE_DEBUG_CONN(c_conn, "hdr_state %d\n",
+		hdr_ctrl->hdr_state);
+
+	hdr_meta = &hdr_ctrl->hdr_meta;
+
+	SDE_DEBUG_CONN(c_conn, "hdr_supported %d\n",
+		hdr_meta->hdr_supported);
+	SDE_DEBUG_CONN(c_conn, "eotf %d\n",
+		hdr_meta->eotf);
+	SDE_DEBUG_CONN(c_conn, "white_point_x %d\n",
+		hdr_meta->white_point_x);
+	SDE_DEBUG_CONN(c_conn, "white_point_y %d\n",
+		hdr_meta->white_point_y);
+	SDE_DEBUG_CONN(c_conn, "max_luminance %d\n",
+		hdr_meta->max_luminance);
+	SDE_DEBUG_CONN(c_conn, "max_content_light_level %d\n",
+		hdr_meta->max_content_light_level);
+	SDE_DEBUG_CONN(c_conn, "max_average_light_level %d\n",
+		hdr_meta->max_average_light_level);
+
+	for (i = 0; i < HDR_PRIMARIES_COUNT; i++) {
+		SDE_DEBUG_CONN(c_conn, "display_primaries_x [%d]\n",
+			hdr_meta->display_primaries_x[i]);
+		SDE_DEBUG_CONN(c_conn, "display_primaries_y [%d]\n",
+			hdr_meta->display_primaries_y[i]);
+	}
+
+	return 0;
+}
+
+/**
+ * _sde_connector_update_power_locked - sync panel power with dpms/lp state
+ * @c_conn: Pointer to sde connector structure; caller holds c_conn->lock
+ *
+ * Maps the cached DRM dpms mode (and the low-power property when fully on)
+ * to an SDE power mode and forwards it to the display backend when the
+ * resulting mode changed.
+ * Return: Zero, or the value returned by the backend's set_power callback.
+ */
+static int _sde_connector_update_power_locked(struct sde_connector *c_conn)
+{
+	struct drm_connector *connector;
+	void *display;
+	int (*set_power)(struct drm_connector *, int, void *);
+	int mode, rc = 0;
+
+	if (!c_conn)
+		return -EINVAL;
+	connector = &c_conn->base;
+
+	switch (c_conn->dpms_mode) {
+	case DRM_MODE_DPMS_ON:
+		/* honor the LP property only when the panel is fully on */
+		mode = c_conn->lp_mode;
+		break;
+	case DRM_MODE_DPMS_STANDBY:
+		mode = SDE_MODE_DPMS_STANDBY;
+		break;
+	case DRM_MODE_DPMS_SUSPEND:
+		mode = SDE_MODE_DPMS_SUSPEND;
+		break;
+	case DRM_MODE_DPMS_OFF:
+		mode = SDE_MODE_DPMS_OFF;
+		break;
+	default:
+		mode = c_conn->lp_mode;
+		/* report the actual unrecognized dpms value, not lp_mode */
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+				connector->base.id, c_conn->dpms_mode);
+		break;
+	}
+
+	SDE_DEBUG("conn %d - dpms %d, lp %d, panel %d\n", connector->base.id,
+			c_conn->dpms_mode, c_conn->lp_mode, mode);
+
+	if (mode != c_conn->last_panel_power_mode && c_conn->ops.set_power) {
+		display = c_conn->display;
+		set_power = c_conn->ops.set_power;
+
+		/* drop the lock around the callback — NOTE(review): presumably
+		 * the backend may call back into connector code that takes
+		 * c_conn->lock; confirm callback locking expectations
+		 */
+		mutex_unlock(&c_conn->lock);
+		rc = set_power(connector, mode, display);
+		mutex_lock(&c_conn->lock);
+	}
+	c_conn->last_panel_power_mode = mode;
+
+	return rc;
+}
+
+/**
+ * sde_connector_atomic_set_property - set a connector property value
+ * @connector: Pointer to drm connector structure
+ * @state: Pointer to the connector state being updated
+ * @property: Property being set
+ * @val: New property value
+ *
+ * Caches the value through the generic property helper, then performs the
+ * connector-specific side effects (framebuffer lookup/prepare, retire-fence
+ * creation, power/hpd updates, hdr control copy) and finally defers to any
+ * custom handler supplied by the display backend.
+ * Return: Zero on success, negative errno on failure.
+ */
+static int sde_connector_atomic_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		struct drm_property *property,
+		uint64_t val)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	int idx, rc;
+	uint64_t fence_fd = 0;
+
+	if (!connector || !state || !property) {
+		SDE_ERROR("invalid argument(s), conn %pK, state %pK, prp %pK\n",
+				connector, state, property);
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(state);
+
+	/* generic property handling */
+	rc = msm_property_atomic_set(&c_conn->property_info,
+			c_state->property_values, 0, property, val);
+	if (rc)
+		goto end;
+
+	/* connector-specific property handling */
+	idx = msm_property_index(&c_conn->property_info, property);
+	switch (idx) {
+	case CONNECTOR_PROP_OUT_FB:
+		/* clear old fb, if present */
+		if (c_state->out_fb)
+			_sde_connector_destroy_fb(c_conn, c_state);
+
+		/* convert fb val to drm framebuffer and prepare it */
+		c_state->out_fb =
+			drm_framebuffer_lookup(connector->dev, val);
+		if (!c_state->out_fb) {
+			SDE_ERROR("failed to look up fb %lld\n", val);
+			rc = -EFAULT;
+		} else {
+			/* pick the iommu domain matching the fb secure flag */
+			if (c_state->out_fb->flags & DRM_MODE_FB_SECURE)
+				c_state->aspace =
+				c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE];
+			else
+				c_state->aspace =
+				c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+
+			rc = msm_framebuffer_prepare(c_state->out_fb,
+					c_state->aspace);
+			if (rc)
+				SDE_ERROR("prep fb failed, %d\n", rc);
+		}
+		break;
+	case CONNECTOR_PROP_RETIRE_FENCE:
+		if (!val)
+			goto end;
+
+		/*
+		 * update the offset to a timeline for commit completion
+		 */
+		rc = sde_fence_create(&c_conn->retire_fence, &fence_fd, 1);
+		if (rc) {
+			SDE_ERROR("fence create failed rc:%d\n", rc);
+			goto end;
+		}
+
+		/* val carries a user pointer that receives the new fence fd */
+		rc = copy_to_user((uint64_t __user *)val, &fence_fd,
+			sizeof(uint64_t));
+		if (rc) {
+			SDE_ERROR("copy to user failed rc:%d\n", rc);
+			/* fence will be released with timeline update */
+			put_unused_fd(fence_fd);
+			rc = -EFAULT;
+			goto end;
+		}
+		break;
+	case CONNECTOR_PROP_TOPOLOGY_CONTROL:
+		rc = sde_rm_check_property_topctl(val);
+		if (rc)
+			SDE_ERROR("invalid topology_control: 0x%llX\n", val);
+		break;
+	case CONNECTOR_PROP_LP:
+		mutex_lock(&c_conn->lock);
+		c_conn->lp_mode = val;
+		_sde_connector_update_power_locked(c_conn);
+		mutex_unlock(&c_conn->lock);
+		break;
+	case CONNECTOR_PROP_HPD_OFF:
+		c_conn->hpd_mode = val;
+		break;
+	default:
+		break;
+	}
+
+	/* val is a user pointer to a drm_msm_ext_panel_hdr_ctrl payload */
+	if (idx == CONNECTOR_PROP_HDR_CONTROL) {
+		rc = _sde_connector_set_hdr_info(c_conn, c_state, (void *)val);
+		if (rc)
+			SDE_ERROR_CONN(c_conn, "cannot set hdr info %d\n", rc);
+	}
+
+	/* check for custom property handling */
+	if (!rc && c_conn->ops.set_property) {
+		rc = c_conn->ops.set_property(connector,
+			state,
+			idx,
+			val,
+			c_conn->display);
+
+		/* potentially clean up out_fb if rc != 0 */
+		if ((idx == CONNECTOR_PROP_OUT_FB) && rc)
+			_sde_connector_destroy_fb(c_conn, c_state);
+	}
+end:
+	return rc;
+}
+
+/**
+ * sde_connector_set_property - legacy property entry point
+ * @connector: Pointer to drm connector structure
+ * @property: Property being set
+ * @val: New property value
+ *
+ * Routes the legacy path through the atomic handler on the current state.
+ * Return: Zero on success, negative errno on failure.
+ */
+static int sde_connector_set_property(struct drm_connector *connector,
+		struct drm_property *property,
+		uint64_t val)
+{
+	if (connector)
+		return sde_connector_atomic_set_property(connector,
+				connector->state, property, val);
+
+	SDE_ERROR("invalid connector\n");
+	return -EINVAL;
+}
+
+/**
+ * sde_connector_atomic_get_property - read back a connector property value
+ * @connector: Pointer to drm connector structure
+ * @state: Pointer to the connector state being queried
+ * @property: Property being read
+ * @val: Pointer filled with the property value
+ *
+ * Return: Zero on success, negative errno on failure.
+ */
+static int sde_connector_atomic_get_property(struct drm_connector *connector,
+		const struct drm_connector_state *state,
+		struct drm_property *property,
+		uint64_t *val)
+{
+	struct sde_connector *c_conn;
+	struct sde_connector_state *c_state;
+	int idx, rc = -EINVAL;
+
+	if (!connector || !state) {
+		SDE_ERROR("invalid argument(s), conn %pK, state %pK\n",
+				connector, state);
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	c_state = to_sde_connector_state(state);
+
+	idx = msm_property_index(&c_conn->property_info, property);
+	if (idx == CONNECTOR_PROP_RETIRE_FENCE) {
+		/* no readable value for the fence property; report ~0 */
+		*val = ~0;
+		rc = 0;
+	} else {
+		/* get cached property value */
+		rc = msm_property_atomic_get(&c_conn->property_info,
+				c_state->property_values, 0, property, val);
+	}
+
+	/* allow for custom override */
+	if (c_conn->ops.get_property)
+		rc = c_conn->ops.get_property(connector,
+			(struct drm_connector_state *)state,
+			idx,
+			val,
+			c_conn->display);
+	return rc;
+}
+
+/**
+ * sde_connector_prepare_fence - prepare the connector's retire fence
+ * @connector: Pointer to drm connector structure
+ */
+void sde_connector_prepare_fence(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	c_conn = to_sde_connector(connector);
+	sde_fence_prepare(&c_conn->retire_fence);
+}
+
+/**
+ * sde_connector_complete_commit - signal commit completion on a connector
+ * @connector: Pointer to drm connector structure
+ *
+ * Signals the retire fence timeline and, when LK handoff has completed
+ * during a hibernation transition, frees the early splash resources.
+ */
+void sde_connector_complete_commit(struct drm_connector *connector)
+{
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return;
+	}
+
+	dev = connector->dev;
+	priv = dev->dev_private;
+
+	/* signal connector's retire fence */
+	sde_fence_signal(&to_sde_connector(connector)->retire_fence, 0);
+
+	/* LK's early splash resources should be freed once both of the
+	 * conditions below are met:
+	 * 1) get_hibernation_status() returns true, i.e. one of:
+	 *    a. hibernation image snapshot failed.
+	 *    b. hibernation restore successful.
+	 *    c. hibernation restore failed.
+	 * 2) LK has completely exited.
+	 */
+	if (get_hibernation_status() &&
+		sde_splash_get_lk_complete_status(priv->kms)) {
+		c_conn = to_sde_connector(connector);
+
+		sde_splash_free_resource(priv->kms, &priv->phandle,
+				c_conn->connector_type,
+				c_conn->display,
+				c_conn->is_shared);
+	}
+
+}
+
+/**
+ * sde_connector_dpms - legacy dpms entry point for the connector
+ * @connector: Pointer to drm connector structure
+ * @mode: DRM_MODE_DPMS_* mode requested by userspace
+ *
+ * Caches the requested mode, pushes the resulting panel power state to the
+ * display backend, then defers to the atomic dpms helper.
+ * Return: Zero on success, negative errno on failure.
+ */
+static int sde_connector_dpms(struct drm_connector *connector,
+		int mode)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return -EINVAL;
+	}
+	c_conn = to_sde_connector(connector);
+
+	/* validate incoming dpms request */
+	switch (mode) {
+	case DRM_MODE_DPMS_ON:
+	case DRM_MODE_DPMS_STANDBY:
+	case DRM_MODE_DPMS_SUSPEND:
+	case DRM_MODE_DPMS_OFF:
+		SDE_DEBUG("conn %d dpms set to %d\n",
+				connector->base.id, mode);
+		break;
+	default:
+		/* unrecognized modes are logged but still cached below */
+		SDE_ERROR("conn %d dpms set to unrecognized mode %d\n",
+				connector->base.id, mode);
+		break;
+	}
+
+	mutex_lock(&c_conn->lock);
+	c_conn->dpms_mode = mode;
+	_sde_connector_update_power_locked(c_conn);
+	mutex_unlock(&c_conn->lock);
+
+	/* use helper for boilerplate handling */
+	return drm_atomic_helper_connector_dpms(connector, mode);
+}
+
+/**
+ * sde_connector_get_dpms - read the cached dpms mode under the connector lock
+ * @connector: Pointer to drm connector structure
+ *
+ * Return: Cached DRM_MODE_DPMS_* mode, DRM_MODE_DPMS_OFF on invalid input.
+ */
+int sde_connector_get_dpms(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	int mode;
+
+	if (!connector) {
+		SDE_DEBUG("invalid connector\n");
+		return DRM_MODE_DPMS_OFF;
+	}
+
+	c_conn = to_sde_connector(connector);
+	mutex_lock(&c_conn->lock);
+	mode = c_conn->dpms_mode;
+	mutex_unlock(&c_conn->lock);
+
+	return mode;
+}
+
+/**
+ * sde_connector_update_hdr_props - publish sink hdr caps via blob property
+ * @connector: Pointer to drm connector structure
+ *
+ * Copies the connector's hdr capability fields into the hdr_properties
+ * blob when the sink reports hdr support; otherwise leaves it untouched.
+ */
+static void sde_connector_update_hdr_props(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn = to_sde_connector(connector);
+	struct drm_msm_ext_panel_hdr_properties hdr_prop = {};
+
+	hdr_prop.hdr_supported = connector->hdr_supported;
+	if (!hdr_prop.hdr_supported)
+		return;
+
+	hdr_prop.hdr_eotf = connector->hdr_eotf;
+	hdr_prop.hdr_metadata_type_one = connector->hdr_metadata_type_one;
+	hdr_prop.hdr_max_luminance = connector->hdr_max_luminance;
+	hdr_prop.hdr_avg_luminance = connector->hdr_avg_luminance;
+	hdr_prop.hdr_min_luminance = connector->hdr_min_luminance;
+
+	msm_property_set_blob(&c_conn->property_info, &c_conn->blob_hdr,
+			&hdr_prop, sizeof(hdr_prop), CONNECTOR_PROP_HDR_INFO);
+}
+
+/**
+ * sde_connector_detect - query connection status from the display backend
+ * @connector: Pointer to drm connector structure
+ * @force: Force-detect flag from the drm framework
+ *
+ * Return: Backend-reported status, connector_status_unknown when no detect
+ * callback exists or on invalid input.
+ */
+static enum drm_connector_status
+sde_connector_detect(struct drm_connector *connector, bool force)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return connector_status_unknown;
+	}
+
+	c_conn = to_sde_connector(connector);
+	if (!c_conn->ops.detect)
+		return connector_status_unknown;
+
+	return c_conn->ops.detect(connector, force, c_conn->display);
+}
+
+/* drm core connector callbacks, all routed to the sde handlers above */
+static const struct drm_connector_funcs sde_connector_ops = {
+	.dpms =                   sde_connector_dpms,
+	.reset =                  sde_connector_atomic_reset,
+	.detect =                 sde_connector_detect,
+	.destroy =                sde_connector_destroy,
+	.fill_modes =             drm_helper_probe_single_connector_modes,
+	.atomic_duplicate_state = sde_connector_atomic_duplicate_state,
+	.atomic_destroy_state =   sde_connector_atomic_destroy_state,
+	.atomic_set_property =    sde_connector_atomic_set_property,
+	.atomic_get_property =    sde_connector_atomic_get_property,
+	.set_property =           sde_connector_set_property,
+};
+
+/**
+ * sde_connector_get_modes - probe-helper callback to enumerate display modes
+ * @connector: Pointer to drm connector structure
+ *
+ * Return: Number of modes reported by the display backend, zero when no
+ * callback is available or on invalid input.
+ */
+static int sde_connector_get_modes(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+	int ret = 0;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return 0;
+	}
+
+	c_conn = to_sde_connector(connector);
+	if (!c_conn->ops.get_modes) {
+		SDE_DEBUG("missing get_modes callback\n");
+		return 0;
+	}
+	ret = c_conn->ops.get_modes(connector, c_conn->display);
+	/* refresh the hdr blob once the sink's modes/caps are known */
+	if (ret)
+		sde_connector_update_hdr_props(connector);
+
+	return ret;
+}
+
+/**
+ * sde_connector_mode_valid - probe-helper callback to validate a mode
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to the display mode being validated
+ *
+ * Return: Backend verdict when a mode_valid callback exists, otherwise
+ * MODE_OK; MODE_ERROR on invalid arguments.
+ */
+static enum drm_mode_status
+sde_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector || !mode) {
+		SDE_ERROR("invalid argument(s), conn %pK, mode %pK\n",
+				connector, mode);
+		return MODE_ERROR;
+	}
+
+	c_conn = to_sde_connector(connector);
+	if (!c_conn->ops.mode_valid)
+		return MODE_OK;
+
+	return c_conn->ops.mode_valid(connector, mode, c_conn->display);
+}
+
+/**
+ * sde_connector_best_encoder - return the encoder backing this connector
+ * @connector: Pointer to drm connector structure
+ *
+ * Return: The single encoder stored at init time, or NULL on invalid input.
+ */
+static struct drm_encoder *
+sde_connector_best_encoder(struct drm_connector *connector)
+{
+	struct sde_connector *c_conn;
+
+	if (!connector) {
+		SDE_ERROR("invalid connector\n");
+		return NULL;
+	}
+	/* derive the sde connector only after validating the argument */
+	c_conn = to_sde_connector(connector);
+
+	/*
+	 * This is true for now, revisit this code when multiple encoders are
+	 * supported.
+	 */
+	return c_conn->encoder;
+}
+
+/* probe-helper callbacks; each defers to the display backend when set */
+static const struct drm_connector_helper_funcs sde_connector_helper_ops = {
+	.get_modes =    sde_connector_get_modes,
+	.mode_valid =   sde_connector_mode_valid,
+	.best_encoder = sde_connector_best_encoder,
+};
+
+/**
+ * sde_connector_init - create and register a generic sde connector
+ * @dev: Pointer to drm device struct
+ * @encoder: Pointer to the encoder the new connector attaches to
+ * @panel: Optional pointer to drm panel
+ * @display: Private display handle passed back to every callback
+ * @ops: Optional callback table, copied into the new connector
+ * @connector_poll: Polling setting for the base drm connector
+ * @connector_type: DRM_MODE_CONNECTOR_* type to register
+ *
+ * Return: Pointer to the new drm connector on success, ERR_PTR on failure.
+ */
+struct drm_connector *sde_connector_init(struct drm_device *dev,
+		struct drm_encoder *encoder,
+		struct drm_panel *panel,
+		void *display,
+		const struct sde_connector_ops *ops,
+		int connector_poll,
+		int connector_type)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	struct sde_kms_info *info;
+	struct sde_connector *c_conn = NULL;
+	struct sde_splash_info *sinfo;
+	int rc;
+
+	if (!dev || !dev->dev_private || !encoder) {
+		SDE_ERROR("invalid argument(s), dev %pK, enc %pK\n",
+				dev, encoder);
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms reference\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	c_conn = kzalloc(sizeof(*c_conn), GFP_KERNEL);
+	if (!c_conn) {
+		SDE_ERROR("failed to alloc sde connector\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = drm_connector_init(dev,
+			&c_conn->base,
+			&sde_connector_ops,
+			connector_type);
+	if (rc)
+		goto error_free_conn;
+
+	c_conn->connector_type = connector_type;
+	c_conn->encoder = encoder;
+	c_conn->panel = panel;
+	c_conn->display = display;
+
+	/* initial power bookkeeping: fully on, hpd enabled */
+	c_conn->dpms_mode = DRM_MODE_DPMS_ON;
+	c_conn->hpd_mode = SDE_MODE_HPD_ON;
+	c_conn->lp_mode = 0;
+	c_conn->last_panel_power_mode = SDE_MODE_DPMS_ON;
+
+
+	/* prefer the non-realtime iommu domains when an NRT vbif exists */
+	sde_kms = to_sde_kms(priv->kms);
+	if (sde_kms->vbif[VBIF_NRT]) {
+		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
+	} else {
+		c_conn->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		c_conn->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+	}
+
+	if (ops)
+		c_conn->ops = *ops;
+
+	c_conn->base.helper_private = &sde_connector_helper_ops;
+	c_conn->base.polled = connector_poll;
+	c_conn->base.interlace_allowed = 0;
+	c_conn->base.doublescan_allowed = 0;
+
+	/* fence timeline name, e.g. "conn3" */
+	snprintf(c_conn->name,
+			SDE_CONNECTOR_NAME_SIZE,
+			"conn%u",
+			c_conn->base.base.id);
+
+	rc = sde_fence_init(&c_conn->retire_fence, c_conn->name,
+			c_conn->base.base.id);
+	if (rc) {
+		SDE_ERROR("failed to init fence, %d\n", rc);
+		goto error_cleanup_conn;
+	}
+
+	mutex_init(&c_conn->lock);
+
+	rc = drm_connector_register(&c_conn->base);
+	if (rc) {
+		SDE_ERROR("failed to register drm connector, %d\n", rc);
+		goto error_cleanup_fence;
+	}
+
+	rc = drm_mode_connector_attach_encoder(&c_conn->base, encoder);
+	if (rc) {
+		SDE_ERROR("failed to attach encoder to connector, %d\n", rc);
+		goto error_unregister_conn;
+	}
+
+	/* create properties */
+	msm_property_init(&c_conn->property_info, &c_conn->base.base, dev,
+			priv->conn_property, c_conn->property_data,
+			CONNECTOR_PROP_COUNT, CONNECTOR_PROP_BLOBCOUNT,
+			sizeof(struct sde_connector_state));
+
+	/* NOTE(review): failures below jump to error_unregister_conn, which
+	 * does not call msm_property_destroy() for the property_info
+	 * initialized above — confirm whether this leaks property state.
+	 */
+	if (c_conn->ops.post_init) {
+		info = kmalloc(sizeof(*info), GFP_KERNEL);
+		if (!info) {
+			SDE_ERROR("failed to allocate info buffer\n");
+			rc = -ENOMEM;
+			goto error_unregister_conn;
+		}
+
+		sde_kms_info_reset(info);
+		rc = c_conn->ops.post_init(&c_conn->base, info, display);
+		if (rc) {
+			SDE_ERROR("post-init failed, %d\n", rc);
+			kfree(info);
+			goto error_unregister_conn;
+		}
+
+		msm_property_install_blob(&c_conn->property_info,
+				"capabilities",
+				DRM_MODE_PROP_IMMUTABLE,
+				CONNECTOR_PROP_SDE_INFO);
+
+		/* blob data is copied by the helper; info can be freed */
+		msm_property_set_blob(&c_conn->property_info,
+				&c_conn->blob_caps,
+				SDE_KMS_INFO_DATA(info),
+				SDE_KMS_INFO_DATALEN(info),
+				CONNECTOR_PROP_SDE_INFO);
+		kfree(info);
+	}
+
+	/* hdr capability blob only applies to HDMI sinks */
+	if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+		msm_property_install_blob(&c_conn->property_info,
+			"hdr_properties",
+			DRM_MODE_PROP_IMMUTABLE,
+			CONNECTOR_PROP_HDR_INFO);
+	}
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+		"hdr_control", 0x0, 0, ~0, 0,
+		CONNECTOR_PROP_HDR_CONTROL);
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+		"RETIRE_FENCE", 0x0, 0, ~0, 0, CONNECTOR_PROP_RETIRE_FENCE);
+
+	msm_property_install_volatile_signed_range(&c_conn->property_info,
+		"PLL_DELTA", 0x0, INT_MIN, INT_MAX, 0,
+		CONNECTOR_PROP_PLL_DELTA);
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+		"PLL_ENABLE", 0x0, 0, 1, 0,
+		CONNECTOR_PROP_PLL_ENABLE);
+
+	msm_property_install_volatile_range(&c_conn->property_info,
+		"HDCP_VERSION", 0x0, 0, U8_MAX, 0,
+		CONNECTOR_PROP_HDCP_VERSION);
+
+	/* enum/bitmask properties */
+	msm_property_install_enum(&c_conn->property_info, "topology_name",
+			DRM_MODE_PROP_IMMUTABLE, 0, e_topology_name,
+			ARRAY_SIZE(e_topology_name),
+			CONNECTOR_PROP_TOPOLOGY_NAME, 0);
+	msm_property_install_enum(&c_conn->property_info, "topology_control",
+			0, 1, e_topology_control,
+			ARRAY_SIZE(e_topology_control),
+			CONNECTOR_PROP_TOPOLOGY_CONTROL, 0);
+
+	msm_property_install_enum(&c_conn->property_info, "LP",
+			0, 0, e_power_mode,
+			ARRAY_SIZE(e_power_mode),
+			CONNECTOR_PROP_LP, 0);
+
+	msm_property_install_enum(&c_conn->property_info, "HPD_OFF",
+			DRM_MODE_PROP_ATOMIC, 0, hpd_clock_state,
+			ARRAY_SIZE(hpd_clock_state),
+			CONNECTOR_PROP_HPD_OFF, 0);
+
+	rc = msm_property_install_get_status(&c_conn->property_info);
+	if (rc) {
+		SDE_ERROR("failed to create one or more properties\n");
+		goto error_destroy_property;
+	}
+
+	SDE_DEBUG("connector %d attach encoder %d\n",
+			c_conn->base.base.id, encoder->base.id);
+
+	/* account for this connector during LK splash handoff */
+	sinfo = &sde_kms->splash_info;
+	if (sinfo && sinfo->handoff)
+		sde_splash_setup_connector_count(sinfo, connector_type,
+						display, c_conn->is_shared);
+
+	/* publish the connector in the msm driver's tracking array */
+	priv->connectors[priv->num_connectors++] = &c_conn->base;
+
+	return &c_conn->base;
+
+error_destroy_property:
+	if (c_conn->blob_caps)
+		drm_property_unreference_blob(c_conn->blob_caps);
+	if (c_conn->blob_hdr)
+		drm_property_unreference_blob(c_conn->blob_hdr);
+	msm_property_destroy(&c_conn->property_info);
+error_unregister_conn:
+	drm_connector_unregister(&c_conn->base);
+error_cleanup_fence:
+	mutex_destroy(&c_conn->lock);
+	sde_fence_deinit(&c_conn->retire_fence);
+error_cleanup_conn:
+	drm_connector_cleanup(&c_conn->base);
+error_free_conn:
+	kfree(c_conn);
+
+	return ERR_PTR(rc);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_connector.h b/drivers/gpu/drm/msm/sde/sde_connector.h
new file mode 100644
index 000000000000..5f44fb7bf094
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_connector.h
@@ -0,0 +1,429 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_CONNECTOR_H_
+#define _SDE_CONNECTOR_H_
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_panel.h>
+
+#include "msm_drv.h"
+#include "msm_prop.h"
+#include "sde_kms.h"
+#include "sde_fence.h"
+
+#define SDE_MODE_HPD_ON 0
+#define SDE_MODE_HPD_OFF 1
+
+#define SDE_CONNECTOR_NAME_SIZE 16
+
+struct sde_connector;
+struct sde_connector_state;
+
+/**
+ * struct sde_connector_ops - callback functions for generic sde connector
+ * Individual callbacks documented below.
+ */
+struct sde_connector_ops {
+	/**
+	 * post_init - perform additional initialization steps
+	 * @connector: Pointer to drm connector structure
+	 * @info: Pointer to sde connector info structure
+	 * @display: Pointer to private display handle
+	 * Returns: Zero on success
+	 */
+	int (*post_init)(struct drm_connector *connector,
+			void *info,
+			void *display);
+
+	/**
+	 * pre_deinit - perform additional deinitialization steps
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display handle
+	 * Returns: Zero on success
+	 */
+	int (*pre_deinit)(struct drm_connector *connector,
+			void *display);
+
+	/**
+	 * detect - determine if connector is connected
+	 * @connector: Pointer to drm connector structure
+	 * @force: Force detect setting from drm framework
+	 * @display: Pointer to private display handle
+	 * Returns: Connector 'is connected' status
+	 */
+	enum drm_connector_status (*detect)(struct drm_connector *connector,
+			bool force,
+			void *display);
+
+	/**
+	 * get_modes - add drm modes via drm_mode_probed_add()
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display handle
+	 * Returns: Number of modes added
+	 */
+	int (*get_modes)(struct drm_connector *connector,
+			void *display);
+
+	/**
+	 * mode_valid - determine if specified mode is valid
+	 * @connector: Pointer to drm connector structure
+	 * @mode: Pointer to drm mode structure
+	 * @display: Pointer to private display handle
+	 * Returns: Validity status for specified mode
+	 */
+	enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+			struct drm_display_mode *mode,
+			void *display);
+
+	/**
+	 * set_property - set property value
+	 * @connector: Pointer to drm connector structure
+	 * @state: Pointer to drm connector state structure
+	 * @property_index: DRM property index
+	 * @value: Incoming property value
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*set_property)(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t value,
+			void *display);
+
+	/**
+	 * get_property - get property value
+	 * @connector: Pointer to drm connector structure
+	 * @state: Pointer to drm connector state structure
+	 * @property_index: DRM property index
+	 * @value: Pointer to variable for accepting property value
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*get_property)(struct drm_connector *connector,
+			struct drm_connector_state *state,
+			int property_index,
+			uint64_t *value,
+			void *display);
+
+	/**
+	 * get_info - get display information
+	 * @info: Pointer to msm display info structure
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*get_info)(struct msm_display_info *info, void *display);
+
+	/**
+	 * set_topology_ctl - set sde display topology property
+	 * @connector: Pointer to drm connector structure
+	 * @adj_mode: adjusted mode
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*set_topology_ctl)(struct drm_connector *connector,
+		struct drm_display_mode *adj_mode, void *display);
+
+	/**
+	 * set_backlight - program a new backlight level into the display
+	 * @display: Pointer to private display structure
+	 * @bl_lvl: Backlight level to apply
+	 * Returns: Zero on success
+	 */
+	int (*set_backlight)(void *display, u32 bl_lvl);
+
+
+	/**
+	 * pre_kickoff - trigger display to program kickoff-time features
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display structure
+	 * @params: Parameter bundle of connector-stored information for
+	 *	kickoff-time programming into the display
+	 * Returns: Zero on success
+	 */
+	int (*pre_kickoff)(struct drm_connector *connector,
+			void *display,
+			struct msm_display_kickoff_params *params);
+
+	/**
+	 * mode_needs_full_range - does the mode need full range
+	 * quantization
+	 * @display: Pointer to private display structure
+	 * Returns: true or false based on whether full range is needed
+	 */
+	bool (*mode_needs_full_range)(void *display);
+
+	/**
+	 * get_csc_type - returns the CSC type to be used
+	 * by the CDM block based on HDR state
+	 * @connector: Pointer to drm connector structure
+	 * @display: Pointer to private display structure
+	 * Returns: type of CSC matrix to be used
+	 */
+	enum sde_csc_type (*get_csc_type)(struct drm_connector *connector,
+		void *display);
+
+	/**
+	 * set_power - update dpms setting
+	 * @connector: Pointer to drm connector structure
+	 * @power_mode: One of the following,
+	 *		SDE_MODE_DPMS_ON
+	 *		SDE_MODE_DPMS_LP1
+	 *		SDE_MODE_DPMS_LP2
+	 *		SDE_MODE_DPMS_OFF
+	 * @display: Pointer to private display structure
+	 * Returns: Zero on success
+	 */
+	int (*set_power)(struct drm_connector *connector,
+			int power_mode, void *display);
+};
+
+/**
+ * struct sde_connector - local sde connector structure
+ * @base: Base drm connector structure
+ * @connector_type: Set to one of DRM_MODE_CONNECTOR_ types
+ * @encoder: Pointer to preferred drm encoder
+ * @panel: Pointer to drm panel, if present
+ * @display: Pointer to private display data structure
+ * @aspace: GEM address spaces, one per SDE IOMMU domain
+ * @name: ASCII name of connector
+ * @lock: Mutex lock object for this structure
+ * @retire_fence: Retire fence reference
+ * @ops: Local callback function pointer table
+ * @dpms_mode: DPMS property setting from user space
+ * @hpd_mode: HPD property setting from user space
+ *	(see CONNECTOR_PROP_HPD_OFF)
+ * @lp_mode: LP property setting from user space
+ * @last_panel_power_mode: Last consolidated dpms/lp mode setting
+ * @property_info: Private structure for generic property handling
+ * @property_data: Array of private data for generic property handling
+ * @blob_caps: Pointer to blob structure for 'capabilities' property
+ * @blob_hdr: Pointer to blob structure for 'hdr_properties' property
+ * @is_shared: connector is shared
+ * @shared_roi: roi of the shared display
+ */
+struct sde_connector {
+	struct drm_connector base;
+
+	int connector_type;
+
+	struct drm_encoder *encoder;
+	struct drm_panel *panel;
+	void *display;
+
+	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
+
+	char name[SDE_CONNECTOR_NAME_SIZE];
+
+	struct mutex lock;
+	struct sde_fence retire_fence;
+	struct sde_connector_ops ops;
+	int dpms_mode;
+	u64 hpd_mode;
+	int lp_mode;
+	int last_panel_power_mode;
+
+	struct msm_property_info property_info;
+	struct msm_property_data property_data[CONNECTOR_PROP_COUNT];
+	struct drm_property_blob *blob_caps;
+	struct drm_property_blob *blob_hdr;
+	bool is_shared;
+	struct sde_rect shared_roi;
+};
+
+/**
+ * to_sde_connector - convert drm_connector pointer to sde connector pointer
+ * @X: Pointer to drm_connector structure
+ * Returns: Pointer to sde_connector structure
+ */
+#define to_sde_connector(x) container_of((x), struct sde_connector, base)
+
+/**
+ * sde_connector_get_display - get sde connector's private display pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private display structure
+ */
+#define sde_connector_get_display(C) \
+ ((C) ? to_sde_connector((C))->display : NULL)
+
+/**
+ * sde_connector_get_panel - get sde connector's private panel pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private display structure
+ */
+#define sde_connector_get_panel(C) \
+ ((C) ? to_sde_connector((C))->panel : NULL)
+
+/**
+ * sde_connector_get_encoder - get sde connector's private encoder pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private encoder structure
+ */
+#define sde_connector_get_encoder(C) \
+ ((C) ? to_sde_connector((C))->encoder : NULL)
+
+/**
+ * sde_connector_get_propinfo - get sde connector's property info pointer
+ * @C: Pointer to drm connector structure
+ * Returns: Pointer to associated private property info structure
+ */
+#define sde_connector_get_propinfo(C) \
+ ((C) ? &to_sde_connector((C))->property_info : NULL)
+
+/**
+ * struct sde_connector_state - private connector status structure
+ * @base: Base drm connector structure
+ * @out_fb: Pointer to output frame buffer, if applicable
+ * @aspace: Address space for accessing frame buffer objects, if applicable
+ * @property_values: Local cache of current connector property values,
+ *	indexed by connector property enum
+ * @hdr_ctrl: HDR control info passed from userspace
+ */
+struct sde_connector_state {
+	struct drm_connector_state base;
+	struct drm_framebuffer *out_fb;
+	struct msm_gem_address_space *aspace;
+	uint64_t property_values[CONNECTOR_PROP_COUNT];
+	struct drm_msm_ext_panel_hdr_ctrl hdr_ctrl;
+};
+
+/**
+ * to_sde_connector_state - convert drm_connector_state pointer to
+ * sde connector state pointer
+ * @X: Pointer to drm_connector_state structure
+ * Returns: Pointer to sde_connector_state structure
+ */
+#define to_sde_connector_state(x) \
+ container_of((x), struct sde_connector_state, base)
+
+/**
+ * sde_connector_get_property - query integer value of connector property
+ * @S: Pointer to drm connector state
+ * @X: Property index, from enum msm_mdp_connector_property
+ * Returns: Integer value of requested property
+ */
+#define sde_connector_get_property(S, X) \
+ ((S) && ((X) < CONNECTOR_PROP_COUNT) ? \
+ (to_sde_connector_state((S))->property_values[(X)]) : 0)
+
+/**
+ * sde_connector_get_property_values - retrieve property values cache
+ * @S: Pointer to drm connector state
+ * Returns: Pointer to the property values cache, or NULL
+ */
+#define sde_connector_get_property_values(S) \
+ ((S) ? (to_sde_connector_state((S))->property_values) : NULL)
+
+/**
+ * sde_connector_get_out_fb - query out_fb value from sde connector state
+ * @S: Pointer to drm connector state
+ * Returns: Output fb associated with specified connector state
+ */
+#define sde_connector_get_out_fb(S) \
+ ((S) ? to_sde_connector_state((S))->out_fb : NULL)
+
+/**
+ * sde_connector_get_topology_name - helper accessor to retrieve topology_name
+ * @connector: pointer to drm connector
+ * Returns: value of the CONNECTOR_PROP_TOPOLOGY_NAME property or 0
+ */
+static inline uint64_t sde_connector_get_topology_name(
+		struct drm_connector *connector)
+{
+	struct drm_connector_state *conn_state;
+
+	conn_state = connector ? connector->state : NULL;
+	if (!conn_state)
+		return 0;
+
+	return sde_connector_get_property(conn_state,
+			CONNECTOR_PROP_TOPOLOGY_NAME);
+}
+
+/**
+ * sde_connector_init - create drm connector object for a given display
+ * @dev: Pointer to drm device struct
+ * @encoder: Pointer to associated encoder
+ * @panel: Pointer to associated panel, can be NULL
+ * @display: Pointer to associated display object
+ * @ops: Pointer to callback operations function table
+ * @connector_poll: Set to appropriate DRM_CONNECTOR_POLL_ setting
+ * @connector_type: Set to appropriate DRM_MODE_CONNECTOR_ type
+ * Returns: Pointer to newly created drm connector struct
+ */
+struct drm_connector *sde_connector_init(struct drm_device *dev,
+ struct drm_encoder *encoder,
+ struct drm_panel *panel,
+ void *display,
+ const struct sde_connector_ops *ops,
+ int connector_poll,
+ int connector_type);
+
+/**
+ * sde_connector_prepare_fence - prepare fence support for current commit
+ * @connector: Pointer to drm connector object
+ */
+void sde_connector_prepare_fence(struct drm_connector *connector);
+
+/**
+ * sde_connector_complete_commit - signal completion of current commit
+ * @connector: Pointer to drm connector object
+ */
+void sde_connector_complete_commit(struct drm_connector *connector);
+
+/**
+ * sde_connector_get_info - query display specific information
+ * @connector: Pointer to drm connector object
+ * @info: Pointer to msm display information structure
+ * Returns: Zero on success
+ */
+int sde_connector_get_info(struct drm_connector *connector,
+ struct msm_display_info *info);
+
+/**
+ * sde_connector_pre_kickoff - trigger kickoff time feature programming
+ * @connector: Pointer to drm connector object
+ * Returns: Zero on success
+ */
+int sde_connector_pre_kickoff(struct drm_connector *connector);
+
+/**
+ * sde_connector_mode_needs_full_range - query quantization type
+ * for the connector mode
+ * @connector: Pointer to drm connector object
+ * Returns: true OR false based on connector mode
+ */
+bool sde_connector_mode_needs_full_range(struct drm_connector *connector);
+
+/**
+ * sde_connector_get_csc_type - query csc type
+ * to be used for the connector
+ * @connector: Pointer to drm connector object
+ * Returns: csc type based on connector HDR state
+ */
+enum sde_csc_type sde_connector_get_csc_type(struct drm_connector *conn);
+
+/**
+ * sde_connector_get_dpms - query dpms setting
+ * @connector: Pointer to drm connector structure
+ * Returns: Current DPMS setting for connector
+ */
+int sde_connector_get_dpms(struct drm_connector *connector);
+
+/**
+ * sde_connector_needs_offset - adjust the output fence offset based on
+ *	display type
+ * @connector: Pointer to drm connector object
+ * Returns: true if offset is required, false for all other cases.
+ */
+static inline bool sde_connector_needs_offset(struct drm_connector *connector)
+{
+	if (!connector)
+		return false;
+
+	/* every connector type except virtual needs the fence offset */
+	return to_sde_connector(connector)->connector_type !=
+			DRM_MODE_CONNECTOR_VIRTUAL;
+}
+
+#endif /* _SDE_CONNECTOR_H_ */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.c b/drivers/gpu/drm/msm/sde/sde_core_irq.c
new file mode 100644
index 000000000000..b587e5c02b63
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.c
@@ -0,0 +1,583 @@
+/* Copyright (c) 2015-2017,2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "sde_core_irq.h"
+#include "sde_power_handle.h"
+
+/**
+ * sde_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler (a struct sde_kms pointer)
+ * @irq_idx: interrupt index
+ */
+static void sde_core_irq_callback_handler(void *arg, int irq_idx)
+{
+	struct sde_kms *sde_kms = arg;
+	struct sde_irq *irq_obj = &sde_kms->irq_obj;
+	struct sde_irq_callback *cb;
+	unsigned long irq_flags;
+	bool cb_tbl_error = false;
+	int enable_counts = 0;
+
+	pr_debug("irq_idx=%d\n", irq_idx);
+
+	/*
+	 * NOTE(review): irq_idx comes from the HW dispatcher and indexes
+	 * irq_cb_tbl without a range check here — confirm the dispatcher
+	 * only reports indices below irq_idx_tbl_size.
+	 */
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+		/* print error outside lock */
+		cb_tbl_error = true;
+		enable_counts = atomic_read(
+			&sde_kms->irq_obj.enable_counts[irq_idx]);
+	}
+
+	atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+	/*
+	 * Perform registered function callback; cb_lock is held so the
+	 * callback list cannot change underneath the walk.
+	 */
+	list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+		if (cb->func)
+			cb->func(cb->arg, irq_idx);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	/* logging deferred from above so it happens outside the spinlock */
+	if (cb_tbl_error) {
+		SDE_ERROR("irq has no registered callback, idx %d enables %d\n",
+				irq_idx, enable_counts);
+		SDE_EVT32_IRQ(irq_idx, enable_counts, SDE_EVTLOG_ERROR);
+	}
+
+	/*
+	 * Clear pending interrupt status in HW.
+	 * NOTE: sde_core_irq_callback_handler is protected by top-level
+	 * spinlock, so it is safe to clear any interrupt status here.
+	 */
+	sde_kms->hw_intr->ops.clear_interrupt_status(
+			sde_kms->hw_intr,
+			irq_idx);
+}
+
+/* translate (intr_type, instance) into an irq_idx via the HW lookup op */
+int sde_core_irq_idx_lookup(struct sde_kms *sde_kms,
+		enum sde_intr_type intr_type, u32 instance_idx)
+{
+	if (!sde_kms || !sde_kms->hw_intr)
+		return -EINVAL;
+
+	if (!sde_kms->hw_intr->ops.irq_idx_lookup)
+		return -EINVAL;
+
+	return sde_kms->hw_intr->ops.irq_idx_lookup(intr_type, instance_idx);
+}
+
+/**
+ * _sde_core_irq_enable - enable core interrupt given by the index
+ * @sde_kms: Pointer to sde kms context
+ * @irq_idx: interrupt index
+ * Return: 0 on success, -EINVAL on bad arguments, or the HW op's error code
+ */
+static int _sde_core_irq_enable(struct sde_kms *sde_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0;
+
+	if (!sde_kms || !sde_kms->hw_intr ||
+			!sde_kms->irq_obj.enable_counts ||
+			!sde_kms->irq_obj.irq_counts) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+	/* refcounted: only touch HW on the 0 -> 1 transition */
+	if (atomic_inc_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+		ret = sde_kms->hw_intr->ops.enable_irq(
+				sde_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			SDE_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+					irq_idx);
+
+		SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+		/* empty callback list but interrupt is enabled */
+		if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]))
+			SDE_ERROR("irq_idx=%d enabled with no callback\n",
+					irq_idx);
+	}
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return ret;
+}
+
+/* enable a batch of core interrupts; stops at the first failure */
+int sde_core_irq_enable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+	int ret = 0;
+	int i;
+
+	if (!sde_kms || !irq_idxs || !irq_count) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < irq_count; i++) {
+		ret = _sde_core_irq_enable(sde_kms, irq_idxs[i]);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+/**
+ * _sde_core_irq_disable - disable core interrupt given by the index
+ * @sde_kms: Pointer to sde kms context
+ * @irq_idx: interrupt index
+ * Return: 0 on success, -EINVAL on bad arguments, or the HW op's error code
+ */
+static int _sde_core_irq_disable(struct sde_kms *sde_kms, int irq_idx)
+{
+	unsigned long irq_flags;
+	int ret = 0;
+
+	if (!sde_kms || !sde_kms->hw_intr || !sde_kms->irq_obj.enable_counts) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("irq_idx=%d enable_count=%d\n", irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx,
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]));
+	/*
+	 * refcounted: only touch HW on the 1 -> 0 transition.
+	 * NOTE(review): there is no guard against the count going negative
+	 * if disable is called more times than enable — confirm callers are
+	 * balanced.
+	 */
+	if (atomic_dec_return(&sde_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+		ret = sde_kms->hw_intr->ops.disable_irq(
+				sde_kms->hw_intr,
+				irq_idx);
+		if (ret)
+			SDE_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+					irq_idx);
+		SDE_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+	}
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return ret;
+}
+
+/* disable a batch of core interrupts; stops at the first failure */
+int sde_core_irq_disable(struct sde_kms *sde_kms, int *irq_idxs, u32 irq_count)
+{
+	int ret = 0;
+	int i;
+
+	if (!sde_kms || !irq_idxs || !irq_count) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < irq_count; i++) {
+		ret = _sde_core_irq_disable(sde_kms, irq_idxs[i]);
+		if (ret)
+			break;
+	}
+
+	return ret;
+}
+
+/* read (and optionally clear) the HW status of a single interrupt */
+u32 sde_core_irq_read(struct sde_kms *sde_kms, int irq_idx, bool clear)
+{
+	if (!sde_kms)
+		return 0;
+
+	if (!sde_kms->hw_intr || !sde_kms->hw_intr->ops.get_interrupt_status)
+		return 0;
+
+	return sde_kms->hw_intr->ops.get_interrupt_status(sde_kms->hw_intr,
+			irq_idx, clear);
+}
+
+/**
+ * sde_core_irq_register_callback - register a callback for an irq_idx
+ * @sde_kms: Pointer to sde kms context
+ * @irq_idx: interrupt index, e.g. from sde_core_irq_idx_lookup()
+ * @register_irq_cb: callback descriptor; must have a valid func
+ * Return: 0 on success, -EINVAL on invalid arguments
+ */
+int sde_core_irq_register_callback(struct sde_kms *sde_kms, int irq_idx,
+		struct sde_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
+			!sde_kms->irq_obj.irq_cb_tbl) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	/* hw_intr is dereferenced below for the table size; guard it too */
+	if (!sde_kms->hw_intr) {
+		SDE_ERROR("invalid hw_intr\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx, register_irq_cb);
+	/*
+	 * del_init first so a re-registered callback is simply moved to the
+	 * tail of the list instead of being linked twice.
+	 */
+	list_del_init(&register_irq_cb->list);
+	list_add_tail(&register_irq_cb->list,
+			&sde_kms->irq_obj.irq_cb_tbl[irq_idx]);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+/**
+ * sde_core_irq_unregister_callback - remove a callback from an irq_idx
+ * @sde_kms: Pointer to sde kms context
+ * @irq_idx: interrupt index the callback was registered against
+ * @register_irq_cb: callback descriptor; must have a valid func
+ * Return: 0 on success, -EINVAL on invalid arguments
+ */
+int sde_core_irq_unregister_callback(struct sde_kms *sde_kms, int irq_idx,
+		struct sde_irq_callback *register_irq_cb)
+{
+	unsigned long irq_flags;
+
+	if (!sde_kms || !register_irq_cb || !register_irq_cb->func ||
+			!sde_kms->irq_obj.irq_cb_tbl) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	/* hw_intr is dereferenced below for the table size; guard it too */
+	if (!sde_kms->hw_intr) {
+		SDE_ERROR("invalid hw_intr\n");
+		return -EINVAL;
+	}
+
+	if (irq_idx < 0 || irq_idx >= sde_kms->hw_intr->irq_idx_tbl_size) {
+		SDE_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+	spin_lock_irqsave(&sde_kms->irq_obj.cb_lock, irq_flags);
+	SDE_EVT32(irq_idx, register_irq_cb);
+	list_del_init(&register_irq_cb->list);
+	/* empty callback list but interrupt is still enabled */
+	if (list_empty(&sde_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+			atomic_read(&sde_kms->irq_obj.enable_counts[irq_idx]))
+		SDE_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+	spin_unlock_irqrestore(&sde_kms->irq_obj.cb_lock, irq_flags);
+
+	return 0;
+}
+
+/* clear any pending interrupt status across all HW interrupt sources */
+static void sde_clear_all_irqs(struct sde_kms *sde_kms)
+{
+	if (sde_kms && sde_kms->hw_intr &&
+			sde_kms->hw_intr->ops.clear_all_irqs)
+		sde_kms->hw_intr->ops.clear_all_irqs(sde_kms->hw_intr);
+}
+
+/* mask every HW interrupt source in one shot */
+static void sde_disable_all_irqs(struct sde_kms *sde_kms)
+{
+	if (sde_kms && sde_kms->hw_intr &&
+			sde_kms->hw_intr->ops.disable_all_irqs)
+		sde_kms->hw_intr->ops.disable_all_irqs(sde_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate the single_open() wrapper and a
+ * read-only file_operations table for a seq_file show function named
+ * __prefix ## _show.
+ */
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+/*
+ * sde_debugfs_core_irq_show - seq_file dump of per-irq counters
+ *
+ * Prints one line per irq index that has a nonzero fired count, enable
+ * refcount, or registered callback count; idle indices are skipped.
+ */
+static int sde_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+	struct sde_irq *irq_obj = s->private;
+	struct sde_irq_callback *cb;
+	unsigned long irq_flags;
+	int i, irq_count, enable_count, cb_count;
+
+	if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;
+	}
+
+	for (i = 0; i < irq_obj->total_irqs; i++) {
+		/* snapshot each index under cb_lock so counts stay coherent */
+		spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+		cb_count = 0;
+		irq_count = atomic_read(&irq_obj->irq_counts[i]);
+		enable_count = atomic_read(&irq_obj->enable_counts[i]);
+		list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+			cb_count++;
+		spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+		if (irq_count || enable_count || cb_count)
+			seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+					i, irq_count, enable_count, cb_count);
+	}
+
+	return 0;
+}
+
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_core_irq);
+
+/*
+ * sde_debugfs_core_irq_init - expose the per-irq counters under debugfs
+ * @sde_kms: Pointer to sde kms context
+ * @parent: debugfs directory to create the node in
+ * Return: 0 (debugfs failures are intentionally non-fatal)
+ */
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	/*
+	 * The generated fops only implement read/seek (no write handler),
+	 * so create the node read-only rather than 0644.
+	 */
+	sde_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0444,
+			parent, &sde_kms->irq_obj,
+			&sde_debugfs_core_irq_fops);
+
+	return 0;
+}
+
+/* remove the debugfs node created by sde_debugfs_core_irq_init() */
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove(sde_kms->irq_obj.debugfs_file);
+	sde_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+/* CONFIG_DEBUG_FS disabled: provide no-op stubs with the same signatures */
+static int sde_debugfs_core_irq_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+
+static void sde_debugfs_core_irq_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+
+/*
+ * sde_core_irq_preinstall - reset HW interrupt state and allocate the
+ * per-irq bookkeeping tables (callback lists, enable refcounts, fired
+ * counts). Tables are freed in sde_core_irq_uninstall().
+ */
+void sde_core_irq_preinstall(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid device private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	/* power must be up while touching interrupt registers */
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+	sde_clear_all_irqs(sde_kms);
+	sde_disable_all_irqs(sde_kms);
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	spin_lock_init(&sde_kms->irq_obj.cb_lock);
+
+	/* Create irq callbacks for all possible irq_idx */
+	sde_kms->irq_obj.total_irqs = sde_kms->hw_intr->irq_idx_tbl_size;
+	sde_kms->irq_obj.irq_cb_tbl = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(struct list_head), GFP_KERNEL);
+	if (sde_kms->irq_obj.irq_cb_tbl == NULL) {
+		SDE_ERROR("Failed to allocate\n");
+		return;
+	}
+	sde_kms->irq_obj.enable_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	if (sde_kms->irq_obj.enable_counts == NULL) {
+		/* unwind the earlier allocation so state stays consistent */
+		kfree(sde_kms->irq_obj.irq_cb_tbl);
+		sde_kms->irq_obj.irq_cb_tbl = NULL;
+		SDE_ERROR("Failed to allocate\n");
+		return;
+	}
+	sde_kms->irq_obj.irq_counts = kcalloc(sde_kms->irq_obj.total_irqs,
+			sizeof(atomic_t), GFP_KERNEL);
+	if (sde_kms->irq_obj.irq_counts == NULL) {
+		/* unwind both earlier allocations */
+		kfree(sde_kms->irq_obj.irq_cb_tbl);
+		kfree(sde_kms->irq_obj.enable_counts);
+		sde_kms->irq_obj.irq_cb_tbl = NULL;
+		sde_kms->irq_obj.enable_counts = NULL;
+		SDE_ERROR("Failed to allocate\n");
+		return;
+	}
+	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++) {
+		INIT_LIST_HEAD(&sde_kms->irq_obj.irq_cb_tbl[i]);
+		atomic_set(&sde_kms->irq_obj.enable_counts[i], 0);
+		atomic_set(&sde_kms->irq_obj.irq_counts[i], 0);
+	}
+
+	sde_debugfs_core_irq_init(sde_kms, sde_kms->debugfs_root);
+}
+
+/* nothing to do after install; all setup happens in preinstall */
+int sde_core_irq_postinstall(struct sde_kms *sde_kms)
+{
+	return 0;
+}
+
+/*
+ * sde_core_irq_uninstall - quiesce HW interrupts and release the per-irq
+ * bookkeeping allocated in sde_core_irq_preinstall(). Warns (but proceeds)
+ * if any irq is still enabled or has a registered callback.
+ */
+void sde_core_irq_uninstall(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid device private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	sde_debugfs_core_irq_destroy(sde_kms);
+
+	/* power must be up while touching interrupt registers */
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+	for (i = 0; i < sde_kms->irq_obj.total_irqs; i++)
+		if (atomic_read(&sde_kms->irq_obj.enable_counts[i]) ||
+				!list_empty(&sde_kms->irq_obj.irq_cb_tbl[i]))
+			SDE_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+	sde_clear_all_irqs(sde_kms);
+	sde_disable_all_irqs(sde_kms);
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	kfree(sde_kms->irq_obj.irq_cb_tbl);
+	kfree(sde_kms->irq_obj.enable_counts);
+	kfree(sde_kms->irq_obj.irq_counts);
+	sde_kms->irq_obj.irq_cb_tbl = NULL;
+	sde_kms->irq_obj.enable_counts = NULL;
+	sde_kms->irq_obj.irq_counts = NULL;
+	sde_kms->irq_obj.total_irqs = 0;
+}
+
+/*
+ * sde_hw_irq_mask - irq_chip mask callback
+ *
+ * Only clears the hwirq bit in the software enabled_mask; no HW register
+ * access happens here. NOTE(review): the smp_mb barriers presumably pair
+ * with a lock-free reader of enabled_mask elsewhere — confirm the reader.
+ */
+static void sde_hw_irq_mask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	smp_mb__before_atomic();
+	clear_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+/*
+ * sde_hw_irq_unmask - irq_chip unmask callback
+ *
+ * Mirror of sde_hw_irq_mask(): sets the hwirq bit in the software
+ * enabled_mask only, with the same barrier pairing.
+ */
+static void sde_hw_irq_unmask(struct irq_data *irqd)
+{
+	struct sde_kms *sde_kms;
+
+	if (!irqd || !irq_data_get_irq_chip_data(irqd)) {
+		SDE_ERROR("invalid parameters irqd %d\n", irqd != 0);
+		return;
+	}
+	sde_kms = irq_data_get_irq_chip_data(irqd);
+
+	smp_mb__before_atomic();
+	set_bit(irqd->hwirq, &sde_kms->irq_controller.enabled_mask);
+	smp_mb__after_atomic();
+}
+
+/* minimal irq_chip: mask/unmask only toggle bits in the SW enabled_mask */
+static struct irq_chip sde_hw_irq_chip = {
+	.name = "sde",
+	.irq_mask = sde_hw_irq_mask,
+	.irq_unmask = sde_hw_irq_unmask,
+};
+
+/*
+ * sde_hw_irqdomain_map - irq_domain .map callback
+ *
+ * Binds a freshly mapped virq to the sde irq_chip (level-type handler)
+ * and stores the sde_kms handle as its chip data.
+ */
+static int sde_hw_irqdomain_map(struct irq_domain *domain,
+		unsigned int irq, irq_hw_number_t hwirq)
+{
+	struct sde_kms *sde_kms;
+	int rc;
+
+	if (!domain || !domain->host_data) {
+		SDE_ERROR("invalid parameters domain %d\n", domain != 0);
+		return -EINVAL;
+	}
+	sde_kms = domain->host_data;
+
+	irq_set_chip_and_handler(irq, &sde_hw_irq_chip, handle_level_irq);
+	rc = irq_set_chip_data(irq, sde_kms);
+
+	return rc;
+}
+
+/*
+ * irq_domain_add_linear() takes a const ops pointer, so the table can live
+ * in read-only memory.
+ */
+static const struct irq_domain_ops sde_hw_irqdomain_ops = {
+	.map = sde_hw_irqdomain_map,
+	.xlate = irq_domain_xlate_onecell,
+};
+
+/*
+ * sde_core_irq_domain_add - create the linear irq domain for SDE
+ * @sde_kms: Pointer to sde kms context
+ * Return: 0 on success; -EINVAL on bad handles or domain creation failure
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms)
+{
+	struct device *dev;
+	struct irq_domain *domain;
+
+	/* guard sde_kms itself, consistent with the other entry points */
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev->dev;
+
+	domain = irq_domain_add_linear(dev->of_node, 32,
+			&sde_hw_irqdomain_ops, sde_kms);
+	if (!domain) {
+		pr_err("failed to add irq_domain\n");
+		return -EINVAL;
+	}
+
+	sde_kms->irq_controller.enabled_mask = 0;
+	sde_kms->irq_controller.domain = domain;
+
+	return 0;
+}
+
+/* tear down the irq domain created by sde_core_irq_domain_add() */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms)
+{
+	if (sde_kms->irq_controller.domain) {
+		irq_domain_remove(sde_kms->irq_controller.domain);
+		sde_kms->irq_controller.domain = NULL;
+	}
+	return 0;
+}
+
+/*
+ * sde_core_irq - top-level core interrupt handler
+ * @sde_kms: SDE handle; no NULL checks here — caller is assumed to pass a
+ *	fully initialized context (NOTE(review): confirm the ISR caller).
+ * Return: always IRQ_HANDLED
+ */
+irqreturn_t sde_core_irq(struct sde_kms *sde_kms)
+{
+	/*
+	 * Read interrupt status from all sources. Interrupt status are
+	 * stored within hw_intr.
+	 * Function will also clear the interrupt status after reading.
+	 * Individual interrupt status bit will only get stored if it
+	 * is enabled.
+	 */
+	sde_kms->hw_intr->ops.get_interrupt_statuses(sde_kms->hw_intr);
+
+	/*
+	 * Dispatch to HW driver to handle interrupt lookup that is being
+	 * fired. When matching interrupt is located, HW driver will call to
+	 * sde_core_irq_callback_handler with the irq_idx from the lookup table.
+	 * sde_core_irq_callback_handler will perform the registered function
+	 * callback, and do the interrupt status clearing once the registered
+	 * callback is finished.
+	 */
+	sde_kms->hw_intr->ops.dispatch_irqs(
+			sde_kms->hw_intr,
+			sde_core_irq_callback_handler,
+			sde_kms);
+
+	return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_irq.h b/drivers/gpu/drm/msm/sde/sde_core_irq.h
new file mode 100644
index 000000000000..ee1b9bd1d32b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_irq.h
@@ -0,0 +1,152 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_CORE_IRQ_H__
+#define __SDE_CORE_IRQ_H__
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+
+/**
+ * sde_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: none
+ */
+void sde_core_irq_preinstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_postinstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_uninstall - uninstall core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: none
+ */
+void sde_core_irq_uninstall(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_add - Add core IRQ domain for SDE
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_add(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_domain_fini - uninstall core IRQ domain
+ * @sde_kms: SDE handle
+ * @return: 0 if success; error code otherwise
+ */
+int sde_core_irq_domain_fini(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq - core IRQ handler
+ * @sde_kms: SDE handle
+ * @return: interrupt handling status
+ */
+irqreturn_t sde_core_irq(struct sde_kms *sde_kms);
+
+/**
+ * sde_core_irq_idx_lookup - IRQ helper function for lookup irq_idx from HW
+ * interrupt mapping table.
+ * @sde_kms: SDE handle
+ * @intr_type: SDE HW interrupt type for lookup
+ * @instance_idx: SDE HW block instance defined in sde_hw_mdss.h
+ * @return: irq_idx or -EINVAL when fail to lookup
+ */
+int sde_core_irq_idx_lookup(
+ struct sde_kms *sde_kms,
+ enum sde_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * sde_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments count on each enable and decrements on each
+ * disable. Interrupts is enabled if count is 0 before increment.
+ */
+int sde_core_irq_enable(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @sde_kms: SDE handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments count on each enable and decrements on each
+ * disable. Interrupts is disabled if count is 0 after decrement.
+ */
+int sde_core_irq_disable(
+ struct sde_kms *sde_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * sde_core_irq_read - IRQ helper function for reading IRQ status
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if irq detected; otherwise no irq detected
+ */
+u32 sde_core_irq_read(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ bool clear);
+
+/**
+ * sde_core_irq_register_callback - For registering callback function on IRQ
+ * interrupt
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx
+ * This must exist until un-registration.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int sde_core_irq_register_callback(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ struct sde_irq_callback *irq_cb);
+
+/**
+ * sde_core_irq_unregister_callback - For unregistering callback function on IRQ
+ * interrupt
+ * @sde_kms: SDE handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing callback function
+ * and argument. Passing NULL for irq_cb will unregister
+ * the callback for the given irq_idx
+ * This must match with registration.
+ * @return: 0 for success unregistering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int sde_core_irq_unregister_callback(
+ struct sde_kms *sde_kms,
+ int irq_idx,
+ struct sde_irq_callback *irq_cb);
+
+#endif /* __SDE_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.c b/drivers/gpu/drm/msm/sde/sde_core_perf.c
new file mode 100644
index 000000000000..29e746e1fdf5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.c
@@ -0,0 +1,634 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "msm_prop.h"
+
+#include "sde_kms.h"
+#include "sde_fence.h"
+#include "sde_formats.h"
+#include "sde_hw_sspp.h"
+#include "sde_trace.h"
+#include "sde_crtc.h"
+#include "sde_plane.h"
+#include "sde_encoder.h"
+#include "sde_wb.h"
+#include "sde_core_perf.h"
+#include "sde_trace.h"
+
+/* Resolve crtc -> sde_kms; NULL on any broken link (matches sde_crtc.c twin) */
+static struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid device\n");
+		return NULL;
+	}
+
+	priv = crtc->dev->dev_private;
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return NULL;
+	}
+	return to_sde_kms(priv->kms);
+}
+
+static bool _sde_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+	return sde_crtc_is_enabled(crtc);	/* thin alias: "powered on" == crtc enabled */
+}
+
+/* True if any powered-on crtc on this device drives a video-mode interface */
+static bool _sde_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+
+	if (!crtc)
+		return false;	/* was "return 0" — use the bool literal */
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if ((sde_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+				_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
+			SDE_DEBUG("video interface connected crtc:%d\n",
+				tmp_crtc->base.id);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+static void _sde_core_perf_calc_crtc(struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct sde_core_perf_params *perf)
+{
+	struct sde_crtc_state *sde_cstate;
+
+	if (!crtc || !state || !perf) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_cstate = to_sde_crtc_state(state);
+	memset(perf, 0, sizeof(struct sde_core_perf_params));	/* start from a clean vote */
+
+	perf->bw_ctl = sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_AB);	/* arbitrated bw */
+	perf->max_per_pipe_ib =
+		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_IB);	/* instantaneous bw */
+	perf->core_clk_rate =
+		sde_crtc_get_property(sde_cstate, CRTC_PROP_CORE_CLK);
+
+	SDE_DEBUG("crtc=%d clk_rate=%u ib=%llu ab=%llu\n",
+		crtc->base.id, perf->core_clk_rate,
+		perf->max_per_pipe_ib, perf->bw_ctl);
+}
+
+int sde_core_perf_crtc_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	u32 bw, threshold;
+	u64 bw_sum_of_intfs = 0;
+	bool is_video_mode;
+	struct sde_crtc_state *sde_cstate;
+	struct drm_crtc *tmp_crtc;
+	struct sde_kms *kms;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid parameters\n");
+		return 0;	/* NOTE(review): invalid kms passes the check — confirm intended */
+	}
+
+	/* we only need bandwidth check on real-time clients (interfaces) */
+	if (sde_crtc_is_wb(crtc))
+		return 0;
+
+	sde_cstate = to_sde_crtc_state(state);
+
+	_sde_core_perf_calc_crtc(crtc, state, &sde_cstate->new_perf);	/* cache vote in state */
+
+	bw_sum_of_intfs = sde_cstate->new_perf.bw_ctl;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+			sde_crtc_is_rt(tmp_crtc) && tmp_crtc != crtc) {
+			struct sde_crtc_state *tmp_cstate =
+					to_sde_crtc_state(tmp_crtc->state);
+
+			bw_sum_of_intfs += tmp_cstate->new_perf.bw_ctl;	/* aggregate all active RT crtcs */
+		}
+	}
+
+	/* convert bandwidth to kb */
+	bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+	SDE_DEBUG("calculated bandwidth=%uk\n", bw);
+
+	is_video_mode = sde_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+	threshold = (is_video_mode ||
+		_sde_core_video_mode_intf_connected(crtc)) ?
+		kms->catalog->perf.max_bw_low : kms->catalog->perf.max_bw_high;	/* stricter cap when any video intf is active */
+
+	SDE_DEBUG("final threshold bw limit = %d\n", threshold);
+
+	if (!threshold) {
+		SDE_ERROR("no bandwidth limits specified\n");
+		return -E2BIG;
+	} else if (bw > threshold) {
+		SDE_DEBUG("exceeds bandwidth: %ukb > %ukb\n", bw, threshold);
+		return -E2BIG;
+	}
+
+	return 0;
+}
+
+static u64 _sde_core_perf_crtc_calc_client_vote(struct sde_kms *kms,
+		struct drm_crtc *crtc, struct sde_core_perf_params *perf,
+		bool nrt_client, u32 core_clk)
+{
+	u64 bw_sum_of_intfs = 0;
+	struct drm_crtc *tmp_crtc;
+
+	drm_for_each_crtc(tmp_crtc, crtc->dev) {
+		/* fix: test the crtc being iterated, not the caller's crtc */
+		if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+			/* RT pass sums every client; NRT pass only NRT crtcs */
+			(!nrt_client ||
+			sde_crtc_is_nrt(tmp_crtc))) {
+			struct sde_crtc_state *sde_cstate =
+					to_sde_crtc_state(tmp_crtc->state);
+
+			perf->max_per_pipe_ib = max(perf->max_per_pipe_ib,
+				sde_cstate->new_perf.max_per_pipe_ib);
+
+			bw_sum_of_intfs += sde_cstate->new_perf.bw_ctl;
+
+			SDE_DEBUG("crtc=%d bw=%llu\n",
+				tmp_crtc->base.id,
+				sde_cstate->new_perf.bw_ctl);
+		}
+	}
+
+	return bw_sum_of_intfs;
+}
+
+static void _sde_core_perf_crtc_update_client_vote(struct sde_kms *kms,
+	struct sde_core_perf_params *params, bool nrt_client, u64 bw_vote)
+{
+	struct msm_drm_private *priv = kms->dev->dev_private;
+	u64 bus_ab_quota, bus_ib_quota;
+
+	bus_ab_quota = max(bw_vote, kms->perf.perf_tune.min_bus_vote);	/* apply debugfs min_bus_vote floor */
+	bus_ib_quota = params->max_per_pipe_ib;
+
+	SDE_ATRACE_INT("bus_quota", bus_ib_quota);
+	sde_power_data_bus_set_quota(&priv->phandle, kms->core_client,
+		nrt_client ? SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT :
+		SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+		bus_ab_quota, bus_ib_quota);
+	SDE_DEBUG("client:%s ab=%llu ib=%llu\n", nrt_client ? "nrt" : "rt",
+		bus_ab_quota, bus_ib_quota);
+}
+
+static void _sde_core_perf_crtc_update_bus(struct sde_kms *kms,
+		struct drm_crtc *crtc, u32 core_clk)
+{
+	u64 bw_sum_of_rt_intfs = 0, bw_sum_of_nrt_intfs = 0;
+	struct sde_core_perf_params params = {0};
+
+	SDE_ATRACE_BEGIN(__func__);
+
+	/*
+	 * non-real time client
+	 */
+	if (sde_crtc_is_nrt(crtc)) {
+		bw_sum_of_nrt_intfs = _sde_core_perf_crtc_calc_client_vote(
+				kms, crtc, &params, true, core_clk);
+		_sde_core_perf_crtc_update_client_vote(kms, &params, true,
+				bw_sum_of_nrt_intfs);
+	}
+
+	/*
+	 * real time client
+	 */
+	if (!sde_crtc_is_nrt(crtc) ||
+			sde_crtc_is_wb(crtc)) {	/* NOTE(review): wb crtcs also vote on the RT path — confirm intended */
+		bw_sum_of_rt_intfs = _sde_core_perf_crtc_calc_client_vote(kms,
+				crtc, &params, false, core_clk);
+		_sde_core_perf_crtc_update_client_vote(kms, &params, false,
+				bw_sum_of_rt_intfs);
+	}
+
+	SDE_ATRACE_END(__func__);
+}
+
+/**
+ * sde_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc - pointer to a crtc
+ *
+ * Function checks a state variable for the crtc, if all pending commit
+ * requests are done, meaning no more bandwidth is needed, release
+ * bandwidth request.
+ */
+void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+	struct drm_crtc *tmp_crtc;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *sde_cstate;
+	struct sde_kms *kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_cstate = to_sde_crtc_state(crtc->state);	/* NOTE(review): unused — candidate for removal */
+
+	/* only do this for command panel or writeback */
+	if ((sde_crtc_get_intf_mode(crtc) != INTF_MODE_CMD) &&
+			(sde_crtc_get_intf_mode(crtc) != INTF_MODE_WB_LINE))
+		return;
+
+	/*
+	 * If video interface present, cmd panel bandwidth cannot be
+	 * released.
+	 */
+	if (sde_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+		drm_for_each_crtc(tmp_crtc, crtc->dev) {
+			if (_sde_core_perf_crtc_is_power_on(tmp_crtc) &&
+				sde_crtc_get_intf_mode(tmp_crtc) ==
+						INTF_MODE_VIDEO)
+				return;
+		}
+
+	/* Release the bandwidth */
+	if (kms->perf.enable_bw_release) {	/* debugfs-gated */
+		trace_sde_cmd_release_bw(crtc->base.id);
+		sde_crtc->cur_perf.bw_ctl = 0;	/* forget applied vote so next update re-votes */
+		SDE_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+		_sde_core_perf_crtc_update_bus(kms, crtc, 0);
+	}
+}
+
+static int _sde_core_select_clk_lvl(struct sde_kms *kms,
+		u32 clk_rate)
+{
+	return clk_round_rate(kms->perf.core_clk, clk_rate);	/* NOTE(review): long -> int; truncates rates > INT_MAX — confirm range */
+}
+
+static u32 _sde_core_perf_get_core_clk_rate(struct sde_kms *kms,
+	struct sde_core_perf_params *crct_perf, struct drm_crtc *crtc)	/* NOTE(review): "crct_perf" looks like a typo for "crtc_perf" */
+{
+	u32 clk_rate = 0;
+	struct drm_crtc *tmp_crtc;
+	struct sde_crtc_state *sde_cstate;
+	int ncrtc = 0;
+	u32 tmp_rate;
+
+	drm_for_each_crtc(tmp_crtc, kms->dev) {
+		if (_sde_core_perf_crtc_is_power_on(tmp_crtc)) {
+
+			if (crtc->base.id == tmp_crtc->base.id) {
+				/* for current CRTC, use the cached value */
+				tmp_rate = crct_perf->core_clk_rate;
+			} else {
+				sde_cstate = to_sde_crtc_state(tmp_crtc->state);
+				tmp_rate = sde_cstate->new_perf.core_clk_rate;
+			}
+
+			clk_rate = max(tmp_rate, clk_rate);
+			clk_rate = clk_round_rate(kms->perf.core_clk, clk_rate);	/* NOTE(review): redundant — rounded again below */
+		}
+		ncrtc++;	/* counts every crtc, powered or not; debug print only */
+	}
+	clk_rate = _sde_core_select_clk_lvl(kms, clk_rate);
+
+	SDE_DEBUG("clk:%u ncrtc:%d\n", clk_rate, ncrtc);
+
+	return clk_rate;
+}
+
+void sde_core_perf_crtc_update(struct drm_crtc *crtc,
+		int params_changed, bool stop_req)
+{
+	struct sde_core_perf_params *new, *old;
+	int update_bus = 0, update_clk = 0;
+	u32 clk_rate = 0;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *sde_cstate;
+	int ret;
+	struct msm_drm_private *priv;
+	struct sde_kms *kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	kms = _sde_crtc_get_kms(crtc);
+	if (!kms || !kms->catalog) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+	priv = kms->dev->dev_private;
+
+	sde_crtc = to_sde_crtc(crtc);
+	sde_cstate = to_sde_crtc_state(crtc->state);
+
+	SDE_DEBUG("crtc:%d stop_req:%d core_clk:%u\n",
+			crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+	SDE_ATRACE_BEGIN(__func__);
+
+	/*
+	 * cache the performance numbers in the crtc prior to the
+	 * crtc kickoff, so the same numbers are used during the
+	 * perf update that happens post kickoff.
+	 */
+
+	if (params_changed)
+		memcpy(&sde_crtc->new_perf, &sde_cstate->new_perf,
+			sizeof(struct sde_core_perf_params));
+
+	old = &sde_crtc->cur_perf;	/* last applied vote */
+	new = &sde_crtc->new_perf;	/* vote cached at kickoff */
+
+	if (_sde_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+		/*
+		 * cases for bus bandwidth update.
+		 * 1. new bandwidth vote or writeback output vote
+		 *    are higher than current vote for update request.
+		 * 2. new bandwidth vote or writeback output vote are
+		 *    lower than current vote at end of commit or stop.
+		 */
+		if ((params_changed && ((new->bw_ctl > old->bw_ctl))) ||
+		    (!params_changed && ((new->bw_ctl < old->bw_ctl)))) {
+			SDE_DEBUG("crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+				crtc->base.id, params_changed, new->bw_ctl,
+				old->bw_ctl);
+			old->bw_ctl = new->bw_ctl;
+			old->max_per_pipe_ib = new->max_per_pipe_ib;
+			update_bus = 1;
+		}
+
+		if ((params_changed &&
+				(new->core_clk_rate > old->core_clk_rate)) ||
+				(!params_changed &&
+				(new->core_clk_rate < old->core_clk_rate))) {
+			old->core_clk_rate = new->core_clk_rate;
+			update_clk = 1;
+		}
+	} else {
+		SDE_DEBUG("crtc=%d disable\n", crtc->base.id);
+		memset(old, 0, sizeof(*old));	/* drop all votes on disable/stop */
+		memset(new, 0, sizeof(*new));
+		update_bus = 1;
+		update_clk = 1;
+	}
+
+	/*
+	 * Calculate mdp clock before bandwidth calculation. If traffic shaper
+	 * is enabled and clock increased, the bandwidth calculation can
+	 * use the new clock for the rotator bw calculation.
+	 */
+	if (update_clk)
+		clk_rate = _sde_core_perf_get_core_clk_rate(kms, old, crtc);
+
+	if (update_bus)
+		_sde_core_perf_crtc_update_bus(kms, crtc, clk_rate);
+
+	/*
+	 * Update the clock after bandwidth vote to ensure
+	 * bandwidth is available before clock rate is increased.
+	 */
+	if (update_clk) {
+		SDE_ATRACE_INT(kms->perf.clk_name, clk_rate);
+		SDE_EVT32(kms->dev, stop_req, clk_rate, params_changed,
+			old->core_clk_rate, new->core_clk_rate);
+
+		ret = sde_power_clk_set_rate(&priv->phandle,
+				kms->perf.clk_name, clk_rate);
+		if (ret) {
+			SDE_ERROR("failed to set %s clock rate %u\n",
+					kms->perf.clk_name, clk_rate);
+			goto end;
+		}
+
+		kms->perf.core_clk_rate = clk_rate;
+		SDE_DEBUG("update clk rate = %d HZ\n", clk_rate);	/* NOTE(review): %d for u32 — %u would be exact */
+	}
+
+end:
+	SDE_ATRACE_END(__func__);
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+/* debugfs perf_mode write: nonzero pins max clk/bus floors, 0 resets them */
+static ssize_t _sde_core_perf_mode_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_core_perf *perf = file->private_data;
+	struct sde_perf_cfg *cfg;
+	int perf_mode = 0;
+	char buf[10];
+
+	if (!perf)
+		return -ENODEV;
+	cfg = &perf->catalog->perf;	/* deref only after the NULL check above */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (kstrtoint(buf, 0, &perf_mode))
+		return -EFAULT;
+
+	if (perf_mode) {
+		/* run the driver with max clk and BW vote */
+		perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+		perf->perf_tune.min_bus_vote =
+				(u64) cfg->max_bw_high * 1000;
+	} else {
+		/* reset the perf tune params to 0 */
+		perf->perf_tune.min_core_clk = 0;
+		perf->perf_tune.min_bus_vote = 0;
+	}
+	return count;
+}
+
+/* debugfs perf_mode read: report current tuning floors */
+static ssize_t _sde_core_perf_mode_read(struct file *file,
+		char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_core_perf *perf = file->private_data;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!perf)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	len = snprintf(buf, sizeof(buf), "min_mdp_clk %lu min_bus_vote %llu\n",
+			perf->perf_tune.min_core_clk,
+			perf->perf_tune.min_bus_vote);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	/* only require the user buffer to hold len bytes, not sizeof(buf) */
+	if ((count < (size_t)len) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+	return len;
+}
+
+static const struct file_operations sde_core_perf_mode_fops = {
+	.open = simple_open,	/* file->private_data = inode->i_private (sde_core_perf) */
+	.read = _sde_core_perf_mode_read,
+	.write = _sde_core_perf_mode_write,
+};
+
+static void sde_debugfs_core_perf_destroy(struct sde_core_perf *perf)
+{
+	debugfs_remove_recursive(perf->debugfs_root);	/* NULL-safe */
+	perf->debugfs_root = NULL;
+}
+
+/* Create the core_perf debugfs tree exposing the tuning knobs */
+static int sde_debugfs_core_perf_init(struct sde_core_perf *perf,
+		struct dentry *parent)
+{
+	struct sde_mdss_cfg *catalog = perf->catalog;
+	struct msm_drm_private *priv;
+
+	/* perf->dev is set by sde_core_perf_init before this is called */
+	priv = perf->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+	if (!perf->debugfs_root) {
+		SDE_ERROR("failed to create core perf debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_u64("max_core_clk_rate", 0644, perf->debugfs_root,
+			&perf->max_core_clk_rate);
+	debugfs_create_u32("core_clk_rate", 0644, perf->debugfs_root,
+			&perf->core_clk_rate);
+	debugfs_create_u32("enable_bw_release", 0644, perf->debugfs_root,
+			(u32 *)&perf->enable_bw_release);
+	debugfs_create_u32("threshold_low", 0644, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_low);
+	debugfs_create_u32("threshold_high", 0644, perf->debugfs_root,
+			(u32 *)&catalog->perf.max_bw_high);
+	/* i_private is void *; pass perf directly, no bogus (u32 *) cast */
+	debugfs_create_file("perf_mode", 0644, perf->debugfs_root,
+			perf, &sde_core_perf_mode_fops);
+
+	return 0;
+}
+#else
+static void sde_debugfs_core_perf_destroy(struct sde_core_perf *perf)
+{
+}
+
+static int sde_debugfs_core_perf_init(struct sde_core_perf *perf,
+		struct dentry *parent)
+{
+	return 0;	/* debugfs disabled: report success */
+}
+#endif
+
+void sde_core_perf_destroy(struct sde_core_perf *perf)
+{
+	if (!perf) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_debugfs_core_perf_destroy(perf);	/* remove debugfs tree first */
+	perf->max_core_clk_rate = 0;
+	perf->core_clk = NULL;
+	mutex_destroy(&perf->perf_lock);
+	perf->clk_name = NULL;
+	perf->phandle = NULL;
+	perf->catalog = NULL;
+	perf->dev = NULL;
+}
+
+int sde_core_perf_init(struct sde_core_perf *perf,
+		struct drm_device *dev,
+		struct sde_mdss_cfg *catalog,
+		struct sde_power_handle *phandle,
+		struct sde_power_client *pclient,
+		char *clk_name,
+		struct dentry *debugfs_parent)
+{
+	/* also reject NULL dev: perf->dev is dereferenced in debugfs init */
+	if (!perf || !dev || !catalog || !phandle || !pclient ||
+			!clk_name || !debugfs_parent) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	perf->dev = dev;
+	perf->catalog = catalog;
+	perf->phandle = phandle;
+	perf->pclient = pclient;
+	perf->clk_name = clk_name;
+	mutex_init(&perf->perf_lock);
+
+	perf->core_clk = sde_power_clk_get_clk(phandle, clk_name);
+	if (!perf->core_clk) {
+		SDE_ERROR("invalid core clk\n");
+		goto err;
+	}
+
+	perf->max_core_clk_rate = sde_power_clk_get_max_rate(phandle, clk_name);
+	if (!perf->max_core_clk_rate) {
+		SDE_ERROR("invalid max core clk rate\n");
+		goto err;
+	}
+
+	sde_debugfs_core_perf_init(perf, debugfs_parent);
+
+	return 0;
+
+err:
+	sde_core_perf_destroy(perf);	/* unwinds mutex_init and partial state */
+	return -ENODEV;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_core_perf.h b/drivers/gpu/drm/msm/sde/sde_core_perf.h
new file mode 100644
index 000000000000..e5dd9b6e6704
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_core_perf.h
@@ -0,0 +1,124 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_CORE_PERF_H__
+#define __SDE_CORE_PERF_H__
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "sde_hw_catalog.h"
+#include "sde_power_handle.h"
+
+/**
+ * struct sde_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct sde_core_perf_params {
+	u64 max_per_pipe_ib;	/* instantaneous bw ceiling; units per bus vote API — TODO confirm */
+	u64 bw_ctl;		/* arbitrated bw vote; divided by 1000 for kb threshold checks */
+	u32 core_clk_rate;	/* requested MDP core clock, Hz */
+};
+
+/**
+ * struct sde_core_perf_tune - definition of performance tuning control
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct sde_core_perf_tune {
+	unsigned long min_core_clk;	/* floor set via debugfs perf_mode */
+	u64 min_bus_vote;	/* ab-vote floor set via debugfs perf_mode */
+};
+
+/**
+ * struct sde_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @perf_lock: serialization lock for this context
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @pclient: Pointer to power client
+ * @clk_name: core clock name
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ */
+struct sde_core_perf {
+	struct drm_device *dev;
+	struct dentry *debugfs_root;
+	struct mutex perf_lock;
+	struct sde_mdss_cfg *catalog;
+	struct sde_power_handle *phandle;
+	struct sde_power_client *pclient;
+	char *clk_name;
+	struct clk *core_clk;
+	u32 core_clk_rate;	/* last rate programmed to the core clock, Hz */
+	u64 max_core_clk_rate;
+	struct sde_core_perf_tune perf_tune;
+	u32 enable_bw_release;	/* debugfs knob: nonzero lets release_bw drop votes */
+};
+
+/**
+ * sde_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int sde_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * sde_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ */
+void sde_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
+/**
+ * sde_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void sde_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * sde_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void sde_core_perf_destroy(struct sde_core_perf *perf);
+
+/**
+ * sde_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @pclient: Pointer to power client
+ * @clk_name: core clock name
+ * @debugfs_parent: Pointer to parent debugfs
+ */
+int sde_core_perf_init(struct sde_core_perf *perf,
+ struct drm_device *dev,
+ struct sde_mdss_cfg *catalog,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient,
+ char *clk_name,
+ struct dentry *debugfs_parent);
+
+#endif /* __SDE_CORE_PERF_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.c b/drivers/gpu/drm/msm/sde/sde_crtc.c
new file mode 100644
index 000000000000..3ad884b2dbf7
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.c
@@ -0,0 +1,2167 @@
+/*
+ * Copyright (c) 2014-2019 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <uapi/drm/sde_drm.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+
+#include "sde_kms.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_ctl.h"
+#include "sde_crtc.h"
+#include "sde_plane.h"
+#include "sde_color_processing.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+#include "sde_power_handle.h"
+#include "sde_core_perf.h"
+#include "sde_trace.h"
+
+/* default input fence timeout, in ms */
+#define SDE_CRTC_INPUT_FENCE_TIMEOUT 10000
+
+/*
+ * The default input fence timeout is 10 seconds while max allowed
+ * range is 10 seconds. Any value above 10 seconds adds glitches beyond
+ * tolerance limit.
+ */
+#define SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT 10000
+
+/* layer mixer index on sde_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+/* indicates pending page flip events */
+#define PENDING_FLIP 0x2
+
+/*
+ * _sde_crtc_get_kms - resolve the sde_kms handle backing a crtc
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Validates every link of the crtc -> dev -> dev_private -> kms chain.
+ * Return: sde_kms pointer, or NULL if any link is missing.
+ */
+static inline struct sde_kms *_sde_crtc_get_kms(struct drm_crtc *crtc)
+{
+	struct msm_drm_private *priv;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return NULL;
+	}
+	priv = crtc->dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return NULL;
+	}
+
+	return to_sde_kms(priv->kms);
+}
+
+/*
+ * sde_crtc_destroy - crtc destroy hook; release all crtc-owned resources
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Frees property blobs, custom properties, color-processing state,
+ * debugfs entries, the output fence, the base crtc and the wrapper.
+ */
+static void sde_crtc_destroy(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+
+	SDE_DEBUG("\n");
+
+	if (!crtc)
+		return;
+
+	/* derive the wrapper only after crtc has been validated */
+	sde_crtc = to_sde_crtc(crtc);
+
+	if (sde_crtc->blob_info)
+		drm_property_unreference_blob(sde_crtc->blob_info);
+	msm_property_destroy(&sde_crtc->property_info);
+	sde_cp_crtc_destroy_properties(crtc);
+
+	debugfs_remove_recursive(sde_crtc->debugfs_root);
+	sde_fence_deinit(&sde_crtc->output_fence);
+
+	drm_crtc_cleanup(crtc);
+	mutex_destroy(&sde_crtc->crtc_lock);
+	kfree(sde_crtc);
+}
+
+/*
+ * sde_crtc_mode_fixup - mode fixup hook
+ * @crtc: Pointer to drm crtc structure
+ * @mode: Requested display mode
+ * @adjusted_mode: Mode after encoder/bridge adjustment
+ *
+ * Rejects a seamless mode switch when the crtc is not already enabled
+ * or its active state is changing in this commit.
+ * Return: true if the mode is acceptable, false otherwise.
+ */
+static bool sde_crtc_mode_fixup(struct drm_crtc *crtc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adjusted_mode)
+{
+	SDE_DEBUG("\n");
+
+	if (msm_is_mode_seamless(adjusted_mode) &&
+		(!crtc->enabled || crtc->state->active_changed)) {
+		SDE_ERROR("crtc state prevents seamless transition\n");
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * _sde_crtc_setup_blend_cfg - program layer-mixer blend config for a plane
+ * @mixer: Pointer to the crtc mixer owning the hw layer mixer
+ * @pstate: Plane state supplying the ALPHA/BLEND_OP properties and stage
+ * @format: Resolved sde format of the plane's framebuffer
+ *
+ * Derives fg/bg alpha and the hw blend op from the plane's blend-op
+ * property (opaque / premultiplied / coverage) and the format's alpha
+ * capability, then writes it to the mixer at the plane's stage.
+ */
+static void _sde_crtc_setup_blend_cfg(struct sde_crtc_mixer *mixer,
+	struct sde_plane_state *pstate, struct sde_format *format)
+{
+	uint32_t blend_op, fg_alpha, bg_alpha;
+	uint32_t blend_type;
+	struct sde_hw_mixer *lm = mixer->hw_lm;
+
+	/* default to opaque blending */
+	fg_alpha = sde_plane_get_property(pstate, PLANE_PROP_ALPHA);
+	bg_alpha = 0xFF - fg_alpha;
+	blend_op = SDE_BLEND_FG_ALPHA_FG_CONST | SDE_BLEND_BG_ALPHA_BG_CONST;
+	blend_type = sde_plane_get_property(pstate, PLANE_PROP_BLEND_OP);
+
+	SDE_DEBUG("blend type:0x%x blend alpha:0x%x\n", blend_type, fg_alpha);
+
+	switch (blend_type) {
+
+	case SDE_DRM_BLEND_OP_OPAQUE:
+		blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
+			SDE_BLEND_BG_ALPHA_BG_CONST;
+		break;
+
+	case SDE_DRM_BLEND_OP_PREMULTIPLIED:
+		if (format->alpha_enable) {
+			blend_op = SDE_BLEND_FG_ALPHA_FG_CONST |
+				SDE_BLEND_BG_ALPHA_FG_PIXEL;
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |= SDE_BLEND_BG_MOD_ALPHA |
+					SDE_BLEND_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= SDE_BLEND_BG_INV_ALPHA;
+			}
+		}
+		break;
+
+	case SDE_DRM_BLEND_OP_COVERAGE:
+		if (format->alpha_enable) {
+			blend_op = SDE_BLEND_FG_ALPHA_FG_PIXEL |
+				SDE_BLEND_BG_ALPHA_FG_PIXEL;
+			if (fg_alpha != 0xff) {
+				bg_alpha = fg_alpha;
+				blend_op |= SDE_BLEND_FG_MOD_ALPHA |
+					SDE_BLEND_FG_INV_MOD_ALPHA |
+					SDE_BLEND_BG_MOD_ALPHA |
+					SDE_BLEND_BG_INV_MOD_ALPHA;
+			} else {
+				blend_op |= SDE_BLEND_BG_INV_ALPHA;
+			}
+		}
+		break;
+	default:
+		/* do nothing */
+		break;
+	}
+
+	lm->ops.setup_blend_config(lm, pstate->stage, fg_alpha,
+						bg_alpha, blend_op);
+	/*
+	 * Use adjacent string literal concatenation; the previous
+	 * escaped-quote + line-splice form injected a stray '"' and
+	 * leading tabs into the debug output.
+	 */
+	SDE_DEBUG("format 0x%x, alpha_enable %u fg alpha:0x%x bg alpha:0x%x "
+		"blend_op:0x%x\n", format->base.pixel_format,
+		format->alpha_enable, fg_alpha, bg_alpha, blend_op);
+}
+
+/*
+ * _sde_crtc_blend_setup_mixer - stage all planes onto the crtc's mixers
+ * @crtc: Pointer to drm crtc structure
+ * @sde_crtc: Pointer to sde crtc wrapper
+ * @mixer: Array of crtc mixers (LEFT_MIXER/RIGHT_MIXER)
+ *
+ * Walks every plane on the crtc, assigns it to the left and/or right
+ * layer mixer based on its x position (or to both in shared dual-mixer
+ * mode), fills the shared stage_cfg, accumulates per-mixer flush masks
+ * and programs blend config for non-base stages.
+ */
+static void _sde_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+	struct sde_crtc *sde_crtc, struct sde_crtc_mixer *mixer)
+{
+	struct drm_plane *plane;
+
+	struct sde_plane_state *pstate = NULL;
+	struct sde_format *format;
+	struct sde_hw_ctl *ctl = mixer->hw_ctl;
+	struct sde_hw_stage_cfg *stage_cfg = &sde_crtc->stage_cfg;
+	struct sde_crtc_state *cstate;
+
+	u32 flush_mask = 0, crtc_split_width;
+	uint32_t lm_idx = LEFT_MIXER, idx;
+	bool bg_alpha_enable[CRTC_DUAL_MIXERS] = {false};
+	bool lm_right = false;
+	/* per-mixer z-order slot counters, indexed by blend stage */
+	int left_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
+	int right_crtc_zpos_cnt[SDE_STAGE_MAX + 1] = {0};
+
+	crtc_split_width = get_crtc_split_width(crtc);
+
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+
+		pstate = to_sde_plane_state(plane->state);
+		cstate = to_sde_crtc_state(crtc->state);
+
+		/* shared dual mixer mode will always enable both LM */
+		if (cstate->is_shared &&
+			sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+			lm_idx = LEFT_MIXER;
+			idx = left_crtc_zpos_cnt[pstate->stage]++;
+			lm_right = true;
+		} else {
+			/* always stage plane on either left or right lm */
+			if (plane->state->crtc_x >= crtc_split_width) {
+				lm_idx = RIGHT_MIXER;
+				idx = right_crtc_zpos_cnt[pstate->stage]++;
+			} else {
+				lm_idx = LEFT_MIXER;
+				idx = left_crtc_zpos_cnt[pstate->stage]++;
+			}
+
+			/* stage plane on right LM if it crosses the
+			 * boundary.
+			 */
+			lm_right = (lm_idx == LEFT_MIXER) &&
+			   (plane->state->crtc_x + plane->state->crtc_w >
+							crtc_split_width);
+		}
+
+		/*
+		 * program each mixer with two hw pipes in dual mixer mode,
+		 */
+		if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS && lm_right) {
+			stage_cfg->stage[LEFT_MIXER][pstate->stage][1] =
+				sde_plane_pipe(plane, 1);
+
+			flush_mask = ctl->ops.get_bitmask_sspp(ctl,
+				sde_plane_pipe(plane, 1));
+		}
+
+		flush_mask |= ctl->ops.get_bitmask_sspp(ctl,
+			sde_plane_pipe(plane, lm_idx ? 1 : 0));
+
+
+
+		stage_cfg->stage[lm_idx][pstate->stage][idx] =
+			sde_plane_pipe(plane, lm_idx ? 1 : 0);
+
+		mixer[lm_idx].flush_mask |= flush_mask;
+
+		SDE_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+				crtc->base.id,
+				pstate->stage,
+				plane->base.id,
+				sde_plane_pipe(plane,
+					lm_idx ? 1 : 0) - SSPP_VIG0,
+				plane->state->fb ?
+				plane->state->fb->base.id : -1);
+
+		format = to_sde_format(msm_framebuffer_format(pstate->base.fb));
+		if (!format) {
+			SDE_ERROR("%s: get sde format failed\n", __func__);
+			return;
+		}
+
+		/* blend config update */
+		if (pstate->stage != SDE_STAGE_BASE) {
+			_sde_crtc_setup_blend_cfg(mixer + lm_idx, pstate,
+								format);
+
+			/* an opaque layer above clears accumulated bg alpha */
+			if (bg_alpha_enable[lm_idx] && !format->alpha_enable)
+				mixer[lm_idx].mixer_op_mode = 0;
+			else
+				mixer[lm_idx].mixer_op_mode |=
+						1 << pstate->stage;
+		} else if (format->alpha_enable) {
+			bg_alpha_enable[lm_idx] = true;
+		}
+
+		if (lm_right) {
+			idx = right_crtc_zpos_cnt[pstate->stage]++;
+
+			/*
+			 * program each mixer with two hw pipes
+			 * in dual mixer mode,
+			 */
+			if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS) {
+				stage_cfg->stage[RIGHT_MIXER][pstate->stage][1]
+					= sde_plane_pipe(plane, 0);
+			}
+
+			stage_cfg->stage[RIGHT_MIXER][pstate->stage][idx]
+				= sde_plane_pipe(plane, 1);
+
+			mixer[RIGHT_MIXER].flush_mask |= flush_mask;
+
+			/* blend config update */
+			if (pstate->stage != SDE_STAGE_BASE) {
+				_sde_crtc_setup_blend_cfg(mixer + RIGHT_MIXER,
+							pstate, format);
+
+				if (bg_alpha_enable[RIGHT_MIXER] &&
+						!format->alpha_enable)
+					mixer[RIGHT_MIXER].mixer_op_mode = 0;
+				else
+					mixer[RIGHT_MIXER].mixer_op_mode |=
+						1 << pstate->stage;
+			} else if (format->alpha_enable) {
+				bg_alpha_enable[RIGHT_MIXER] = true;
+			}
+		}
+	}
+}
+
+/**
+ * _sde_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Clears any previous blend stages, rebuilds the stage config via
+ * _sde_crtc_blend_setup_mixer(), then programs alpha-out, flush masks
+ * and blend stages on each mixer/ctl pair, honoring the splash masks.
+ */
+static void _sde_crtc_blend_setup(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_crtc_mixer *mixer = sde_crtc->mixers;
+	struct sde_hw_ctl *ctl;
+	struct sde_hw_mixer *lm;
+	struct sde_splash_info *sinfo;
+	struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
+	bool splash_enabled = false;
+	u32 mixer_mask = 0, mixer_ext_mask = 0;
+
+	int i;
+
+	SDE_DEBUG("%s\n", sde_crtc->name);
+
+	if (sde_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+		SDE_ERROR("invalid number mixers: %d\n", sde_crtc->num_mixers);
+		return;
+	}
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	}
+
+	sinfo = &sde_kms->splash_info;
+	/* NOTE(review): address-of-member can never be NULL; check is dead */
+	if (!sinfo) {
+		SDE_ERROR("invalid splash info\n");
+		return;
+	}
+
+	sde_splash_get_mixer_mask(sinfo, &splash_enabled,
+				&mixer_mask, &mixer_ext_mask);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+			SDE_ERROR("invalid lm or ctl assigned to mixer\n");
+			return;
+		}
+		mixer[i].mixer_op_mode = 0;
+		mixer[i].flush_mask = 0;
+		if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+			mixer[i].hw_ctl->ops.clear_all_blendstages(
+					mixer[i].hw_ctl, splash_enabled,
+					mixer_mask, mixer_ext_mask);
+	}
+
+	/* initialize stage cfg */
+	memset(&sde_crtc->stage_cfg, 0, sizeof(struct sde_hw_stage_cfg));
+
+	_sde_crtc_blend_setup_mixer(crtc, sde_crtc, mixer);
+
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		ctl = mixer[i].hw_ctl;
+		lm = mixer[i].hw_lm;
+
+		lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+		mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+			mixer[i].hw_lm->idx);
+
+		/* stage config flush mask */
+		ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+		SDE_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+			mixer[i].hw_lm->idx - LM_0,
+			mixer[i].mixer_op_mode,
+			ctl->idx - CTL_0,
+			mixer[i].flush_mask);
+
+		ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+			&sde_crtc->stage_cfg, i,
+			splash_enabled, mixer_mask, mixer_ext_mask);
+	}
+}
+
+/*
+ * sde_crtc_prepare_commit - cache connectors and prepare fences pre-commit
+ * @crtc: Pointer to drm crtc structure
+ * @old_state: Previous crtc state
+ *
+ * Records up to MAX_CONNECTORS connectors attached to this crtc in the
+ * crtc state, marks the state real-time if any non-writeback connector
+ * is present, and prepares the connector and crtc output fences.
+ */
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct drm_connector *conn;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	SDE_EVT32(DRMID(crtc));
+
+	/* identify connectors attached to this crtc */
+	cstate->is_rt = false;
+	cstate->num_connectors = 0;
+
+	drm_for_each_connector(conn, crtc->dev)
+		if (conn->state && conn->state->crtc == crtc &&
+				cstate->num_connectors < MAX_CONNECTORS) {
+			cstate->connectors[cstate->num_connectors++] = conn;
+			sde_connector_prepare_fence(conn);
+
+			/* writeback (virtual) connectors are non-real-time */
+			if (conn->connector_type != DRM_MODE_CONNECTOR_VIRTUAL)
+				cstate->is_rt = true;
+		}
+
+	/* prepare main output fence */
+	sde_fence_prepare(&sde_crtc->output_fence);
+}
+
+/*
+ * sde_crtc_is_rt - query whether the crtc's current state is real-time
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Return: is_rt flag of the current state; defaults to true (the safe,
+ * higher-priority assumption) when the crtc or its state is invalid.
+ */
+bool sde_crtc_is_rt(struct drm_crtc *crtc)
+{
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc or state\n");
+		return true;
+	}
+	return to_sde_crtc_state(crtc->state)->is_rt;
+}
+
+/**
+ * _sde_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However PAGE_FLIP events are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP and are cached in the sde_crtc->event.
+ * if file!=NULL, this is preclose potential cancel-flip path
+ * @crtc: Pointer to drm crtc structure
+ * @file: Pointer to drm file
+ */
+static void _sde_crtc_complete_flip(struct drm_crtc *crtc,
+		struct drm_file *file)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct drm_device *dev = crtc->dev;
+	struct drm_pending_vblank_event *event;
+	unsigned long flags;
+
+	/* event_lock protects sde_crtc->event against the irq path */
+	spin_lock_irqsave(&dev->event_lock, flags);
+	event = sde_crtc->event;
+	if (event) {
+		/* if regular vblank case (!file) or if cancel-flip from
+		 * preclose on file that requested flip, then send the
+		 * event:
+		 */
+		if (!file || (event->base.file_priv == file)) {
+			sde_crtc->event = NULL;
+			DRM_DEBUG_VBL("%s: send event: %pK\n",
+						sde_crtc->name, event);
+			SDE_EVT32(DRMID(crtc));
+			drm_crtc_send_vblank_event(crtc, event);
+		}
+	}
+	spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+/*
+ * sde_crtc_get_intf_mode - report the interface mode of the crtc
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Return: intf mode of the first encoder attached to this crtc, or
+ * INTF_MODE_NONE if the crtc is invalid or has no encoder.
+ */
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+
+	if (!crtc || !crtc->dev) {
+		SDE_ERROR("invalid crtc\n");
+		return INTF_MODE_NONE;
+	}
+
+	drm_for_each_encoder(encoder, crtc->dev)
+		if (encoder->crtc == crtc)
+			return sde_encoder_get_intf_mode(encoder);
+
+	return INTF_MODE_NONE;
+}
+
+/*
+ * sde_crtc_vblank_cb - encoder vblank callback (irq context)
+ * @data: Opaque pointer carrying the drm crtc
+ *
+ * Consumes the pending flag atomically, maintains vblank statistics,
+ * completes any pending page flip and forwards the vblank to drm core.
+ */
+static void sde_crtc_vblank_cb(void *data)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	unsigned pending;
+
+	pending = atomic_xchg(&sde_crtc->pending, 0);
+	/* keep statistics on vblank callback - with auto reset via debugfs */
+	if (ktime_equal(sde_crtc->vblank_cb_time, ktime_set(0, 0)))
+		sde_crtc->vblank_cb_time = ktime_get();
+	else
+		sde_crtc->vblank_cb_count++;
+
+	if (pending & PENDING_FLIP)
+		_sde_crtc_complete_flip(crtc, NULL);
+
+	drm_crtc_handle_vblank(crtc);
+	DRM_DEBUG_VBL("crtc%d\n", crtc->base.id);
+	SDE_EVT32_IRQ(DRMID(crtc));
+}
+
+/*
+ * sde_crtc_frame_event_work - deferred handler for encoder frame events
+ * @work: kthread work embedded in a sde_crtc_frame_event
+ *
+ * Runs on the display thread.  On frame done/error it decrements the
+ * frame_pending count; when the last pending frame retires it drops the
+ * data-bus bandwidth vote and releases crtc bandwidth.  The event slot
+ * is returned to the free list on exit.
+ */
+static void sde_crtc_frame_event_work(struct kthread_work *work)
+{
+	struct msm_drm_private *priv;
+	struct sde_crtc_frame_event *fevent;
+	struct drm_crtc *crtc;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	struct sde_kms *sde_kms;
+	unsigned long flags;
+
+	if (!work) {
+		SDE_ERROR("invalid work handle\n");
+		return;
+	}
+
+	fevent = container_of(work, struct sde_crtc_frame_event, work);
+	if (!fevent->crtc || !fevent->crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	crtc = fevent->crtc;
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	SDE_DEBUG("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+			ktime_to_ns(fevent->ts));
+
+	if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE ||
+			fevent->event == SDE_ENCODER_FRAME_EVENT_ERROR) {
+
+		if (atomic_read(&sde_crtc->frame_pending) < 1) {
+			/* this should not happen */
+			SDE_ERROR("crtc%d ts:%lld invalid frame_pending:%d\n",
+					crtc->base.id,
+					ktime_to_ns(fevent->ts),
+					atomic_read(&sde_crtc->frame_pending));
+			SDE_EVT32(DRMID(crtc), fevent->event, 0);
+		} else if (atomic_dec_return(&sde_crtc->frame_pending) == 0) {
+			/* release bandwidth and other resources */
+			SDE_DEBUG("crtc%d ts:%lld last pending\n",
+					crtc->base.id,
+					ktime_to_ns(fevent->ts));
+			SDE_EVT32(DRMID(crtc), fevent->event, 1);
+			sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+					sde_kms->core_client, false);
+			sde_core_perf_crtc_release_bw(crtc);
+		} else {
+			SDE_EVT32(DRMID(crtc), fevent->event, 2);
+		}
+
+		if (fevent->event == SDE_ENCODER_FRAME_EVENT_DONE)
+			sde_core_perf_crtc_update(crtc, 0, false);
+	} else {
+		SDE_ERROR("crtc%d ts:%lld unknown event %u\n", crtc->base.id,
+				ktime_to_ns(fevent->ts),
+				fevent->event);
+		SDE_EVT32(DRMID(crtc), fevent->event, 3);
+	}
+
+	/* return the event slot to the free list */
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	list_add_tail(&fevent->list, &sde_crtc->frame_event_list);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+}
+
+/*
+ * sde_crtc_frame_event_cb - encoder frame-event callback
+ * @data: Opaque pointer carrying the drm crtc
+ * @event: SDE_ENCODER_FRAME_EVENT_* code
+ *
+ * Grabs a free event slot from the crtc's pool, stamps it with the
+ * event and time, and queues sde_crtc_frame_event_work() on the per-crtc
+ * display thread.  Logs an overflow if the pool is exhausted.
+ */
+static void sde_crtc_frame_event_cb(void *data, u32 event)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_crtc_frame_event *fevent;
+	unsigned long flags;
+	int pipe_id;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+	pipe_id = drm_crtc_index(crtc);
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	SDE_EVT32(DRMID(crtc), event);
+
+	spin_lock_irqsave(&sde_crtc->spin_lock, flags);
+	fevent = list_first_entry_or_null(&sde_crtc->frame_event_list,
+			struct sde_crtc_frame_event, list);
+	if (fevent)
+		list_del_init(&fevent->list);
+	spin_unlock_irqrestore(&sde_crtc->spin_lock, flags);
+
+	if (!fevent) {
+		SDE_ERROR("crtc%d event %d overflow\n",
+				crtc->base.id, event);
+		SDE_EVT32(DRMID(crtc), event);
+		return;
+	}
+
+	fevent->event = event;
+	fevent->crtc = crtc;
+	fevent->ts = ktime_get();
+	queue_kthread_work(&priv->disp_thread[pipe_id].worker, &fevent->work);
+}
+
+/**
+ * sde_crtc_request_flip_cb - callback to request page_flip events
+ * Once the HW flush is complete , userspace must be notified of
+ * PAGE_FLIP completed event in the next vblank event.
+ * Using this callback, a hint is set to signal any callers waiting
+ * for a PAGE_FLIP complete event.
+ * This is called within the enc_spinlock.
+ * @data: Pointer to cb private data
+ */
+static void sde_crtc_request_flip_cb(void *data)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *)data;
+	struct sde_crtc *sde_crtc;
+
+	if (!crtc) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	/* atomically mark the flip pending; consumed in the vblank cb */
+	atomic_or(PENDING_FLIP, &sde_crtc->pending);
+}
+
+/*
+ * sde_crtc_complete_commit - post-commit completion hook
+ * @crtc: Pointer to drm crtc structure
+ * @old_state: Previous crtc state
+ *
+ * Signals the crtc output fence and completes the commit on every
+ * connector cached in the crtc state.
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int i;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(crtc->state);
+	SDE_EVT32(DRMID(crtc));
+
+	/* signal output fence(s) at end of commit */
+	sde_fence_signal(&sde_crtc->output_fence, 0);
+
+	for (i = 0; i < cstate->num_connectors; ++i)
+		sde_connector_complete_commit(cstate->connectors[i]);
+}
+
+/**
+ * _sde_crtc_set_input_fence_timeout - update ns version of in fence timeout
+ * @cstate: Pointer to sde crtc state
+ */
+static void _sde_crtc_set_input_fence_timeout(struct sde_crtc_state *cstate)
+{
+	if (!cstate) {
+		SDE_ERROR("invalid cstate\n");
+		return;
+	}
+	/* property is in milliseconds; cache it in nanoseconds */
+	cstate->input_fence_timeout_ns =
+		sde_crtc_get_property(cstate, CRTC_PROP_INPUT_FENCE_TIMEOUT);
+	cstate->input_fence_timeout_ns *= NSEC_PER_MSEC;
+}
+
+/**
+ * _sde_crtc_wait_for_fences - wait for incoming framebuffer sync fences
+ * @crtc: Pointer to CRTC object
+ */
+static void _sde_crtc_wait_for_fences(struct drm_crtc *crtc)
+{
+	struct drm_plane *plane = NULL;
+	uint32_t wait_ms = 1;
+	ktime_t kt_end, kt_wait;
+
+	SDE_DEBUG("\n");
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid crtc/state %pK\n", crtc);
+		return;
+	}
+
+	/* use monotonic timer to limit total fence wait time */
+	kt_end = ktime_add_ns(ktime_get(),
+		to_sde_crtc_state(crtc->state)->input_fence_timeout_ns);
+
+	/*
+	 * Wait for fences sequentially, as all of them need to be signalled
+	 * before we can proceed.
+	 *
+	 * Limit total wait time to INPUT_FENCE_TIMEOUT, but still call
+	 * sde_plane_wait_input_fence with wait_ms == 0 after the timeout so
+	 * that each plane can check its fence status and react appropriately
+	 * if its fence has timed out.
+	 */
+	SDE_ATRACE_BEGIN("plane_wait_input_fence");
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		if (wait_ms) {
+			/* determine updated wait time */
+			kt_wait = ktime_sub(kt_end, ktime_get());
+			if (ktime_compare(kt_wait, ktime_set(0, 0)) >= 0)
+				wait_ms = ktime_to_ms(kt_wait);
+			else
+				wait_ms = 0;
+		}
+		sde_plane_wait_input_fence(plane, wait_ms);
+	}
+	SDE_ATRACE_END("plane_wait_input_fence");
+}
+
+/*
+ * _sde_crtc_setup_mixer_for_encoder - claim LM/CTL/DSPP blocks for one encoder
+ * @crtc: Pointer to drm crtc structure
+ * @enc: Encoder whose resource-manager reservations are consumed
+ *
+ * Iterates the RM reservations made under the encoder's id and fills the
+ * crtc's mixer array, pairing each layer mixer with a ctl (reusing the
+ * previous ctl when fewer ctls than mixers were reserved) and an
+ * optional dspp.  Increments sde_crtc->num_mixers per claimed mixer.
+ */
+static void _sde_crtc_setup_mixer_for_encoder(
+		struct drm_crtc *crtc,
+		struct drm_encoder *enc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_kms *sde_kms = _sde_crtc_get_kms(crtc);
+	struct sde_rm *rm = &sde_kms->rm;
+	struct sde_crtc_mixer *mixer;
+	struct sde_hw_ctl *last_valid_ctl = NULL;
+	int i;
+	struct sde_rm_hw_iter lm_iter, ctl_iter, dspp_iter;
+
+	sde_rm_init_hw_iter(&lm_iter, enc->base.id, SDE_HW_BLK_LM);
+	sde_rm_init_hw_iter(&ctl_iter, enc->base.id, SDE_HW_BLK_CTL);
+	sde_rm_init_hw_iter(&dspp_iter, enc->base.id, SDE_HW_BLK_DSPP);
+
+	/* Set up all the mixers and ctls reserved by this encoder */
+	for (i = sde_crtc->num_mixers; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
+		mixer = &sde_crtc->mixers[i];
+
+		if (!sde_rm_get_hw(rm, &lm_iter))
+			break;
+		mixer->hw_lm = (struct sde_hw_mixer *)lm_iter.hw;
+
+		/* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+		if (!sde_rm_get_hw(rm, &ctl_iter)) {
+			SDE_DEBUG("no ctl assigned to lm %d, using previous\n",
+					mixer->hw_lm->idx - LM_0);
+			mixer->hw_ctl = last_valid_ctl;
+		} else {
+			mixer->hw_ctl = (struct sde_hw_ctl *)ctl_iter.hw;
+			last_valid_ctl = mixer->hw_ctl;
+		}
+
+		/* Shouldn't happen, mixers are always >= ctls */
+		if (!mixer->hw_ctl) {
+			SDE_ERROR("no valid ctls found for lm %d\n",
+					mixer->hw_lm->idx - LM_0);
+			return;
+		}
+
+		/* Dspp may be null */
+		(void) sde_rm_get_hw(rm, &dspp_iter);
+		mixer->hw_dspp = (struct sde_hw_dspp *)dspp_iter.hw;
+
+		mixer->encoder = enc;
+
+		sde_crtc->num_mixers++;
+		SDE_DEBUG("setup mixer %d: lm %d\n",
+				i, mixer->hw_lm->idx - LM_0);
+		SDE_DEBUG("setup mixer %d: ctl %d\n",
+				i, mixer->hw_ctl->idx - CTL_0);
+	}
+}
+
+/*
+ * _sde_crtc_setup_mixers - rebuild the crtc's mixer array from scratch
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Resets num_mixers and the mixer array, then claims reservations from
+ * every encoder currently attached to this crtc, under crtc_lock.
+ */
+static void _sde_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct drm_encoder *enc;
+
+	sde_crtc->num_mixers = 0;
+	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	/* Check for mixers on all encoders attached to this crtc */
+	list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+		if (enc->crtc != crtc)
+			continue;
+
+		_sde_crtc_setup_mixer_for_encoder(crtc, enc);
+	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/*
+ * _sde_crtc_setup_is_shared - propagate shared-display info to crtc state
+ * @state: Pointer to drm crtc state
+ *
+ * Copies the is_shared flag and shared ROI from the first cached
+ * connector into the crtc state; clears is_shared otherwise.
+ */
+static void _sde_crtc_setup_is_shared(struct drm_crtc_state *state)
+{
+	struct sde_crtc_state *cstate;
+
+	cstate = to_sde_crtc_state(state);
+
+	cstate->is_shared = false;
+	if (cstate->num_connectors) {
+		struct drm_connector *conn = cstate->connectors[0];
+		struct sde_connector *sde_conn = to_sde_connector(conn);
+
+		if (sde_conn->is_shared) {
+			cstate->is_shared = true;
+			cstate->shared_roi = sde_conn->shared_roi;
+		}
+	}
+}
+
+/*
+ * sde_crtc_atomic_begin - atomic begin hook
+ * @crtc: Pointer to drm crtc structure
+ * @old_state: Previous crtc state
+ *
+ * Lazily sets up mixers on the first commit after enable, clears stale
+ * flush masks from the previous commit, then programs blend setup and
+ * color-processing properties.  Skips everything for a disabled crtc.
+ */
+static void sde_crtc_atomic_begin(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct drm_device *dev;
+	u32 i;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	sde_crtc = to_sde_crtc(crtc);
+	dev = crtc->dev;
+
+	if (!sde_crtc->num_mixers) {
+		_sde_crtc_setup_is_shared(crtc->state);
+		_sde_crtc_setup_mixers(crtc);
+	}
+
+	/* Reset flush mask from previous commit */
+	for (i = 0; i < ARRAY_SIZE(sde_crtc->mixers); i++) {
+		struct sde_hw_ctl *ctl = sde_crtc->mixers[i].hw_ctl;
+
+		if (ctl)
+			ctl->ops.clear_pending_flush(ctl);
+	}
+
+	/*
+	 * If no mixers have been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to flush a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
+	_sde_crtc_blend_setup(crtc);
+	sde_cp_crtc_apply_properties(crtc);
+
+	/*
+	 * PP_DONE irq is only used by command mode for now.
+	 * It is better to request pending before FLUSH and START trigger
+	 * to make sure no pp_done irq missed.
+	 * This is safe because no pp_done will happen before SW trigger
+	 * in command mode.
+	 */
+}
+
+/*
+ * sde_crtc_atomic_flush - atomic flush hook
+ * @crtc: Pointer to drm crtc structure
+ * @old_crtc_state: Previous crtc state
+ *
+ * Caches the pending vblank event under event_lock, waits on all plane
+ * input fences, updates performance settings and flushes every plane.
+ * The actual hardware kickoff is scheduled by the outer layer.
+ */
+static void sde_crtc_atomic_flush(struct drm_crtc *crtc,
+		struct drm_crtc_state *old_crtc_state)
+{
+	struct sde_crtc *sde_crtc;
+	struct drm_device *dev;
+	struct drm_plane *plane;
+	unsigned long flags;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+				crtc->base.id, crtc->state->enable);
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	sde_crtc = to_sde_crtc(crtc);
+
+	dev = crtc->dev;
+
+	if (sde_crtc->event) {
+		SDE_ERROR("%s already received sde_crtc->event\n",
+				  sde_crtc->name);
+	} else {
+		spin_lock_irqsave(&dev->event_lock, flags);
+		sde_crtc->event = crtc->state->event;
+		spin_unlock_irqrestore(&dev->event_lock, flags);
+	}
+
+	/*
+	 * If no mixers has been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to flush a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
+	/* wait for acquire fences before anything else is done */
+	_sde_crtc_wait_for_fences(crtc);
+
+	/* update performance setting before crtc kickoff */
+	sde_core_perf_crtc_update(crtc, 1, false);
+
+	/*
+	 * Final plane updates: Give each plane a chance to complete all
+	 * required writes/flushing before crtc's "flush
+	 * everything" call below.
+	 */
+	drm_atomic_crtc_for_each_plane(plane, crtc)
+		sde_plane_flush(plane);
+
+	/* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * sde_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void sde_crtc_destroy_state(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	/* release base-class resources first, then property storage */
+	__drm_atomic_helper_crtc_destroy_state(crtc, state);
+
+	/* destroy value helper */
+	msm_property_destroy_state(&sde_crtc->property_info, cstate,
+			cstate->property_values, cstate->property_blobs);
+}
+
+/*
+ * sde_crtc_commit_kickoff - trigger hw flush/start for the commit
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Prepares every attached encoder for kickoff, accounts the pending
+ * frame (voting data-bus bandwidth on the first outstanding frame; the
+ * framework allows only one outstanding plus the current frame), then
+ * kicks off each encoder.
+ */
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!crtc) {
+		SDE_ERROR("invalid argument\n");
+		return;
+	}
+	dev = crtc->dev;
+	sde_crtc = to_sde_crtc(crtc);
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	}
+
+	priv = sde_kms->dev->dev_private;
+
+	/*
+	 * If no mixers has been allocated in sde_crtc_atomic_check(),
+	 * it means we are trying to start a CRTC whose state is disabled:
+	 * nothing else needs to be done.
+	 */
+	if (unlikely(!sde_crtc->num_mixers))
+		return;
+
+
+	SDE_ATRACE_BEGIN("crtc_commit");
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		/*
+		 * Encoder will flush/start now, unless it has a tx pending.
+		 * If so, it may delay and flush at an irq event (e.g. ppdone)
+		 */
+		sde_encoder_prepare_for_kickoff(encoder);
+	}
+
+	if (atomic_read(&sde_crtc->frame_pending) > 2) {
+		/* framework allows only 1 outstanding + current */
+		SDE_ERROR("crtc%d invalid frame pending\n",
+				crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 0);
+		goto end;
+	} else if (atomic_inc_return(&sde_crtc->frame_pending) == 1) {
+		/* acquire bandwidth and other resources */
+		SDE_DEBUG("crtc%d first commit\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 1);
+		sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+				sde_kms->core_client, true);
+	} else {
+		SDE_DEBUG("crtc%d commit\n", crtc->base.id);
+		SDE_EVT32(DRMID(crtc), 2);
+	}
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+
+		sde_encoder_kickoff(encoder);
+	}
+end:
+	SDE_ATRACE_END("crtc_commit");
+	return;
+}
+
+/**
+ * _sde_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @sde_crtc: Pointer to sde crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * Caller must hold crtc_lock.  On enable, takes a power resource
+ * reference before registering the vblank callback on each attached
+ * encoder; on disable, unregisters first and then drops the reference.
+ *
+ * @Return: error code
+ */
+static int _sde_crtc_vblank_enable_no_lock(
+		struct sde_crtc *sde_crtc, bool enable)
+{
+	struct drm_device *dev;
+	struct drm_crtc *crtc;
+	struct drm_encoder *enc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	int ret = 0;
+
+	if (!sde_crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	crtc = &sde_crtc->base;
+	dev = crtc->dev;
+	priv = dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	if (enable) {
+		ret = sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true);
+		if (ret)
+			return ret;
+
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc,
+					sde_crtc_vblank_cb, (void *)crtc);
+		}
+	} else {
+		list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+			if (enc->crtc != crtc)
+				continue;
+
+			SDE_EVT32(DRMID(crtc), DRMID(enc), enable);
+
+			sde_encoder_register_vblank_callback(enc, NULL, NULL);
+		}
+		ret = sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, false);
+	}
+
+	return ret;
+}
+
+/**
+ * _sde_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _sde_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+	struct sde_crtc *sde_crtc;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	priv = crtc->dev->dev_private;
+
+	if (!priv->kms) {
+		SDE_ERROR("invalid crtc kms\n");
+		return;
+	}
+	sde_kms = to_sde_kms(priv->kms);
+
+	SDE_DEBUG("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+
+	/*
+	 * Update CP on suspend/resume transitions
+	 */
+	if (enable && !sde_crtc->suspend)
+		sde_cp_crtc_suspend(crtc);
+	else if (!enable && sde_crtc->suspend)
+		sde_cp_crtc_resume(crtc);
+
+	/*
+	 * If the vblank refcount != 0, release a power reference on suspend
+	 * and take it back during resume (if it is still != 0).
+	 */
+	if (sde_crtc->suspend == enable)
+		SDE_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+				crtc->base.id, enable);
+	else if (sde_crtc->enabled && sde_crtc->vblank_requested)
+		_sde_crtc_vblank_enable_no_lock(sde_crtc, !enable);
+
+	sde_crtc->suspend = enable;
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/**
+ * sde_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * @Returns: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *sde_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate, *old_cstate;
+
+	if (!crtc || !crtc->state) {
+		SDE_ERROR("invalid argument(s)\n");
+		return NULL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	old_cstate = to_sde_crtc_state(crtc->state);
+	cstate = msm_property_alloc_state(&sde_crtc->property_info);
+	if (!cstate) {
+		SDE_ERROR("failed to allocate state\n");
+		return NULL;
+	}
+
+	/* duplicate value helper */
+	msm_property_duplicate_state(&sde_crtc->property_info,
+			old_cstate, cstate,
+			cstate->property_values, cstate->property_blobs);
+
+	/* duplicate base helper */
+	__drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
+	return &cstate->base;
+}
+
+/**
+ * sde_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * Also reverts any pending suspend actions first.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void sde_crtc_reset(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	/* revert suspend actions, if necessary */
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, false);
+
+	/* remove previous state, if present */
+	if (crtc->state) {
+		sde_crtc_destroy_state(crtc, crtc->state);
+		/* use NULL, not integer 0, for the pointer */
+		crtc->state = NULL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = msm_property_alloc_state(&sde_crtc->property_info);
+	if (!cstate) {
+		SDE_ERROR("failed to allocate state\n");
+		return;
+	}
+
+	/* reset value helper */
+	msm_property_reset_state(&sde_crtc->property_info, cstate,
+			cstate->property_values, cstate->property_blobs);
+
+	_sde_crtc_set_input_fence_timeout(cstate);
+
+	cstate->base.crtc = crtc;
+	crtc->state = &cstate->base;
+}
+
+/**
+ * sde_crtc_disable - crtc helper 'disable' hook
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Marks the crtc suspended when disable happens as part of PM suspend,
+ * releases the vblank reference held on behalf of userspace, drops any
+ * leftover frame bandwidth, and detaches encoder callbacks and mixers.
+ */
+static void sde_crtc_disable(struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct sde_crtc *sde_crtc;
+	struct sde_kms *sde_kms;
+	struct msm_drm_private *priv;
+	int ret = 0;
+
+	if (!crtc || !crtc->dev || !crtc->state) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid kms handle\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+
+	/* record suspend state before touching vblank bookkeeping below */
+	if (msm_is_suspend_state(crtc->dev))
+		_sde_crtc_set_suspend(crtc, true);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
+			sde_crtc->vblank_requested);
+
+	/* drop the hw vblank ref only if this path still holds one */
+	if (sde_crtc->enabled && !sde_crtc->suspend &&
+			sde_crtc->vblank_requested) {
+		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, false);
+		if (ret)
+			SDE_ERROR("%s vblank disable failed: %d\n",
+					sde_crtc->name, ret);
+	}
+
+	sde_crtc->enabled = false;
+
+	/* frames should normally have drained before disable */
+	if (atomic_read(&sde_crtc->frame_pending)) {
+		/* release bandwidth and other resources */
+		SDE_ERROR("crtc%d invalid frame pending\n",
+				crtc->base.id);
+		SDE_EVT32(DRMID(crtc));
+		sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+				sde_kms->core_client, false);
+		sde_core_perf_crtc_release_bw(crtc);
+		atomic_set(&sde_crtc->frame_pending, 0);
+	}
+
+	sde_core_perf_crtc_update(crtc, 0, true);
+
+	/* detach frame-event/flip callbacks from attached encoders */
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+		sde_encoder_register_frame_event_callback(encoder, NULL, NULL);
+		sde_encoder_register_request_flip_callback(encoder, NULL, NULL);
+	}
+
+	/* mixers are re-acquired on the next enable/modeset */
+	memset(sde_crtc->mixers, 0, sizeof(sde_crtc->mixers));
+	sde_crtc->num_mixers = 0;
+	mutex_unlock(&sde_crtc->crtc_lock);
+}
+
+/**
+ * sde_crtc_enable - crtc helper 'enable' hook
+ * @crtc: Pointer to drm crtc structure
+ *
+ * Registers frame-event/flip callbacks with the attached encoders,
+ * reacquires the vblank reference if userspace had requested it, and
+ * programs the output dimensions of each assigned layer mixer.
+ */
+static void sde_crtc_enable(struct drm_crtc *crtc)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_mixer *mixer;
+	struct sde_hw_mixer *lm;
+	struct drm_display_mode *mode;
+	struct sde_hw_mixer_cfg cfg;
+	struct drm_encoder *encoder;
+	int i;
+	int ret = 0;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return;
+	}
+
+	SDE_DEBUG("crtc%d\n", crtc->base.id);
+	SDE_EVT32(DRMID(crtc));
+
+	sde_crtc = to_sde_crtc(crtc);
+	mixer = sde_crtc->mixers;
+
+	if (WARN_ON(!crtc->state))
+		return;
+
+	mode = &crtc->state->adjusted_mode;
+
+	drm_mode_debug_printmodeline(mode);
+
+	drm_for_each_encoder(encoder, crtc->dev) {
+		if (encoder->crtc != crtc)
+			continue;
+		sde_encoder_register_frame_event_callback(encoder,
+				sde_crtc_frame_event_cb, (void *)crtc);
+		sde_encoder_register_request_flip_callback(encoder,
+				sde_crtc_request_flip_cb, (void *)crtc);
+	}
+
+	/* re-take the vblank ref dropped in disable, if still requested */
+	mutex_lock(&sde_crtc->crtc_lock);
+	SDE_EVT32(DRMID(crtc), sde_crtc->enabled, sde_crtc->suspend,
+		sde_crtc->vblank_requested);
+	if (!sde_crtc->enabled && !sde_crtc->suspend &&
+			sde_crtc->vblank_requested) {
+		ret = _sde_crtc_vblank_enable_no_lock(sde_crtc, true);
+		if (ret)
+			SDE_ERROR("%s vblank enable failed: %d\n",
+					sde_crtc->name, ret);
+	}
+	sde_crtc->enabled = true;
+	mutex_unlock(&sde_crtc->crtc_lock);
+
+	/* program per-mixer output rect; mixer 0 is left, others right */
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		lm = mixer[i].hw_lm;
+		cfg.out_width = sde_crtc_mixer_width(sde_crtc, mode);
+		cfg.out_height = mode->vdisplay;
+		cfg.right_mixer = (i == 0) ? false : true;
+		cfg.flags = 0;
+		lm->ops.setup_mixer_out(lm, &cfg);
+	}
+}
+
+/**
+ * struct plane_state - scratch entry used to sort planes for blend staging
+ * @sde_pstate: driver-private plane state
+ * @drm_pstate: base drm plane state
+ * @stage: working blend stage, seeded from the plane ZPOS property
+ */
+struct plane_state {
+	struct sde_plane_state *sde_pstate;
+	struct drm_plane_state *drm_pstate;
+
+	int stage;
+};
+
+/* sort() comparator: order plane states by zpos, then by destination x */
+static int pstate_cmp(const void *a, const void *b)
+{
+	struct plane_state *pa = (struct plane_state *)a;
+	struct plane_state *pb = (struct plane_state *)b;
+	int za, zb;
+
+	za = sde_plane_get_property(pa->sde_pstate, PLANE_PROP_ZPOS);
+	zb = sde_plane_get_property(pb->sde_pstate, PLANE_PROP_ZPOS);
+
+	if (za != zb)
+		return za - zb;
+
+	/* same blend stage: left-most destination wins */
+	return pa->drm_pstate->crtc_x - pb->drm_pstate->crtc_x;
+}
+
+/**
+ * sde_crtc_atomic_check - crtc helper 'atomic_check' hook
+ * @crtc: Pointer to drm crtc structure
+ * @state: Proposed crtc state to validate
+ *
+ * Validates plane destination bounds, assigns blend stages from sorted
+ * zpos values, runs the performance check, and validates source-split
+ * constraints across planes sharing a stage.
+ * @Returns: Zero on success, negative errno on validation failure
+ */
+static int sde_crtc_atomic_check(struct drm_crtc *crtc,
+		struct drm_crtc_state *state)
+{
+	struct sde_crtc *sde_crtc;
+	struct plane_state pstates[SDE_STAGE_MAX * 2];
+
+	struct drm_plane_state *pstate;
+	struct drm_plane *plane;
+	struct drm_display_mode *mode;
+
+	int cnt = 0, rc = 0, mixer_width, i, z_pos;
+	int left_zpos_cnt = 0, right_zpos_cnt = 0;
+
+	/* state is dereferenced below, so validate it along with crtc */
+	if (!crtc || !state) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+
+	if (!state->enable || !state->active) {
+		SDE_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+				crtc->base.id, state->enable, state->active);
+		return 0;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	mode = &state->adjusted_mode;
+	SDE_DEBUG("%s: check\n", sde_crtc->name);
+
+	/* force a full mode set if active state changed */
+	if (state->active_changed)
+		state->mode_changed = true;
+
+	mixer_width = sde_crtc_mixer_width(sde_crtc, mode);
+	_sde_crtc_setup_is_shared(state);
+
+	/* get plane state for all drm planes associated with crtc state */
+	drm_atomic_crtc_state_for_each_plane(plane, state) {
+		pstate = drm_atomic_get_existing_plane_state(
+				state->state, plane);
+		if (IS_ERR_OR_NULL(pstate)) {
+			SDE_DEBUG("%s: failed to get plane%d state, %d\n",
+					sde_crtc->name, plane->base.id, rc);
+			continue;
+		}
+		if (cnt >= ARRAY_SIZE(pstates))
+			continue;
+
+		pstates[cnt].sde_pstate = to_sde_plane_state(pstate);
+		pstates[cnt].drm_pstate = pstate;
+		pstates[cnt].stage = sde_plane_get_property(
+				pstates[cnt].sde_pstate, PLANE_PROP_ZPOS);
+		cnt++;
+
+		/* destination rect must lie fully inside the display mode */
+		if (CHECK_LAYER_BOUNDS(pstate->crtc_y, pstate->crtc_h,
+				mode->vdisplay) ||
+		    CHECK_LAYER_BOUNDS(pstate->crtc_x, pstate->crtc_w,
+				mode->hdisplay)) {
+			SDE_ERROR("invalid vertical/horizontal destination\n");
+			SDE_ERROR("y:%d h:%d vdisp:%d x:%d w:%d hdisp:%d\n",
+				pstate->crtc_y, pstate->crtc_h, mode->vdisplay,
+				pstate->crtc_x, pstate->crtc_w, mode->hdisplay);
+			rc = -E2BIG;
+			goto end;
+		}
+	}
+
+	/* assign mixer stages based on sorted zpos property */
+	sort(pstates, cnt, sizeof(pstates[0]), pstate_cmp, NULL);
+
+	/*
+	 * Compact the zpos values to consecutive stages for non-custom
+	 * clients. Guard cnt == 0: pstates[0] is uninitialized then.
+	 */
+	if (cnt && !sde_is_custom_client()) {
+		int stage_old = pstates[0].stage;
+
+		z_pos = 0;
+		for (i = 0; i < cnt; i++) {
+			if (stage_old != pstates[i].stage)
+				++z_pos;
+			stage_old = pstates[i].stage;
+			pstates[i].stage = z_pos;
+		}
+	}
+
+	z_pos = -1;
+	for (i = 0; i < cnt; i++) {
+		/* reset counts at every new blend stage */
+		if (pstates[i].stage != z_pos) {
+			left_zpos_cnt = 0;
+			right_zpos_cnt = 0;
+			z_pos = pstates[i].stage;
+		}
+
+		/* verify z_pos setting before using it */
+		if (z_pos >= SDE_STAGE_MAX - SDE_STAGE_0) {
+			SDE_ERROR("> %d plane stages assigned\n",
+					SDE_STAGE_MAX - SDE_STAGE_0);
+			rc = -EINVAL;
+			goto end;
+		} else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+			/* at most 2 planes may share a stage on each side */
+			if (left_zpos_cnt == 2) {
+				SDE_ERROR("> 2 planes @ stage %d on left\n",
+					z_pos);
+				rc = -EINVAL;
+				goto end;
+			}
+			left_zpos_cnt++;
+
+		} else {
+			if (right_zpos_cnt == 2) {
+				SDE_ERROR("> 2 planes @ stage %d on right\n",
+					z_pos);
+				rc = -EINVAL;
+				goto end;
+			}
+			right_zpos_cnt++;
+		}
+
+		pstates[i].sde_pstate->stage = z_pos + SDE_STAGE_0;
+		SDE_DEBUG("%s: zpos %d\n", sde_crtc->name, z_pos);
+	}
+
+	rc = sde_core_perf_crtc_check(crtc, state);
+	if (rc) {
+		SDE_ERROR("crtc%d failed performance check %d\n",
+				crtc->base.id, rc);
+		goto end;
+	}
+
+	/* validate source split:
+	 * use pstates sorted by stage to check planes on same stage
+	 * we assume that all pipes are in source split so its valid to compare
+	 * without taking into account left/right mixer placement
+	 */
+	for (i = 1; i < cnt; i++) {
+		struct plane_state *prv_pstate, *cur_pstate;
+		struct sde_rect left_rect, right_rect;
+		int32_t left_pid, right_pid;
+		int32_t stage;
+
+		prv_pstate = &pstates[i - 1];
+		cur_pstate = &pstates[i];
+		if (prv_pstate->stage != cur_pstate->stage)
+			continue;
+
+		stage = cur_pstate->stage;
+
+		left_pid = prv_pstate->sde_pstate->base.plane->base.id;
+		POPULATE_RECT(&left_rect, prv_pstate->drm_pstate->crtc_x,
+			prv_pstate->drm_pstate->crtc_y,
+			prv_pstate->drm_pstate->crtc_w,
+			prv_pstate->drm_pstate->crtc_h, false);
+
+		right_pid = cur_pstate->sde_pstate->base.plane->base.id;
+		POPULATE_RECT(&right_rect, cur_pstate->drm_pstate->crtc_x,
+			cur_pstate->drm_pstate->crtc_y,
+			cur_pstate->drm_pstate->crtc_w,
+			cur_pstate->drm_pstate->crtc_h, false);
+
+		if (right_rect.x < left_rect.x) {
+			swap(left_pid, right_pid);
+			swap(left_rect, right_rect);
+		}
+
+		/**
+		 * - planes are enumerated in pipe-priority order such that
+		 *   planes with lower drm_id must be left-most in a shared
+		 *   blend-stage when using source split.
+		 * - planes in source split must be contiguous in width
+		 * - planes in source split must have same dest yoff and height
+		 */
+		if (right_pid < left_pid) {
+			SDE_ERROR(
+				"invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+				stage, left_pid, right_pid);
+			rc = -EINVAL;
+			goto end;
+		} else if (right_rect.x != (left_rect.x + left_rect.w)) {
+			SDE_ERROR(
+				"non-contiguous coordinates for src split. stage: %d left: %d - %d right: %d - %d\n",
+				stage, left_rect.x, left_rect.w,
+				right_rect.x, right_rect.w);
+			rc = -EINVAL;
+			goto end;
+		} else if ((left_rect.y != right_rect.y) ||
+				(left_rect.h != right_rect.h)) {
+			SDE_ERROR(
+				"source split at stage: %d. invalid yoff/height: l_y: %d r_y: %d l_h: %d r_h: %d\n",
+				stage, left_rect.y, right_rect.y,
+				left_rect.h, right_rect.h);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+
+end:
+	return rc;
+}
+
+/**
+ * sde_crtc_vblank - request or release vblank events for a crtc
+ * @crtc: Pointer to drm crtc structure
+ * @en: true to request vblank events, false to release
+ * @Returns: Zero on success, -EINVAL on invalid crtc
+ */
+int sde_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+	struct sde_crtc *sde_crtc;
+	int rc;
+
+	if (!crtc) {
+		SDE_ERROR("invalid crtc\n");
+		return -EINVAL;
+	}
+	sde_crtc = to_sde_crtc(crtc);
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	if (en != sde_crtc->vblank_requested) {
+		SDE_EVT32(DRMID(&sde_crtc->base), en, sde_crtc->enabled,
+				sde_crtc->suspend, sde_crtc->vblank_requested);
+		/* only touch hardware while enabled and not suspended */
+		if (sde_crtc->enabled && !sde_crtc->suspend) {
+			rc = _sde_crtc_vblank_enable_no_lock(sde_crtc, en);
+			if (rc)
+				SDE_ERROR("%s vblank enable failed: %d\n",
+						sde_crtc->name, rc);
+		}
+
+		/* remember the request for enable/disable transitions */
+		sde_crtc->vblank_requested = en;
+	}
+	mutex_unlock(&sde_crtc->crtc_lock);
+
+	return 0;
+}
+
+/**
+ * sde_crtc_cancel_pending_flip - complete flip events owned by a drm file
+ * @crtc: Pointer to drm crtc structure
+ * @file: Pointer to the drm file being closed
+ */
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc,
+		struct drm_file *file)
+{
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+
+	SDE_DEBUG("%s: cancel: %pK\n", sde_crtc->name, file);
+	_sde_crtc_complete_flip(crtc, file);
+}
+
+/**
+ * sde_crtc_install_properties - install all drm properties for crtc
+ * @crtc: Pointer to drm crtc structure
+ * @catalog: Pointer to hardware catalog describing mdss capabilities
+ */
+static void sde_crtc_install_properties(struct drm_crtc *crtc,
+	struct sde_mdss_cfg *catalog)
+{
+	struct sde_crtc *sde_crtc;
+	struct drm_device *dev;
+	struct sde_kms_info *info;
+	struct sde_kms *sde_kms;
+	static const struct drm_prop_enum_list e_secure_level[] = {
+		{SDE_DRM_SEC_NON_SEC, "sec_and_non_sec"},
+		{SDE_DRM_SEC_ONLY, "sec_only"},
+	};
+
+	SDE_DEBUG("\n");
+
+	if (!crtc || !catalog) {
+		SDE_ERROR("invalid crtc or catalog\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	dev = crtc->dev;
+	sde_kms = _sde_crtc_get_kms(crtc);
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	}
+
+	/* scratch buffer used to build the 'capabilities' info blob */
+	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	if (!info) {
+		SDE_ERROR("failed to allocate info memory\n");
+		return;
+	}
+
+	/* range properties */
+	msm_property_install_range(&sde_crtc->property_info,
+		"input_fence_timeout", 0x0, 0, SDE_CRTC_MAX_INPUT_FENCE_TIMEOUT,
+		SDE_CRTC_INPUT_FENCE_TIMEOUT, CRTC_PROP_INPUT_FENCE_TIMEOUT);
+
+	/* volatile: value is a userspace pointer, consumed per-commit */
+	msm_property_install_volatile_range(&sde_crtc->property_info,
+		"output_fence", 0x0, 0, ~0, 0, CRTC_PROP_OUTPUT_FENCE);
+
+	msm_property_install_range(&sde_crtc->property_info,
+			"output_fence_offset", 0x0, 0, 1, 0,
+			CRTC_PROP_OUTPUT_FENCE_OFFSET);
+
+	/* performance knobs: clock rate and data bus ab/ib votes */
+	msm_property_install_range(&sde_crtc->property_info,
+			"core_clk", 0x0, 0, U64_MAX,
+			sde_kms->perf.max_core_clk_rate,
+			CRTC_PROP_CORE_CLK);
+	msm_property_install_range(&sde_crtc->property_info,
+			"core_ab", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
+			CRTC_PROP_CORE_AB);
+	msm_property_install_range(&sde_crtc->property_info,
+			"core_ib", 0x0, 0, U64_MAX,
+			SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
+			CRTC_PROP_CORE_IB);
+
+	msm_property_install_blob(&sde_crtc->property_info, "capabilities",
+		DRM_MODE_PROP_IMMUTABLE, CRTC_PROP_INFO);
+
+	msm_property_install_enum(&sde_crtc->property_info, "security_level",
+			0x0, 0, e_secure_level,
+			ARRAY_SIZE(e_secure_level),
+			CRTC_PROP_SECURITY_LEVEL, SDE_DRM_SEC_NON_SEC);
+
+	sde_kms_info_reset(info);
+
+	sde_kms_info_add_keyint(info, "hw_version", catalog->hwversion);
+	sde_kms_info_add_keyint(info, "max_linewidth",
+			catalog->max_mixer_width);
+
+	/* till now, we can't know which display early RVC will run on.
+	 * Not to impact early RVC's layer, we decrease all lm's blend stage.
+	 * This should be restored after handoff is done.
+	 */
+	if (sde_kms->splash_info.handoff)
+		sde_kms_info_add_keyint(info, "max_blendstages",
+				catalog->max_mixer_blendstages - 2);
+	else
+		sde_kms_info_add_keyint(info, "max_blendstages",
+				catalog->max_mixer_blendstages);
+
+	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED2)
+		sde_kms_info_add_keystr(info, "qseed_type", "qseed2");
+	if (catalog->qseed_type == SDE_SSPP_SCALER_QSEED3)
+		sde_kms_info_add_keystr(info, "qseed_type", "qseed3");
+	sde_kms_info_add_keyint(info, "has_src_split", catalog->has_src_split);
+	sde_kms_info_add_keyint(info, "has_hdr", catalog->has_hdr);
+	if (catalog->perf.max_bw_low)
+		sde_kms_info_add_keyint(info, "max_bandwidth_low",
+				catalog->perf.max_bw_low);
+	if (catalog->perf.max_bw_high)
+		sde_kms_info_add_keyint(info, "max_bandwidth_high",
+				catalog->perf.max_bw_high);
+	if (sde_kms->perf.max_core_clk_rate)
+		sde_kms_info_add_keyint(info, "max_mdp_clk",
+				sde_kms->perf.max_core_clk_rate);
+	/* publish the accumulated capability data as the info blob */
+	msm_property_set_blob(&sde_crtc->property_info, &sde_crtc->blob_info,
+			info->data, SDE_KMS_INFO_DATALEN(info), CRTC_PROP_INFO);
+
+	kfree(info);
+}
+
+/**
+ * _sde_crtc_get_output_fence - create an output fence for the next commit
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to crtc state holding the fence offset property
+ * @val: Output parameter receiving the created fence descriptor
+ * @Returns: result of sde_fence_create()
+ */
+static int _sde_crtc_get_output_fence(struct drm_crtc *crtc,
+		const struct drm_crtc_state *state, uint64_t *val)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	uint32_t offset;
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	offset = sde_crtc_get_property(cstate, CRTC_PROP_OUTPUT_FENCE_OFFSET);
+
+	/*
+	 * Hwcomposer now queries the fences using the commit list in atomic
+	 * commit ioctl. The offset should be set to next timeline
+	 * which will be incremented during the prepare commit phase
+	 */
+	offset++;
+
+	return sde_fence_create(&sde_crtc->output_fence, val, offset);
+}
+
+/**
+ * sde_crtc_atomic_set_property - atomically set a crtc drm property
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state structure
+ * @property: Pointer to targeted drm property
+ * @val: Updated property value
+ * @Returns: Zero on success
+ *
+ * Unrecognized properties fall through to the color-processing module.
+ * For CRTC_PROP_OUTPUT_FENCE, @val is a userspace pointer that receives
+ * the newly created fence fd.
+ */
+static int sde_crtc_atomic_set_property(struct drm_crtc *crtc,
+		struct drm_crtc_state *state,
+		struct drm_property *property,
+		uint64_t val)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int idx, ret = -EINVAL;
+	uint64_t fence_fd = 0;
+
+	if (!crtc || !state || !property) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	ret = msm_property_atomic_set(&sde_crtc->property_info,
+			cstate->property_values, cstate->property_blobs,
+			property, val);
+
+	if (!ret) {
+		idx = msm_property_index(&sde_crtc->property_info,
+				property);
+		switch (idx) {
+		case CRTC_PROP_INPUT_FENCE_TIMEOUT:
+			_sde_crtc_set_input_fence_timeout(cstate);
+			break;
+		case CRTC_PROP_OUTPUT_FENCE:
+			/* zero pointer means the client skipped the fence */
+			if (!val)
+				goto exit;
+
+			ret = _sde_crtc_get_output_fence(crtc,
+						state, &fence_fd);
+			if (ret) {
+				SDE_ERROR("fence create failed rc:%d\n", ret);
+				goto exit;
+			}
+
+			/* copy_to_user returns bytes NOT copied; nonzero
+			 * means failure, so release the fd we created
+			 */
+			ret = copy_to_user((uint64_t __user *)val, &fence_fd,
+				sizeof(uint64_t));
+
+			if (ret) {
+				SDE_ERROR("copy to user failed rc:%d\n", ret);
+				put_unused_fd(fence_fd);
+				ret = -EFAULT;
+				goto exit;
+			}
+			break;
+		default:
+			/* nothing to do */
+			break;
+		}
+	} else {
+		/* not a crtc base property; try color processing */
+		ret = sde_cp_crtc_set_property(crtc,
+				property, val);
+	}
+
+exit:
+	if (ret)
+		DRM_ERROR("failed to set the property\n");
+
+	return ret;
+}
+
+/**
+ * sde_crtc_set_property - set a crtc drm property
+ * @crtc: Pointer to drm crtc structure
+ * @property: Pointer to targeted drm property
+ * @val: Updated property value
+ * @Returns: Zero on success
+ *
+ * Legacy (non-atomic) path; applies directly to the current crtc state.
+ */
+static int sde_crtc_set_property(struct drm_crtc *crtc,
+		struct drm_property *property, uint64_t val)
+{
+	SDE_DEBUG("\n");
+
+	return sde_crtc_atomic_set_property(crtc, crtc->state, property, val);
+}
+
+/**
+ * sde_crtc_atomic_get_property - retrieve a crtc drm property
+ * @crtc: Pointer to drm crtc structure
+ * @state: Pointer to drm crtc state structure
+ * @property: Pointer to targeted drm property
+ * @val: Pointer to variable for receiving property value
+ * @Returns: Zero on success
+ *
+ * CRTC_PROP_OUTPUT_FENCE is write-only (the fd is delivered on set),
+ * so reads return the ~0 sentinel. Other properties are resolved via
+ * the msm property helper, then the color-processing module.
+ */
+static int sde_crtc_atomic_get_property(struct drm_crtc *crtc,
+		const struct drm_crtc_state *state,
+		struct drm_property *property,
+		uint64_t *val)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_state *cstate;
+	int i, ret = -EINVAL;
+
+	if (!crtc || !state) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+	cstate = to_sde_crtc_state(state);
+
+	i = msm_property_index(&sde_crtc->property_info, property);
+	if (i == CRTC_PROP_OUTPUT_FENCE) {
+		*val = ~0;
+		ret = 0;
+	} else {
+		ret = msm_property_atomic_get(&sde_crtc->property_info,
+				cstate->property_values,
+				cstate->property_blobs, property, val);
+		if (ret)
+			ret = sde_cp_crtc_get_property(crtc,
+				property, val);
+	}
+	if (ret)
+		DRM_ERROR("get property failed\n");
+
+	return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * _sde_debugfs_status_show - debugfs 'status' file: dump crtc state
+ * @s: seq_file to print into (private data is the sde_crtc)
+ * @data: unused
+ *
+ * Prints mode dimensions, mixer/ctl assignments, per-plane state, and a
+ * measured vblank rate; the vblank counters are reset on each read.
+ */
+static int _sde_debugfs_status_show(struct seq_file *s, void *data)
+{
+	struct sde_crtc *sde_crtc;
+	struct sde_plane_state *pstate = NULL;
+	struct sde_crtc_mixer *m;
+
+	struct drm_crtc *crtc;
+	struct drm_plane *plane;
+	struct drm_display_mode *mode;
+	struct drm_framebuffer *fb;
+	struct drm_plane_state *state;
+
+	int i, out_width;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	sde_crtc = s->private;
+	crtc = &sde_crtc->base;
+
+	mutex_lock(&sde_crtc->crtc_lock);
+	mode = &crtc->state->adjusted_mode;
+	out_width = sde_crtc_mixer_width(sde_crtc, mode);
+
+	seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+				mode->hdisplay, mode->vdisplay);
+
+	seq_puts(s, "\n");
+
+	/* mixer/ctl pairing for each assigned layer mixer */
+	for (i = 0; i < sde_crtc->num_mixers; ++i) {
+		m = &sde_crtc->mixers[i];
+		if (!m->hw_lm)
+			seq_printf(s, "\tmixer[%d] has no lm\n", i);
+		else if (!m->hw_ctl)
+			seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+		else
+			seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+				m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+				out_width, mode->vdisplay);
+	}
+
+	seq_puts(s, "\n");
+
+	/* per-plane stage, fb, and src/dst rectangles */
+	drm_atomic_crtc_for_each_plane(plane, crtc) {
+		pstate = to_sde_plane_state(plane->state);
+		state = plane->state;
+
+		if (!pstate || !state)
+			continue;
+
+		seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+			pstate->stage);
+
+		if (plane->state->fb) {
+			fb = plane->state->fb;
+
+			seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u bpp:%d\n",
+				fb->base.id, (char *) &fb->pixel_format,
+				fb->width, fb->height, fb->bits_per_pixel);
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->modifier); i++)
+				seq_printf(s, "modifier[%d]:%8llu ", i,
+							fb->modifier[i]);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+				seq_printf(s, "pitches[%d]:%8u ", i,
+							fb->pitches[i]);
+			seq_puts(s, "\n");
+
+			seq_puts(s, "\t");
+			for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+				seq_printf(s, "offsets[%d]:%8u ", i,
+							fb->offsets[i]);
+			seq_puts(s, "\n");
+		}
+
+		seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+			state->src_x, state->src_y, state->src_w, state->src_h);
+
+		seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+			state->crtc_x, state->crtc_y, state->crtc_w,
+			state->crtc_h);
+		seq_puts(s, "\n");
+	}
+
+	/* fps since the last read, computed from the vblank callback count */
+	if (sde_crtc->vblank_cb_count) {
+		ktime_t diff = ktime_sub(ktime_get(), sde_crtc->vblank_cb_time);
+		s64 diff_ms = ktime_to_ms(diff);
+		s64 fps = diff_ms ? DIV_ROUND_CLOSEST(
+				sde_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+		seq_printf(s,
+			"vblank fps:%lld count:%u total:%llums\n",
+				fps,
+				sde_crtc->vblank_cb_count,
+				ktime_to_ms(diff));
+
+		/* reset time & count for next measurement */
+		sde_crtc->vblank_cb_count = 0;
+		sde_crtc->vblank_cb_time = ktime_set(0, 0);
+	}
+
+	seq_printf(s, "vblank_enable:%d\n", sde_crtc->vblank_requested);
+
+	mutex_unlock(&sde_crtc->crtc_lock);
+
+	return 0;
+}
+
+/* debugfs open hook: bind the status seq_file to its sde_crtc */
+static int _sde_debugfs_status_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, _sde_debugfs_status_show, inode->i_private);
+}
+#endif
+
+/* legacy .save hook: forward to color-processing suspend */
+static void sde_crtc_suspend(struct drm_crtc *crtc)
+{
+	sde_cp_crtc_suspend(crtc);
+}
+
+/* legacy .restore hook: forward to color-processing resume */
+static void sde_crtc_resume(struct drm_crtc *crtc)
+{
+	sde_cp_crtc_resume(crtc);
+}
+
+/* crtc core funcs: atomic state/property plumbing plus save/restore hooks */
+static const struct drm_crtc_funcs sde_crtc_funcs = {
+	.set_config = drm_atomic_helper_set_config,
+	.destroy = sde_crtc_destroy,
+	.page_flip = drm_atomic_helper_page_flip,
+	.set_property = sde_crtc_set_property,
+	.atomic_set_property = sde_crtc_atomic_set_property,
+	.atomic_get_property = sde_crtc_atomic_get_property,
+	.reset = sde_crtc_reset,
+	.atomic_duplicate_state = sde_crtc_duplicate_state,
+	.atomic_destroy_state = sde_crtc_destroy_state,
+	.save = sde_crtc_suspend,
+	.restore = sde_crtc_resume,
+};
+
+/* crtc helper funcs: modeset, enable/disable, and atomic commit phases */
+static const struct drm_crtc_helper_funcs sde_crtc_helper_funcs = {
+	.mode_fixup = sde_crtc_mode_fixup,
+	.disable = sde_crtc_disable,
+	.enable = sde_crtc_enable,
+	.atomic_check = sde_crtc_atomic_check,
+	.atomic_begin = sde_crtc_atomic_begin,
+	.atomic_flush = sde_crtc_atomic_flush,
+};
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * DEFINE_SDE_DEBUGFS_SEQ_FOPS - generate a single_open() file_operations
+ * (named <prefix>_fops) for a seq_file show function named <prefix>_show.
+ */
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)                          \
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+/* debugfs 'state' file: dump connector count, mode, and perf votes */
+static int sde_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+	struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+	struct sde_crtc *sde_crtc = to_sde_crtc(crtc);
+	struct sde_crtc_state *cstate = to_sde_crtc_state(crtc->state);
+
+	seq_printf(s, "num_connectors: %d\n", cstate->num_connectors);
+	seq_printf(s, "is_rt: %d\n", cstate->is_rt);
+	seq_printf(s, "intf_mode: %d\n", sde_crtc_get_intf_mode(crtc));
+
+	seq_printf(s, "bw_ctl: %llu\n", sde_crtc->cur_perf.bw_ctl);
+	seq_printf(s, "core_clk_rate: %u\n",
+			sde_crtc->cur_perf.core_clk_rate);
+	seq_printf(s, "max_per_pipe_ib: %llu\n",
+			sde_crtc->cur_perf.max_per_pipe_ib);
+
+	return 0;
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_crtc_debugfs_state);
+
+/**
+ * _sde_crtc_init_debugfs - create per-crtc debugfs dir and entries
+ * @sde_crtc: Pointer to sde crtc structure
+ * @sde_kms: Pointer to sde kms structure (provides the debugfs root)
+ */
+static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
+		struct sde_kms *sde_kms)
+{
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_sde_debugfs_status_open,
+		.read =		seq_read,
+		.llseek =	seq_lseek,
+		.release =	single_release,
+	};
+
+	if (sde_crtc && sde_kms) {
+		sde_crtc->debugfs_root = debugfs_create_dir(sde_crtc->name,
+				sde_debugfs_get_root(sde_kms));
+		if (sde_crtc->debugfs_root) {
+			/* don't error check these */
+			debugfs_create_file("status", S_IRUGO,
+					sde_crtc->debugfs_root,
+					sde_crtc, &debugfs_status_fops);
+			debugfs_create_file("state", S_IRUGO | S_IWUSR,
+					sde_crtc->debugfs_root,
+					&sde_crtc->base,
+					&sde_crtc_debugfs_state_fops);
+		}
+	}
+}
+#else
+/* debugfs disabled: no-op stub */
+static void _sde_crtc_init_debugfs(struct sde_crtc *sde_crtc,
+		struct sde_kms *sde_kms)
+{
+}
+#endif
+
+/**
+ * sde_crtc_update_blob_property - update one key in the crtc info blob
+ * @crtc: Pointer to drm crtc structure
+ * @key: Name of the info blob entry to update
+ * @value: New integer value for the entry
+ */
+void sde_crtc_update_blob_property(struct drm_crtc *crtc,
+		const char *key,
+		int32_t value)
+{
+	struct sde_crtc *sde_crtc;
+	char *kms_info_str = NULL;
+	size_t len;
+
+	/* guard arguments, consistent with other entry points in this file */
+	if (!crtc || !key) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+	sde_crtc = to_sde_crtc(crtc);
+
+	kms_info_str = (char *)msm_property_get_blob(&sde_crtc->property_info,
+				&sde_crtc->blob_info, &len, CRTC_PROP_INFO);
+	if (!kms_info_str) {
+		SDE_ERROR("get crtc property_info failed");
+		return;
+	}
+
+	sde_kms_info_update_keystr(kms_info_str, key, value);
+}
+
+/**
+ * sde_crtc_init - initialize and register a new sde crtc
+ * @dev: Pointer to drm device
+ * @plane: Primary plane to attach to the crtc
+ * @Returns: Pointer to new drm_crtc, or ERR_PTR(-ENOMEM) on allocation failure
+ */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev,
+	struct drm_plane *plane)
+{
+	struct drm_crtc *crtc = NULL;
+	struct sde_crtc *sde_crtc = NULL;
+	struct msm_drm_private *priv = NULL;
+	struct sde_kms *kms = NULL;
+	int i;
+
+	priv = dev->dev_private;
+	kms = to_sde_kms(priv->kms);
+
+	sde_crtc = kzalloc(sizeof(*sde_crtc), GFP_KERNEL);
+	if (!sde_crtc)
+		return ERR_PTR(-ENOMEM);
+
+	crtc = &sde_crtc->base;
+	crtc->dev = dev;
+
+	mutex_init(&sde_crtc->crtc_lock);
+	spin_lock_init(&sde_crtc->spin_lock);
+	atomic_set(&sde_crtc->frame_pending, 0);
+
+	/* pre-populate the free pool of frame event work items */
+	INIT_LIST_HEAD(&sde_crtc->frame_event_list);
+	for (i = 0; i < ARRAY_SIZE(sde_crtc->frame_events); i++) {
+		INIT_LIST_HEAD(&sde_crtc->frame_events[i].list);
+		list_add(&sde_crtc->frame_events[i].list,
+				&sde_crtc->frame_event_list);
+		init_kthread_work(&sde_crtc->frame_events[i].work,
+				sde_crtc_frame_event_work);
+	}
+
+	/* NOTE(review): return value ignored here — confirm intended */
+	drm_crtc_init_with_planes(dev, crtc, plane, NULL, &sde_crtc_funcs);
+
+	drm_crtc_helper_add(crtc, &sde_crtc_helper_funcs);
+	plane->crtc = crtc;
+
+	/* save user friendly CRTC name for later */
+	snprintf(sde_crtc->name, SDE_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+	/* initialize output fence support */
+	sde_fence_init(&sde_crtc->output_fence, sde_crtc->name, crtc->base.id);
+
+	/* initialize debugfs support */
+	_sde_crtc_init_debugfs(sde_crtc, kms);
+
+	/* create CRTC properties */
+	msm_property_init(&sde_crtc->property_info, &crtc->base, dev,
+			priv->crtc_property, sde_crtc->property_data,
+			CRTC_PROP_COUNT, CRTC_PROP_BLOBCOUNT,
+			sizeof(struct sde_crtc_state));
+
+	sde_crtc_install_properties(crtc, kms->catalog);
+
+	/* Install color processing properties */
+	sde_cp_crtc_init(crtc);
+	sde_cp_crtc_install_properties(crtc);
+
+	SDE_DEBUG("%s: successfully initialized crtc\n", sde_crtc->name);
+	return crtc;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_crtc.h b/drivers/gpu/drm/msm/sde/sde_crtc.h
new file mode 100644
index 000000000000..be3ff7072f60
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_crtc.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (c) 2015-2017 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDE_CRTC_H_
+#define _SDE_CRTC_H_
+
+#include "drm_crtc.h"
+#include "msm_prop.h"
+#include "sde_fence.h"
+#include "sde_kms.h"
+#include "sde_core_perf.h"
+
+#define SDE_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define SDE_CRTC_FRAME_EVENT_SIZE 2
+
+/**
+ * struct sde_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * Each entry pairs a layer mixer with the CTL path that drives it.
+ * @hw_lm: LM HW Driver context
+ * @hw_ctl: CTL Path HW driver context
+ * @hw_dspp: DSPP HW driver context
+ * @encoder: Encoder attached to this lm & ctl
+ * @mixer_op_mode: mixer blending operation mode
+ * @flush_mask: mixer flush mask for ctl, mixer and pipe
+ */
+struct sde_crtc_mixer {
+ struct sde_hw_mixer *hw_lm;
+ struct sde_hw_ctl *hw_ctl;
+ struct sde_hw_dspp *hw_dspp;
+ struct drm_encoder *encoder;
+ u32 mixer_op_mode;
+ u32 flush_mask;
+};
+
+/**
+ * struct sde_crtc_frame_event: stores crtc frame event for crtc processing
+ * Events are statically allocated in struct sde_crtc and recycled through
+ * the crtc's frame_event_list; @work runs on a kthread worker.
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct sde_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/**
+ * struct sde_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @num_ctls : Number of ctl paths in use
+ * @num_mixers : Number of mixers in use
+ * @mixers : List of active mixers
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @property_info : Opaque structure for generic property support
+ * @property_data : Array of property data for generic property support
+ * @blob_info : Pointer to blob data exposed through crtc properties
+ * @output_fence : Output fence context used to signal frame completion
+ * @pending : Whether any page-flip events are pending signal
+ * @stage_cfg : H/w mixer stage configuration
+ * @debugfs_root : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @vblank_cb_time : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend : whether or not a suspend operation is in progress
+ * @enabled : whether the SDE CRTC is currently enabled. updated in the
+ * commit-thread, not state-swap time which is earlier, so
+ * safe to make decisions on during VBLANK on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features are active
+ * @dirty_list : list of color processing features are dirty
+ * @crtc_lock : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @new_perf : new performance committed to clock/bandwidth driver
+ */
+struct sde_crtc {
+ struct drm_crtc base;
+ char name[SDE_CRTC_NAME_SIZE];
+
+ /* HW Resources reserved for the crtc */
+ u32 num_ctls;
+ u32 num_mixers;
+ struct sde_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ struct msm_property_info property_info;
+ struct msm_property_data property_data[CRTC_PROP_COUNT];
+ struct drm_property_blob *blob_info;
+
+ /* output fence support */
+ struct sde_fence output_fence;
+ atomic_t pending;
+ struct sde_hw_stage_cfg stage_cfg;
+ struct dentry *debugfs_root;
+
+ u32 vblank_cb_count;
+ ktime_t vblank_cb_time;
+ bool vblank_requested;
+ bool suspend;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+
+ struct mutex crtc_lock;
+
+ atomic_t frame_pending;
+ struct sde_crtc_frame_event frame_events[SDE_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+
+ struct sde_core_perf_params cur_perf;
+ struct sde_core_perf_params new_perf;
+};
+
+#define to_sde_crtc(x) container_of(x, struct sde_crtc, base)
+
+/**
+ * struct sde_crtc_state - sde container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @connectors : Currently associated drm connectors
+ * @num_connectors: Number of associated drm connectors
+ * @is_rt : Whether or not the current commit contains RT connectors
+ * @intf_mode : Interface mode of the primary connector
+ * @property_values: Current crtc property values, indexed by
+ * enum msm_mdp_crtc_property
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @property_blobs: Reference pointers for blob properties
+ * @new_perf: new performance state being requested
+ * @is_shared: true if the attached connector is a shared display
+ * @shared_roi: roi of the shared display
+ */
+struct sde_crtc_state {
+ struct drm_crtc_state base;
+
+ struct drm_connector *connectors[MAX_CONNECTORS];
+ int num_connectors;
+ bool is_rt;
+ enum sde_intf_mode intf_mode;
+
+ uint64_t property_values[CRTC_PROP_COUNT];
+ uint64_t input_fence_timeout_ns;
+ struct drm_property_blob *property_blobs[CRTC_PROP_COUNT];
+
+ struct sde_core_perf_params new_perf;
+ bool is_shared;
+ struct sde_rect shared_roi;
+};
+
+#define to_sde_crtc_state(x) \
+ container_of(x, struct sde_crtc_state, base)
+
+/**
+ * sde_crtc_get_property - query integer value of crtc property
+ * @S: Pointer to crtc state
+ * @X: Property index, from enum msm_mdp_crtc_property
+ * Returns: Integer value of requested property; evaluates to 0 when @S is
+ * NULL or @X is out of range (note a valid property may also be 0)
+ */
+#define sde_crtc_get_property(S, X) \
+ ((S) && ((X) < CRTC_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+
+/* width, in pixels, driven by each mixer for the given mode */
+static inline int sde_crtc_mixer_width(struct sde_crtc *sde_crtc,
+ struct drm_display_mode *mode)
+{
+ if (!sde_crtc || !mode)
+ return 0;
+
+ /* dual-mixer topology splits the active width evenly */
+ if (sde_crtc->num_mixers == CRTC_DUAL_MIXERS)
+ return mode->hdisplay / CRTC_DUAL_MIXERS;
+
+ return mode->hdisplay;
+}
+
+/* per-mixer width for the crtc's current adjusted mode */
+static inline uint32_t get_crtc_split_width(struct drm_crtc *crtc)
+{
+ if (!crtc)
+ return 0;
+
+ return sde_crtc_mixer_width(to_sde_crtc(crtc),
+ &crtc->state->adjusted_mode);
+}
+
+/**
+ * sde_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int sde_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * sde_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void sde_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_prepare_commit - callback to prepare for output fences
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_prepare_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * sde_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void sde_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * sde_crtc_init - create a new crtc object
+ * @dev: sde device
+ * @plane: base plane
+ * @Return: new crtc object or error
+ */
+struct drm_crtc *sde_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * sde_crtc_cancel_pending_flip - complete flip for clients on lastclose
+ * @crtc: Pointer to drm crtc object
+ * @file: client to cancel's file handle
+ */
+void sde_crtc_cancel_pending_flip(struct drm_crtc *crtc, struct drm_file *file);
+
+/**
+ * sde_crtc_is_rt - query whether real time connectors are present on the crtc
+ * @crtc: Pointer to drm crtc structure
+ * Returns: True if a connector is present with real time constraints
+ */
+bool sde_crtc_is_rt(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_get_intf_mode - get primary interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum sde_intf_mode sde_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * sde_crtc_is_wb - check if writeback is primary output of this crtc
+ * @crtc: Pointer to crtc
+ * Returns: true if the primary interface mode is writeback line mode
+ */
+static inline bool sde_crtc_is_wb(struct drm_crtc *crtc)
+{
+ struct sde_crtc_state *cstate =
+ crtc ? to_sde_crtc_state(crtc->state) : NULL;
+
+ return cstate ? (cstate->intf_mode == INTF_MODE_WB_LINE) : false;
+}
+
+/**
+ * sde_crtc_is_nrt - check if primary output of this crtc is non-realtime client
+ * @crtc: Pointer to crtc
+ * Returns: true for non-realtime output; currently equivalent to the
+ * writeback check since writeback is the only NRT client here
+ */
+static inline bool sde_crtc_is_nrt(struct drm_crtc *crtc)
+{
+ return sde_crtc_is_wb(crtc);
+}
+
+/**
+ * sde_crtc_is_enabled - check if sde crtc is enabled or not
+ * @crtc: Pointer to crtc; NULL is treated as disabled
+ */
+static inline bool sde_crtc_is_enabled(struct drm_crtc *crtc)
+{
+ if (!crtc)
+ return false;
+
+ return crtc->enabled;
+}
+
+/**
+ * sde_crtc_update_blob_property - update blob property of a given crtc
+ * @crtc: Pointer to crtc
+ * @key: Pointer to key string
+ * @value: Signed 32 bit integer value
+ */
+void sde_crtc_update_blob_property(struct drm_crtc *crtc,
+ const char *key,
+ int32_t value);
+#endif /* _SDE_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.c b/drivers/gpu/drm/msm/sde/sde_encoder.c
new file mode 100644
index 000000000000..1a18f785a497
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.c
@@ -0,0 +1,1740 @@
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "sde_recovery_manager.h"
+#include "sde_kms.h"
+#include "drm_crtc.h"
+#include "drm_crtc_helper.h"
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_ctl.h"
+#include "sde_formats.h"
+#include "sde_encoder_phys.h"
+#include "sde_color_processing.h"
+#include "sde_trace.h"
+
+#define SDE_DEBUG_ENC(e, fmt, ...) SDE_DEBUG("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_ENC(e, fmt, ...) SDE_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+/* timeout in frames waiting for frame done */
+#define SDE_ENCODER_FRAME_DONE_TIMEOUT 60
+
+/* timeout in msecs */
+#define SDE_ENCODER_UNDERRUN_TIMEOUT 200
+/* underrun count threshold value */
+#define SDE_ENCODER_UNDERRUN_CNT_MAX 10
+/* 3 vsync time period in msec, report underrun */
+#define SDE_ENCODER_UNDERRUN_DELTA 50
+
+#define MISR_BUFF_SIZE 256
+
+/*
+ * Two to anticipate panels that can do cmd/vid dynamic switching
+ * plan is to create all possible physical encoder types, and switch between
+ * them at runtime
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+/* rgb to yuv color space conversion matrix */
+/*
+ * NOTE(review): row layout assumed to follow struct sde_csc_cfg:
+ * 3x3 coefficient matrix (S15.16), pre/post bias vectors, then
+ * pre/post clamp ranges -- confirm against the sde_hw definitions.
+ */
+static struct sde_csc_cfg sde_csc_10bit_convert[SDE_MAX_CSC] = {
+ [SDE_CSC_RGB2YUV_601L] = {
+ {
+ TO_S15D16(0x0083), TO_S15D16(0x0102), TO_S15D16(0x0032),
+ TO_S15D16(0xffb4), TO_S15D16(0xff6b), TO_S15D16(0x00e1),
+ TO_S15D16(0x00e1), TO_S15D16(0xff44), TO_S15D16(0xffdb),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+
+ [SDE_CSC_RGB2YUV_601FR] = {
+ {
+ TO_S15D16(0x0099), TO_S15D16(0x012d), TO_S15D16(0x003a),
+ TO_S15D16(0xffaa), TO_S15D16(0xff56), TO_S15D16(0x0100),
+ TO_S15D16(0x0100), TO_S15D16(0xff2a), TO_S15D16(0xffd6),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0000, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+
+ [SDE_CSC_RGB2YUV_709L] = {
+ {
+ TO_S15D16(0x005d), TO_S15D16(0x013a), TO_S15D16(0x0020),
+ TO_S15D16(0xffcc), TO_S15D16(0xff53), TO_S15D16(0x00e1),
+ TO_S15D16(0x00e1), TO_S15D16(0xff34), TO_S15D16(0xffeb),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+
+ [SDE_CSC_RGB2YUV_2020L] = {
+ {
+ TO_S15D16(0x0073), TO_S15D16(0x0129), TO_S15D16(0x001a),
+ TO_S15D16(0xffc1), TO_S15D16(0xff5e), TO_S15D16(0x00e0),
+ TO_S15D16(0x00e0), TO_S15D16(0xff32), TO_S15D16(0xffee),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0040, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0040, 0x03ac, 0x0040, 0x03c0, 0x0040, 0x03c0,},
+ },
+
+ [SDE_CSC_RGB2YUV_2020FR] = {
+ {
+ TO_S15D16(0x0086), TO_S15D16(0x015b), TO_S15D16(0x001e),
+ TO_S15D16(0xffb9), TO_S15D16(0xff47), TO_S15D16(0x0100),
+ TO_S15D16(0x0100), TO_S15D16(0xff15), TO_S15D16(0xffeb),
+ },
+ { 0x0, 0x0, 0x0,},
+ { 0x0, 0x0200, 0x0200,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ { 0x0, 0x3ff, 0x0, 0x3ff, 0x0, 0x3ff,},
+ },
+};
+
+/**
+ * struct sde_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client: Client handle to the bus scaling interface
+ * @display_num_of_h_tiles: Number of horizontal tiles of the display
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode. Optimization
+ * Only valid after enable. Cleared at disable.
+ * @hw_pp: Handle to the pingpong blocks used for the display. No.
+ * pingpong blocks can be different than num_phys_encs.
+ * @crtc_vblank_cb: Callback into the upper layer / CRTC for
+ * notification of the VBLANK
+ * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder create/destroy and access.
+ * @frame_busy_mask: Bitmask tracking which phys_enc we are still
+ * busy processing current command.
+ * Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @crtc_request_flip_cb: callback handler for requesting page-flip event
+ * @crtc_request_flip_cb_data: callback handler private data
+ * @crtc_frame_event: callback event
+ * @frame_done_timeout: frame done timeout in Hz
+ * @frame_done_timer: watchdog timer for frame done event
+ * @last_underrun_ts: variable to hold the last occurred underrun
+ * timestamp
+ * @underrun_cnt_dwork: underrun counter for delayed work
+ * @dwork: delayed work for deferring the reporting
+ * of underrun error
+ * @is_shared: true when this encoder drives a shared display
+ */
+struct sde_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+ uint32_t bus_scaling_client;
+
+ uint32_t display_num_of_h_tiles;
+
+ unsigned int num_phys_encs;
+ struct sde_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct sde_encoder_phys *cur_master;
+ struct sde_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+ void (*crtc_vblank_cb)(void *);
+ void *crtc_vblank_cb_data;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+ void (*crtc_request_flip_cb)(void *);
+ void *crtc_request_flip_cb_data;
+ u32 crtc_frame_event;
+ atomic_t frame_done_timeout;
+ struct timer_list frame_done_timer;
+ atomic_t last_underrun_ts;
+ atomic_t underrun_cnt_dwork;
+ struct delayed_work dwork;
+
+ bool is_shared;
+};
+
+#define to_sde_encoder_virt(x) container_of(x, struct sde_encoder_virt, base)
+
+/**
+ * sde_encoder_get_hw_resources - report hw blocks used by this encoder
+ * @drm_enc: Pointer to drm encoder
+ * @hw_res: Structure cleared and filled in with the resources in use
+ * @conn_state: Connector state being queried
+ */
+void sde_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct sde_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct sde_encoder_virt *sde_enc;
+ int idx;
+
+ if (!hw_res || !drm_enc || !conn_state) {
+ SDE_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+ drm_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ hw_res->display_num_of_h_tiles = sde_enc->display_num_of_h_tiles;
+
+ for (idx = 0; idx < sde_enc->num_phys_encs; idx++) {
+ struct sde_encoder_phys *phys_enc = sde_enc->phys_encs[idx];
+
+ if (phys_enc && phys_enc->ops.get_hw_resources)
+ phys_enc->ops.get_hw_resources(phys_enc, hw_res,
+ conn_state);
+ }
+}
+
+/**
+ * sde_encoder_destroy - destroy virtual encoder and contained phys encoders
+ * @drm_enc: Pointer to drm encoder to be destroyed
+ *
+ * Destroys each physical encoder, unregisters from the drm core, removes
+ * the debugfs hierarchy and frees the virtual encoder container.
+ */
+void sde_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ mutex_lock(&sde_enc->enc_lock);
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --sde_enc->num_phys_encs;
+ sde_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ /* every phys enc should have been destroyed by the loop above */
+ if (sde_enc->num_phys_encs)
+ SDE_ERROR_ENC(sde_enc, "expected 0 num_phys_encs not %d\n",
+ sde_enc->num_phys_encs);
+ sde_enc->num_phys_encs = 0;
+ mutex_unlock(&sde_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ debugfs_remove_recursive(sde_enc->debugfs_root);
+ mutex_destroy(&sde_enc->enc_lock);
+
+ kfree(sde_enc);
+}
+
+/**
+ * sde_encoder_helper_split_config - program MDP top split-pipe registers
+ * @phys_enc: Pointer to physical encoder being configured
+ * @interface: Interface block driven by this encoder
+ *
+ * Programs dual-pipe split (master/solo role) or ping-pong split (slave
+ * role) based on the encoder's split role and the connector's topology.
+ */
+void sde_encoder_helper_split_config(
+ struct sde_encoder_phys *phys_enc,
+ enum sde_intf interface)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct sde_hw_mdp *hw_mdptop;
+ enum sde_rm_topology_name topology;
+
+ if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ /* split is enabled for any non-solo role */
+ cfg.en = phys_enc->split_role != ENC_ROLE_SOLO;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ /* ping-pong split uses the interface itself as the slave id */
+ topology = sde_connector_get_topology_name(phys_enc->connector);
+ if (topology == SDE_RM_TOPOLOGY_PPSPLIT)
+ cfg.pp_split_slave = cfg.intf;
+ else
+ cfg.pp_split_slave = INTF_MAX;
+
+ if (phys_enc->split_role != ENC_ROLE_SLAVE) {
+ /* master/solo encoder */
+ SDE_DEBUG_ENC(sde_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ } else {
+ /*
+ * slave encoder
+ * - determine split index from master index,
+ * assume master is first pp
+ */
+ cfg.pp_split_index = sde_enc->hw_pp[0]->idx - PINGPONG_0;
+ SDE_DEBUG_ENC(sde_enc, "master using pp%d\n",
+ cfg.pp_split_index);
+
+ if (hw_mdptop->ops.setup_pp_split)
+ hw_mdptop->ops.setup_pp_split(hw_mdptop, &cfg);
+ }
+}
+
+/**
+ * sde_encoder_virt_atomic_check - drm atomic_check for the virtual encoder
+ * @drm_enc: Pointer to drm encoder
+ * @crtc_state: Proposed crtc state
+ * @conn_state: Proposed connector state
+ *
+ * Delegates the mode check to each physical encoder, programs connector
+ * topology control, and performs a test-only hardware reservation.
+ *
+ * Return: 0 on success, negative errno on unsupported mode or failed
+ * resource reservation
+ */
+static int sde_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ struct sde_connector *sde_conn = NULL;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ SDE_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != 0, crtc_state != 0, conn_state != 0);
+ return -EINVAL;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+ mode = &crtc_state->mode;
+ adj_mode = &crtc_state->adjusted_mode;
+ SDE_EVT32(DRMID(drm_enc));
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ else if (phys && phys->ops.mode_fixup)
+ if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+ ret = -EINVAL;
+
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ /* let the connector driver program topology control for this mode */
+ sde_conn = to_sde_connector(conn_state->connector);
+ if (sde_conn) {
+ if (sde_conn->ops.set_topology_ctl)
+ sde_conn->ops.set_topology_ctl(conn_state->connector,
+ adj_mode, sde_conn->display);
+ }
+
+ /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ if (!ret)
+ ret = sde_rm_reserve(&sde_kms->rm, drm_enc, crtc_state,
+ conn_state, true);
+
+ if (!ret)
+ drm_mode_set_crtcinfo(adj_mode, 0);
+
+ SDE_EVT32(DRMID(drm_enc), adj_mode->flags, adj_mode->private_flags);
+
+ return ret;
+}
+
+/**
+ * sde_encoder_virt_mode_set - drm mode_set callback for the virtual encoder
+ * @drm_enc: Pointer to drm encoder
+ * @mode: Requested display mode
+ * @adj_mode: Adjusted display mode from atomic_check
+ *
+ * Finds the attached connector, makes the final (non-test) hardware
+ * reservation, caches the assigned pingpong blocks and propagates the
+ * mode to each physical encoder.
+ */
+static void sde_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct sde_encoder_virt *sde_enc;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct list_head *connector_list;
+ struct drm_connector *conn = NULL, *conn_iter;
+ struct sde_connector *sde_conn = NULL;
+ struct sde_rm_hw_iter pp_iter;
+ int i = 0, ret;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+ connector_list = &sde_kms->dev->mode_config.connector_list;
+
+ SDE_EVT32(DRMID(drm_enc));
+
+ /* find the connector currently routed to this encoder */
+ list_for_each_entry(conn_iter, connector_list, head)
+ if (conn_iter->encoder == drm_enc)
+ conn = conn_iter;
+
+ if (!conn) {
+ SDE_ERROR_ENC(sde_enc, "failed to find attached connector\n");
+ return;
+ } else if (!conn->state) {
+ SDE_ERROR_ENC(sde_enc, "invalid connector state\n");
+ return;
+ }
+
+ sde_conn = to_sde_connector(conn);
+ if (sde_conn) {
+ if (sde_conn->ops.set_topology_ctl)
+ sde_conn->ops.set_topology_ctl(conn,
+ adj_mode, sde_conn->display);
+ }
+
+ /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+ /* NOTE(review): assumes drm_enc->crtc is non-NULL here -- confirm */
+ ret = sde_rm_reserve(&sde_kms->rm, drm_enc, drm_enc->crtc->state,
+ conn->state, false);
+ if (ret) {
+ SDE_ERROR_ENC(sde_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return;
+ }
+
+ /* cache the pingpong blocks assigned by the resource manager */
+ sde_rm_init_hw_iter(&pp_iter, drm_enc->base.id, SDE_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ sde_enc->hw_pp[i] = NULL;
+ if (!sde_rm_get_hw(&sde_kms->rm, &pp_iter))
+ break;
+ sde_enc->hw_pp[i] = (struct sde_hw_pingpong *) pp_iter.hw;
+ }
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys) {
+ /* shared-display encoders may run without a pingpong */
+ if (!sde_enc->hw_pp[i] && !sde_enc->is_shared) {
+ SDE_ERROR_ENC(sde_enc,
+ "invalid pingpong block for the encoder\n");
+ return;
+ }
+ phys->hw_pp = sde_enc->hw_pp[i];
+ phys->connector = conn->state->connector;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
+ }
+ }
+}
+
+/**
+ * sde_encoder_virt_enable - drm enable callback for the virtual encoder
+ * @drm_enc: Pointer to drm encoder
+ *
+ * Enables power resources, elects the master physical encoder, enables
+ * the slaves first and the master last.
+ */
+static void sde_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ int i = 0;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ SDE_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ SDE_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+ SDE_EVT32(DRMID(drm_enc));
+
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+
+ /* elect the master; enable slaves as they are found */
+ sde_enc->cur_master = NULL;
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys) {
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+
+ if (phys->ops.is_master && phys->ops.is_master(phys)) {
+ SDE_DEBUG_ENC(sde_enc,
+ "master is now idx %d\n", i);
+ sde_enc->cur_master = phys;
+ } else if (phys->ops.enable) {
+ phys->ops.enable(phys);
+ }
+ }
+ }
+
+ /* enable the master last so slaves are ready when it starts */
+ if (!sde_enc->cur_master)
+ SDE_ERROR("virt encoder has no master! num_phys %d\n", i);
+ else if (sde_enc->cur_master->ops.enable)
+ sde_enc->cur_master->ops.enable(sde_enc->cur_master);
+}
+
+/**
+ * sde_encoder_virt_disable - drm disable callback for the virtual encoder
+ * @drm_enc: Pointer to drm encoder
+ *
+ * Disables slave physical encoders, cancels any pending frame-done
+ * watchdog, disables the master, runs post_disable hooks, then releases
+ * hardware reservations and power resources.
+ */
+static void sde_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ int i = 0;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ SDE_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ SDE_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ SDE_DEBUG_ENC(sde_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ sde_kms = to_sde_kms(priv->kms);
+
+ SDE_EVT32(DRMID(drm_enc));
+
+ /* disable slaves first; the master is disabled separately below */
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys) {
+ if (phys->ops.disable && !phys->ops.is_master(phys))
+ phys->ops.disable(phys);
+ phys->connector = NULL;
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+ }
+ }
+
+ /* after phys waits for frame-done, should be no more frames pending */
+ if (atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+ SDE_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&sde_enc->frame_done_timer);
+ }
+
+ if (sde_enc->cur_master && sde_enc->cur_master->ops.disable)
+ sde_enc->cur_master->ops.disable(sde_enc->cur_master);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.post_disable)
+ phys->ops.post_disable(phys);
+ }
+
+ sde_enc->cur_master = NULL;
+ SDE_DEBUG_ENC(sde_enc, "cleared master\n");
+
+ sde_rm_release(&sde_kms->rm, drm_enc);
+
+ sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+}
+
+/* drm helper callbacks for the virtual encoder */
+static const struct drm_encoder_helper_funcs sde_encoder_helper_funcs = {
+ .mode_set = sde_encoder_virt_mode_set,
+ .disable = sde_encoder_virt_disable,
+ .enable = sde_encoder_virt_enable,
+ .atomic_check = sde_encoder_virt_atomic_check,
+};
+
+/* drm core callbacks for the virtual encoder */
+static const struct drm_encoder_funcs sde_encoder_funcs = {
+ .destroy = sde_encoder_destroy,
+};
+
+/* find the intf id matching @type/@controller_id, or INTF_MAX if none */
+static enum sde_intf sde_encoder_get_intf(struct sde_mdss_cfg *catalog,
+ enum sde_intf_type type, u32 controller_id)
+{
+ int i;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type != type)
+ continue;
+ if (catalog->intf[i].controller_id == controller_id)
+ return catalog->intf[i].id;
+ }
+
+ return INTF_MAX;
+}
+
+/* writeback blocks are indexed directly by controller id */
+static enum sde_wb sde_encoder_get_wb(struct sde_mdss_cfg *catalog,
+ enum sde_intf_type type, u32 controller_id)
+{
+ if (controller_id >= catalog->wb_count)
+ return WB_MAX;
+
+ return catalog->wb[controller_id].id;
+}
+
+/**
+ * sde_encoder_vblank_callback - vblank notification from a phys encoder
+ * @drm_enc: Pointer to drm encoder
+ * @phy_enc: Physical encoder that observed the vblank
+ *
+ * Forwards the vblank to the registered crtc callback under the encoder
+ * spinlock and bumps the per-phys vsync counter.
+ */
+static void sde_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct sde_encoder_phys *phy_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ SDE_ATRACE_BEGIN("encoder_vblank_callback");
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+ if (sde_enc->crtc_vblank_cb)
+ sde_enc->crtc_vblank_cb(sde_enc->crtc_vblank_cb_data);
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+ SDE_ATRACE_END("encoder_vblank_callback");
+}
+
+/**
+ * sde_encoder_underrun_callback - handle an interface underrun interrupt
+ * @drm_enc: Pointer to drm encoder
+ * @phy_enc: Physical encoder reporting the underrun
+ *
+ * Counts the underrun, schedules the deferred reporting work if it is not
+ * already pending, and emits trace/debug hooks.
+ */
+static void sde_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct sde_encoder_phys *phy_enc)
+{
+ struct sde_encoder_virt *sde_enc = NULL;
+
+ /* drm_enc is dereferenced below, so validate both arguments */
+ if (!drm_enc || !phy_enc)
+ return;
+
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ SDE_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+ SDE_EVT32(DRMID(drm_enc), atomic_read(&phy_enc->underrun_cnt));
+
+ /* schedule delayed work if it has not scheduled or executed earlier */
+ if ((!atomic_read(&sde_enc->last_underrun_ts)) &&
+ (!atomic_read(&sde_enc->underrun_cnt_dwork))) {
+ schedule_delayed_work(&sde_enc->dwork,
+ msecs_to_jiffies(SDE_ENCODER_UNDERRUN_TIMEOUT));
+ }
+
+ /* take snapshot of current underrun and increment the count */
+ atomic_set(&sde_enc->last_underrun_ts, jiffies);
+ atomic_inc(&sde_enc->underrun_cnt_dwork);
+
+ trace_sde_encoder_underrun(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ SDE_DBG_CTRL("stop_ftrace");
+ SDE_DBG_CTRL("panic_underrun");
+
+ SDE_ATRACE_END("encoder_underrun_callback");
+}
+
+/**
+ * sde_encoder_register_vblank_callback - register crtc vblank notification
+ * @drm_enc: Pointer to drm encoder
+ * @vbl_cb: Callback to invoke on vblank, or NULL to disable
+ * @vbl_data: Opaque pointer passed back to @vbl_cb
+ *
+ * Also enables/disables the vblank irq on every physical encoder to match
+ * whether a callback is installed.
+ */
+void sde_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*vbl_cb)(void *), void *vbl_data)
+{
+ struct sde_encoder_virt *sde_enc;
+ unsigned long lock_flags;
+ bool enable;
+ int i;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ /* only derive from drm_enc after it has been validated */
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ enable = vbl_cb ? true : false;
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+ SDE_EVT32(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+ sde_enc->crtc_vblank_cb = vbl_cb;
+ sde_enc->crtc_vblank_cb_data = vbl_data;
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < sde_enc->num_phys_encs; i++) {
+ struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+ if (phys && phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+/**
+ * sde_encoder_register_frame_event_callback - register frame-done notifier
+ * @drm_enc: Pointer to drm encoder
+ * @frame_event_cb: Callback for frame events, or NULL to unregister
+ * @frame_event_cb_data: Opaque pointer passed back to the callback
+ */
+void sde_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct sde_encoder_virt *sde_enc;
+ unsigned long lock_flags;
+ bool enable;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ /* only derive from drm_enc after it has been validated */
+ sde_enc = to_sde_encoder_virt(drm_enc);
+ enable = frame_event_cb ? true : false;
+
+ SDE_DEBUG_ENC(sde_enc, "\n");
+ SDE_EVT32(DRMID(drm_enc), enable, 0);
+
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+ sde_enc->crtc_frame_event_cb = frame_event_cb;
+ sde_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+/**
+ * sde_encoder_register_request_flip_callback - register page-flip request cb
+ * @drm_enc: Pointer to drm encoder
+ * @request_flip_cb: Callback to request a page flip, or NULL to unregister
+ * @request_flip_cb_data: Opaque pointer passed back to the callback
+ */
+void sde_encoder_register_request_flip_callback(struct drm_encoder *drm_enc,
+ void (*request_flip_cb)(void *),
+ void *request_flip_cb_data)
+{
+ struct sde_encoder_virt *sde_enc;
+ unsigned long lock_flags;
+
+ if (!drm_enc) {
+ SDE_ERROR("invalid encoder\n");
+ return;
+ }
+
+ /* only derive from drm_enc after it has been validated */
+ sde_enc = to_sde_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+ sde_enc->crtc_request_flip_cb = request_flip_cb;
+ sde_enc->crtc_request_flip_cb_data = request_flip_cb_data;
+ spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+/*
+ * sde_encoder_frame_done_callback - phys encoder reports frame completion.
+ * Clears the phys encoder's bit in frame_busy_mask; when the mask drains
+ * to zero, stops the frame-done watchdog and forwards the accumulated
+ * events to the CRTC callback.
+ */
+static void sde_encoder_frame_done_callback(
+		struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *ready_phys, u32 event)
+{
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+	unsigned int i;
+
+	/* One of the physical encoders has become idle */
+	for (i = 0; i < sde_enc->num_phys_encs; i++)
+		if (sde_enc->phys_encs[i] == ready_phys) {
+			clear_bit(i, sde_enc->frame_busy_mask);
+			sde_enc->crtc_frame_event |= event;
+			SDE_EVT32(DRMID(drm_enc), i,
+					sde_enc->frame_busy_mask[0]);
+		}
+
+	/* all phys encoders idle: cancel the timeout and notify the crtc */
+	if (!sde_enc->frame_busy_mask[0]) {
+		atomic_set(&sde_enc->frame_done_timeout, 0);
+		del_timer(&sde_enc->frame_done_timer);
+
+		if (sde_enc->crtc_frame_event_cb)
+			sde_enc->crtc_frame_event_cb(
+					sde_enc->crtc_frame_event_cb_data,
+					sde_enc->crtc_frame_event);
+	}
+}
+
+/**
+ * _sde_encoder_trigger_flush - trigger flush for a physical encoder
+ * drm_enc: Pointer to drm encoder structure
+ * phys: Pointer to physical encoder structure
+ * extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _sde_encoder_trigger_flush(struct drm_encoder *drm_enc,
+		struct sde_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+	struct sde_hw_ctl *ctl;
+	int pending_kickoff_cnt;
+
+	if (!drm_enc || !phys) {
+		SDE_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+				drm_enc != 0, phys != 0);
+		return;
+	}
+
+	/* flushing requires a CTL block with a trigger_flush op */
+	ctl = phys->hw_ctl;
+	if (!ctl || !ctl->ops.trigger_flush) {
+		SDE_ERROR("missing trigger cb\n");
+		return;
+	}
+
+	/* count this kickoff as pending before the HW flush is issued */
+	pending_kickoff_cnt = sde_encoder_phys_inc_pending(phys);
+	SDE_EVT32(DRMID(&to_sde_encoder_virt(drm_enc)->base),
+			phys->intf_idx, pending_kickoff_cnt);
+
+	/* merge caller-supplied bits (e.g. from split-flush) into the mask */
+	if (extra_flush_bits && ctl->ops.update_pending_flush)
+		ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+	/* carry over any flush bits owned by the bootloader splash */
+	phys->splash_flush_bits = phys->sde_kms->splash_info.flush_bits;
+
+	ctl->ops.trigger_flush(ctl);
+	SDE_EVT32(DRMID(drm_enc), ctl->idx);
+}
+
+/**
+ * _sde_encoder_trigger_start - trigger start for a physical encoder
+ * phys: Pointer to physical encoder structure
+ */
+static inline void _sde_encoder_trigger_start(struct sde_encoder_phys *phys)
+{
+	if (!phys) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	/* disabled encoders must not receive a HW start trigger */
+	if (phys->ops.trigger_start && phys->enable_state != SDE_ENC_DISABLED)
+		phys->ops.trigger_start(phys);
+}
+
+/*
+ * sde_encoder_helper_trigger_start - issue a CTL start on behalf of a phys
+ * encoder and log the CTL index (-1 when no trigger was issued).
+ */
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_ctl *ctl;
+	int ctl_idx = -1;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	if (ctl && ctl->ops.trigger_start) {
+		ctl->ops.trigger_start(ctl);
+		ctl_idx = ctl->idx;
+	}
+
+	/* NOTE(review): phys_enc is already known non-NULL here, so only
+	 * the phys_enc->parent part of this condition is meaningful
+	 */
+	if (phys_enc && phys_enc->parent)
+		SDE_EVT32(DRMID(phys_enc->parent), ctl_idx);
+}
+
+/*
+ * sde_encoder_helper_wait_event_timeout - wait for *cnt to reach zero,
+ * retrying spurious early wakeups until timeout_ms of wall time elapses.
+ * Returns the last wait_event_timeout() result (0 on final timeout,
+ * remaining jiffies otherwise).
+ */
+int sde_encoder_helper_wait_event_timeout(
+		int32_t drm_id,
+		int32_t hw_id,
+		wait_queue_head_t *wq,
+		atomic_t *cnt,
+		s64 timeout_ms)
+{
+	int rc = 0;
+	s64 expected_time = ktime_to_ms(ktime_get()) + timeout_ms;
+	/* renamed: a local named 'jiffies' shadowed the kernel global */
+	s64 wait_jiffies = msecs_to_jiffies(timeout_ms);
+	s64 time;
+
+	do {
+		rc = wait_event_timeout(*wq, atomic_read(cnt) == 0,
+				wait_jiffies);
+		time = ktime_to_ms(ktime_get());
+
+		SDE_EVT32(drm_id, hw_id, rc, time, expected_time,
+				atomic_read(cnt));
+	/* If we timed out, counter is valid and time is less, wait again */
+	} while (atomic_read(cnt) && (rc == 0) && (time < expected_time));
+
+	return rc;
+}
+
+/**
+ * _sde_encoder_kickoff_phys - handle physical encoder kickoff
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ * sde_enc: Pointer to virtual encoder structure
+ */
+static void _sde_encoder_kickoff_phys(struct sde_encoder_virt *sde_enc)
+{
+	struct sde_hw_ctl *ctl;
+	uint32_t i, pending_flush;
+	unsigned long lock_flags;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	pending_flush = 0x0;
+	sde_enc->crtc_frame_event = 0;
+
+	/* update pending counts and trigger kickoff ctl flush atomically */
+	spin_lock_irqsave(&sde_enc->enc_spinlock, lock_flags);
+
+	/* don't perform flush/start operations for slave encoders */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys || phys->enable_state == SDE_ENC_DISABLED)
+			continue;
+
+		ctl = phys->hw_ctl;
+		if (!ctl)
+			continue;
+
+		/* mark busy; cleared by sde_encoder_frame_done_callback() */
+		set_bit(i, sde_enc->frame_busy_mask);
+
+		/* encoders needing single flush defer to the master below */
+		if (!phys->ops.needs_single_flush ||
+				!phys->ops.needs_single_flush(phys))
+			_sde_encoder_trigger_flush(&sde_enc->base, phys, 0x0);
+		else if (ctl->ops.get_pending_flush)
+			pending_flush |= ctl->ops.get_pending_flush(ctl);
+	}
+
+	/* for split flush, combine pending flush masks and send to master */
+	if (pending_flush && sde_enc->cur_master) {
+		_sde_encoder_trigger_flush(
+				&sde_enc->base,
+				sde_enc->cur_master,
+				pending_flush);
+	}
+
+	/* HW flush has happened, request a flip complete event now */
+	if (sde_enc->crtc_request_flip_cb)
+		sde_enc->crtc_request_flip_cb(
+				sde_enc->crtc_request_flip_cb_data);
+
+	_sde_encoder_trigger_start(sde_enc->cur_master);
+
+	spin_unlock_irqrestore(&sde_enc->enc_spinlock, lock_flags);
+}
+
+/*
+ * sde_encoder_prepare_for_kickoff - pre-kickoff work for all phys encoders:
+ * per-phys prepare (may block on the previous kickoff), connector
+ * pre-kickoff, and a CDM CSC matrix refresh when the mode is YUV420 and
+ * the connector's required CSC type has changed (e.g. HDR toggles).
+ */
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	struct drm_connector *conn_mas = NULL;
+	unsigned int i;
+	enum sde_csc_type conn_csc;
+	struct drm_display_mode *mode;
+	struct sde_hw_cdm *hw_cdm;
+	int mode_is_yuv = 0;
+	int rc;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+	SDE_EVT32(DRMID(drm_enc));
+
+	/* prepare for next kickoff, may include waiting on previous kickoff */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (phys && phys->ops.prepare_for_kickoff)
+			phys->ops.prepare_for_kickoff(phys);
+	}
+
+	/* connector-level pre-kickoff runs only through the master */
+	if (sde_enc->cur_master && sde_enc->cur_master->connector) {
+		conn_mas = sde_enc->cur_master->connector;
+		rc = sde_connector_pre_kickoff(conn_mas);
+		if (rc)
+			SDE_ERROR_ENC(sde_enc,
+					"kickoff conn%d failed rc %d\n",
+					conn_mas->base.id,
+					rc);
+
+		for (i = 0; i < sde_enc->num_phys_encs; i++) {
+			phys = sde_enc->phys_encs[i];
+			/* NOTE(review): if phys is NULL, mode_is_yuv keeps
+			 * the previous iteration's value; benign because the
+			 * update below is guarded on phys as well
+			 */
+			if (phys) {
+				mode = &phys->cached_mode;
+				mode_is_yuv = (mode->private_flags &
+					MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420);
+			}
+			/**
+			 * Check the CSC matrix type to which the
+			 * CDM CSC matrix should be updated to based
+			 * on the connector HDR state
+			 */
+			conn_csc = sde_connector_get_csc_type(conn_mas);
+			if (phys && mode_is_yuv) {
+				if (phys->enc_cdm_csc != conn_csc) {
+					/* NOTE(review): hw_cdm and its
+					 * setup_csc_data op are dereferenced
+					 * without a NULL check here, unlike
+					 * sde_encoder_phys_setup_cdm() —
+					 * verify hw_cdm is guaranteed for
+					 * YUV modes
+					 */
+					hw_cdm = phys->hw_cdm;
+					rc = hw_cdm->ops.setup_csc_data(hw_cdm,
+					&sde_csc_10bit_convert[conn_csc]);
+
+					if (rc)
+						SDE_ERROR_ENC(sde_enc,
+						"CSC setup failed rc %d\n",
+						rc);
+					SDE_DEBUG_ENC(sde_enc,
+						"updating CSC %d to %d\n",
+						phys->enc_cdm_csc,
+						conn_csc);
+					phys->enc_cdm_csc = conn_csc;
+
+				}
+			}
+		}
+	}
+}
+
+/*
+ * sde_encoder_kickoff - arm the frame-done watchdog, trigger the
+ * consolidated flush/start, then let phys encoders run post-kickoff work.
+ */
+void sde_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct sde_encoder_phys *phys;
+	unsigned int i;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_ATRACE_BEGIN("encoder_kickoff");
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	/* watchdog window scales with refresh rate.
+	 * NOTE(review): assumes drm_enc->crtc is non-NULL and
+	 * adjusted_mode.vrefresh is non-zero here — a zero vrefresh would
+	 * divide by zero; confirm callers guarantee an active CRTC mode
+	 */
+	atomic_set(&sde_enc->frame_done_timeout,
+			SDE_ENCODER_FRAME_DONE_TIMEOUT * 1000 /
+			drm_enc->crtc->state->adjusted_mode.vrefresh);
+	mod_timer(&sde_enc->frame_done_timer, jiffies +
+		((atomic_read(&sde_enc->frame_done_timeout) * HZ) / 1000));
+
+	/* All phys encs are ready to go, trigger the kickoff */
+	_sde_encoder_kickoff_phys(sde_enc);
+
+	/* allow phys encs to handle any post-kickoff business */
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		phys = sde_enc->phys_encs[i];
+		if (phys && phys->ops.handle_post_kickoff)
+			phys->ops.handle_post_kickoff(phys);
+	}
+	SDE_ATRACE_END("encoder_kickoff");
+}
+
+/*
+ * _sde_encoder_status_show - debugfs 'status' file: one line per phys
+ * encoder with interface index, vsync/underrun counters and intf mode.
+ */
+static int _sde_encoder_status_show(struct seq_file *s, void *data)
+{
+	struct sde_encoder_virt *sde_enc;
+	int i;
+
+	if (!s || !s->private)
+		return -EINVAL;
+
+	sde_enc = s->private;
+
+	/* serialize against add/remove of phys encoders */
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys)
+			continue;
+
+		seq_printf(s, "intf:%d    vsync:%8d     underrun:%8d    ",
+				phys->intf_idx - INTF_0,
+				atomic_read(&phys->vsync_cnt),
+				atomic_read(&phys->underrun_cnt));
+
+		switch (phys->intf_mode) {
+		case INTF_MODE_VIDEO:
+			seq_puts(s, "mode: video\n");
+			break;
+		case INTF_MODE_CMD:
+			seq_puts(s, "mode: command\n");
+			break;
+		case INTF_MODE_WB_BLOCK:
+			seq_puts(s, "mode: wb block\n");
+			break;
+		case INTF_MODE_WB_LINE:
+			seq_puts(s, "mode: wb line\n");
+			break;
+		default:
+			seq_puts(s, "mode: ???\n");
+			break;
+		}
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+
+	return 0;
+}
+
+/* debugfs open: bind the seq_file show op to this encoder instance */
+static int _sde_encoder_debugfs_status_open(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _sde_encoder_status_show, inode->i_private);
+}
+
+/*
+ * _sde_set_misr_params - program MISR enable/frame-count into the phys
+ * encoder's misr_map, clamping the batch size and resetting collected
+ * CRC state on disable.
+ */
+static void _sde_set_misr_params(struct sde_encoder_phys *phys, u32 enable,
+		u32 frame_count)
+{
+	int j;
+
+	if (!phys->misr_map)
+		return;
+
+	phys->misr_map->enable = enable;
+
+	/* clamp to the CRC batch size; frame_count is unsigned, so the
+	 * original "frame_count <= 0" branch was unreachable (zero already
+	 * matched "<= SDE_CRC_BATCH_SIZE") and has been dropped
+	 */
+	if (frame_count <= SDE_CRC_BATCH_SIZE)
+		phys->misr_map->frame_count = frame_count;
+	else
+		phys->misr_map->frame_count = SDE_CRC_BATCH_SIZE;
+
+	if (!enable) {
+		/* disabling: reset the read index and collected CRC values */
+		phys->misr_map->last_idx = 0;
+		phys->misr_map->frame_count = 0;
+		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
+			phys->misr_map->crc_value[j] = 0;
+	}
+}
+
+/*
+ * _sde_encoder_misr_set - debugfs write handler: parse "<enable>
+ * <frame_count>" and apply MISR configuration to every phys encoder
+ * that supports it. Returns bytes consumed or a negative errno.
+ */
+static ssize_t _sde_encoder_misr_set(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_encoder *drm_enc;
+	int i = 0;
+	char buf[MISR_BUFF_SIZE + 1];
+	size_t buff_copy;
+	u32 enable, frame_count;
+
+	drm_enc = file->private_data;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	buff_copy = min_t(size_t, MISR_BUFF_SIZE, count);
+	/* a failed user copy is -EFAULT (the codes were swapped before) */
+	if (copy_from_user(buf, user_buf, buff_copy))
+		return -EFAULT;
+
+	buf[buff_copy] = 0; /* end of string */
+
+	/* malformed input is an invalid argument, not a fault */
+	if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+		return -EINVAL;
+
+	/* serialize against concurrent debugfs access and encoder setup */
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (!phys || !phys->misr_map || !phys->ops.setup_misr)
+			continue;
+
+		_sde_set_misr_params(phys, enable, frame_count);
+		phys->ops.setup_misr(phys, phys->misr_map);
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+	return count;
+}
+
+/*
+ * _sde_encoder_misr_read - debugfs read handler: dump collected MISR CRC
+ * values for every phys encoder. Returns bytes copied, 0 at EOF, or a
+ * negative errno.
+ */
+static ssize_t _sde_encoder_misr_read(
+		struct file *file,
+		char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_encoder_virt *sde_enc;
+	struct drm_encoder *drm_enc;
+	int i = 0, j = 0, len = 0;
+	char buf[512] = {'\0'};
+
+	if (*ppos)
+		return 0;
+
+	drm_enc = file->private_data;
+	sde_enc = to_sde_encoder_virt(drm_enc);
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+		struct sde_misr_params *misr_map;
+
+		if (!phys || !phys->misr_map)
+			continue;
+
+		misr_map = phys->misr_map;
+
+		/* scnprintf with the remaining space: the original passed
+		 * sizeof(buf) as the bound for every call, which could
+		 * overrun the buffer once len grew past zero
+		 */
+		len += scnprintf(buf + len, sizeof(buf) - len, "INTF%d\n", i);
+		for (j = 0; j < SDE_CRC_BATCH_SIZE; j++)
+			len += scnprintf(buf + len, sizeof(buf) - len, "%x\n",
+					misr_map->crc_value[j]);
+	}
+	/* drop the lock before any return path: the original returned with
+	 * enc_lock held on the two error paths below
+	 */
+	mutex_unlock(&sde_enc->enc_lock);
+
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;   /* increase offset */
+
+	return len;
+}
+
+/*
+ * _sde_encoder_init_debugfs - create the per-encoder debugfs directory
+ * ("encoderN") with 'status' and 'misr_data' entries. Failure to create
+ * debugfs nodes is deliberately non-fatal.
+ */
+static void _sde_encoder_init_debugfs(struct drm_encoder *drm_enc,
+	struct sde_encoder_virt *sde_enc, struct sde_kms *sde_kms)
+{
+	static const struct file_operations debugfs_status_fops = {
+		.open =		_sde_encoder_debugfs_status_open,
+		.read =		seq_read,
+		.llseek =	seq_lseek,
+		.release =	single_release,
+	};
+
+	static const struct file_operations debugfs_misr_fops = {
+		.open = simple_open,
+		.read = _sde_encoder_misr_read,
+		.write = _sde_encoder_misr_set,
+	};
+
+	char name[SDE_NAME_SIZE];
+
+	if (!drm_enc || !sde_enc || !sde_kms) {
+		SDE_ERROR("invalid encoder or kms\n");
+		return;
+	}
+
+	snprintf(name, SDE_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+	/* create overall sub-directory for the encoder */
+	sde_enc->debugfs_root = debugfs_create_dir(name,
+			sde_debugfs_get_root(sde_kms));
+	if (sde_enc->debugfs_root) {
+		/* don't error check these */
+		debugfs_create_file("status", S_IRUGO | S_IWUSR,
+			sde_enc->debugfs_root, sde_enc, &debugfs_status_fops);
+
+		/* note: 'misr_data' stores drm_enc (not sde_enc) as private */
+		debugfs_create_file("misr_data", S_IRUGO | S_IWUSR,
+			sde_enc->debugfs_root, drm_enc, &debugfs_misr_fops);
+
+	}
+}
+
+/*
+ * sde_encoder_virt_add_phys_encs - instantiate video- and/or command-mode
+ * physical encoders for one h-tile, per the display capability flags, and
+ * append them to sde_enc->phys_encs. Returns 0 or a negative errno.
+ */
+static int sde_encoder_virt_add_phys_encs(
+		u32 display_caps,
+		struct sde_encoder_virt *sde_enc,
+		struct sde_enc_phys_init_params *params)
+{
+	struct sde_encoder_phys *enc = NULL;
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	/*
+	 * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+	 * in this function, check up-front.
+	 */
+	if (sde_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+			ARRAY_SIZE(sde_enc->phys_encs)) {
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+			  sde_enc->num_phys_encs);
+		return -EINVAL;
+	}
+
+	if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+		enc = sde_encoder_phys_vid_init(params);
+
+		if (IS_ERR_OR_NULL(enc)) {
+			SDE_ERROR_ENC(sde_enc, "failed to init vid enc: %ld\n",
+				PTR_ERR(enc));
+			/* map a NULL return to -EINVAL, else keep PTR_ERR */
+			return enc == 0 ? -EINVAL : PTR_ERR(enc);
+		}
+
+		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+		++sde_enc->num_phys_encs;
+	}
+
+	if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+		enc = sde_encoder_phys_cmd_init(params);
+
+		if (IS_ERR_OR_NULL(enc)) {
+			SDE_ERROR_ENC(sde_enc, "failed to init cmd enc: %ld\n",
+				PTR_ERR(enc));
+			return enc == 0 ? -EINVAL : PTR_ERR(enc);
+		}
+
+		sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+		++sde_enc->num_phys_encs;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_encoder_virt_add_phys_enc_wb - instantiate one writeback physical
+ * encoder and append it to sde_enc->phys_encs. Returns 0 or -errno.
+ */
+static int sde_encoder_virt_add_phys_enc_wb(struct sde_encoder_virt *sde_enc,
+		struct sde_enc_phys_init_params *params)
+{
+	struct sde_encoder_phys *enc = NULL;
+
+	if (!sde_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	/* only one wb encoder is added here, so check for room for one */
+	if (sde_enc->num_phys_encs + 1 >= ARRAY_SIZE(sde_enc->phys_encs)) {
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+			  sde_enc->num_phys_encs);
+		return -EINVAL;
+	}
+
+	enc = sde_encoder_phys_wb_init(params);
+
+	if (IS_ERR_OR_NULL(enc)) {
+		SDE_ERROR_ENC(sde_enc, "failed to init wb enc: %ld\n",
+			PTR_ERR(enc));
+		/* map a NULL return to -EINVAL, else keep PTR_ERR */
+		return enc == 0 ? -EINVAL : PTR_ERR(enc);
+	}
+
+	sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+	++sde_enc->num_phys_encs;
+
+	return 0;
+}
+
+/*
+ * sde_encoder_virt_add_phys_enc_shd - instantiate one shared-display
+ * physical encoder, mark the virtual encoder as shared, and append it
+ * to sde_enc->phys_encs. Returns 0 or -errno.
+ */
+static int sde_encoder_virt_add_phys_enc_shd(struct sde_encoder_virt *sde_enc,
+		struct sde_enc_phys_init_params *params)
+{
+	struct sde_encoder_phys *enc = NULL;
+
+	if (sde_enc->num_phys_encs + 1 >= ARRAY_SIZE(sde_enc->phys_encs)) {
+		SDE_ERROR_ENC(sde_enc, "too many physical encoders %d\n",
+			  sde_enc->num_phys_encs);
+		return -EINVAL;
+	}
+
+	enc = sde_encoder_phys_shd_init(params);
+
+	/* note: unlike the vid/cmd/wb variants, this uses IS_ERR only —
+	 * presumably sde_encoder_phys_shd_init() never returns NULL
+	 */
+	if (IS_ERR(enc)) {
+		SDE_ERROR_ENC(sde_enc, "failed to init shd enc: %ld\n",
+			PTR_ERR(enc));
+		return PTR_ERR(enc);
+	}
+
+	sde_enc->is_shared = true;
+
+	sde_enc->phys_encs[sde_enc->num_phys_encs] = enc;
+	++sde_enc->num_phys_encs;
+
+	return 0;
+}
+
+/*
+ * sde_encoder_setup_display - map display info to a DRM encoder mode and
+ * create one physical encoder per horizontal tile (master first, then
+ * slaves). Returns 0 or a negative errno.
+ */
+static int sde_encoder_setup_display(struct sde_encoder_virt *sde_enc,
+				 struct sde_kms *sde_kms,
+				 struct msm_display_info *disp_info,
+				 int *drm_enc_mode)
+{
+	int ret = 0;
+	int i = 0;
+	enum sde_intf_type intf_type;
+	struct sde_encoder_virt_ops parent_ops = {
+		sde_encoder_vblank_callback,
+		sde_encoder_underrun_callback,
+		sde_encoder_frame_done_callback,
+	};
+	struct sde_enc_phys_init_params phys_params;
+
+	if (!sde_enc || !sde_kms) {
+		SDE_ERROR("invalid arg(s), enc %d kms %d\n",
+				sde_enc != 0, sde_kms != 0);
+		return -EINVAL;
+	}
+
+	memset(&phys_params, 0x0, sizeof(phys_params));
+	phys_params.sde_kms = sde_kms;
+	phys_params.parent = &sde_enc->base;
+	phys_params.parent_ops = parent_ops;
+	phys_params.enc_spinlock = &sde_enc->enc_spinlock;
+
+	SDE_DEBUG("\n");
+
+	/* translate connector type into DRM encoder mode + SDE intf type */
+	if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+		*drm_enc_mode = DRM_MODE_ENCODER_DSI;
+		intf_type = INTF_DSI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+		*drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+		intf_type = INTF_HDMI;
+	} else if (disp_info->intf_type == DRM_MODE_CONNECTOR_VIRTUAL) {
+		*drm_enc_mode = DRM_MODE_ENCODER_VIRTUAL;
+		intf_type = INTF_WB;
+	} else {
+		SDE_ERROR_ENC(sde_enc, "unsupported display interface type\n");
+		return -EINVAL;
+	}
+
+	WARN_ON(disp_info->num_of_h_tiles < 1);
+
+	sde_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+	SDE_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+	mutex_lock(&sde_enc->enc_lock);
+	for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+		/*
+		 * Left-most tile is at index 0, content is controller id
+		 * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+		 * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+		 */
+		u32 controller_id = disp_info->h_tile_instance[i];
+
+		if (disp_info->num_of_h_tiles > 1) {
+			if (i == 0)
+				phys_params.split_role = ENC_ROLE_MASTER;
+			else
+				phys_params.split_role = ENC_ROLE_SLAVE;
+		} else {
+			phys_params.split_role = ENC_ROLE_SOLO;
+		}
+
+		SDE_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+				i, controller_id, phys_params.split_role);
+
+		/* shared displays derive intf_idx directly from the id */
+		if (disp_info->capabilities & MSM_DISPLAY_CAP_SHARED) {
+			phys_params.wb_idx = WB_MAX;
+			phys_params.intf_idx = controller_id + INTF_0;
+		} else if (intf_type == INTF_WB) {
+			phys_params.intf_idx = INTF_MAX;
+			phys_params.wb_idx = sde_encoder_get_wb(
+					sde_kms->catalog,
+					intf_type, controller_id);
+			if (phys_params.wb_idx == WB_MAX) {
+				SDE_ERROR_ENC(sde_enc,
+					"could not get wb: type %d, id %d\n",
+					intf_type, controller_id);
+				ret = -EINVAL;
+			}
+		} else {
+			phys_params.wb_idx = WB_MAX;
+			phys_params.intf_idx = sde_encoder_get_intf(
+					sde_kms->catalog, intf_type,
+					controller_id);
+			if (phys_params.intf_idx == INTF_MAX) {
+				/* NOTE(review): message says "wb" but this
+				 * branch failed to get an intf — looks like
+				 * a copy-paste of the wb branch above
+				 */
+				SDE_ERROR_ENC(sde_enc,
+					"could not get wb: type %d, id %d\n",
+					intf_type, controller_id);
+				ret = -EINVAL;
+			}
+		}
+
+		if (!ret) {
+			if (disp_info->capabilities & MSM_DISPLAY_CAP_SHARED) {
+				ret = sde_encoder_virt_add_phys_enc_shd(sde_enc,
+						&phys_params);
+			} else if (intf_type == INTF_WB)
+				ret = sde_encoder_virt_add_phys_enc_wb(sde_enc,
+						&phys_params);
+			else
+				ret = sde_encoder_virt_add_phys_encs(
+						disp_info->capabilities,
+						sde_enc,
+						&phys_params);
+			if (ret)
+				SDE_ERROR_ENC(sde_enc,
+						"failed to add phys encs\n");
+		}
+	}
+	mutex_unlock(&sde_enc->enc_lock);
+
+
+	return ret;
+}
+
+/*
+ * sde_encoder_frame_done_timeout - frame-done watchdog expiry. If frames
+ * are still pending and the timeout was armed, report a frame error to
+ * the CRTC via the frame event callback.
+ */
+static void sde_encoder_frame_done_timeout(unsigned long data)
+{
+	struct drm_encoder *drm_enc = (struct drm_encoder *) data;
+	struct sde_encoder_virt *sde_enc = to_sde_encoder_virt(drm_enc);
+	struct msm_drm_private *priv;
+
+	if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+	priv = drm_enc->dev->dev_private;
+
+	/* nothing pending or nobody listening: spurious expiry */
+	if (!sde_enc->frame_busy_mask[0] || !sde_enc->crtc_frame_event_cb) {
+		SDE_DEBUG("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc),
+				sde_enc->frame_busy_mask[0], 0);
+		return;
+	/* xchg disarms the timeout so the error fires at most once */
+	} else if (!atomic_xchg(&sde_enc->frame_done_timeout, 0)) {
+		SDE_ERROR("enc%d invalid timeout\n", drm_enc->base.id);
+		SDE_EVT32(DRMID(drm_enc), 0, 1);
+		return;
+	}
+
+	SDE_EVT32(DRMID(drm_enc), 0, 2);
+	sde_enc->crtc_frame_event_cb(sde_enc->crtc_frame_event_cb_data,
+			SDE_ENCODER_FRAME_EVENT_ERROR);
+}
+
+/*
+ * sde_encoder_underrun_work_func - deferred underrun handling. Raises a
+ * recovery event when underruns exceed the threshold within the delta
+ * window, then resets the bookkeeping.
+ */
+static void sde_encoder_underrun_work_func(struct work_struct *work)
+{
+	struct sde_encoder_virt *sde_enc =
+			container_of(work, struct sde_encoder_virt,
+			dwork.work);
+
+	unsigned long delta, time;
+
+	/* NOTE(review): container_of() of a valid work item can't yield
+	 * NULL, so this check is effectively dead — kept as-is
+	 */
+	if (!sde_enc) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	delta = jiffies - atomic_read(&sde_enc->last_underrun_ts);
+	time = jiffies_to_msecs(delta);
+
+	/*
+	 * report underrun error when it exceeds the threshold count
+	 * and the occurrence of last underrun error is less than 3
+	 * vsync period.
+	 */
+	if (atomic_read(&sde_enc->underrun_cnt_dwork) >
+			SDE_ENCODER_UNDERRUN_CNT_MAX &&
+			time < SDE_ENCODER_UNDERRUN_DELTA) {
+		sde_recovery_set_events(SDE_UNDERRUN);
+	}
+
+	/* reset underrun last timestamp and counter */
+	atomic_set(&sde_enc->last_underrun_ts, 0);
+	atomic_set(&sde_enc->underrun_cnt_dwork, 0);
+}
+
+/*
+ * sde_encoder_init - allocate and initialize a virtual encoder: set up the
+ * per-tile physical encoders, register with the DRM core, and arm the
+ * frame-done watchdog / underrun work. Returns the drm encoder or ERR_PTR.
+ */
+struct drm_encoder *sde_encoder_init(
+		struct drm_device *dev,
+		struct msm_display_info *disp_info)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct sde_kms *sde_kms = to_sde_kms(priv->kms);
+	struct drm_encoder *drm_enc = NULL;
+	struct sde_encoder_virt *sde_enc = NULL;
+	int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+	int ret = 0;
+
+	sde_enc = kzalloc(sizeof(*sde_enc), GFP_KERNEL);
+	if (!sde_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	mutex_init(&sde_enc->enc_lock);
+	ret = sde_encoder_setup_display(sde_enc, sde_kms, disp_info,
+			&drm_enc_mode);
+	if (ret)
+		goto fail;
+
+	sde_enc->cur_master = NULL;
+	spin_lock_init(&sde_enc->enc_spinlock);
+	drm_enc = &sde_enc->base;
+	drm_encoder_init(dev, drm_enc, &sde_encoder_funcs, drm_enc_mode);
+	drm_encoder_helper_add(drm_enc, &sde_encoder_helper_funcs);
+
+	atomic_set(&sde_enc->frame_done_timeout, 0);
+	atomic_set(&sde_enc->last_underrun_ts, 0);
+	atomic_set(&sde_enc->underrun_cnt_dwork, 0);
+	setup_timer(&sde_enc->frame_done_timer, sde_encoder_frame_done_timeout,
+			(unsigned long) sde_enc);
+	INIT_DELAYED_WORK(&sde_enc->dwork, sde_encoder_underrun_work_func);
+
+	_sde_encoder_init_debugfs(drm_enc, sde_enc, sde_kms);
+
+	SDE_DEBUG_ENC(sde_enc, "created\n");
+
+	return drm_enc;
+
+fail:
+	SDE_ERROR("failed to create encoder\n");
+	if (drm_enc)
+		sde_encoder_destroy(drm_enc);
+	else
+		kfree(sde_enc);	/* fixes leak when setup_display fails */
+
+	return ERR_PTR(ret);
+}
+
+/*
+ * sde_encoder_wait_for_commit_done - block until each physical encoder
+ * reports the pending frame flushed, collecting MISR CRCs where enabled.
+ * Returns 0 on success, first phys encoder error otherwise.
+ */
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_enc)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i, ret = 0;
+
+	if (!drm_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+	sde_enc = to_sde_encoder_virt(drm_enc);
+	SDE_DEBUG_ENC(sde_enc, "\n");
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys && phys->ops.wait_for_commit_done) {
+			ret = phys->ops.wait_for_commit_done(phys);
+			if (ret)
+				return ret;
+		}
+
+		/* opportunistically read MISR CRCs after the frame lands */
+		if (phys && phys->ops.collect_misr)
+			if (phys->misr_map && phys->misr_map->enable)
+				phys->ops.collect_misr(phys, phys->misr_map);
+	}
+
+	return ret;
+}
+
+/*
+ * sde_encoder_get_intf_mode - report the interface mode, preferring the
+ * current master and falling back to the first valid phys encoder.
+ */
+enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+	struct sde_encoder_virt *sde_enc = NULL;
+	int i;
+
+	if (!encoder) {
+		SDE_ERROR("invalid encoder\n");
+		return INTF_MODE_NONE;
+	}
+	sde_enc = to_sde_encoder_virt(encoder);
+
+	if (sde_enc->cur_master)
+		return sde_enc->cur_master->intf_mode;
+
+	for (i = 0; i < sde_enc->num_phys_encs; i++) {
+		struct sde_encoder_phys *phys = sde_enc->phys_encs[i];
+
+		if (phys)
+			return phys->intf_mode;
+	}
+
+	return INTF_MODE_NONE;
+}
+
+/**
+ * sde_encoder_phys_setup_cdm - setup chroma down block
+ * @phys_enc:	Pointer to physical encoder
+ * @output_type: HDMI/WB
+ * @format:	Output format
+ * @roi:	Output size
+ */
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+		const struct sde_format *format, u32 output_type,
+		struct sde_rect *roi)
+{
+	struct drm_encoder *encoder = phys_enc->parent;
+	struct sde_encoder_virt *sde_enc = NULL;
+	struct sde_hw_cdm *hw_cdm = phys_enc->hw_cdm;
+	struct sde_hw_cdm_cfg *cdm_cfg = &phys_enc->cdm_cfg;
+	struct drm_connector *connector = phys_enc->connector;
+	int ret;
+	u32 csc_type = 0;
+
+	if (!encoder) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	sde_enc = to_sde_encoder_virt(encoder);
+
+	/* RGB output needs no chroma down-sampling: disable the CDM */
+	if (!SDE_FORMAT_IS_YUV(format)) {
+		SDE_DEBUG_ENC(sde_enc, "[cdm_disable fmt:%x]\n",
+				format->base.pixel_format);
+
+		if (hw_cdm && hw_cdm->ops.disable)
+			hw_cdm->ops.disable(hw_cdm);
+
+		return;
+	}
+
+	memset(cdm_cfg, 0, sizeof(struct sde_hw_cdm_cfg));
+
+	cdm_cfg->output_width = roi->w;
+	cdm_cfg->output_height = roi->h;
+	cdm_cfg->output_fmt = format;
+	cdm_cfg->output_type = output_type;
+	cdm_cfg->output_bit_depth = SDE_FORMAT_IS_DX(format) ?
+		CDM_CDWN_OUTPUT_10BIT : CDM_CDWN_OUTPUT_8BIT;
+
+	/* enable 10 bit logic */
+	switch (cdm_cfg->output_fmt->chroma_sample) {
+	case SDE_CHROMA_RGB:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case SDE_CHROMA_H2V1:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	case SDE_CHROMA_420:
+		cdm_cfg->h_cdwn_type = CDM_CDWN_COSITE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_OFFSITE;
+		break;
+	case SDE_CHROMA_H1V2:
+	default:
+		SDE_ERROR("unsupported chroma sampling type\n");
+		cdm_cfg->h_cdwn_type = CDM_CDWN_DISABLE;
+		cdm_cfg->v_cdwn_type = CDM_CDWN_DISABLE;
+		break;
+	}
+
+	SDE_DEBUG_ENC(sde_enc, "[cdm_enable:%d,%d,%X,%d,%d,%d,%d]\n",
+			cdm_cfg->output_width,
+			cdm_cfg->output_height,
+			cdm_cfg->output_fmt->base.pixel_format,
+			cdm_cfg->output_type,
+			cdm_cfg->output_bit_depth,
+			cdm_cfg->h_cdwn_type,
+			cdm_cfg->v_cdwn_type);
+
+	/**
+	 * Choose CSC matrix based on following rules:
+	 * 1. If connector supports quantization select,
+	 *    pick Full-Range for better quality.
+	 * 2. If non-CEA mode, then pick Full-Range as per CEA spec
+	 * 3. Otherwise, pick Limited-Range as all other CEA modes
+	 *    need a limited range
+	 */
+
+	if (output_type == CDM_CDWN_OUTPUT_HDMI) {
+		if (connector && connector->yuv_qs)
+			csc_type = SDE_CSC_RGB2YUV_601FR;
+		else if (connector &&
+			sde_connector_mode_needs_full_range(connector))
+			csc_type = SDE_CSC_RGB2YUV_601FR;
+		else
+			csc_type = SDE_CSC_RGB2YUV_601L;
+	} else if (output_type == CDM_CDWN_OUTPUT_WB) {
+		csc_type = SDE_CSC_RGB2YUV_601L;
+	}
+
+	/* program CSC matrix, then downscaler config, then enable — each
+	 * step aborts the sequence on failure
+	 */
+	if (hw_cdm && hw_cdm->ops.setup_csc_data) {
+		ret = hw_cdm->ops.setup_csc_data(hw_cdm,
+				&sde_csc_10bit_convert[csc_type]);
+		if (ret < 0) {
+			SDE_ERROR("failed to setup CSC %d\n", ret);
+			return;
+		}
+	}
+
+	/* Cache the CSC default matrix type */
+	phys_enc->enc_cdm_csc = csc_type;
+
+	if (hw_cdm && hw_cdm->ops.setup_cdwn) {
+		ret = hw_cdm->ops.setup_cdwn(hw_cdm, cdm_cfg);
+		if (ret < 0) {
+			SDE_ERROR("failed to setup CDM %d\n", ret);
+			return;
+		}
+	}
+
+	if (hw_cdm && hw_cdm->ops.enable) {
+		ret = hw_cdm->ops.enable(hw_cdm, cdm_cfg);
+		if (ret < 0) {
+			SDE_ERROR("failed to enable CDM %d\n", ret);
+			return;
+		}
+	}
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder.h b/drivers/gpu/drm/msm/sde/sde_encoder.h
new file mode 100644
index 000000000000..6b74dca13ae9
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder.h
@@ -0,0 +1,133 @@
+/*
+ * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SDE_ENCODER_H__
+#define __SDE_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+#define SDE_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define SDE_ENCODER_FRAME_EVENT_ERROR BIT(1)
+
+/**
+ * Encoder functions and data types
+ * @intfs:	Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @wbs:	Writebacks this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm:	Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles (e.g. DSI controllers)
+ *			driving the display
+ */
+struct sde_encoder_hw_resources {
+	enum sde_intf_mode intfs[INTF_MAX];
+	enum sde_intf_mode wbs[WB_MAX];
+	bool needs_cdm;
+	u32 display_num_of_h_tiles;
+};
+
+/**
+ * sde_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder: encoder pointer
+ * @hw_res: resource table to populate with encoder required resources
+ * @conn_state: report hw reqs based on this proposed connector state
+ */
+void sde_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct sde_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+
+/**
+ * sde_encoder_register_vblank_callback - provide callback to encoder that
+ * will be called on the next vblank.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister and disable IRQs
+ * @data: user data provided to callback
+ */
+void sde_encoder_register_vblank_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * sde_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void sde_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * sde_encoder_register_request_flip_callback - provide callback to encoder that
+ * will be called after HW flush is complete to request
+ * a page flip event from CRTC.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void sde_encoder_register_request_flip_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * sde_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ */
+void sde_encoder_prepare_for_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void sde_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_wait_nxt_committed - Wait for hardware to have flushed the
+ * current pending frames to hardware at a vblank or ctl_start
+ * Encoders will map this differently depending on irqs
+ * vid mode -> vsync_irq
+ * @encoder: encoder pointer
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int sde_encoder_wait_for_commit_done(struct drm_encoder *drm_encoder);
+
+/*
+ * sde_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum sde_intf_mode sde_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * sde_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @disp_info: Pointer to display information structure
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *sde_encoder_init(
+ struct drm_device *dev,
+ struct msm_display_info *disp_info);
+
+/**
+ * sde_encoder_destroy - destroy previously initialized virtual encoder
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void sde_encoder_destroy(struct drm_encoder *drm_enc);
+
+#endif /* __SDE_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys.h b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
new file mode 100644
index 000000000000..0e323f716d2c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys.h
@@ -0,0 +1,449 @@
+/*
+ * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_ENCODER_PHYS_H__
+#define __SDE_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "sde_kms.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_pingpong.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_top.h"
+#include "sde_hw_wb.h"
+#include "sde_hw_cdm.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+
+#define SDE_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsync for lowest refresh rate (24hz) */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
+
+/**
+ * enum sde_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum sde_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE
+};
+
+struct sde_encoder_phys;
+
+/**
+ * struct sde_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder
+ * completes last request frame.
+ */
+struct sde_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct sde_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct sde_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct sde_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct sde_encoder_phys_ops - Interface the physical encoders provide to
+ *	the containing virtual encoder.
+ * @is_master:			Whether this phys_enc is the current master
+ *				encoder. Can be switched at enable time. Based
+ *				on split_role and current mode (CMD/VID).
+ * @mode_fixup:			DRM Call. Fixup a DRM mode.
+ * @mode_set:			DRM Call. Set a DRM mode.
+ *				This likely caches the mode, for use at enable.
+ * @enable:			DRM Call. Enable a DRM mode.
+ * @disable:			DRM Call. Disable mode.
+ * @post_disable:		Cleanup performed after disable, e.g. clearing
+ *				the hardware interface configuration.
+ * @atomic_check:		DRM Call. Atomic check new DRM state.
+ * @destroy:			DRM Call. Destroy and release resources.
+ * @get_hw_resources:		Populate the structure with the hardware
+ *				resources that this phys_enc is using.
+ *				Expect no overlap between phys_encs.
+ * @control_vblank_irq:		Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done:	Wait for hardware to have flushed the
+ *				current pending frames to hardware
+ * @prepare_for_kickoff:	Do any work necessary prior to a kickoff
+ *				For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff:	Do any work necessary post-kickoff work
+ * @trigger_start:		Process start event on physical encoder
+ * @needs_single_flush:		Whether encoder slaves need to be flushed
+ * @setup_misr:			Sets up MISR, enable and disables based on sysfs
+ * @collect_misr:		Collects MISR data on frame update
+ */
+
+struct sde_encoder_phys_ops {
+	bool (*is_master)(struct sde_encoder_phys *encoder);
+	bool (*mode_fixup)(struct sde_encoder_phys *encoder,
+			const struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*mode_set)(struct sde_encoder_phys *encoder,
+			struct drm_display_mode *mode,
+			struct drm_display_mode *adjusted_mode);
+	void (*enable)(struct sde_encoder_phys *encoder);
+	void (*disable)(struct sde_encoder_phys *encoder);
+	void (*post_disable)(struct sde_encoder_phys *encoder);
+	int (*atomic_check)(struct sde_encoder_phys *encoder,
+			    struct drm_crtc_state *crtc_state,
+			    struct drm_connector_state *conn_state);
+	void (*destroy)(struct sde_encoder_phys *encoder);
+	void (*get_hw_resources)(struct sde_encoder_phys *encoder,
+			struct sde_encoder_hw_resources *hw_res,
+			struct drm_connector_state *conn_state);
+	int (*control_vblank_irq)(struct sde_encoder_phys *enc, bool enable);
+	int (*wait_for_commit_done)(struct sde_encoder_phys *phys_enc);
+	void (*prepare_for_kickoff)(struct sde_encoder_phys *phys_enc);
+	void (*handle_post_kickoff)(struct sde_encoder_phys *phys_enc);
+	void (*trigger_start)(struct sde_encoder_phys *phys_enc);
+	bool (*needs_single_flush)(struct sde_encoder_phys *phys_enc);
+
+	void (*setup_misr)(struct sde_encoder_phys *phys_encs,
+				struct sde_misr_params *misr_map);
+	void (*collect_misr)(struct sde_encoder_phys *phys_enc,
+				struct sde_misr_params *misr_map);
+};
+
+/**
+ * enum sde_enc_enable_state - current enabled state of the physical encoder
+ * @SDE_ENC_DISABLED: Encoder is disabled
+ * @SDE_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @SDE_ENC_ENABLED: Encoder is enabled
+ */
+enum sde_enc_enable_state {
+ SDE_ENC_DISABLED,
+ SDE_ENC_ENABLING,
+ SDE_ENC_ENABLED
+};
+
+/**
+ * enum sde_intr_idx - sde encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ */
+enum sde_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_RDPTR,
+ INTR_IDX_MAX,
+};
+
+/**
+ * struct sde_encoder_phys - physical encoder that drives a single INTF block
+ *	tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ *	phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent:		Pointer to the containing virtual encoder
+ * @connector:		If a mode is set, cached pointer to the active connector
+ * @ops:		Operations exposed to the virtual encoder
+ * @parent_ops:		Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop:		Hardware interface to the top registers
+ * @hw_ctl:		Hardware interface to the ctl registers
+ * @hw_cdm:		Hardware interface to the cdm registers
+ * @cdm_cfg:		Chroma-down hardware configuration
+ * @hw_pp:		Hardware interface to the ping pong registers
+ * @sde_kms:		Pointer to the sde_kms top level
+ * @cached_mode:	DRM mode cached at mode_set time, acted on in enable
+ * @misr_map:		Interface for setting and collecting MISR data
+ * @split_role:		Role to play in a split-panel configuration
+ * @intf_mode:		Interface mode
+ * @intf_idx:		Interface index on sde hardware
+ * @enc_cdm_csc:	Cached CSC type of CDM block
+ * @enc_spinlock:	Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state:	Enable state tracking
+ * @vblank_refcount:	Reference count of vblank request
+ * @vsync_cnt:		Vsync count for the physical encoder
+ * @underrun_cnt:	Underrun count for the physical encoder
+ * @pending_kickoff_cnt:	Atomic counter tracking the number of kickoffs
+ *				vs. the number of done/vblank irqs. Should hover
+ *				between 0-2 Incremented when a new kickoff is
+ *				scheduled. Decremented in irq handler
+ * @pending_kickoff_wq:		Wait queue for blocking until kickoff completes
+ * @splash_flush_bits:		Flush bits of splash reserved hardware pipes
+ */
+struct sde_encoder_phys {
+	struct drm_encoder *parent;
+	struct drm_connector *connector;
+	struct sde_encoder_phys_ops ops;
+	struct sde_encoder_virt_ops parent_ops;
+	struct sde_hw_mdp *hw_mdptop;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_hw_cdm *hw_cdm;
+	struct sde_hw_cdm_cfg cdm_cfg;
+	struct sde_hw_pingpong *hw_pp;
+	struct sde_kms *sde_kms;
+	struct drm_display_mode cached_mode;
+	struct sde_misr_params *misr_map;
+	enum sde_enc_split_role split_role;
+	enum sde_intf_mode intf_mode;
+	enum sde_intf intf_idx;
+	enum sde_csc_type enc_cdm_csc;
+	spinlock_t *enc_spinlock;
+	enum sde_enc_enable_state enable_state;
+	atomic_t vblank_refcount;
+	atomic_t vsync_cnt;
+	atomic_t underrun_cnt;
+	atomic_t pending_kickoff_cnt;
+	wait_queue_head_t pending_kickoff_wq;
+	uint32_t splash_flush_bits;
+};
+
+/**
+ * sde_encoder_phys_inc_pending - increment the pending kickoff counter
+ * @phys: Pointer to physical encoder structure
+ * Returns: the new (post-increment) pending kickoff count
+ */
+static inline int sde_encoder_phys_inc_pending(struct sde_encoder_phys *phys)
+{
+	return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
+/**
+ * struct sde_encoder_phys_vid - sub-class of sde_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @irq_idx: IRQ interface lookup index
+ * @irq_cb: interrupt callback
+ * @hw_intf: Hardware interface to the intf registers
+ */
+struct sde_encoder_phys_vid {
+ struct sde_encoder_phys base;
+ int irq_idx[INTR_IDX_MAX];
+ struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+ struct sde_hw_intf *hw_intf;
+};
+
+/**
+ * struct sde_encoder_phys_cmd - sub-class of sde_encoder_phys to handle command
+ *	mode specific operations
+ * @base:	Baseclass physical encoder structure
+ * @intf_idx:	Intf Block index used by this phys encoder
+ * @stream_sel:	Stream selection for multi-stream interfaces
+ * @irq_idx:	IRQ lookup indices, one slot per enum sde_intr_idx.
+ *	For CMD encoders, VBLANK is driven by the PP RD PTR done IRQ
+ *	and frame-done by the PP TX done IRQ.
+ * @irq_cb:	interrupt callbacks, one slot per enum sde_intr_idx
+ */
+struct sde_encoder_phys_cmd {
+	struct sde_encoder_phys base;
+	int intf_idx;
+	int stream_sel;
+	int irq_idx[INTR_IDX_MAX];
+	struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+};
+
+/**
+ * struct sde_encoder_phys_wb - sub-class of sde_encoder_phys to handle
+ *	writeback specific operations
+ * @base:		Baseclass physical encoder structure
+ * @hw_wb:		Hardware interface to the wb registers
+ * @irq_idx:		IRQ interface lookup index
+ * @irq_cb:		interrupt callback for the wbdone IRQ
+ * @wbdone_timeout:	Timeout value for writeback done in msec
+ * @bypass_irqreg:	Bypass irq register/unregister if non-zero
+ * @wbdone_complete:	for wbdone irq synchronization
+ * @wb_cfg:		Writeback hardware configuration
+ * @intf_cfg:		Interface hardware configuration
+ * @wb_roi:		Writeback region-of-interest
+ * @wb_fmt:		Writeback pixel format
+ * @frame_count:	Counter of completed writeback operations
+ * @kickoff_count:	Counter of issued writeback operations
+ * @aspace:		address space identifier for non-secure/secure domain
+ * @wb_dev:		Pointer to writeback device
+ * @start_time:		Start time of writeback latest request
+ * @end_time:		End time of writeback latest request
+ * @wb_name:		Name of this writeback device
+ * @debugfs_root:	Root entry of writeback debugfs
+ */
+struct sde_encoder_phys_wb {
+	struct sde_encoder_phys base;
+	struct sde_hw_wb *hw_wb;
+	int irq_idx;
+	struct sde_irq_callback irq_cb;
+	u32 wbdone_timeout;
+	u32 bypass_irqreg;
+	struct completion wbdone_complete;
+	struct sde_hw_wb_cfg wb_cfg;
+	struct sde_hw_intf_cfg intf_cfg;
+	struct sde_rect wb_roi;
+	const struct sde_format *wb_fmt;
+	u32 frame_count;
+	u32 kickoff_count;
+	struct msm_gem_address_space *aspace[SDE_IOMMU_DOMAIN_MAX];
+	struct sde_wb_device *wb_dev;
+	ktime_t start_time;
+	ktime_t end_time;
+#ifdef CONFIG_DEBUG_FS
+	char wb_name[SDE_ENCODER_NAME_MAX];
+	struct dentry *debugfs_root;
+#endif
+};
+
+/**
+ * struct sde_encoder_phys_shd - sub-class of sde_encoder_phys to handle shared
+ * display
+ * @base: Baseclass physical encoder structure
+ * @hw_lm: mixer hw block to overwrite base encoder
+ * @hw_ctl: ctl hw block to overwrite base encoder
+ * @irq_idx: IRQ interface lookup index
+ * @irq_cb: interrupt callback
+ * @num_mixers: Number of mixers available in base encoder
+ * @num_ctls: Number of ctls available in base encoder
+ */
+struct sde_encoder_phys_shd {
+ struct sde_encoder_phys base;
+ struct sde_hw_mixer *hw_lm[CRTC_DUAL_MIXERS];
+ struct sde_hw_ctl *hw_ctl[CRTC_DUAL_MIXERS];
+ int irq_idx[INTR_IDX_MAX];
+ struct sde_irq_callback irq_cb[INTR_IDX_MAX];
+ u32 num_mixers;
+ u32 num_ctls;
+};
+
+/**
+ * struct sde_enc_phys_init_params - initialization parameters for phys encs
+ * @sde_kms: Pointer to the sde_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @wb_idx: Writeback index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct sde_enc_phys_init_params {
+ struct sde_kms *sde_kms;
+ struct drm_encoder *parent;
+ struct sde_encoder_virt_ops parent_ops;
+ enum sde_enc_split_role split_role;
+ enum sde_intf intf_idx;
+ enum sde_wb wb_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * sde_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct sde_encoder_phys *sde_encoder_phys_vid_init(
+ struct sde_enc_phys_init_params *p);
+
+/**
+ * sde_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+ struct sde_enc_phys_init_params *p);
+
+/**
+ * sde_encoder_phys_wb_init - Construct a new writeback physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+#ifdef CONFIG_DRM_SDE_WB
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+ struct sde_enc_phys_init_params *p);
+#else
+static inline
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+ struct sde_enc_phys_init_params *p)
+{
+ return NULL;
+}
+#endif
+
+/**
+ * sde_encoder_phys_shd_init - Construct a new shared physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+#ifdef CONFIG_DRM_SDE_SHD
+struct sde_encoder_phys *sde_encoder_phys_shd_init(
+ struct sde_enc_phys_init_params *p);
+#else
+static inline
+struct sde_encoder_phys *sde_encoder_phys_shd_init(
+ struct sde_enc_phys_init_params *p)
+{
+ return NULL;
+}
+#endif
+
+void sde_encoder_phys_setup_cdm(struct sde_encoder_phys *phys_enc,
+ const struct sde_format *format, u32 output_type,
+ struct sde_rect *roi);
+
+/**
+ * sde_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void sde_encoder_helper_trigger_start(struct sde_encoder_phys *phys_enc);
+
+/**
+ * sde_encoder_helper_wait_event_timeout - wait for event with timeout
+ * taking into account that jiffies may jump between reads leading to
+ * incorrectly detected timeouts. Prevent failure in this scenario by
+ * making sure that elapsed time during wait is valid.
+ * @drm_id: drm object id for logging
+ * @hw_id: hw instance id for logging
+ * @wq: wait queue structure
+ * @cnt: atomic counter to wait on
+ * @timeout_ms: timeout value in milliseconds
+ */
+int sde_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ wait_queue_head_t *wq,
+ atomic_t *cnt,
+ s64 timeout_ms);
+
+
+/*
+ * Return the 3D blend mode this encoder should program: a solo encoder
+ * driving a dual-pipe-merge topology uses horizontal row interleave,
+ * all other configurations use no 3D blending.
+ */
+static inline enum sde_3d_blend_mode sde_encoder_helper_get_3d_blend_mode(
+		struct sde_encoder_phys *phys_enc)
+{
+	bool solo_dual_merge;
+
+	solo_dual_merge = (phys_enc->split_role == ENC_ROLE_SOLO) &&
+		(sde_connector_get_topology_name(phys_enc->connector) ==
+		 SDE_RM_TOPOLOGY_DUALPIPEMERGE);
+
+	return solo_dual_merge ? BLEND_3D_H_ROW_INT : BLEND_3D_NONE;
+}
+
+/**
+ * sde_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum sde_intf setting
+ */
+void sde_encoder_helper_split_config(
+ struct sde_encoder_phys *phys_enc,
+ enum sde_intf interface);
+
+#endif /* __SDE_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
new file mode 100644
index 000000000000..6baaa1652892
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_cmd.c
@@ -0,0 +1,726 @@
+/*
+ * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "sde_encoder_phys.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+
+#define SDE_DEBUG_CMDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_CMDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_sde_encoder_phys_cmd(x) \
+ container_of(x, struct sde_encoder_phys_cmd, base)
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels In the future, may want to allow panels to override
+ * these default values
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+/* An encoder is master unless it plays the slave role in a split config. */
+static inline bool sde_encoder_phys_cmd_is_master(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+/*
+ * Command mode needs no mode fixup; always accept the mode unchanged.
+ */
+static bool sde_encoder_phys_cmd_mode_fixup(
+		struct sde_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	if (!phys_enc)
+		return true;
+
+	SDE_DEBUG_CMDENC(to_sde_encoder_phys_cmd(phys_enc), "\n");
+	return true;
+}
+
+/*
+ * sde_encoder_phys_cmd_mode_set - cache the adjusted mode and re-acquire the
+ * CTL block previously reserved for this encoder by the resource manager.
+ *
+ * Fix: the original initialized cmd_enc/rm by dereferencing phys_enc in the
+ * declarations, before the NULL check ran — a NULL phys_enc would crash
+ * before the error path could report it. Derive them only after validation.
+ */
+static void sde_encoder_phys_cmd_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_rm *rm;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+
+	if (!phys_enc || !mode || !adj_mode) {
+		SDE_ERROR("invalid arg(s), enc %d mode %d adj_mode %d\n",
+				phys_enc != 0, mode != 0, adj_mode != 0);
+		return;
+	}
+
+	/* safe to dereference phys_enc only after the check above */
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	rm = &phys_enc->sde_kms->rm;
+
+	phys_enc->cached_mode = *adj_mode;
+	SDE_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	/* slave iterates one step further to reach its own CTL */
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		if (sde_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+}
+
+/*
+ * sde_encoder_phys_cmd_pp_tx_done_irq - pingpong frame-transfer-done handler
+ * @arg: opaque pointer, registered as the struct sde_encoder_phys_cmd
+ * @irq_idx: core irq index that fired (unused here)
+ *
+ * Runs in IRQ context. Notifies the parent virtual encoder that the frame
+ * completed, decrements the pending kickoff counter, and wakes any commit
+ * thread blocked waiting for the kickoff to finish.
+ */
+static void sde_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	unsigned long lock_flags;
+	int new_cnt;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+
+	/* notify all synchronous clients first, then asynchronous clients */
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+
+	/* add_unless(-1, 0) keeps the counter from going below zero if a
+	 * spurious or duplicate done irq fires
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0, new_cnt);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/*
+ * sde_encoder_phys_cmd_pp_rd_ptr_irq - pingpong read-pointer IRQ handler
+ * @arg: opaque pointer, registered as the struct sde_encoder_phys_cmd
+ * @irq_idx: core irq index that fired (unused here)
+ *
+ * For CMD panels the RD PTR irq stands in for VBLANK; forward it to the
+ * parent virtual encoder.
+ *
+ * Fix: the original computed &cmd_enc->base in the initializer, before the
+ * NULL check on cmd_enc ran; derive phys_enc only after validation.
+ */
+static void sde_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+}
+
+/*
+ * True only for the slave encoder of a pingpong-split topology; such a
+ * slave has no pingpong block of its own and skips most enable/disable work.
+ */
+static bool _sde_encoder_phys_is_ppsplit_slave(
+		struct sde_encoder_phys *phys_enc)
+{
+	enum sde_rm_topology_name topology;
+
+	if (!phys_enc)
+		return false;
+
+	topology = sde_connector_get_topology_name(phys_enc->connector);
+	return (topology == SDE_RM_TOPOLOGY_PPSPLIT) &&
+			(phys_enc->split_role == ENC_ROLE_SLAVE);
+}
+
+/*
+ * _sde_encoder_phys_cmd_wait_for_idle - wait for the previous kickoff to
+ * complete (pending_kickoff_cnt to drain) with a timeout.
+ *
+ * On timeout, re-reads the pingpong irq status: if the done irq is pending
+ * but was not serviced, runs the done handler by hand and treats the wait
+ * as successful; otherwise reports a frame error to the parent.
+ *
+ * Returns: 0 on success/recovered, -EWOULDBLOCK if the encoder is disabled,
+ * -ETIMEDOUT on a genuine timeout, -EINVAL on bad input.
+ *
+ * Fix: the original applied to_sde_encoder_phys_cmd() to phys_enc in the
+ * declaration, before the NULL check ran; derive it after validation.
+ */
+static int _sde_encoder_phys_cmd_wait_for_idle(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	u32 irq_status;
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	/* slave encoder doesn't enable for ppsplit */
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		return 0;
+
+	/* return EWOULDBLOCK since we know the wait isn't necessary */
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_CMDENC(cmd_enc, "encoder is disabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	/* wait for previous kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+	if (ret <= 0) {
+		/* timed out; check whether the irq fired but went unserviced */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				INTR_IDX_PINGPONG, true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_DEBUG_CMDENC(cmd_enc,
+					"pp:%d done but irq not triggered\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			/* run the done handler manually to recover */
+			sde_encoder_phys_cmd_pp_tx_done_irq(cmd_enc,
+					INTR_IDX_PINGPONG);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			SDE_ERROR_CMDENC(cmd_enc, "pp:%d kickoff timed out\n",
+					phys_enc->hw_pp->idx - PINGPONG_0);
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/*
+ * IRQ-context handler for interface underrun: forward the event to the
+ * parent virtual encoder if it registered a callback.
+ */
+static void sde_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!cmd_enc)
+		return;
+
+	phys_enc = &cmd_enc->base;
+	if (!phys_enc->parent_ops.handle_underrun_virt)
+		return;
+
+	phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent, phys_enc);
+}
+
+/*
+ * sde_encoder_phys_cmd_register_irq - look up, register, and enable one of
+ * this encoder's core irqs.
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_type: hardware interrupt type to look up
+ * @idx: slot in cmd_enc->irq_idx[] / irq_cb[] (enum sde_intr_idx)
+ * @irq_func: handler to invoke from IRQ context
+ * @irq_name: name used only for log messages
+ *
+ * Returns: 0 on success; negative errno on lookup/register/enable failure.
+ * On enable failure the callback is unregistered again so no partial
+ * registration is left behind.
+ *
+ * Fix: the original applied to_sde_encoder_phys_cmd() to phys_enc in the
+ * declaration, before the NULL check ran; derive it after validation.
+ */
+static int sde_encoder_phys_cmd_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int idx,
+	void (*irq_func)(void *, int), const char *irq_name)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	cmd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, phys_enc->hw_pp->idx);
+	if (cmd_enc->irq_idx[idx] < 0) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to lookup IRQ index for %s with pp=%d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0);
+		return -EINVAL;
+	}
+
+	cmd_enc->irq_cb[idx].func = irq_func;
+	cmd_enc->irq_cb[idx].arg = cmd_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_CMDENC(cmd_enc,
+				"failed to register IRQ callback %s\n",
+				irq_name);
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
+	if (ret) {
+		SDE_ERROR_CMDENC(cmd_enc,
+			"failed to enable IRQ for %s, pp %d, irq_idx %d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+		cmd_enc->irq_idx[idx] = -EINVAL;
+
+		/* Unregister callback on IRQ enable failure */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+		return ret;
+	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "registered IRQ %s for pp %d, irq_idx %d\n",
+			irq_name,
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+/*
+ * sde_encoder_phys_cmd_unregister_irq - disable and unregister the core irq
+ * previously registered in slot @idx.
+ * @phys_enc: Pointer to physical encoder structure
+ * @idx: slot in cmd_enc->irq_idx[] / irq_cb[] (enum sde_intr_idx)
+ *
+ * Returns: 0 on success, -EINVAL on NULL encoder.
+ *
+ * Fix: the original applied to_sde_encoder_phys_cmd() to phys_enc in the
+ * declaration, before the NULL check ran; derive it after validation.
+ */
+static int sde_encoder_phys_cmd_unregister_irq(
+		struct sde_encoder_phys *phys_enc, int idx)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	sde_core_irq_disable(phys_enc->sde_kms, &cmd_enc->irq_idx[idx], 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			cmd_enc->irq_idx[idx], &cmd_enc->irq_cb[idx]);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "unregistered IRQ for pp %d, irq_idx %d\n",
+			phys_enc->hw_pp->idx - PINGPONG_0,
+			cmd_enc->irq_idx[idx]);
+
+	return 0;
+}
+
+/*
+ * sde_encoder_phys_cmd_tearcheck_config - program the pingpong tearcheck
+ * block from the cached mode and the vsync clock rate, then enable it.
+ *
+ * Fix: the original dereferenced phys_enc (via container_of and
+ * &phys_enc->cached_mode) in the declarations, before the NULL check ran;
+ * derive those pointers only after validation.
+ */
+static void sde_encoder_phys_cmd_tearcheck_config(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	struct sde_hw_tear_check tc_cfg = { 0 };
+	struct drm_display_mode *mode;
+	bool tc_enable = true;
+	u32 vsync_hz;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	mode = &phys_enc->cached_mode;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+		!phys_enc->hw_pp->ops.enable_tearcheck) {
+		SDE_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+		return;
+	}
+
+	sde_kms = phys_enc->sde_kms;
+	priv = sde_kms->dev->dev_private;
+	/*
+	 * TE default: dsi byte clock calculated base on 70 fps;
+	 * around 14 ms to complete a kickoff cycle if te disabled;
+	 * vclk_line base on 60 fps; write is faster than read;
+	 * init == start == rdptr;
+	 *
+	 * vsync_count is ratio of MDP VSYNC clock frequency to LCD panel
+	 * frequency divided by the no. of rows (lines) in the LCDpanel.
+	 */
+	vsync_hz = sde_power_clk_get_rate(&priv->phandle, "vsync_clk");
+	if (!vsync_hz) {
+		SDE_DEBUG_CMDENC(cmd_enc, "invalid vsync clock rate\n");
+		return;
+	}
+
+	tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
+	tc_cfg.hw_vsync_mode = 1;
+
+	/*
+	 * By setting sync_cfg_height to near max register value, we essentially
+	 * disable sde hw generated TE signal, since hw TE will arrive first.
+	 * Only caveat is if due to error, we hit wrap-around.
+	 */
+	tc_cfg.sync_cfg_height = 0xFFF0;
+	tc_cfg.vsync_init_val = mode->vdisplay;
+	tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+	tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+	tc_cfg.start_pos = mode->vdisplay;
+	tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+		mode->vtotal, mode->vrefresh);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+		tc_cfg.rd_ptr_irq);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+		tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+	SDE_DEBUG_CMDENC(cmd_enc,
+		"tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+		phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+		tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+	phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+	phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+/*
+ * Program the CTL interface configuration for command mode and then set up
+ * the pingpong tearcheck block.
+ */
+static void sde_encoder_phys_cmd_pingpong_config(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		SDE_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+		return;
+	}
+
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+			phys_enc->hw_pp->idx - PINGPONG_0);
+	drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_CMD;
+	intf_cfg.intf = cmd_enc->intf_idx;
+	intf_cfg.stream_sel = cmd_enc->stream_sel;
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+
+	sde_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+/*
+ * Pingpong-split topologies share one flush across master and slave, so
+ * only they report that a single flush is needed.
+ */
+static bool sde_encoder_phys_cmd_needs_single_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	return sde_connector_get_topology_name(phys_enc->connector) ==
+			SDE_RM_TOPOLOGY_PPSPLIT;
+}
+
+/*
+ * sde_encoder_phys_cmd_control_vblank_irq - refcounted enable/disable of the
+ * PP RD PTR irq that stands in for vblank on command mode panels.
+ * @phys_enc: Pointer to physical encoder structure
+ * @enable: true to take a vblank reference, false to drop one
+ *
+ * Only the master registers; slaves return 0 without touching the refcount.
+ * The irq is registered on the 0->1 transition and unregistered on 1->0.
+ *
+ * Returns: 0 on success or for a slave, negative errno on irq failure.
+ * NOTE(review): on register failure the refcount stays incremented —
+ * callers currently treat that as fatal; confirm before relying on retry.
+ *
+ * Fix: the original applied to_sde_encoder_phys_cmd() to phys_enc in the
+ * declaration, before the NULL check ran; derive it after validation.
+ */
+static int sde_encoder_phys_cmd_control_vblank_irq(
+		struct sde_encoder_phys *phys_enc,
+		bool enable)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+
+	/* Slave encoders don't report vblank */
+	if (!sde_encoder_phys_cmd_is_master(phys_enc))
+		goto end;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "[%pS] enable=%d/%d\n",
+			__builtin_return_address(0),
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0,
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+				INTR_IDX_RDPTR,
+				sde_encoder_phys_cmd_pp_rd_ptr_irq,
+				"pp_rd_ptr");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				INTR_IDX_RDPTR);
+
+end:
+	if (ret)
+		SDE_ERROR_CMDENC(cmd_enc,
+				"control vblank irq error %d, enable %d\n",
+				ret, enable);
+
+	return ret;
+}
+
+/*
+ * sde_encoder_phys_cmd_enable - bring up the command mode encoder: configure
+ * split display and pingpong/tearcheck, register the pp_tx_done, vblank and
+ * underrun irqs (rolling back earlier registrations on failure), and stage
+ * the interface flush bits.
+ */
+static void sde_encoder_phys_cmd_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	struct sde_hw_ctl *ctl;
+	u32 flush_mask;
+	int ret;
+
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (phys_enc->enable_state == SDE_ENC_ENABLED) {
+		SDE_ERROR("already enabled\n");
+		return;
+	}
+
+	sde_encoder_helper_split_config(phys_enc, cmd_enc->intf_idx);
+
+	sde_encoder_phys_cmd_pingpong_config(phys_enc);
+
+	/* ppsplit slave has no pingpong of its own; skip irq registration */
+	if (_sde_encoder_phys_is_ppsplit_slave(phys_enc))
+		goto update_flush;
+
+	/* Both master and slave need to register for pp_tx_done */
+	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+			SDE_IRQ_TYPE_PING_PONG_COMP,
+			INTR_IDX_PINGPONG,
+			sde_encoder_phys_cmd_pp_tx_done_irq,
+			"pp_tx_done");
+	if (ret)
+		return;
+
+	ret = sde_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+	if (ret) {
+		/* roll back pp_tx_done registration */
+		sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				INTR_IDX_PINGPONG);
+		return;
+	}
+
+	ret = sde_encoder_phys_cmd_register_irq(phys_enc,
+			SDE_IRQ_TYPE_INTF_UNDER_RUN,
+			INTR_IDX_UNDERRUN,
+			sde_encoder_phys_cmd_underrun_irq,
+			"underrun");
+	if (ret) {
+		/* roll back vblank and pp_tx_done registrations */
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		sde_encoder_phys_cmd_unregister_irq(phys_enc,
+				INTR_IDX_PINGPONG);
+		return;
+	}
+
+update_flush:
+	ctl = phys_enc->hw_ctl;
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, cmd_enc->intf_idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+
+	SDE_DEBUG_CMDENC(cmd_enc, "update pending flush ctl %d flush_mask %x\n",
+			ctl->idx - CTL_0, flush_mask);
+}
+
+/*
+ * sde_encoder_phys_cmd_disable - disable the command mode physical encoder
+ * @phys_enc: pointer to the physical encoder
+ *
+ * Waits for any outstanding kickoff to idle, then unregisters the
+ * underrun/vblank/pingpong interrupts. A ppsplit slave registered no
+ * interrupts in enable, so it skips the teardown.
+ */
+static void sde_encoder_phys_cmd_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR_CMDENC(cmd_enc, "already disabled\n");
+		return;
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc)) {
+		ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+		if (ret) {
+			/* discard failed kickoffs and keep tearing down */
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			SDE_ERROR_CMDENC(cmd_enc,
+					"pp %d failed wait for idle, %d\n",
+					phys_enc->hw_pp->idx - PINGPONG_0, ret);
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->hw_pp->idx - PINGPONG_0, ret);
+		}
+
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_UNDERRUN);
+		sde_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+		sde_encoder_phys_cmd_unregister_irq(
+				phys_enc, INTR_IDX_PINGPONG);
+	}
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	/* refcount must be zero after the vblank irq has been released */
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR("enc:%d role:%d invalid vblank refcount %d\n",
+				phys_enc->parent->base.id,
+				phys_enc->split_role,
+				atomic_read(&phys_enc->vblank_refcount));
+}
+
+/*
+ * sde_encoder_phys_cmd_post_disable - post-disable cleanup
+ * @phys_enc: pointer to the physical encoder
+ *
+ * Clears the CTL interface configuration; a ppsplit slave shares the
+ * master's interface and must not clear it.
+ */
+static void sde_encoder_phys_cmd_post_disable(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid encoder %d\n", phys_enc != NULL);
+		return;
+	}
+
+	if (!_sde_encoder_phys_is_ppsplit_slave(phys_enc) &&
+			phys_enc->hw_ctl->ops.clear_intf_cfg)
+		phys_enc->hw_ctl->ops.clear_intf_cfg(phys_enc->hw_ctl);
+}
+
+/*
+ * sde_encoder_phys_cmd_destroy - destroy the command mode physical encoder
+ * @phys_enc: pointer to the physical encoder
+ *
+ * Frees the memory allocated by sde_encoder_phys_cmd_init().
+ */
+static void sde_encoder_phys_cmd_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	/* derive the container pointer only after validating phys_enc */
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	kfree(cmd_enc);
+}
+
+/*
+ * sde_encoder_phys_cmd_get_hw_resources - report hardware resource usage
+ * @phys_enc: pointer to the physical encoder
+ * @hw_res: resource table to fill in (interface entry set to command mode)
+ * @conn_state: connector state (unused here)
+ */
+static void sde_encoder_phys_cmd_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_cmd *cmd_enc;
+
+	/* validate both pointers before use; hw_res was written unchecked
+	 * and cmd_enc was derived before the phys_enc check
+	 */
+	if (!phys_enc || !hw_res) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	cmd_enc = to_sde_encoder_phys_cmd(phys_enc);
+	SDE_DEBUG_CMDENC(cmd_enc, "\n");
+	hw_res->intfs[cmd_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+/*
+ * sde_encoder_phys_cmd_wait_for_commit_done - no-op for command mode
+ * @phys_enc: pointer to the physical encoder
+ *
+ * Return: always 0
+ */
+static int sde_encoder_phys_cmd_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	/*
+	 * Since ctl_start "commits" the transaction to hardware, and the
+	 * tearcheck block takes it from there, there is no need to have a
+	 * separate wait for committed, a la wait-for-vsync in video mode
+	 */
+
+	return 0;
+}
+
+/*
+ * sde_encoder_phys_cmd_prepare_for_kickoff - serialize kickoff requests
+ * @phys_enc: pointer to the physical encoder
+ *
+ * If a previous kickoff is still outstanding, wait for it to complete
+ * before allowing the next one; on timeout the pending count is forced
+ * to zero so the new kickoff can proceed.
+ */
+static void sde_encoder_phys_cmd_prepare_for_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_cmd *cmd_enc =
+		to_sde_encoder_phys_cmd(phys_enc);
+	int ret;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+	SDE_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+	SDE_EVT32(DRMID(phys_enc->parent), phys_enc->hw_pp->idx - PINGPONG_0);
+
+	/*
+	 * Mark kickoff request as outstanding. If there are more than one,
+	 * outstanding, then we have to wait for the previous one to complete
+	 */
+	ret = _sde_encoder_phys_cmd_wait_for_idle(phys_enc);
+	if (ret) {
+		/* force pending_kickoff_cnt 0 to discard failed kickoff */
+		atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+		SDE_EVT32(DRMID(phys_enc->parent),
+				phys_enc->hw_pp->idx - PINGPONG_0);
+		SDE_ERROR("failed wait_for_idle: %d\n", ret);
+	}
+}
+
+/*
+ * sde_encoder_phys_cmd_init_ops - populate the command mode op table
+ * @ops: pointer to the encoder physical operation table to fill
+ */
+static void sde_encoder_phys_cmd_init_ops(
+		struct sde_encoder_phys_ops *ops)
+{
+	ops->is_master = sde_encoder_phys_cmd_is_master;
+	ops->mode_set = sde_encoder_phys_cmd_mode_set;
+	ops->mode_fixup = sde_encoder_phys_cmd_mode_fixup;
+	ops->enable = sde_encoder_phys_cmd_enable;
+	ops->disable = sde_encoder_phys_cmd_disable;
+	ops->post_disable = sde_encoder_phys_cmd_post_disable;
+	ops->destroy = sde_encoder_phys_cmd_destroy;
+	ops->get_hw_resources = sde_encoder_phys_cmd_get_hw_resources;
+	ops->control_vblank_irq = sde_encoder_phys_cmd_control_vblank_irq;
+	ops->wait_for_commit_done = sde_encoder_phys_cmd_wait_for_commit_done;
+	ops->prepare_for_kickoff = sde_encoder_phys_cmd_prepare_for_kickoff;
+	ops->trigger_start = sde_encoder_helper_trigger_start;
+	ops->needs_single_flush = sde_encoder_phys_cmd_needs_single_flush;
+}
+
+/*
+ * sde_encoder_phys_cmd_init - initialize a command mode physical encoder
+ * @p: init parameters (parent encoder, kms handle, interface index, ...)
+ *
+ * Allocates and initializes the encoder; ownership of the returned
+ * structure passes to the caller, released via the destroy op.
+ *
+ * Return: pointer to the new physical encoder, or ERR_PTR on failure
+ */
+struct sde_encoder_phys *sde_encoder_phys_cmd_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc = NULL;
+	struct sde_encoder_phys_cmd *cmd_enc = NULL;
+	struct sde_hw_mdp *hw_mdp;
+	int i, ret = 0;
+
+	/* p was previously dereferenced without validation */
+	if (!p || !p->sde_kms) {
+		SDE_ERROR("invalid init params\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	SDE_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+	cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+	if (!cmd_enc) {
+		ret = -ENOMEM;
+		SDE_ERROR("failed to allocate\n");
+		goto fail;
+	}
+	phys_enc = &cmd_enc->base;
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/*
+		 * PTR_ERR(NULL) is 0; map a NULL hw_mdp to a real error so
+		 * the caller never receives ERR_PTR(0) (i.e. NULL) here
+		 */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		SDE_ERROR("failed to get mdptop\n");
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+
+	cmd_enc->intf_idx = p->intf_idx;
+	phys_enc->intf_idx = p->intf_idx;
+
+	sde_encoder_phys_cmd_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_CMD;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	cmd_enc->stream_sel = 0;
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&cmd_enc->irq_cb[i].list);
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+
+	SDE_DEBUG_CMDENC(cmd_enc, "created\n");
+
+	return phys_enc;
+
+fail_mdp_init:
+	kfree(cmd_enc);
+fail:
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c
new file mode 100644
index 000000000000..116a057a3c87
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_shd.c
@@ -0,0 +1,1041 @@
+/*
+ * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm-shd:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <uapi/drm/sde_drm.h>
+
+#include "sde_encoder_phys.h"
+#include "sde_formats.h"
+#include "sde_hw_top.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_crtc.h"
+#include "sde_trace.h"
+#include "sde_shd.h"
+#include "sde_plane.h"
+
+#define SHD_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+
+#define SDE_ERROR_PHYS(p, fmt, ...) SDE_ERROR("enc%d intf%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ ##__VA_ARGS__)
+
+#define SDE_DEBUG_PHYS(p, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ ##__VA_ARGS__)
+
+#define CTL_SSPP_FLUSH_MASK 0xCC183F
+#define CTL_MIXER_FLUSH_MASK 0x1007C0
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+
+#define LM_BLEND0_OP 0x00
+
+/*
+ * to_sde_encoder_phys_shd - map a base phys encoder to its shd container
+ * @phys_enc: pointer to the embedded base structure
+ */
+static inline struct sde_encoder_phys_shd *to_sde_encoder_phys_shd(
+		struct sde_encoder_phys *phys_enc)
+{
+	return container_of(phys_enc, struct sde_encoder_phys_shd, base);
+}
+
+static DEFINE_SPINLOCK(hw_ctl_lock);
+
+/* shadow copy of a mixer's CTL_LAYER* register values plus the bit
+ * masks that delimit the portion of each register this display owns
+ */
+struct sde_shd_ctl_mixer_cfg {
+	u32 mixercfg;
+	u32 mixercfg_ext;
+	u32 mixercfg_ext2;
+
+	u32 mixercfg_mask;
+	u32 mixercfg_ext_mask;
+	u32 mixercfg_ext2_mask;
+};
+
+/* wraps the hardware CTL block so a shared display only touches its
+ * assigned blend-stage range; 'orig' keeps the unmodified ops
+ */
+struct sde_shd_hw_ctl {
+	struct sde_hw_ctl base;
+	struct shd_stage_range range;
+	struct sde_hw_ctl *orig;
+	u32 flush_mask;
+	struct sde_shd_ctl_mixer_cfg mixer_cfg[MAX_BLOCKS];
+	struct sde_encoder_phys_shd *shd_enc;
+};
+
+/* deferred per-stage blend configuration, applied at flush time */
+struct sde_shd_mixer_cfg {
+	uint32_t fg_alpha;
+	uint32_t bg_alpha;
+	uint32_t blend_op;
+	bool dirty;
+};
+
+/* wraps the hardware LM block, offsetting blend stages by 'range' */
+struct sde_shd_hw_mixer {
+	struct sde_hw_mixer base;
+	struct shd_stage_range range;
+	struct sde_hw_mixer *orig;
+	struct sde_shd_mixer_cfg cfg[SDE_STAGE_MAX];
+};
+
+/* a shared-display phys encoder always acts as master */
+static bool sde_encoder_phys_shd_is_master(struct sde_encoder_phys *phys_enc)
+{
+	return true;
+}
+
+/*
+ * sde_encoder_phys_shd_vblank_irq - vsync interrupt handler
+ * @arg: pointer to the physical encoder
+ * @irq_idx: interrupt index (unused)
+ *
+ * Forwards the vblank to the parent, decrements the pending kickoff
+ * count once hardware has accepted this display's flush bits, and wakes
+ * any thread waiting in the kickoff wait queue.
+ */
+static void sde_encoder_phys_shd_vblank_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys *phys_enc = arg;
+	struct sde_hw_ctl *hw_ctl;
+	struct sde_shd_hw_ctl *shd_ctl;
+	unsigned long lock_flags;
+	u32 flush_register = ~0;
+	int new_cnt = -1, old_cnt = -1;
+
+	if (!phys_enc)
+		return;
+
+	hw_ctl = phys_enc->hw_ctl;
+	if (!hw_ctl)
+		return;
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+
+	if (hw_ctl && hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	shd_ctl = container_of(hw_ctl, struct sde_shd_hw_ctl, base);
+
+	/*
+	 * When bootloader's splash is presented, as bootloader is concurrently
+	 * flushing hardware pipes, so when checking flush_register, we need
+	 * to care if the active bit in the flush_register matches with the
+	 * bootloader's splash pipe flush bits.
+	 */
+	if ((flush_register & shd_ctl->flush_mask &
+			~phys_enc->splash_flush_bits) == 0)
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
+
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+			old_cnt, new_cnt, flush_register);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/*
+ * _sde_encoder_phys_shd_register_irq - look up and register an irq callback
+ * @phys_enc: pointer to the physical encoder
+ * @intr_type: hardware interrupt type to look up
+ * @idx: slot in the encoder's irq_idx/irq_cb tables
+ * @irq_func: callback invoked when the interrupt fires
+ * @irq_name: name used in log messages
+ *
+ * Return: 0 on success, negative error code on lookup/registration failure
+ */
+static int _sde_encoder_phys_shd_register_irq(
+		struct sde_encoder_phys *phys_enc,
+		enum sde_intr_type intr_type, int idx,
+		void (*irq_func)(void *, int), const char *irq_name)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+	int ret = 0;
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+	shd_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, phys_enc->intf_idx);
+
+	if (shd_enc->irq_idx[idx] < 0) {
+		SDE_DEBUG_PHYS(phys_enc,
+			"failed to lookup IRQ index for %s type:%d\n", irq_name,
+			intr_type);
+		return -EINVAL;
+	}
+
+	shd_enc->irq_cb[idx].func = irq_func;
+	shd_enc->irq_cb[idx].arg = phys_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			shd_enc->irq_idx[idx], &shd_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_PHYS(phys_enc,
+			"failed to register IRQ callback for %s\n", irq_name);
+		/* invalidate the slot so a later unregister is harmless */
+		shd_enc->irq_idx[idx] = -EINVAL;
+		return ret;
+	}
+
+	SDE_DEBUG_PHYS(phys_enc, "registered irq %s idx: %d\n",
+			irq_name, shd_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+/*
+ * _sde_encoder_phys_shd_unregister_irq - unregister an irq callback slot
+ * @phys_enc: pointer to the physical encoder
+ * @idx: slot in the encoder's irq_idx/irq_cb tables
+ *
+ * Return: always 0
+ */
+static int _sde_encoder_phys_shd_unregister_irq(
+		struct sde_encoder_phys *phys_enc, int idx)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			shd_enc->irq_idx[idx], &shd_enc->irq_cb[idx]);
+
+	SDE_DEBUG_PHYS(phys_enc, "unregistered %d\n", shd_enc->irq_idx[idx]);
+
+	return 0;
+}
+
+/*
+ * _sde_shd_hw_ctl_clear_blendstages_in_range - compute clear masks for a LM
+ * @hw_ctl: shadow CTL wrapper
+ * @lm: layer mixer whose CTL_LAYER registers are inspected
+ * @handoff: true while bootloader splash is still on screen
+ * @splash_mask: CTL_LAYER bits owned by the splash (cleared on handoff)
+ * @splash_ext_mask: CTL_LAYER_EXT bits owned by the splash
+ *
+ * Reads the current CTL_LAYER/EXT/EXT2 values and, for every source pipe
+ * whose staged blend level falls inside this display's stage range,
+ * accumulates the register bits to clear. The per-pipe field layout
+ * (3 bits + 1 ext bit for VIG/RGB/DMA0-1, 4-bit fields for cursor and
+ * DMA2/3) mirrors the hardware register packing. Only the masks are
+ * stored here; the actual clear happens at flush time.
+ */
+static void _sde_shd_hw_ctl_clear_blendstages_in_range(
+		struct sde_shd_hw_ctl *hw_ctl, enum sde_lm lm,
+		bool handoff, u32 splash_mask, u32 splash_ext_mask)
+{
+	struct sde_hw_blk_reg_map *c = &hw_ctl->base.hw;
+	u32 mixercfg, mixercfg_ext;
+	u32 mixercfg_ext2;
+	u32 mask = 0, ext_mask = 0, ext2_mask = 0;
+	u32 start = hw_ctl->range.start + SDE_STAGE_0;
+	u32 end = start + hw_ctl->range.size;
+	u32 i;
+
+	mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
+	mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
+	mixercfg_ext2 = SDE_REG_READ(c, CTL_LAYER_EXT2(lm));
+
+	if (!mixercfg && !mixercfg_ext && !mixercfg_ext2)
+		goto end;
+
+	if (handoff) {
+		mask |= splash_mask;
+		ext_mask |= splash_ext_mask;
+	}
+
+	/* SSPP_VIG0 */
+	i = (mixercfg & 0x7) | ((mixercfg_ext & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= 0x7;
+		ext_mask |= 0x1;
+	}
+
+	/* SSPP_VIG1 */
+	i = ((mixercfg >> 3) & 0x7) | (((mixercfg_ext >> 2) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 3);
+		ext_mask |= (0x1 << 2);
+	}
+
+	/* SSPP_VIG2 */
+	i = ((mixercfg >> 6) & 0x7) | (((mixercfg_ext >> 4) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 6);
+		ext_mask |= (0x1 << 4);
+	}
+
+	/* SSPP_RGB0 */
+	i = ((mixercfg >> 9) & 0x7) | (((mixercfg_ext >> 8) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 9);
+		ext_mask |= (0x1 << 8);
+	}
+
+	/* SSPP_RGB1 */
+	i = ((mixercfg >> 12) & 0x7) | (((mixercfg_ext >> 10) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 12);
+		ext_mask |= (0x1 << 10);
+	}
+
+	/* SSPP_RGB2 */
+	i = ((mixercfg >> 15) & 0x7) | (((mixercfg_ext >> 12) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 15);
+		ext_mask |= (0x1 << 12);
+	}
+
+	/* SSPP_DMA0 */
+	i = ((mixercfg >> 18) & 0x7) | (((mixercfg_ext >> 16) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 18);
+		ext_mask |= (0x1 << 16);
+	}
+
+	/* SSPP_DMA1 */
+	i = ((mixercfg >> 21) & 0x7) | (((mixercfg_ext >> 18) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 21);
+		ext_mask |= (0x1 << 18);
+	}
+
+	/* SSPP_VIG3 */
+	i = ((mixercfg >> 26) & 0x7) | (((mixercfg_ext >> 6) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 26);
+		ext_mask |= (0x1 << 6);
+	}
+
+	/* SSPP_RGB3 */
+	i = ((mixercfg >> 29) & 0x7) | (((mixercfg_ext >> 14) & 1) << 3);
+	if (i > start && i <= end) {
+		mask |= (0x7 << 29);
+		ext_mask |= (0x1 << 14);
+	}
+
+	/* SSPP_CURSOR_0 */
+	i = (mixercfg_ext >> 20) & 0xF;
+	if (i > start && i <= end)
+		ext_mask |= (0xF << 20);
+
+	/* SSPP_CURSOR_1 */
+	i = (mixercfg_ext >> 26) & 0xF;
+	if (i > start && i <= end)
+		ext_mask |= (0xF << 26);
+
+	/* SSPP_DMA2 */
+	i = (mixercfg_ext2 >> 0) & 0xF;
+	if (i > start && i <= end)
+		ext2_mask |= (0xF << 0);
+
+	/* SSPP_DMA3 */
+	i = (mixercfg_ext2 >> 4) & 0xF;
+	if (i > start && i <= end)
+		ext2_mask |= (0xF << 4);
+
+end:
+	hw_ctl->mixer_cfg[lm].mixercfg_mask = mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext_mask = ext_mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext2_mask = ext2_mask;
+}
+
+/*
+ * _sde_shd_hw_ctl_clear_all_blendstages - compute clear masks for all LMs
+ * @ctx: base CTL context (embedded in a sde_shd_hw_ctl)
+ * @handoff: true while bootloader splash is still on screen
+ * @splash_mask: CTL_LAYER bits owned by the splash
+ * @splash_ext_mask: CTL_LAYER_EXT bits owned by the splash
+ */
+static void _sde_shd_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
+		bool handoff, u32 splash_mask, u32 splash_ext_mask)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	int i;
+
+	if (!ctx)
+		return;
+
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		int mixer_id = ctx->mixer_hw_caps[i].id;
+
+		_sde_shd_hw_ctl_clear_blendstages_in_range(hw_ctl, mixer_id,
+			handoff, splash_mask, splash_ext_mask);
+	}
+}
+
+/*
+ * _stage_offset - register offset of a blend stage within a mixer
+ * @ctx: layer mixer context
+ * @stage: blend stage to resolve
+ *
+ * Return: register offset, or -EINVAL for the base stage or a stage
+ * beyond the mixer's capabilities
+ */
+static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
+{
+	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
+
+	/* SDE_STAGE_BASE has no blend registers; reject out-of-range too */
+	if (stage == SDE_STAGE_BASE || stage > sblk->maxblendstages)
+		return -EINVAL;
+
+	return sblk->blendstage_base[stage - SDE_STAGE_0];
+}
+
+/*
+ * _sde_shd_hw_ctl_setup_blendstage - stage a mixer config for this display
+ * @ctx: base CTL context (embedded in a sde_shd_hw_ctl)
+ * @lm: target layer mixer
+ * @stage_cfg: staged pipe assignment, or NULL to clear this display's range
+ * @index: index into stage_cfg->stage
+ * @handoff: true while bootloader splash is still on screen
+ * @splash_mask: CTL_LAYER bits owned by the splash (preserved on handoff)
+ * @splash_ext_mask: CTL_LAYER_EXT bits owned by the splash
+ *
+ * Builds shadow CTL_LAYER/EXT/EXT2 values and masks, offset by this
+ * display's stage range; the registers are written later at flush time.
+ */
+static void _sde_shd_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
+		enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index,
+		bool handoff, u32 splash_mask, u32 splash_ext_mask)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	/*
+	 * mixercfg_ext2 was previously uninitialized, then |='d in the
+	 * switch below and stored at exit - undefined behavior; start at 0
+	 */
+	u32 mixercfg = 0, mixercfg_ext = 0, mix, ext, full, mixercfg_ext2 = 0;
+	u32 mask = 0, ext_mask = 0, ext2_mask = 0;
+	int i, j;
+	int stages;
+	int stage_offset = 0;
+	int pipes_per_stage;
+	struct sde_hw_blk_reg_map *c;
+
+	if (!ctx)
+		return;
+
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+
+	if (test_bit(SDE_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	_sde_shd_hw_ctl_clear_blendstages_in_range(hw_ctl, lm, handoff,
+			splash_mask, splash_ext_mask);
+
+	if (!stage_cfg)
+		goto exit;
+
+	mixercfg = CTL_MIXER_BORDER_OUT;
+	stage_offset = hw_ctl->range.start;
+	stages = hw_ctl->range.size;
+
+	c = &hw_ctl->base.hw;
+	if (handoff) {
+		/* keep the splash-owned bits live while handing off */
+		mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
+		mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
+
+		mixercfg &= splash_mask;
+		mixercfg_ext &= splash_ext_mask;
+
+		mask |= splash_mask;
+		ext_mask |= splash_ext_mask;
+		mixercfg |= CTL_MIXER_BORDER_OUT;
+	}
+
+	for (i = SDE_STAGE_0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + stage_offset + 1) & 0x7;
+		ext = (i + stage_offset) >= 7;
+		full = (i + stage_offset + 1) & 0xF;
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			switch (stage_cfg->stage[index][i][j]) {
+			case SSPP_VIG0:
+				mixercfg |= mix << 0;
+				mixercfg_ext |= ext << 0;
+				mask |= 0x7 << 0;
+				ext_mask |= 0x1 << 0;
+				break;
+			case SSPP_VIG1:
+				mixercfg |= mix << 3;
+				mixercfg_ext |= ext << 2;
+				mask |= 0x7 << 3;
+				ext_mask |= 0x1 << 2;
+				break;
+			case SSPP_VIG2:
+				mixercfg |= mix << 6;
+				mixercfg_ext |= ext << 4;
+				mask |= 0x7 << 6;
+				ext_mask |= 0x1 << 4;
+				break;
+			case SSPP_VIG3:
+				mixercfg |= mix << 26;
+				mixercfg_ext |= ext << 6;
+				mask |= 0x7 << 26;
+				ext_mask |= 0x1 << 6;
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				mask |= 0x7 << 9;
+				ext_mask |= 0x1 << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				mask |= 0x7 << 12;
+				ext_mask |= 0x1 << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				mask |= 0x7 << 15;
+				ext_mask |= 0x1 << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				mask |= 0x7 << 29;
+				ext_mask |= 0x1 << 14;
+				break;
+			case SSPP_DMA0:
+				mixercfg |= mix << 18;
+				mixercfg_ext |= ext << 16;
+				mask |= 0x7 << 18;
+				ext_mask |= 0x1 << 16;
+				break;
+			case SSPP_DMA1:
+				mixercfg |= mix << 21;
+				mixercfg_ext |= ext << 18;
+				mask |= 0x7 << 21;
+				ext_mask |= 0x1 << 18;
+				break;
+			case SSPP_DMA2:
+				/* NOTE(review): 'mix |= full' mutates mix
+				 * for later pipes in this stage iteration;
+				 * looks intentional for the 4-bit field but
+				 * worth confirming against downstream code
+				 */
+				mix |= full;
+				mixercfg_ext2 |= mix << 0;
+				ext2_mask |= 0xF << 0;
+				break;
+			case SSPP_DMA3:
+				mix |= full;
+				mixercfg_ext2 |= mix << 4;
+				ext2_mask |= 0xF << 4;
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= full << 20;
+				ext_mask |= 0xF << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= full << 26;
+				ext_mask |= 0xF << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	hw_ctl->mixer_cfg[lm].mixercfg_mask |= mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext_mask |= ext_mask;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext2_mask |= ext2_mask;
+exit:
+	hw_ctl->mixer_cfg[lm].mixercfg = mixercfg;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext = mixercfg_ext;
+	hw_ctl->mixer_cfg[lm].mixercfg_ext2 = mixercfg_ext2;
+}
+
+/*
+ * _sde_shd_hw_ctl_trigger_flush - merge shadow mixer config into hardware
+ * @ctx: base CTL context (embedded in a sde_shd_hw_ctl)
+ *
+ * Latches the pending flush mask (restricted to SSPP flush bits) and
+ * read-modify-writes each mixer's CTL_LAYER registers: bits owned by this
+ * display (per the stored masks) are replaced, all others preserved.
+ */
+static void _sde_shd_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	struct sde_hw_blk_reg_map *c;
+	u32 mixercfg, mixercfg_ext;
+	u32 mixercfg_ext2;
+	int i;
+
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+
+	hw_ctl->flush_mask = ctx->pending_flush_mask;
+
+	hw_ctl->flush_mask &= CTL_SSPP_FLUSH_MASK;
+
+	c = &ctx->hw;
+
+	for (i = 0; i < ctx->mixer_count; i++) {
+		int lm = ctx->mixer_hw_caps[i].id;
+
+		mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
+		mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
+		mixercfg_ext2 = SDE_REG_READ(c, CTL_LAYER_EXT2(lm));
+
+		mixercfg &= ~hw_ctl->mixer_cfg[lm].mixercfg_mask;
+		mixercfg_ext &= ~hw_ctl->mixer_cfg[lm].mixercfg_ext_mask;
+		mixercfg_ext2 &= ~hw_ctl->mixer_cfg[lm].mixercfg_ext2_mask;
+
+		mixercfg |= hw_ctl->mixer_cfg[lm].mixercfg;
+		mixercfg_ext |= hw_ctl->mixer_cfg[lm].mixercfg_ext;
+		mixercfg_ext2 |= hw_ctl->mixer_cfg[lm].mixercfg_ext2;
+
+		SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+		SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+	}
+}
+
+/*
+ * _sde_shd_setup_blend_config - record a blend config for deferred apply
+ * @ctx: base mixer context (embedded in a sde_shd_hw_mixer)
+ * @stage: blend stage relative to this display's range
+ * @fg_alpha: foreground alpha
+ * @bg_alpha: background alpha
+ * @blend_op: blend operation
+ *
+ * Caches the config (offset by range.start) and marks it dirty; it is
+ * written to hardware in _sde_shd_flush_hw_lm().
+ */
+static void _sde_shd_setup_blend_config(struct sde_hw_mixer *ctx,
+		uint32_t stage,
+		uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op)
+{
+	struct sde_shd_hw_mixer *hw_lm;
+	struct sde_shd_mixer_cfg *cfg;
+
+	if (!ctx)
+		return;
+
+	hw_lm = container_of(ctx, struct sde_shd_hw_mixer, base);
+
+	cfg = &hw_lm->cfg[stage + hw_lm->range.start];
+
+	cfg->fg_alpha = fg_alpha;
+	cfg->bg_alpha = bg_alpha;
+	cfg->blend_op = blend_op;
+	cfg->dirty = true;
+}
+
+/* intentionally empty: a shared display must not resize the mixer output */
+static void _sde_shd_setup_mixer_out(struct sde_hw_mixer *ctx,
+		struct sde_hw_mixer_cfg *cfg)
+{
+	/* do nothing */
+}
+
+/*
+ * _sde_shd_flush_hw_lm - apply deferred blend configs for this display
+ * @ctx: base mixer context (embedded in a sde_shd_hw_mixer)
+ *
+ * For each stage in this display's range, clears the blend-enable bit
+ * (BIT(16) of LM_BLEND0_OP) and writes any dirty cached blend config via
+ * the original mixer ops.
+ */
+static void _sde_shd_flush_hw_lm(struct sde_hw_mixer *ctx)
+{
+	struct sde_shd_hw_mixer *hw_lm;
+	struct sde_hw_blk_reg_map *c;
+	int stage_off, i;
+	u32 reset = BIT(16), val;
+	int start, end;
+
+	if (!ctx)
+		return;
+
+	/* only touch ctx after the NULL check (was '&ctx->hw' before it) */
+	c = &ctx->hw;
+	hw_lm = container_of(ctx, struct sde_shd_hw_mixer, base);
+
+	start = SDE_STAGE_0 + hw_lm->range.start;
+	end = start + hw_lm->range.size;
+	reset = ~reset;
+	for (i = start; i < end; i++) {
+		stage_off = _stage_offset(ctx, i);
+		if (WARN_ON(stage_off < 0))
+			return;
+
+		val = SDE_REG_READ(c, LM_BLEND0_OP + stage_off);
+		val &= reset;
+		SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, val);
+
+		if (hw_lm->cfg[i].dirty) {
+			hw_lm->orig->ops.setup_blend_config(ctx, i,
+					hw_lm->cfg[i].fg_alpha,
+					hw_lm->cfg[i].bg_alpha,
+					hw_lm->cfg[i].blend_op);
+			hw_lm->cfg[i].dirty = false;
+		}
+	}
+}
+
+/*
+ * _sde_shd_trigger_flush - flush this display's state to hardware
+ * @ctx: base CTL context (embedded in a sde_shd_hw_ctl)
+ *
+ * Under the global hw_ctl_lock: snapshots the splash flush bits, merges
+ * the shadow mixer config into the CTL registers, applies deferred LM
+ * blend configs, then triggers the original CTL flush.
+ */
+static void _sde_shd_trigger_flush(struct sde_hw_ctl *ctx)
+{
+	struct sde_shd_hw_ctl *hw_ctl;
+	struct sde_encoder_phys_shd *shd_enc;
+	struct sde_encoder_phys *phys;
+	struct sde_hw_blk_reg_map *c;
+	unsigned long lock_flags;
+	int i;
+
+	hw_ctl = container_of(ctx, struct sde_shd_hw_ctl, base);
+	shd_enc = hw_ctl->shd_enc;
+
+	c = &ctx->hw;
+
+	spin_lock_irqsave(&hw_ctl_lock, lock_flags);
+
+	phys = &shd_enc->base;
+	phys->splash_flush_bits = phys->sde_kms->splash_info.flush_bits;
+
+	_sde_shd_hw_ctl_trigger_flush(ctx);
+
+	for (i = 0; i < shd_enc->num_mixers; i++)
+		_sde_shd_flush_hw_lm(shd_enc->hw_lm[i]);
+
+	hw_ctl->orig->ops.trigger_flush(ctx);
+
+	spin_unlock_irqrestore(&hw_ctl_lock, lock_flags);
+}
+
+/*
+ * _sde_encoder_phys_shd_rm_reserve - borrow CTL/LM blocks from the base
+ * @phys_enc: shared-display physical encoder
+ * @display: shared display description (base encoder, stage range)
+ *
+ * Iterates the base encoder's reserved LM and CTL blocks, wraps each in
+ * a shadow structure whose ops restrict access to this display's stage
+ * range, and re-reserves the wrappers under this encoder via
+ * sde_rm_ext_blk_create_reserve(). Updates num_mixers/num_ctls.
+ */
+static void _sde_encoder_phys_shd_rm_reserve(
+		struct sde_encoder_phys *phys_enc,
+		struct shd_display *display)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+	struct sde_rm *rm;
+	struct sde_rm_hw_iter ctl_iter, lm_iter;
+	struct drm_encoder *encoder;
+	struct sde_shd_hw_ctl *hw_ctl;
+	struct sde_shd_hw_mixer *hw_lm;
+	int i;
+
+	encoder = display->base->encoder;
+	rm = &phys_enc->sde_kms->rm;
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	sde_rm_init_hw_iter(&ctl_iter, encoder->base.id, SDE_HW_BLK_CTL);
+	sde_rm_init_hw_iter(&lm_iter, encoder->base.id, SDE_HW_BLK_LM);
+
+	shd_enc->num_mixers = 0;
+	shd_enc->num_ctls = 0;
+
+	for (i = 0; i < CRTC_DUAL_MIXERS; i++) {
+		/* reserve layer mixer */
+		if (!sde_rm_get_hw(rm, &lm_iter))
+			break;
+		hw_lm = container_of(shd_enc->hw_lm[i],
+				struct sde_shd_hw_mixer, base);
+		/* start from a copy of the real LM, then override ops */
+		hw_lm->base = *(struct sde_hw_mixer *)lm_iter.hw;
+		hw_lm->range = display->stage_range;
+		hw_lm->orig = lm_iter.hw;
+		hw_lm->base.ops.setup_blend_config =
+			_sde_shd_setup_blend_config;
+		hw_lm->base.ops.setup_mixer_out =
+			_sde_shd_setup_mixer_out;
+
+		SHD_DEBUG("reserve LM%d %pK from enc %d to %d\n",
+			hw_lm->base.idx, hw_lm,
+			DRMID(encoder),
+			DRMID(phys_enc->parent));
+
+		sde_rm_ext_blk_create_reserve(rm,
+			SDE_HW_BLK_LM, 0,
+			&hw_lm->base, phys_enc->parent);
+		shd_enc->num_mixers++;
+
+		/* reserve ctl */
+		if (!sde_rm_get_hw(rm, &ctl_iter))
+			break;
+		hw_ctl = container_of(shd_enc->hw_ctl[i],
+				struct sde_shd_hw_ctl, base);
+		hw_ctl->base = *(struct sde_hw_ctl *)ctl_iter.hw;
+		hw_ctl->shd_enc = shd_enc;
+		hw_ctl->range = display->stage_range;
+		hw_ctl->orig = ctl_iter.hw;
+		hw_ctl->base.ops.clear_all_blendstages =
+			_sde_shd_hw_ctl_clear_all_blendstages;
+		hw_ctl->base.ops.setup_blendstage =
+			_sde_shd_hw_ctl_setup_blendstage;
+		hw_ctl->base.ops.trigger_flush =
+			_sde_shd_trigger_flush;
+
+		SHD_DEBUG("reserve CTL%d %pK from enc %d to %d\n",
+			hw_ctl->base.idx, hw_ctl,
+			DRMID(encoder),
+			DRMID(phys_enc->parent));
+
+		sde_rm_ext_blk_create_reserve(rm,
+			SDE_HW_BLK_CTL, 0,
+			&hw_ctl->base, phys_enc->parent);
+		shd_enc->num_ctls++;
+	}
+}
+
+/*
+ * _sde_encoder_phys_shd_rm_release - release borrowed CTL/LM wrappers
+ * @phys_enc: shared-display physical encoder
+ * @display: shared display description (unused here)
+ */
+static void _sde_encoder_phys_shd_rm_release(
+		struct sde_encoder_phys *phys_enc,
+		struct shd_display *display)
+{
+	struct sde_rm *rm;
+
+	rm = &phys_enc->sde_kms->rm;
+
+	sde_rm_ext_blk_destroy(rm, phys_enc->parent);
+}
+
+/*
+ * sde_encoder_phys_shd_mode_set - set mode and acquire hardware blocks
+ * @phys_enc: shared-display physical encoder
+ * @mode: requested display mode
+ * @adj_mode: adjusted mode, cached for later use
+ *
+ * Reserves wrapped CTL/LM blocks from the base display's encoder, then
+ * looks up this encoder's CTL block and stores it in phys_enc->hw_ctl.
+ */
+static void sde_encoder_phys_shd_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct drm_connector *connector;
+	struct sde_connector *sde_conn;
+	struct shd_display *display;
+	struct drm_encoder *encoder;
+	struct sde_rm_hw_iter iter;
+	struct sde_rm *rm;
+
+	SHD_DEBUG("%d\n", phys_enc->parent->base.id);
+
+	phys_enc->cached_mode = *adj_mode;
+
+	connector = phys_enc->connector;
+	if (!connector || connector->encoder != phys_enc->parent) {
+		SDE_ERROR("failed to find connector\n");
+		return;
+	}
+
+	sde_conn = to_sde_connector(connector);
+	display = sde_conn->display;
+	encoder = display->base->encoder;
+
+	_sde_encoder_phys_shd_rm_reserve(phys_enc, display);
+
+	rm = &phys_enc->sde_kms->rm;
+
+	sde_rm_init_hw_iter(&iter, DRMID(phys_enc->parent), SDE_HW_BLK_CTL);
+	if (sde_rm_get_hw(rm, &iter))
+		phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SHD_DEBUG("failed to init ctl, %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+}
+
+/*
+ * _sde_encoder_phys_shd_wait_for_vblank - wait for pending kickoff
+ * @phys_enc: shared-display physical encoder
+ * @notify: whether to emit frame-done/error events to the parent
+ *
+ * Waits for pending_kickoff_cnt to drop. On timeout, re-reads the vsync
+ * irq status: if it fired but the sw handler was late, the handler is
+ * invoked directly and the wait counts as success.
+ *
+ * Return: 0 on success, -ETIMEDOUT on real timeout, -EINVAL/-EWOULDBLOCK
+ * on invalid or disabled encoder
+ */
+static int _sde_encoder_phys_shd_wait_for_vblank(
+		struct sde_encoder_phys *phys_enc, bool notify)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+	u32 irq_status;
+	int ret = 0;
+
+	if (!phys_enc) {
+		pr_err("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	if (phys_enc->enable_state != SDE_ENC_ENABLED) {
+		SDE_ERROR("encoder not enabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	/* Wait for kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			phys_enc->intf_idx - INTF_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+
+	if (ret <= 0) {
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				INTR_IDX_VSYNC, true);
+		if (irq_status) {
+			/* irq fired but callback was delayed; run it now */
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->intf_idx - INTF_0);
+			SDE_DEBUG_PHYS(phys_enc, "done, irq not triggered\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+			sde_encoder_phys_shd_vblank_irq(phys_enc,
+					INTR_IDX_VSYNC);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					phys_enc->intf_idx - INTF_0);
+			SDE_ERROR_PHYS(phys_enc, "kickoff timed out\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+				phys_enc->parent, phys_enc,
+				SDE_ENCODER_FRAME_EVENT_DONE);
+		ret = 0;
+	}
+
+	return ret;
+}
+
+/* op-table wrapper: wait for vblank and notify frame events */
+static int sde_encoder_phys_shd_wait_for_vblank(
+		struct sde_encoder_phys *phys_enc)
+{
+	return _sde_encoder_phys_shd_wait_for_vblank(phys_enc, true);
+}
+
+/*
+ * sde_encoder_phys_shd_handle_post_kickoff - finish the enabling sequence
+ * @phys_enc: shared-display physical encoder
+ *
+ * The first kickoff after enable promotes ENABLING to ENABLED.
+ */
+void sde_encoder_phys_shd_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	if (phys_enc->enable_state == SDE_ENC_ENABLING) {
+		SDE_EVT32(DRMID(phys_enc->parent));
+		phys_enc->enable_state = SDE_ENC_ENABLED;
+	}
+}
+
+/*
+ * sde_encoder_phys_shd_control_vblank_irq - refcounted vsync irq control
+ * @phys_enc: shared-display physical encoder
+ * @enable: true to take a reference, false to drop one
+ *
+ * Registers the vsync interrupt on the first reference and unregisters
+ * it when the last reference is dropped.
+ *
+ * Return: 0 on success, negative error code otherwise
+ */
+static int sde_encoder_phys_shd_control_vblank_irq(
+		struct sde_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	struct sde_encoder_phys_shd *shd_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+
+	SHD_DEBUG("[%pS] %d enable=%d/%d\n",
+		__builtin_return_address(0), DRMID(phys_enc->parent),
+		enable, atomic_read(&phys_enc->vblank_refcount));
+
+	SDE_EVT32(DRMID(phys_enc->parent), enable,
+			atomic_read(&phys_enc->vblank_refcount));
+
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = _sde_encoder_phys_shd_register_irq(phys_enc,
+				SDE_IRQ_TYPE_INTF_VSYNC,
+				INTR_IDX_VSYNC,
+				sde_encoder_phys_shd_vblank_irq, "vsync_irq");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = _sde_encoder_phys_shd_unregister_irq(phys_enc,
+				INTR_IDX_VSYNC);
+
+	if (ret)
+		SHD_DEBUG("control vblank irq error %d, enable %d\n",
+				ret, enable);
+
+	return ret;
+}
+
+/*
+ * sde_encoder_phys_shd_enable - enable the shared-display physical encoder
+ * @phys_enc: shared-display physical encoder
+ *
+ * Takes a vblank irq reference and transitions DISABLED -> ENABLING;
+ * the state becomes ENABLED after the first post-kickoff.
+ */
+static void sde_encoder_phys_shd_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct drm_connector *connector;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	}
+
+	/* moved after the check: this dereferenced phys_enc->parent
+	 * before the pointer was validated
+	 */
+	SHD_DEBUG("%d\n", phys_enc->parent->base.id);
+
+	connector = phys_enc->connector;
+	if (!connector || connector->encoder != phys_enc->parent) {
+		SDE_ERROR("failed to find connector\n");
+		return;
+	}
+
+	sde_encoder_phys_shd_control_vblank_irq(phys_enc, true);
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED)
+		phys_enc->enable_state = SDE_ENC_ENABLING;
+}
+
+/*
+ * sde_encoder_phys_shd_disable - disable the shared-display encoder
+ * @phys_enc: shared-display physical encoder
+ *
+ * Clears this display's blend stages (respecting splash ownership),
+ * flushes, waits for the final vblank, drops the vblank irq reference
+ * and releases the borrowed CTL/LM blocks.
+ */
+static void sde_encoder_phys_shd_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_connector *sde_conn;
+	struct shd_display *display;
+	bool splash_enabled = false;
+	u32 mixer_mask = 0, mixer_ext_mask = 0;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		SDE_ERROR("invalid encoder/device\n");
+		return;
+	}
+
+	/* moved after the check: this dereferenced phys_enc->parent
+	 * before the pointer was validated
+	 */
+	SHD_DEBUG("%d\n", phys_enc->parent->base.id);
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("already disabled\n");
+		return;
+	}
+
+	sde_splash_get_mixer_mask(&phys_enc->sde_kms->splash_info,
+			&splash_enabled, &mixer_mask, &mixer_ext_mask);
+
+	_sde_shd_hw_ctl_clear_all_blendstages(phys_enc->hw_ctl,
+			splash_enabled, mixer_mask, mixer_ext_mask);
+
+	_sde_shd_trigger_flush(phys_enc->hw_ctl);
+
+	_sde_encoder_phys_shd_wait_for_vblank(phys_enc, false);
+
+	sde_encoder_phys_shd_control_vblank_irq(phys_enc, false);
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	if (!phys_enc->connector)
+		return;
+
+	sde_conn = to_sde_connector(phys_enc->connector);
+	display = sde_conn->display;
+
+	_sde_encoder_phys_shd_rm_release(phys_enc, display);
+}
+
+/*
+ * sde_encoder_phys_shd_destroy - free the shared-display encoder
+ * @phys_enc: shared-display physical encoder
+ */
+static void sde_encoder_phys_shd_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_shd *shd_enc;
+
+	if (!phys_enc)
+		return;
+
+	/* derive the container pointer only after validating phys_enc */
+	shd_enc = to_sde_encoder_phys_shd(phys_enc);
+	kfree(shd_enc);
+}
+
+/**
+ * sde_encoder_phys_shd_init_ops - initialize shared display operations
+ * @ops: Pointer to encoder operation table
+ */
+static void sde_encoder_phys_shd_init_ops(struct sde_encoder_phys_ops *ops)
+{
+	ops->is_master = sde_encoder_phys_shd_is_master;
+	ops->mode_set = sde_encoder_phys_shd_mode_set;
+	ops->enable = sde_encoder_phys_shd_enable;
+	ops->disable = sde_encoder_phys_shd_disable;
+	ops->destroy = sde_encoder_phys_shd_destroy;
+	ops->control_vblank_irq = sde_encoder_phys_shd_control_vblank_irq;
+	ops->wait_for_commit_done = sde_encoder_phys_shd_wait_for_vblank;
+	ops->handle_post_kickoff = sde_encoder_phys_shd_handle_post_kickoff;
+}
+
+/*
+ * sde_encoder_phys_shd_init - initialize a shared-display physical encoder
+ * @p: init parameters (parent encoder, kms handle, interface index, ...)
+ *
+ * Pre-allocates shadow CTL and LM wrappers for up to CRTC_DUAL_MIXERS
+ * pipes; the wrappers are bound to real hardware blocks in mode_set.
+ *
+ * Return: pointer to the new physical encoder, or ERR_PTR on failure
+ */
+struct sde_encoder_phys *sde_encoder_phys_shd_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc;
+	struct sde_encoder_phys_shd *shd_enc;
+	struct sde_shd_hw_ctl *hw_ctl;
+	struct sde_shd_hw_mixer *hw_lm;
+	int ret = 0, i;
+
+	SHD_DEBUG("\n");
+
+	if (!p || !p->parent) {
+		SDE_ERROR("invalid params\n");
+		ret = -EINVAL;
+		goto fail_alloc;
+	}
+
+	shd_enc = kzalloc(sizeof(*shd_enc), GFP_KERNEL);
+	if (!shd_enc) {
+		ret = -ENOMEM;
+		goto fail_alloc;
+	}
+
+	for (i = 0; i < CRTC_DUAL_MIXERS; i++) {
+		hw_ctl = kzalloc(sizeof(*hw_ctl), GFP_KERNEL);
+		if (!hw_ctl) {
+			ret = -ENOMEM;
+			goto fail_ctl;
+		}
+		shd_enc->hw_ctl[i] = &hw_ctl->base;
+
+		hw_lm = kzalloc(sizeof(*hw_lm), GFP_KERNEL);
+		if (!hw_lm) {
+			ret = -ENOMEM;
+			goto fail_ctl;
+		}
+		shd_enc->hw_lm[i] = &hw_lm->base;
+	}
+
+	phys_enc = &shd_enc->base;
+
+	sde_encoder_phys_shd_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_NONE;
+	phys_enc->intf_idx = p->intf_idx;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&shd_enc->irq_cb[i].list);
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	phys_enc->splash_flush_bits = 0;
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	return phys_enc;
+
+fail_ctl:
+	/* unallocated slots are NULL; kfree(NULL) is a no-op */
+	for (i = 0; i < CRTC_DUAL_MIXERS; i++) {
+		kfree(shd_enc->hw_ctl[i]);
+		kfree(shd_enc->hw_lm[i]);
+	}
+	kfree(shd_enc);
+fail_alloc:
+	return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
new file mode 100644
index 000000000000..b1e09d336c63
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_vid.c
@@ -0,0 +1,1006 @@
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "sde_recovery_manager.h"
+#include "sde_encoder_phys.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+
+/*
+ * Logging helpers: prefix messages with the parent DRM encoder id and the
+ * hardware interface index, or -1 when either is not yet bound.
+ */
+#define SDE_DEBUG_VIDENC(e, fmt, ...) SDE_DEBUG("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_VIDENC(e, fmt, ...) SDE_ERROR("enc%d intf%d " fmt, \
+		(e) && (e)->base.parent ? \
+		(e)->base.parent->base.id : -1, \
+		(e) && (e)->hw_intf ? \
+		(e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+/* Convert an embedded base encoder pointer back to its vid wrapper. */
+#define to_sde_encoder_phys_vid(x) \
+	container_of(x, struct sde_encoder_phys_vid, base)
+
+/* Solo and master split roles both act as master; only a slave does not. */
+static bool sde_encoder_phys_vid_is_master(
+		struct sde_encoder_phys *phys_enc)
+{
+	return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+/**
+ * drm_mode_to_intf_timing_params - translate a DRM mode into intf timing
+ * @vid_enc: owning video encoder (unused by the active code below)
+ * @mode: DRM display mode to translate
+ * @timing: output; zeroed, then filled with active/porch/sync geometry
+ */
+static void drm_mode_to_intf_timing_params(
+		const struct sde_encoder_phys_vid *vid_enc,
+		const struct drm_display_mode *mode,
+		struct intf_timing_params *timing)
+{
+	memset(timing, 0, sizeof(*timing));
+	/*
+	 * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+	 * Active Region      Front Porch   Sync   Back Porch
+	 * <-----------------><------------><-----><----------->
+	 * <- [hv]display --->
+	 * <--------- [hv]sync_start ------>
+	 * <----------------- [hv]sync_end ------->
+	 * <---------------------------- [hv]total ------------->
+	 */
+	timing->width = mode->hdisplay;	/* active width */
+	timing->height = mode->vdisplay;	/* active height */
+	timing->xres = timing->width;
+	timing->yres = timing->height;
+	/* porch/sync widths derived from the diagram above */
+	timing->h_back_porch = mode->htotal - mode->hsync_end;
+	timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+	timing->v_back_porch = mode->vtotal - mode->vsync_end;
+	timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+	timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+	timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+	/* polarity 1 when the "N" (negative sync) DRM flag is set */
+	timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+	timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+	timing->border_clr = 0;
+	timing->underflow_clr = 0xff;
+	timing->hsync_skew = mode->hskew;
+
+	/*
+	 * For edp only:
+	 * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+	 * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+	 */
+	/*
+	 * if (vid_enc->hw->cap->type == INTF_EDP) {
+	 * display_v_start += mode->htotal - mode->hsync_start;
+	 * display_v_end -= mode->hsync_start - mode->hdisplay;
+	 * }
+	 */
+}
+
+/* Full line length in pixels: active width plus hfp + hsync + hbp. */
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+	return timing->xres + timing->h_front_porch +
+			timing->hsync_pulse_width + timing->h_back_porch;
+}
+
+/* Full frame height in lines: active height plus vfp + vsync + vbp. */
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+	return timing->yres + timing->v_front_porch +
+			timing->vsync_pulse_width + timing->v_back_porch;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have very large VFP, however we only need a total number of
+ * lines based on the chip worst case latencies.
+ */
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in vertical front porch at which mdp
+ * can start fetching the next frame.
+ *
+ * Number of needed prefetch lines is anything that cannot be absorbed in the
+ * start of frame time (back porch + vsync pulse width).
+ *
+ * Some panels have very large VFP, however we only need a total number of
+ * lines based on the chip worst case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+		struct sde_encoder_phys_vid *vid_enc,
+		const struct intf_timing_params *timing)
+{
+	u32 worst_case_needed_lines =
+			vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+	u32 start_of_frame_lines =
+			timing->v_back_porch + timing->vsync_pulse_width;
+	u32 needed_vfp_lines = 0;
+	u32 actual_vfp_lines = 0;
+
+	/* Fetch must be outside active lines, otherwise undefined. */
+	if (start_of_frame_lines >= worst_case_needed_lines) {
+		SDE_DEBUG_VIDENC(vid_enc,
+				"prog fetch is not needed, large vbp+vsw\n");
+	} else {
+		/*
+		 * Subtract only on this branch: doing it unconditionally
+		 * underflowed the u32 when vbp+vsw already covered the worst
+		 * case, and the wrapped value then polluted the debug trace.
+		 */
+		needed_vfp_lines =
+				worst_case_needed_lines - start_of_frame_lines;
+
+		if (timing->v_front_porch < needed_vfp_lines) {
+			/* Warn fetch needed, but not enough porch in panel */
+			pr_warn_once
+			("low vbp+vfp may lead to perf issues in some cases\n");
+			SDE_DEBUG_VIDENC(vid_enc,
+				"less vfp than fetch req, using entire vfp\n");
+			actual_vfp_lines = timing->v_front_porch;
+		} else {
+			SDE_DEBUG_VIDENC(vid_enc,
+					"room in vfp for needed prefetch\n");
+			actual_vfp_lines = needed_vfp_lines;
+		}
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc,
+		"v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+		timing->v_front_porch, timing->v_back_porch,
+		timing->vsync_pulse_width);
+	SDE_DEBUG_VIDENC(vid_enc,
+		"wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+		worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+	return actual_vfp_lines;
+}
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch for cases where the
+ * vsync pulse width and vertical back porch time is insufficient
+ *
+ * Gets # of lines to pre-fetch, then calculate VSYNC counter value.
+ * HW layer requires VSYNC counter of first pixel of tgt VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct sde_encoder_phys *phys_enc,
+				      const struct intf_timing_params *timing)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+		to_sde_encoder_phys_vid(phys_enc);
+	struct intf_prog_fetch f = { 0 };
+	u32 vfp_fetch_lines = 0;
+	u32 horiz_total = 0;
+	u32 vert_total = 0;
+	u32 vfp_fetch_start_vsync_counter = 0;
+	unsigned long lock_flags;
+
+	if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+		return;
+
+	vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+	if (vfp_fetch_lines) {
+		/*
+		 * Counter of the first pixel of the target VFP line:
+		 * lines before the fetch point times pixels per line, plus 1.
+		 * If no prefetch is needed, f stays zeroed (fetch disabled).
+		 */
+		vert_total = get_vertical_total(timing);
+		horiz_total = get_horizontal_total(timing);
+		vfp_fetch_start_vsync_counter =
+			(vert_total - vfp_fetch_lines) * horiz_total + 1;
+		f.enable = 1;
+		f.fetch_start = vfp_fetch_start_vsync_counter;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc,
+		"vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+		vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+	/* program under the encoder lock to serialize with other intf setup */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
+
+/* Mode validation hook: video encoders accept every mode unchanged. */
+static bool sde_encoder_phys_vid_mode_fixup(
+		struct sde_encoder_phys *phys_enc,
+		const struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_phys_vid *vid_enc = phys_enc ?
+			to_sde_encoder_phys_vid(phys_enc) : NULL;
+
+	if (vid_enc)
+		SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	/*
+	 * Modifying mode has consequences when the mode comes back to us
+	 */
+	return true;
+}
+
+/**
+ * sde_encoder_phys_vid_setup_timing_engine - program intf timing and ctl cfg
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Translates the cached DRM mode into interface timing, halving the
+ * horizontal geometry for split-display or YUV420 output, then programs the
+ * timing generator, CTL interface config and the programmable prefetch.
+ */
+static void sde_encoder_phys_vid_setup_timing_engine(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct drm_display_mode mode;
+	struct intf_timing_params timing_params = { 0 };
+	const struct sde_format *fmt = NULL;
+	u32 fmt_fourcc = DRM_FORMAT_RGB888;
+	unsigned long lock_flags;
+	struct sde_hw_intf_cfg intf_cfg = { 0 };
+
+	if (!phys_enc || !phys_enc->hw_ctl ||
+			!phys_enc->hw_ctl->ops.setup_intf_cfg) {
+		SDE_ERROR("invalid encoder %d\n", phys_enc != 0);
+		return;
+	}
+
+	/* work on a local copy: the halving below must not touch cached_mode */
+	mode = phys_enc->cached_mode;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+		SDE_ERROR("timing engine setup is not supported\n");
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+	drm_mode_debug_printmodeline(&mode);
+
+	/* each intf drives half the width in split or YUV420 configurations */
+	if (phys_enc->split_role != ENC_ROLE_SOLO ||
+		(mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)) {
+		mode.hdisplay >>= 1;
+		mode.htotal >>= 1;
+		mode.hsync_start >>= 1;
+		mode.hsync_end >>= 1;
+		mode.hskew >>= 1;
+
+		SDE_DEBUG_VIDENC(vid_enc,
+			"split_role %d, halve horizontal %d %d %d %d %d\n",
+			phys_enc->split_role,
+			mode.hdisplay, mode.htotal,
+			mode.hsync_start, mode.hsync_end,
+			mode.hskew);
+	}
+
+	drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+	fmt = sde_get_sde_format(fmt_fourcc);
+	SDE_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+	intf_cfg.intf = vid_enc->hw_intf->idx;
+	intf_cfg.intf_mode_sel = SDE_CTL_MODE_SEL_VID;
+	intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+	intf_cfg.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc);
+
+	/* program timing gen and intf cfg atomically w.r.t. other intf setup */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+			&timing_params, fmt);
+	phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	programmable_fetch_config(phys_enc, &timing_params);
+}
+
+/**
+ * sde_encoder_phys_vid_vblank_irq - vsync interrupt handler
+ * @arg: struct sde_encoder_phys_vid registered as the irq callback arg
+ * @irq_idx: interrupt index (unused)
+ *
+ * Forwards vblank to the parent encoder, retires one pending kickoff if the
+ * hardware flush register shows the flush was consumed, then wakes waiters.
+ */
+static void sde_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_vid *vid_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+	struct sde_hw_ctl *hw_ctl;
+	unsigned long lock_flags;
+	u32 flush_register = 0;
+	int new_cnt = -1, old_cnt = -1;
+
+	if (!vid_enc)
+		return;
+
+	phys_enc = &vid_enc->base;
+	hw_ctl = phys_enc->hw_ctl;
+
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+	/*
+	 * only decrement the pending flush count if we've actually flushed
+	 * hardware. due to sw irq latency, vblank may have already happened
+	 * so we need to double-check with hw that it accepted the flush bits
+	 */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	if (hw_ctl && hw_ctl->ops.get_flush_register)
+		flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+	/*
+	 * When bootloader's splash is presented, as bootloader is concurrently
+	 * flushing hardware pipes, so when checking flush_register, we need
+	 * to care if the active bit in the flush_register matches with the
+	 * bootloader's splash pipe flush bits.
+	 */
+	if ((flush_register & ~phys_enc->splash_flush_bits) == 0)
+		/* never drops below zero: add_unless stops at 0 */
+		new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+				-1, 0);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	SDE_EVT32_IRQ(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			old_cnt, new_cnt, flush_register);
+
+	/* Signal any waiting atomic commit thread */
+	wake_up_all(&phys_enc->pending_kickoff_wq);
+}
+
+/* Underrun interrupt handler: just relay the event to the parent encoder. */
+static void sde_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_vid *vid_enc = arg;
+	struct sde_encoder_phys *phys_enc;
+
+	if (!vid_enc)
+		return;
+
+	phys_enc = &vid_enc->base;
+	if (!phys_enc->parent_ops.handle_underrun_virt)
+		return;
+
+	phys_enc->parent_ops.handle_underrun_virt(phys_enc->parent, phys_enc);
+}
+
+/* True when the connector's assigned topology is ping-pong split. */
+static bool _sde_encoder_phys_is_ppsplit(struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	return sde_connector_get_topology_name(phys_enc->connector) ==
+			SDE_RM_TOPOLOGY_PPSPLIT;
+}
+
+/* PP-split shares one CTL, so both halves must be flushed together. */
+static bool sde_encoder_phys_vid_needs_single_flush(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (!phys_enc)
+		return false;
+
+	return _sde_encoder_phys_is_ppsplit(phys_enc);
+}
+
+/**
+ * sde_encoder_phys_vid_register_irq - look up, register and enable one irq
+ * @phys_enc: Pointer to physical encoder
+ * @intr_type: core interrupt type to look up for this intf
+ * @idx: slot in vid_enc->irq_idx[] / irq_cb[] to use
+ * @irq_func: handler to invoke from the core irq layer
+ * @irq_name: human-readable name for log messages
+ *
+ * Return: 0 on success; -EINVAL on lookup failure; core irq error otherwise.
+ * On enable failure the callback is unregistered again, leaving no
+ * partially-installed state.
+ */
+static int sde_encoder_phys_vid_register_irq(struct sde_encoder_phys *phys_enc,
+	enum sde_intr_type intr_type, int idx,
+	void (*irq_func)(void *, int), const char *irq_name)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	int ret = 0;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	vid_enc->irq_idx[idx] = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, vid_enc->hw_intf->idx);
+	if (vid_enc->irq_idx[idx] < 0) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to lookup IRQ index for %s type:%d\n", irq_name,
+			intr_type);
+		return -EINVAL;
+	}
+
+	vid_enc->irq_cb[idx].func = irq_func;
+	vid_enc->irq_cb[idx].arg = vid_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+	if (ret) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"failed to register IRQ callback for %s\n", irq_name);
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
+	if (ret) {
+		SDE_ERROR_VIDENC(vid_enc,
+			"enable IRQ for intr:%s failed, irq_idx %d\n",
+			irq_name, vid_enc->irq_idx[idx]);
+		/* poison the slot so later reads/disables see it as invalid */
+		vid_enc->irq_idx[idx] = -EINVAL;
+
+		/* unregister callback on IRQ enable failure */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+		return ret;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "registered irq %s idx: %d\n",
+			irq_name, vid_enc->irq_idx[idx]);
+
+	return ret;
+}
+
+/* Disable and unregister the irq slot set up by the register helper. */
+static int sde_encoder_phys_vid_unregister_irq(
+	struct sde_encoder_phys *phys_enc, int idx)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return 0;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+	/* disable first so no new interrupt races the callback removal */
+	sde_core_irq_disable(phys_enc->sde_kms, &vid_enc->irq_idx[idx], 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms,
+			vid_enc->irq_idx[idx], &vid_enc->irq_cb[idx]);
+
+	SDE_DEBUG_VIDENC(vid_enc, "unregistered %d\n", vid_enc->irq_idx[idx]);
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_vid_mode_set - cache the mode and re-acquire HW blocks
+ * @phys_enc: Pointer to physical encoder
+ * @mode: requested mode (unused; adj_mode is authoritative)
+ * @adj_mode: adjusted mode to cache for the upcoming enable
+ *
+ * Walks the resource manager iterators to pick up the CTL (mandatory) and
+ * CDM (optional) blocks previously reserved for this encoder instance.
+ */
+static void sde_encoder_phys_vid_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_rm *rm;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc || !phys_enc->sde_kms) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	/* drop stale block pointers before re-acquiring from the RM */
+	phys_enc->hw_ctl = NULL;
+	phys_enc->hw_cdm = NULL;
+
+	rm = &phys_enc->sde_kms->rm;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	phys_enc->cached_mode = *adj_mode;
+	SDE_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+	drm_mode_debug_printmodeline(adj_mode);
+
+	/* slave uses the second reserved block of each type */
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	/* Retrieve previously allocated HW Resources. Shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		/* keep the last hw seen, i.e. the instance'th CTL */
+		if (sde_rm_get_hw(rm, &iter))
+			phys_enc->hw_ctl = (struct sde_hw_ctl *)iter.hw;
+	}
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+				PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	/* CDM is optional */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
+	}
+
+	/* NULL hw_cdm is fine (no CDM reserved); only ERR pointers are bad */
+	if (IS_ERR(phys_enc->hw_cdm)) {
+		SDE_ERROR("CDM required but not allocated: %ld\n",
+				PTR_ERR(phys_enc->hw_cdm));
+		phys_enc->hw_cdm = NULL;
+	}
+}
+
+/**
+ * sde_encoder_phys_vid_control_vblank_irq - refcounted vsync irq en/disable
+ * @phys_enc: Pointer to physical encoder
+ * @enable: true to take a vblank reference, false to drop one
+ *
+ * Registers the vsync irq on the 0->1 refcount transition and unregisters
+ * on 1->0. Slave encoders never report vblank and return 0 immediately.
+ *
+ * Return: 0 on success or the register/unregister error.
+ */
+static int sde_encoder_phys_vid_control_vblank_irq(
+		struct sde_encoder_phys *phys_enc,
+		bool enable)
+{
+	int ret = 0;
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return -EINVAL;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+
+	/* Slave encoders don't report vblank */
+	if (!sde_encoder_phys_vid_is_master(phys_enc))
+		return 0;
+
+	SDE_DEBUG_VIDENC(vid_enc, "[%pS] enable=%d/%d\n",
+			__builtin_return_address(0),
+			enable, atomic_read(&phys_enc->vblank_refcount));
+
+	SDE_EVT32(DRMID(phys_enc->parent), enable,
+			atomic_read(&phys_enc->vblank_refcount));
+
+	/*
+	 * NOTE(review): if register fails here the refcount stays at 1 with
+	 * no irq installed; the matching disable will then attempt an
+	 * unregister of a never-registered irq -- confirm this is benign.
+	 */
+	if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+		ret = sde_encoder_phys_vid_register_irq(phys_enc,
+			SDE_IRQ_TYPE_INTF_VSYNC,
+			INTR_IDX_VSYNC,
+			sde_encoder_phys_vid_vblank_irq, "vsync_irq");
+	else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+		ret = sde_encoder_phys_vid_unregister_irq(phys_enc,
+			INTR_IDX_VSYNC);
+
+	if (ret)
+		SDE_ERROR_VIDENC(vid_enc,
+				"control vblank irq error %d, enable %d\n",
+				ret, enable);
+
+	return ret;
+}
+
+/**
+ * sde_encoder_phys_vid_enable - bring up the video-mode physical encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Votes data bus bandwidth, programs split config and the timing engine,
+ * installs vsync/underrun irqs, optionally sets up CDM for YUV420 output,
+ * and stages the intf (and CDM) flush bits. The actual CTL flush and timing
+ * engine start happen later in the kickoff path.
+ */
+static void sde_encoder_phys_vid_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct sde_encoder_phys_vid *vid_enc;
+	struct sde_hw_intf *intf;
+	struct sde_hw_ctl *ctl;
+	struct sde_hw_cdm *hw_cdm = NULL;
+	struct drm_display_mode mode;
+	const struct sde_format *fmt = NULL;
+	u32 flush_mask = 0;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		SDE_ERROR("invalid encoder/device\n");
+		return;
+	}
+	hw_cdm = phys_enc->hw_cdm;
+	priv = phys_enc->parent->dev->dev_private;
+	mode = phys_enc->cached_mode;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	intf = vid_enc->hw_intf;
+	ctl = phys_enc->hw_ctl;
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	/* vote bandwidth up before touching the pipeline */
+	sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+			phys_enc->sde_kms->core_client, true);
+
+	sde_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+	sde_encoder_phys_vid_setup_timing_engine(phys_enc);
+	ret = sde_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+	if (ret)
+		goto end;
+
+	ret = sde_encoder_phys_vid_register_irq(phys_enc,
+		SDE_IRQ_TYPE_INTF_UNDER_RUN,
+		INTR_IDX_UNDERRUN,
+		sde_encoder_phys_vid_underrun_irq, "underrun");
+	if (ret) {
+		/* roll back the vblank reference taken above */
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+		goto end;
+	}
+
+	/* YUV420 output needs the CDM block between LM and intf */
+	if (mode.private_flags & MSM_MODE_FLAG_COLOR_FORMAT_YCBCR420)
+		fmt = sde_get_sde_format(DRM_FORMAT_YUV420);
+
+	if (fmt) {
+		struct sde_rect hdmi_roi;
+
+		hdmi_roi.w = mode.hdisplay;
+		hdmi_roi.h = mode.vdisplay;
+		sde_encoder_phys_setup_cdm(phys_enc, fmt,
+			CDM_CDWN_OUTPUT_HDMI, &hdmi_roi);
+	}
+
+	/* stage intf (and CDM) bits; flush itself happens at kickoff */
+	ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+	if (ctl->ops.get_bitmask_cdm && hw_cdm)
+		ctl->ops.get_bitmask_cdm(ctl, &flush_mask, hw_cdm->idx);
+	ctl->ops.update_pending_flush(ctl, flush_mask);
+
+	SDE_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+		ctl->idx - CTL_0, flush_mask);
+
+	/* ctl_flush & timing engine enable will be triggered by framework */
+	if (phys_enc->enable_state == SDE_ENC_DISABLED)
+		phys_enc->enable_state = SDE_ENC_ENABLING;
+
+end:
+	return;
+}
+
+/**
+ * sde_encoder_phys_vid_destroy - free the video-mode physical encoder
+ * @phys_enc: Pointer to physical encoder (may be from a partial init)
+ */
+static void sde_encoder_phys_vid_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	/*
+	 * misr_map is allocated in sde_encoder_phys_vid_init(); freeing it
+	 * here fixes a leak on teardown. kfree(NULL) is a no-op, so the
+	 * partial-init failure path (misr_map never allocated) is safe.
+	 */
+	kfree(phys_enc->misr_map);
+	kfree(vid_enc);
+}
+
+/**
+ * sde_encoder_phys_vid_get_hw_resources - report intf usage and CDM need
+ * @phys_enc: Pointer to physical encoder
+ * @hw_res: output; marks this intf as video mode, sets needs_cdm for HDMI
+ * @conn_state: connector state (only used for the error log)
+ */
+static void sde_encoder_phys_vid_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct sde_mdss_cfg *vid_catalog;
+
+	if (!phys_enc || !hw_res) {
+		SDE_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+				phys_enc != NULL, hw_res != NULL, conn_state != NULL);
+		return;
+	}
+
+	vid_catalog = phys_enc->sde_kms->catalog;
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !vid_catalog) {
+		SDE_ERROR("invalid arg(s), hw_intf %d vid_catalog %d\n",
+				vid_enc->hw_intf != NULL, vid_catalog != NULL);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+	/*
+	 * NOTE(review): '>' lets idx == INTF_MAX through; if intfs[] is sized
+	 * (INTF_MAX - INTF_0) this is an off-by-one -- confirm the array
+	 * bound in sde_encoder_hw_resources.
+	 */
+	if (vid_enc->hw_intf->idx > INTF_MAX) {
+		SDE_ERROR("invalid arg(s), idx %d\n",
+				vid_enc->hw_intf->idx);
+		return;
+	}
+	hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+
+	/* only HDMI interfaces route through CDM for chroma down-sampling */
+	if (vid_catalog->intf[vid_enc->hw_intf->idx - INTF_0].type
+			== INTF_HDMI)
+		hw_res->needs_cdm = true;
+	SDE_DEBUG_DRIVER("[vid] needs_cdm=%d\n", hw_res->needs_cdm);
+}
+
+/**
+ * sde_encoder_phys_vid_wait_for_vblank - wait for pending kickoff to latch
+ * @phys_enc: Pointer to physical encoder
+ * @notify: Whether to send frame-done/error events to the parent encoder
+ *
+ * Blocks until the pending flush is consumed by hardware at vblank, up to
+ * KICKOFF_TIMEOUT_MS. Slave encoders return 0 immediately since only the
+ * master observes vblank.
+ *
+ * Return: 0 on success, -EWOULDBLOCK if the encoder is not enabled,
+ * -ETIMEDOUT when no vblank arrived in time.
+ */
+static int sde_encoder_phys_vid_wait_for_vblank(
+		struct sde_encoder_phys *phys_enc, bool notify)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+	u32 irq_status;
+	int ret;
+
+	if (!sde_encoder_phys_vid_is_master(phys_enc)) {
+		/* always signal done for slave video encoder */
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+		return 0;
+	}
+
+	if (phys_enc->enable_state != SDE_ENC_ENABLED) {
+		SDE_ERROR("encoder not enabled\n");
+		return -EWOULDBLOCK;
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), vid_enc->hw_intf->idx - INTF_0,
+			SDE_EVTLOG_FUNC_ENTRY);
+
+	/* Wait for kickoff to complete */
+	ret = sde_encoder_helper_wait_event_timeout(
+			DRMID(phys_enc->parent),
+			vid_enc->hw_intf->idx - INTF_0,
+			&phys_enc->pending_kickoff_wq,
+			&phys_enc->pending_kickoff_cnt,
+			KICKOFF_TIMEOUT_MS);
+	if (ret <= 0) {
+		/* timed out: check whether hw raised the irq but sw lost it */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				vid_enc->irq_idx[INTR_IDX_VSYNC], true);
+		if (irq_status) {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_DEBUG_VIDENC(vid_enc, "done, irq not triggered\n");
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_DONE);
+			/* run the handler manually to retire the kickoff */
+			sde_encoder_phys_vid_vblank_irq(vid_enc,
+					INTR_IDX_VSYNC);
+			ret = 0;
+		} else {
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0);
+			SDE_ERROR_VIDENC(vid_enc, "kickoff timed out\n");
+			sde_recovery_set_events(SDE_VSYNC_MISS);
+			if (notify && phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+						phys_enc->parent, phys_enc,
+						SDE_ENCODER_FRAME_EVENT_ERROR);
+			ret = -ETIMEDOUT;
+		}
+	} else {
+		if (notify && phys_enc->parent_ops.handle_frame_done)
+			phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_DONE);
+		ret = 0;
+	}
+
+	/*
+	 * Propagate the computed status. The previous unconditional
+	 * "return 0" here masked -ETIMEDOUT from callers such as
+	 * wait_for_commit_done and the disable path, which check this value.
+	 */
+	return ret;
+}
+
+/*
+ * A commit is complete once its flush has latched at vblank; wait for that
+ * and notify the frame-done event to the parent encoder.
+ */
+static int sde_encoder_phys_vid_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	return sde_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+/*
+ * Before kicking off a new frame, make sure any hardware-initiated CTL
+ * reset has completed; dump state if the reset never finishes.
+ */
+static void sde_encoder_phys_vid_prepare_for_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_vid *vid_enc;
+	struct sde_hw_ctl *ctl;
+	int rc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	ctl = phys_enc->hw_ctl;
+	if (!ctl || !ctl->ops.wait_reset_status)
+		return;
+
+	rc = ctl->ops.wait_reset_status(ctl);
+	if (!rc)
+		return;
+
+	SDE_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n", ctl->idx, rc);
+	SDE_DBG_DUMP("panic");
+}
+
+/**
+ * sde_encoder_phys_vid_disable - tear down the video-mode physical encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Stops the timing engine, waits for the disable to latch at vblank on the
+ * master, releases the vblank irq reference, drops the bandwidth vote and
+ * disables CDM if it was in use.
+ */
+static void sde_encoder_phys_vid_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct msm_drm_private *priv;
+	struct sde_encoder_phys_vid *vid_enc;
+	unsigned long lock_flags;
+	int ret;
+
+	if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+			!phys_enc->parent->dev->dev_private) {
+		SDE_ERROR("invalid encoder/device\n");
+		return;
+	}
+	priv = phys_enc->parent->dev->dev_private;
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid hw_intf %d hw_ctl %d\n",
+				vid_enc->hw_intf != 0, phys_enc->hw_ctl != 0);
+		return;
+	}
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+		return;
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("already disabled\n");
+		return;
+	}
+
+	/* stop the timing engine and queue one kickoff to wait on */
+	spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+	vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+	if (sde_encoder_phys_vid_is_master(phys_enc))
+		sde_encoder_phys_inc_pending(phys_enc);
+	spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+	/*
+	 * Wait for a vsync so we know the ENABLE=0 latched before
+	 * the (connector) source of the vsync's gets disabled,
+	 * otherwise we end up in a funny state if we re-enable
+	 * before the disable latches, which results that some of
+	 * the settings changes for the new modeset (like new
+	 * scanout buffer) don't latch properly..
+	 */
+	if (sde_encoder_phys_vid_is_master(phys_enc)) {
+		ret = sde_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+		if (ret) {
+			/* abandon the pending count so we don't wedge */
+			atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+			SDE_ERROR_VIDENC(vid_enc,
+					"failure waiting for disable: %d\n",
+					ret);
+			SDE_EVT32(DRMID(phys_enc->parent),
+					vid_enc->hw_intf->idx - INTF_0, ret);
+		}
+		sde_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+	}
+
+	/* drop the bandwidth vote taken in enable */
+	sde_power_data_bus_bandwidth_ctrl(&priv->phandle,
+			phys_enc->sde_kms->core_client, false);
+
+	if (atomic_read(&phys_enc->vblank_refcount))
+		SDE_ERROR_VIDENC(vid_enc, "invalid vblank refcount %d\n",
+				atomic_read(&phys_enc->vblank_refcount));
+
+	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
+		SDE_DEBUG_DRIVER("[cdm_disable]\n");
+		phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+	}
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+}
+
+/* Post-disable hook: clear the CTL interface configuration if supported. */
+static void sde_encoder_phys_vid_post_disable(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_ctl *ctl;
+
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid encoder %d\n", phys_enc != NULL);
+		return;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	if (ctl->ops.clear_intf_cfg)
+		ctl->ops.clear_intf_cfg(ctl);
+}
+
+/**
+ * sde_encoder_phys_vid_handle_post_kickoff - start timing engine after flush
+ * @phys_enc: Pointer to physical encoder
+ *
+ * On the first kickoff after enable (ENABLING state), turns the timing
+ * engine on; this must come after the CTL flush was issued by the caller.
+ */
+static void sde_encoder_phys_vid_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	unsigned long lock_flags;
+	struct sde_encoder_phys_vid *vid_enc;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	vid_enc = to_sde_encoder_phys_vid(phys_enc);
+	SDE_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+	/*
+	 * Video mode must flush CTL before enabling timing engine
+	 * Video encoders need to turn on their interfaces now
+	 */
+	if (phys_enc->enable_state == SDE_ENC_ENABLING) {
+		SDE_EVT32(DRMID(phys_enc->parent),
+				vid_enc->hw_intf->idx - INTF_0);
+		spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+		vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+		spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+		phys_enc->enable_state = SDE_ENC_ENABLED;
+	}
+}
+
+/* Configure MISR (CRC capture) on the interface if the hw supports it. */
+static void sde_encoder_phys_vid_setup_misr(struct sde_encoder_phys *phys_enc,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+
+	if (!vid_enc || !vid_enc->hw_intf)
+		return;
+
+	if (vid_enc->hw_intf->ops.setup_misr)
+		vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf, misr_map);
+}
+
+/* Read back MISR (CRC) values from the interface if the hw supports it. */
+static void sde_encoder_phys_vid_collect_misr(struct sde_encoder_phys *phys_enc,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_encoder_phys_vid *vid_enc =
+			to_sde_encoder_phys_vid(phys_enc);
+
+	if (!vid_enc || !vid_enc->hw_intf)
+		return;
+
+	if (vid_enc->hw_intf->ops.collect_misr)
+		vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf, misr_map);
+}
+
+/* Populate the physical encoder ops table with the video-mode handlers. */
+static void sde_encoder_phys_vid_init_ops(struct sde_encoder_phys_ops *ops)
+{
+	/* lifecycle */
+	ops->mode_fixup = sde_encoder_phys_vid_mode_fixup;
+	ops->mode_set = sde_encoder_phys_vid_mode_set;
+	ops->enable = sde_encoder_phys_vid_enable;
+	ops->disable = sde_encoder_phys_vid_disable;
+	ops->post_disable = sde_encoder_phys_vid_post_disable;
+	ops->destroy = sde_encoder_phys_vid_destroy;
+
+	/* topology / resources */
+	ops->is_master = sde_encoder_phys_vid_is_master;
+	ops->get_hw_resources = sde_encoder_phys_vid_get_hw_resources;
+	ops->needs_single_flush = sde_encoder_phys_vid_needs_single_flush;
+
+	/* frame flow */
+	ops->control_vblank_irq = sde_encoder_phys_vid_control_vblank_irq;
+	ops->wait_for_commit_done = sde_encoder_phys_vid_wait_for_commit_done;
+	ops->prepare_for_kickoff = sde_encoder_phys_vid_prepare_for_kickoff;
+	ops->handle_post_kickoff = sde_encoder_phys_vid_handle_post_kickoff;
+
+	/* debug */
+	ops->setup_misr = sde_encoder_phys_vid_setup_misr;
+	ops->collect_misr = sde_encoder_phys_vid_collect_misr;
+}
+
+/**
+ * sde_encoder_phys_vid_init - allocate/init a video-mode physical encoder
+ * @p: init parameters supplied by the parent virtual encoder
+ *
+ * Binds the hw_intf matching p->intf_idx permanently to this encoder;
+ * other blocks (CTL/CDM) are acquired per-commit in mode_set.
+ *
+ * Return: encoder pointer on success, ERR_PTR(-errno) on failure.
+ */
+struct sde_encoder_phys *sde_encoder_phys_vid_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc = NULL;
+	struct sde_encoder_phys_vid *vid_enc = NULL;
+	struct sde_rm_hw_iter iter;
+	struct sde_hw_mdp *hw_mdp;
+	int i, ret = 0;
+
+	/* sde_kms is dereferenced below; guard it along with p */
+	if (!p || !p->sde_kms) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+	if (!vid_enc) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	phys_enc = &vid_enc->base;
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/*
+		 * PTR_ERR(NULL) is 0, which would make us return
+		 * ERR_PTR(0) == NULL and look like success to IS_ERR()
+		 * callers; map NULL to -EINVAL explicitly.
+		 */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		SDE_ERROR("failed to get mdptop\n");
+		goto fail;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+	phys_enc->intf_idx = p->intf_idx;
+
+	/**
+	 * hw_intf resource permanently assigned to this encoder
+	 * Other resources allocated at atomic commit time by use case
+	 */
+	sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_INTF);
+	while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
+		struct sde_hw_intf *hw_intf = (struct sde_hw_intf *)iter.hw;
+
+		if (hw_intf->idx == p->intf_idx) {
+			vid_enc->hw_intf = hw_intf;
+			break;
+		}
+	}
+
+	if (!vid_enc->hw_intf) {
+		ret = -EINVAL;
+		SDE_ERROR("failed to get hw_intf\n");
+		goto fail;
+	}
+
+	/* MISR is a debug feature: continue without it on alloc failure */
+	phys_enc->misr_map = kzalloc(sizeof(struct sde_misr_params),
+			GFP_KERNEL);
+	if (!phys_enc->misr_map)
+		SDE_ERROR("sde misr map allocation failed\n");
+
+	SDE_DEBUG_VIDENC(vid_enc, "\n");
+
+	sde_encoder_phys_vid_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_VIDEO;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	for (i = 0; i < INTR_IDX_MAX; i++)
+		INIT_LIST_HEAD(&vid_enc->irq_cb[i].list);
+	atomic_set(&phys_enc->vblank_refcount, 0);
+	atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+	phys_enc->splash_flush_bits = 0;
+	init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+
+	SDE_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+	return phys_enc;
+
+fail:
+	SDE_ERROR("failed to create encoder\n");
+	if (vid_enc)
+		sde_encoder_phys_vid_destroy(phys_enc);
+
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
new file mode 100644
index 000000000000..38bd11bf162e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_encoder_phys_wb.c
@@ -0,0 +1,1034 @@
+/*
+ * Copyright (c) 2015-2019 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/debugfs.h>
+
+#include "sde_encoder_phys.h"
+#include "sde_formats.h"
+#include "sde_hw_top.h"
+#include "sde_hw_interrupts.h"
+#include "sde_core_irq.h"
+#include "sde_wb.h"
+#include "sde_vbif.h"
+
+/* Recover the writeback encoder wrapper from its embedded base encoder */
+#define to_sde_encoder_phys_wb(x) \
+	container_of(x, struct sde_encoder_phys_wb, base)
+
+/* Writeback hw block index for event logging; -1 when wb_enc is NULL */
+#define WBID(wb_enc) ((wb_enc) ? wb_enc->wb_dev->wb_idx : -1)
+
+/**
+ * sde_encoder_phys_wb_is_master - report wb always as master encoder
+ * @phys_enc: Pointer to physical encoder (unused)
+ *
+ * Writeback has no split-display slave role, so every writeback physical
+ * encoder acts as the master of its virtual encoder.
+ * Return: true always
+ */
+static bool sde_encoder_phys_wb_is_master(struct sde_encoder_phys *phys_enc)
+{
+	return true;
+}
+
+/**
+ * sde_encoder_phys_wb_get_intr_type - get interrupt type based on block mode
+ * @hw_wb: Pointer to h/w writeback driver
+ *
+ * Block-mode capable writebacks complete through the rotator interrupt;
+ * linear writebacks complete through the WFD interrupt.
+ */
+static enum sde_intr_type sde_encoder_phys_wb_get_intr_type(
+		struct sde_hw_wb *hw_wb)
+{
+	if (hw_wb->caps->features & BIT(SDE_WB_BLOCK_MODE))
+		return SDE_IRQ_TYPE_WB_ROT_COMP;
+
+	return SDE_IRQ_TYPE_WB_WFD_COMP;
+}
+
+/**
+ * sde_encoder_phys_wb_set_ot_limit - set OT limit for writeback interface
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Programs the vbif outstanding-transaction limit for the writeback client
+ * based on the current ROI dimensions and cached mode refresh rate.
+ */
+static void sde_encoder_phys_wb_set_ot_limit(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	/* designated initializer zero-fills every field not listed here */
+	struct sde_vbif_set_ot_params ot_params = {
+		.xin_id = hw_wb->caps->xin_id,
+		.num = hw_wb->idx - WB_0,
+		.width = wb_enc->wb_roi.w,
+		.height = wb_enc->wb_roi.h,
+		.is_wfd = true,
+		.frame_rate = phys_enc->cached_mode.vrefresh,
+		.vbif_idx = hw_wb->caps->vbif_idx,
+		.clk_ctrl = hw_wb->caps->clk_ctrl,
+		.rd = false,
+	};
+
+	sde_vbif_set_ot_limit(phys_enc->sde_kms, &ot_params);
+}
+
+/**
+ * sde_encoder_phys_wb_set_traffic_shaper - set traffic shaper for writeback
+ * @phys_enc: Pointer to physical encoder
+ *
+ * The traffic shaper is only meaningful for rotator sessions; the display
+ * writeback path always leaves it disabled in the cached wb config.
+ */
+static void sde_encoder_phys_wb_set_traffic_shaper(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	/* traffic shaper is only enabled for rotator */
+	wb_enc->wb_cfg.ts_cfg.en = false;
+}
+
+/**
+ * sde_encoder_phys_wb_setup_fb - setup output framebuffer
+ * @phys_enc: Pointer to physical encoder
+ * @fb: Pointer to output framebuffer
+ * @wb_roi: Pointer to output region of interest
+ *
+ * Translates the DRM framebuffer into the hardware wb config (format,
+ * address layout, ROI) and programs it through the hw_wb ops. Errors are
+ * logged and cause an early return; they are expected to have been caught
+ * earlier by atomic_check.
+ */
+static void sde_encoder_phys_wb_setup_fb(struct sde_encoder_phys *phys_enc,
+		struct drm_framebuffer *fb, struct sde_rect *wb_roi)
+{
+	/* NOTE(review): wb_enc is derived before the NULL check below; safe
+	 * only because it is not dereferenced until after the check.
+	 */
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb;
+	struct sde_hw_wb_cfg *wb_cfg;
+	const struct msm_format *format;
+	int ret;
+	struct msm_gem_address_space *aspace;
+
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+	wb_cfg = &wb_enc->wb_cfg;
+	memset(wb_cfg, 0, sizeof(struct sde_hw_wb_cfg));
+
+	/* secure framebuffers must be mapped through the secure smmu domain */
+	wb_cfg->intf_mode = phys_enc->intf_mode;
+	wb_cfg->is_secure = (fb->flags & DRM_MODE_FB_SECURE) ? true : false;
+	aspace = (wb_cfg->is_secure) ?
+			wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] :
+			wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE];
+
+	SDE_DEBUG("[fb_secure:%d]\n", wb_cfg->is_secure);
+
+	format = msm_framebuffer_format(fb);
+	if (!format) {
+		SDE_DEBUG("invalid format for fb\n");
+		return;
+	}
+
+	wb_cfg->dest.format = sde_get_sde_format_ext(
+			format->pixel_format,
+			fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!wb_cfg->dest.format) {
+		/* this error should be detected during atomic_check */
+		SDE_ERROR("failed to get format %x\n", format->pixel_format);
+		return;
+	}
+	wb_cfg->roi = *wb_roi;
+
+	if (hw_wb->caps->features & BIT(SDE_WB_XY_ROI_OFFSET)) {
+		/* hw applies the x/y offset itself: program the full fb and
+		 * hand the ROI to setup_roi below
+		 */
+		ret = sde_format_populate_layout(aspace, fb, &wb_cfg->dest);
+		if (ret) {
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
+		wb_cfg->dest.width = fb->width;
+		wb_cfg->dest.height = fb->height;
+		wb_cfg->dest.num_planes = wb_cfg->dest.format->num_planes;
+	} else {
+		/* no hw offset support: bake the ROI into the plane addresses */
+		ret = sde_format_populate_layout_with_roi(aspace, fb, wb_roi,
+				&wb_cfg->dest);
+		if (ret) {
+			/* this error should be detected during atomic_check */
+			SDE_DEBUG("failed to populate layout %d\n", ret);
+			return;
+		}
+	}
+
+	/* hw expects Cr before Cb for planar output; swap chroma planes when
+	 * the format stores Cb first
+	 */
+	if ((wb_cfg->dest.format->fetch_planes == SDE_PLANE_PLANAR) &&
+			(wb_cfg->dest.format->element[0] == C1_B_Cb))
+		swap(wb_cfg->dest.plane_addr[1], wb_cfg->dest.plane_addr[2]);
+
+	SDE_DEBUG("[fb_offset:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+			wb_cfg->dest.plane_addr[0],
+			wb_cfg->dest.plane_addr[1],
+			wb_cfg->dest.plane_addr[2],
+			wb_cfg->dest.plane_addr[3]);
+	SDE_DEBUG("[fb_stride:%8.8x,%8.8x,%8.8x,%8.8x]\n",
+			wb_cfg->dest.plane_pitch[0],
+			wb_cfg->dest.plane_pitch[1],
+			wb_cfg->dest.plane_pitch[2],
+			wb_cfg->dest.plane_pitch[3]);
+
+	if (hw_wb->ops.setup_roi)
+		hw_wb->ops.setup_roi(hw_wb, wb_cfg);
+
+	if (hw_wb->ops.setup_outformat)
+		hw_wb->ops.setup_outformat(hw_wb, wb_cfg);
+
+	if (hw_wb->ops.setup_outaddress)
+		hw_wb->ops.setup_outaddress(hw_wb, wb_cfg);
+}
+
+/**
+ * sde_encoder_phys_wb_setup_cdp - setup chroma down prefetch block
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Rebuilds the cached interface config (no intf, this wb block, current 3d
+ * blend mode) and pushes it to the CTL block when one is assigned.
+ */
+static void sde_encoder_phys_wb_setup_cdp(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+	struct sde_hw_intf_cfg *intf_cfg = &wb_enc->intf_cfg;
+
+	/* compound-literal assignment zero-fills all unlisted fields */
+	*intf_cfg = (struct sde_hw_intf_cfg) {
+		.intf = SDE_NONE,
+		.wb = wb_enc->hw_wb->idx,
+		.mode_3d = sde_encoder_helper_get_3d_blend_mode(phys_enc),
+	};
+
+	if (ctl && ctl->ops.setup_intf_cfg)
+		ctl->ops.setup_intf_cfg(ctl, intf_cfg);
+}
+
+/**
+ * sde_encoder_phys_wb_atomic_check - verify and fixup given atomic states
+ * @phys_enc: Pointer to physical encoder
+ * @crtc_state: Pointer to CRTC atomic state
+ * @conn_state: Pointer to connector atomic state
+ *
+ * Validates connector status, output framebuffer format and the output ROI
+ * against the writeback block capabilities and the requested display mode.
+ * Flags a full mode set when the YUV/CDM requirement toggles.
+ * Return: 0 on success, negative error code on validation failure
+ */
+static int sde_encoder_phys_wb_atomic_check(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_crtc_state *crtc_state,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	const struct sde_wb_cfg *wb_cfg = hw_wb->caps;
+	struct drm_framebuffer *fb;
+	const struct sde_format *fmt;
+	struct sde_rect wb_roi;
+	const struct drm_display_mode *mode = &crtc_state->mode;
+	int rc;
+
+	SDE_DEBUG("[atomic_check:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->base.id, mode->name,
+			mode->hdisplay, mode->vdisplay);
+
+	if (!conn_state || !conn_state->connector) {
+		SDE_ERROR("invalid connector state\n");
+		return -EINVAL;
+	} else if (conn_state->connector->status !=
+			connector_status_connected) {
+		SDE_ERROR("connector not connected %d\n",
+				conn_state->connector->status);
+		return -EINVAL;
+	}
+
+	memset(&wb_roi, 0, sizeof(struct sde_rect));
+
+	rc = sde_wb_connector_state_get_output_roi(conn_state, &wb_roi);
+	if (rc) {
+		SDE_ERROR("failed to get roi %d\n", rc);
+		return rc;
+	}
+
+	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi.x, wb_roi.y,
+			wb_roi.w, wb_roi.h);
+
+	fb = sde_wb_connector_state_get_output_fb(conn_state);
+	if (!fb) {
+		SDE_ERROR("no output framebuffer\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+			fb->width, fb->height);
+
+	fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!fmt) {
+		SDE_ERROR("unsupported output pixel format:%x\n",
+				fb->pixel_format);
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
+			fb->modifier[0]);
+
+	/* YUV/UBWC output requires the corresponding wb capability bit */
+	if (SDE_FORMAT_IS_YUV(fmt) &&
+			!(wb_cfg->features & BIT(SDE_WB_YUV_CONFIG))) {
+		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	if (SDE_FORMAT_IS_UBWC(fmt) &&
+			!(wb_cfg->features & BIT(SDE_WB_UBWC_1_0))) {
+		SDE_ERROR("invalid output format %x\n", fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	/* CDM is needed iff output is YUV; force a full mode set whenever
+	 * the requirement no longer matches the currently held hw_cdm
+	 */
+	if (SDE_FORMAT_IS_YUV(fmt) != !!phys_enc->hw_cdm)
+		crtc_state->mode_changed = true;
+
+	if (wb_roi.w && wb_roi.h) {
+		/* explicit ROI: must match the mode size, fit inside the fb,
+		 * and respect the wb line-width limit
+		 */
+		if (wb_roi.w != mode->hdisplay) {
+			SDE_ERROR("invalid roi w=%d, mode w=%d\n", wb_roi.w,
+					mode->hdisplay);
+			return -EINVAL;
+		} else if (wb_roi.h != mode->vdisplay) {
+			SDE_ERROR("invalid roi h=%d, mode h=%d\n", wb_roi.h,
+					mode->vdisplay);
+			return -EINVAL;
+		} else if (wb_roi.x + wb_roi.w > fb->width) {
+			SDE_ERROR("invalid roi x=%d, w=%d, fb w=%d\n",
+					wb_roi.x, wb_roi.w, fb->width);
+			return -EINVAL;
+		} else if (wb_roi.y + wb_roi.h > fb->height) {
+			SDE_ERROR("invalid roi y=%d, h=%d, fb h=%d\n",
+					wb_roi.y, wb_roi.h, fb->height);
+			return -EINVAL;
+		} else if (wb_roi.w > wb_cfg->sblk->maxlinewidth) {
+			SDE_ERROR("invalid roi w=%d, maxlinewidth=%u\n",
+					wb_roi.w, wb_cfg->sblk->maxlinewidth);
+			return -EINVAL;
+		}
+	} else {
+		/* no ROI: whole fb is written, so the fb itself must match
+		 * the mode and the line-width limit
+		 */
+		if (wb_roi.x || wb_roi.y) {
+			SDE_ERROR("invalid roi x=%d, y=%d\n",
+					wb_roi.x, wb_roi.y);
+			return -EINVAL;
+		} else if (fb->width != mode->hdisplay) {
+			SDE_ERROR("invalid fb w=%d, mode w=%d\n", fb->width,
+					mode->hdisplay);
+			return -EINVAL;
+		} else if (fb->height != mode->vdisplay) {
+			SDE_ERROR("invalid fb h=%d, mode h=%d\n", fb->height,
+					mode->vdisplay);
+			return -EINVAL;
+		} else if (fb->width > wb_cfg->sblk->maxlinewidth) {
+			SDE_ERROR("invalid fb w=%d, maxlinewidth=%u\n",
+					fb->width, wb_cfg->sblk->maxlinewidth);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_wb_flush - flush hardware update
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Accumulates the flush bits for the writeback block (and the CDM block,
+ * when one is attached) and queues them on the assigned CTL.
+ */
+static void sde_encoder_phys_wb_flush(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_hw_ctl *ctl = phys_enc->hw_ctl;
+	struct sde_hw_cdm *cdm = phys_enc->hw_cdm;
+	u32 pending_flush = 0;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (!ctl) {
+		SDE_DEBUG("[wb:%d] no ctl assigned\n", hw_wb->idx - WB_0);
+		return;
+	}
+
+	if (ctl->ops.get_bitmask_wb)
+		ctl->ops.get_bitmask_wb(ctl, &pending_flush, hw_wb->idx);
+
+	if (cdm && ctl->ops.get_bitmask_cdm)
+		ctl->ops.get_bitmask_cdm(ctl, &pending_flush, cdm->idx);
+
+	if (ctl->ops.update_pending_flush)
+		ctl->ops.update_pending_flush(ctl, pending_flush);
+
+	SDE_DEBUG("Flushing CTL_ID %d, flush_mask %x, WB %d\n",
+			ctl->idx - CTL_0, pending_flush, hw_wb->idx - WB_0);
+}
+
+/**
+ * sde_encoder_phys_wb_setup - setup writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Resolves the current output framebuffer, ROI and format, then programs
+ * the OT limit, traffic shaper, CDM, wb framebuffer registers and the CTL
+ * interface config, in that order.
+ */
+static void sde_encoder_phys_wb_setup(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_display_mode mode = phys_enc->cached_mode;
+	struct drm_framebuffer *fb;
+	struct sde_rect *wb_roi = &wb_enc->wb_roi;
+
+	SDE_DEBUG("[mode_set:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode.base.id, mode.name,
+			mode.hdisplay, mode.vdisplay);
+
+	memset(wb_roi, 0, sizeof(struct sde_rect));
+
+	fb = sde_wb_get_output_fb(wb_enc->wb_dev);
+	if (!fb) {
+		SDE_DEBUG("no output framebuffer\n");
+		return;
+	}
+
+	SDE_DEBUG("[fb_id:%u][fb:%u,%u]\n", fb->base.id,
+			fb->width, fb->height);
+
+	/* an empty ROI means "write the whole framebuffer" */
+	sde_wb_get_output_roi(wb_enc->wb_dev, wb_roi);
+	if (wb_roi->w == 0 || wb_roi->h == 0) {
+		wb_roi->x = 0;
+		wb_roi->y = 0;
+		wb_roi->w = fb->width;
+		wb_roi->h = fb->height;
+	}
+
+	SDE_DEBUG("[roi:%u,%u,%u,%u]\n", wb_roi->x, wb_roi->y,
+			wb_roi->w, wb_roi->h);
+
+	wb_enc->wb_fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!wb_enc->wb_fmt) {
+		SDE_ERROR("unsupported output pixel format: %d\n",
+				fb->pixel_format);
+		return;
+	}
+
+	SDE_DEBUG("[fb_fmt:%x,%llx]\n", fb->pixel_format,
+			fb->modifier[0]);
+
+	sde_encoder_phys_wb_set_ot_limit(phys_enc);
+
+	sde_encoder_phys_wb_set_traffic_shaper(phys_enc);
+
+	sde_encoder_phys_setup_cdm(phys_enc, wb_enc->wb_fmt,
+			CDM_CDWN_OUTPUT_WB, wb_roi);
+
+	sde_encoder_phys_wb_setup_fb(phys_enc, fb, wb_roi);
+
+	sde_encoder_phys_wb_setup_cdp(phys_enc);
+}
+
+/**
+ * sde_encoder_phys_wb_unregister_irq - unregister writeback interrupt handler
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Disables the WB_DONE interrupt and detaches the callback installed by
+ * sde_encoder_phys_wb_register_irq(). No-op when irq registration is
+ * bypassed via the debugfs knob.
+ * Return: 0 always
+ */
+static int sde_encoder_phys_wb_unregister_irq(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	if (wb_enc->bypass_irqreg)
+		return 0;
+
+	/* disable first so no new callback fires while unregistering */
+	sde_core_irq_disable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
+	sde_core_irq_unregister_callback(phys_enc->sde_kms, wb_enc->irq_idx,
+			&wb_enc->irq_cb);
+
+	SDE_DEBUG("un-register IRQ for wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+	return 0;
+}
+
+/**
+ * sde_encoder_phys_wb_done_irq - writeback interrupt handler
+ * @arg: Pointer to writeback encoder
+ * @irq_idx: interrupt index
+ *
+ * Propagates the frame-done and vblank events to the parent virtual
+ * encoder and releases everyone waiting on wbdone_complete.
+ */
+static void sde_encoder_phys_wb_done_irq(void *arg, int irq_idx)
+{
+	struct sde_encoder_phys_wb *wb_enc = arg;
+	struct sde_encoder_phys *phys_enc = &wb_enc->base;
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	SDE_DEBUG("[wb:%d,%u]\n", hw_wb->idx - WB_0,
+			wb_enc->frame_count);
+
+	if (phys_enc->parent_ops.handle_frame_done)
+		phys_enc->parent_ops.handle_frame_done(phys_enc->parent,
+				phys_enc, SDE_ENCODER_FRAME_EVENT_DONE);
+
+	/* guard like handle_frame_done above; the original invoked this
+	 * callback unconditionally and would crash on a NULL parent op
+	 */
+	if (phys_enc->parent_ops.handle_vblank_virt)
+		phys_enc->parent_ops.handle_vblank_virt(phys_enc->parent,
+				phys_enc);
+
+	complete_all(&wb_enc->wbdone_complete);
+}
+
+/**
+ * sde_encoder_phys_wb_register_irq - register writeback interrupt handler
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Looks up the WB_DONE interrupt for the assigned writeback block,
+ * installs sde_encoder_phys_wb_done_irq as its callback and enables the
+ * interrupt. No-op when irq registration is bypassed via debugfs.
+ * Return: 0 on success, negative error code on failure
+ */
+static int sde_encoder_phys_wb_register_irq(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_irq_callback *irq_cb = &wb_enc->irq_cb;
+	enum sde_intr_type intr_type;
+	int ret = 0;
+
+	if (wb_enc->bypass_irqreg)
+		return 0;
+
+	intr_type = sde_encoder_phys_wb_get_intr_type(hw_wb);
+	wb_enc->irq_idx = sde_core_irq_idx_lookup(phys_enc->sde_kms,
+			intr_type, hw_wb->idx);
+	if (wb_enc->irq_idx < 0) {
+		SDE_ERROR(
+			"failed to lookup IRQ index for WB_DONE with wb=%d\n",
+			hw_wb->idx - WB_0);
+		return -EINVAL;
+	}
+
+	irq_cb->func = sde_encoder_phys_wb_done_irq;
+	irq_cb->arg = wb_enc;
+	ret = sde_core_irq_register_callback(phys_enc->sde_kms,
+			wb_enc->irq_idx, irq_cb);
+	if (ret) {
+		SDE_ERROR("failed to register IRQ callback WB_DONE\n");
+		return ret;
+	}
+
+	ret = sde_core_irq_enable(phys_enc->sde_kms, &wb_enc->irq_idx, 1);
+	if (ret) {
+		SDE_ERROR(
+			"failed to enable IRQ for WB_DONE, wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+		/* Unregister with the still-valid index BEFORE invalidating
+		 * it; the original cleared irq_idx to -EINVAL first, so the
+		 * unregister call received the wrong index and the callback
+		 * stayed registered.
+		 */
+		sde_core_irq_unregister_callback(phys_enc->sde_kms,
+				wb_enc->irq_idx, irq_cb);
+		wb_enc->irq_idx = -EINVAL;
+		return ret;
+	}
+
+	SDE_DEBUG("registered IRQ for wb %d, irq_idx=%d\n",
+			hw_wb->idx - WB_0,
+			wb_enc->irq_idx);
+
+	return ret;
+}
+
+/**
+ * sde_encoder_phys_wb_mode_set - set display mode
+ * @phys_enc: Pointer to physical encoder
+ * @mode: Pointer to requested display mode
+ * @adj_mode: Pointer to adjusted display mode
+ *
+ * Caches the adjusted mode and re-acquires the CTL (mandatory) and CDM
+ * (optional) blocks previously reserved for this encoder by the resource
+ * manager. On CTL failure or a CDM error, hw_ctl is left NULL so the
+ * subsequent commit fails cleanly.
+ */
+static void sde_encoder_phys_wb_mode_set(
+		struct sde_encoder_phys *phys_enc,
+		struct drm_display_mode *mode,
+		struct drm_display_mode *adj_mode)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_rm *rm = &phys_enc->sde_kms->rm;
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct sde_rm_hw_iter iter;
+	int i, instance;
+
+	phys_enc->cached_mode = *adj_mode;
+	/* slave encoders take the second reserved block of each type */
+	instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+	SDE_DEBUG("[mode_set_cache:%d,%d,\"%s\",%d,%d]\n",
+			hw_wb->idx - WB_0, mode->base.id,
+			mode->name, mode->hdisplay, mode->vdisplay);
+
+	phys_enc->hw_ctl = NULL;
+	phys_enc->hw_cdm = NULL;
+
+	/* Retrieve previously allocated HW Resources. CTL shouldn't fail */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CTL);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_ctl = (struct sde_hw_ctl *) iter.hw;
+	}
+
+	if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+		SDE_ERROR("failed init ctl: %ld\n", PTR_ERR(phys_enc->hw_ctl));
+		phys_enc->hw_ctl = NULL;
+		return;
+	}
+
+	/* CDM is optional */
+	sde_rm_init_hw_iter(&iter, phys_enc->parent->base.id, SDE_HW_BLK_CDM);
+	for (i = 0; i <= instance; i++) {
+		sde_rm_get_hw(rm, &iter);
+		if (i == instance)
+			phys_enc->hw_cdm = (struct sde_hw_cdm *) iter.hw;
+	}
+
+	/* a NULL cdm simply means "not needed"; only a real error is fatal */
+	if (IS_ERR(phys_enc->hw_cdm)) {
+		SDE_ERROR("CDM required but not allocated: %ld\n",
+				PTR_ERR(phys_enc->hw_cdm));
+		phys_enc->hw_ctl = NULL;
+	}
+}
+
+/**
+ * sde_encoder_phys_wb_wait_for_commit_done - wait until request is committed
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Blocks on wbdone_complete until the WB_DONE interrupt fires or the
+ * kickoff timeout expires. On timeout, the raw irq status is polled once
+ * to catch a completion whose interrupt was lost, before declaring an
+ * error frame. Always unregisters the irq and bumps frame_count.
+ * Return: 0 on success, -EWOULDBLOCK if not enabled, -ETIMEDOUT on timeout
+ */
+static int sde_encoder_phys_wb_wait_for_commit_done(
+		struct sde_encoder_phys *phys_enc)
+{
+	unsigned long ret;
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	u32 irq_status;
+	u64 wb_time = 0;
+	int rc = 0;
+
+	/* Return EWOULDBLOCK since we know the wait isn't necessary */
+	if (WARN_ON(phys_enc->enable_state != SDE_ENC_ENABLED))
+		return -EWOULDBLOCK;
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count);
+
+	ret = wait_for_completion_timeout(&wb_enc->wbdone_complete,
+			KICKOFF_TIMEOUT_JIFFIES);
+
+	if (!ret) {
+		SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc),
+				wb_enc->frame_count);
+
+		/* timeout: check whether hw actually finished but the irq
+		 * was missed, and run the handler by hand if so
+		 */
+		irq_status = sde_core_irq_read(phys_enc->sde_kms,
+				wb_enc->irq_idx, true);
+		if (irq_status) {
+			SDE_DEBUG("wb:%d done but irq not triggered\n",
+					wb_enc->wb_dev->wb_idx - WB_0);
+			sde_encoder_phys_wb_done_irq(wb_enc, wb_enc->irq_idx);
+		} else {
+			SDE_ERROR("wb:%d kickoff timed out\n",
+					wb_enc->wb_dev->wb_idx - WB_0);
+			if (phys_enc->parent_ops.handle_frame_done)
+				phys_enc->parent_ops.handle_frame_done(
+					phys_enc->parent, phys_enc,
+					SDE_ENCODER_FRAME_EVENT_ERROR);
+			rc = -ETIMEDOUT;
+		}
+	}
+
+	sde_encoder_phys_wb_unregister_irq(phys_enc);
+
+	if (!rc)
+		wb_enc->end_time = ktime_get();
+
+	/* once operation is done, disable traffic shaper */
+	if (wb_enc->wb_cfg.ts_cfg.en && wb_enc->hw_wb &&
+			wb_enc->hw_wb->ops.setup_trafficshaper) {
+		wb_enc->wb_cfg.ts_cfg.en = false;
+		wb_enc->hw_wb->ops.setup_trafficshaper(
+				wb_enc->hw_wb, &wb_enc->wb_cfg);
+	}
+
+	/* remove vote for iommu/clk/bus */
+	/* NOTE(review): frame_count advances even on timeout, keeping it in
+	 * step with kickoff_count for the disable-path drain check
+	 */
+	wb_enc->frame_count++;
+
+	if (!rc) {
+		wb_time = (u64)ktime_to_us(wb_enc->end_time) -
+				(u64)ktime_to_us(wb_enc->start_time);
+		SDE_DEBUG("wb:%d took %llu us\n",
+			wb_enc->wb_dev->wb_idx - WB_0, wb_time);
+	}
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->frame_count,
+			wb_time);
+
+	return rc;
+}
+
+/**
+ * sde_encoder_phys_wb_prepare_for_kickoff - pre-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Re-arms the done completion, registers the WB_DONE interrupt, programs
+ * the writeback hardware for the pending frame and queues the flush bits.
+ * Records the start timestamp used for the per-frame latency debug print.
+ */
+static void sde_encoder_phys_wb_prepare_for_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	int ret;
+
+	SDE_DEBUG("[wb:%d,%u]\n", wb_enc->hw_wb->idx - WB_0,
+			wb_enc->kickoff_count);
+
+	reinit_completion(&wb_enc->wbdone_complete);
+
+	ret = sde_encoder_phys_wb_register_irq(phys_enc);
+	if (ret) {
+		SDE_ERROR("failed to register irq %d\n", ret);
+		return;
+	}
+
+	wb_enc->kickoff_count++;
+
+	/* set OT limit & enable traffic shaper */
+	sde_encoder_phys_wb_setup(phys_enc);
+
+	sde_encoder_phys_wb_flush(phys_enc);
+
+	/* vote for iommu/clk/bus */
+	wb_enc->start_time = ktime_get();
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc), wb_enc->kickoff_count);
+}
+
+/**
+ * sde_encoder_phys_wb_handle_post_kickoff - post-kickoff processing
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Writeback needs no post-kickoff hardware work; this only emits
+ * debug/event traces.
+ */
+static void sde_encoder_phys_wb_handle_post_kickoff(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
+
+	SDE_EVT32(DRMID(phys_enc->parent), WBID(wb_enc));
+}
+
+/**
+ * sde_encoder_phys_wb_enable - enable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Finds the writeback connector attached to the parent encoder, caches
+ * its wb device handle and marks the encoder enabled.
+ */
+static void sde_encoder_phys_wb_enable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+	struct drm_device *dev;
+	struct drm_connector *connector = NULL;
+	struct drm_connector *iter;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (!wb_enc->base.parent || !wb_enc->base.parent->dev) {
+		SDE_ERROR("invalid drm device\n");
+		return;
+	}
+	dev = wb_enc->base.parent->dev;
+
+	/* find associated writeback connector */
+	mutex_lock(&dev->mode_config.mutex);
+	drm_for_each_connector(iter, dev) {
+		if (iter->encoder == phys_enc->parent) {
+			connector = iter;
+			break;
+		}
+	}
+	mutex_unlock(&dev->mode_config.mutex);
+
+	/* Track the match in a separate variable: the list_for_each_entry
+	 * cursor is never NULL after an unmatched loop, so the original
+	 * "!connector" test could not fire and the cursor pointed at
+	 * bogus head-embedded storage.
+	 */
+	if (!connector) {
+		SDE_ERROR("failed to find writeback connector\n");
+		return;
+	}
+	wb_enc->wb_dev = sde_wb_connector_get_wb(connector);
+
+	phys_enc->enable_state = SDE_ENC_ENABLED;
+}
+
+/**
+ * sde_encoder_phys_wb_disable - disable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Drains any in-flight frame (frame_count lagging kickoff_count), shuts
+ * down the CDM block if one is held, and marks the encoder disabled.
+ */
+static void sde_encoder_phys_wb_disable(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb = wb_enc->hw_wb;
+
+	SDE_DEBUG("[wb:%d]\n", hw_wb->idx - WB_0);
+
+	if (phys_enc->enable_state == SDE_ENC_DISABLED) {
+		SDE_ERROR("encoder is already disabled\n");
+		return;
+	}
+
+	/* a kickoff without a matching done means a frame is still queued */
+	if (wb_enc->frame_count != wb_enc->kickoff_count) {
+		SDE_DEBUG("[wait_for_done: wb:%d, frame:%u, kickoff:%u]\n",
+				hw_wb->idx - WB_0, wb_enc->frame_count,
+				wb_enc->kickoff_count);
+		sde_encoder_phys_wb_wait_for_commit_done(phys_enc);
+	}
+
+	if (phys_enc->hw_cdm && phys_enc->hw_cdm->ops.disable) {
+		SDE_DEBUG_DRIVER("[cdm_disable]\n");
+		phys_enc->hw_cdm->ops.disable(phys_enc->hw_cdm);
+	}
+
+	phys_enc->enable_state = SDE_ENC_DISABLED;
+}
+
+/**
+ * sde_encoder_phys_wb_post_disable - post disable writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Clears the CTL interface configuration after the encoder is disabled.
+ */
+static void sde_encoder_phys_wb_post_disable(
+		struct sde_encoder_phys *phys_enc)
+{
+	struct sde_hw_ctl *ctl;
+
+	if (!phys_enc || !phys_enc->hw_ctl) {
+		SDE_ERROR("invalid encoder %d\n", phys_enc != NULL);
+		return;
+	}
+
+	ctl = phys_enc->hw_ctl;
+	if (ctl->ops.clear_intf_cfg)
+		ctl->ops.clear_intf_cfg(ctl);
+}
+
+/**
+ * sde_encoder_phys_wb_get_hw_resources - get hardware resources
+ * @phys_enc: Pointer to physical encoder
+ * @hw_res: Pointer to encoder resources
+ * @conn_state: Pointer to connector atomic state
+ *
+ * Reports this wb block's interface mode and whether a CDM block is
+ * required (YUV output) for the pending connector state.
+ */
+static void sde_encoder_phys_wb_get_hw_resources(
+		struct sde_encoder_phys *phys_enc,
+		struct sde_encoder_hw_resources *hw_res,
+		struct drm_connector_state *conn_state)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+	struct sde_hw_wb *hw_wb;
+	struct drm_framebuffer *fb;
+	const struct sde_format *fmt;
+
+	/* NOTE(review): hw_res is assumed non-NULL by the writes below;
+	 * only phys_enc is validated here — confirm against callers
+	 */
+	if (!phys_enc) {
+		SDE_ERROR("invalid encoder\n");
+		return;
+	}
+
+	fb = sde_wb_connector_state_get_output_fb(conn_state);
+	if (!fb) {
+		SDE_ERROR("no output framebuffer\n");
+		return;
+	}
+
+	fmt = sde_get_sde_format_ext(fb->pixel_format, fb->modifier,
+			drm_format_num_planes(fb->pixel_format));
+	if (!fmt) {
+		SDE_ERROR("unsupported output pixel format:%d\n",
+				fb->pixel_format);
+		return;
+	}
+
+	hw_wb = wb_enc->hw_wb;
+	hw_res->wbs[hw_wb->idx - WB_0] = phys_enc->intf_mode;
+	hw_res->needs_cdm = SDE_FORMAT_IS_YUV(fmt);
+	SDE_DEBUG("[wb:%d] intf_mode=%d needs_cdm=%d\n", hw_wb->idx - WB_0,
+			hw_res->wbs[hw_wb->idx - WB_0],
+			hw_res->needs_cdm);
+}
+
+#ifdef CONFIG_DEBUG_FS
+/**
+ * sde_encoder_phys_wb_init_debugfs - initialize writeback encoder debugfs
+ * @phys_enc: Pointer to physical encoder
+ * @kms: Pointer to SDE KMS object
+ *
+ * Creates an "encoder_wb<N>" directory with the wbdone_timeout and
+ * bypass_irqreg tunables. On partial failure the directory is removed so
+ * no dangling entries outlive the encoder (the original leaked it).
+ * Return: 0 on success, negative error code on failure
+ */
+static int sde_encoder_phys_wb_init_debugfs(
+		struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
+{
+	struct sde_encoder_phys_wb *wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	if (!phys_enc || !kms || !wb_enc->hw_wb)
+		return -EINVAL;
+
+	snprintf(wb_enc->wb_name, ARRAY_SIZE(wb_enc->wb_name), "encoder_wb%d",
+			wb_enc->hw_wb->idx - WB_0);
+
+	wb_enc->debugfs_root =
+		debugfs_create_dir(wb_enc->wb_name,
+				sde_debugfs_get_root(kms));
+	if (!wb_enc->debugfs_root) {
+		SDE_ERROR("failed to create debugfs\n");
+		return -ENOMEM;
+	}
+
+	if (!debugfs_create_u32("wbdone_timeout", S_IRUGO | S_IWUSR,
+			wb_enc->debugfs_root, &wb_enc->wbdone_timeout)) {
+		SDE_ERROR("failed to create debugfs/wbdone_timeout\n");
+		goto fail;
+	}
+
+	if (!debugfs_create_u32("bypass_irqreg", S_IRUGO | S_IWUSR,
+			wb_enc->debugfs_root, &wb_enc->bypass_irqreg)) {
+		SDE_ERROR("failed to create debugfs/bypass_irqreg\n");
+		goto fail;
+	}
+
+	return 0;
+
+fail:
+	/* don't leave a half-populated directory behind */
+	debugfs_remove_recursive(wb_enc->debugfs_root);
+	wb_enc->debugfs_root = NULL;
+	return -ENOMEM;
+}
+
+/**
+ * sde_encoder_phys_wb_destroy_debugfs - destroy writeback encoder debugfs
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Removes the encoder's debugfs directory and everything under it.
+ */
+static void sde_encoder_phys_wb_destroy_debugfs(
+		struct sde_encoder_phys *phys_enc)
+{
+	if (phys_enc)
+		debugfs_remove_recursive(
+			to_sde_encoder_phys_wb(phys_enc)->debugfs_root);
+}
+#else
+/* CONFIG_DEBUG_FS disabled: no-op stubs so callers need no #ifdefs */
+static int sde_encoder_phys_wb_init_debugfs(
+		struct sde_encoder_phys *phys_enc, struct sde_kms *kms)
+{
+	return 0;
+}
+static void sde_encoder_phys_wb_destroy_debugfs(
+		struct sde_encoder_phys *phys_enc)
+{
+}
+#endif
+
+/**
+ * sde_encoder_phys_wb_destroy - destroy writeback encoder
+ * @phys_enc: Pointer to physical encoder
+ *
+ * Tears down debugfs and frees the encoder wrapper. Safe on NULL input.
+ */
+static void sde_encoder_phys_wb_destroy(struct sde_encoder_phys *phys_enc)
+{
+	struct sde_encoder_phys_wb *wb_enc;
+
+	/* validate before deriving wb_enc; the original dereferenced
+	 * wb_enc->hw_wb in the debug print ahead of this check
+	 */
+	if (!phys_enc)
+		return;
+
+	wb_enc = to_sde_encoder_phys_wb(phys_enc);
+
+	SDE_DEBUG("[wb:%d]\n", wb_enc->hw_wb->idx - WB_0);
+
+	sde_encoder_phys_wb_destroy_debugfs(phys_enc);
+
+	kfree(wb_enc);
+}
+
+/**
+ * sde_encoder_phys_wb_init_ops - initialize writeback operations
+ * @ops: Pointer to encoder operation table
+ *
+ * Populates the physical-encoder op table with the writeback
+ * implementations; trigger_start uses the shared helper.
+ */
+static void sde_encoder_phys_wb_init_ops(struct sde_encoder_phys_ops *ops)
+{
+	ops->is_master = sde_encoder_phys_wb_is_master;
+	ops->mode_set = sde_encoder_phys_wb_mode_set;
+	ops->enable = sde_encoder_phys_wb_enable;
+	ops->disable = sde_encoder_phys_wb_disable;
+	ops->post_disable = sde_encoder_phys_wb_post_disable;
+	ops->destroy = sde_encoder_phys_wb_destroy;
+	ops->atomic_check = sde_encoder_phys_wb_atomic_check;
+	ops->get_hw_resources = sde_encoder_phys_wb_get_hw_resources;
+	ops->wait_for_commit_done = sde_encoder_phys_wb_wait_for_commit_done;
+	ops->prepare_for_kickoff = sde_encoder_phys_wb_prepare_for_kickoff;
+	ops->handle_post_kickoff = sde_encoder_phys_wb_handle_post_kickoff;
+	ops->trigger_start = sde_encoder_helper_trigger_start;
+}
+
+/**
+ * sde_encoder_phys_wb_init - initialize writeback encoder
+ * @p: Pointer to init info structure with initialization params
+ *
+ * Allocates the writeback physical encoder, resolves the iommu address
+ * spaces, mdp top block and the permanently assigned hw_wb block, then
+ * hooks up the op table and debugfs.
+ * Return: Pointer to the new encoder base, or ERR_PTR() on failure
+ */
+struct sde_encoder_phys *sde_encoder_phys_wb_init(
+		struct sde_enc_phys_init_params *p)
+{
+	struct sde_encoder_phys *phys_enc;
+	struct sde_encoder_phys_wb *wb_enc;
+	struct sde_hw_mdp *hw_mdp;
+	int ret = 0;
+
+	SDE_DEBUG("\n");
+
+	wb_enc = kzalloc(sizeof(*wb_enc), GFP_KERNEL);
+	if (!wb_enc) {
+		ret = -ENOMEM;
+		goto fail_alloc;
+	}
+	wb_enc->irq_idx = -EINVAL;
+	wb_enc->wbdone_timeout = KICKOFF_TIMEOUT_MS;
+	init_completion(&wb_enc->wbdone_complete);
+
+	phys_enc = &wb_enc->base;
+
+	/* prefer the non-realtime smmu domains when an NRT vbif exists */
+	if (p->sde_kms->vbif[VBIF_NRT]) {
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_UNSECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_NRT_SECURE];
+	} else {
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_UNSECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		wb_enc->aspace[SDE_IOMMU_DOMAIN_SECURE] =
+			p->sde_kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+	}
+
+	hw_mdp = sde_rm_get_mdp(&p->sde_kms->rm);
+	if (IS_ERR_OR_NULL(hw_mdp)) {
+		/* PTR_ERR(NULL) is 0, which would make this function return
+		 * ERR_PTR(0) == NULL; map a NULL mdp top to a real error
+		 */
+		ret = hw_mdp ? PTR_ERR(hw_mdp) : -EINVAL;
+		SDE_ERROR("failed to init hw_top: %d\n", ret);
+		goto fail_mdp_init;
+	}
+	phys_enc->hw_mdptop = hw_mdp;
+
+	/**
+	 * hw_wb resource permanently assigned to this encoder
+	 * Other resources allocated at atomic commit time by use case
+	 */
+	if (p->wb_idx != SDE_NONE) {
+		struct sde_rm_hw_iter iter;
+
+		sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_WB);
+		while (sde_rm_get_hw(&p->sde_kms->rm, &iter)) {
+			struct sde_hw_wb *hw_wb = (struct sde_hw_wb *)iter.hw;
+
+			if (hw_wb->idx == p->wb_idx) {
+				wb_enc->hw_wb = hw_wb;
+				break;
+			}
+		}
+
+		if (!wb_enc->hw_wb) {
+			ret = -EINVAL;
+			SDE_ERROR("failed to init hw_wb%d\n", p->wb_idx - WB_0);
+			goto fail_wb_init;
+		}
+	} else {
+		ret = -EINVAL;
+		SDE_ERROR("invalid wb_idx\n");
+		goto fail_wb_check;
+	}
+
+	sde_encoder_phys_wb_init_ops(&phys_enc->ops);
+	phys_enc->parent = p->parent;
+	phys_enc->parent_ops = p->parent_ops;
+	phys_enc->sde_kms = p->sde_kms;
+	phys_enc->split_role = p->split_role;
+	phys_enc->intf_mode = INTF_MODE_WB_LINE;
+	phys_enc->intf_idx = p->intf_idx;
+	phys_enc->enc_spinlock = p->enc_spinlock;
+	INIT_LIST_HEAD(&wb_enc->irq_cb.list);
+
+	ret = sde_encoder_phys_wb_init_debugfs(phys_enc, p->sde_kms);
+	if (ret) {
+		SDE_ERROR("failed to init debugfs %d\n", ret);
+		goto fail_debugfs_init;
+	}
+
+	SDE_DEBUG("Created sde_encoder_phys_wb for wb %d\n",
+			wb_enc->hw_wb->idx - WB_0);
+
+	return phys_enc;
+
+fail_debugfs_init:
+fail_wb_init:
+fail_wb_check:
+fail_mdp_init:
+	kfree(wb_enc);
+fail_alloc:
+	return ERR_PTR(ret);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.c b/drivers/gpu/drm/msm/sde/sde_fence.c
new file mode 100644
index 000000000000..6db6f989006f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_fence.c
@@ -0,0 +1,232 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <sync.h>
+#include <sw_sync.h>
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "sde_fence.h"
+
+/* Resolve a sync fence fd to a sync_fence object; takes a reference. */
+void *sde_sync_get(uint64_t fd)
+{
+	/* force signed compare, fdget accepts an int argument */
+	return (signed int)fd >= 0 ? sync_fence_fdget(fd) : NULL;
+}
+
+/* Drop the reference taken by sde_sync_get(); NULL fence is a no-op. */
+void sde_sync_put(void *fence)
+{
+	if (fence)
+		sync_fence_put(fence);
+}
+
+/* Wait for @fence to signal, up to @timeout_ms; -EINVAL on NULL fence. */
+int sde_sync_wait(void *fence, long timeout_ms)
+{
+	if (!fence)
+		return -EINVAL;
+	return sync_fence_wait(fence, timeout_ms);
+}
+
+/*
+ * Pack up to the first four characters of the fence name into a u32,
+ * big-endian (first character ends up in the most significant byte).
+ * Returns 0 for a NULL fence.
+ */
+uint32_t sde_sync_get_name_prefix(void *fence)
+{
+	char *name;
+	uint32_t i, prefix;
+
+	if (!fence)
+		return 0x0;
+
+	name = ((struct sync_fence *)fence)->name;
+	prefix = 0x0;
+	for (i = 0; i < sizeof(uint32_t) && name[i]; ++i)
+		prefix = (prefix << CHAR_BIT) | name[i];
+
+	return prefix;
+}
+
+#if IS_ENABLED(CONFIG_SW_SYNC)
+/**
+ * _sde_fence_create_fd - create fence object and return an fd for it
+ * This function is NOT thread-safe.
+ * @timeline: Timeline to associate with fence
+ * @name: Name for fence
+ * @val: Timeline value at which to signal the fence
+ * Return: File descriptor on success, or error code on error
+ */
+static int _sde_fence_create_fd(void *timeline, const char *name, uint32_t val)
+{
+	struct sync_pt *sync_pt;
+	struct sync_fence *fence;
+	signed int fd = -EINVAL;
+
+	if (!timeline) {
+		SDE_ERROR("invalid timeline\n");
+		goto exit;
+	}
+
+	if (!name)
+		name = "sde_fence";
+
+	/* create sync point */
+	sync_pt = sw_sync_pt_create(timeline, val);
+	if (sync_pt == NULL) {
+		SDE_ERROR("failed to create sync point, %s\n", name);
+		goto exit;
+	}
+
+	/* create fence; on success the fence owns the sync point */
+	fence = sync_fence_create(name, sync_pt);
+	if (fence == NULL) {
+		sync_pt_free(sync_pt);
+		SDE_ERROR("couldn't create fence, %s\n", name);
+		goto exit;
+	}
+
+	/* create fd; sync_fence_install transfers the fence ref to the fd */
+	fd = get_unused_fd_flags(0);
+	if (fd < 0) {
+		SDE_ERROR("failed to get_unused_fd_flags(), %s\n", name);
+		sync_fence_put(fence);
+		goto exit;
+	}
+
+	sync_fence_install(fence, fd);
+exit:
+	return fd;
+}
+
+/**
+ * SDE_FENCE_TIMELINE_NAME - macro for accessing s/w timeline's name
+ * @fence: Pointer to sde fence structure
+ * Returns: Pointer to timeline name string
+ */
+#define SDE_FENCE_TIMELINE_NAME(fence) \
+	(((struct sw_sync_timeline *)fence->timeline)->obj.name)
+
+/* Initialize a fence container: create the s/w timeline, zero the
+ * commit/done counters, and set up the protecting lock.
+ */
+int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id)
+{
+	if (!fence) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	fence->timeline = sw_sync_timeline_create(name ? name : "sde");
+	if (!fence->timeline) {
+		SDE_ERROR("failed to create timeline\n");
+		return -ENOMEM;
+	}
+
+	fence->commit_count = 0;
+	fence->done_count = 0;
+	fence->drm_id = drm_id;
+
+	mutex_init(&fence->fence_lock);
+	return 0;
+
+}
+
+/* Tear down a fence container; destroys the timeline if one was created. */
+void sde_fence_deinit(struct sde_fence *fence)
+{
+	if (!fence) {
+		SDE_ERROR("invalid fence\n");
+		return;
+	}
+
+	mutex_destroy(&fence->fence_lock);
+	if (fence->timeline)
+		sync_timeline_destroy(fence->timeline);
+}
+
+/* Account for a new commit: bump commit_count so fences created for this
+ * commit are bound to the correct timeline value.
+ */
+int sde_fence_prepare(struct sde_fence *fence)
+{
+	if (!fence) {
+		SDE_ERROR("invalid fence\n");
+		return -EINVAL;
+	}
+
+	mutex_lock(&fence->fence_lock);
+	++fence->commit_count;
+	SDE_EVT32(fence->drm_id, fence->commit_count, fence->done_count);
+	mutex_unlock(&fence->fence_lock);
+	return 0;
+}
+
+/* Create an output fence fd that signals @offset commits after the current
+ * one; the fd is returned through @val. Returns 0 on success.
+ */
+int sde_fence_create(struct sde_fence *fence, uint64_t *val, int offset)
+{
+	uint32_t trigger_value;
+	int fd, rc = -EINVAL;
+
+	if (!fence || !fence->timeline || !val) {
+		SDE_ERROR("invalid argument(s), fence %pK, pval %pK\n",
+				fence, val);
+	} else {
+		/*
+		 * Allow created fences to have a constant offset with respect
+		 * to the timeline. This allows us to delay the fence signalling
+		 * w.r.t. the commit completion (e.g., an offset of +1 would
+		 * cause fences returned during a particular commit to signal
+		 * after an additional delay of one commit, rather than at the
+		 * end of the current one).
+		 */
+		mutex_lock(&fence->fence_lock);
+		trigger_value = fence->commit_count + (int32_t)offset;
+		fd = _sde_fence_create_fd(fence->timeline,
+				SDE_FENCE_TIMELINE_NAME(fence),
+				trigger_value);
+		*val = fd;
+
+		SDE_EVT32(fence->drm_id, trigger_value, fd);
+		mutex_unlock(&fence->fence_lock);
+
+		if (fd >= 0)
+			rc = 0;
+	}
+
+	return rc;
+}
+
+/* Advance the done counter for a completed commit and, unless the commit
+ * failed, advance the s/w timeline to signal any outstanding fences.
+ */
+void sde_fence_signal(struct sde_fence *fence, bool is_error)
+{
+	if (!fence || !fence->timeline) {
+		SDE_ERROR("invalid fence, %pK\n", fence);
+		return;
+	}
+
+	mutex_lock(&fence->fence_lock);
+	/* done_count must never overtake commit_count */
+	if ((fence->done_count - fence->commit_count) < 0)
+		++fence->done_count;
+	else
+		SDE_ERROR("detected extra signal attempt!\n");
+
+	/*
+	 * Advance the timeline only on success; on error the done counter
+	 * still moved above, so the timeline catches up by the accumulated
+	 * delta on a later successful signal.
+	 */
+	if (!is_error) {
+		int32_t val;
+
+		val = fence->done_count;
+		val -= ((struct sw_sync_timeline *)
+				fence->timeline)->value;
+		if (val < 0)
+			SDE_ERROR("invalid value\n");
+		else
+			sw_sync_timeline_inc(fence->timeline, (int)val);
+	}
+
+	SDE_EVT32(fence->drm_id, fence->done_count,
+			((struct sw_sync_timeline *) fence->timeline)->value);
+
+	mutex_unlock(&fence->fence_lock);
+}
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_fence.h b/drivers/gpu/drm/msm/sde/sde_fence.h
new file mode 100644
index 000000000000..113d16b916f7
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_fence.h
@@ -0,0 +1,177 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_FENCE_H_
+#define _SDE_FENCE_H_
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8 /* define this if limits.h not available */
+#endif
+
+#ifdef CONFIG_SYNC
+/**
+ * sde_sync_get - Query sync fence object from a file handle
+ *
+ * On success, this function also increments the refcount of the sync fence
+ *
+ * @fd: Integer sync fence handle
+ *
+ * Return: Pointer to sync fence object, or NULL
+ */
+void *sde_sync_get(uint64_t fd);
+
+/**
+ * sde_sync_put - Releases a sync fence object acquired by @sde_sync_get
+ *
+ * This function decrements the sync fence's reference count; the object will
+ * be released if the reference count goes to zero.
+ *
+ * @fence: Pointer to sync fence
+ */
+void sde_sync_put(void *fence);
+
+/**
+ * sde_sync_wait - Query sync fence object from a file handle
+ *
+ * @fence: Pointer to sync fence
+ * @timeout_ms: Time to wait, in milliseconds. Waits forever if timeout_ms < 0
+ *
+ * Return: Zero on success, or -ETIME on timeout
+ */
+int sde_sync_wait(void *fence, long timeout_ms);
+
+/**
+ * sde_sync_get_name_prefix - get integer representation of fence name prefix
+ * @fence: Pointer to opaque fence structure
+ *
+ * Return: 32-bit integer containing first 4 characters of fence name,
+ * big-endian notation
+ */
+uint32_t sde_sync_get_name_prefix(void *fence);
+#else
+/* Stub when CONFIG_SYNC is disabled: no fence can be resolved. */
+static inline void *sde_sync_get(uint64_t fd)
+{
+	return NULL;
+}
+
+/* Stub when CONFIG_SYNC is disabled. */
+static inline void sde_sync_put(void *fence)
+{
+}
+
+/* Stub when CONFIG_SYNC is disabled; waiting trivially succeeds. */
+static inline int sde_sync_wait(void *fence, long timeout_ms)
+{
+	return 0;
+}
+
+/* Stub when CONFIG_SYNC is disabled. */
+static inline uint32_t sde_sync_get_name_prefix(void *fence)
+{
+	return 0x0;
+}
+#endif
+
+/**
+ * struct sde_fence - output fence container structure
+ * @timeline: Pointer to fence timeline
+ * @commit_count: Number of detected commits since bootup
+ * @done_count: Number of completed commits since bootup
+ * @drm_id: ID number of owning DRM Object
+ * @fence_lock: Mutex object to protect local fence variables
+ *
+ * commit_count and done_count are accessed under fence_lock.
+ */
+struct sde_fence {
+	void *timeline;
+	int32_t commit_count;
+	int32_t done_count;
+	uint32_t drm_id;
+	struct mutex fence_lock;
+};
+
+#if IS_ENABLED(CONFIG_SW_SYNC)
+/**
+ * sde_fence_init - initialize fence object
+ * @fence: Pointer to crtc fence object
+ * @name: Timeline name
+ * @drm_id: ID number of owning DRM Object
+ * Returns: Zero on success
+ */
+int sde_fence_init(struct sde_fence *fence,
+ const char *name,
+ uint32_t drm_id);
+
+/**
+ * sde_fence_deinit - deinit fence container
+ * @fence: Pointer fence container
+ */
+void sde_fence_deinit(struct sde_fence *fence);
+
+/**
+ * sde_fence_prepare - prepare to return fences for current commit
+ * @fence: Pointer fence container
+ * Returns: Zero on success
+ */
+int sde_fence_prepare(struct sde_fence *fence);
+
+/**
+ * sde_fence_create - create output fence object
+ * @fence: Pointer fence container
+ * @val: Pointer to output value variable, fence fd will be placed here
+ * @offset: Fence signal commit offset, e.g., +1 to signal on next commit
+ * Returns: Zero on success
+ */
+int sde_fence_create(struct sde_fence *fence, uint64_t *val, int offset);
+
+/**
+ * sde_fence_signal - advance fence timeline to signal outstanding fences
+ * @fence: Pointer fence container
+ * @is_error: Set to non-zero if the commit didn't complete successfully
+ */
+void sde_fence_signal(struct sde_fence *fence, bool is_error);
+#else
+/* Stub when CONFIG_SW_SYNC is disabled; reports success. */
+static inline int sde_fence_init(struct sde_fence *fence,
+		const char *name,
+		uint32_t drm_id)
+{
+	/* do nothing */
+	return 0;
+}
+
+/* Stub when CONFIG_SW_SYNC is disabled. */
+static inline void sde_fence_deinit(struct sde_fence *fence)
+{
+	/* do nothing */
+}
+
+/* Stub when CONFIG_SW_SYNC is disabled; reports success.
+ * Returns int to match the signature of the CONFIG_SW_SYNC version above,
+ * so callers that check the return value compile in both configurations.
+ */
+static inline int sde_fence_prepare(struct sde_fence *fence)
+{
+	/* do nothing */
+	return 0;
+}
+
+/* NOTE(review): no non-stub counterpart of sde_fence_get() exists in this
+ * header; looks vestigial -- confirm before relying on it or removing it.
+ */
+static inline int sde_fence_get(struct sde_fence *fence, uint64_t *val)
+{
+	return -EINVAL;
+}
+
+/* Stub when CONFIG_SW_SYNC is disabled. */
+static inline void sde_fence_signal(struct sde_fence *fence, bool is_error)
+{
+	/* do nothing */
+}
+
+/* Stub when CONFIG_SW_SYNC is disabled; reports success.
+ * NOTE(review): returns 0 without writing *val -- callers that use the
+ * returned fd would read an uninitialized value; confirm this is intended.
+ */
+static inline int sde_fence_create(struct sde_fence *fence, uint64_t *val,
+		int offset)
+{
+	return 0;
+}
+#endif /* IS_ENABLED(CONFIG_SW_SYNC) */
+
+#endif /* _SDE_FENCE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.c b/drivers/gpu/drm/msm/sde/sde_formats.c
new file mode 100644
index 000000000000..340cba536367
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_formats.c
@@ -0,0 +1,1294 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+#include <uapi/media/msm_media_info.h>
+
+#include "sde_kms.h"
+#include "sde_formats.h"
+
+#define SDE_UBWC_META_MACRO_W_H 16
+#define SDE_UBWC_META_BLOCK_SIZE 256
+#define SDE_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define SDE_TILE_HEIGHT_DEFAULT 1
+#define SDE_TILE_HEIGHT_TILED 4
+#define SDE_TILE_HEIGHT_UBWC 4
+#define SDE_TILE_HEIGHT_NV12 8
+
+#define SDE_MAX_IMG_WIDTH 0x3FFF
+#define SDE_MAX_IMG_HEIGHT 0x3FFF
+
+/**
+ * SDE supported format packing, bpp, and other format
+ * information.
+ * SDE currently only supports interleaved RGB formats
+ * UBWC support for a pixel format is indicated by the flag,
+ * there is additional meta data plane for such formats
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = SDE_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
+}
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = SDE_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = SDE_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = SDE_TILE_HEIGHT_DEFAULT \
+}
+
+/**
+ * struct sde_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct sde_media_color_map {
+	uint32_t format;
+	uint32_t color;
+};
+
+static const struct sde_format sde_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ SDE_CHROMA_H2V1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, SDE_CHROMA_H2V1, 4, 2, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, SDE_CHROMA_420, 1, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile formats tables:
+ * These tables hold the A5x tile formats supported.
+ */
+static const struct sde_format sde_format_map_tile[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX,
+ SDE_FETCH_UBWC, 1, SDE_TILE_HEIGHT_TILED),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV,
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+};
+
+/* A5x tiled P010 variant: loosely-packed NV12 with the DX flag set. */
+static const struct sde_format sde_format_map_p010_tile[] = {
+	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+};
+
+/* A5x tiled TP10 variant: tightly-packed NV12 with the DX flag set. */
+static const struct sde_format sde_format_map_tp10_tile[] = {
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC formats table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data will be passed by user-space.
+ */
+static const struct sde_format sde_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, SDE_FORMAT_FLAG_COMPRESSED,
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_COMPRESSED,
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, SDE_FORMAT_FLAG_COMPRESSED,
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, SDE_FORMAT_FLAG_DX | SDE_FORMAT_FLAG_COMPRESSED,
+ SDE_FETCH_UBWC, 2, SDE_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ SDE_CHROMA_420, SDE_FORMAT_FLAG_YUV |
+ SDE_FORMAT_FLAG_COMPRESSED,
+ SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
+};
+
+/* Linear P010 variant: loosely-packed NV12 with the DX flag set. */
+static const struct sde_format sde_format_map_p010[] = {
+	PSEUDO_YUV_FMT_LOOSE(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX),
+		SDE_FETCH_LINEAR, 2),
+};
+
+/* UBWC-compressed P010: 4 planes (2 pixel data + 2 compression metadata). */
+static const struct sde_format sde_format_map_p010_ubwc[] = {
+	PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
+				SDE_FORMAT_FLAG_COMPRESSED),
+		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
+};
+
+/* UBWC-compressed TP10: 4 planes (2 pixel data + 2 compression metadata). */
+static const struct sde_format sde_format_map_tp10_ubwc[] = {
+	PSEUDO_YUV_FMT_TILED(NV12,
+		0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+		C1_B_Cb, C2_R_Cr,
+		SDE_CHROMA_420, (SDE_FORMAT_FLAG_YUV | SDE_FORMAT_FLAG_DX |
+				SDE_FORMAT_FLAG_COMPRESSED),
+		SDE_FETCH_UBWC, 4, SDE_TILE_HEIGHT_NV12),
+};
+
+/* _sde_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: Not using the drm_format_*_subsampling since we have formats
+ * @chroma_sample: chroma sub-sampling layout of the format
+ * @v_sample: output vertical subsample factor (1 or 2)
+ * @h_sample: output horizontal subsample factor (1 or 2)
+ */
+static void _sde_get_v_h_subsample_rate(
+	enum sde_chroma_samp_type chroma_sample,
+	uint32_t *v_sample,
+	uint32_t *h_sample)
+{
+	if (!v_sample || !h_sample)
+		return;
+
+	switch (chroma_sample) {
+	case SDE_CHROMA_H2V1:
+		*v_sample = 1;
+		*h_sample = 2;
+		break;
+	case SDE_CHROMA_H1V2:
+		*v_sample = 2;
+		*h_sample = 1;
+		break;
+	case SDE_CHROMA_420:
+		*v_sample = 2;
+		*h_sample = 2;
+		break;
+	default:
+		*v_sample = 1;
+		*h_sample = 1;
+		break;
+	}
+}
+
+/* Map an SDE format to its msm_media_info UBWC color format code.
+ * Returns a COLOR_FMT_* value, or -1 if the format has no UBWC mapping.
+ */
+static int _sde_format_get_media_color_ubwc(const struct sde_format *fmt)
+{
+	static const struct sde_media_color_map sde_media_ubwc_map[] = {
+		{DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+		{DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+		{DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+	};
+	int color_fmt = -1;
+	int i;
+
+	/* NV12 is special-cased: DX + tight packing selects BPP10 UBWC,
+	 * DX + loose packing selects P010 UBWC, otherwise plain NV12 UBWC.
+	 */
+	if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+		if (SDE_FORMAT_IS_DX(fmt)) {
+			if (fmt->unpack_tight)
+				color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+			else
+				color_fmt = COLOR_FMT_P010_UBWC;
+		} else
+			color_fmt = COLOR_FMT_NV12_UBWC;
+		return color_fmt;
+	}
+
+	for (i = 0; i < ARRAY_SIZE(sde_media_ubwc_map); ++i)
+		if (fmt->base.pixel_format == sde_media_ubwc_map[i].format) {
+			color_fmt = sde_media_ubwc_map[i].color;
+			break;
+		}
+	return color_fmt;
+}
+
+/* Fill @layout with per-plane pitches and sizes for a UBWC/tiled buffer of
+ * @width x @height in format @fmt, using the VENUS_* macros from
+ * msm_media_info. Plane indices 0/1 hold pixel data; 2/3 hold compression
+ * metadata (only when the format is actually UBWC-compressed).
+ * Returns 0 on success, -EINVAL for formats with no UBWC mapping.
+ */
+static int _sde_format_get_plane_sizes_ubwc(
+		const struct sde_format *fmt,
+		const uint32_t width,
+		const uint32_t height,
+		struct sde_hw_fmt_layout *layout)
+{
+	int i;
+	int color;
+	/* tiled-but-uncompressed formats take the same path minus metadata */
+	bool meta = SDE_FORMAT_IS_UBWC(fmt);
+
+	memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
+	layout->format = fmt;
+	layout->width = width;
+	layout->height = height;
+	layout->num_planes = fmt->num_planes;
+
+	color = _sde_format_get_media_color_ubwc(fmt);
+	if (color < 0) {
+		DRM_ERROR("UBWC format not supported for fmt:0x%X\n",
+			fmt->base.pixel_format);
+		return -EINVAL;
+	}
+
+	if (SDE_FORMAT_IS_YUV(layout->format)) {
+		uint32_t y_sclines, uv_sclines;
+		uint32_t y_meta_scanlines = 0;
+		uint32_t uv_meta_scanlines = 0;
+
+		/* plane 0: luma, plane 1: chroma */
+		layout->num_planes = 2;
+		layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+		y_sclines = VENUS_Y_SCANLINES(color, height);
+		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+			y_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+		uv_sclines = VENUS_UV_SCANLINES(color, height);
+		layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+			uv_sclines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		if (!meta)
+			goto done;
+
+		/* planes 2/3: luma and chroma compression metadata */
+		layout->num_planes += 2;
+		layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+		y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+			y_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+		uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+		layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+			uv_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+	} else {
+		uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+		layout->num_planes = 1;
+
+		layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+		rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+		layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+			rgb_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+
+		if (!meta)
+			goto done;
+		/* metadata stays at index 2 even for RGB; index 1 is unused */
+		layout->num_planes += 2;
+		layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+		rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+		layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+			rgb_meta_scanlines, SDE_UBWC_PLANE_SIZE_ALIGNMENT);
+	}
+
+done:
+	for (i = 0; i < SDE_MAX_PLANES; i++)
+		layout->total_size += layout->plane_size[i];
+
+	return 0;
+}
+
+static int _sde_format_get_plane_sizes_linear(
+ const struct sde_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct sde_hw_fmt_layout *layout)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct sde_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == SDE_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _sde_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (SDE_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ for (i = 0; i < SDE_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
+
+int sde_format_get_plane_sizes(
+ const struct sde_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct sde_hw_fmt_layout *layout)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > SDE_MAX_IMG_WIDTH) || (h > SDE_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (SDE_FORMAT_IS_UBWC(fmt) || SDE_FORMAT_IS_TILE(fmt))
+ return _sde_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _sde_format_get_plane_sizes_linear(fmt, w, h, layout);
+}
+
+static int _sde_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *layout)
+{
+ uint32_t base_addr;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = SDE_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (SDE_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** SDE PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Cbcr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ goto done;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** SDE PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ goto done;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+done:
+ return 0;
+}
+
+static int _sde_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Update layout pitches from fb */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] != fb->pitches[i]) {
+ SDE_DEBUG("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ layout->plane_pitch[i] = fb->pitches[i];
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ layout->plane_addr[i] = msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int sde_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[SDE_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > SDE_MAX_IMG_WIDTH) ||
+ (fb->height > SDE_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_sde_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = sde_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < SDE_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (SDE_FORMAT_IS_UBWC(layout->format) ||
+ SDE_FORMAT_IS_TILE(layout->format))
+ ret = _sde_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _sde_format_populate_addrs_linear(aspace, fb, layout);
+
+ /* check if anything changed */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
+
+static void _sde_format_calc_offset_linear(struct sde_hw_fmt_layout *source,
+ u32 x, u32 y)
+{
+ if ((x == 0) && (y == 0))
+ return;
+
+ source->plane_addr[0] += y * source->plane_pitch[0];
+
+ if (source->num_planes == 1) {
+ source->plane_addr[0] += x * source->format->bpp;
+ } else {
+ uint32_t xoff, yoff;
+ uint32_t v_subsample = 1;
+ uint32_t h_subsample = 1;
+
+ _sde_get_v_h_subsample_rate(source->format->chroma_sample,
+ &v_subsample, &h_subsample);
+
+ xoff = x / h_subsample;
+ yoff = y / v_subsample;
+
+ source->plane_addr[0] += x;
+ source->plane_addr[1] += xoff +
+ (yoff * source->plane_pitch[1]);
+ if (source->num_planes == 2) /* pseudo planar */
+ source->plane_addr[1] += xoff;
+ else /* planar */
+ source->plane_addr[2] += xoff +
+ (yoff * source->plane_pitch[2]);
+ }
+}
+
+int sde_format_populate_layout_with_roi(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct sde_rect *roi,
+ struct sde_hw_fmt_layout *layout)
+{
+ int ret;
+
+ ret = sde_format_populate_layout(aspace, fb, layout);
+ if (ret || !roi)
+ return ret;
+
+ if (!roi->w || !roi->h || (roi->x + roi->w > fb->width) ||
+ (roi->y + roi->h > fb->height)) {
+ DRM_ERROR("invalid roi=[%d,%d,%d,%d], fb=[%u,%u]\n",
+ roi->x, roi->y, roi->w, roi->h,
+ fb->width, fb->height);
+ ret = -EINVAL;
+ } else if (SDE_FORMAT_IS_LINEAR(layout->format)) {
+ _sde_format_calc_offset_linear(layout, roi->x, roi->y);
+ layout->width = roi->w;
+ layout->height = roi->h;
+ } else if (roi->x || roi->y || (roi->w != fb->width) ||
+ (roi->h != fb->height)) {
+ DRM_ERROR("non-linear layout with roi not supported\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int sde_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ int ret, i, num_base_fmt_planes;
+ const struct sde_format *fmt;
+ struct sde_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_sde_format(msm_fmt);
+ num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+ ret = sde_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_base_fmt_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct sde_format *sde_get_sde_format_ext(
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len)
+{
+ uint32_t i = 0;
+ uint64_t mod0 = 0;
+ const struct sde_format *fmt = NULL;
+ const struct sde_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently only support exactly zero or one modifier.
+ * All planes used must specify the same modifier.
+ */
+ if (modifiers_len && !modifiers) {
+ SDE_ERROR("invalid modifiers array\n");
+ return NULL;
+ } else if (modifiers && modifiers_len && modifiers[0]) {
+ mod0 = modifiers[0];
+ SDE_DEBUG("plane format modifier 0x%llX\n", mod0);
+ for (i = 1; i < modifiers_len; i++) {
+ if (modifiers[i] != mod0) {
+ SDE_ERROR("bad fmt mod 0x%llX on plane %d\n",
+ modifiers[i], i);
+ return NULL;
+ }
+ }
+ }
+
+ switch (mod0) {
+ case 0:
+ map = sde_format_map;
+ map_size = ARRAY_SIZE(sde_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED | DRM_FORMAT_MOD_QCOM_TILE:
+ map = sde_format_map_ubwc;
+ map_size = ARRAY_SIZE(sde_format_map_ubwc);
+ SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ format);
+ break;
+ case DRM_FORMAT_MOD_QCOM_DX:
+ map = sde_format_map_p010;
+ map_size = ARRAY_SIZE(sde_format_map_p010);
+ SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_DX\n", format);
+ break;
+ case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED):
+ case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+ DRM_FORMAT_MOD_QCOM_TILE):
+ map = sde_format_map_p010_ubwc;
+ map_size = ARRAY_SIZE(sde_format_map_p010_ubwc);
+ SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX\n",
+ format);
+ break;
+ case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+ DRM_FORMAT_MOD_QCOM_TIGHT):
+ case (DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_COMPRESSED |
+ DRM_FORMAT_MOD_QCOM_TIGHT | DRM_FORMAT_MOD_QCOM_TILE):
+ map = sde_format_map_tp10_ubwc;
+ map_size = ARRAY_SIZE(sde_format_map_tp10_ubwc);
+ SDE_DEBUG(
+ "found fmt 0x%X DRM_FORMAT_MOD_QCOM_COMPRESSED/DX/TIGHT\n",
+ format);
+ break;
+ case DRM_FORMAT_MOD_QCOM_TILE:
+ map = sde_format_map_tile;
+ map_size = ARRAY_SIZE(sde_format_map_tile);
+ SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE\n", format);
+ break;
+ case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX):
+ map = sde_format_map_p010_tile;
+ map_size = ARRAY_SIZE(sde_format_map_p010_tile);
+ SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX\n",
+ format);
+ break;
+ case (DRM_FORMAT_MOD_QCOM_TILE | DRM_FORMAT_MOD_QCOM_DX |
+ DRM_FORMAT_MOD_QCOM_TIGHT):
+ map = sde_format_map_tp10_tile;
+ map_size = ARRAY_SIZE(sde_format_map_tp10_tile);
+ SDE_DEBUG("found fmt 0x%X DRM_FORMAT_MOD_QCOM_TILE/DX/TIGHT\n",
+ format);
+ break;
+ default:
+ SDE_ERROR("unsupported format modifier %llX\n", mod0);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ SDE_ERROR("unsupported fmt 0x%X modifier 0x%llX\n",
+ format, mod0);
+ else
+ SDE_DEBUG("fmt %s mod 0x%llX ubwc %d yuv %d\n",
+ drm_get_format_name(format), mod0,
+ SDE_FORMAT_IS_UBWC(fmt),
+ SDE_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+const struct msm_format *sde_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len)
+{
+ const struct sde_format *fmt = sde_get_sde_format_ext(format,
+ modifiers, modifiers_len);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
+
+uint32_t sde_populate_formats(
+ const struct sde_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max)
+{
+ uint32_t i, fourcc_format;
+
+ if (!format_list || !pixel_formats)
+ return 0;
+
+ for (i = 0, fourcc_format = 0;
+ format_list->fourcc_format && i < pixel_formats_max;
+ ++format_list) {
+ /* verify if listed format is in sde_format_map? */
+
+ /* optionally return modified formats */
+ if (pixel_modifiers) {
+ /* assume same modifier for all fb planes */
+ pixel_formats[i] = format_list->fourcc_format;
+ pixel_modifiers[i++] = format_list->modifier;
+ } else {
+ /* assume base formats grouped together */
+ if (fourcc_format != format_list->fourcc_format) {
+ fourcc_format = format_list->fourcc_format;
+ pixel_formats[i++] = fourcc_format;
+ }
+ }
+ }
+
+ return i;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_formats.h b/drivers/gpu/drm/msm/sde/sde_formats.h
new file mode 100644
index 000000000000..ec8f97da4a41
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_formats.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_FORMATS_H
+#define _SDE_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * sde_get_sde_format_ext() - Returns sde format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifiers: format modifier array from client, one per plane
+ * @modifiers_len: number of entries in the modifiers array (one per plane)
+ */
+const struct sde_format *sde_get_sde_format_ext(
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len);
+
+#define sde_get_sde_format(f) sde_get_sde_format_ext(f, NULL, 0)
+
+/**
+ * sde_get_msm_format - get an sde_format by its msm_format base
+ * callback function registers with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: format modifier array from client, one per plane
+ * @modifiers_len: number of entries in the modifiers array (one per plane)
+ */
+const struct msm_format *sde_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t *modifiers,
+ const uint32_t modifiers_len);
+
+/**
+ * sde_populate_formats - populate the given array with fourcc codes supported
+ * @format_list: pointer to list of possible formats
+ * @pixel_formats: array to populate with fourcc codes
+ * @pixel_modifiers: array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t sde_populate_formats(
+ const struct sde_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max);
+
+/**
+ * sde_format_get_plane_sizes - calculate size and layout of given buffer format
+ * @fmt: pointer to sde_format
+ * @w: width of the buffer
+ * @h: height of the buffer
+ * @layout: layout of the buffer
+ *
+ * Return: 0 on success, -EINVAL or -ERANGE on failure
+ */
+int sde_format_get_plane_sizes(
+ const struct sde_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct sde_hw_fmt_layout *layout);
+
+/**
+ * sde_format_check_modified_format - validate format and buffers for
+ * sde non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_fmt base pointer of an sde_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int sde_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * sde_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, -EAGAIN if success but the addresses
+ * are the same as before or 0 if new addresses were populated
+ */
+int sde_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct sde_hw_fmt_layout *fmtl);
+
+/**
+ * sde_format_populate_layout_with_roi - populate the given format layout
+ * based on mmu, fb, roi, and format found in the fb
+ * @aspace: mmu id handle
+ * @fb: framebuffer pointer
+ * @roi: region of interest (optional)
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure, 0 on success
+ */
+int sde_format_populate_layout_with_roi(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct sde_rect *roi,
+ struct sde_hw_fmt_layout *fmtl);
+
+#endif /* _SDE_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.c b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
new file mode 100644
index 000000000000..95a25462cadc
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.c
@@ -0,0 +1,2428 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+
+#include <linux/of_platform.h>
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_catalog_format.h"
+#include "sde_kms.h"
+
+/*************************************************************
+ * MACRO DEFINITION
+ *************************************************************/
+
+/**
+ * Max hardware block in certain hardware. For ex: sspp pipes
+ * can have QSEED, pcc, igc, pa, csc, qos entries, etc. This count is
+ * 64 based on software design. It should be increased if any of the
+ * hardware block has more subblocks.
+ */
+#define MAX_SDE_HW_BLK 64
+
+/* each entry will have register address and bit offset in that register */
+#define MAX_BIT_OFFSET 2
+
+/* default line width for sspp */
+#define DEFAULT_SDE_LINE_WIDTH 2048
+
+/* max mixer blend stages */
+#define DEFAULT_SDE_MIXER_BLENDSTAGES 7
+
+/* max bank bit for macro tile and ubwc format */
+#define DEFAULT_SDE_HIGHEST_BANK_BIT 15
+
+/* default hardware block size if dtsi entry is not present */
+#define DEFAULT_SDE_HW_BLOCK_LEN 0x100
+
+/* default rects for multi rect case */
+#define DEFAULT_SDE_SSPP_MAX_RECTS 1
+
+/* total number of intf - dp, dsi, hdmi */
+#define INTF_COUNT 3
+
+#define MAX_SSPP_UPSCALE 20
+#define MAX_SSPP_DOWNSCALE 4
+#define SSPP_UNITY_SCALE 1
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_SPLIT_DISPLAY_CTL 2
+#define MAX_PP_SPLIT_DISPLAY_CTL 1
+
+#define MDSS_BASE_OFFSET 0x0
+
+#define ROT_LM_OFFSET 3
+#define LINE_LM_OFFSET 5
+#define LINE_MODE_WB_OFFSET 2
+
+/* maximum XIN halt timeout in usec */
+#define VBIF_XIN_HALT_TIMEOUT 0x4000
+
+#define DEFAULT_CREQ_LUT_NRT 0x0
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+
+/* access property value based on prop_type and hardware index */
+#define PROP_VALUE_ACCESS(p, i, j) ((p + i)->value[j])
+
+/*
+ * access element within PROP_TYPE_BIT_OFFSET_ARRAYs based on prop_type,
+ * hardware index and offset array index
+ */
+#define PROP_BITVALUE_ACCESS(p, i, j, k) ((p + i)->bit_value[j][k])
+
+/*************************************************************
+ * DTSI PROPERTY INDEX
+ *************************************************************/
+enum {
+ HW_OFF,
+ HW_LEN,
+ HW_DISP,
+ HW_PROP_MAX,
+};
+
+enum sde_prop {
+ SDE_OFF,
+ SDE_LEN,
+ SSPP_LINEWIDTH,
+ MIXER_LINEWIDTH,
+ MIXER_BLEND,
+ WB_LINEWIDTH,
+ BANK_BIT,
+ QSEED_TYPE,
+ CSC_TYPE,
+ PANIC_PER_PIPE,
+ CDP,
+ SRC_SPLIT,
+ SDE_PROP_MAX,
+};
+
+enum {
+ PERF_MAX_BW_LOW,
+ PERF_MAX_BW_HIGH,
+ PERF_PROP_MAX,
+};
+
+enum {
+ SSPP_OFF,
+ SSPP_SIZE,
+ SSPP_TYPE,
+ SSPP_XIN,
+ SSPP_CLK_CTRL,
+ SSPP_CLK_STATUS,
+ SSPP_DANGER,
+ SSPP_SAFE,
+ SSPP_MAX_RECTS,
+ SSPP_SCALE_SIZE,
+ SSPP_VIG_BLOCKS,
+ SSPP_RGB_BLOCKS,
+ SSPP_PROP_MAX,
+};
+
+enum {
+ VIG_QSEED_OFF,
+ VIG_QSEED_LEN,
+ VIG_CSC_OFF,
+ VIG_HSIC_PROP,
+ VIG_MEMCOLOR_PROP,
+ VIG_PCC_PROP,
+ VIG_PROP_MAX,
+};
+
+enum {
+ RGB_SCALER_OFF,
+ RGB_SCALER_LEN,
+ RGB_PCC_PROP,
+ RGB_PROP_MAX,
+};
+
+enum {
+ INTF_OFF,
+ INTF_LEN,
+ INTF_PREFETCH,
+ INTF_TYPE,
+ INTF_PROP_MAX,
+};
+
+enum {
+ PP_OFF,
+ PP_LEN,
+ TE_OFF,
+ TE_LEN,
+ TE2_OFF,
+ TE2_LEN,
+ DSC_OFF,
+ DSC_LEN,
+ PP_SLAVE,
+ PP_PROP_MAX,
+};
+
+enum {
+ DSPP_OFF,
+ DSPP_SIZE,
+ DSPP_BLOCKS,
+ DSPP_PROP_MAX,
+};
+
+enum {
+ DSPP_IGC_PROP,
+ DSPP_PCC_PROP,
+ DSPP_GC_PROP,
+ DSPP_HSIC_PROP,
+ DSPP_MEMCOLOR_PROP,
+ DSPP_SIXZONE_PROP,
+ DSPP_GAMUT_PROP,
+ DSPP_DITHER_PROP,
+ DSPP_HIST_PROP,
+ DSPP_VLUT_PROP,
+ DSPP_BLOCKS_PROP_MAX,
+};
+
+enum {
+ AD_OFF,
+ AD_VERSION,
+ AD_PROP_MAX,
+};
+
+enum {
+ MIXER_OFF,
+ MIXER_LEN,
+ MIXER_BLOCKS,
+ MIXER_DISP,
+ MIXER_PROP_MAX,
+};
+
+enum {
+ MIXER_GC_PROP,
+ MIXER_BLOCKS_PROP_MAX,
+};
+
+enum {
+ WB_OFF,
+ WB_LEN,
+ WB_ID,
+ WB_XIN_ID,
+ WB_CLK_CTRL,
+ WB_PROP_MAX,
+};
+
+enum {
+ VBIF_OFF,
+ VBIF_LEN,
+ VBIF_ID,
+ VBIF_DEFAULT_OT_RD_LIMIT,
+ VBIF_DEFAULT_OT_WR_LIMIT,
+ VBIF_DYNAMIC_OT_RD_LIMIT,
+ VBIF_DYNAMIC_OT_WR_LIMIT,
+ VBIF_PROP_MAX,
+};
+
+/*************************************************************
+ * dts property definition
+ *************************************************************/
+enum prop_type {
+ PROP_TYPE_BOOL,
+ PROP_TYPE_U32,
+ PROP_TYPE_U32_ARRAY,
+ PROP_TYPE_STRING,
+ PROP_TYPE_STRING_ARRAY,
+ PROP_TYPE_BIT_OFFSET_ARRAY,
+ PROP_TYPE_NODE,
+};
+
+struct sde_prop_type {
+ /* use property index from enum property for readability purpose */
+ u8 id;
+ /* it should be property name based on dtsi documentation */
+ char *prop_name;
+ /**
+ * if property is marked mandatory then it will fail parsing
+ * when property is not present
+ */
+ u32 is_mandatory;
+ /* property type based on "enum prop_type" */
+ enum prop_type type;
+};
+
+struct sde_prop_value {
+ u32 value[MAX_SDE_HW_BLK];
+ u32 bit_value[MAX_SDE_HW_BLK][MAX_BIT_OFFSET];
+};
+
+/*************************************************************
+ * dts property list
+ *************************************************************/
+static struct sde_prop_type sde_prop[] = {
+ {SDE_OFF, "qcom,sde-off", true, PROP_TYPE_U32},
+ {SDE_LEN, "qcom,sde-len", false, PROP_TYPE_U32},
+ {SSPP_LINEWIDTH, "qcom,sde-sspp-linewidth", false, PROP_TYPE_U32},
+ {MIXER_LINEWIDTH, "qcom,sde-mixer-linewidth", false, PROP_TYPE_U32},
+ {MIXER_BLEND, "qcom,sde-mixer-blendstages", false, PROP_TYPE_U32},
+ {WB_LINEWIDTH, "qcom,sde-wb-linewidth", false, PROP_TYPE_U32},
+ {BANK_BIT, "qcom,sde-highest-bank-bit", false, PROP_TYPE_U32},
+ {QSEED_TYPE, "qcom,sde-qseed-type", false, PROP_TYPE_STRING},
+ {CSC_TYPE, "qcom,sde-csc-type", false, PROP_TYPE_STRING},
+ {PANIC_PER_PIPE, "qcom,sde-panic-per-pipe", false, PROP_TYPE_BOOL},
+ {CDP, "qcom,sde-has-cdp", false, PROP_TYPE_BOOL},
+ {SRC_SPLIT, "qcom,sde-has-src-split", false, PROP_TYPE_BOOL},
+};
+
+static struct sde_prop_type sde_perf_prop[] = {
+ {PERF_MAX_BW_LOW, "qcom,sde-max-bw-low-kbps", false, PROP_TYPE_U32},
+ {PERF_MAX_BW_HIGH, "qcom,sde-max-bw-high-kbps", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type sspp_prop[] = {
+ {SSPP_OFF, "qcom,sde-sspp-off", true, PROP_TYPE_U32_ARRAY},
+ {SSPP_SIZE, "qcom,sde-sspp-src-size", false, PROP_TYPE_U32},
+ {SSPP_TYPE, "qcom,sde-sspp-type", true, PROP_TYPE_STRING_ARRAY},
+ {SSPP_XIN, "qcom,sde-sspp-xin-id", true, PROP_TYPE_U32_ARRAY},
+ {SSPP_CLK_CTRL, "qcom,sde-sspp-clk-ctrl", false,
+ PROP_TYPE_BIT_OFFSET_ARRAY},
+ {SSPP_CLK_STATUS, "qcom,sde-sspp-clk-status", false,
+ PROP_TYPE_BIT_OFFSET_ARRAY},
+ {SSPP_DANGER, "qcom,sde-sspp-danger-lut", false, PROP_TYPE_U32_ARRAY},
+ {SSPP_SAFE, "qcom,sde-sspp-safe-lut", false, PROP_TYPE_U32_ARRAY},
+ {SSPP_MAX_RECTS, "qcom,sde-sspp-max-rects", false, PROP_TYPE_U32_ARRAY},
+ {SSPP_SCALE_SIZE, "qcom,sde-sspp-scale-size", false, PROP_TYPE_U32},
+ {SSPP_VIG_BLOCKS, "qcom,sde-sspp-vig-blocks", false, PROP_TYPE_NODE},
+ {SSPP_RGB_BLOCKS, "qcom,sde-sspp-rgb-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type vig_prop[] = {
+ {VIG_QSEED_OFF, "qcom,sde-vig-qseed-off", false, PROP_TYPE_U32},
+ {VIG_QSEED_LEN, "qcom,sde-vig-qseed-size", false, PROP_TYPE_U32},
+ {VIG_CSC_OFF, "qcom,sde-vig-csc-off", false, PROP_TYPE_U32},
+ {VIG_HSIC_PROP, "qcom,sde-vig-hsic", false, PROP_TYPE_U32_ARRAY},
+ {VIG_MEMCOLOR_PROP, "qcom,sde-vig-memcolor", false,
+ PROP_TYPE_U32_ARRAY},
+ {VIG_PCC_PROP, "qcom,sde-vig-pcc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type rgb_prop[] = {
+ {RGB_SCALER_OFF, "qcom,sde-rgb-scaler-off", false, PROP_TYPE_U32},
+ {RGB_SCALER_LEN, "qcom,sde-rgb-scaler-size", false, PROP_TYPE_U32},
+ {RGB_PCC_PROP, "qcom,sde-rgb-pcc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type ctl_prop[] = {
+ {HW_OFF, "qcom,sde-ctl-off", true, PROP_TYPE_U32_ARRAY},
+ {HW_LEN, "qcom,sde-ctl-size", false, PROP_TYPE_U32},
+ {HW_DISP, "qcom,sde-ctl-display-pref", false, PROP_TYPE_STRING_ARRAY},
+};
+
+static struct sde_prop_type mixer_prop[] = {
+ {MIXER_OFF, "qcom,sde-mixer-off", true, PROP_TYPE_U32_ARRAY},
+ {MIXER_LEN, "qcom,sde-mixer-size", false, PROP_TYPE_U32},
+ {MIXER_BLOCKS, "qcom,sde-mixer-blocks", false, PROP_TYPE_NODE},
+ {MIXER_DISP, "qcom,sde-mixer-display-pref", false,
+ PROP_TYPE_STRING_ARRAY},
+};
+
+static struct sde_prop_type mixer_blocks_prop[] = {
+ {MIXER_GC_PROP, "qcom,sde-mixer-gc", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type dspp_prop[] = {
+ {DSPP_OFF, "qcom,sde-dspp-off", true, PROP_TYPE_U32_ARRAY},
+ {DSPP_SIZE, "qcom,sde-dspp-size", false, PROP_TYPE_U32},
+ {DSPP_BLOCKS, "qcom,sde-dspp-blocks", false, PROP_TYPE_NODE},
+};
+
+static struct sde_prop_type dspp_blocks_prop[] = {
+ {DSPP_IGC_PROP, "qcom,sde-dspp-igc", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_PCC_PROP, "qcom,sde-dspp-pcc", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_GC_PROP, "qcom,sde-dspp-gc", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_HSIC_PROP, "qcom,sde-dspp-hsic", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_MEMCOLOR_PROP, "qcom,sde-dspp-memcolor", false,
+ PROP_TYPE_U32_ARRAY},
+ {DSPP_SIXZONE_PROP, "qcom,sde-dspp-sixzone", false,
+ PROP_TYPE_U32_ARRAY},
+ {DSPP_GAMUT_PROP, "qcom,sde-dspp-gamut", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_DITHER_PROP, "qcom,sde-dspp-dither", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_HIST_PROP, "qcom,sde-dspp-hist", false, PROP_TYPE_U32_ARRAY},
+ {DSPP_VLUT_PROP, "qcom,sde-dspp-vlut", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type ad_prop[] = {
+ {AD_OFF, "qcom,sde-dspp-ad-off", false, PROP_TYPE_U32_ARRAY},
+ {AD_VERSION, "qcom,sde-dspp-ad-version", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type pp_prop[] = {
+ {PP_OFF, "qcom,sde-pp-off", true, PROP_TYPE_U32_ARRAY},
+ {PP_LEN, "qcom,sde-pp-size", false, PROP_TYPE_U32},
+ {TE_OFF, "qcom,sde-te-off", false, PROP_TYPE_U32_ARRAY},
+ {TE_LEN, "qcom,sde-te-size", false, PROP_TYPE_U32},
+ {TE2_OFF, "qcom,sde-te2-off", false, PROP_TYPE_U32_ARRAY},
+ {TE2_LEN, "qcom,sde-te2-size", false, PROP_TYPE_U32},
+ {DSC_OFF, "qcom,sde-dsc-off", false, PROP_TYPE_U32_ARRAY},
+ {DSC_LEN, "qcom,sde-dsc-size", false, PROP_TYPE_U32},
+ {PP_SLAVE, "qcom,sde-pp-slave", false, PROP_TYPE_U32_ARRAY},
+};
+
+static struct sde_prop_type cdm_prop[] = {
+ {HW_OFF, "qcom,sde-cdm-off", false, PROP_TYPE_U32_ARRAY},
+ {HW_LEN, "qcom,sde-cdm-size", false, PROP_TYPE_U32},
+};
+
+static struct sde_prop_type intf_prop[] = {
+ {INTF_OFF, "qcom,sde-intf-off", true, PROP_TYPE_U32_ARRAY},
+ {INTF_LEN, "qcom,sde-intf-size", false, PROP_TYPE_U32},
+ {INTF_PREFETCH, "qcom,sde-intf-max-prefetch-lines", false,
+ PROP_TYPE_U32_ARRAY},
+ {INTF_TYPE, "qcom,sde-intf-type", false, PROP_TYPE_STRING_ARRAY},
+};
+
+static struct sde_prop_type wb_prop[] = {
+ {WB_OFF, "qcom,sde-wb-off", true, PROP_TYPE_U32_ARRAY},
+ {WB_LEN, "qcom,sde-wb-size", false, PROP_TYPE_U32},
+ {WB_ID, "qcom,sde-wb-id", true, PROP_TYPE_U32_ARRAY},
+ {WB_XIN_ID, "qcom,sde-wb-xin-id", false, PROP_TYPE_U32_ARRAY},
+ {WB_CLK_CTRL, "qcom,sde-wb-clk-ctrl", false,
+ PROP_TYPE_BIT_OFFSET_ARRAY},
+};
+
+/* VBIF DT properties: block offsets plus default and dynamic outstanding
+ * transaction (OT) read/write limits.
+ */
+static struct sde_prop_type vbif_prop[] = {
+ {VBIF_OFF, "qcom,sde-vbif-off", true, PROP_TYPE_U32_ARRAY},
+ {VBIF_LEN, "qcom,sde-vbif-size", false, PROP_TYPE_U32},
+ {VBIF_ID, "qcom,sde-vbif-id", false, PROP_TYPE_U32_ARRAY},
+ {VBIF_DEFAULT_OT_RD_LIMIT, "qcom,sde-vbif-default-ot-rd-limit", false,
+ PROP_TYPE_U32},
+ {VBIF_DEFAULT_OT_WR_LIMIT, "qcom,sde-vbif-default-ot-wr-limit", false,
+ PROP_TYPE_U32},
+ {VBIF_DYNAMIC_OT_RD_LIMIT, "qcom,sde-vbif-dynamic-ot-rd-limit", false,
+ PROP_TYPE_U32_ARRAY},
+ {VBIF_DYNAMIC_OT_WR_LIMIT, "qcom,sde-vbif-dynamic-ot-wr-limit", false,
+ PROP_TYPE_U32_ARRAY},
+};
+
+/*************************************************************
+ * static API list
+ *************************************************************/
+
+/**
+ * _sde_copy_formats - copy formats from src_list to dst_list
+ * @dst_list: pointer to destination list where to copy formats
+ * @dst_list_size: size of destination list
+ * @dst_list_pos: starting position on the list where to copy formats
+ * @src_list: pointer to source list where to copy formats from
+ * @src_list_size: size of source list
+ *
+ * Copies entries until the destination fills (one slot is reserved for
+ * the terminator), the source count is exhausted, or a zero
+ * fourcc_format terminator is seen in the source; then writes a zero
+ * fourcc_format terminator at the destination cursor.
+ *
+ * Return: number of elements populated
+ */
+static uint32_t _sde_copy_formats(
+	struct sde_format_extended *dst_list,
+	uint32_t dst_list_size,
+	uint32_t dst_list_pos,
+	const struct sde_format_extended *src_list,
+	uint32_t src_list_size)
+{
+	uint32_t cur_pos, i;
+
+	/*
+	 * Reject dst_list_size == 0 explicitly: (dst_list_size - 1) is an
+	 * unsigned expression and would wrap to UINT32_MAX, defeating both
+	 * bounds checks below and allowing out-of-bounds writes.
+	 */
+	if (!dst_list || !src_list || !dst_list_size ||
+			(dst_list_pos >= (dst_list_size - 1)))
+		return 0;
+
+	for (i = 0, cur_pos = dst_list_pos;
+		(cur_pos < (dst_list_size - 1)) && (i < src_list_size)
+		&& src_list[i].fourcc_format; ++i, ++cur_pos)
+		dst_list[cur_pos] = src_list[i];
+
+	/* always terminate the destination list */
+	dst_list[cur_pos].fourcc_format = 0;
+
+	return i;
+}
+
+/*
+ * _parse_dt_u32_handler - read a u32 array DT property into @offsets
+ * @np: device node to read from
+ * @prop_name: property name
+ * @offsets: destination array (caller guarantees capacity for @len)
+ * @len: number of u32 elements to read; capped at MAX_SDE_HW_BLK
+ * @mandatory: selects error vs debug logging on read failure
+ *
+ * Returns 0 on success, -E2BIG if @len exceeds the backing storage, or
+ * the of_property_read_u32_array() error code otherwise.
+ */
+static int _parse_dt_u32_handler(struct device_node *np,
+ char *prop_name, u32 *offsets, int len, bool mandatory)
+{
+ int rc = -EINVAL;
+
+ /* offsets points into fixed-size prop_value storage; refuse overruns */
+ if (len > MAX_SDE_HW_BLK) {
+ SDE_ERROR(
+ "prop: %s tries out of bound access for u32 array read len: %d\n",
+ prop_name, len);
+ return -E2BIG;
+ }
+
+ rc = of_property_read_u32_array(np, prop_name, offsets, len);
+ if (rc && mandatory)
+ SDE_ERROR("mandatory prop: %s u32 array read len:%d\n",
+ prop_name, len);
+ else if (rc)
+ SDE_DEBUG("optional prop: %s u32 array read len:%d\n",
+ prop_name, len);
+
+ return rc;
+}
+
+/*
+ * _parse_dt_bit_offset - parse a (register offset, bit offset) pair array
+ * @np: device node to read from
+ * @prop_name: property name holding big-endian u32 pairs
+ * @prop_value: storage written via PROP_BITVALUE_ACCESS
+ * @prop_index: property slot within @prop_value
+ * @count: expected pair count; currently unused (length comes from DT)
+ * @mandatory: selects error vs debug logging when the property is absent
+ *
+ * Returns 0 on success (or when an optional property is missing),
+ * -E2BIG on oversized input, -EINVAL when a mandatory property is absent.
+ */
+static int _parse_dt_bit_offset(struct device_node *np,
+ char *prop_name, struct sde_prop_value *prop_value, u32 prop_index,
+ u32 count, bool mandatory)
+{
+ int rc = 0, len, i, j;
+ const u32 *arr;
+
+ arr = of_get_property(np, prop_name, &len);
+ if (arr) {
+ len /= sizeof(u32);
+ /* values come in pairs; silently drop a trailing unpaired cell */
+ len &= ~0x1;
+
+ if (len > (MAX_SDE_HW_BLK * MAX_BIT_OFFSET)) {
+ SDE_ERROR(
+ "prop: %s len: %d will lead to out of bound access\n",
+ prop_name, len / MAX_BIT_OFFSET);
+ return -E2BIG;
+ }
+
+ /* raw DT cells are big-endian; store host-order (reg, bit) pairs */
+ for (i = 0, j = 0; i < len; j++) {
+ PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 0) =
+ be32_to_cpu(arr[i]);
+ i++;
+ PROP_BITVALUE_ACCESS(prop_value, prop_index, j, 1) =
+ be32_to_cpu(arr[i]);
+ i++;
+ }
+ } else {
+ if (mandatory) {
+ SDE_ERROR("error mandatory property '%s' not found\n",
+ prop_name);
+ rc = -EINVAL;
+ } else {
+ SDE_DEBUG("error optional property '%s' not found\n",
+ prop_name);
+ }
+ }
+
+ return rc;
+}
+
+/*
+ * _validate_dt_entry - validate DT properties described by @sde_prop
+ * @np: device node to inspect
+ * @sde_prop: property descriptor table
+ * @prop_size: number of entries in @sde_prop
+ * @prop_count: out array of per-property element counts
+ * @off_count: optional; when non-NULL, receives the element count of the
+ *	table's first (offset) property and array properties are checked
+ *	for consistency against it
+ *
+ * Return: 0 when all mandatory properties are present and consistent
+ * (missing optional properties are tolerated), negative error otherwise.
+ */
+static int _validate_dt_entry(struct device_node *np,
+	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+	int *off_count)
+{
+	int rc = 0, i, val;
+	struct device_node *snp = NULL;
+
+	if (off_count) {
+		*off_count = of_property_count_u32_elems(np,
+				sde_prop[0].prop_name);
+		if ((*off_count > MAX_BLOCKS) || (*off_count < 0)) {
+			if (sde_prop[0].is_mandatory) {
+				SDE_ERROR(
+					"invalid hw offset prop name:%s count: %d\n",
+					sde_prop[0].prop_name, *off_count);
+				rc = -EINVAL;
+			}
+			*off_count = 0;
+			memset(prop_count, 0, sizeof(int) * prop_size);
+			return rc;
+		}
+	}
+
+	for (i = 0; i < prop_size; i++) {
+		switch (sde_prop[i].type) {
+		case PROP_TYPE_U32:
+			rc = of_property_read_u32(np, sde_prop[i].prop_name,
+				&val);
+			break;
+		case PROP_TYPE_U32_ARRAY:
+			prop_count[i] = of_property_count_u32_elems(np,
+				sde_prop[i].prop_name);
+			if (prop_count[i] < 0)
+				rc = prop_count[i];
+			break;
+		case PROP_TYPE_STRING_ARRAY:
+			prop_count[i] = of_property_count_strings(np,
+				sde_prop[i].prop_name);
+			if (prop_count[i] < 0)
+				rc = prop_count[i];
+			break;
+		case PROP_TYPE_BIT_OFFSET_ARRAY:
+			/*
+			 * of_get_property() writes the length only when the
+			 * property exists; pre-clear val so a missing
+			 * property yields a count of 0 instead of a count
+			 * derived from an uninitialized length.
+			 */
+			val = 0;
+			of_get_property(np, sde_prop[i].prop_name, &val);
+			prop_count[i] = val / (MAX_BIT_OFFSET * sizeof(u32));
+			break;
+		case PROP_TYPE_NODE:
+			snp = of_get_child_by_name(np,
+					sde_prop[i].prop_name);
+			if (!snp)
+				rc = -EINVAL;
+			break;
+		default:
+			SDE_DEBUG("invalid property type:%d\n",
+				sde_prop[i].type);
+			break;
+		}
+		SDE_DEBUG(
+			"prop id:%d prop name:%s prop type:%d prop_count:%d\n",
+			i, sde_prop[i].prop_name,
+			sde_prop[i].type, prop_count[i]);
+
+		/* only missing mandatory scalar/node properties are fatal */
+		if (rc && sde_prop[i].is_mandatory &&
+			((sde_prop[i].type == PROP_TYPE_U32) ||
+			 (sde_prop[i].type == PROP_TYPE_NODE))) {
+			SDE_ERROR("prop:%s not present\n",
+				sde_prop[i].prop_name);
+			goto end;
+		} else if (sde_prop[i].type == PROP_TYPE_U32 ||
+			sde_prop[i].type == PROP_TYPE_BOOL ||
+			sde_prop[i].type == PROP_TYPE_NODE) {
+			rc = 0;
+			continue;
+		}
+
+		/* array properties must match the offset-array length */
+		if (off_count && (prop_count[i] != *off_count) &&
+				sde_prop[i].is_mandatory) {
+			SDE_ERROR(
+				"prop:%s count:%d is different compared to offset array:%d\n",
+				sde_prop[i].prop_name,
+				prop_count[i], *off_count);
+			rc = -EINVAL;
+			goto end;
+		} else if (off_count && prop_count[i] != *off_count) {
+			SDE_DEBUG(
+				"prop:%s count:%d is different compared to offset array:%d\n",
+				sde_prop[i].prop_name,
+				prop_count[i], *off_count);
+			rc = 0;
+			prop_count[i] = 0;
+		}
+		if (!off_count && prop_count[i] < 0) {
+			prop_count[i] = 0;
+			if (sde_prop[i].is_mandatory) {
+				SDE_ERROR("prop:%s count:%d is negative\n",
+					sde_prop[i].prop_name, prop_count[i]);
+				rc = -EINVAL;
+			} else {
+				rc = 0;
+				SDE_DEBUG("prop:%s count:%d is negative\n",
+					sde_prop[i].prop_name, prop_count[i]);
+			}
+		}
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * _read_dt_entry - read values for DT properties described by @sde_prop
+ * @np: device node to read from
+ * @sde_prop: property descriptor table
+ * @prop_size: number of entries in @sde_prop
+ * @prop_count: per-property element counts from _validate_dt_entry()
+ * @prop_exists: out array; false when a property failed to read
+ * @prop_value: out storage accessed via PROP_VALUE_ACCESS /
+ *	PROP_BITVALUE_ACCESS
+ *
+ * Failures on optional properties are absorbed (rc is reset each
+ * iteration); only a failed mandatory array read aborts the walk.
+ *
+ * Return: 0, or a negative error for a mandatory array read failure.
+ */
+static int _read_dt_entry(struct device_node *np,
+	struct sde_prop_type *sde_prop, u32 prop_size, int *prop_count,
+	bool *prop_exists,
+	struct sde_prop_value *prop_value)
+{
+	int rc = 0, i, j;
+
+	for (i = 0; i < prop_size; i++) {
+		prop_exists[i] = true;
+		switch (sde_prop[i].type) {
+		case PROP_TYPE_U32:
+			rc = of_property_read_u32(np, sde_prop[i].prop_name,
+				&PROP_VALUE_ACCESS(prop_value, i, 0));
+			SDE_DEBUG(
+				"prop id:%d prop name:%s prop type:%d value:0x%x\n",
+				i, sde_prop[i].prop_name,
+				sde_prop[i].type,
+				PROP_VALUE_ACCESS(prop_value, i, 0));
+			if (rc)
+				prop_exists[i] = false;
+			break;
+		case PROP_TYPE_BOOL:
+			PROP_VALUE_ACCESS(prop_value, i, 0) =
+				of_property_read_bool(np,
+					sde_prop[i].prop_name);
+			SDE_DEBUG(
+				"prop id:%d prop name:%s prop type:%d value:0x%x\n",
+				i, sde_prop[i].prop_name,
+				sde_prop[i].type,
+				PROP_VALUE_ACCESS(prop_value, i, 0));
+			break;
+		case PROP_TYPE_U32_ARRAY:
+			rc = _parse_dt_u32_handler(np, sde_prop[i].prop_name,
+				&PROP_VALUE_ACCESS(prop_value, i, 0),
+				prop_count[i], sde_prop[i].is_mandatory);
+			if (rc && sde_prop[i].is_mandatory) {
+				SDE_ERROR(
+					"%s prop validation success but read failed\n",
+					sde_prop[i].prop_name);
+				prop_exists[i] = false;
+				goto end;
+			} else {
+				if (rc)
+					prop_exists[i] = false;
+				/*
+				 * only for debug purpose; the original format
+				 * string here contained a stray escaped quote
+				 * and a line splice that embedded whitespace
+				 * into the message
+				 */
+				SDE_DEBUG(
+					"prop id:%d prop name:%s prop type:%d",
+					i, sde_prop[i].prop_name,
+					sde_prop[i].type);
+				for (j = 0; j < prop_count[i]; j++)
+					SDE_DEBUG(" value[%d]:0x%x ", j,
+						PROP_VALUE_ACCESS(prop_value, i,
+						j));
+				SDE_DEBUG("\n");
+			}
+			break;
+		case PROP_TYPE_BIT_OFFSET_ARRAY:
+			rc = _parse_dt_bit_offset(np, sde_prop[i].prop_name,
+				prop_value, i, prop_count[i],
+				sde_prop[i].is_mandatory);
+			if (rc && sde_prop[i].is_mandatory) {
+				SDE_ERROR(
+					"%s prop validation success but read failed\n",
+					sde_prop[i].prop_name);
+				prop_exists[i] = false;
+				goto end;
+			} else {
+				if (rc)
+					prop_exists[i] = false;
+				SDE_DEBUG(
+					"prop id:%d prop name:%s prop type:%d",
+					i, sde_prop[i].prop_name,
+					sde_prop[i].type);
+				for (j = 0; j < prop_count[i]; j++)
+					SDE_DEBUG(
+					"count[%d]: bit:0x%x off:0x%x\n", j,
+					PROP_BITVALUE_ACCESS(prop_value,
+					i, j, 0),
+					PROP_BITVALUE_ACCESS(prop_value,
+					i, j, 1));
+				SDE_DEBUG("\n");
+			}
+			break;
+		case PROP_TYPE_NODE:
+			/* Node will be parsed in calling function */
+			rc = 0;
+			break;
+		default:
+			SDE_DEBUG("invalid property type:%d\n",
+				sde_prop[i].type);
+			break;
+		}
+		/* optional-property failures are not propagated */
+		rc = 0;
+	}
+
+end:
+	return rc;
+}
+
+/*
+ * _sde_sspp_setup_vig - populate a VIG SSPP instance from vig-block props
+ * Assigns id/clk_ctrl from the running *vig_count, then registers the
+ * scaler, CSC, HSIC, memcolor and PCC sub-blocks when their DT
+ * properties exist (value index 0 is the base offset, index 1 the
+ * version). NOTE(review): when @prop_value is NULL (no vig blocks node)
+ * the early return leaves the interim "sspp_%u" name in place; only the
+ * full path renames to "vig%d" at the end.
+ */
+static void _sde_sspp_setup_vig(struct sde_mdss_cfg *sde_cfg,
+ struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+ bool *prop_exists, struct sde_prop_value *prop_value, u32 *vig_count)
+{
+ sblk->maxupscale = MAX_SSPP_UPSCALE;
+ sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sblk->format_list = plane_formats_yuv;
+ sspp->id = SSPP_VIG0 + *vig_count;
+ snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+ sspp->id - SSPP_VIG0);
+ sspp->clk_ctrl = SDE_CLK_CTRL_VIG0 + *vig_count;
+ sspp->type = SSPP_TYPE_VIG;
+ set_bit(SDE_SSPP_QOS, &sspp->features);
+ (*vig_count)++;
+
+ /* no vig feature node in DT: skip sub-block decoration entirely */
+ if (!prop_value)
+ return;
+
+ if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+ set_bit(SDE_SSPP_SCALER_QSEED2, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_QSEED_OFF, 0);
+ sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+ VIG_QSEED_LEN, 0);
+ snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
+ } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+ set_bit(SDE_SSPP_SCALER_QSEED3, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_QSEED_OFF, 0);
+ sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+ VIG_QSEED_LEN, 0);
+ snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
+ }
+
+ /* CSC block id/name are set unconditionally; base only when present */
+ sblk->csc_blk.id = SDE_SSPP_CSC;
+ snprintf(sblk->csc_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_csc%u", sspp->id - SSPP_VIG0);
+ if (sde_cfg->csc_type == SDE_SSPP_CSC) {
+ set_bit(SDE_SSPP_CSC, &sspp->features);
+ sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_CSC_OFF, 0);
+ } else if (sde_cfg->csc_type == SDE_SSPP_CSC_10BIT) {
+ set_bit(SDE_SSPP_CSC_10BIT, &sspp->features);
+ sblk->csc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_CSC_OFF, 0);
+ }
+
+ sblk->hsic_blk.id = SDE_SSPP_HSIC;
+ snprintf(sblk->hsic_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_hsic%u", sspp->id - SSPP_VIG0);
+ if (prop_exists[VIG_HSIC_PROP]) {
+ sblk->hsic_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_HSIC_PROP, 0);
+ sblk->hsic_blk.version = PROP_VALUE_ACCESS(prop_value,
+ VIG_HSIC_PROP, 1);
+ sblk->hsic_blk.len = 0;
+ set_bit(SDE_SSPP_HSIC, &sspp->features);
+ }
+
+ sblk->memcolor_blk.id = SDE_SSPP_MEMCOLOR;
+ snprintf(sblk->memcolor_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_memcolor%u", sspp->id - SSPP_VIG0);
+ if (prop_exists[VIG_MEMCOLOR_PROP]) {
+ sblk->memcolor_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_MEMCOLOR_PROP, 0);
+ sblk->memcolor_blk.version = PROP_VALUE_ACCESS(prop_value,
+ VIG_MEMCOLOR_PROP, 1);
+ sblk->memcolor_blk.len = 0;
+ set_bit(SDE_SSPP_MEMCOLOR, &sspp->features);
+ }
+
+ sblk->pcc_blk.id = SDE_SSPP_PCC;
+ snprintf(sblk->pcc_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_pcc%u", sspp->id - SSPP_VIG0);
+ if (prop_exists[VIG_PCC_PROP]) {
+ sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ VIG_PCC_PROP, 0);
+ sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+ VIG_PCC_PROP, 1);
+ sblk->pcc_blk.len = 0;
+ set_bit(SDE_SSPP_PCC, &sspp->features);
+ }
+ /* final display name overrides the interim "sspp_%u" one */
+ snprintf(sspp->name, sizeof(sspp->name), "vig%d", *vig_count-1);
+}
+
+/*
+ * _sde_sspp_setup_rgb - populate an RGB SSPP instance from rgb-block props
+ * Mirrors _sde_sspp_setup_vig() for RGB pipes: id/clk_ctrl come from the
+ * running *rgb_count; scaler and PCC sub-blocks are registered when
+ * their DT properties exist. As with vig, a NULL @prop_value returns
+ * early with the interim "sspp_%u" name still set.
+ */
+static void _sde_sspp_setup_rgb(struct sde_mdss_cfg *sde_cfg,
+ struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+ bool *prop_exists, struct sde_prop_value *prop_value, u32 *rgb_count)
+{
+ sblk->maxupscale = MAX_SSPP_UPSCALE;
+ sblk->maxdwnscale = MAX_SSPP_DOWNSCALE;
+ sblk->format_list = plane_formats;
+ sspp->id = SSPP_RGB0 + *rgb_count;
+ snprintf(sspp->name, SDE_HW_BLK_NAME_LEN, "sspp_%u",
+ sspp->id - SSPP_VIG0);
+ sspp->clk_ctrl = SDE_CLK_CTRL_RGB0 + *rgb_count;
+ sspp->type = SSPP_TYPE_RGB;
+ set_bit(SDE_SSPP_QOS, &sspp->features);
+ (*rgb_count)++;
+
+ if (!prop_value)
+ return;
+
+ if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED2) {
+ set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED2;
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ RGB_SCALER_OFF, 0);
+ sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+ RGB_SCALER_LEN, 0);
+ snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
+ } else if (sde_cfg->qseed_type == SDE_SSPP_SCALER_QSEED3) {
+ set_bit(SDE_SSPP_SCALER_RGB, &sspp->features);
+ sblk->scaler_blk.id = SDE_SSPP_SCALER_QSEED3;
+ /*
+ * NOTE(review): unlike the QSEED2 branch, base is read from
+ * RGB_SCALER_LEN and len from SSPP_SCALE_SIZE here — looks
+ * like a copy/paste inconsistency; confirm against the DT
+ * binding before changing.
+ */
+ sblk->scaler_blk.base = PROP_VALUE_ACCESS(prop_value,
+ RGB_SCALER_LEN, 0);
+ sblk->scaler_blk.len = PROP_VALUE_ACCESS(prop_value,
+ SSPP_SCALE_SIZE, 0);
+ snprintf(sblk->scaler_blk.name, SDE_HW_BLK_NAME_LEN,
+ "sspp_scaler%u", sspp->id - SSPP_VIG0);
+ }
+
+ sblk->pcc_blk.id = SDE_SSPP_PCC;
+ if (prop_exists[RGB_PCC_PROP]) {
+ sblk->pcc_blk.base = PROP_VALUE_ACCESS(prop_value,
+ RGB_PCC_PROP, 0);
+ sblk->pcc_blk.version = PROP_VALUE_ACCESS(prop_value,
+ RGB_PCC_PROP, 1);
+ sblk->pcc_blk.len = 0;
+ set_bit(SDE_SSPP_PCC, &sspp->features);
+ }
+ /* final display name overrides the interim "sspp_%u" one */
+ snprintf(sspp->name, sizeof(sspp->name), "rgb%d", *rgb_count-1);
+}
+
+/*
+ * _sde_sspp_setup_cursor - populate a cursor SSPP instance
+ * Cursor pipes scale 1:1, use the cursor format list and take no extra
+ * DT sub-block properties (@prop_value is unused; callers pass NULL).
+ * @sde_cfg is unused but kept for signature symmetry with the other
+ * _sde_sspp_setup_* helpers.
+ */
+static void _sde_sspp_setup_cursor(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	struct sde_prop_value *prop_value, u32 *cursor_count)
+{
+	set_bit(SDE_SSPP_CURSOR, &sspp->features);
+	sblk->maxupscale = SSPP_UNITY_SCALE;
+	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = cursor_formats;
+	sspp->id = SSPP_CURSOR0 + *cursor_count;
+	sspp->clk_ctrl = SDE_CLK_CTRL_CURSOR0 + *cursor_count;
+	sspp->type = SSPP_TYPE_CURSOR;
+	(*cursor_count)++;
+	/*
+	 * The interim "sspp_%u" snprintf the other setup helpers use was a
+	 * dead store here (always overwritten below, no early return), so
+	 * only the final name is written.
+	 */
+	snprintf(sspp->name, sizeof(sspp->name), "cursor%d",
+		*cursor_count - 1);
+}
+
+/*
+ * _sde_sspp_setup_dma - populate a DMA SSPP instance
+ * DMA pipes scale 1:1 and take no extra DT sub-block properties
+ * (@prop_value is unused; callers pass NULL). @sde_cfg is unused but
+ * kept for signature symmetry with the other _sde_sspp_setup_* helpers.
+ */
+static void _sde_sspp_setup_dma(struct sde_mdss_cfg *sde_cfg,
+	struct sde_sspp_cfg *sspp, struct sde_sspp_sub_blks *sblk,
+	struct sde_prop_value *prop_value, u32 *dma_count)
+{
+	sblk->maxupscale = SSPP_UNITY_SCALE;
+	sblk->maxdwnscale = SSPP_UNITY_SCALE;
+	sblk->format_list = plane_formats;
+	sspp->id = SSPP_DMA0 + *dma_count;
+	sspp->clk_ctrl = SDE_CLK_CTRL_DMA0 + *dma_count;
+	sspp->type = SSPP_TYPE_DMA;
+	set_bit(SDE_SSPP_QOS, &sspp->features);
+	(*dma_count)++;
+	/*
+	 * The interim "sspp_%u" snprintf was a dead store here (always
+	 * overwritten, no early return), so only the final name is written.
+	 */
+	snprintf(sspp->name, sizeof(sspp->name), "dma%d", *dma_count - 1);
+}
+
+/*
+ * sde_sspp_parse_dt - parse all SSPP (source pipe) blocks from DT
+ * @np: mdss device node
+ * @sde_cfg: catalog to populate (sde_cfg->sspp array is preallocated)
+ *
+ * Validates and reads the common sspp properties plus the optional
+ * vig/rgb feature sub-nodes, then fills one sde_sspp_cfg per offset
+ * entry, dispatching on the per-instance type string.
+ *
+ * Return: 0 on success, negative error otherwise; partially allocated
+ * sub-blocks are released by catalog deinit.
+ */
+static int sde_sspp_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[SSPP_PROP_MAX], off_count, i, j;
+	int vig_prop_count[VIG_PROP_MAX], rgb_prop_count[RGB_PROP_MAX];
+	bool prop_exists[SSPP_PROP_MAX], vig_prop_exists[VIG_PROP_MAX];
+	bool rgb_prop_exists[RGB_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	struct sde_prop_value *vig_prop_value = NULL, *rgb_prop_value = NULL;
+	const char *type;
+	struct sde_sspp_cfg *sspp;
+	struct sde_sspp_sub_blks *sblk;
+	u32 vig_count = 0, dma_count = 0, rgb_count = 0, cursor_count = 0;
+	u32 danger_count = 0, safe_count = 0;
+	struct device_node *snp = NULL;
+
+	/* guard added for consistency with the sibling parse functions */
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument input param\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(SSPP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	/* danger/safe LUTs have their own element counts */
+	rc = _validate_dt_entry(np, &sspp_prop[SSPP_DANGER], 1,
+		&prop_count[SSPP_DANGER], &danger_count);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &sspp_prop[SSPP_SAFE], 1,
+		&prop_count[SSPP_SAFE], &safe_count);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, sspp_prop, ARRAY_SIZE(sspp_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	sde_cfg->sspp_count = off_count;
+
+	/* get vig feature dt properties if they exist */
+	snp = of_get_child_by_name(np, sspp_prop[SSPP_VIG_BLOCKS].prop_name);
+	if (snp) {
+		vig_prop_value = kzalloc(VIG_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+		if (!vig_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
+			vig_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, vig_prop, ARRAY_SIZE(vig_prop),
+			vig_prop_count, vig_prop_exists,
+			vig_prop_value);
+	}
+
+	/* get rgb feature dt properties if they exist */
+	snp = of_get_child_by_name(np, sspp_prop[SSPP_RGB_BLOCKS].prop_name);
+	if (snp) {
+		rgb_prop_value = kzalloc(RGB_PROP_MAX *
+					sizeof(struct sde_prop_value),
+					GFP_KERNEL);
+		if (!rgb_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
+			rgb_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, rgb_prop, ARRAY_SIZE(rgb_prop),
+			rgb_prop_count, rgb_prop_exists,
+			rgb_prop_value);
+	}
+
+	for (i = 0; i < off_count; i++) {
+		sspp = sde_cfg->sspp + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		sspp->sblk = sblk;
+
+		sspp->base = PROP_VALUE_ACCESS(prop_value, SSPP_OFF, i);
+		sspp->len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
+		sblk->maxlinewidth = sde_cfg->max_sspp_linewidth;
+
+		set_bit(SDE_SSPP_SRC, &sspp->features);
+		sblk->src_blk.id = SDE_SSPP_SRC;
+
+		/*
+		 * Pre-clear type: of_property_read_string_index() leaves it
+		 * untouched on failure, which previously fed strcmp() an
+		 * uninitialized (or stale) pointer.
+		 */
+		type = NULL;
+		of_property_read_string_index(np,
+			sspp_prop[SSPP_TYPE].prop_name, i, &type);
+		if (type && !strcmp(type, "vig")) {
+			_sde_sspp_setup_vig(sde_cfg, sspp, sblk,
+				vig_prop_exists, vig_prop_value, &vig_count);
+		} else if (type && !strcmp(type, "rgb")) {
+			_sde_sspp_setup_rgb(sde_cfg, sspp, sblk,
+				rgb_prop_exists, rgb_prop_value, &rgb_count);
+		} else if (type && !strcmp(type, "cursor")) {
+			/* No prop values for cursor pipes */
+			_sde_sspp_setup_cursor(sde_cfg, sspp, sblk, NULL,
+				&cursor_count);
+		} else if (type && !strcmp(type, "dma")) {
+			/* No prop values for DMA pipes */
+			_sde_sspp_setup_dma(sde_cfg, sspp, sblk, NULL,
+				&dma_count);
+		} else {
+			SDE_ERROR("invalid sspp type:%s\n", type);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		snprintf(sblk->src_blk.name, SDE_HW_BLK_NAME_LEN, "sspp_src_%u",
+			sspp->id - SSPP_VIG0);
+
+		if (sspp->clk_ctrl >= SDE_CLK_CTRL_MAX) {
+			SDE_ERROR("%s: invalid clk ctrl: %d\n",
+				sblk->src_blk.name, sspp->clk_ctrl);
+			rc = -EINVAL;
+			goto end;
+		}
+
+		sblk->maxhdeciexp = MAX_HORZ_DECIMATION;
+		sblk->maxvdeciexp = MAX_VERT_DECIMATION;
+
+		sspp->xin_id = PROP_VALUE_ACCESS(prop_value, SSPP_XIN, i);
+		sblk->danger_lut_linear =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 0);
+		sblk->danger_lut_tile =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 1);
+		sblk->danger_lut_nrt =
+			PROP_VALUE_ACCESS(prop_value, SSPP_DANGER, 2);
+		sblk->safe_lut_linear =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 0);
+		sblk->safe_lut_tile =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 1);
+		sblk->safe_lut_nrt =
+			PROP_VALUE_ACCESS(prop_value, SSPP_SAFE, 2);
+		sblk->creq_lut_nrt = DEFAULT_CREQ_LUT_NRT;
+		sblk->pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE;
+		sblk->src_blk.len = PROP_VALUE_ACCESS(prop_value, SSPP_SIZE, 0);
+
+		/* mirror the pipe's clock control into every MDP top block */
+		for (j = 0; j < sde_cfg->mdp_count; j++) {
+			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].reg_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+					SSPP_CLK_CTRL, i, 0);
+			sde_cfg->mdp[j].clk_ctrls[sspp->clk_ctrl].bit_off =
+				PROP_BITVALUE_ACCESS(prop_value,
+					SSPP_CLK_CTRL, i, 1);
+		}
+
+		SDE_DEBUG(
+			"xin:%d danger:%x/%x/%x safe:%x/%x/%x creq:%x ram:%d clk%d:%x/%d\n",
+			sspp->xin_id,
+			sblk->danger_lut_linear,
+			sblk->danger_lut_tile,
+			sblk->danger_lut_nrt,
+			sblk->safe_lut_linear,
+			sblk->safe_lut_tile,
+			sblk->safe_lut_nrt,
+			sblk->creq_lut_nrt,
+			sblk->pixel_ram_size,
+			sspp->clk_ctrl,
+			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].reg_off,
+			sde_cfg->mdp[0].clk_ctrls[sspp->clk_ctrl].bit_off);
+	}
+
+end:
+	kfree(prop_value);
+	kfree(vig_prop_value);
+	kfree(rgb_prop_value);
+	return rc;
+}
+
+/*
+ * sde_ctl_parse_dt - parse CTL (control path) blocks from DT
+ * @np: mdss device node
+ * @sde_cfg: catalog to populate (ctl array preallocated)
+ *
+ * Reads offsets/size, assigns CTL_0..CTL_n ids, flags the preferred
+ * display per instance from the optional display-pref string and marks
+ * the first few CTLs as split-display / pingpong-split capable.
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+static int sde_ctl_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[HW_PROP_MAX], i;
+ bool prop_exists[HW_PROP_MAX];
+ struct sde_prop_value *prop_value = NULL;
+ struct sde_ctl_cfg *ctl;
+ u32 off_count;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument input param\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(HW_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+ &off_count);
+ if (rc)
+ goto end;
+
+ sde_cfg->ctl_count = off_count;
+
+ rc = _read_dt_entry(np, ctl_prop, ARRAY_SIZE(ctl_prop), prop_count,
+ prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ for (i = 0; i < off_count; i++) {
+ const char *disp_pref = NULL;
+ ctl = sde_cfg->ctl + i;
+ ctl->base = PROP_VALUE_ACCESS(prop_value, HW_OFF, i);
+ ctl->len = PROP_VALUE_ACCESS(prop_value, HW_LEN, 0);
+ ctl->id = CTL_0 + i;
+ snprintf(ctl->name, SDE_HW_BLK_NAME_LEN, "ctl_%u",
+ ctl->id - CTL_0);
+
+ /* disp_pref stays NULL when the string index is absent */
+ of_property_read_string_index(np,
+ ctl_prop[HW_DISP].prop_name, i, &disp_pref);
+ if (disp_pref) {
+ if (!strcmp(disp_pref, "primary"))
+ set_bit(SDE_CTL_PRIMARY_PREF, &ctl->features);
+ else if (!strcmp(disp_pref, "secondary"))
+ set_bit(SDE_CTL_SECONDARY_PREF, &ctl->features);
+ else if (!strcmp(disp_pref, "tertiary"))
+ set_bit(SDE_CTL_TERTIARY_PREF, &ctl->features);
+ }
+ /* only the first CTLs support split / pp-split topologies */
+ if (i < MAX_SPLIT_DISPLAY_CTL)
+ set_bit(SDE_CTL_SPLIT_DISPLAY, &ctl->features);
+ if (i < MAX_PP_SPLIT_DISPLAY_CTL)
+ set_bit(SDE_CTL_PINGPONG_SPLIT, &ctl->features);
+ }
+
+end:
+ kfree(prop_value);
+ return rc;
+}
+
+/*
+ * sde_mixer_parse_dt - parse layer mixer (LM) blocks from DT
+ * @np: mdss device node
+ * @sde_cfg: catalog to populate (mixer array preallocated; pingpong and
+ * dspp counts must already be parsed)
+ *
+ * Fills one sde_lm_cfg per offset, wires mixer<->pingpong/dspp pairings
+ * in order (skipping the rotator LM index window), and registers the
+ * optional GC sub-block from the mixer-blocks child node.
+ *
+ * Return: 0 on success, negative error otherwise; allocated sub-blocks
+ * are released by catalog deinit on failure.
+ */
+static int sde_mixer_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[MIXER_PROP_MAX], i;
+ int blocks_prop_count[MIXER_BLOCKS_PROP_MAX];
+ bool prop_exists[MIXER_PROP_MAX];
+ bool blocks_prop_exists[MIXER_BLOCKS_PROP_MAX];
+ struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
+ u32 off_count, max_blendstages;
+ /* per-stage blend register offsets within an LM */
+ u32 blend_reg_base[] = {0x20, 0x50, 0x80, 0xb0, 0x230, 0x260, 0x290};
+ /* NOTE(review): presumably peer LM id per index for split display;
+ * 0x0 means no pair — confirm against LM enum values */
+ u32 lm_pair_mask[] = {LM_1, LM_0, LM_5, 0x0, 0x0, LM_2};
+ struct sde_lm_cfg *mixer;
+ struct sde_lm_sub_blks *sblk;
+ int pp_count, dspp_count;
+ u32 pp_idx, dspp_idx;
+ struct device_node *snp = NULL;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument input param\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ max_blendstages = sde_cfg->max_mixer_blendstages;
+
+ prop_value = kzalloc(MIXER_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop),
+ prop_count, &off_count);
+ if (rc)
+ goto end;
+
+ sde_cfg->mixer_count = off_count;
+
+ rc = _read_dt_entry(np, mixer_prop, ARRAY_SIZE(mixer_prop), prop_count,
+ prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ pp_count = sde_cfg->pingpong_count;
+ dspp_count = sde_cfg->dspp_count;
+
+ /* get mixer feature dt properties if they exist */
+ snp = of_get_child_by_name(np, mixer_prop[MIXER_BLOCKS].prop_name);
+ if (snp) {
+ blocks_prop_value = kzalloc(MIXER_BLOCKS_PROP_MAX *
+ MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
+ GFP_KERNEL);
+ if (!blocks_prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+ rc = _validate_dt_entry(snp, mixer_blocks_prop,
+ ARRAY_SIZE(mixer_blocks_prop), blocks_prop_count, NULL);
+ if (rc)
+ goto end;
+ rc = _read_dt_entry(snp, mixer_blocks_prop,
+ ARRAY_SIZE(mixer_blocks_prop),
+ blocks_prop_count, blocks_prop_exists,
+ blocks_prop_value);
+ }
+
+ for (i = 0, pp_idx = 0, dspp_idx = 0; i < off_count; i++) {
+ const char *disp_pref = NULL;
+ mixer = sde_cfg->mixer + i;
+ sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+ if (!sblk) {
+ rc = -ENOMEM;
+ /* catalog deinit will release the allocated blocks */
+ goto end;
+ }
+ mixer->sblk = sblk;
+
+ mixer->base = PROP_VALUE_ACCESS(prop_value, MIXER_OFF, i);
+ mixer->len = PROP_VALUE_ACCESS(prop_value, MIXER_LEN, 0);
+ mixer->id = LM_0 + i;
+ snprintf(mixer->name, SDE_HW_BLK_NAME_LEN, "lm_%u",
+ mixer->id - LM_0);
+
+ if (!prop_exists[MIXER_LEN])
+ mixer->len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+ if ((i < ARRAY_SIZE(lm_pair_mask)) && lm_pair_mask[i])
+ mixer->lm_pair_mask = 1 << lm_pair_mask[i];
+
+ sblk->maxblendstages = max_blendstages;
+ sblk->maxwidth = sde_cfg->max_mixer_width;
+ /* copy only as many stage bases as the hw supports */
+ memcpy(sblk->blendstage_base, blend_reg_base, sizeof(u32) *
+ min_t(u32, MAX_BLOCKS, min_t(u32,
+ ARRAY_SIZE(blend_reg_base), max_blendstages)));
+ if (sde_cfg->has_src_split)
+ set_bit(SDE_MIXER_SOURCESPLIT, &mixer->features);
+
+ of_property_read_string_index(np,
+ mixer_prop[MIXER_DISP].prop_name, i, &disp_pref);
+
+ if (disp_pref) {
+ if (!strcmp(disp_pref, "primary"))
+ set_bit(SDE_DISP_PRIMARY_PREF,
+ &mixer->features);
+ else if (!strcmp(disp_pref, "secondary"))
+ set_bit(SDE_DISP_SECONDARY_PREF,
+ &mixer->features);
+ else if (!strcmp(disp_pref, "tertiary"))
+ set_bit(SDE_DISP_TERTIARY_PREF,
+ &mixer->features);
+ }
+
+ /* rotator LMs (ROT..LINE window) get no pingpong/dspp */
+ if ((i < ROT_LM_OFFSET) || (i >= LINE_LM_OFFSET)) {
+ mixer->pingpong = pp_count > 0 ? pp_idx + PINGPONG_0
+ : PINGPONG_MAX;
+ mixer->dspp = dspp_count > 0 ? dspp_idx + DSPP_0
+ : DSPP_MAX;
+ pp_count--;
+ dspp_count--;
+ pp_idx++;
+ dspp_idx++;
+ } else {
+ mixer->pingpong = PINGPONG_MAX;
+ mixer->dspp = DSPP_MAX;
+ }
+
+ sblk->gc.id = SDE_MIXER_GC;
+ if (blocks_prop_value && blocks_prop_exists[MIXER_GC_PROP]) {
+ sblk->gc.base = PROP_VALUE_ACCESS(blocks_prop_value,
+ MIXER_GC_PROP, 0);
+ sblk->gc.version = PROP_VALUE_ACCESS(blocks_prop_value,
+ MIXER_GC_PROP, 1);
+ sblk->gc.len = 0;
+ set_bit(SDE_MIXER_GC, &mixer->features);
+ }
+ }
+
+end:
+ kfree(prop_value);
+ kfree(blocks_prop_value);
+ return rc;
+}
+
+/*
+ * sde_intf_parse_dt - parse display interface (INTF) blocks from DT
+ * @np: mdss device node
+ * @sde_cfg: catalog to populate (intf array preallocated)
+ *
+ * Assigns INTF_0..INTF_n ids, base/len, worst-case prefetch lines and a
+ * per-type controller index derived from the "qcom,sde-intf-type"
+ * string array ("dsi"/"hdmi"/"dp", anything else maps to INTF_NONE).
+ *
+ * Return: 0 on success, negative error otherwise.
+ */
+static int sde_intf_parse_dt(struct device_node *np,
+	struct sde_mdss_cfg *sde_cfg)
+{
+	struct sde_prop_value *prop_value = NULL;
+	struct sde_intf_cfg *intf;
+	bool prop_exists[INTF_PROP_MAX];
+	int rc, prop_count[INTF_PROP_MAX], idx;
+	u32 off_count;
+	u32 dsi_count = 0, none_count = 0, hdmi_count = 0, dp_count = 0;
+	const char *type;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(INTF_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->intf_count = off_count;
+
+	rc = _read_dt_entry(np, intf_prop, ARRAY_SIZE(intf_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (idx = 0; idx < off_count; idx++) {
+		intf = &sde_cfg->intf[idx];
+
+		intf->id = INTF_0 + idx;
+		intf->base = PROP_VALUE_ACCESS(prop_value, INTF_OFF, idx);
+		/* fall back to the default block length when size is absent */
+		intf->len = prop_exists[INTF_LEN] ?
+			PROP_VALUE_ACCESS(prop_value, INTF_LEN, 0) :
+			DEFAULT_SDE_HW_BLOCK_LEN;
+		snprintf(intf->name, SDE_HW_BLK_NAME_LEN, "intf_%u",
+			intf->id - INTF_0);
+
+		intf->prog_fetch_lines_worst_case =
+			PROP_VALUE_ACCESS(prop_value, INTF_PREFETCH, idx);
+
+		of_property_read_string_index(np,
+			intf_prop[INTF_TYPE].prop_name, idx, &type);
+		if (!strcmp(type, "dsi")) {
+			intf->type = INTF_DSI;
+			intf->controller_id = dsi_count++;
+		} else if (!strcmp(type, "hdmi")) {
+			intf->type = INTF_HDMI;
+			intf->controller_id = hdmi_count++;
+		} else if (!strcmp(type, "dp")) {
+			intf->type = INTF_DP;
+			intf->controller_id = dp_count++;
+		} else {
+			intf->type = INTF_NONE;
+			intf->controller_id = none_count++;
+		}
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+/*
+ * sde_wb_parse_dt - parse writeback (WB) blocks from DT
+ * @np: mdss device node
+ * @sde_cfg: catalog to populate (wb array preallocated; mdp blocks must
+ * already be parsed so clk_ctrls can be mirrored into them)
+ *
+ * Fills one sde_wb_cfg per offset: ids come from the explicit wb-id
+ * property (not the array index), clock controls are (reg, bit) pairs,
+ * and line/block mode is chosen from the id threshold.
+ *
+ * Return: 0 on success, negative error otherwise; allocated sub-blocks
+ * are released by catalog deinit on failure.
+ */
+static int sde_wb_parse_dt(struct device_node *np,
+ struct sde_mdss_cfg *sde_cfg)
+{
+ int rc, prop_count[WB_PROP_MAX], i, j;
+ struct sde_prop_value *prop_value = NULL;
+ bool prop_exists[WB_PROP_MAX];
+ u32 off_count;
+ struct sde_wb_cfg *wb;
+ struct sde_wb_sub_blocks *sblk;
+
+ if (!sde_cfg) {
+ SDE_ERROR("invalid argument\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ prop_value = kzalloc(WB_PROP_MAX *
+ sizeof(struct sde_prop_value), GFP_KERNEL);
+ if (!prop_value) {
+ rc = -ENOMEM;
+ goto end;
+ }
+
+ rc = _validate_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
+ &off_count);
+ if (rc)
+ goto end;
+
+ sde_cfg->wb_count = off_count;
+
+ rc = _read_dt_entry(np, wb_prop, ARRAY_SIZE(wb_prop), prop_count,
+ prop_exists, prop_value);
+ if (rc)
+ goto end;
+
+ for (i = 0; i < off_count; i++) {
+ wb = sde_cfg->wb + i;
+ sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+ if (!sblk) {
+ rc = -ENOMEM;
+ /* catalog deinit will release the allocated blocks */
+ goto end;
+ }
+ wb->sblk = sblk;
+
+ wb->base = PROP_VALUE_ACCESS(prop_value, WB_OFF, i);
+ /* id and clk_ctrl are keyed by the DT wb-id, not the index */
+ wb->id = WB_0 + PROP_VALUE_ACCESS(prop_value, WB_ID, i);
+ snprintf(wb->name, SDE_HW_BLK_NAME_LEN, "wb_%u",
+ wb->id - WB_0);
+ wb->clk_ctrl = SDE_CLK_CTRL_WB0 +
+ PROP_VALUE_ACCESS(prop_value, WB_ID, i);
+ wb->xin_id = PROP_VALUE_ACCESS(prop_value, WB_XIN_ID, i);
+ wb->vbif_idx = VBIF_NRT;
+
+ if (wb->clk_ctrl >= SDE_CLK_CTRL_MAX) {
+ SDE_ERROR("%s: invalid clk ctrl: %d\n",
+ wb->name, wb->clk_ctrl);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ wb->len = PROP_VALUE_ACCESS(prop_value, WB_LEN, 0);
+ wb->format_list = wb2_formats;
+ if (!prop_exists[WB_LEN])
+ wb->len = DEFAULT_SDE_HW_BLOCK_LEN;
+ sblk->maxlinewidth = sde_cfg->max_wb_linewidth;
+
+ /* ids at/above the threshold are line-mode capable */
+ if (wb->id >= LINE_MODE_WB_OFFSET)
+ set_bit(SDE_WB_LINE_MODE, &wb->features);
+ else
+ set_bit(SDE_WB_BLOCK_MODE, &wb->features);
+ set_bit(SDE_WB_TRAFFIC_SHAPER, &wb->features);
+ set_bit(SDE_WB_YUV_CONFIG, &wb->features);
+
+ /* mirror the WB clock control into every MDP top block */
+ for (j = 0; j < sde_cfg->mdp_count; j++) {
+ sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].reg_off =
+ PROP_BITVALUE_ACCESS(prop_value,
+ WB_CLK_CTRL, i, 0);
+ sde_cfg->mdp[j].clk_ctrls[wb->clk_ctrl].bit_off =
+ PROP_BITVALUE_ACCESS(prop_value,
+ WB_CLK_CTRL, i, 1);
+ }
+
+ SDE_DEBUG(
+ "wb:%d xin:%d vbif:%d clk%d:%x/%d\n",
+ wb->id - WB_0,
+ wb->xin_id,
+ wb->vbif_idx,
+ wb->clk_ctrl,
+ sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].reg_off,
+ sde_cfg->mdp[0].clk_ctrls[wb->clk_ctrl].bit_off);
+ }
+
+end:
+ kfree(prop_value);
+ return rc;
+}
+
+/*
+ * _sde_dspp_setup_blocks - register DSPP color sub-blocks from DT props
+ * @sde_cfg: catalog being populated (unused here; kept for symmetry)
+ * @dspp: DSPP instance whose feature bits are set
+ * @sblk: sub-block structure to fill
+ * @prop_exists: per-property presence flags from _read_dt_entry()
+ * @prop_value: per-property values from _read_dt_entry()
+ *
+ * Each sub-block (IGC, PCC, GC, gamut, dither, hist, HSIC, memcolor,
+ * sixzone, vlut) is registered only when its DT property exists; value
+ * index 0 is the block base offset, index 1 its version.
+ */
+static void _sde_dspp_setup_blocks(struct sde_mdss_cfg *sde_cfg,
+	struct sde_dspp_cfg *dspp, struct sde_dspp_sub_blks *sblk,
+	bool *prop_exists, struct sde_prop_value *prop_value)
+{
+	sblk->igc.id = SDE_DSPP_IGC;
+	if (prop_exists[DSPP_IGC_PROP]) {
+		sblk->igc.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_IGC_PROP, 0);
+		sblk->igc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_IGC_PROP, 1);
+		sblk->igc.len = 0;
+		set_bit(SDE_DSPP_IGC, &dspp->features);
+	}
+
+	sblk->pcc.id = SDE_DSPP_PCC;
+	if (prop_exists[DSPP_PCC_PROP]) {
+		sblk->pcc.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_PCC_PROP, 0);
+		sblk->pcc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_PCC_PROP, 1);
+		sblk->pcc.len = 0;
+		set_bit(SDE_DSPP_PCC, &dspp->features);
+	}
+
+	sblk->gc.id = SDE_DSPP_GC;
+	if (prop_exists[DSPP_GC_PROP]) {
+		sblk->gc.base = PROP_VALUE_ACCESS(prop_value, DSPP_GC_PROP, 0);
+		sblk->gc.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GC_PROP, 1);
+		sblk->gc.len = 0;
+		set_bit(SDE_DSPP_GC, &dspp->features);
+	}
+
+	sblk->gamut.id = SDE_DSPP_GAMUT;
+	if (prop_exists[DSPP_GAMUT_PROP]) {
+		sblk->gamut.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GAMUT_PROP, 0);
+		sblk->gamut.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_GAMUT_PROP, 1);
+		sblk->gamut.len = 0;
+		set_bit(SDE_DSPP_GAMUT, &dspp->features);
+	}
+
+	sblk->dither.id = SDE_DSPP_DITHER;
+	if (prop_exists[DSPP_DITHER_PROP]) {
+		sblk->dither.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_DITHER_PROP, 0);
+		sblk->dither.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_DITHER_PROP, 1);
+		sblk->dither.len = 0;
+		set_bit(SDE_DSPP_DITHER, &dspp->features);
+	}
+
+	sblk->hist.id = SDE_DSPP_HIST;
+	if (prop_exists[DSPP_HIST_PROP]) {
+		sblk->hist.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HIST_PROP, 0);
+		sblk->hist.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HIST_PROP, 1);
+		sblk->hist.len = 0;
+		set_bit(SDE_DSPP_HIST, &dspp->features);
+	}
+
+	sblk->hsic.id = SDE_DSPP_HSIC;
+	if (prop_exists[DSPP_HSIC_PROP]) {
+		sblk->hsic.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HSIC_PROP, 0);
+		sblk->hsic.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_HSIC_PROP, 1);
+		sblk->hsic.len = 0;
+		set_bit(SDE_DSPP_HSIC, &dspp->features);
+	}
+
+	sblk->memcolor.id = SDE_DSPP_MEMCOLOR;
+	if (prop_exists[DSPP_MEMCOLOR_PROP]) {
+		sblk->memcolor.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_MEMCOLOR_PROP, 0);
+		sblk->memcolor.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_MEMCOLOR_PROP, 1);
+		sblk->memcolor.len = 0;
+		set_bit(SDE_DSPP_MEMCOLOR, &dspp->features);
+	}
+
+	sblk->sixzone.id = SDE_DSPP_SIXZONE;
+	if (prop_exists[DSPP_SIXZONE_PROP]) {
+		sblk->sixzone.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_SIXZONE_PROP, 0);
+		sblk->sixzone.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_SIXZONE_PROP, 1);
+		sblk->sixzone.len = 0;
+		set_bit(SDE_DSPP_SIXZONE, &dspp->features);
+	}
+
+	sblk->vlut.id = SDE_DSPP_VLUT;
+	if (prop_exists[DSPP_VLUT_PROP]) {
+		sblk->vlut.base = PROP_VALUE_ACCESS(prop_value,
+			DSPP_VLUT_PROP, 0);
+		sblk->vlut.version = PROP_VALUE_ACCESS(prop_value,
+			DSPP_VLUT_PROP, 1);
+		/* fixed copy/paste slip: this cleared sixzone.len before */
+		sblk->vlut.len = 0;
+		set_bit(SDE_DSPP_VLUT, &dspp->features);
+	}
+}
+
+/**
+ * sde_dspp_parse_dt - parse DSPP and AD block properties from device tree
+ * @np:      device node of the SDE hardware
+ * @sde_cfg: catalog structure to populate with DSPP configuration
+ *
+ * Parses the top-level DSPP entries, the optional AD (assertive display)
+ * entries and the optional per-feature sub-block child node, then fills
+ * one sde_dspp_cfg (plus an allocated sub-block structure) per DSPP
+ * instance found.
+ *
+ * Return: 0 on success, negative error code on failure.  Partially
+ * allocated sub-blocks are released by sde_hw_catalog_deinit().
+ */
+static int sde_dspp_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[DSPP_PROP_MAX], i;
+	int ad_prop_count[AD_PROP_MAX];
+	bool prop_exists[DSPP_PROP_MAX], ad_prop_exists[AD_PROP_MAX];
+	bool blocks_prop_exists[DSPP_BLOCKS_PROP_MAX];
+	struct sde_prop_value *ad_prop_value = NULL;
+	int blocks_prop_count[DSPP_BLOCKS_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL, *blocks_prop_value = NULL;
+	u32 off_count, ad_off_count;
+	struct sde_dspp_cfg *dspp;
+	struct sde_dspp_sub_blks *sblk;
+	struct device_node *snp = NULL;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(DSPP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop),
+		prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->dspp_count = off_count;
+
+	rc = _read_dt_entry(np, dspp_prop, ARRAY_SIZE(dspp_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	/* Parse AD dtsi entries */
+	ad_prop_value = kzalloc(AD_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!ad_prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+	rc = _validate_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop),
+		ad_prop_count, &ad_off_count);
+	if (rc)
+		goto end;
+	rc = _read_dt_entry(np, ad_prop, ARRAY_SIZE(ad_prop), ad_prop_count,
+		ad_prop_exists, ad_prop_value);
+	if (rc)
+		goto end;
+
+	/* get DSPP feature dt properties if they exist */
+	snp = of_get_child_by_name(np, dspp_prop[DSPP_BLOCKS].prop_name);
+	if (snp) {
+		blocks_prop_value = kzalloc(DSPP_BLOCKS_PROP_MAX *
+				MAX_SDE_HW_BLK * sizeof(struct sde_prop_value),
+				GFP_KERNEL);
+		if (!blocks_prop_value) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		rc = _validate_dt_entry(snp, dspp_blocks_prop,
+			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count, NULL);
+		if (rc)
+			goto end;
+		rc = _read_dt_entry(snp, dspp_blocks_prop,
+			ARRAY_SIZE(dspp_blocks_prop), blocks_prop_count,
+			blocks_prop_exists, blocks_prop_value);
+		if (rc)
+			goto end;
+	}
+
+	for (i = 0; i < off_count; i++) {
+		dspp = sde_cfg->dspp + i;
+		dspp->base = PROP_VALUE_ACCESS(prop_value, DSPP_OFF, i);
+		dspp->len = PROP_VALUE_ACCESS(prop_value, DSPP_SIZE, 0);
+		dspp->id = DSPP_0 + i;
+		snprintf(dspp->name, SDE_HW_BLK_NAME_LEN, "dspp_%u",
+				dspp->id - DSPP_0);
+
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		dspp->sblk = sblk;
+
+		if (blocks_prop_value)
+			_sde_dspp_setup_blocks(sde_cfg, dspp, sblk,
+					blocks_prop_exists, blocks_prop_value);
+
+		/* AD offsets are per-instance; version is shared */
+		sblk->ad.id = SDE_DSPP_AD;
+		if (ad_prop_value && (i < ad_off_count) &&
+		    ad_prop_exists[AD_OFF]) {
+			sblk->ad.base = PROP_VALUE_ACCESS(ad_prop_value,
+					AD_OFF, i);
+			sblk->ad.version = PROP_VALUE_ACCESS(ad_prop_value,
+					AD_VERSION, 0);
+			set_bit(SDE_DSPP_AD, &dspp->features);
+		}
+	}
+
+end:
+	/* drop the child-node reference taken by of_get_child_by_name() */
+	of_node_put(snp);
+	kfree(prop_value);
+	kfree(ad_prop_value);
+	kfree(blocks_prop_value);
+	return rc;
+}
+
+/**
+ * sde_cdm_parse_dt - read CDM block configuration from device tree
+ * @np:      device node of the SDE hardware
+ * @sde_cfg: catalog structure to populate with CDM configuration
+ *
+ * Must run after intf and wb parsing so the connect masks below can
+ * reflect whether those blocks exist.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int sde_cdm_parse_dt(struct device_node *np,
+					struct sde_mdss_cfg *sde_cfg)
+{
+	int prop_count[HW_PROP_MAX];
+	bool prop_exists[HW_PROP_MAX];
+	struct sde_prop_value *props;
+	struct sde_cdm_cfg *cdm;
+	u32 num_cdm;
+	int rc, i;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	props = kzalloc(HW_PROP_MAX * sizeof(*props), GFP_KERNEL);
+	if (!props)
+		return -ENOMEM;
+
+	rc = _validate_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+		&num_cdm);
+	if (rc)
+		goto done;
+
+	sde_cfg->cdm_count = num_cdm;
+
+	rc = _read_dt_entry(np, cdm_prop, ARRAY_SIZE(cdm_prop), prop_count,
+		prop_exists, props);
+	if (rc)
+		goto done;
+
+	for (i = 0; i < num_cdm; i++) {
+		cdm = &sde_cfg->cdm[i];
+		cdm->id = CDM_0 + i;
+		cdm->base = PROP_VALUE_ACCESS(props, HW_OFF, i);
+		cdm->len = PROP_VALUE_ACCESS(props, HW_LEN, 0);
+		snprintf(cdm->name, SDE_HW_BLK_NAME_LEN, "cdm_%u",
+				cdm->id - CDM_0);
+
+		/* intf3 and wb2 for cdm block; BIT(31) marks none */
+		cdm->wb_connect = sde_cfg->wb_count ? BIT(WB_2) : BIT(31);
+		cdm->intf_connect = sde_cfg->intf_count ? BIT(INTF_3) : BIT(31);
+	}
+
+done:
+	kfree(props);
+	return rc;
+}
+
+/**
+ * sde_vbif_parse_dt - parse VBIF configuration from device tree
+ * @np:      device node of the SDE hardware
+ * @sde_cfg: catalog structure to populate with VBIF configuration
+ *
+ * Reads the VBIF base/length/id properties plus the default and dynamic
+ * OT limit tables for both read and write clients.  Each dynamic table
+ * is stored in DT as flat <pps, ot_limit> pairs, hence the "count / 2"
+ * and the paired k++ reads below.
+ *
+ * Return: 0 on success, negative error code on failure.  On failure,
+ * already-allocated OT tables are freed by sde_hw_catalog_deinit().
+ */
+static int sde_vbif_parse_dt(struct device_node *np,
+				struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[VBIF_PROP_MAX], i, j, k;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[VBIF_PROP_MAX];
+	u32 off_count, vbif_len, rd_len = 0, wr_len = 0;
+	struct sde_vbif_cfg *vbif;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(VBIF_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop),
+			prop_count, &off_count);
+	if (rc)
+		goto end;
+
+	/*
+	 * Re-validate the two variable-length OT tables individually so
+	 * their element counts land in prop_count[] for the loops below.
+	 * rd_len/wr_len receive the lengths but are not otherwise used.
+	 */
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_RD_LIMIT], 1,
+			&prop_count[VBIF_DYNAMIC_OT_RD_LIMIT], &rd_len);
+	if (rc)
+		goto end;
+
+	rc = _validate_dt_entry(np, &vbif_prop[VBIF_DYNAMIC_OT_WR_LIMIT], 1,
+			&prop_count[VBIF_DYNAMIC_OT_WR_LIMIT], &wr_len);
+	if (rc)
+		goto end;
+
+	sde_cfg->vbif_count = off_count;
+
+	rc = _read_dt_entry(np, vbif_prop, ARRAY_SIZE(vbif_prop), prop_count,
+			prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	vbif_len = PROP_VALUE_ACCESS(prop_value, VBIF_LEN, 0);
+	if (!prop_exists[VBIF_LEN])
+		vbif_len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+	for (i = 0; i < off_count; i++) {
+		vbif = sde_cfg->vbif + i;
+		vbif->base = PROP_VALUE_ACCESS(prop_value, VBIF_OFF, i);
+		vbif->len = vbif_len;
+		/* id comes from DT, not from the array index */
+		vbif->id = VBIF_0 + PROP_VALUE_ACCESS(prop_value, VBIF_ID, i);
+		snprintf(vbif->name, SDE_HW_BLK_NAME_LEN, "vbif_%u",
+				vbif->id - VBIF_0);
+
+		SDE_DEBUG("vbif:%d\n", vbif->id - VBIF_0);
+
+		vbif->xin_halt_timeout = VBIF_XIN_HALT_TIMEOUT;
+
+		vbif->default_ot_rd_limit = PROP_VALUE_ACCESS(prop_value,
+				VBIF_DEFAULT_OT_RD_LIMIT, 0);
+		SDE_DEBUG("default_ot_rd_limit=%u\n",
+				vbif->default_ot_rd_limit);
+
+		vbif->default_ot_wr_limit = PROP_VALUE_ACCESS(prop_value,
+				VBIF_DEFAULT_OT_WR_LIMIT, 0);
+		SDE_DEBUG("default_ot_wr_limit=%u\n",
+				vbif->default_ot_wr_limit);
+
+		/* DT stores <pps, ot_limit> pairs, so two entries per row */
+		vbif->dynamic_ot_rd_tbl.count =
+				prop_count[VBIF_DYNAMIC_OT_RD_LIMIT] / 2;
+		SDE_DEBUG("dynamic_ot_rd_tbl.count=%u\n",
+				vbif->dynamic_ot_rd_tbl.count);
+		if (vbif->dynamic_ot_rd_tbl.count) {
+			vbif->dynamic_ot_rd_tbl.cfg = kcalloc(
+				vbif->dynamic_ot_rd_tbl.count,
+				sizeof(struct sde_vbif_dynamic_ot_cfg),
+				GFP_KERNEL);
+			if (!vbif->dynamic_ot_rd_tbl.cfg) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		}
+
+		for (j = 0, k = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			vbif->dynamic_ot_rd_tbl.cfg[j].pps = (u64)
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
+			vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit =
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_RD_LIMIT, k++);
+			SDE_DEBUG("dynamic_ot_rd_tbl[%d].cfg=<%llu %u>\n", j,
+				vbif->dynamic_ot_rd_tbl.cfg[j].pps,
+				vbif->dynamic_ot_rd_tbl.cfg[j].ot_limit);
+		}
+
+		vbif->dynamic_ot_wr_tbl.count =
+				prop_count[VBIF_DYNAMIC_OT_WR_LIMIT] / 2;
+		SDE_DEBUG("dynamic_ot_wr_tbl.count=%u\n",
+				vbif->dynamic_ot_wr_tbl.count);
+		if (vbif->dynamic_ot_wr_tbl.count) {
+			vbif->dynamic_ot_wr_tbl.cfg = kcalloc(
+				vbif->dynamic_ot_wr_tbl.count,
+				sizeof(struct sde_vbif_dynamic_ot_cfg),
+				GFP_KERNEL);
+			if (!vbif->dynamic_ot_wr_tbl.cfg) {
+				rc = -ENOMEM;
+				goto end;
+			}
+		}
+
+		for (j = 0, k = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			vbif->dynamic_ot_wr_tbl.cfg[j].pps = (u64)
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
+			vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit =
+				PROP_VALUE_ACCESS(prop_value,
+				VBIF_DYNAMIC_OT_WR_LIMIT, k++);
+			SDE_DEBUG("dynamic_ot_wr_tbl[%d].cfg=<%llu %u>\n", j,
+				vbif->dynamic_ot_wr_tbl.cfg[j].pps,
+				vbif->dynamic_ot_wr_tbl.cfg[j].ot_limit);
+		}
+
+		/* any OT configuration at all enables the QoS OT feature */
+		if (vbif->default_ot_rd_limit || vbif->default_ot_wr_limit ||
+				vbif->dynamic_ot_rd_tbl.count ||
+				vbif->dynamic_ot_wr_tbl.count)
+			set_bit(SDE_VBIF_QOS_OTLIM, &vbif->features);
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+/**
+ * sde_pp_parse_dt - parse PINGPONG block properties from device tree
+ * @np:      device node of the SDE hardware
+ * @sde_cfg: catalog structure to populate with pingpong configuration
+ *
+ * Fills one sde_pingpong_cfg (and its allocated sub-block structure) per
+ * pingpong instance, registering TE, optional TE2, slave and DSC
+ * capabilities.  TE2 and DSC presence is signalled by a non-zero offset.
+ *
+ * Return: 0 on success, negative error code on failure.  Partially
+ * allocated sub-blocks are released by sde_hw_catalog_deinit().
+ */
+static int sde_pp_parse_dt(struct device_node *np,
+						struct sde_mdss_cfg *sde_cfg)
+{
+	int rc, prop_count[PP_PROP_MAX], i;
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[PP_PROP_MAX];
+	u32 off_count;
+	struct sde_pingpong_cfg *pp;
+	struct sde_pingpong_sub_blks *sblk;
+
+	if (!sde_cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(PP_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
+		&off_count);
+	if (rc)
+		goto end;
+
+	sde_cfg->pingpong_count = off_count;
+
+	rc = _read_dt_entry(np, pp_prop, ARRAY_SIZE(pp_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	for (i = 0; i < off_count; i++) {
+		pp = sde_cfg->pingpong + i;
+		sblk = kzalloc(sizeof(*sblk), GFP_KERNEL);
+		if (!sblk) {
+			rc = -ENOMEM;
+			/* catalog deinit will release the allocated blocks */
+			goto end;
+		}
+		pp->sblk = sblk;
+
+		pp->base = PROP_VALUE_ACCESS(prop_value, PP_OFF, i);
+		pp->id = PINGPONG_0 + i;
+		snprintf(pp->name, SDE_HW_BLK_NAME_LEN, "pingpong_%u",
+				pp->id - PINGPONG_0);
+		pp->len = PROP_VALUE_ACCESS(prop_value, PP_LEN, 0);
+
+		sblk->te.base = PROP_VALUE_ACCESS(prop_value, TE_OFF, i);
+		sblk->te.id = SDE_PINGPONG_TE;
+		snprintf(sblk->te.name, SDE_HW_BLK_NAME_LEN, "te_%u",
+				pp->id - PINGPONG_0);
+		set_bit(SDE_PINGPONG_TE, &pp->features);
+
+		sblk->te2.base = PROP_VALUE_ACCESS(prop_value, TE2_OFF, i);
+		if (sblk->te2.base) {
+			sblk->te2.id = SDE_PINGPONG_TE2;
+			snprintf(sblk->te2.name, SDE_HW_BLK_NAME_LEN, "te2_%u",
+					pp->id - PINGPONG_0);
+			set_bit(SDE_PINGPONG_TE2, &pp->features);
+			set_bit(SDE_PINGPONG_SPLIT, &pp->features);
+		}
+
+		if (PROP_VALUE_ACCESS(prop_value, PP_SLAVE, i))
+			set_bit(SDE_PINGPONG_SLAVE, &pp->features);
+
+		sblk->dsc.base = PROP_VALUE_ACCESS(prop_value, DSC_OFF, i);
+		if (sblk->dsc.base) {
+			sblk->dsc.id = SDE_PINGPONG_DSC;
+			/*
+			 * Name by pingpong instance index, matching te/te2
+			 * above.  Previous code indexed off sblk->dsc.id,
+			 * which is a feature enum, not a block id.
+			 */
+			snprintf(sblk->dsc.name, SDE_HW_BLK_NAME_LEN, "dsc_%u",
+					pp->id - PINGPONG_0);
+			set_bit(SDE_PINGPONG_DSC, &pp->features);
+		}
+	}
+
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+/**
+ * _sde_parse_sspp_id - map an sspp block name to its hardware id
+ * @cfg:  catalog whose sspp table has already been populated
+ * @name: pipe name string to look up
+ *
+ * Return: matching sspp id, or SSPP_NONE if the name is unknown.
+ */
+static inline u32 _sde_parse_sspp_id(struct sde_mdss_cfg *cfg,
+		const char *name)
+{
+	int idx;
+
+	for (idx = 0; idx < cfg->sspp_count; idx++)
+		if (strcmp(cfg->sspp[idx].name, name) == 0)
+			return cfg->sspp[idx].id;
+
+	return SSPP_NONE;
+}
+
+/**
+ * _sde_vp_parse_dt - parse the virtual-plane to sspp mapping
+ * @np:  device node of the SDE hardware
+ * @cfg: catalog structure to populate (sspp entries must be parsed first)
+ *
+ * Walks the "qcom,sde-plane-id-map" node (either a direct child or a
+ * phandle) and builds, for every virtual plane, the list of sspp ids
+ * named by its "qcom,plane-name" strings.
+ *
+ * Return: 0 on success, negative error code on failure.  On failure all
+ * partially built per-plane lists are torn down and vp_count reset.
+ */
+static int _sde_vp_parse_dt(struct device_node *np,
+			struct sde_mdss_cfg *cfg)
+{
+	int rc = 0, i = 0;
+	struct device_node *node = NULL;
+	struct device_node *root_node = NULL;
+	struct sde_vp_cfg *vp;
+	struct sde_vp_sub_blks *vp_sub, *vp_sub_next;
+	struct property *prop;
+	const char *cname;
+
+	root_node = of_get_child_by_name(np, "qcom,sde-plane-id-map");
+	if (!root_node) {
+		root_node = of_parse_phandle(np, "qcom,sde-plane-id-map", 0);
+		if (!root_node) {
+			SDE_ERROR("No entry present for qcom,sde-plane-id-map");
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	for_each_child_of_node(root_node, node) {
+		if (i >= MAX_BLOCKS) {
+			SDE_ERROR("num of nodes(%d) is bigger than max(%d)\n",
+					i, MAX_BLOCKS);
+			rc = -EINVAL;
+			of_node_put(node);
+			goto end;
+		}
+		cfg->vp_count++;
+		vp = &(cfg->vp[i]);
+		vp->id = i;
+		/*
+		 * Initialize the list head before anything can fail so the
+		 * unwind path below can safely walk every counted entry.
+		 */
+		INIT_LIST_HEAD(&vp->sub_blks);
+
+		rc = of_property_read_string(node, "qcom,display-type",
+					&(vp->display_type));
+		if (rc) {
+			SDE_ERROR("failed to read display-type, rc = %d\n", rc);
+			of_node_put(node);
+			goto end;
+		}
+
+		rc = of_property_read_string(node, "qcom,plane-type",
+					&(vp->plane_type));
+		if (rc) {
+			SDE_ERROR("failed to read plane-type, rc = %d\n", rc);
+			of_node_put(node);
+			goto end;
+		}
+
+		of_property_for_each_string(node, "qcom,plane-name",
+					prop, cname) {
+			vp_sub = kzalloc(sizeof(*vp_sub), GFP_KERNEL);
+			if (!vp_sub) {
+				rc = -ENOMEM;
+				of_node_put(node);
+				goto end;
+			}
+			vp_sub->sspp_id = _sde_parse_sspp_id(cfg, cname);
+			list_add_tail(&vp_sub->pipeid_list, &vp->sub_blks);
+		}
+		i++;
+	}
+
+end:
+	if (rc && cfg->vp_count) {
+		/*
+		 * Free the sub-block list of every counted entry.  The
+		 * previous code captured a single stale vp pointer before
+		 * this loop and freed the same list on each iteration.
+		 */
+		for (i = 0; i < cfg->vp_count; i++) {
+			vp = &(cfg->vp[i]);
+			list_for_each_entry_safe(vp_sub, vp_sub_next,
+					&vp->sub_blks, pipeid_list) {
+				list_del(&vp_sub->pipeid_list);
+				kfree(vp_sub);
+			}
+		}
+		memset(&(cfg->vp[0]), 0, sizeof(cfg->vp));
+		cfg->vp_count = 0;
+	}
+	/* balances of_get_child_by_name()/of_parse_phandle(); NULL-safe */
+	of_node_put(root_node);
+	return rc;
+}
+
+/**
+ * sde_parse_dt - parse top-level MDSS/MDP properties from device tree
+ * @np:  device node of the SDE hardware
+ * @cfg: catalog structure to populate
+ *
+ * Fills the single mdss/mdp top block plus the global limits (line
+ * widths, blend stages, highest bank bit) and the qseed/csc type
+ * strings.  Missing optional properties fall back to DEFAULT_* values.
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int sde_parse_dt(struct device_node *np, struct sde_mdss_cfg *cfg)
+{
+	/*
+	 * NOTE(review): len is declared int here but other parse helpers
+	 * pass a u32 to _validate_dt_entry for this argument -- confirm
+	 * the prototype and unify the type.
+	 */
+	int rc, len, prop_count[SDE_PROP_MAX];
+	struct sde_prop_value *prop_value = NULL;
+	bool prop_exists[SDE_PROP_MAX];
+	const char *type;
+
+	if (!cfg) {
+		SDE_ERROR("invalid argument\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	prop_value = kzalloc(SDE_PROP_MAX *
+			sizeof(struct sde_prop_value), GFP_KERNEL);
+	if (!prop_value) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	rc = _validate_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+		&len);
+	if (rc)
+		goto end;
+
+	rc = _read_dt_entry(np, sde_prop, ARRAY_SIZE(sde_prop), prop_count,
+		prop_exists, prop_value);
+	if (rc)
+		goto end;
+
+	/* exactly one mdss and one mdp-top block on this hardware */
+	cfg->mdss_count = 1;
+	cfg->mdss[0].base = MDSS_BASE_OFFSET;
+	cfg->mdss[0].id = MDP_TOP;
+	snprintf(cfg->mdss[0].name, SDE_HW_BLK_NAME_LEN, "mdss_%u",
+			cfg->mdss[0].id - MDP_TOP);
+
+	cfg->mdp_count = 1;
+	cfg->mdp[0].id = MDP_TOP;
+	snprintf(cfg->mdp[0].name, SDE_HW_BLK_NAME_LEN, "top_%u",
+			cfg->mdp[0].id - MDP_TOP);
+	cfg->mdp[0].base = PROP_VALUE_ACCESS(prop_value, SDE_OFF, 0);
+	cfg->mdp[0].len = PROP_VALUE_ACCESS(prop_value, SDE_LEN, 0);
+	if (!prop_exists[SDE_LEN])
+		cfg->mdp[0].len = DEFAULT_SDE_HW_BLOCK_LEN;
+
+	cfg->max_sspp_linewidth = PROP_VALUE_ACCESS(prop_value,
+			SSPP_LINEWIDTH, 0);
+	if (!prop_exists[SSPP_LINEWIDTH])
+		cfg->max_sspp_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->max_mixer_width = PROP_VALUE_ACCESS(prop_value,
+			MIXER_LINEWIDTH, 0);
+	if (!prop_exists[MIXER_LINEWIDTH])
+		cfg->max_mixer_width = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->max_mixer_blendstages = PROP_VALUE_ACCESS(prop_value,
+			MIXER_BLEND, 0);
+	if (!prop_exists[MIXER_BLEND])
+		cfg->max_mixer_blendstages = DEFAULT_SDE_MIXER_BLENDSTAGES;
+
+	cfg->max_wb_linewidth = PROP_VALUE_ACCESS(prop_value, WB_LINEWIDTH, 0);
+	if (!prop_exists[WB_LINEWIDTH])
+		cfg->max_wb_linewidth = DEFAULT_SDE_LINE_WIDTH;
+
+	cfg->mdp[0].highest_bank_bit = PROP_VALUE_ACCESS(prop_value,
+			BANK_BIT, 0);
+	if (!prop_exists[BANK_BIT])
+		cfg->mdp[0].highest_bank_bit = DEFAULT_SDE_HIGHEST_BANK_BIT;
+
+	/* optional string properties: missing is not an error */
+	rc = of_property_read_string(np, sde_prop[QSEED_TYPE].prop_name, &type);
+	if (!rc && !strcmp(type, "qseedv3"))
+		cfg->qseed_type = SDE_SSPP_SCALER_QSEED3;
+	else if (!rc && !strcmp(type, "qseedv2"))
+		cfg->qseed_type = SDE_SSPP_SCALER_QSEED2;
+	else if (rc) {
+		SDE_DEBUG("qseed property not found\n");
+		rc = 0;
+	}
+
+	rc = of_property_read_string(np, sde_prop[CSC_TYPE].prop_name, &type);
+	if (!rc && !strcmp(type, "csc"))
+		cfg->csc_type = SDE_SSPP_CSC;
+	else if (!rc && !strcmp(type, "csc-10bit"))
+		cfg->csc_type = SDE_SSPP_CSC_10BIT;
+	else if (rc) {
+		SDE_DEBUG("CSC property not found\n");
+		rc = 0;
+	}
+
+	cfg->has_src_split = PROP_VALUE_ACCESS(prop_value, SRC_SPLIT, 0);
+end:
+	kfree(prop_value);
+	return rc;
+}
+
+/**
+ * sde_perf_parse_dt - read performance limits from device tree
+ * @np:  device node of the SDE hardware
+ * @cfg: catalog structure to receive the max bandwidth values
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int sde_perf_parse_dt(struct device_node *np,
+			struct sde_mdss_cfg *cfg)
+{
+	int prop_count[PERF_PROP_MAX];
+	bool prop_exists[PERF_PROP_MAX];
+	struct sde_prop_value *vals;
+	int rc, len;
+
+	if (!cfg) {
+		SDE_ERROR("invalid argument\n");
+		return -EINVAL;
+	}
+
+	vals = kzalloc(PERF_PROP_MAX * sizeof(*vals), GFP_KERNEL);
+	if (!vals)
+		return -ENOMEM;
+
+	rc = _validate_dt_entry(np, sde_perf_prop, ARRAY_SIZE(sde_perf_prop),
+			prop_count, &len);
+	if (!rc)
+		rc = _read_dt_entry(np, sde_perf_prop,
+				ARRAY_SIZE(sde_perf_prop), prop_count,
+				prop_exists, vals);
+
+	if (!rc) {
+		cfg->perf.max_bw_low =
+			PROP_VALUE_ACCESS(vals, PERF_MAX_BW_LOW, 0);
+		cfg->perf.max_bw_high =
+			PROP_VALUE_ACCESS(vals, PERF_MAX_BW_HIGH, 0);
+	}
+
+	kfree(vals);
+	return rc;
+}
+
+/**
+ * sde_hardware_format_caps - build pixel format lists per block type
+ * @sde_cfg: catalog whose sspp/wb entries have already been parsed
+ * @hw_rev:  MDSS hardware revision (SDE_HW_VER_xxx)
+ *
+ * Allocates and fills the dma/vig/wb (and, on SDE_HW_VER_300, cursor)
+ * format lists, then points every sspp sub-block and wb entry at the
+ * list matching its type.  The lists are owned by the catalog and freed
+ * in sde_hw_catalog_deinit().
+ *
+ * Return: 0 on success, negative error code on failure.
+ */
+static int sde_hardware_format_caps(struct sde_mdss_cfg *sde_cfg,
+	uint32_t hw_rev)
+{
+	int i, rc = 0;
+	uint32_t dma_list_size, vig_list_size, wb2_list_size;
+	uint32_t cursor_list_size = 0;
+	struct sde_sspp_sub_blks *sblk;
+	uint32_t index = 0;
+
+	/* cursor pipes (and thus a cursor format list) exist only on 3.0 */
+	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300)) {
+		cursor_list_size = ARRAY_SIZE(cursor_formats);
+		sde_cfg->cursor_formats = kcalloc(cursor_list_size,
+			sizeof(struct sde_format_extended), GFP_KERNEL);
+		if (!sde_cfg->cursor_formats) {
+			rc = -ENOMEM;
+			goto end;
+		}
+		index = _sde_copy_formats(sde_cfg->cursor_formats,
+			cursor_list_size, 0, cursor_formats,
+			ARRAY_SIZE(cursor_formats));
+	}
+
+	/* base tables plus the 10-bit/UBWC extensions appended below */
+	dma_list_size = ARRAY_SIZE(plane_formats);
+	vig_list_size = ARRAY_SIZE(plane_formats_yuv);
+	wb2_list_size = ARRAY_SIZE(wb2_formats);
+
+	dma_list_size += ARRAY_SIZE(rgb_10bit_formats);
+	vig_list_size += ARRAY_SIZE(rgb_10bit_formats)
+		+ ARRAY_SIZE(tp10_ubwc_formats)
+		+ ARRAY_SIZE(p010_formats);
+	wb2_list_size += ARRAY_SIZE(rgb_10bit_formats)
+		+ ARRAY_SIZE(tp10_ubwc_formats);
+
+	sde_cfg->dma_formats = kcalloc(dma_list_size,
+		sizeof(struct sde_format_extended), GFP_KERNEL);
+	if (!sde_cfg->dma_formats) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	sde_cfg->vig_formats = kcalloc(vig_list_size,
+		sizeof(struct sde_format_extended), GFP_KERNEL);
+	if (!sde_cfg->vig_formats) {
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	sde_cfg->wb_formats = kcalloc(wb2_list_size,
+		sizeof(struct sde_format_extended), GFP_KERNEL);
+	if (!sde_cfg->wb_formats) {
+		SDE_ERROR("failed to allocate wb format list\n");
+		rc = -ENOMEM;
+		goto end;
+	}
+
+	if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300) ||
+		IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_301)) {
+		sde_cfg->has_hdr = true;
+	}
+
+	/* index accumulates entries copied so far into each list */
+	index = _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
+		0, plane_formats, ARRAY_SIZE(plane_formats));
+	index += _sde_copy_formats(sde_cfg->dma_formats, dma_list_size,
+		index, rgb_10bit_formats,
+		ARRAY_SIZE(rgb_10bit_formats));
+
+	index = _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		0, plane_formats_yuv, ARRAY_SIZE(plane_formats_yuv));
+	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		index, rgb_10bit_formats,
+		ARRAY_SIZE(rgb_10bit_formats));
+	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		index, p010_formats, ARRAY_SIZE(p010_formats));
+
+	index += _sde_copy_formats(sde_cfg->vig_formats, vig_list_size,
+		index, tp10_ubwc_formats,
+		ARRAY_SIZE(tp10_ubwc_formats));
+
+	index = _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+		0, wb2_formats, ARRAY_SIZE(wb2_formats));
+	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+		index, rgb_10bit_formats,
+		ARRAY_SIZE(rgb_10bit_formats));
+	index += _sde_copy_formats(sde_cfg->wb_formats, wb2_list_size,
+		index, tp10_ubwc_formats,
+		ARRAY_SIZE(tp10_ubwc_formats));
+
+	/* hook each pipe's sub-block up to the list matching its type */
+	for (i = 0; i < sde_cfg->sspp_count; ++i) {
+		struct sde_sspp_cfg *sspp = &sde_cfg->sspp[i];
+
+		sblk = (struct sde_sspp_sub_blks *)sspp->sblk;
+		switch (sspp->type) {
+		case SSPP_TYPE_VIG:
+			sblk->format_list = sde_cfg->vig_formats;
+			break;
+		case SSPP_TYPE_CURSOR:
+			/* only valid on 3.0, where cursor_formats exists */
+			if (IS_SDE_MAJOR_MINOR_SAME((hw_rev), SDE_HW_VER_300))
+				sblk->format_list = sde_cfg->cursor_formats;
+			else
+				SDE_ERROR("invalid sspp type %d, xin id %d\n",
+					sspp->type, sspp->xin_id);
+			break;
+		case SSPP_TYPE_DMA:
+			sblk->format_list = sde_cfg->dma_formats;
+			break;
+		default:
+			SDE_ERROR("invalid sspp type %d\n", sspp->type);
+			rc = -EINVAL;
+			goto end;
+		}
+	}
+
+	for (i = 0; i < sde_cfg->wb_count; ++i)
+		sde_cfg->wb[i].format_list = sde_cfg->wb_formats;
+
+end:
+	return rc;
+}
+
+/**
+ * sde_hardware_caps - apply hardware-revision specific catalog fixups
+ * @sde_cfg: catalog to update
+ * @hw_rev:  MDSS hardware revision (SDE_HW_VER_xxx)
+ *
+ * Return: 0 on success or for revisions needing no fixup, negative error
+ * code on failure.
+ */
+static int sde_hardware_caps(struct sde_mdss_cfg *sde_cfg, uint32_t hw_rev)
+{
+	int rc = 0;
+
+	switch (hw_rev) {
+	case SDE_HW_VER_170:
+	case SDE_HW_VER_171:
+	case SDE_HW_VER_172:
+		/* update msm8996 target here */
+		break;
+	case SDE_HW_VER_300:
+	case SDE_HW_VER_301:
+	case SDE_HW_VER_400:
+		/* update cobalt and skunk target here */
+		rc = sde_hardware_format_caps(sde_cfg, hw_rev);
+		break;
+	default:
+		/* unknown revision: keep the DT-parsed defaults untouched */
+		break;
+	}
+
+	return rc;
+}
+
+/**
+ * sde_hw_catalog_deinit - release all catalog allocations
+ * @sde_cfg: catalog returned by sde_hw_catalog_init(); may be NULL
+ *
+ * Frees every per-block sub-block structure, the VBIF OT tables, the
+ * virtual-plane lists, the format lists, and finally the catalog itself.
+ * Safe to call on a partially initialized catalog: kfree(NULL) is a
+ * no-op and the counts bound each loop to what was actually parsed.
+ */
+void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg)
+{
+	int i;
+	struct sde_vp_sub_blks *vp_sub, *vp_sub_next;
+
+	if (!sde_cfg)
+		return;
+
+	for (i = 0; i < sde_cfg->sspp_count; i++)
+		kfree(sde_cfg->sspp[i].sblk);
+
+	for (i = 0; i < sde_cfg->mixer_count; i++)
+		kfree(sde_cfg->mixer[i].sblk);
+
+	for (i = 0; i < sde_cfg->wb_count; i++)
+		kfree(sde_cfg->wb[i].sblk);
+
+	for (i = 0; i < sde_cfg->dspp_count; i++)
+		kfree(sde_cfg->dspp[i].sblk);
+
+	for (i = 0; i < sde_cfg->pingpong_count; i++)
+		kfree(sde_cfg->pingpong[i].sblk);
+
+	for (i = 0; i < sde_cfg->vbif_count; i++) {
+		kfree(sde_cfg->vbif[i].dynamic_ot_rd_tbl.cfg);
+		kfree(sde_cfg->vbif[i].dynamic_ot_wr_tbl.cfg);
+	}
+
+	for (i = 0; i < sde_cfg->vp_count; i++) {
+		list_for_each_entry_safe(vp_sub, vp_sub_next,
+			&sde_cfg->vp[i].sub_blks, pipeid_list) {
+			list_del(&vp_sub->pipeid_list);
+			kfree(vp_sub);
+		}
+	}
+
+	kfree(sde_cfg->dma_formats);
+	kfree(sde_cfg->cursor_formats);
+	kfree(sde_cfg->vig_formats);
+	kfree(sde_cfg->wb_formats);
+
+	kfree(sde_cfg);
+}
+
+/*************************************************************
+ * hardware catalog init
+ *************************************************************/
+/**
+ * sde_hw_catalog_init - allocate and populate the hardware catalog
+ * @dev:    drm device whose of_node carries the SDE properties
+ * @hw_rev: MDSS hardware revision (SDE_HW_VER_xxx)
+ *
+ * Runs every per-block device tree parser in dependency order (mixers
+ * after dspp/pingpong, cdm after intf/wb) and applies revision-specific
+ * fixups.  A virtual-plane parse failure is non-fatal.
+ *
+ * NOTE(review): returns ERR_PTR(-ENOMEM) when the allocation fails but
+ * NULL on any later parse failure -- callers must check both; consider
+ * unifying on ERR_PTR (would change the caller contract, so left as-is).
+ *
+ * Return: populated catalog on success; ERR_PTR or NULL on failure (the
+ * partially built catalog is released via sde_hw_catalog_deinit()).
+ */
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev,
+	u32 hw_rev)
+{
+	int rc;
+	struct sde_mdss_cfg *sde_cfg;
+	struct device_node *np = dev->dev->of_node;
+
+	sde_cfg = kzalloc(sizeof(*sde_cfg), GFP_KERNEL);
+	if (!sde_cfg)
+		return ERR_PTR(-ENOMEM);
+
+	sde_cfg->hwversion = hw_rev;
+
+	rc = sde_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_ctl_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_sspp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_dspp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_pp_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	/* mixer parsing should be done after dspp and pp for mapping setup */
+	rc = sde_mixer_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_intf_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_wb_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	/* cdm parsing should be done after intf and wb for mapping setup */
+	rc = sde_cdm_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_vbif_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	rc = sde_perf_parse_dt(np, sde_cfg);
+	if (rc)
+		goto end;
+
+	/* virtual planes are optional; failure here is not fatal */
+	rc = _sde_vp_parse_dt(np, sde_cfg);
+	if (rc)
+		SDE_DEBUG("virtual plane is not supported.\n");
+
+	rc = sde_hardware_caps(sde_cfg, hw_rev);
+	if (rc)
+		goto end;
+
+	return sde_cfg;
+
+end:
+	sde_hw_catalog_deinit(sde_cfg);
+	return NULL;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
new file mode 100644
index 000000000000..0d09f05bb195
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog.h
@@ -0,0 +1,775 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CATALOG_H
+#define _SDE_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <linux/msm-bus.h>
+#include <drm/drmP.h>
+
+/**
+ * Max hardware block count: For ex: max 12 SSPP pipes or
+ * 5 ctl paths. In all cases, it can have max 12 hardware blocks
+ * based on current design
+ */
+#define MAX_BLOCKS 12
+
+/*
+ * Pack a hardware revision into a single word: 4-bit major, 12-bit
+ * minor, 16-bit step.  Macro arguments are fully parenthesized so that
+ * expression arguments (e.g. SDE_HW_VER(a | b, ...)) expand correctly.
+ */
+#define SDE_HW_VER(MAJOR, MINOR, STEP) ((((MAJOR) & 0xF) << 28)    |\
+		(((MINOR) & 0xFFF) << 16) |\
+		((STEP) & 0xFFFF))
+
+#define SDE_HW_MAJOR(rev) ((rev) >> 28)
+#define SDE_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define SDE_HW_STEP(rev) ((rev) & 0xFFFF)
+#define SDE_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_SDE_MAJOR_MINOR_SAME(rev1, rev2) \
+ (SDE_HW_MAJOR_MINOR((rev1)) == SDE_HW_MAJOR_MINOR((rev2)))
+
+#define SDE_HW_VER_170 SDE_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define SDE_HW_VER_171 SDE_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define SDE_HW_VER_172 SDE_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define SDE_HW_VER_300 SDE_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define SDE_HW_VER_301 SDE_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define SDE_HW_VER_400 SDE_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+
+#define IS_MSMSKUNK_TARGET(rev) IS_SDE_MAJOR_MINOR_SAME((rev), SDE_HW_VER_400)
+
+#define SDE_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define SDE_COLOR_PROCESS_VER(MAJOR, MINOR) \
+ ((((MAJOR) & 0xFFFF) << 16) | (((MINOR) & 0xFFFF)))
+#define SDE_COLOR_PROCESS_MAJOR(version) (((version) & 0xFFFF0000) >> 16)
+#define SDE_COLOR_PROCESS_MINOR(version) ((version) & 0xFFFF)
+
+/**
+ * MDP TOP BLOCK features
+ * @SDE_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @SDE_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @SDE_MDP_BWC,           MDSS HW supports Bandwidth compression.
+ * @SDE_MDP_UBWC_1_0,      This chipset supports Universal Bandwidth
+ *                         compression initial revision
+ * @SDE_MDP_UBWC_1_5,      Universal Bandwidth compression version 1.5
+ * @SDE_MDP_CDP,           Client driven prefetch
+ * @SDE_MDP_MAX            Maximum value
+ */
+enum {
+	SDE_MDP_PANIC_PER_PIPE = 0x1,
+	SDE_MDP_10BIT_SUPPORT,
+	SDE_MDP_BWC,
+	SDE_MDP_UBWC_1_0,
+	SDE_MDP_UBWC_1_5,
+	SDE_MDP_CDP,
+	SDE_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @SDE_SSPP_SRC Src and fetch part of the pipes,
+ * @SDE_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @SDE_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @SDE_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @SDE_SSPP_CSC, Support of Color space conversion
+ * @SDE_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @SDE_SSPP_HSIC, Global HSIC control
+ * @SDE_SSPP_MEMCOLOR Memory Color Support
+ * @SDE_SSPP_IGC, Inverse gamma correction
+ * @SDE_SSPP_PCC, Color correction support
+ * @SDE_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @SDE_SSPP_QOS, SSPP support QoS control, danger/safe/creq
+ * @SDE_SSPP_MAX maximum value
+ */
+enum {
+	SDE_SSPP_SRC = 0x1,
+	SDE_SSPP_SCALER_QSEED2,
+	SDE_SSPP_SCALER_QSEED3,
+	SDE_SSPP_SCALER_RGB,
+	SDE_SSPP_CSC,
+	SDE_SSPP_CSC_10BIT,
+	SDE_SSPP_HSIC,
+	SDE_SSPP_MEMCOLOR,
+	SDE_SSPP_IGC,
+	SDE_SSPP_PCC,
+	SDE_SSPP_CURSOR,
+	SDE_SSPP_QOS,
+	SDE_SSPP_MAX
+};
+
+/**
+ * MIXER sub-blocks/features
+ * @SDE_MIXER_LAYER Layer mixer layer blend configuration,
+ * @SDE_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @SDE_MIXER_GC Gamma correction block
+ * @SDE_DISP_PRIMARY_PREF Primary display prefers this mixer
+ * @SDE_DISP_SECONDARY_PREF Secondary display prefers this mixer
+ * @SDE_DISP_TERTIARY_PREF Tertiary display prefers this mixer
+ * @SDE_MIXER_MAX maximum value
+ */
+enum {
+	SDE_MIXER_LAYER = 0x1,
+	SDE_MIXER_SOURCESPLIT,
+	SDE_MIXER_GC,
+	SDE_DISP_PRIMARY_PREF,
+	SDE_DISP_SECONDARY_PREF,
+	SDE_DISP_TERTIARY_PREF,
+	SDE_MIXER_MAX
+};
+
+/**
+ * DSPP sub-blocks
+ * @SDE_DSPP_IGC DSPP Inverse gamma correction block
+ * @SDE_DSPP_PCC Panel color correction block
+ * @SDE_DSPP_GC Gamma correction block
+ * @SDE_DSPP_HSIC Global HSIC block
+ * @SDE_DSPP_MEMCOLOR Memory Color block
+ * @SDE_DSPP_SIXZONE Six zone block
+ * @SDE_DSPP_GAMUT Gamut block
+ * @SDE_DSPP_DITHER Dither block
+ * @SDE_DSPP_HIST Histogram block
+ * @SDE_DSPP_VLUT PA VLUT block
+ * @SDE_DSPP_AD AD block
+ * @SDE_DSPP_MAX maximum value
+ */
+enum {
+	SDE_DSPP_IGC = 0x1,
+	SDE_DSPP_PCC,
+	SDE_DSPP_GC,
+	SDE_DSPP_HSIC,
+	SDE_DSPP_MEMCOLOR,
+	SDE_DSPP_SIXZONE,
+	SDE_DSPP_GAMUT,
+	SDE_DSPP_DITHER,
+	SDE_DSPP_HIST,
+	SDE_DSPP_VLUT,
+	SDE_DSPP_AD,
+	SDE_DSPP_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @SDE_PINGPONG_TE Tear check block
+ * @SDE_PINGPONG_TE2 Additional tear check block for split pipes
+ * @SDE_PINGPONG_SPLIT PP block supports split fifo
+ * @SDE_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @SDE_PINGPONG_DSC, Display stream compression blocks
+ * @SDE_PINGPONG_MAX maximum value
+ */
+enum {
+	SDE_PINGPONG_TE = 0x1,
+	SDE_PINGPONG_TE2,
+	SDE_PINGPONG_SPLIT,
+	SDE_PINGPONG_SLAVE,
+	SDE_PINGPONG_DSC,
+	SDE_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @SDE_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @SDE_CTL_PINGPONG_SPLIT CTL supports pingpong split
+ * @SDE_CTL_PRIMARY_PREF Primary display prefers this CTL
+ * @SDE_CTL_SECONDARY_PREF Secondary display prefers this CTL
+ * @SDE_CTL_TERTIARY_PREF Tertiary display prefers this CTL
+ * @SDE_CTL_MAX
+ */
+enum {
+	SDE_CTL_SPLIT_DISPLAY = 0x1,
+	SDE_CTL_PINGPONG_SPLIT,
+	SDE_CTL_PRIMARY_PREF,
+	SDE_CTL_SECONDARY_PREF,
+	SDE_CTL_TERTIARY_PREF,
+	SDE_CTL_MAX
+};
+
+/**
+ * WB sub-blocks and features
+ * @SDE_WB_LINE_MODE Writeback module supports line/linear mode
+ * @SDE_WB_BLOCK_MODE Writeback module supports block mode read
+ * @SDE_WB_ROTATE rotation support,this is available if writeback
+ * supports block mode read
+ * @SDE_WB_CSC Writeback color conversion block support
+ * @SDE_WB_CHROMA_DOWN, Writeback chroma down block,
+ * @SDE_WB_DOWNSCALE, Writeback integer downscaler,
+ * @SDE_WB_DITHER, Dither block
+ * @SDE_WB_TRAFFIC_SHAPER, Writeback traffic shaper block
+ * @SDE_WB_UBWC_1_0, Writeback Universal bandwidth compression 1.0
+ * support (NOTE(review): the doc below also listed SDE_WB_UBWC_1_5,
+ * but no such enumerator exists in this enum -- confirm whether it
+ * was meant to be added)
+ * @SDE_WB_YUV_CONFIG Writeback supports output of YUV colorspace
+ * @SDE_WB_PIPE_ALPHA Writeback supports pipe alpha
+ * @SDE_WB_XY_ROI_OFFSET Writeback supports x/y-offset of out ROI in
+ * the destination image
+ * @SDE_WB_MAX maximum value
+ */
+enum {
+	SDE_WB_LINE_MODE = 0x1,
+	SDE_WB_BLOCK_MODE,
+	SDE_WB_ROTATE = SDE_WB_BLOCK_MODE,
+	SDE_WB_CSC,
+	SDE_WB_CHROMA_DOWN,
+	SDE_WB_DOWNSCALE,
+	SDE_WB_DITHER,
+	SDE_WB_TRAFFIC_SHAPER,
+	SDE_WB_UBWC_1_0,
+	SDE_WB_YUV_CONFIG,
+	SDE_WB_PIPE_ALPHA,
+	SDE_WB_XY_ROI_OFFSET,
+	SDE_WB_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @SDE_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @SDE_VBIF_MAX maximum value
+ */
+enum {
+ SDE_VBIF_QOS_OTLIM = 0x1,
+ SDE_VBIF_MAX
+};
+
+/**
+ * MACRO SDE_HW_BLK_INFO - information of HW blocks inside SDE
+ * @name:     string name for debug purposes
+ * @id:       enum identifying this block
+ * @base:     register base offset to mdss
+ * @len:      length of hardware block
+ * @features: bit mask identifying sub-blocks/features
+ */
+#define SDE_HW_BLK_INFO \
+	char name[SDE_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len; \
+	unsigned long features;
+
+/**
+ * MACRO SDE_HW_SUBBLK_INFO - information of HW sub-block inside SDE
+ * @name: string name for debug purposes
+ * @id:   enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block offset
+ * @len:  register block length of this sub-block
+ */
+#define SDE_HW_SUBBLK_INFO \
+	char name[SDE_HW_BLK_NAME_LEN]; \
+	u32 id; \
+	u32 base; \
+	u32 len
+
+/**
+ * struct sde_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct sde_src_blk {
+ SDE_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct sde_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct sde_scaler_blk {
+ SDE_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct sde_csc_blk : Color space conversion sub-block information
+ * @info: HW register and features supported by this sub-blk
+ */
+struct sde_csc_blk {
+	SDE_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct sde_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct sde_pp_blk {
+ SDE_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct sde_format_extended - define sde specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ * framebuffer planes
+ */
+struct sde_format_extended {
+ uint32_t fourcc_format;
+ uint64_t modifier;
+};
+
+/**
+ * struct sde_sspp_sub_blks : SSPP sub-blocks
+ * @maxlinewidth: max source pipe line width supported
+ * @danger_lut_linear: LUT to generate danger signals for linear format
+ * @safe_lut_linear: LUT to generate safe signals for linear format
+ * @danger_lut_tile: LUT to generate danger signals for tile format
+ * @safe_lut_tile: LUT to generate safe signals for tile format
+ * @danger_lut_nrt: LUT to generate danger signals for non-realtime use case
+ * @safe_lut_nrt: LUT to generate safe signals for non-realtime use case
+ * @creq_lut_nrt: LUT to generate creq signals for non-realtime use case
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxdwnscale: max downscale ratio supported (without DECIMATION)
+ * @maxupscale: max upscale ratio supported
+ * @maxhdeciexp: max horizontal decimation supported (2^value)
+ * @maxvdeciexp: max vertical decimation supported (2^value)
+ * @src_blk: source (fetch) sub-block
+ * @scaler_blk: scaler sub-block
+ * @csc_blk: color space conversion sub-block
+ * @hsic_blk: hue/saturation/intensity/contrast sub-block
+ * @memcolor_blk: memory color sub-block
+ * @pcc_blk: polynomial color correction sub-block
+ * @igc_blk: inverse gamma correction sub-block
+ * @format_list: Pointer to list of supported formats
+ */
+struct sde_sspp_sub_blks {
+	u32 maxlinewidth;
+	u32 danger_lut_linear;
+	u32 safe_lut_linear;
+	u32 danger_lut_tile;
+	u32 safe_lut_tile;
+	u32 danger_lut_nrt;
+	u32 safe_lut_nrt;
+	u32 creq_lut_nrt;
+	u32 creq_vblank;
+	u32 danger_vblank;
+	u32 pixel_ram_size;
+	u32 maxdwnscale;
+	u32 maxupscale;
+	u32 maxhdeciexp; /* max decimation is 2^value */
+	u32 maxvdeciexp; /* max decimation is 2^value */
+	struct sde_src_blk src_blk;
+	struct sde_scaler_blk scaler_blk;
+	struct sde_pp_blk csc_blk;
+	struct sde_pp_blk hsic_blk;
+	struct sde_pp_blk memcolor_blk;
+	struct sde_pp_blk pcc_blk;
+	struct sde_pp_blk igc_blk;
+
+	const struct sde_format_extended *format_list;
+};
+
+/**
+ * struct sde_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset, one entry per stage
+ * @gc: gamma correction block
+ */
+struct sde_lm_sub_blks {
+	u32 maxwidth;
+	u32 maxblendstages;
+	u32 blendstage_base[MAX_BLOCKS];
+	struct sde_pp_blk gc;
+};
+
+/* DSPP post-processing sub-blocks; offset/version info per feature */
+struct sde_dspp_sub_blks {
+	struct sde_pp_blk igc;
+	struct sde_pp_blk pcc;
+	struct sde_pp_blk gc;
+	struct sde_pp_blk hsic;
+	struct sde_pp_blk memcolor;
+	struct sde_pp_blk sixzone;
+	struct sde_pp_blk gamut;
+	struct sde_pp_blk dither;
+	struct sde_pp_blk hist;
+	struct sde_pp_blk ad;
+	struct sde_pp_blk vlut;
+};
+
+/* PingPong sub-blocks: tear-check engines and DSC */
+struct sde_pingpong_sub_blks {
+	struct sde_pp_blk te;
+	struct sde_pp_blk te2;
+	struct sde_pp_blk dsc;
+};
+
+/* Writeback sub-block capabilities */
+struct sde_wb_sub_blocks {
+	u32 maxlinewidth;
+};
+
+/* MDSS top-level wrapper block instance info */
+struct sde_mdss_base_cfg {
+	SDE_HW_BLK_INFO;
+};
+
+/**
+ * sde_clk_ctrl_type - Defines top level clock control signals
+ */
+enum sde_clk_ctrl_type {
+	SDE_CLK_CTRL_NONE,
+	SDE_CLK_CTRL_VIG0,
+	SDE_CLK_CTRL_VIG1,
+	SDE_CLK_CTRL_VIG2,
+	SDE_CLK_CTRL_VIG3,
+	SDE_CLK_CTRL_VIG4,
+	SDE_CLK_CTRL_RGB0,
+	SDE_CLK_CTRL_RGB1,
+	SDE_CLK_CTRL_RGB2,
+	SDE_CLK_CTRL_RGB3,
+	SDE_CLK_CTRL_DMA0,
+	SDE_CLK_CTRL_DMA1,
+	SDE_CLK_CTRL_CURSOR0,
+	SDE_CLK_CTRL_CURSOR1,
+	SDE_CLK_CTRL_WB0,
+	SDE_CLK_CTRL_WB1,
+	SDE_CLK_CTRL_WB2,
+	SDE_CLK_CTRL_MAX,
+};
+
+/**
+ * struct sde_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct sde_clk_ctrl_reg {
+	u32 reg_off;
+	u32 bit_off;
+};
+
+/**
+ * struct sde_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @clk_ctrls: clock control register definition
+ */
+struct sde_mdp_cfg {
+	SDE_HW_BLK_INFO;
+	u32 highest_bank_bit;
+	struct sde_clk_ctrl_reg clk_ctrls[SDE_CLK_CTRL_MAX];
+};
+
+/**
+ * struct sde_ctl_cfg : CTL (control path) instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features: bit mask identifying sub-blocks/features
+ */
+struct sde_ctl_cfg {
+	SDE_HW_BLK_INFO;
+};
+
+/**
+ * struct sde_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl: clock control identifier
+ * @type: sspp type identifier
+ */
+struct sde_sspp_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_sspp_sub_blks *sblk;
+	u32 xin_id;
+	enum sde_clk_ctrl_type clk_ctrl;
+	u32 type;
+};
+
+/**
+ * struct sde_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @dspp: ID of connected DSPP, DSPP_MAX if unsupported
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct sde_lm_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_lm_sub_blks *sblk;
+	u32 dspp;
+	u32 pingpong;
+	unsigned long lm_pair_mask;
+};
+
+/**
+ * struct sde_dspp_cfg - information of DSPP blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ *            supported by this block
+ * @sblk: sub-blocks information
+ */
+struct sde_dspp_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_dspp_sub_blks *sblk;
+};
+
+/**
+ * struct sde_pingpong_cfg - information of PING-PONG blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: sub-blocks information
+ */
+struct sde_pingpong_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct sde_cdm_cfg - information of chroma down blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @intf_connect: Bitmask of INTF IDs this CDM can connect to
+ * @wb_connect: Bitmask of Writeback IDs this CDM can connect to
+ */
+struct sde_cdm_cfg {
+	SDE_HW_BLK_INFO;
+	unsigned long intf_connect;
+	unsigned long wb_connect;
+};
+
+/**
+ * struct sde_intf_cfg - information of timing engine blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @type: Interface type (DSI, DP, HDMI)
+ * @controller_id: Controller Instance ID in case of multiple of intf type
+ * @prog_fetch_lines_worst_case: Worst case latency num lines needed to prefetch
+ */
+struct sde_intf_cfg {
+	SDE_HW_BLK_INFO;
+	u32 type; /* interface type*/
+	u32 controller_id;
+	u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct sde_wb_cfg - information of writeback blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @sblk: sub-block information
+ * @format_list: Pointer to list of supported formats
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier
+ */
+struct sde_wb_cfg {
+	SDE_HW_BLK_INFO;
+	const struct sde_wb_sub_blocks *sblk;
+	const struct sde_format_extended *format_list;
+	u32 vbif_idx;
+	u32 xin_id;
+	enum sde_clk_ctrl_type clk_ctrl;
+};
+
+/**
+ * struct sde_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps: pixel per second threshold
+ * @ot_limit: OT limit to use up to specified pixel per second
+ */
+struct sde_vbif_dynamic_ot_cfg {
+	u64 pps;
+	u32 ot_limit;
+};
+
+/**
+ * struct sde_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count: length of cfg
+ * @cfg: pointer to array of configuration settings with
+ *       ascending requirements
+ */
+struct sde_vbif_dynamic_ot_tbl {
+	u32 count;
+	struct sde_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct sde_vbif_cfg - information of VBIF blocks
+ * @id: enum identifying this block
+ * @base: register offset of this block
+ * @features: bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit: default OT read limit
+ * @default_ot_wr_limit: default OT write limit
+ * @xin_halt_timeout: maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl: dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl: dynamic OT write configuration table
+ */
+struct sde_vbif_cfg {
+	SDE_HW_BLK_INFO;
+	u32 default_ot_rd_limit;
+	u32 default_ot_wr_limit;
+	u32 xin_halt_timeout;
+	struct sde_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+	struct sde_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+};
+
+/**
+ * struct sde_perf_cfg - performance control settings
+ * @max_bw_low: low threshold of maximum bandwidth (kbps)
+ * @max_bw_high: high threshold of maximum bandwidth (kbps)
+ */
+struct sde_perf_cfg {
+	u32 max_bw_low;
+	u32 max_bw_high;
+};
+
+/**
+ * struct sde_vp_sub_blks - Virtual Plane sub-blocks
+ * @pipeid_list: list head for hw pipe ids bound to this virtual plane
+ * @sspp_id: SSPP ID, refer to enum sde_sspp
+ */
+struct sde_vp_sub_blks {
+	struct list_head pipeid_list;
+	u32 sspp_id;
+};
+
+/**
+ * struct sde_vp_cfg - information of Virtual Plane SW blocks
+ * @id: enum identifying this block
+ * @sub_blks: list head for virtual plane sub blocks
+ * @plane_type: plane type, such as primary, overlay or cursor
+ * @display_type: which display the plane is bound to, such as primary,
+ *                secondary or tertiary
+ */
+struct sde_vp_cfg {
+	u32 id;
+	struct list_head sub_blks;
+	const char *plane_type;
+	const char *display_type;
+};
+
+/**
+ * struct sde_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, capabilities of the all MDSS HW sub-blocks.
+ *
+ * @hwversion           MDSS hardware revision this catalog describes
+ * @max_sspp_linewidth  max source pipe line width support.
+ * @max_mixer_width     max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ *                       supported z order
+ * @max_wb_linewidth    max writeback line width support.
+ * @highest_bank_bit    highest memory bit setting for tile buffers.
+ * @qseed_type          qseed2 or qseed3 support.
+ * @csc_type            csc or csc_10bit support.
+ * @has_src_split       source split feature status
+ * @has_cdp             Client driver prefetch feature status
+ * @has_hdr             HDR feature support
+ * @dma_formats         Supported formats for dma pipe
+ * @cursor_formats      Supported formats for cursor pipe
+ * @vig_formats         Supported formats for vig pipe
+ * @wb_formats          Supported formats for wb
+ *
+ * Each *_count member below gives the number of valid entries in the
+ * fixed-size array that follows it.
+ */
+struct sde_mdss_cfg {
+	u32 hwversion;
+
+	u32 max_sspp_linewidth;
+	u32 max_mixer_width;
+	u32 max_mixer_blendstages;
+	u32 max_wb_linewidth;
+	u32 highest_bank_bit;
+	u32 qseed_type;
+	u32 csc_type;
+	bool has_src_split;
+	bool has_cdp;
+	bool has_hdr;
+	u32 mdss_count;
+	struct sde_mdss_base_cfg mdss[MAX_BLOCKS];
+
+	u32 mdp_count;
+	struct sde_mdp_cfg mdp[MAX_BLOCKS];
+
+	u32 ctl_count;
+	struct sde_ctl_cfg ctl[MAX_BLOCKS];
+
+	u32 sspp_count;
+	struct sde_sspp_cfg sspp[MAX_BLOCKS];
+
+	u32 mixer_count;
+	struct sde_lm_cfg mixer[MAX_BLOCKS];
+
+	u32 dspp_count;
+	struct sde_dspp_cfg dspp[MAX_BLOCKS];
+
+	u32 pingpong_count;
+	struct sde_pingpong_cfg pingpong[MAX_BLOCKS];
+
+	u32 cdm_count;
+	struct sde_cdm_cfg cdm[MAX_BLOCKS];
+
+	u32 intf_count;
+	struct sde_intf_cfg intf[MAX_BLOCKS];
+
+	u32 wb_count;
+	struct sde_wb_cfg wb[MAX_BLOCKS];
+
+	u32 vbif_count;
+	struct sde_vbif_cfg vbif[MAX_BLOCKS];
+	/* Add additional block data structures here */
+
+	struct sde_perf_cfg perf;
+
+	u32 vp_count;
+	struct sde_vp_cfg vp[MAX_BLOCKS];
+
+	struct sde_format_extended *dma_formats;
+	struct sde_format_extended *cursor_formats;
+	struct sde_format_extended *vig_formats;
+	struct sde_format_extended *wb_formats;
+};
+
+/**
+ * struct sde_mdss_hw_cfg_handler - per-revision catalog constructor
+ * @major: major hardware revision matched by this handler
+ * @minor: minor hardware revision matched by this handler
+ * @cfg_init: returns the catalog for the given hw revision
+ */
+struct sde_mdss_hw_cfg_handler {
+	u32 major;
+	u32 minor;
+	struct sde_mdss_cfg* (*cfg_init)(u32);
+};
+
+/*
+ * Access Macros
+ *
+ * NOTE(review): BLK_VIG/BLK_RGB/BLK_DMA/BLK_CURSOR/BLK_AD dereference
+ * members (vig, rgb, dma, cursor, ad) that do not exist in
+ * struct sde_mdss_cfg above -- they would fail to compile if used;
+ * confirm intent or remove.
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DSPP(s) ((s)->dspp)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_WB(s) ((s)->wb)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * sde_hw_catalog_init - sde hardware catalog init API parses dtsi property
+ * and stores all parsed offset, hardware capabilities in config structure.
+ * @dev: drm device node.
+ * @hw_rev: caller needs provide the hardware revision before parsing.
+ *
+ * Return: parsed sde config structure
+ */
+struct sde_mdss_cfg *sde_hw_catalog_init(struct drm_device *dev, u32 hw_rev);
+
+/**
+ * sde_hw_catalog_deinit - sde hardware catalog cleanup
+ * @sde_cfg: pointer returned from init function
+ */
+void sde_hw_catalog_deinit(struct sde_mdss_cfg *sde_cfg);
+
+#endif /* _SDE_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
new file mode 100644
index 000000000000..dbc8981a7f8f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_catalog_format.h
@@ -0,0 +1,177 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+
+/* RGB-only format+modifier list for non-YUV capable pipes;
+ * terminated by a {0, 0} sentinel entry.
+ */
+static const struct sde_format_extended plane_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{0, 0},
+};
+
+/* RGB + YUV format+modifier list for YUV-capable (VIG) pipes;
+ * terminated by a {0, 0} sentinel entry.
+ */
+static const struct sde_format_extended plane_formats_yuv[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV21, 0},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_NV61, 0},
+	{DRM_FORMAT_VYUY, 0},
+	{DRM_FORMAT_UYVY, 0},
+	{DRM_FORMAT_YUYV, 0},
+	{DRM_FORMAT_YVYU, 0},
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_YVU420, 0},
+	{0, 0},
+};
+
+/* Alpha-capable formats for cursor pipes; {0, 0} terminated */
+static const struct sde_format_extended cursor_formats[] = {
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{0, 0},
+};
+
+/* Output formats for writeback block 2; {0, 0} terminated */
+static const struct sde_format_extended wb2_formats[] = {
+	{DRM_FORMAT_RGB565, 0},
+	{DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_RGB888, 0},
+	{DRM_FORMAT_ARGB8888, 0},
+	{DRM_FORMAT_RGBA8888, 0},
+	{DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XRGB8888, 0},
+	{DRM_FORMAT_RGBX8888, 0},
+	{DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB1555, 0},
+	{DRM_FORMAT_RGBA5551, 0},
+	{DRM_FORMAT_XRGB1555, 0},
+	{DRM_FORMAT_RGBX5551, 0},
+	{DRM_FORMAT_ARGB4444, 0},
+	{DRM_FORMAT_RGBA4444, 0},
+	{DRM_FORMAT_RGBX4444, 0},
+	{DRM_FORMAT_XRGB4444, 0},
+
+	{DRM_FORMAT_BGR565, 0},
+	{DRM_FORMAT_BGR888, 0},
+	{DRM_FORMAT_ABGR8888, 0},
+	{DRM_FORMAT_BGRA8888, 0},
+	{DRM_FORMAT_BGRX8888, 0},
+	{DRM_FORMAT_XBGR8888, 0},
+	{DRM_FORMAT_ABGR1555, 0},
+	{DRM_FORMAT_BGRA5551, 0},
+	{DRM_FORMAT_XBGR1555, 0},
+	{DRM_FORMAT_BGRX5551, 0},
+	{DRM_FORMAT_ABGR4444, 0},
+	{DRM_FORMAT_BGRA4444, 0},
+	{DRM_FORMAT_BGRX4444, 0},
+	{DRM_FORMAT_XBGR4444, 0},
+
+	{DRM_FORMAT_YUV420, 0},
+	{DRM_FORMAT_NV12, 0},
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_NV16, 0},
+	{DRM_FORMAT_YUYV, 0},
+
+	{0, 0},
+};
+
+/* NOTE(review): unlike the tables above, the three lists below carry no
+ * {0, 0} terminator -- presumably consumers use ARRAY_SIZE(); confirm
+ * before assigning them to a format_list walked until fourcc == 0.
+ */
+static const struct sde_format_extended rgb_10bit_formats[] = {
+	{DRM_FORMAT_BGRA1010102, 0},
+	{DRM_FORMAT_BGRX1010102, 0},
+	{DRM_FORMAT_RGBA1010102, 0},
+	{DRM_FORMAT_RGBX1010102, 0},
+	{DRM_FORMAT_ABGR2101010, 0},
+	{DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_XBGR2101010, 0},
+	{DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+	{DRM_FORMAT_ARGB2101010, 0},
+	{DRM_FORMAT_XRGB2101010, 0},
+};
+
+/* 10-bit linear NV12 (P010-style, DX modifier) */
+static const struct sde_format_extended p010_formats[] = {
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_DX},
+};
+
+/* 10-bit tightly-packed compressed NV12 (TP10 UBWC) */
+static const struct sde_format_extended tp10_ubwc_formats[] = {
+	{DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED |
+		DRM_FORMAT_MOD_QCOM_DX | DRM_FORMAT_MOD_QCOM_TIGHT},
+};
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.c b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
new file mode 100644
index 000000000000..c056b8198441
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.c
@@ -0,0 +1,309 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_cdm.h"
+#include "sde_dbg.h"
+
+/* CDM register offsets, relative to the CDM block base */
+#define CDM_CSC_10_OPMODE                  0x000
+#define CDM_CSC_10_BASE                    0x004
+
+#define CDM_CDWN2_OP_MODE                  0x100
+#define CDM_CDWN2_CLAMP_OUT                0x104
+#define CDM_CDWN2_PARAMS_3D_0              0x108
+#define CDM_CDWN2_PARAMS_3D_1              0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0         0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1         0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2         0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0        0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1        0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2        0x124
+#define CDM_CDWN2_COEFF_COSITE_V           0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V          0x12C
+#define CDM_CDWN2_OUT_SIZE                 0x130
+
+#define CDM_HDMI_PACK_OP_MODE              0x200
+#define CDM_CSC_10_MATRIX_COEFF_0          0x004
+
+/*
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static const u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/*
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static const u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/*
+ * Vertical coefficients for cosite chroma downscale
+ */
+static const u32 cosite_v_coeff[] = {0x00080004};
+
+/*
+ * Vertical coefficients for offsite chroma downscale
+ */
+static const u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module.
+ * Order of initializers follows struct sde_csc_cfg: 3x3 matrix, pre-bias,
+ * post-bias, pre-clamp (lo/hi per channel), post-clamp (lo/hi per channel).
+ * Not const: passed to sde_hw_cdm_setup_csc_10bit(), which takes a
+ * non-const pointer.
+ */
+static struct sde_csc_cfg rgb2yuv_cfg = {
+	{
+		0x0083, 0x0102, 0x0032,
+		0x1fb5, 0x1f6c, 0x00e1,
+		0x00e1, 0x1f45, 0x1fdc
+	},
+	{ 0x00, 0x00, 0x00 },
+	{ 0x0040, 0x0200, 0x0200 },
+	{ 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+	{ 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
+
+/* Locate the catalog entry for @cdm and fill in its register map @b.
+ * Returns the catalog entry, or ERR_PTR(-EINVAL) if @cdm is unknown.
+ */
+static struct sde_cdm_cfg *_cdm_offset(enum sde_cdm cdm,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->cdm_count; i++) {
+		if (cdm != m->cdm[i].id)
+			continue;
+
+		b->base_off = addr;
+		b->blk_off = m->cdm[i].base;
+		b->length = m->cdm[i].len;
+		b->hwversion = m->hwversion;
+		b->log_mask = SDE_DBG_MASK_CDM;
+		return &m->cdm[i];
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/* Program the 10-bit CSC matrix block with @data; always returns 0 */
+static int sde_hw_cdm_setup_csc_10bit(struct sde_hw_cdm *ctx,
+		struct sde_csc_cfg *data)
+{
+	sde_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+	return 0;
+}
+
+/*
+ * sde_hw_cdm_setup_cdwn - program the CDWN2 chroma downsample sub-module
+ * @ctx: CDM context
+ * @cfg: downsample methods, output size and bit depth
+ *
+ * Builds the CDWN2 opmode word, loads the filter coefficients for the
+ * selected method, then writes output size, opmode and clamp registers.
+ * Returns 0 on success, -EINVAL for an unknown downsample method.
+ */
+static int sde_hw_cdm_setup_cdwn(struct sde_hw_cdm *ctx,
+		struct sde_hw_cdm_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 opmode = 0;
+	u32 out_size = 0;
+
+	/* BIT(7): 0 selects 10-bit output, 1 selects 8-bit output */
+	if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+		opmode &= ~BIT(7);
+	else
+		opmode |= BIT(7);
+
+	/* ENABLE DWNS_H bit */
+	opmode |= BIT(1);
+
+	switch (cfg->h_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_H field */
+		opmode &= ~(0x18);
+		/* CLEAR DWNS_H bit */
+		opmode &= ~BIT(1);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_H field (pixel drop is 0) */
+		opmode &= ~(0x18);
+		break;
+	case CDM_CDWN_AVG:
+		/* Set METHOD_H field (average is 0x1) */
+		opmode &= ~(0x18);
+		opmode |= (0x1 << 0x3);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Set METHOD_H field (cosite is 0x2) */
+		opmode &= ~(0x18);
+		opmode |= (0x2 << 0x3);
+		/* Co-site horizontal coefficients */
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+				cosite_h_coeff[0]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+				cosite_h_coeff[1]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+				cosite_h_coeff[2]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Set METHOD_H field (offsite is 0x3) */
+		opmode &= ~(0x18);
+		opmode |= (0x3 << 0x3);
+
+		/* Off-site horizontal coefficients */
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+				offsite_h_coeff[0]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+				offsite_h_coeff[1]);
+		SDE_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+				offsite_h_coeff[2]);
+		break;
+	default:
+		pr_err("%s invalid horz down sampling type\n", __func__);
+		return -EINVAL;
+	}
+
+	/* ENABLE DWNS_V bit */
+	opmode |= BIT(2);
+
+	switch (cfg->v_cdwn_type) {
+	case CDM_CDWN_DISABLE:
+		/* CLEAR METHOD_V field */
+		opmode &= ~(0x60);
+		/* CLEAR DWNS_V bit */
+		opmode &= ~BIT(2);
+		break;
+	case CDM_CDWN_PIXEL_DROP:
+		/* Clear METHOD_V field (pixel drop is 0) */
+		opmode &= ~(0x60);
+		break;
+	case CDM_CDWN_AVG:
+		/* Set METHOD_V field (average is 0x1) */
+		opmode &= ~(0x60);
+		opmode |= (0x1 << 0x5);
+		break;
+	case CDM_CDWN_COSITE:
+		/* Set METHOD_V field (cosite is 0x2) */
+		opmode &= ~(0x60);
+		opmode |= (0x2 << 0x5);
+		/* Co-site vertical coefficients */
+		SDE_REG_WRITE(c,
+				CDM_CDWN2_COEFF_COSITE_V,
+				cosite_v_coeff[0]);
+		break;
+	case CDM_CDWN_OFFSITE:
+		/* Set METHOD_V field (offsite is 0x3) */
+		opmode &= ~(0x60);
+		opmode |= (0x3 << 0x5);
+
+		/* Off-site vertical coefficients */
+		SDE_REG_WRITE(c,
+				CDM_CDWN2_COEFF_OFFSITE_V,
+				offsite_v_coeff[0]);
+		break;
+	default:
+		/* invalid vertical downsample method */
+		return -EINVAL;
+	}
+
+	if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+		opmode |= BIT(0); /* EN CDWN module */
+	else
+		opmode &= ~BIT(0);
+
+	out_size = (cfg->output_width & 0xFFFF) |
+		((cfg->output_height & 0xFFFF) << 16);
+	SDE_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+	SDE_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+	SDE_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+			((0x3FF << 16) | 0x0));
+
+	return 0;
+}
+
+/*
+ * sde_hw_cdm_enable - route and enable CDM output for a YUV format
+ * @ctx: CDM context
+ * @cdm: current configuration; output_fmt must be a YUV format
+ *
+ * Routes the CDM to either the interface (HDMI) or writeback path via
+ * MDP TOP, then programs the CSC opmode and HDMI packer opmode.
+ * Returns 0 on success, -EINVAL for non-YUV output or H1V2 chroma
+ * siting on the HDMI path.
+ */
+int sde_hw_cdm_enable(struct sde_hw_cdm *ctx,
+		struct sde_hw_cdm_cfg *cdm)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	const struct sde_format *fmt = cdm->output_fmt;
+	struct cdm_output_cfg cdm_cfg = { 0 };
+	u32 opmode = 0;
+	u32 csc = 0;
+
+	if (!SDE_FORMAT_IS_YUV(fmt))
+		return -EINVAL;
+
+	if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+		if (fmt->chroma_sample == SDE_CHROMA_H1V2)
+			return -EINVAL; /*unsupported format */
+		opmode = BIT(0);
+		opmode |= (fmt->chroma_sample << 1);
+		cdm_cfg.intf_en = true;
+	} else {
+		opmode = 0;
+		cdm_cfg.wb_en = true;
+	}
+
+	/* NOTE(review): assumes BIT(0)|BIT(2) enable the 10-bit CSC path
+	 * with BIT(1) held clear -- confirm against CDM_CSC_10_OPMODE
+	 * register documentation.
+	 */
+	csc |= BIT(2);
+	csc &= ~BIT(1);
+	csc |= BIT(0);
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+	SDE_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+	SDE_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+	return 0;
+}
+
+/* Put the CDM in bypass by clearing its output routing in MDP TOP */
+void sde_hw_cdm_disable(struct sde_hw_cdm *ctx)
+{
+	struct cdm_output_cfg cdm_cfg = { 0 };
+
+	if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+		ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+/* Populate the CDM function table.
+ * @features is currently unused; all ops are installed unconditionally.
+ */
+static void _setup_cdm_ops(struct sde_hw_cdm_ops *ops,
+		unsigned long features)
+{
+	ops->setup_csc_data = sde_hw_cdm_setup_csc_10bit;
+	ops->setup_cdwn = sde_hw_cdm_setup_cdwn;
+	ops->enable = sde_hw_cdm_enable;
+	ops->disable = sde_hw_cdm_disable;
+}
+
+/*
+ * sde_hw_cdm_init - allocate and initialize a CDM driver context
+ *
+ * Looks up @idx in catalog @m, maps the block registers, installs the ops
+ * table, programs the default RGB->YUV CSC matrix, and registers the
+ * debug dump range. Returns ERR_PTR(-ENOMEM) on allocation failure or
+ * ERR_PTR(-EINVAL) if @idx is not present in the catalog.
+ */
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp)
+{
+	struct sde_hw_cdm *c;
+	struct sde_cdm_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _cdm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	c->idx = idx;
+	c->cdm_hw_cap = cfg;
+	_setup_cdm_ops(&c->ops, c->cdm_hw_cap->features);
+	c->hw_mdp = hw_mdp;
+
+	/*
+	 * Perform any default initialization for the chroma down module
+	 * @setup default csc coefficients
+	 */
+	sde_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+/* Free a CDM context created by sde_hw_cdm_init(); NULL is a safe no-op */
+void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm)
+{
+	kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_cdm.h b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
new file mode 100644
index 000000000000..a0afd897e867
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_cdm.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CDM_H
+#define _SDE_HW_CDM_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_top.h"
+
+struct sde_hw_cdm;
+
+/**
+ * struct sde_hw_cdm_cfg : current configuration of the chroma down block
+ * @output_width: output ROI width in pixels
+ * @output_height: output ROI height in pixels
+ * @output_bit_depth: enum sde_hw_cdwn_output_bit_depth value
+ * @h_cdwn_type: horizontal downsample method, enum sde_hw_cdwn_type
+ * @v_cdwn_type: vertical downsample method, enum sde_hw_cdwn_type
+ * @output_fmt: destination format description
+ * @output_type: enum sde_hw_cdwn_output_type (HDMI or writeback)
+ * @flags: caller flags; not referenced by the CDM hw driver in this file
+ */
+struct sde_hw_cdm_cfg {
+	u32 output_width;
+	u32 output_height;
+	u32 output_bit_depth;
+	u32 h_cdwn_type;
+	u32 v_cdwn_type;
+	const struct sde_format *output_fmt;
+	u32 output_type;
+	int flags;
+};
+
+/* Chroma downsample filter methods */
+enum sde_hw_cdwn_type {
+	CDM_CDWN_DISABLE,
+	CDM_CDWN_PIXEL_DROP,
+	CDM_CDWN_AVG,
+	CDM_CDWN_COSITE,
+	CDM_CDWN_OFFSITE,
+};
+
+/* Destination of the chroma-down output */
+enum sde_hw_cdwn_output_type {
+	CDM_CDWN_OUTPUT_HDMI,
+	CDM_CDWN_OUTPUT_WB,
+};
+
+/* Output bit depth of the chroma-down path */
+enum sde_hw_cdwn_output_bit_depth {
+	CDM_CDWN_OUTPUT_8BIT,
+	CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct sde_hw_cdm_ops : Interface to the chroma down Hw driver functions
+ *                         Assumption is these functions will be called after
+ *                         clocks are enabled
+ * @setup_csc:  Programs the csc matrix
+ * @setup_cdwn: Sets up the chroma down sub module
+ * @enable:     Enables the output to interface and programs the
+ *              output packer
+ * @disable:    Puts the cdm in bypass mode
+ */
+struct sde_hw_cdm_ops {
+	/**
+	 * Programs the CSC matrix for conversion from RGB space to YUV space,
+	 * it is optional to call this function as this matrix is automatically
+	 * set during initialization, user should call this if it wants
+	 * to program a different matrix than default matrix.
+	 * @cdm: Pointer to the chroma down context structure
+	 * @data: Pointer to CSC configuration data
+	 * return: 0 if success; error code otherwise
+	 */
+	int (*setup_csc_data)(struct sde_hw_cdm *cdm,
+			struct sde_csc_cfg *data);
+
+	/**
+	 * Programs the Chroma downsample part.
+	 * @cdm: Pointer to chroma down context
+	 * @cfg: Pointer to chroma down configuration
+	 */
+	int (*setup_cdwn)(struct sde_hw_cdm *cdm,
+			struct sde_hw_cdm_cfg *cfg);
+
+	/**
+	 * Enable the CDM module
+	 * @cdm: Pointer to chroma down context
+	 * @cfg: Pointer to chroma down configuration
+	 */
+	int (*enable)(struct sde_hw_cdm *cdm,
+			struct sde_hw_cdm_cfg *cfg);
+
+	/**
+	 * Disable the CDM module
+	 * @cdm: Pointer to chroma down context
+	 */
+	void (*disable)(struct sde_hw_cdm *cdm);
+};
+
+/**
+ * struct sde_hw_cdm - chroma down driver context
+ * @hw: block register map
+ * @cdm_hw_cap: catalog entry describing this CDM instance
+ * @idx: CDM instance index
+ * @hw_mdp: MDP TOP driver, used to route the CDM output
+ * @ops: function table
+ */
+struct sde_hw_cdm {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* chroma down */
+	const struct sde_cdm_cfg *cdm_hw_cap;
+	enum sde_cdm idx;
+
+	/* mdp top hw driver */
+	struct sde_hw_mdp *hw_mdp;
+
+	/* ops */
+	struct sde_hw_cdm_ops ops;
+};
+
+/**
+ * sde_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once before accessing every cdm.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct sde_hw_cdm *sde_hw_cdm_init(enum sde_cdm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m,
+ struct sde_hw_mdp *hw_mdp);
+
+/**
+ * sde_hw_cdm_destroy - destroys CDM driver context
+ * @cdm: pointer to CDM driver context
+ */
+void sde_hw_cdm_destroy(struct sde_hw_cdm *cdm);
+
+#endif /*_SDE_HW_CDM_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
new file mode 100644
index 000000000000..a30e1a52b046
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing.h
@@ -0,0 +1,18 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_H
+#define _SDE_HW_COLOR_PROCESSING_H
+
+#include "sde_hw_color_processing_v1_7.h"
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
new file mode 100644
index 000000000000..76d99c1e8e65
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.c
@@ -0,0 +1,494 @@
+/* Copyright (c) 2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_color_processing_v1_7.h"
+
+#define PA_HUE_VIG_OFF 0x110
+#define PA_SAT_VIG_OFF 0x114
+#define PA_VAL_VIG_OFF 0x118
+#define PA_CONT_VIG_OFF 0x11C
+
+#define PA_HUE_DSPP_OFF 0x238
+#define PA_SAT_DSPP_OFF 0x23C
+#define PA_VAL_DSPP_OFF 0x240
+#define PA_CONT_DSPP_OFF 0x244
+
+#define PA_LUTV_DSPP_OFF 0x1400
+#define PA_LUT_SWAP_OFF 0x234
+
+#define PA_HUE_MASK 0xFFF
+#define PA_SAT_MASK 0xFFFF
+#define PA_VAL_MASK 0xFF
+#define PA_CONT_MASK 0xFF
+
+#define MEMCOL_PWL0_OFF 0x88
+#define MEMCOL_PWL0_MASK 0xFFFF07FF
+#define MEMCOL_PWL1_OFF 0x8C
+#define MEMCOL_PWL1_MASK 0xFFFFFFFF
+#define MEMCOL_HUE_REGION_OFF 0x90
+#define MEMCOL_HUE_REGION_MASK 0x7FF07FF
+#define MEMCOL_SAT_REGION_OFF 0x94
+#define MEMCOL_SAT_REGION_MASK 0xFFFFFF
+#define MEMCOL_VAL_REGION_OFF 0x98
+#define MEMCOL_VAL_REGION_MASK 0xFFFFFF
+#define MEMCOL_P0_LEN 0x14
+#define MEMCOL_P1_LEN 0x8
+#define MEMCOL_PWL2_OFF 0x218
+#define MEMCOL_PWL2_MASK 0xFFFFFFFF
+#define MEMCOL_BLEND_GAIN_OFF 0x21C
+#define MEMCOL_PWL_HOLD_OFF 0x214
+
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SKIN_EN BIT(5)
+#define VIG_OP_PA_FOL_EN BIT(6)
+#define VIG_OP_PA_SKY_EN BIT(7)
+#define VIG_OP_PA_HUE_EN BIT(25)
+#define VIG_OP_PA_SAT_EN BIT(26)
+#define VIG_OP_PA_VAL_EN BIT(27)
+#define VIG_OP_PA_CONT_EN BIT(28)
+
+#define DSPP_OP_SZ_VAL_EN BIT(31)
+#define DSPP_OP_SZ_SAT_EN BIT(30)
+#define DSPP_OP_SZ_HUE_EN BIT(29)
+#define DSPP_OP_PA_HUE_EN BIT(25)
+#define DSPP_OP_PA_SAT_EN BIT(26)
+#define DSPP_OP_PA_VAL_EN BIT(27)
+#define DSPP_OP_PA_CONT_EN BIT(28)
+#define DSPP_OP_PA_EN BIT(20)
+#define DSPP_OP_PA_LUTV_EN BIT(19)
+#define DSPP_OP_PA_SKIN_EN BIT(5)
+#define DSPP_OP_PA_FOL_EN BIT(6)
+#define DSPP_OP_PA_SKY_EN BIT(7)
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+
+#define PA_VIG_DISABLE_REQUIRED(x) \
+ !((x) & (VIG_OP_PA_SKIN_EN | VIG_OP_PA_SKY_EN | \
+ VIG_OP_PA_FOL_EN | VIG_OP_PA_HUE_EN | \
+ VIG_OP_PA_SAT_EN | VIG_OP_PA_VAL_EN | \
+ VIG_OP_PA_CONT_EN))
+
+
+#define PA_DSPP_DISABLE_REQUIRED(x) \
+ !((x) & (DSPP_OP_PA_SKIN_EN | DSPP_OP_PA_SKY_EN | \
+ DSPP_OP_PA_FOL_EN | DSPP_OP_PA_HUE_EN | \
+ DSPP_OP_PA_SAT_EN | DSPP_OP_PA_VAL_EN | \
+ DSPP_OP_PA_CONT_EN | DSPP_OP_PA_LUTV_EN))
+
+#define DSPP_OP_PCC_ENABLE BIT(0)
+#define PCC_OP_MODE_OFF 0
+#define PCC_CONST_COEFF_OFF 4
+#define PCC_R_COEFF_OFF 0x10
+#define PCC_G_COEFF_OFF 0x1C
+#define PCC_B_COEFF_OFF 0x28
+#define PCC_RG_COEFF_OFF 0x34
+#define PCC_RB_COEFF_OFF 0x40
+#define PCC_GB_COEFF_OFF 0x4C
+#define PCC_RGB_COEFF_OFF 0x58
+#define PCC_CONST_COEFF_MASK 0xFFFF
+#define PCC_COEFF_MASK 0x3FFFF
+
+#define SSPP 0
+#define DSPP 1
+
+static void __setup_pa_hue(struct sde_hw_blk_reg_map *hw,
+ const struct sde_pp_blk *blk, uint32_t hue,
+ int location)
+{
+ u32 base = blk->base;
+ u32 offset = (location == DSPP) ? PA_HUE_DSPP_OFF : PA_HUE_VIG_OFF;
+ u32 op_hue_en = (location == DSPP) ? DSPP_OP_PA_HUE_EN :
+ VIG_OP_PA_HUE_EN;
+ u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+ u32 disable_req;
+ u32 opmode;
+
+ SDE_REG_WRITE(hw, base + offset, hue & PA_HUE_MASK);
+
+ opmode = SDE_REG_READ(hw, base);
+
+ if (!hue) {
+ opmode &= ~op_hue_en;
+ disable_req = (location == DSPP) ?
+ PA_DSPP_DISABLE_REQUIRED(opmode) :
+ PA_VIG_DISABLE_REQUIRED(opmode);
+ if (disable_req)
+ opmode &= ~op_pa_en;
+ } else {
+ opmode |= op_hue_en | op_pa_en;
+ }
+
+ SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+ uint32_t hue = *((uint32_t *)cfg);
+
+ __setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic_blk, hue, SSPP);
+}
+
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+ uint32_t hue = *((uint32_t *)cfg);
+
+ __setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic, hue, DSPP);
+}
+
+static void __setup_pa_sat(struct sde_hw_blk_reg_map *hw,
+ const struct sde_pp_blk *blk, uint32_t sat,
+ int location)
+{
+ u32 base = blk->base;
+ u32 offset = (location == DSPP) ? PA_SAT_DSPP_OFF : PA_SAT_VIG_OFF;
+ u32 op_sat_en = (location == DSPP) ?
+ DSPP_OP_PA_SAT_EN : VIG_OP_PA_SAT_EN;
+ u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+ u32 disable_req;
+ u32 opmode;
+
+ SDE_REG_WRITE(hw, base + offset, sat & PA_SAT_MASK);
+
+ opmode = SDE_REG_READ(hw, base);
+
+ if (!sat) {
+ opmode &= ~op_sat_en;
+ disable_req = (location == DSPP) ?
+ PA_DSPP_DISABLE_REQUIRED(opmode) :
+ PA_VIG_DISABLE_REQUIRED(opmode);
+ if (disable_req)
+ opmode &= ~op_pa_en;
+ } else {
+ opmode |= op_sat_en | op_pa_en;
+ }
+
+ SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+ uint32_t sat = *((uint32_t *)cfg);
+
+ __setup_pa_sat(&ctx->hw, &ctx->cap->sblk->hsic_blk, sat, SSPP);
+}
+
+static void __setup_pa_val(struct sde_hw_blk_reg_map *hw,
+ const struct sde_pp_blk *blk, uint32_t value,
+ int location)
+{
+ u32 base = blk->base;
+ u32 offset = (location == DSPP) ? PA_VAL_DSPP_OFF : PA_VAL_VIG_OFF;
+ u32 op_val_en = (location == DSPP) ?
+ DSPP_OP_PA_VAL_EN : VIG_OP_PA_VAL_EN;
+ u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+ u32 disable_req;
+ u32 opmode;
+
+ SDE_REG_WRITE(hw, base + offset, value & PA_VAL_MASK);
+
+ opmode = SDE_REG_READ(hw, base);
+
+ if (!value) {
+ opmode &= ~op_val_en;
+ disable_req = (location == DSPP) ?
+ PA_DSPP_DISABLE_REQUIRED(opmode) :
+ PA_VIG_DISABLE_REQUIRED(opmode);
+ if (disable_req)
+ opmode &= ~op_pa_en;
+ } else {
+ opmode |= op_val_en | op_pa_en;
+ }
+
+ SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+ uint32_t value = *((uint32_t *)cfg);
+
+ __setup_pa_val(&ctx->hw, &ctx->cap->sblk->hsic_blk, value, SSPP);
+}
+
+static void __setup_pa_cont(struct sde_hw_blk_reg_map *hw,
+ const struct sde_pp_blk *blk, uint32_t contrast,
+ int location)
+{
+ u32 base = blk->base;
+ u32 offset = (location == DSPP) ? PA_CONT_DSPP_OFF : PA_CONT_VIG_OFF;
+ u32 op_cont_en = (location == DSPP) ? DSPP_OP_PA_CONT_EN :
+ VIG_OP_PA_CONT_EN;
+ u32 op_pa_en = (location == DSPP) ? DSPP_OP_PA_EN : VIG_OP_PA_EN;
+ u32 disable_req;
+ u32 opmode;
+
+ SDE_REG_WRITE(hw, base + offset, contrast & PA_CONT_MASK);
+
+ opmode = SDE_REG_READ(hw, base);
+
+ if (!contrast) {
+ opmode &= ~op_cont_en;
+ disable_req = (location == DSPP) ?
+ PA_DSPP_DISABLE_REQUIRED(opmode) :
+ PA_VIG_DISABLE_REQUIRED(opmode);
+ if (disable_req)
+ opmode &= ~op_pa_en;
+ } else {
+ opmode |= op_cont_en | op_pa_en;
+ }
+
+ SDE_REG_WRITE(hw, base, opmode);
+}
+
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg)
+{
+ uint32_t contrast = *((uint32_t *)cfg);
+
+ __setup_pa_cont(&ctx->hw, &ctx->cap->sblk->hsic_blk, contrast, SSPP);
+}
+
+void sde_setup_dspp_pa_hsic_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ struct drm_msm_pa_hsic *hsic_cfg;
+ u32 hue = 0;
+ u32 sat = 0;
+ u32 val = 0;
+ u32 cont = 0;
+
+ if (!ctx || !cfg) {
+ DRM_ERROR("invalid param ctx %pK cfg %pK\n", ctx, cfg);
+ return;
+ }
+
+ if (hw_cfg->payload &&
+ (hw_cfg->len != sizeof(struct drm_msm_pa_hsic))) {
+ DRM_ERROR("invalid size of payload len %d exp %zd\n",
+ hw_cfg->len, sizeof(struct drm_msm_pa_hsic));
+ return;
+ }
+
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("disable pa hsic feature\n");
+ } else {
+ hsic_cfg = hw_cfg->payload;
+ if (hsic_cfg->flags & PA_HSIC_HUE_ENABLE)
+ hue = hsic_cfg->hue;
+ if (hsic_cfg->flags & PA_HSIC_SAT_ENABLE)
+ sat = hsic_cfg->saturation;
+ if (hsic_cfg->flags & PA_HSIC_VAL_ENABLE)
+ val = hsic_cfg->value;
+ if (hsic_cfg->flags & PA_HSIC_CONT_ENABLE)
+ cont = hsic_cfg->contrast;
+ }
+
+ __setup_pa_hue(&ctx->hw, &ctx->cap->sblk->hsic, hue, DSPP);
+ __setup_pa_sat(&ctx->hw, &ctx->cap->sblk->hsic, sat, DSPP);
+ __setup_pa_val(&ctx->hw, &ctx->cap->sblk->hsic, val, DSPP);
+ __setup_pa_cont(&ctx->hw, &ctx->cap->sblk->hsic, cont, DSPP);
+}
+
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+ enum sde_memcolor_type type,
+ void *cfg)
+{
+ struct drm_msm_memcol *mc = cfg;
+ u32 base = ctx->cap->sblk->memcolor_blk.base;
+ u32 off, op, mc_en, hold = 0;
+ u32 mc_i = 0;
+
+ switch (type) {
+ case MEMCOLOR_SKIN:
+ mc_en = VIG_OP_PA_SKIN_EN;
+ mc_i = 0;
+ break;
+ case MEMCOLOR_SKY:
+ mc_en = VIG_OP_PA_SKY_EN;
+ mc_i = 1;
+ break;
+ case MEMCOLOR_FOLIAGE:
+ mc_en = VIG_OP_PA_FOL_EN;
+ mc_i = 2;
+ break;
+ default:
+ DRM_ERROR("Invalid memory color type %d\n", type);
+ return;
+ }
+
+ op = SDE_REG_READ(&ctx->hw, base);
+ if (!mc) {
+ op &= ~mc_en;
+ if (PA_VIG_DISABLE_REQUIRED(op))
+ op &= ~VIG_OP_PA_EN;
+ SDE_REG_WRITE(&ctx->hw, base, op);
+ return;
+ }
+
+ off = base + (mc_i * MEMCOL_P0_LEN);
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL0_OFF),
+ mc->color_adjust_p0 & MEMCOL_PWL0_MASK);
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL1_OFF),
+ mc->color_adjust_p1 & MEMCOL_PWL1_MASK);
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_HUE_REGION_OFF),
+ mc->hue_region & MEMCOL_HUE_REGION_MASK);
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_SAT_REGION_OFF),
+ mc->sat_region & MEMCOL_SAT_REGION_MASK);
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_VAL_REGION_OFF),
+ mc->val_region & MEMCOL_VAL_REGION_MASK);
+
+ off = base + (mc_i * MEMCOL_P1_LEN);
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL2_OFF),
+ mc->color_adjust_p2 & MEMCOL_PWL2_MASK);
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_BLEND_GAIN_OFF), mc->blend_gain);
+
+ hold = SDE_REG_READ(&ctx->hw, off + MEMCOL_PWL_HOLD_OFF);
+ hold &= ~(0xF << (mc_i * 4));
+ hold |= ((mc->sat_hold & 0x3) << (mc_i * 4));
+ hold |= ((mc->val_hold & 0x3) << ((mc_i * 4) + 2));
+ SDE_REG_WRITE(&ctx->hw, (off + MEMCOL_PWL_HOLD_OFF), hold);
+
+ op |= VIG_OP_PA_EN | mc_en;
+ SDE_REG_WRITE(&ctx->hw, base, op);
+}
+
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+	struct sde_hw_cp_cfg *hw_cfg = cfg;
+	struct drm_msm_pcc *pcc;
+	/* all PCC registers are addressed via ctx->cap->sblk->pcc.base */
+
+	if (!hw_cfg || (hw_cfg->len != sizeof(*pcc) && hw_cfg->payload)) {
+		DRM_ERROR("invalid params hw %pK payload %pK payloadsize %d "
+				"exp size %zd\n",
+			hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+			((hw_cfg) ? hw_cfg->len : 0), sizeof(*pcc));
+		return;
+	}
+	/* a NULL payload is the request to disable the feature */
+
+	/* Turn off feature */
+	if (!hw_cfg->payload) {
+		SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base,
+				PCC_OP_MODE_OFF);
+		return;
+	}
+	DRM_DEBUG_DRIVER("Enable PCC feature\n");
+	pcc = hw_cfg->payload;
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF,
+				pcc->r.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+			ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 4,
+				pcc->g.c & PCC_CONST_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+			ctx->cap->sblk->pcc.base + PCC_CONST_COEFF_OFF + 8,
+				pcc->b.c & PCC_CONST_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF,
+				pcc->r.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 4,
+				pcc->g.r & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_R_COEFF_OFF + 8,
+				pcc->b.r & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF,
+				pcc->r.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 4,
+				pcc->g.g & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_G_COEFF_OFF + 8,
+				pcc->b.g & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF,
+				pcc->r.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 4,
+				pcc->g.b & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_B_COEFF_OFF + 8,
+				pcc->b.b & PCC_COEFF_MASK);
+
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF,
+				pcc->r.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 4,
+				pcc->g.rg & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RG_COEFF_OFF + 8,
+				pcc->b.rg & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF,
+				pcc->r.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 4,
+				pcc->g.rb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RB_COEFF_OFF + 8,
+				pcc->b.rb & PCC_COEFF_MASK);
+
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF,
+				pcc->r.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 4,
+				pcc->g.gb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_GB_COEFF_OFF + 8,
+				pcc->b.gb & PCC_COEFF_MASK);
+
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF,
+				pcc->r.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+			ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 4,
+				pcc->g.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw,
+			ctx->cap->sblk->pcc.base + PCC_RGB_COEFF_OFF + 8,
+				pcc->b.rgb & PCC_COEFF_MASK);
+	SDE_REG_WRITE(&ctx->hw, ctx->cap->sblk->pcc.base, DSPP_OP_PCC_ENABLE);
+}
+
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg)
+{
+ struct drm_msm_pa_vlut *payload = NULL;
+ struct sde_hw_cp_cfg *hw_cfg = cfg;
+ u32 base = ctx->cap->sblk->vlut.base;
+ u32 offset = base + PA_LUTV_DSPP_OFF;
+ u32 op_mode, tmp;
+ int i = 0, j = 0;
+
+ if (!hw_cfg || (hw_cfg->payload && hw_cfg->len !=
+ sizeof(struct drm_msm_pa_vlut))) {
+ DRM_ERROR("hw %pK payload %pK payloadsize %d exp size %zd\n",
+ hw_cfg, ((hw_cfg) ? hw_cfg->payload : NULL),
+ ((hw_cfg) ? hw_cfg->len : 0),
+ sizeof(struct drm_msm_pa_vlut));
+ return;
+ }
+ op_mode = SDE_REG_READ(&ctx->hw, base);
+ if (!hw_cfg->payload) {
+ DRM_DEBUG_DRIVER("Disable vlut feature\n");
+ /**
+ * In the PA_VLUT disable case, remove PA_VLUT enable bit(19)
+ * first, then check whether any other PA sub-features are
+ * enabled or not. If none of the sub-features are enabled,
+ * remove the PA global enable bit(20).
+ */
+ op_mode &= ~((u32)DSPP_OP_PA_LUTV_EN);
+ if (PA_DSPP_DISABLE_REQUIRED(op_mode))
+ op_mode &= ~((u32)DSPP_OP_PA_EN);
+ SDE_REG_WRITE(&ctx->hw, base, op_mode);
+ return;
+ }
+ payload = hw_cfg->payload;
+ DRM_DEBUG_DRIVER("Enable vlut feature flags %llx\n", payload->flags);
+ for (i = 0, j = 0; i < ARRAY_SIZE(payload->val); i += 2, j += 4) {
+ tmp = (payload->val[i] & REG_MASK(10)) |
+ ((payload->val[i + 1] & REG_MASK(10)) << 16);
+ SDE_REG_WRITE(&ctx->hw, (offset + j),
+ tmp);
+ }
+ SDE_REG_WRITE(&ctx->hw, (base + PA_LUT_SWAP_OFF), 1);
+ op_mode |= DSPP_OP_PA_EN | DSPP_OP_PA_LUTV_EN;
+ SDE_REG_WRITE(&ctx->hw, base, op_mode);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
new file mode 100644
index 000000000000..185f6b548b65
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_color_processing_v1_7.h
@@ -0,0 +1,85 @@
+/* Copyright (c) 2016,2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_COLOR_PROCESSING_V1_7_H
+#define _SDE_HW_COLOR_PROCESSING_V1_7_H
+
+#include "sde_hw_sspp.h"
+#include "sde_hw_dspp.h"
+
+/**
+ * sde_setup_pipe_pa_hue_v1_7 - setup SSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_pipe_pa_hue_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_sat_v1_7 - setup SSPP saturation feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to saturation data
+ */
+void sde_setup_pipe_pa_sat_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_val_v1_7 - setup SSPP value feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to value data
+ */
+void sde_setup_pipe_pa_val_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_cont_v1_7 - setup SSPP contrast feature in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to contrast data
+ */
+void sde_setup_pipe_pa_cont_v1_7(struct sde_hw_pipe *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_hsic_v1_7 - setup DSPP hsic feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to hsic data
+ */
+void sde_setup_dspp_pa_hsic_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_pipe_pa_memcol_v1_7 - setup SSPP memory color in v1.7 hardware
+ * @ctx: Pointer to pipe context
+ * @type: Memory color type (Skin, sky, or foliage)
+ * @cfg: Pointer to memory color config data
+ */
+void sde_setup_pipe_pa_memcol_v1_7(struct sde_hw_pipe *ctx,
+ enum sde_memcolor_type type,
+ void *cfg);
+
+/**
+ * sde_setup_dspp_pcc_v1_7 - setup DSPP PCC feature in v1.7 hardware
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to PCC data
+ */
+void sde_setup_dspp_pcc_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_hue_v1_7 - setup DSPP hue feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to hue data
+ */
+void sde_setup_dspp_pa_hue_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+/**
+ * sde_setup_dspp_pa_vlut_v1_7 - setup DSPP PA vLUT feature in v1.7 hardware
+ * @ctx: Pointer to DSPP context
+ * @cfg: Pointer to vLUT data
+ */
+void sde_setup_dspp_pa_vlut_v1_7(struct sde_hw_dspp *ctx, void *cfg);
+
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.c b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
new file mode 100644
index 000000000000..964e02f7293c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.c
@@ -0,0 +1,586 @@
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "sde_hwio.h"
+#include "sde_hw_ctl.h"
+#include "sde_dbg.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
+
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define SDE_REG_RESET_TIMEOUT_COUNT 20
+
+static struct sde_ctl_cfg *_ctl_offset(enum sde_ctl ctl,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->ctl_count; i++) {
+		if (ctl == m->ctl[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->ctl[i].base;
+			b->length = m->ctl[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_CTL;
+			return &m->ctl[i];
+		}
+	}
+	return ERR_PTR(-EINVAL); /* ctl id not in catalog: bad arg, not OOM */
+}
+
+static int _mixer_stages(const struct sde_lm_cfg *mixer, int count,
+ enum sde_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline void sde_hw_ctl_trigger_start(struct sde_hw_ctl *ctx)
+{
+ SDE_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void sde_hw_ctl_clear_pending_flush(struct sde_hw_ctl *ctx)
+{
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void sde_hw_ctl_update_pending_flush(struct sde_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 sde_hw_ctl_get_pending_flush(struct sde_hw_ctl *ctx)
+{
+ if (!ctx)
+ return 0x0;
+
+ return ctx->pending_flush_mask;
+}
+
+static inline void sde_hw_ctl_trigger_flush(struct sde_hw_ctl *ctx)
+{
+ SDE_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 sde_hw_ctl_get_flush_register(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ return SDE_REG_READ(c, CTL_FLUSH);
+}
+
+static inline uint32_t sde_hw_ctl_get_bitmask_sspp(struct sde_hw_ctl *ctx,
+ enum sde_sspp sspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (sspp) {
+ case SSPP_VIG0:
+ flushbits = BIT(0);
+ break;
+ case SSPP_VIG1:
+ flushbits = BIT(1);
+ break;
+ case SSPP_VIG2:
+ flushbits = BIT(2);
+ break;
+ case SSPP_VIG3:
+ flushbits = BIT(18);
+ break;
+ case SSPP_RGB0:
+ flushbits = BIT(3);
+ break;
+ case SSPP_RGB1:
+ flushbits = BIT(4);
+ break;
+ case SSPP_RGB2:
+ flushbits = BIT(5);
+ break;
+ case SSPP_RGB3:
+ flushbits = BIT(19);
+ break;
+ case SSPP_DMA0:
+ flushbits = BIT(11);
+ break;
+ case SSPP_DMA1:
+ flushbits = BIT(12);
+ break;
+ case SSPP_DMA2:
+ flushbits = BIT(24);
+ break;
+ case SSPP_DMA3:
+ flushbits = BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ flushbits = BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ flushbits = BIT(23);
+ break;
+ default:
+ break;
+ }
+
+ return flushbits;
+}
+
+static inline uint32_t sde_hw_ctl_get_bitmask_mixer(struct sde_hw_ctl *ctx,
+ enum sde_lm lm)
+{
+ uint32_t flushbits = 0;
+
+ switch (lm) {
+ case LM_0:
+ flushbits = BIT(6);
+ break;
+ case LM_1:
+ flushbits = BIT(7);
+ break;
+ case LM_2:
+ flushbits = BIT(8);
+ break;
+ case LM_3:
+ flushbits = BIT(9);
+ break;
+ case LM_4:
+ flushbits = BIT(10);
+ break;
+ case LM_5:
+ flushbits = BIT(20);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ flushbits |= BIT(17); /* CTL */
+
+ return flushbits;
+}
+
+static inline int sde_hw_ctl_get_bitmask_dspp(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_dspp dspp)
+{
+ switch (dspp) {
+ case DSPP_0:
+ *flushbits |= BIT(13);
+ break;
+ case DSPP_1:
+ *flushbits |= BIT(14);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_intf(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(31);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(30);
+ break;
+ case INTF_2:
+ *flushbits |= BIT(29);
+ break;
+ case INTF_3:
+ *flushbits |= BIT(28);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_wb(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_wb wb)
+{
+ switch (wb) {
+ case WB_0:
+ case WB_1:
+ case WB_2:
+ *flushbits |= BIT(16);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int sde_hw_ctl_get_bitmask_cdm(struct sde_hw_ctl *ctx,
+ u32 *flushbits, enum sde_cdm cdm)
+{
+ switch (cdm) {
+ case CDM_0:
+ *flushbits |= BIT(26);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 sde_hw_ctl_poll_reset_status(struct sde_hw_ctl *ctx, u32 count)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ /* protect to do at least one iteration */
+ if (!count)
+ count = 1;
+
+ /*
+ * it takes around 30us to have mdp finish resetting its ctl path
+	 * poll (sleeping 20-50us per read) so reset should complete by 1st poll
+ */
+ do {
+ status = SDE_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && --count > 0);
+
+ return status;
+}
+
+static int sde_hw_ctl_reset_control(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ SDE_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sde_hw_ctl_wait_reset_status(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = SDE_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (sde_hw_ctl_poll_reset_status(ctx, SDE_REG_RESET_TIMEOUT_COUNT)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void sde_hw_ctl_clear_all_blendstages(struct sde_hw_ctl *ctx,
+ bool handoff, u32 splash_mask, u32 splash_ext_mask)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+ u32 mixercfg = 0;
+ u32 mixercfg_ext = 0;
+ int mixer_id;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ mixer_id = ctx->mixer_hw_caps[i].id;
+
+ /*
+		 * if bootloader still has early splash or RVC running,
+ * mixer status can't be directly cleared.
+ */
+ if (handoff) {
+ mixercfg = SDE_REG_READ(c, CTL_LAYER(mixer_id));
+ mixercfg_ext = SDE_REG_READ(c,
+ CTL_LAYER_EXT(mixer_id));
+ mixercfg &= splash_mask;
+ mixercfg_ext &= splash_ext_mask;
+ }
+ SDE_REG_WRITE(c, CTL_LAYER(mixer_id), mixercfg);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT(mixer_id), mixercfg_ext);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT2(mixer_id), 0);
+ SDE_REG_WRITE(c, CTL_LAYER_EXT3(mixer_id), 0);
+ }
+}
+
+static void sde_hw_ctl_setup_blendstage(struct sde_hw_ctl *ctx,
+	enum sde_lm lm, struct sde_hw_stage_cfg *stage_cfg, u32 index,
+	bool handoff, u32 splash_mask, u32 splash_ext_mask)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 mixercfg, mixercfg_ext, mix, ext, mixercfg_ext2;
+	int i, j;
+	int stages; /* must be signed: _mixer_stages() returns -EINVAL */
+	int pipes_per_stage;
+
+	if (index >= CRTC_DUAL_MIXERS)
+		return;
+
+	stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+	if (stages < 0)
+		return;
+
+	if (test_bit(SDE_MIXER_SOURCESPLIT,
+		&ctx->mixer_hw_caps->features))
+		pipes_per_stage = PIPES_PER_STAGE;
+	else
+		pipes_per_stage = 1;
+
+	mixercfg = BIT(24); /* always set BORDER_OUT */
+	mixercfg_ext = 0;
+	mixercfg_ext2 = 0;
+
+	/*
+	 * if bootloader still has RVC running, its mixer status
+	 * should be updated to kernel's mixer setup.
+	 */
+	if (handoff) {
+		mixercfg = SDE_REG_READ(c, CTL_LAYER(lm));
+		mixercfg_ext = SDE_REG_READ(c, CTL_LAYER_EXT(lm));
+		mixercfg &= splash_mask;
+		mixercfg_ext &= splash_ext_mask;
+		mixercfg |= BIT(24);
+	}
+
+	for (i = 0; i <= stages; i++) {
+		/* overflow to ext register if 'i + 1 > 7' */
+		mix = (i + 1) & 0x7;
+		ext = i >= 7;
+
+		for (j = 0 ; j < pipes_per_stage; j++) {
+			switch (stage_cfg->stage[index][i][j]) {
+			case SSPP_VIG0:
+				mixercfg |= mix << 0;
+				mixercfg_ext |= ext << 0;
+				break;
+			case SSPP_VIG1:
+				mixercfg |= mix << 3;
+				mixercfg_ext |= ext << 2;
+				break;
+			case SSPP_VIG2:
+				mixercfg |= mix << 6;
+				mixercfg_ext |= ext << 4;
+				break;
+			case SSPP_VIG3:
+				mixercfg |= mix << 26;
+				mixercfg_ext |= ext << 6;
+				break;
+			case SSPP_RGB0:
+				mixercfg |= mix << 9;
+				mixercfg_ext |= ext << 8;
+				break;
+			case SSPP_RGB1:
+				mixercfg |= mix << 12;
+				mixercfg_ext |= ext << 10;
+				break;
+			case SSPP_RGB2:
+				mixercfg |= mix << 15;
+				mixercfg_ext |= ext << 12;
+				break;
+			case SSPP_RGB3:
+				mixercfg |= mix << 29;
+				mixercfg_ext |= ext << 14;
+				break;
+			case SSPP_DMA0:
+				mixercfg |= mix << 18;
+				mixercfg_ext |= ext << 16;
+				break;
+			case SSPP_DMA1:
+				mixercfg |= mix << 21;
+				mixercfg_ext |= ext << 18;
+				break;
+			case SSPP_DMA2:
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 0;
+				break;
+			case SSPP_DMA3:
+				mix = (i + 1) & 0xf;
+				mixercfg_ext2 |= mix << 4;
+				break;
+			case SSPP_CURSOR0:
+				mixercfg_ext |= ((i + 1) & 0xF) << 20;
+				break;
+			case SSPP_CURSOR1:
+				mixercfg_ext |= ((i + 1) & 0xF) << 26;
+				break;
+			default:
+				break;
+			}
+		}
+	}
+
+	SDE_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+	SDE_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+}
+
+static void sde_hw_ctl_intf_cfg(struct sde_hw_ctl *ctx,
+ struct sde_hw_intf_cfg *cfg)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->wb)
+ intf_cfg |= (cfg->wb & 0x3) + 2;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ switch (cfg->intf_mode_sel) {
+ case SDE_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case SDE_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ SDE_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void sde_hw_ctl_clear_intf_cfg(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+ SDE_REG_WRITE(c, CTL_TOP, 0);
+}
+
+static inline u32 sde_hw_ctl_read_ctl_top_for_splash(struct sde_hw_ctl *ctx)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 ctl_top;
+
+ if (!ctx) {
+ pr_err("Invalid ctx\n");
+ return 0;
+ }
+
+ c = &ctx->hw;
+ ctl_top = SDE_REG_READ(c, CTL_TOP);
+ return ctl_top;
+}
+
+static inline u32 sde_hw_ctl_read_ctl_layers_for_splash(struct sde_hw_ctl *ctx,
+ int index)
+{
+ struct sde_hw_blk_reg_map *c;
+ u32 ctl_top;
+
+ if (!ctx) {
+ pr_err("Invalid ctx\n");
+ return 0;
+ }
+
+ c = &ctx->hw;
+ ctl_top = SDE_REG_READ(c, CTL_LAYER(index));
+
+ return ctl_top;
+}
+
+static void _setup_ctl_ops(struct sde_hw_ctl_ops *ops,
+		unsigned long cap)
+{
+	ops->clear_pending_flush = sde_hw_ctl_clear_pending_flush;
+	ops->update_pending_flush = sde_hw_ctl_update_pending_flush;
+	ops->get_pending_flush = sde_hw_ctl_get_pending_flush;
+	ops->trigger_flush = sde_hw_ctl_trigger_flush;
+	ops->get_flush_register = sde_hw_ctl_get_flush_register;
+	ops->trigger_start = sde_hw_ctl_trigger_start;
+	ops->setup_intf_cfg = sde_hw_ctl_intf_cfg;
+	ops->clear_intf_cfg = sde_hw_ctl_clear_intf_cfg;
+	ops->reset = sde_hw_ctl_reset_control;
+	ops->wait_reset_status = sde_hw_ctl_wait_reset_status;
+	ops->clear_all_blendstages = sde_hw_ctl_clear_all_blendstages;
+	ops->setup_blendstage = sde_hw_ctl_setup_blendstage;
+	ops->get_bitmask_sspp = sde_hw_ctl_get_bitmask_sspp;
+	ops->get_bitmask_mixer = sde_hw_ctl_get_bitmask_mixer;
+	ops->get_bitmask_dspp = sde_hw_ctl_get_bitmask_dspp;
+	ops->get_bitmask_intf = sde_hw_ctl_get_bitmask_intf;
+	ops->get_bitmask_cdm = sde_hw_ctl_get_bitmask_cdm;
+	ops->get_bitmask_wb = sde_hw_ctl_get_bitmask_wb;
+	ops->read_ctl_top_for_splash = sde_hw_ctl_read_ctl_top_for_splash;
+	ops->read_ctl_layers_for_splash = sde_hw_ctl_read_ctl_layers_for_splash;
+}
+
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_ctl *c;
+ struct sde_ctl_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create sde_hw_ctl %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+ c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+ return c;
+}
+
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx)
+{
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_ctl.h b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
new file mode 100644
index 000000000000..dab0b686cb74
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_ctl.h
@@ -0,0 +1,234 @@
+/* Copyright (c) 2015-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_CTL_H
+#define _SDE_HW_CTL_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_catalog.h"
+#include "sde_splash.h"
+
+/**
+ * enum sde_ctl_mode_sel - interface timing generation mode selection
+ * @SDE_CTL_MODE_SEL_VID: Video mode interface
+ * @SDE_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum sde_ctl_mode_sel {
+ SDE_CTL_MODE_SEL_VID = 0,
+ SDE_CTL_MODE_SEL_CMD
+};
+
+struct sde_hw_ctl;
+/**
+ * struct sde_hw_stage_cfg - blending stage cfg
+ * @stage: source pipe (sspp) id staged on each blend stage, indexed by
+ *         mixer, stage and pipe slot within the stage
+ */
+struct sde_hw_stage_cfg {
+ enum sde_sspp stage[CRTC_DUAL_MIXERS][SDE_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct sde_hw_intf_cfg - describes how the SDE writes data to an output
+ *                          interface
+ * @intf: Interface id
+ * @wb: Writeback id
+ * @mode_3d: 3d mux configuration
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ */
+struct sde_hw_intf_cfg {
+ enum sde_intf intf;
+ enum sde_wb wb;
+ enum sde_3d_blend_mode mode_3d;
+ enum sde_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+};
+
+/**
+ * struct sde_hw_ctl_ops - Interface to the ctl path hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_ctl_ops {
+ /**
+ * kickoff hw operation for Sw controlled interfaces
+ * DSI cmd mode and WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct sde_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct sde_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct sde_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct sde_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct sde_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * @Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct sde_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct sde_hw_ctl *ctx,
+ struct sde_hw_intf_cfg *cfg);
+
+ /**
+ * Clear ctl_path interface config
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_intf_cfg)(struct sde_hw_ctl *ctx);
+
+ /**
+ * Trigger a hw reset of the ctl path
+ * @c : ctl path ctx pointer
+ * @Return: 0 on success, negative error code otherwise
+ */
+ int (*reset)(struct sde_hw_ctl *c);
+
+ /*
+ * wait_reset_status - checks ctl reset status
+ * @ctx : ctl path ctx pointer
+ *
+ * This function checks the ctl reset status bit.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete.
+ * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct sde_hw_ctl *ctx);
+
+ /**
+ * Get the ctl flush mask bit for the given sspp block
+ * @ctx : ctl path ctx pointer
+ * @blk : sspp block id
+ */
+ uint32_t (*get_bitmask_sspp)(struct sde_hw_ctl *ctx,
+ enum sde_sspp blk);
+
+ /**
+ * Get the ctl flush mask bit for the given layer mixer block
+ * @ctx : ctl path ctx pointer
+ * @blk : layer mixer block id
+ */
+ uint32_t (*get_bitmask_mixer)(struct sde_hw_ctl *ctx,
+ enum sde_lm blk);
+
+ /**
+ * Get the flush bit(s) for the given dspp block into @flushbits
+ * @Return: 0 on success, negative error code otherwise
+ */
+ int (*get_bitmask_dspp)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_dspp blk);
+
+ /**
+ * Get the flush bit(s) for the given interface block into @flushbits
+ * @Return: 0 on success, negative error code otherwise
+ */
+ int (*get_bitmask_intf)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_intf blk);
+
+ /**
+ * Get the flush bit(s) for the given cdm block into @flushbits
+ * @Return: 0 on success, negative error code otherwise
+ */
+ int (*get_bitmask_cdm)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_cdm blk);
+
+ /**
+ * Get the flush bit(s) for the given writeback block into @flushbits
+ * @Return: 0 on success, negative error code otherwise
+ */
+ int (*get_bitmask_wb)(struct sde_hw_ctl *ctx,
+ u32 *flushbits,
+ enum sde_wb blk);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ * @handoff : indicate if lk is prepare for handoff
+ * @splash_mask : layer mixer mask of splash layers
+ * @splash_ext_mask: layer mixer extension mask of splash layers
+ */
+ void (*clear_all_blendstages)(struct sde_hw_ctl *ctx,
+ bool handoff, u32 splash_mask, u32 splash_ext_mask);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ * @index : stage cfg index (presumably selects the mixer slot in
+ *          @cfg - TODO confirm against implementation)
+ * @handoff : indicate if lk is prepare for handoff
+ * @splash_mask : layer mixer mask of splash layers
+ * @splash_ext_mask: layer mixer extension mask of splash layers
+ */
+ void (*setup_blendstage)(struct sde_hw_ctl *ctx,
+ enum sde_lm lm, struct sde_hw_stage_cfg *cfg, u32 index,
+ bool handoff, u32 splash_mask, u32 splash_ext_mask);
+
+ /**
+ * read CTL_TOP register value for splash case
+ * @ctx : ctl path ctx pointer
+ * @Return : CTL top register value
+ */
+ u32 (*read_ctl_top_for_splash)(struct sde_hw_ctl *ctx);
+
+ /**
+ * read CTL layers register value for splash case
+ * @ctx : ctl path ctx pointer
+ * @index : layer index for this ctl path
+ * @Return : CTL layers register value
+ */
+ u32 (*read_ctl_layers_for_splash)(struct sde_hw_ctl *ctx, int index);
+};
+
+/**
+ * struct sde_hw_ctl : CTL PATH driver object
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities from the mdss catalog
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct sde_hw_ctl {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct sde_ctl_cfg *caps;
+ int mixer_count;
+ const struct sde_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+
+ /* ops */
+ struct sde_hw_ctl_ops ops;
+};
+
+/**
+ * sde_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * should be called before accessing every ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @Return: pointer to ctl path context, or ERR_PTR on failure
+ */
+struct sde_hw_ctl *sde_hw_ctl_init(enum sde_ctl idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_ctl_destroy(): Destroys ctl driver context
+ * should be called to free the context
+ * @ctx: Pointer to ctl driver context returned by sde_hw_ctl_init()
+ */
+void sde_hw_ctl_destroy(struct sde_hw_ctl *ctx);
+
+#endif /*_SDE_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.c b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
new file mode 100644
index 000000000000..4c5af0666d88
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.c
@@ -0,0 +1,126 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#include <drm/msm_drm_pp.h>
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_color_processing.h"
+#include "sde_dbg.h"
+
+/*
+ * _dspp_offset - find the catalog entry for @dspp and fill in the register
+ * block mapping @b from it.
+ * Returns the catalog entry, or ERR_PTR(-EINVAL) if @dspp is not listed.
+ */
+static struct sde_dspp_cfg *_dspp_offset(enum sde_dspp dspp,
+ struct sde_mdss_cfg *m,
+ void __iomem *addr,
+ struct sde_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->dspp_count; i++) {
+ if (dspp == m->dspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->dspp[i].base;
+ b->length = m->dspp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = SDE_DBG_MASK_DSPP;
+ return &m->dspp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+/* Empty placeholder implementations: these dspp features are not wired up */
+void sde_dspp_setup_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_read_histogram(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_update_igc(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_sharpening(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_danger_safe(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+void sde_dspp_setup_dither(struct sde_hw_dspp *ctx, void *cfg)
+{
+}
+
+/*
+ * _setup_dspp_ops - populate the dspp ops table for each color-processing
+ * feature present in the catalog feature mask, provided the sub-block
+ * version is one the driver supports.
+ */
+static void _setup_dspp_ops(struct sde_hw_dspp *c, unsigned long features)
+{
+ int i = 0;
+
+ for (i = 0; i < SDE_DSPP_MAX; i++) {
+ if (!test_bit(i, &features))
+ continue;
+ switch (i) {
+ case SDE_DSPP_PCC:
+ if (c->cap->sblk->pcc.version ==
+ (SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+ c->ops.setup_pcc = sde_setup_dspp_pcc_v1_7;
+ break;
+ case SDE_DSPP_HSIC:
+ if (c->cap->sblk->hsic.version ==
+ (SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+ c->ops.setup_pa_hsic =
+ sde_setup_dspp_pa_hsic_v1_7;
+ break;
+ case SDE_DSPP_VLUT:
+ if (c->cap->sblk->vlut.version ==
+ (SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+ c->ops.setup_vlut = sde_setup_dspp_pa_vlut_v1_7;
+ }
+ /* explicit break: was an implicit fallthrough into
+ * default (harmless today, fragile if cases are
+ * ever added below)
+ */
+ break;
+ default:
+ break;
+ }
+ }
+}
+
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m)
+{
+ struct sde_hw_dspp *c;
+ struct sde_dspp_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ /* Resolve this dspp's register block mapping from the catalog */
+ cfg = _dspp_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_dspp_ops(c, c->cap->features);
+
+ /* Expose this block's register range to the sde debug facility */
+ sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+ c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+ return c;
+}
+
+/* Free a dspp context allocated by sde_hw_dspp_init(); NULL is a no-op */
+void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp)
+{
+ kfree(dspp);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_dspp.h b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
new file mode 100644
index 000000000000..e1e8622dd11f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_dspp.h
@@ -0,0 +1,162 @@
+/* Copyright (c) 2015-2016, 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_DSPP_H
+#define _SDE_HW_DSPP_H
+
+struct sde_hw_dspp;
+
+/**
+ * struct sde_hw_dspp_ops - interface to the dspp hardware driver functions
+ * Caller must call the init function to get the dspp context for each dspp
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_dspp_ops {
+ /**
+ * setup_histogram - setup dspp histogram
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * read_histogram - read dspp histogram
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*read_histogram)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_igc - update dspp igc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_igc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_pa - setup dspp pa
+ * @dspp: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pa)(struct sde_hw_dspp *dspp, void *cfg);
+
+ /**
+ * setup_pcc - setup dspp pcc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pcc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_sharpening - setup dspp sharpening
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_sharpening)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_pa_memcolor - setup dspp memcolor
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pa_memcolor)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_sixzone - setup dspp six zone
+ * @dspp: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_sixzone)(struct sde_hw_dspp *dspp, void *cfg);
+
+ /**
+ * setup_danger_safe - setup danger safe LUTS
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_danger_safe)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_dither - setup dspp dither
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_dither)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_pa_hsic - setup dspp PA hsic
+ * @dspp: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_pa_hsic)(struct sde_hw_dspp *dspp, void *cfg);
+
+ /**
+ * setup_vlut - setup dspp PA VLUT
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_vlut)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_gc - update dspp gc
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_gc)(struct sde_hw_dspp *ctx, void *cfg);
+
+ /**
+ * setup_gamut - update dspp gamut
+ * @ctx: Pointer to dspp context
+ * @cfg: Pointer to configuration
+ */
+ void (*setup_gamut)(struct sde_hw_dspp *ctx, void *cfg);
+};
+
+/**
+ * struct sde_hw_dspp - dspp description
+ * @hw: Block register map object
+ * @idx: DSPP index
+ * @cap: Pointer to dspp catalog capability entry
+ * @ops: Pointer to operations possible for this dspp
+ */
+struct sde_hw_dspp {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* dspp */
+ enum sde_dspp idx;
+ const struct sde_dspp_cfg *cap;
+
+ /* Ops */
+ struct sde_hw_dspp_ops ops;
+};
+
+/**
+ * sde_hw_dspp_init - initializes the dspp hw driver object.
+ * should be called once before accessing every dspp.
+ * @idx: DSPP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * @Return: pointer to dspp context, or ERR_PTR on failure
+ */
+struct sde_hw_dspp *sde_hw_dspp_init(enum sde_dspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_dspp_destroy(): Destroys DSPP driver context
+ * @dspp: Pointer to DSPP driver context
+ */
+void sde_hw_dspp_destroy(struct sde_hw_dspp *dspp);
+
+#endif /*_SDE_HW_DSPP_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_hwio.h b/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
new file mode 100644
index 000000000000..e69de29bb2d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_hwio.h
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
new file mode 100644
index 000000000000..1535d1d1ade5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.c
@@ -0,0 +1,986 @@
+/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "sde_kms.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+
+/*
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. the MDSS base
+ */
+#define HW_INTR_STATUS 0x0010
+#define MDP_SSPP_TOP0_OFF 0x1000
+#define MDP_INTF_0_OFF 0x6B000
+#define MDP_INTF_1_OFF 0x6B800
+#define MDP_INTF_2_OFF 0x6C000
+#define MDP_INTF_3_OFF 0x6C800
+#define MDP_INTF_4_OFF 0x6D000
+
+/*
+ * WB interrupt status bit definitions
+ */
+#define SDE_INTR_WB_0_DONE BIT(0)
+#define SDE_INTR_WB_1_DONE BIT(1)
+#define SDE_INTR_WB_2_DONE BIT(4)
+
+/*
+ * WDOG timer interrupt status bit definitions
+ */
+#define SDE_INTR_WD_TIMER_0_DONE BIT(2)
+#define SDE_INTR_WD_TIMER_1_DONE BIT(3)
+#define SDE_INTR_WD_TIMER_2_DONE BIT(5)
+#define SDE_INTR_WD_TIMER_3_DONE BIT(6)
+#define SDE_INTR_WD_TIMER_4_DONE BIT(7)
+
+/*
+ * Pingpong interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_DONE BIT(8)
+#define SDE_INTR_PING_PONG_1_DONE BIT(9)
+#define SDE_INTR_PING_PONG_2_DONE BIT(10)
+#define SDE_INTR_PING_PONG_3_DONE BIT(11)
+#define SDE_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define SDE_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define SDE_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define SDE_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define SDE_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define SDE_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define SDE_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define SDE_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/*
+ * Interface interrupt status bit definitions
+ */
+#define SDE_INTR_INTF_0_UNDERRUN BIT(24)
+#define SDE_INTR_INTF_1_UNDERRUN BIT(26)
+#define SDE_INTR_INTF_2_UNDERRUN BIT(28)
+#define SDE_INTR_INTF_3_UNDERRUN BIT(30)
+#define SDE_INTR_INTF_0_VSYNC BIT(25)
+#define SDE_INTR_INTF_1_VSYNC BIT(27)
+#define SDE_INTR_INTF_2_VSYNC BIT(29)
+#define SDE_INTR_INTF_3_VSYNC BIT(31)
+
+/*
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define SDE_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define SDE_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define SDE_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define SDE_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/*
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define SDE_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define SDE_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define SDE_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/*
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define SDE_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define SDE_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define SDE_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define SDE_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/*
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define SDE_INTR_CWB_2_OVERFLOW BIT(14)
+#define SDE_INTR_CWB_3_OVERFLOW BIT(15)
+
+/*
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_DONE BIT(0)
+#define SDE_INTR_HIST_VIG_1_DONE BIT(4)
+#define SDE_INTR_HIST_VIG_2_DONE BIT(8)
+#define SDE_INTR_HIST_VIG_3_DONE BIT(10)
+
+/*
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define SDE_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define SDE_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define SDE_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/*
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_DONE BIT(12)
+#define SDE_INTR_HIST_DSPP_1_DONE BIT(16)
+#define SDE_INTR_HIST_DSPP_2_DONE BIT(20)
+#define SDE_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/*
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/*
+ * INTF interrupt status bit definitions
+ */
+#define SDE_INTR_VIDEO_INTO_STATIC BIT(0)
+#define SDE_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define SDE_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define SDE_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define SDE_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define SDE_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define SDE_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define SDE_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define SDE_INTR_PROG_LINE BIT(8)
+
+/**
+ * struct sde_intr_reg - array of SDE register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ *
+ * Each entry describes one interrupt register group (clear/enable/status).
+ */
+struct sde_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct sde_irq_type - maps each irq with i/f
+ * @intr_type: type of interrupt listed in sde_intr_type
+ * @instance_idx: instance index of the associated HW block in SDE
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: index into sde_intr_set[] selecting which reg set to use
+ */
+struct sde_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/*
+ * List of SDE interrupt registers
+ *
+ * NOTE: entry order must match the reg_idx values used in sde_irq_map.
+ */
+static const struct sde_intr_reg sde_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ }
+};
+
+/**
+ * IRQ mapping table - use for lookup an irq_idx in this table that have
+ * a matching interface type and instance index.
+ */
+static const struct sde_irq_type sde_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { SDE_IRQ_TYPE_WB_ROT_COMP, WB_0, SDE_INTR_WB_0_DONE, 0},
+ { SDE_IRQ_TYPE_WB_ROT_COMP, WB_1, SDE_INTR_WB_1_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_0, SDE_INTR_WD_TIMER_0_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_1, SDE_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { SDE_IRQ_TYPE_WB_WFD_COMP, WB_2, SDE_INTR_WB_2_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_2, SDE_INTR_WD_TIMER_2_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_3, SDE_INTR_WD_TIMER_3_DONE, 0},
+ { SDE_IRQ_TYPE_WD_TIMER, WD_TIMER_4, SDE_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_RD_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_WR_PTR, 0},
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, SDE_INTR_INTF_0_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_0, SDE_INTR_INTF_0_VSYNC, 0},
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, SDE_INTR_INTF_1_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_1, SDE_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, SDE_INTR_INTF_2_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_2, SDE_INTR_INTF_2_VSYNC, 0},
+ { SDE_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, SDE_INTR_INTF_3_UNDERRUN, 0},
+ { SDE_IRQ_TYPE_INTF_VSYNC, INTF_3, SDE_INTR_INTF_3_VSYNC, 0},
+
+ /* BEGIN MAP_RANGE: 32-64, INTR2 */
+ /* irq_idx: 32-35 */
+ { SDE_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { SDE_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_WR_PTR, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40-43 */
+ { SDE_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_RD_PTR, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 44-47 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_2, SDE_INTR_CWB_2_OVERFLOW, 1},
+ { SDE_IRQ_TYPE_CWB_OVERFLOW, CWB_3, SDE_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ SDE_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ SDE_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ SDE_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ SDE_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { SDE_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ SDE_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, SDE_INTR_HIST_VIG_0_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ SDE_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, SDE_INTR_HIST_VIG_1_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ SDE_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 72-75 */
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, SDE_INTR_HIST_VIG_2_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ SDE_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, SDE_INTR_HIST_VIG_3_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ SDE_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 76-79 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, SDE_INTR_HIST_DSPP_0_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ SDE_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, SDE_INTR_HIST_DSPP_1_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ SDE_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 84-87 */
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, SDE_INTR_HIST_DSPP_2_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ SDE_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, SDE_INTR_HIST_DSPP_3_DONE, 2},
+ { SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ SDE_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 88-91 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ SDE_INTR_VIDEO_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 3},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_0, SDE_INTR_PROG_LINE, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ SDE_INTR_VIDEO_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 4},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_1, SDE_INTR_PROG_LINE, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ SDE_INTR_VIDEO_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 5},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_2, SDE_INTR_PROG_LINE, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ SDE_INTR_VIDEO_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 6},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_3, SDE_INTR_PROG_LINE, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { SDE_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ SDE_INTR_VIDEO_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ SDE_INTR_VIDEO_OUTOF_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ SDE_INTR_DSICMD_0_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ SDE_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { SDE_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ SDE_INTR_DSICMD_1_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ SDE_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ SDE_INTR_DSICMD_2_INTO_STATIC, 7},
+ { SDE_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ SDE_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { SDE_IRQ_TYPE_PROG_LINE, INTF_4, SDE_INTR_PROG_LINE, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { SDE_IRQ_TYPE_RESERVED, 0, 0, 7},
+};
+
+/*
+ * Map an (interrupt type, block instance) pair back to its index in the
+ * static sde_irq_map table. Returns the index, or -EINVAL when no entry
+ * matches.
+ */
+static int sde_hw_intr_irqidx_lookup(enum sde_intr_type intr_type,
+		u32 instance_idx)
+{
+	int idx;
+
+	/* Linear scan; the map is small and this is not a hot path */
+	for (idx = 0; idx < ARRAY_SIZE(sde_irq_map); idx++)
+		if (sde_irq_map[idx].intr_type == intr_type &&
+				sde_irq_map[idx].instance_idx == instance_idx)
+			return idx;
+
+	pr_debug("IRQ lookup fail!! intr_type=%d, instance_idx=%d\n",
+			intr_type, instance_idx);
+	return -EINVAL;
+}
+
+/*
+ * Overwrite the interrupt register at @reg_off with @mask; the previous
+ * register value is not preserved.
+ */
+static void sde_hw_intr_set_mask(struct sde_hw_intr *intr, uint32_t reg_off,
+		uint32_t mask)
+{
+	SDE_REG_WRITE(&intr->hw, reg_off, mask);
+}
+
+/**
+ * sde_hw_intr_dispatch_irq - dispatch previously latched interrupt statuses
+ * @intr:   HW interrupt handle
+ * @cbfunc: callback invoked as cbfunc(arg, irq_idx) for each asserted irq;
+ *          when NULL the interrupt status is cleared here instead
+ * @arg:    opaque pointer handed back to @cbfunc
+ *
+ * Walks the status words captured by get_interrupt_statuses() (under
+ * status_lock) and maps each set bit back to its sde_irq_map index.
+ */
+static void sde_hw_intr_dispatch_irq(struct sde_hw_intr *intr,
+		void (*cbfunc)(void *, int),
+		void *arg)
+{
+	int reg_idx;
+	int irq_idx;
+	int start_idx;
+	int end_idx;
+	u32 irq_status;
+	unsigned long irq_flags;
+
+	/*
+	 * The dispatcher will save the IRQ status before calling here.
+	 * Now need to go through each IRQ status and find matching
+	 * irq lookup index.
+	 */
+	spin_lock_irqsave(&intr->status_lock, irq_flags);
+	for (reg_idx = 0; reg_idx < ARRAY_SIZE(sde_intr_set); reg_idx++) {
+		irq_status = intr->save_irq_status[reg_idx];
+
+		/*
+		 * Each Interrupt register has a range of 32 indexes, and
+		 * that is static for sde_irq_map.
+		 */
+		start_idx = reg_idx * 32;
+		end_idx = start_idx + 32;
+
+		/*
+		 * Search through matching intr status from irq map.
+		 * start_idx and end_idx defined the search range in
+		 * the sde_irq_map.
+		 */
+		for (irq_idx = start_idx;
+				(irq_idx < end_idx) && irq_status;
+				irq_idx++)
+			if ((irq_status & sde_irq_map[irq_idx].irq_mask) &&
+				(sde_irq_map[irq_idx].reg_idx == reg_idx)) {
+				/*
+				 * Once a match on irq mask, perform a callback
+				 * to the given cbfunc. cbfunc will take care
+				 * the interrupt status clearing. If cbfunc is
+				 * not provided, then the interrupt clearing
+				 * is here.
+				 */
+				if (cbfunc)
+					cbfunc(arg, irq_idx);
+				else
+					intr->ops.clear_interrupt_status(
+							intr, irq_idx);
+
+				/*
+				 * When callback finish, clear the irq_status
+				 * with the matching mask. Once irq_status
+				 * is all cleared, the search can be stopped.
+				 */
+				irq_status &= ~sde_irq_map[irq_idx].irq_mask;
+			}
+	}
+	spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
+
+/**
+ * sde_hw_intr_enable_irq - unmask one interrupt by lookup index
+ * @intr:    HW interrupt handle
+ * @irq_idx: index returned by irq_idx_lookup
+ *
+ * Clears any stale pending instance of the interrupt before writing the
+ * new enable mask, and keeps the per-register cached mask in sync under
+ * mask_lock. Already-enabled interrupts are left untouched.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range index.
+ */
+static int sde_hw_intr_enable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	const struct sde_intr_reg *reg;
+	const struct sde_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &sde_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &sde_intr_set[reg_idx];
+
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if (cache_irq_mask & irq->irq_mask) {
+		dbgstr = "SDE IRQ already set:";
+	} else {
+		dbgstr = "SDE IRQ enabled:";
+
+		cache_irq_mask |= irq->irq_mask;
+		/* Cleaning any pending interrupt */
+		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+		/* Enabling interrupts with the new mask */
+		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+/**
+ * sde_hw_intr_disable_irq - mask one interrupt by lookup index
+ * @intr:    HW interrupt handle
+ * @irq_idx: index returned by irq_idx_lookup
+ *
+ * Writes the reduced enable mask first, then acknowledges any instance of
+ * the interrupt left pending, keeping the cached mask in sync under
+ * mask_lock. Already-disabled interrupts are left untouched.
+ *
+ * Return: 0 on success, -EINVAL for an out-of-range index.
+ */
+static int sde_hw_intr_disable_irq(struct sde_hw_intr *intr, int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	const struct sde_intr_reg *reg;
+	const struct sde_irq_type *irq;
+	const char *dbgstr = NULL;
+	uint32_t cache_irq_mask;
+
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return -EINVAL;
+	}
+
+	irq = &sde_irq_map[irq_idx];
+	reg_idx = irq->reg_idx;
+	reg = &sde_intr_set[reg_idx];
+
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+	cache_irq_mask = intr->cache_irq_mask[reg_idx];
+	if ((cache_irq_mask & irq->irq_mask) == 0) {
+		dbgstr = "SDE IRQ is already cleared:";
+	} else {
+		dbgstr = "SDE IRQ mask disable:";
+
+		cache_irq_mask &= ~irq->irq_mask;
+		/* Disable interrupts based on the new mask */
+		SDE_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Cleaning any pending interrupt */
+		SDE_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+		intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+	}
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+	pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+			irq->irq_mask, cache_irq_mask);
+
+	return 0;
+}
+
+/* Acknowledge every pending interrupt in all status registers */
+static int sde_hw_intr_clear_irqs(struct sde_hw_intr *intr)
+{
+	int reg;
+
+	for (reg = 0; reg < ARRAY_SIZE(sde_intr_set); reg++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg].clr_off,
+				0xffffffff);
+
+	return 0;
+}
+
+/* Mask off every interrupt by zeroing all enable registers */
+static int sde_hw_intr_disable_irqs(struct sde_hw_intr *intr)
+{
+	int reg;
+
+	for (reg = 0; reg < ARRAY_SIZE(sde_intr_set); reg++)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg].en_off,
+				0x00000000);
+
+	return 0;
+}
+
+/*
+ * Report the fixed set of interrupt sources handled here: MDP, DSI0/1,
+ * HDMI and eDP. IRQ_SOURCE_MHL is defined but not included in this mask.
+ */
+static int sde_hw_intr_get_valid_interrupts(struct sde_hw_intr *intr,
+		uint32_t *mask)
+{
+	*mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+		| IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+	return 0;
+}
+
+/* Read the top-level HW interrupt source status register into @sources */
+static int sde_hw_intr_get_interrupt_sources(struct sde_hw_intr *intr,
+		uint32_t *sources)
+{
+	*sources = SDE_REG_READ(&intr->hw, HW_INTR_STATUS);
+	return 0;
+}
+
+/**
+ * sde_hw_intr_get_interrupt_statuses - latch and ack all status registers
+ * @intr: HW interrupt handle
+ *
+ * Saves each status register into save_irq_status[], acknowledges whatever
+ * was asserted, then masks the saved value down to the enabled bits so the
+ * dispatcher only sees interrupts that were actually armed. Runs under
+ * status_lock, the same lock dispatch_irqs takes.
+ */
+static void sde_hw_intr_get_interrupt_statuses(struct sde_hw_intr *intr)
+{
+	int i;
+	u32 enable_mask;
+	unsigned long irq_flags;
+
+	spin_lock_irqsave(&intr->status_lock, irq_flags);
+	for (i = 0; i < ARRAY_SIZE(sde_intr_set); i++) {
+		/* Read interrupt status */
+		intr->save_irq_status[i] = SDE_REG_READ(&intr->hw,
+				sde_intr_set[i].status_off);
+
+		/* Read enable mask */
+		enable_mask = SDE_REG_READ(&intr->hw, sde_intr_set[i].en_off);
+
+		/* and clear the interrupt */
+		if (intr->save_irq_status[i])
+			SDE_REG_WRITE(&intr->hw, sde_intr_set[i].clr_off,
+					intr->save_irq_status[i]);
+
+		/* Finally update IRQ status based on enable mask */
+		intr->save_irq_status[i] &= enable_mask;
+	}
+	spin_unlock_irqrestore(&intr->status_lock, irq_flags);
+}
+
+/**
+ * sde_hw_intr_clear_interrupt_status - acknowledge one interrupt
+ * @intr:    HW interrupt handle
+ * @irq_idx: index returned by irq_idx_lookup
+ *
+ * Writes the irq's mask to its clear register under mask_lock.
+ */
+static void sde_hw_intr_clear_interrupt_status(struct sde_hw_intr *intr,
+		int irq_idx)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+
+	/*
+	 * Validate irq_idx before it indexes sde_irq_map; enable_irq and
+	 * disable_irq already check this, but this path did not, allowing
+	 * an out-of-bounds read for a bad index.
+	 */
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return;
+	}
+
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+	reg_idx = sde_irq_map[irq_idx].reg_idx;
+	SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+			sde_irq_map[irq_idx].irq_mask);
+
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+}
+
+/**
+ * sde_hw_intr_get_interrupt_status - read (and optionally ack) one irq
+ * @intr:    HW interrupt handle
+ * @irq_idx: index returned by irq_idx_lookup
+ * @clear:   when true, acknowledge the irq if it is asserted
+ *
+ * Return: the irq's masked status bits, 0 when not asserted or when
+ * @irq_idx is out of range.
+ */
+static u32 sde_hw_intr_get_interrupt_status(struct sde_hw_intr *intr,
+		int irq_idx, bool clear)
+{
+	int reg_idx;
+	unsigned long irq_flags;
+	u32 intr_status;
+
+	/*
+	 * Validate irq_idx before it indexes sde_irq_map; enable_irq and
+	 * disable_irq already check this, but this path did not, allowing
+	 * an out-of-bounds read for a bad index.
+	 */
+	if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(sde_irq_map)) {
+		pr_err("invalid IRQ index: [%d]\n", irq_idx);
+		return 0;
+	}
+
+	spin_lock_irqsave(&intr->mask_lock, irq_flags);
+
+	reg_idx = sde_irq_map[irq_idx].reg_idx;
+	intr_status = SDE_REG_READ(&intr->hw,
+			sde_intr_set[reg_idx].status_off) &
+					sde_irq_map[irq_idx].irq_mask;
+	if (intr_status && clear)
+		SDE_REG_WRITE(&intr->hw, sde_intr_set[reg_idx].clr_off,
+				intr_status);
+
+	spin_unlock_irqrestore(&intr->mask_lock, irq_flags);
+
+	return intr_status;
+}
+
+/* Populate the function-pointer table consumed by the interrupt core */
+static void __setup_intr_ops(struct sde_hw_intr_ops *ops)
+{
+	ops->set_mask = sde_hw_intr_set_mask;
+	ops->irq_idx_lookup = sde_hw_intr_irqidx_lookup;
+	ops->enable_irq = sde_hw_intr_enable_irq;
+	ops->disable_irq = sde_hw_intr_disable_irq;
+	ops->dispatch_irqs = sde_hw_intr_dispatch_irq;
+	ops->clear_all_irqs = sde_hw_intr_clear_irqs;
+	ops->disable_all_irqs = sde_hw_intr_disable_irqs;
+	ops->get_valid_interrupts = sde_hw_intr_get_valid_interrupts;
+	ops->get_interrupt_sources = sde_hw_intr_get_interrupt_sources;
+	ops->get_interrupt_statuses = sde_hw_intr_get_interrupt_statuses;
+	ops->clear_interrupt_status = sde_hw_intr_clear_interrupt_status;
+	ops->get_interrupt_status = sde_hw_intr_get_interrupt_status;
+}
+
+/*
+ * Point @hw at the MDSS top-block register range described by the catalog
+ * and return its config entry, or NULL when the catalog has no MDP block.
+ *
+ * NOTE(review): the guard tests m->mdp_count but the data returned comes
+ * from m->mdss[0] — confirm the catalog guarantees an mdss entry whenever
+ * mdp_count is non-zero.
+ */
+static struct sde_mdss_base_cfg *__intr_offset(struct sde_mdss_cfg *m,
+		void __iomem *addr, struct sde_hw_blk_reg_map *hw)
+{
+	if (m->mdp_count == 0)
+		return NULL;
+
+	hw->base_off = addr;
+	hw->blk_off = m->mdss[0].base;
+	hw->hwversion = m->hwversion;
+	return &m->mdss[0];
+}
+
+/*
+ * Allocate and initialize the interrupt handling object: register map,
+ * ops table, one cached-mask word and one saved-status word per interrupt
+ * register, plus the two spinlocks. Returns ERR_PTR on failure, freeing
+ * anything allocated so far.
+ */
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_intr *intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+	struct sde_mdss_base_cfg *cfg;
+
+	if (!intr)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = __intr_offset(m, addr, &intr->hw);
+	if (!cfg) {
+		kfree(intr);
+		return ERR_PTR(-EINVAL);
+	}
+	__setup_intr_ops(&intr->ops);
+
+	intr->irq_idx_tbl_size = ARRAY_SIZE(sde_irq_map);
+
+	/* one 32-bit enable-mask cache entry per interrupt register */
+	intr->cache_irq_mask = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->cache_irq_mask == NULL) {
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	/* one 32-bit saved-status entry per interrupt register */
+	intr->save_irq_status = kcalloc(ARRAY_SIZE(sde_intr_set), sizeof(u32),
+			GFP_KERNEL);
+	if (intr->save_irq_status == NULL) {
+		kfree(intr->cache_irq_mask);
+		kfree(intr);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	spin_lock_init(&intr->mask_lock);
+	spin_lock_init(&intr->status_lock);
+
+	return intr;
+}
+
+/* Release the interrupt object and its per-register arrays; NULL is a no-op */
+void sde_hw_intr_destroy(struct sde_hw_intr *intr)
+{
+	if (!intr)
+		return;
+
+	kfree(intr->cache_irq_mask);
+	kfree(intr->save_irq_status);
+	kfree(intr);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
new file mode 100644
index 000000000000..261ef64c0065
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTERRUPTS_H
+#define _SDE_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_util.h"
+#include "sde_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * sde_intr_type - HW Interrupt Type
+ * @SDE_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @SDE_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @SDE_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @SDE_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @SDE_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @SDE_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @SDE_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @SDE_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @SDE_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @SDE_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @SDE_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @SDE_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @SDE_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @SDE_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @SDE_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @SDE_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @SDE_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @SDE_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum sde_intr_type {
+ SDE_IRQ_TYPE_WB_ROT_COMP,
+ SDE_IRQ_TYPE_WB_WFD_COMP,
+ SDE_IRQ_TYPE_PING_PONG_COMP,
+ SDE_IRQ_TYPE_PING_PONG_RD_PTR,
+ SDE_IRQ_TYPE_PING_PONG_WR_PTR,
+ SDE_IRQ_TYPE_PING_PONG_AUTO_REF,
+ SDE_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ SDE_IRQ_TYPE_PING_PONG_TE_CHECK,
+ SDE_IRQ_TYPE_INTF_UNDER_RUN,
+ SDE_IRQ_TYPE_INTF_VSYNC,
+ SDE_IRQ_TYPE_CWB_OVERFLOW,
+ SDE_IRQ_TYPE_HIST_VIG_DONE,
+ SDE_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ SDE_IRQ_TYPE_HIST_DSPP_DONE,
+ SDE_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ SDE_IRQ_TYPE_WD_TIMER,
+ SDE_IRQ_TYPE_SFI_VIDEO_IN,
+ SDE_IRQ_TYPE_SFI_VIDEO_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_0_IN,
+ SDE_IRQ_TYPE_SFI_CMD_0_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_1_IN,
+ SDE_IRQ_TYPE_SFI_CMD_1_OUT,
+ SDE_IRQ_TYPE_SFI_CMD_2_IN,
+ SDE_IRQ_TYPE_SFI_CMD_2_OUT,
+ SDE_IRQ_TYPE_PROG_LINE,
+ SDE_IRQ_TYPE_RESERVED,
+};
+
+struct sde_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct sde_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct sde_hw_intr *intr,
+ uint32_t reg,
+ uint32_t irqmask);
+
+ /**
+ * irq_idx_lookup - Lookup IRQ index on the HW interrupt type
+ * Used for all irq related ops
+ * @intr_type: Interrupt type defined in sde_intr_type
+ * @instance_idx: HW interrupt block instance
+ * @return: irq_idx or -EINVAL for lookup fail
+ */
+ int (*irq_idx_lookup)(
+ enum sde_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct sde_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+ * get_interrupt_statuses - Gets and store value from all interrupt
+ * status registers that are currently fired.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct sde_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct sde_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * get_interrupt_status - Gets HW interrupt status, and clear if set,
+ * based on given lookup IRQ index.
+ * @intr: HW interrupt handle
+ * @irq_idx: Lookup irq index return from irq_idx_lookup
+ * @clear: True to clear irq after read
+ */
+ u32 (*get_interrupt_status)(
+ struct sde_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+ /**
+ * get_valid_interrupts - Gets a mask of all valid interrupt sources
+ * within SDE. These are actually status bits
+ * within interrupt registers that specify the
+ * source of the interrupt in IRQs. For example,
+ * valid interrupt sources can be MDP, DSI,
+ * HDMI etc.
+ * @intr: HW interrupt handle
+ * @mask: Returning the interrupt source MASK
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct sde_hw_intr *intr,
+ uint32_t *mask);
+
+ /**
+ * get_interrupt_sources - Gets the bitmask of the SDE interrupt
+ * source that are currently fired.
+ * @intr: HW interrupt handle
+ * @sources: Returning the SDE interrupt source status bit mask
+ * @return: 0 for success, otherwise failure
+ */
+ int (*get_interrupt_sources)(
+ struct sde_hw_intr *intr,
+ uint32_t *sources);
+};
+
+/**
+ * struct sde_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @mask_lock: spinlock for accessing IRQ mask
+ * @status_lock: spinlock for accessing IRQ status
+ */
+struct sde_hw_intr {
+ struct sde_hw_blk_reg_map hw;
+ struct sde_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t mask_lock;
+ spinlock_t status_lock;
+};
+
+/**
+ * sde_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_intr *sde_hw_intr_init(void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_intr_destroy(): Cleanup interrutps hw object
+ * @intr: pointer to interrupts hw object
+ */
+void sde_hw_intr_destroy(struct sde_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.c b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
new file mode 100644
index 000000000000..9e1b97800cb9
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.c
@@ -0,0 +1,339 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_intf.h"
+#include "sde_dbg.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+/*
+ * Locate the catalog entry for interface @intf and fill in the register
+ * map @b from it. Returns ERR_PTR(-EINVAL) when the interface is not in
+ * the catalog or is of type INTF_NONE.
+ */
+static struct sde_intf_cfg *_intf_offset(enum sde_intf intf,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->intf_count; i++) {
+		if ((intf == m->intf[i].id) &&
+		(m->intf[i].type != INTF_NONE)) {
+			b->base_off = addr;
+			b->blk_off = m->intf[i].base;
+			b->length = m->intf[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_INTF;
+			return &m->intf[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * sde_hw_intf_setup_timing_engine - program interface video timing registers
+ * @ctx: interface hw block
+ * @p:   timing parameters (porches, pulse widths, active/panel geometry)
+ * @fmt: output pixel format; YUV formats are programmed as RGB888 since
+ *       the interface treats all pixel data in RGB888 format
+ *
+ * Vertical positions are expressed in hsync-period units (lines) offset by
+ * the hsync skew. Active-window registers are programmed only when the
+ * active size differs from the panel size, with the matching enable bits
+ * set in INTF_CONFIG.
+ */
+static void sde_hw_intf_setup_timing_engine(struct sde_hw_intf *ctx,
+		const struct intf_timing_params *p,
+		const struct sde_format *fmt)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 hsync_period, vsync_period;
+	u32 display_v_start, display_v_end;
+	u32 hsync_start_x, hsync_end_x;
+	u32 active_h_start, active_h_end;
+	u32 active_v_start, active_v_end;
+	u32 active_hctl, display_hctl, hsync_ctl;
+	u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+	u32 panel_format;
+	u32 intf_cfg;
+
+	/* read interface_cfg */
+	intf_cfg = SDE_REG_READ(c, INTF_CONFIG);
+	hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+	p->h_front_porch;
+	vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+	p->v_front_porch;
+
+	display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+	hsync_period) + p->hsync_skew;
+	display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+	p->hsync_skew - 1;
+
+	/* eDP shifts the display window by one horizontal blanking region */
+	if (ctx->cap->type == INTF_EDP) {
+		display_v_start += p->hsync_pulse_width + p->h_back_porch;
+		display_v_end -= p->h_front_porch;
+	}
+
+	hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+	hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+	if (p->width != p->xres) {
+		active_h_start = hsync_start_x;
+		active_h_end = active_h_start + p->xres - 1;
+	} else {
+		active_h_start = 0;
+		active_h_end = 0;
+	}
+
+	if (p->height != p->yres) {
+		active_v_start = display_v_start;
+		active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+	} else {
+		active_v_start = 0;
+		active_v_end = 0;
+	}
+
+	if (active_h_end) {
+		active_hctl = (active_h_end << 16) | active_h_start;
+		intf_cfg |= BIT(29);	/* ACTIVE_H_ENABLE */
+	} else {
+		active_hctl = 0;
+	}
+
+	if (active_v_end)
+		intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+	hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+	display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+	den_polarity = 0;
+	hsync_polarity = p->hsync_polarity;
+	vsync_polarity = p->vsync_polarity;
+	polarity_ctl = (den_polarity << 2) | /*  DEN Polarity  */
+		(vsync_polarity << 1) | /* VSYNC Polarity */
+		(hsync_polarity << 0);  /* HSYNC Polarity */
+
+	if (!SDE_FORMAT_IS_YUV(fmt))
+		panel_format = (fmt->bits[C0_G_Y] |
+				(fmt->bits[C1_B_Cb] << 2) |
+				(fmt->bits[C2_R_Cr] << 4) |
+				(0x21 << 8));
+	else
+		/* Interface treats all the pixel data in RGB888 format */
+		panel_format = (COLOR_8BIT |
+				(COLOR_8BIT << 2) |
+				(COLOR_8BIT << 4) |
+				(0x21 << 8));
+
+	SDE_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+	SDE_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+	SDE_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+			p->vsync_pulse_width * hsync_period);
+	SDE_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+	SDE_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+	SDE_REG_WRITE(c, INTF_ACTIVE_HCTL,  active_hctl);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+	SDE_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+	SDE_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+	SDE_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+	SDE_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+	SDE_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+	SDE_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+	SDE_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+	SDE_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
+/* Start or stop the interface timing engine; any non-zero @enable starts it */
+static void sde_hw_intf_enable_timing_engine(
+		struct sde_hw_intf *intf,
+		u8 enable)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+
+	/* Note: Display interface select is handled in top block hw layer */
+	SDE_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable ? 1 : 0);
+}
+
+/**
+ * sde_hw_intf_setup_prg_fetch - configure programmable fetch start
+ * @intf:  interface hw block
+ * @fetch: enable flag plus the vsync-counter fetch start value
+ *
+ * Toggles the fetch-enable bit (bit 31) in INTF_CONFIG via read-modify-
+ * write; the start counter register is only written when enabling.
+ */
+static void sde_hw_intf_setup_prg_fetch(
+		struct sde_hw_intf *intf,
+		const struct intf_prog_fetch *fetch)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	int fetch_enable;
+
+	/*
+	 * Fetch should always be outside the active lines. If the fetching
+	 * is programmed within active region, hardware behavior is unknown.
+	 */
+
+	fetch_enable = SDE_REG_READ(c, INTF_CONFIG);
+	if (fetch->enable) {
+		fetch_enable |= BIT(31);
+		SDE_REG_WRITE(c, INTF_PROG_FETCH_START,
+				fetch->fetch_start);
+	} else {
+		fetch_enable &= ~BIT(31);
+	}
+
+	SDE_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+/*
+ * Fill @s with the interface's current state: the timing-engine enable
+ * flag, and the frame/line counters (reported as zero while disabled).
+ */
+static void sde_hw_intf_get_status(
+		struct sde_hw_intf *intf,
+		struct intf_status *s)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+
+	s->is_en = SDE_REG_READ(c, INTF_TIMING_ENGINE_EN);
+	s->frame_count = s->is_en ? SDE_REG_READ(c, INTF_FRAME_COUNT) : 0;
+	s->line_count = s->is_en ? SDE_REG_READ(c, INTF_LINE_COUNT) : 0;
+}
+
+/**
+ * sde_hw_intf_set_misr - enable or disable interface MISR collection
+ * @intf:     interface hw block
+ * @misr_map: MISR configuration; ignored when NULL
+ *
+ * Always clears the previous MISR status first, then programs the control
+ * register: when enabling, the per-capture frame count field is set to 1.
+ */
+static void sde_hw_intf_set_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+	u32 config = 0;
+
+	if (!misr_map)
+		return;
+
+	SDE_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+	/* Clear data */
+	wmb();
+
+	if (misr_map->enable) {
+		config = (MISR_FRAME_COUNT_MASK & 1) |
+			(MISR_CTRL_ENABLE);
+
+		SDE_REG_WRITE(c, INTF_MISR_CTRL, config);
+	} else {
+		SDE_REG_WRITE(c, INTF_MISR_CTRL, 0);
+	}
+}
+
+/**
+ * sde_hw_intf_collect_misr - read one MISR signature into the batch buffer
+ * @intf:     interface hw block
+ * @misr_map: MISR state; ignored when NULL
+ *
+ * Stores the current signature at crc_value[last_idx] while last_idx is
+ * within both frame_count and the SDE_CRC_BATCH_SIZE buffer, then advances
+ * last_idx and drops the enable flag once the batch is exhausted.
+ *
+ * NOTE(review): the enable update compares last_idx <= SDE_CRC_BATCH_SIZE
+ * while the store is guarded by '<'; enable therefore stays set for one
+ * extra call after the buffer is full (no out-of-bounds write occurs) —
+ * confirm whether '<' was intended.
+ */
+static void sde_hw_intf_collect_misr(struct sde_hw_intf *intf,
+		struct sde_misr_params *misr_map)
+{
+	struct sde_hw_blk_reg_map *c = &intf->hw;
+
+	if (!misr_map)
+		return;
+
+	if (misr_map->enable) {
+		if (misr_map->last_idx < misr_map->frame_count &&
+			misr_map->last_idx < SDE_CRC_BATCH_SIZE)
+			misr_map->crc_value[misr_map->last_idx] =
+				SDE_REG_READ(c, INTF_MISR_SIGNATURE);
+	}
+
+	misr_map->enable =
+		misr_map->enable & (misr_map->last_idx <= SDE_CRC_BATCH_SIZE);
+
+	misr_map->last_idx++;
+}
+
+/*
+ * Populate the interface ops table. @cap is currently unused here;
+ * presumably reserved for feature-conditional ops — confirm before
+ * removing the parameter.
+ */
+static void _setup_intf_ops(struct sde_hw_intf_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_timing_gen = sde_hw_intf_setup_timing_engine;
+	ops->setup_prg_fetch  = sde_hw_intf_setup_prg_fetch;
+	ops->get_status = sde_hw_intf_get_status;
+	ops->enable_timing = sde_hw_intf_enable_timing_engine;
+	ops->setup_misr = sde_hw_intf_set_misr;
+	ops->collect_misr = sde_hw_intf_collect_misr;
+}
+
+/*
+ * Allocate an interface hw object for @idx, resolve its register range
+ * from the catalog, wire up the ops table and register the block with the
+ * debug dump facility. Returns ERR_PTR(-ENOMEM/-EINVAL) on failure.
+ */
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_intf *c;
+	struct sde_intf_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _intf_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		pr_err("failed to create sde_hw_intf %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	c->idx = idx;
+	c->cap = cfg;
+	c->mdss = m;
+	_setup_intf_ops(&c->ops, c->cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+/* Release an interface hw object; kfree() tolerates NULL, no guard needed */
+void sde_hw_intf_destroy(struct sde_hw_intf *intf)
+{
+	kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_intf.h b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
new file mode 100644
index 000000000000..f4a01cb64d7f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_intf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_INTF_H
+#define _SDE_HW_INTF_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_intf;
+
+/* Batch size of frames for collecting MISR data */
+#define SDE_CRC_BATCH_SIZE 16
+
+/**
+ * struct sde_misr_params : Interface for getting and setting MISR data
+ * Assumption is these functions will be called after clocks are enabled
+ * @ enable : enables/disables MISR
+ * @ frame_count : represents number of frames for which MISR is enabled
+ * @ last_idx: number of frames for which MISR data is collected
+ * @ crc_value: stores the collected MISR data
+ */
+struct sde_misr_params {
+ bool enable;
+ u32 frame_count;
+ u32 last_idx;
+ u32 crc_value[SDE_CRC_BATCH_SIZE];
+};
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct sde_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @ setup_timing_gen : programs the timing engine
+ * @ setup_prog_fetch : enables/disables the programmable fetch logic
+ * @ enable_timing: enable/disable timing engine
+ * @ get_status: returns if timing engine is enabled or not
+ * @ setup_misr: enables/disables MISR in HW register
+ * @ collect_misr: reads and stores MISR data from HW register
+ */
+struct sde_hw_intf_ops {
+ void (*setup_timing_gen)(struct sde_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct sde_format *fmt);
+
+ void (*setup_prg_fetch)(struct sde_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct sde_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct sde_hw_intf *intf,
+ struct intf_status *status);
+
+ void (*setup_misr)(struct sde_hw_intf *intf,
+ struct sde_misr_params *misr_map);
+
+ void (*collect_misr)(struct sde_hw_intf *intf,
+ struct sde_misr_params *misr_map);
+};
+
+struct sde_hw_intf {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* intf */
+ enum sde_intf idx;
+ const struct sde_intf_cfg *cap;
+ const struct sde_mdss_cfg *mdss;
+
+ /* ops */
+ struct sde_hw_intf_ops ops;
+};
+
+/**
+ * sde_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_intf *sde_hw_intf_init(enum sde_intf idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void sde_hw_intf_destroy(struct sde_hw_intf *intf);
+
+#endif /*_SDE_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.c b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
new file mode 100644
index 000000000000..8b4e0901458f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.c
@@ -0,0 +1,209 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_catalog.h"
+#include "sde_hwio.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_mdss.h"
+#include "sde_dbg.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These register are offset to mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+/**
+ * _lm_offset() - look up a layer mixer in the hw catalog and fill in its
+ * register-map descriptor for SDE_REG_READ/WRITE.
+ * Return: catalog entry on success, ERR_PTR(-EINVAL) if the id is unknown.
+ */
+static struct sde_lm_cfg *_lm_offset(enum sde_lm mixer,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mixer_count; i++) {
+		if (mixer == m->mixer[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mixer[i].base;
+			b->length = m->mixer[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_LM;
+			return &m->mixer[i];
+		}
+	}
+
+	/*
+	 * An unknown mixer id is invalid input, not an allocation failure;
+	 * return -EINVAL (was -ENOMEM) for consistency with
+	 * _pingpong_offset() in sde_hw_pingpong.c.
+	 */
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @ctx: mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ * Return: register offset, or -EINVAL for SDE_STAGE_BASE or an
+ * out-of-range stage.
+ */
+static inline int _stage_offset(struct sde_hw_mixer *ctx, enum sde_stage stage)
+{
+	const struct sde_lm_sub_blks *sblk = ctx->cap->sblk;
+
+	/* SDE_STAGE_BASE has no blend registers; reject it along with any
+	 * stage beyond the mixer's supported blend stage count.
+	 */
+	if (stage != SDE_STAGE_BASE && stage <= sblk->maxblendstages)
+		return sblk->blendstage_base[stage - 1];
+
+	return -EINVAL;
+}
+
+/* Program mixer output dimensions and the split-left/right selection. */
+static void sde_hw_lm_setup_out(struct sde_hw_mixer *ctx,
+		struct sde_hw_mixer_cfg *mixer)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 op_mode = SDE_REG_READ(c, LM_OP_MODE);
+	u32 outsize = (mixer->out_height << 16) | mixer->out_width;
+
+	SDE_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+	/* BIT(31) marks this mixer as the right half of a split pair */
+	if (mixer->right_mixer)
+		op_mode |= BIT(31);
+	else
+		op_mode &= ~BIT(31);
+
+	SDE_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+/* Program border color registers when border output is enabled. */
+static void sde_hw_lm_setup_border_color(struct sde_hw_mixer *ctx,
+		struct sde_mdss_color *color,
+		u8 border_en)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 lo, hi;
+
+	if (!border_en)
+		return;
+
+	/* 12-bit components, packed two per register */
+	lo = (color->color_0 & 0xFFF) | ((color->color_1 & 0xFFF) << 16);
+	hi = (color->color_2 & 0xFFF) | ((color->color_3 & 0xFFF) << 16);
+
+	SDE_REG_WRITE(c, LM_BORDER_COLOR_0, lo);
+	SDE_REG_WRITE(c, LM_BORDER_COLOR_1, hi);
+}
+
+/* Blend configuration for msmskunk targets: fg and bg constant alpha are
+ * packed into a single LM_BLEND0_CONST_ALPHA register per stage
+ * (fg in bits 23:16, bg in bits 7:0), unlike the legacy split registers.
+ */
+static void sde_hw_lm_setup_blend_config_msmskunk(struct sde_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int stage_off;
+	u32 const_alpha;
+
+	/* base stage has no blend registers */
+	if (stage == SDE_STAGE_BASE)
+		return;
+
+	stage_off = _stage_offset(ctx, stage);
+	if (WARN_ON(stage_off < 0))
+		return;
+
+	const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+	SDE_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+/* Legacy blend configuration: fg and bg constant alpha live in separate
+ * per-stage registers.
+ */
+static void sde_hw_lm_setup_blend_config(struct sde_hw_mixer *ctx,
+	u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	int off;
+
+	if (stage == SDE_STAGE_BASE)
+		return;
+
+	off = _stage_offset(ctx, stage);
+	if (WARN_ON(off < 0))
+		return;
+
+	SDE_REG_WRITE(c, LM_BLEND0_FG_ALPHA + off, fg_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_BG_ALPHA + off, bg_alpha);
+	SDE_REG_WRITE(c, LM_BLEND0_OP + off, blend_op);
+}
+
+/* Select alpha color component sources via the mixer op mode register. */
+static void sde_hw_lm_setup_color3(struct sde_hw_mixer *ctx,
+	uint32_t mixer_op_mode)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 mode;
+
+	/* Preserve the split/right-mixer control bits (31:30) while
+	 * replacing the remaining alpha-selection bits.
+	 */
+	mode = SDE_REG_READ(c, LM_OP_MODE) & (BIT(31) | BIT(30));
+	SDE_REG_WRITE(c, LM_OP_MODE, mode | mixer_op_mode);
+}
+
+/* Gamma correction setup: intentionally empty stub; keeps the ops table
+ * uniform until the feature is implemented.
+ */
+static void sde_hw_lm_gc(struct sde_hw_mixer *mixer,
+		void *cfg)
+{
+}
+
+/* Populate the mixer ops table; blend-config hook depends on target. */
+static void _setup_mixer_ops(struct sde_mdss_cfg *m,
+		struct sde_hw_lm_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_mixer_out = sde_hw_lm_setup_out;
+	/* msmskunk packs fg/bg const alpha into one register per stage */
+	if (IS_MSMSKUNK_TARGET(m->hwversion))
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config_msmskunk;
+	else
+		ops->setup_blend_config = sde_hw_lm_setup_blend_config;
+	ops->setup_alpha_out = sde_hw_lm_setup_color3;
+	ops->setup_border_color = sde_hw_lm_setup_border_color;
+	ops->setup_gc = sde_hw_lm_gc;
+}
+
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_hw_mixer *c;
+	struct sde_lm_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _lm_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		/* Log the failing index; matches sde_hw_intf_init() which
+		 * previously was the only init path to report this error.
+		 */
+		pr_err("failed to create sde_hw_mixer %d\n", idx);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_mixer_ops(m, &c->ops, c->cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+		c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+/* Frees the context allocated by sde_hw_lm_init(); NULL is a no-op. */
+void sde_hw_lm_destroy(struct sde_hw_mixer *lm)
+{
+	kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_lm.h b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
new file mode 100644
index 000000000000..7318c18ddaba
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_lm.h
@@ -0,0 +1,102 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_LM_H
+#define _SDE_HW_LM_H
+
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_mixer;
+
+struct sde_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct sde_hw_color3_cfg {
+ u8 keep_fg[SDE_STAGE_MAX];
+};
+
+/**
+ *
+ * struct sde_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct sde_hw_mixer *ctx,
+ struct sde_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct sde_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct sde_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct sde_hw_mixer *ctx,
+ struct sde_mdss_color *color,
+ u8 border_en);
+ /**
+ * setup_gc : enable/disable gamma correction feature
+ */
+ void (*setup_gc)(struct sde_hw_mixer *mixer,
+ void *cfg);
+
+};
+
+struct sde_hw_mixer {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* lm */
+ enum sde_lm idx;
+ const struct sde_lm_cfg *cap;
+ const struct sde_mdp_cfg *mdp;
+ const struct sde_ctl_cfg *ctl;
+
+ /* ops */
+ struct sde_hw_lm_ops ops;
+};
+
+/**
+ * sde_hw_lm_init(): Initializes the mixer hw driver object.
+ * should be called once before accessing every mixer.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct sde_hw_mixer *sde_hw_lm_init(enum sde_lm idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void sde_hw_lm_destroy(struct sde_hw_mixer *lm);
+
+#endif /*_SDE_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_mdss.h b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
new file mode 100644
index 000000000000..3d63d01a6d4e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_mdss.h
@@ -0,0 +1,470 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_MDSS_H
+#define _SDE_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define SDE_DBG_NAME "sde"
+
+#define SDE_NONE 0
+
+#ifndef SDE_CSC_MATRIX_COEFF_SIZE
+#define SDE_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef SDE_CSC_CLAMP_SIZE
+#define SDE_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef SDE_CSC_BIAS_SIZE
+#define SDE_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef SDE_MAX_PLANES
+#define SDE_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef SDE_MAX_DE_CURVES
+#define SDE_MAX_DE_CURVES 3
+#endif
+
+enum sde_format_flags {
+ SDE_FORMAT_FLAG_YUV_BIT,
+ SDE_FORMAT_FLAG_DX_BIT,
+ SDE_FORMAT_FLAG_COMPRESSED_BIT,
+ SDE_FORMAT_FLAG_BIT_MAX,
+};
+
+#define SDE_FORMAT_FLAG_YUV BIT(SDE_FORMAT_FLAG_YUV_BIT)
+#define SDE_FORMAT_FLAG_DX BIT(SDE_FORMAT_FLAG_DX_BIT)
+#define SDE_FORMAT_FLAG_COMPRESSED BIT(SDE_FORMAT_FLAG_COMPRESSED_BIT)
+#define SDE_FORMAT_IS_YUV(X) \
+ (test_bit(SDE_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define SDE_FORMAT_IS_DX(X) \
+ (test_bit(SDE_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define SDE_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == SDE_FETCH_LINEAR)
+#define SDE_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == SDE_FETCH_UBWC) && \
+ !test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define SDE_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == SDE_FETCH_UBWC) && \
+ test_bit(SDE_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define TO_S15D16(_x_) ((_x_) << 7)
+
+#define SDE_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define SDE_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define SDE_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define SDE_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define SDE_BLEND_FG_INV_ALPHA (1 << 2)
+#define SDE_BLEND_FG_MOD_ALPHA (1 << 3)
+#define SDE_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define SDE_BLEND_FG_TRANSP_EN (1 << 5)
+#define SDE_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define SDE_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define SDE_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define SDE_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define SDE_BLEND_BG_INV_ALPHA (1 << 10)
+#define SDE_BLEND_BG_MOD_ALPHA (1 << 11)
+#define SDE_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define SDE_BLEND_BG_TRANSP_EN (1 << 13)
+
+enum sde_hw_blk_type {
+ SDE_HW_BLK_TOP = 0,
+ SDE_HW_BLK_SSPP,
+ SDE_HW_BLK_LM,
+ SDE_HW_BLK_DSPP,
+ SDE_HW_BLK_CTL,
+ SDE_HW_BLK_CDM,
+ SDE_HW_BLK_PINGPONG,
+ SDE_HW_BLK_INTF,
+ SDE_HW_BLK_WB,
+ SDE_HW_BLK_MAX,
+};
+
+enum sde_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum sde_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum sde_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum sde_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum sde_stage {
+ SDE_STAGE_BASE = 0,
+ SDE_STAGE_0,
+ SDE_STAGE_1,
+ SDE_STAGE_2,
+ SDE_STAGE_3,
+ SDE_STAGE_4,
+ SDE_STAGE_5,
+ SDE_STAGE_6,
+ SDE_STAGE_MAX
+};
+enum sde_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum sde_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum sde_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum sde_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum sde_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum sde_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_DP = 0xa,
+ INTF_TYPE_MAX,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum sde_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum sde_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum sde_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum sde_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum sde_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum sde_vbif {
+ VBIF_0,
+ VBIF_1,
+ VBIF_MAX,
+ VBIF_RT = VBIF_0,
+ VBIF_NRT = VBIF_1
+};
+
+enum sde_iommu_domain {
+ SDE_IOMMU_DOMAIN_UNSECURE,
+ SDE_IOMMU_DOMAIN_SECURE,
+ SDE_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * SDE HW,Component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum sde_plane_type - defines how the color component pixel packing
+ * @SDE_PLANE_INTERLEAVED : Color components in single plane
+ * @SDE_PLANE_PLANAR : Color component in separate planes
+ * @SDE_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum sde_plane_type {
+ SDE_PLANE_INTERLEAVED,
+ SDE_PLANE_PLANAR,
+ SDE_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum sde_chroma_samp_type - chroma sub-sampling type
+ * @SDE_CHROMA_RGB : No chroma subsampling
+ * @SDE_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @SDE_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @SDE_CHROMA_420 : 420 subsampling
+ */
+enum sde_chroma_samp_type {
+ SDE_CHROMA_RGB,
+ SDE_CHROMA_H2V1,
+ SDE_CHROMA_H1V2,
+ SDE_CHROMA_420
+};
+
+/**
+ * sde_fetch_type - Defines How SDE HW fetches data
+ * @SDE_FETCH_LINEAR : fetch is line by line
+ * @SDE_FETCH_TILE : fetches data in Z order from a tile
+ * @SDE_FETCH_UBWC : fetch and decompress data
+ */
+enum sde_fetch_type {
+ SDE_FETCH_LINEAR,
+ SDE_FETCH_TILE,
+ SDE_FETCH_UBWC
+};
+
+/**
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum sde_3d_blend_mode
+ * Describes how the 3d data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT : column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum sde_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+enum sde_csc_type {
+ SDE_CSC_RGB2YUV_601L,
+ SDE_CSC_RGB2YUV_601FR,
+ SDE_CSC_RGB2YUV_709L,
+ SDE_CSC_RGB2YUV_2020L,
+ SDE_CSC_RGB2YUV_2020FR,
+ SDE_MAX_CSC
+};
+
+/** struct sde_format - defines the format configuration which
+ * allows SDE HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 component
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @is_yuv: is format a yuv variant
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct sde_format {
+ struct msm_format base;
+ enum sde_plane_type fetch_planes;
+ u8 element[SDE_MAX_PLANES];
+ u8 bits[SDE_MAX_PLANES];
+ enum sde_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum sde_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, SDE_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_sde_format(x) container_of(x, struct sde_format, base)
+
+/**
+ * struct sde_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct sde_hw_fmt_layout {
+ const struct sde_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[SDE_MAX_PLANES];
+ uint32_t plane_size[SDE_MAX_PLANES];
+ uint32_t plane_pitch[SDE_MAX_PLANES];
+};
+
+struct sde_rect {
+ u16 x;
+ u16 y;
+ u16 w;
+ u16 h;
+};
+
+struct sde_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[SDE_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[SDE_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[SDE_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[SDE_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[SDE_CSC_CLAMP_SIZE];
+};
+
+/**
+ * struct sde_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct sde_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define SDE_DBG_MASK_NONE (1 << 0)
+#define SDE_DBG_MASK_CDM (1 << 1)
+#define SDE_DBG_MASK_DSPP (1 << 2)
+#define SDE_DBG_MASK_INTF (1 << 3)
+#define SDE_DBG_MASK_LM (1 << 4)
+#define SDE_DBG_MASK_CTL (1 << 5)
+#define SDE_DBG_MASK_PINGPONG (1 << 6)
+#define SDE_DBG_MASK_SSPP (1 << 7)
+#define SDE_DBG_MASK_WB (1 << 8)
+#define SDE_DBG_MASK_TOP (1 << 9)
+#define SDE_DBG_MASK_VBIF (1 << 10)
+
+/**
+ * struct sde_hw_cp_cfg: hardware dspp/lm feature payload.
+ * @payload: Feature specific payload.
+ * @len: Length of the payload.
+ */
+struct sde_hw_cp_cfg {
+ void *payload;
+ u32 len;
+};
+
+#endif /* _SDE_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
new file mode 100644
index 000000000000..8488d03af79a
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.c
@@ -0,0 +1,173 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_pingpong.h"
+#include "sde_dbg.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+#define PP_AUTOREFRESH_CONFIG 0x030
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+#define PP_DSC_MODE 0x0a0
+#define PP_DCE_DATA_IN_SWAP 0x0ac
+#define PP_DCE_DATA_OUT_SWAP 0x0c8
+
+/* Look up a pingpong block in the hw catalog and fill in the register-map
+ * descriptor used by SDE_REG_READ/WRITE; ERR_PTR(-EINVAL) if unknown.
+ */
+static struct sde_pingpong_cfg *_pingpong_offset(enum sde_pingpong pp,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->pingpong_count; i++) {
+		if (pp != m->pingpong[i].id)
+			continue;
+
+		b->base_off = addr;
+		b->blk_off = m->pingpong[i].base;
+		b->length = m->pingpong[i].len;
+		b->hwversion = m->hwversion;
+		b->log_mask = SDE_DBG_MASK_PINGPONG;
+		return &m->pingpong[i];
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/* Program the tear check block: vsync counter source and count, sync
+ * height/threshold values, and the initial read/write pointer positions.
+ * Always returns 0.
+ */
+static int sde_hw_pp_setup_te_config(struct sde_hw_pingpong *pp,
+		struct sde_hw_tear_check *te)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+	int cfg;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+	if (te->hw_vsync_mode)
+		cfg |= BIT(20); /* use hw vsync as the counter source */
+
+	cfg |= te->vsync_count;
+
+	SDE_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+	SDE_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+	SDE_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+	SDE_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+	SDE_REG_WRITE(c, PP_START_POS, te->start_pos);
+	/* continue threshold in [31:16], start threshold in [15:0] */
+	SDE_REG_WRITE(c, PP_SYNC_THRESH,
+			((te->sync_threshold_continue << 16) |
+			 te->sync_threshold_start));
+	/* seed the write pointer just past the start threshold */
+	SDE_REG_WRITE(c, PP_SYNC_WRCOUNT,
+			(te->start_pos + te->sync_threshold_start + 1));
+
+	return 0;
+}
+
+/* Arm or disarm hardware autorefresh; always returns 0. */
+int sde_hw_pp_setup_autorefresh_config(struct sde_hw_pingpong *pp,
+		struct sde_hw_autorefresh *cfg)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+	u32 val = 0;
+
+	/* BIT(31) enables autorefresh; low bits carry the frame count */
+	if (cfg->enable)
+		val = BIT(31) | cfg->frame_count;
+
+	SDE_REG_WRITE(c, PP_AUTOREFRESH_CONFIG, val);
+
+	return 0;
+}
+
+/* DSC compression setup: intentionally empty stub; keeps the ops table
+ * uniform until the feature is implemented. Always returns 0.
+ */
+int sde_hw_pp_setup_dsc_compression(struct sde_hw_pingpong *pp,
+	struct sde_hw_dsc_cfg *cfg)
+{
+	return 0;
+}
+/* Enable or disable the tear check block; always returns 0. */
+int sde_hw_pp_enable_te(struct sde_hw_pingpong *pp, bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+
+	SDE_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+	return 0;
+}
+
+/* Report current vsync counters from the tear check block; returns 0. */
+int sde_hw_pp_get_vsync_info(struct sde_hw_pingpong *pp,
+		struct sde_hw_pp_vsync_info *info)
+{
+	struct sde_hw_blk_reg_map *c = &pp->hw;
+	u32 reg;
+
+	/* rd-pointer init value: low 16 bits */
+	reg = SDE_REG_READ(c, PP_VSYNC_INIT_VAL);
+	info->init_val = reg & 0xffff;
+
+	/* vsync count in [31:16], current line count in [15:0] */
+	reg = SDE_REG_READ(c, PP_INT_COUNT_VAL);
+	info->vsync_count = reg >> 16;
+	info->line_count = reg & 0xffff;
+
+	return 0;
+}
+
+/* Populate the pingpong ops table; 'cap' is reserved for feature gating. */
+static void _setup_pingpong_ops(struct sde_hw_pingpong_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_tearcheck = sde_hw_pp_setup_te_config;
+	ops->enable_tearcheck = sde_hw_pp_enable_te;
+	ops->get_vsync_info = sde_hw_pp_get_vsync_info;
+	ops->setup_autorefresh = sde_hw_pp_setup_autorefresh_config;
+	ops->setup_dsc = sde_hw_pp_setup_dsc_compression;
+}
+
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m)
+{
+	struct sde_pingpong_cfg *cfg;
+	struct sde_hw_pingpong *pp;
+
+	pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+	if (!pp)
+		return ERR_PTR(-ENOMEM);
+
+	/* Resolve this pingpong's register block from the catalog */
+	cfg = _pingpong_offset(idx, m, addr, &pp->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(pp);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Record capabilities and hook up the hw ops table */
+	pp->idx = idx;
+	pp->pingpong_hw_cap = cfg;
+	_setup_pingpong_ops(&pp->ops, pp->pingpong_hw_cap->features);
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
+			pp->hw.blk_off, pp->hw.blk_off + pp->hw.length,
+			pp->hw.xin_id);
+
+	return pp;
+}
+
+/* Frees the context allocated by sde_hw_pingpong_init(); NULL is a no-op. */
+void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp)
+{
+	kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
new file mode 100644
index 000000000000..fc3bea54b485
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_pingpong.h
@@ -0,0 +1,123 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_PINGPONG_H
+#define _SDE_HW_PINGPONG_H
+
+struct sde_hw_pingpong;
+
+struct sde_hw_tear_check {
+ /*
+ * This is ratio of MDP VSYNC clk freq(Hz) to
+ * refresh rate divided by no of lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
+
+struct sde_hw_autorefresh {
+ bool enable;
+ u32 frame_count;
+};
+
+struct sde_hw_pp_vsync_info {
+ u32 init_val; /* value of rd pointer at vsync edge */
+ u32 vsync_count; /* mdp clocks to complete one line */
+ u32 line_count; /* current line count */
+};
+
+struct sde_hw_dsc_cfg {
+ u8 enable;
+};
+
+/**
+ *
+ * struct sde_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck :
+ * @enable_tearcheck :
+ * @get_vsync_info :
+ * @setup_autorefresh :
+ * @setup_dsc :
+ */
+struct sde_hw_pingpong_ops {
+ /**
+	 * enables vsync generation and sets up init value of
+	 * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct sde_hw_pingpong *pp,
+ struct sde_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct sde_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct sde_hw_pingpong *pp,
+ struct sde_hw_pp_vsync_info *info);
+
+ /**
+ * configure and enable the autorefresh config
+ */
+ int (*setup_autorefresh)(struct sde_hw_pingpong *pp,
+ struct sde_hw_autorefresh *cfg);
+
+ /**
+ * Program the dsc compression block
+ */
+ int (*setup_dsc)(struct sde_hw_pingpong *pp,
+ struct sde_hw_dsc_cfg *cfg);
+};
+
+struct sde_hw_pingpong {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum sde_pingpong idx;
+ const struct sde_pingpong_cfg *pingpong_hw_cap;
+
+ /* ops */
+ struct sde_hw_pingpong_ops ops;
+};
+
+/**
+ * sde_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated sde_hw_pingpong context
+ */
+struct sde_hw_pingpong *sde_hw_pingpong_init(enum sde_pingpong idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m);
+
+/**
+ * sde_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by sde_hw_pingpong_init
+ */
+void sde_hw_pingpong_destroy(struct sde_hw_pingpong *pp);
+
+#endif /*_SDE_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.c b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
new file mode 100644
index 000000000000..f5b433c61776
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.c
@@ -0,0 +1,959 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_sspp.h"
+#include "sde_hw_color_processing.h"
+#include "sde_dbg.h"
+
+#define SDE_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* SDE_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* SDE_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/* SDE_SSPP_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_MISR_CTRL 0x70
+#define QSEED3_MISR_SIGNATURE_0 0x74
+#define QSEED3_MISR_SIGNATURE_1 0x78
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/**
+ * _sspp_subblk_offset - look up the register offset of a SSPP sub-block
+ * @ctx: pointer to pipe context
+ * @s_id: SDE_SSPP_* sub-block identifier
+ * @idx: output, sub-block register offset relative to the pipe base
+ * Return: 0 on success, -EINVAL on invalid arguments or unknown sub-block
+ */
+static inline int _sspp_subblk_offset(struct sde_hw_pipe *ctx,
+		int s_id,
+		u32 *idx)
+{
+	int rc = 0;
+	const struct sde_sspp_sub_blks *sblk;
+
+	/* validate before dereferencing: the original read ctx->cap->sblk
+	 * ahead of its NULL check on ctx
+	 */
+	if (!ctx || !ctx->cap || !ctx->cap->sblk || !idx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+	switch (s_id) {
+	case SDE_SSPP_SRC:
+		*idx = sblk->src_blk.base;
+		break;
+	case SDE_SSPP_SCALER_QSEED2:
+	case SDE_SSPP_SCALER_QSEED3:
+	case SDE_SSPP_SCALER_RGB:
+		*idx = sblk->scaler_blk.base;
+		break;
+	case SDE_SSPP_CSC:
+	case SDE_SSPP_CSC_10BIT:
+		*idx = sblk->csc_blk.base;
+		break;
+	case SDE_SSPP_HSIC:
+		*idx = sblk->hsic_blk.base;
+		break;
+	case SDE_SSPP_PCC:
+		*idx = sblk->pcc_blk.base;
+		break;
+	case SDE_SSPP_MEMCOLOR:
+		*idx = sblk->memcolor_blk.base;
+		break;
+	default:
+		rc = -EINVAL;
+	}
+
+	return rc;
+}
+
+/**
+ * _sspp_setup_opmode - read-modify-write the VIG opmode register
+ * @ctx: pipe context
+ * @mask: opmode bit(s) to change
+ * @en: non-zero to set the bits, zero to clear them
+ *
+ * Only touches pipes that expose both the QSEED2 scaler and the CSC
+ * feature; all other pipes are left untouched.
+ */
+static void _sspp_setup_opmode(struct sde_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+		_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) ||
+		!test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+		return;
+
+	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+/**
+ * _sspp_setup_csc10_opmode - read-modify-write the 10-bit CSC opmode
+ * @ctx: pipe context
+ * @mask: opmode bit(s) to change
+ * @en: non-zero to set the bits, zero to clear them
+ */
+static void _sspp_setup_csc10_opmode(struct sde_hw_pipe *ctx,
+		u32 mask, u8 en)
+{
+	u32 idx;
+	u32 opmode;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_CSC_10BIT, &idx))
+		return;
+
+	opmode = SDE_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+	if (en)
+		opmode |= mask;
+	else
+		opmode &= ~mask;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/**
+ * sde_hw_sspp_setup_format - program source pixel format, unpack pattern,
+ * flip/rotation flags and fetch mode for the pipe
+ * @ctx: pipe context
+ * @fmt: SDE format description
+ * @flags: bitmask of SDE_SSPP_* flags (flip, rot90, solid fill, secure)
+ */
+static void sde_hw_sspp_setup_format(struct sde_hw_pipe *ctx,
+		const struct sde_format *fmt, u32 flags)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 chroma_samp, unpack, src_format;
+	u32 secure = 0;
+	u32 opmode = 0;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !fmt)
+		return;
+
+	c = &ctx->hw;
+	/* read-modify-write: only the bits owned here are cleared */
+	opmode = SDE_REG_READ(c, SSPP_SRC_OP_MODE + idx);
+	opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+			MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+	if (flags & SDE_SSPP_SECURE_OVERLAY_SESSION)
+		secure = 0xF;
+
+	if (flags & SDE_SSPP_FLIP_LR)
+		opmode |= MDSS_MDP_OP_FLIP_LR;
+	if (flags & SDE_SSPP_FLIP_UD)
+		opmode |= MDSS_MDP_OP_FLIP_UD;
+
+	/* a 90-degree rotated source swaps the chroma subsampling axes */
+	chroma_samp = fmt->chroma_sample;
+	if (flags & SDE_SSPP_SOURCE_ROTATED_90) {
+		if (chroma_samp == SDE_CHROMA_H2V1)
+			chroma_samp = SDE_CHROMA_H1V2;
+		else if (chroma_samp == SDE_CHROMA_H1V2)
+			chroma_samp = SDE_CHROMA_H2V1;
+	}
+
+	src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+	if (flags & SDE_SSPP_ROT_90)
+		src_format |= BIT(11); /* ROT90 */
+
+	if (fmt->alpha_enable && fmt->fetch_planes == SDE_PLANE_INTERLEAVED)
+		src_format |= BIT(8); /* SRCC3_EN */
+
+	if (flags & SDE_SSPP_SOLID_FILL)
+		src_format |= BIT(22);
+
+	unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+		(fmt->element[1] << 8) | (fmt->element[0] << 0);
+	src_format |= ((fmt->unpack_count - 1) << 12) |
+		(fmt->unpack_tight << 17) |
+		(fmt->unpack_align_msb << 18) |
+		((fmt->bpp - 1) << 9);
+
+	/* non-linear fetch: enable BWC for UBWC and program fetch config */
+	if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
+		if (SDE_FORMAT_IS_UBWC(fmt))
+			opmode |= MDSS_MDP_OP_BWC_EN;
+		src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+		SDE_REG_WRITE(c, SSPP_FETCH_CONFIG,
+			SDE_FETCH_CONFIG_RESET_VALUE |
+			ctx->highest_bank_bit << 18);
+	}
+
+	opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+	/* if this is YUV pixel format, enable CSC */
+	if (SDE_FORMAT_IS_YUV(fmt))
+		src_format |= BIT(15);
+
+	if (SDE_FORMAT_IS_DX(fmt))
+		src_format |= BIT(14);
+
+	/* update scaler opmode, if appropriate */
+	if (test_bit(SDE_SSPP_CSC, &ctx->cap->features))
+		_sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+			SDE_FORMAT_IS_YUV(fmt));
+	else if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features))
+		_sspp_setup_csc10_opmode(ctx,
+			VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+			SDE_FORMAT_IS_YUV(fmt));
+
+	SDE_REG_WRITE(c, SSPP_SRC_FORMAT + idx, src_format);
+	SDE_REG_WRITE(c, SSPP_SRC_UNPACK_PATTERN + idx, unpack);
+	SDE_REG_WRITE(c, SSPP_SRC_OP_MODE + idx, opmode);
+	SDE_REG_WRITE(c, SSPP_SRC_ADDR_SW_STATUS + idx, secure);
+
+	/* clear previous UBWC error */
+	SDE_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+/**
+ * sde_hw_sspp_setup_pe_config - program SW pixel extension overrides
+ * @ctx: pipe context
+ * @pe_ext: per-plane pixel extension (fetch/repeat/roi) configuration
+ *
+ * Fix: the color 3 top/bottom register was written with the left/right
+ * value (lr_pe[3]) instead of tb_pe[3] — an obvious copy-paste defect.
+ */
+static void sde_hw_sspp_setup_pe_config(struct sde_hw_pipe *ctx,
+		struct sde_hw_pixel_ext *pe_ext)
+{
+	struct sde_hw_blk_reg_map *c;
+	u8 color;
+	u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+	const u32 bytemask = 0xff;
+	const u32 shortmask = 0xffff;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !pe_ext)
+		return;
+
+	c = &ctx->hw;
+
+	/* program SW pixel extension override for all pipes*/
+	for (color = 0; color < SDE_MAX_PLANES; color++) {
+		/* color 2 has the same set of registers as color 1 */
+		if (color == 2)
+			continue;
+
+		lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+			((pe_ext->right_rpt[color] & bytemask) << 16)|
+			((pe_ext->left_ftch[color] & bytemask) << 8)|
+			(pe_ext->left_rpt[color] & bytemask);
+
+		tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+			((pe_ext->btm_rpt[color] & bytemask) << 16)|
+			((pe_ext->top_ftch[color] & bytemask) << 8)|
+			(pe_ext->top_rpt[color] & bytemask);
+
+		tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+			pe_ext->num_ext_pxls_top[color] +
+			pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+			((pe_ext->roi_w[color] +
+			pe_ext->num_ext_pxls_left[color] +
+			pe_ext->num_ext_pxls_right[color]) & shortmask);
+	}
+
+	/* color 0 */
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+			tot_req_pixels[0]);
+
+	/* color 1 and color 2 */
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+			tot_req_pixels[1]);
+
+	/* color 3 */
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+	SDE_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+			tot_req_pixels[3]);
+}
+
+/**
+ * _sde_hw_sspp_setup_scaler - program the QSEED2 scaler
+ * @ctx: pipe context
+ * @sspp: pipe configuration (unused by the QSEED2 path)
+ * @pe: pixel extension config carrying filter selections and phase data
+ * @scaler_cfg: opaque scaler configuration (unused by the QSEED2 path)
+ */
+static void _sde_hw_sspp_setup_scaler(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *sspp,
+		struct sde_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	int config_h = 0x0;
+	int config_v = 0x0;
+	u32 idx;
+
+	(void)sspp;
+	(void)scaler_cfg;
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !pe)
+		return;
+
+	c = &ctx->hw;
+
+	/* enable scaler(s) if valid filter set */
+	if (pe->horz_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
+		config_h |= pe->horz_filter[SDE_SSPP_COMP_0] << 8;
+	if (pe->horz_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
+		config_h |= pe->horz_filter[SDE_SSPP_COMP_1_2] << 12;
+	if (pe->horz_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
+		config_h |= pe->horz_filter[SDE_SSPP_COMP_3] << 16;
+
+	/* BIT(0) presumably the horizontal enable bit — set only when at
+	 * least one valid horizontal filter was selected */
+	if (config_h)
+		config_h |= BIT(0);
+
+	if (pe->vert_filter[SDE_SSPP_COMP_0] < SDE_SCALE_FILTER_MAX)
+		config_v |= pe->vert_filter[SDE_SSPP_COMP_0] << 10;
+	if (pe->vert_filter[SDE_SSPP_COMP_1_2] < SDE_SCALE_FILTER_MAX)
+		config_v |= pe->vert_filter[SDE_SSPP_COMP_1_2] << 14;
+	if (pe->vert_filter[SDE_SSPP_COMP_3] < SDE_SCALE_FILTER_MAX)
+		config_v |= pe->vert_filter[SDE_SSPP_COMP_3] << 18;
+
+	/* BIT(1) presumably the vertical enable bit */
+	if (config_v)
+		config_v |= BIT(1);
+
+	SDE_REG_WRITE(c, SCALE_CONFIG + idx, config_h | config_v);
+	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_X + idx,
+		pe->init_phase_x[SDE_SSPP_COMP_0]);
+	SDE_REG_WRITE(c, COMP0_3_INIT_PHASE_Y + idx,
+		pe->init_phase_y[SDE_SSPP_COMP_0]);
+	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_X + idx,
+		pe->phase_step_x[SDE_SSPP_COMP_0]);
+	SDE_REG_WRITE(c, COMP0_3_PHASE_STEP_Y + idx,
+		pe->phase_step_y[SDE_SSPP_COMP_0]);
+
+	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_X + idx,
+		pe->init_phase_x[SDE_SSPP_COMP_1_2]);
+	SDE_REG_WRITE(c, COMP1_2_INIT_PHASE_Y + idx,
+		pe->init_phase_y[SDE_SSPP_COMP_1_2]);
+	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_X + idx,
+		pe->phase_step_x[SDE_SSPP_COMP_1_2]);
+	SDE_REG_WRITE(c, COMP1_2_PHASE_STEP_Y + idx,
+		pe->phase_step_y[SDE_SSPP_COMP_1_2]);
+}
+
+/**
+ * _sde_hw_sspp_setup_scaler3_lut - load QSEED3 coefficient LUTs
+ * @ctx: pipe context
+ * @scaler3_cfg: QSEED3 config carrying LUT pointers, indices and flags
+ *
+ * Writes the directional, circular and separable filter LUT regions
+ * selected via scaler3_cfg->lut_flag; a LUT is loaded only when its flag
+ * bit is set AND its supplied length matches the expected region size.
+ * Finally requests a LUT bank swap if the swap flag bit is set.
+ */
+static void _sde_hw_sspp_setup_scaler3_lut(struct sde_hw_pipe *ctx,
+		struct sde_hw_scaler3_cfg *scaler3_cfg)
+{
+	u32 idx;
+	int i, j, filter;
+	int config_lut = 0x0;
+	unsigned long lut_flags;
+	u32 lut_addr, lut_offset, lut_len;
+	u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+	/* per-filter region table: {size unit, register offset}; lut_len
+	 * below is the first entry << 2, i.e. the table stores a quarter
+	 * of the region's u32 count */
+	static const uint32_t offset[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+		{{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+		{{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+		{{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+		{{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+		{{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+	};
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) ||
+		!scaler3_cfg)
+		return;
+
+	lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+	if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+		(scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+		lut[0] = scaler3_cfg->dir_lut;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[1] = scaler3_cfg->cir_lut +
+			scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+		(scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+		lut[2] = scaler3_cfg->cir_lut +
+			scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[3] = scaler3_cfg->sep_lut +
+			scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+	if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+		(scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+		(scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+		lut[4] = scaler3_cfg->sep_lut +
+			scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+		config_lut = 1;
+	}
+
+	/* stream each selected LUT, region by region, one u32 per write */
+	if (config_lut) {
+		for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+			if (!lut[filter])
+				continue;
+			lut_offset = 0;
+			for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+				lut_addr = QSEED3_COEF_LUT + idx
+					+ offset[filter][i][1];
+				lut_len = offset[filter][i][0] << 2;
+				for (j = 0; j < lut_len; j++) {
+					SDE_REG_WRITE(&ctx->hw,
+						lut_addr,
+						(lut[filter])[lut_offset++]);
+					lut_addr += 4;
+				}
+			}
+		}
+	}
+
+	if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+		SDE_REG_WRITE(&ctx->hw, QSEED3_COEF_LUT_CTRL + idx, BIT(0));
+
+}
+
+/**
+ * _sde_hw_sspp_setup_scaler3_de - program the QSEED3 detail enhancer
+ * @ctx: pipe context
+ * @de_cfg: detail enhancer configuration; no-op when de_cfg->enable is 0
+ */
+static void _sde_hw_sspp_setup_scaler3_de(struct sde_hw_pipe *ctx,
+		struct sde_hw_scaler3_de_cfg *de_cfg)
+{
+	u32 idx;
+	u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+	u32 adjust_a, adjust_b, adjust_c;
+	struct sde_hw_blk_reg_map *hw;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !de_cfg)
+		return;
+
+	if (!de_cfg->enable)
+		return;
+
+	hw = &ctx->hw;
+	/* pack the configuration fields into their register layouts */
+	sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+		((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+	sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+		((de_cfg->prec_shift & 0x7) << 13) |
+		((de_cfg->clip & 0x7) << 16);
+
+	shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+		((de_cfg->thr_dieout & 0x3FF) << 16);
+
+	de_thr = (de_cfg->thr_low & 0x3FF) |
+		((de_cfg->thr_high & 0x3FF) << 16);
+
+	adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+		((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+	adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+		((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+	adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+		((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+		((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN + idx, sharp_lvl);
+	SDE_REG_WRITE(hw, QSEED3_DE_SHARPEN_CTL + idx, sharp_ctl);
+	SDE_REG_WRITE(hw, QSEED3_DE_SHAPE_CTL + idx, shape_ctl);
+	SDE_REG_WRITE(hw, QSEED3_DE_THRESHOLD + idx, de_thr);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_0 + idx, adjust_a);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_1 + idx, adjust_b);
+	SDE_REG_WRITE(hw, QSEED3_DE_ADJUST_DATA_2 + idx, adjust_c);
+
+}
+
+/**
+ * _sde_hw_sspp_setup_scaler3 - program the QSEED3 scaler
+ * @ctx: pipe context
+ * @sspp: pipe configuration (format is read to pick YUV/DX paths)
+ * @pe: pixel extension config (unused by the QSEED3 path)
+ * @scaler_cfg: pointer to struct sde_hw_scaler3_cfg
+ *
+ * When scaler3_cfg->enable is false, only the op mode register is
+ * written (scaling left disabled, DX/alpha bits still updated) via the
+ * end: label.
+ *
+ * NOTE(review): the !ctx/!ctx->cap/!ctx->cap->sblk checks run after
+ * _sspp_subblk_offset() has already dereferenced ctx->cap->sblk; the
+ * helper should validate ctx before use.
+ */
+static void _sde_hw_sspp_setup_scaler3(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *sspp,
+		struct sde_hw_pixel_ext *pe,
+		void *scaler_cfg)
+{
+	u32 idx;
+	u32 op_mode = 0;
+	u32 phase_init, preload, src_y_rgb, src_uv, dst;
+	struct sde_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+	(void)pe;
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED3, &idx) || !sspp
+		|| !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+		return;
+
+	if (!scaler3_cfg->enable)
+		goto end;
+
+	op_mode |= BIT(0);
+	op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+	/* YUV sources also need the chroma filter configured */
+	if (SDE_FORMAT_IS_YUV(sspp->layout.format)) {
+		op_mode |= BIT(12);
+		op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+	}
+
+	op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+	op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+	preload =
+		((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+		((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+		((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+		((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+	src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+		((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+	src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+		((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+	dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+		((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+	if (scaler3_cfg->de.enable) {
+		_sde_hw_sspp_setup_scaler3_de(ctx, &scaler3_cfg->de);
+		op_mode |= BIT(8);
+	}
+
+	if (scaler3_cfg->lut_flag)
+		_sde_hw_sspp_setup_scaler3_lut(ctx, scaler3_cfg);
+
+	/* scaler v1.2 packs all init phases in one register; later
+	 * versions use four wider per-plane registers */
+	if (ctx->cap->sblk->scaler_blk.version == 0x1002) {
+		phase_init =
+			((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+			((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+			((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+			((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT + idx, phase_init);
+	} else {
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_H + idx,
+			scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_Y_V + idx,
+			scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_H + idx,
+			scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+		SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_INIT_UV_V + idx,
+			scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+	}
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_H + idx,
+		scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_Y_V + idx,
+		scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_H + idx,
+		scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PHASE_STEP_UV_V + idx,
+		scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_PRELOAD + idx, preload);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_Y_RGB_A + idx, src_y_rgb);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_SRC_SIZE_UV + idx, src_uv);
+
+	SDE_REG_WRITE(&ctx->hw, QSEED3_DST_SIZE + idx, dst);
+
+end:
+	/* op mode is written even when scaling is disabled */
+	if (!SDE_FORMAT_IS_DX(sspp->layout.format))
+		op_mode |= BIT(14);
+
+	if (sspp->layout.format->alpha_enable) {
+		op_mode |= BIT(10);
+		if (ctx->cap->sblk->scaler_blk.version == 0x1002)
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+		else
+			op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+	}
+	SDE_REG_WRITE(&ctx->hw, QSEED3_OP_MODE + idx, op_mode);
+}
+
+/**
+ * sde_hw_sspp_setup_rects - program source/destination rectangles,
+ * strides, decimation and (optionally) pixel extension and scaling
+ * @ctx: pipe context
+ * @cfg: pipe config with src/dst rects, layout strides and decimation
+ * @pe_ext: optional pixel extension override, programmed when non-NULL
+ * @scale_cfg: opaque scaler config forwarded to ctx->ops.setup_scaler
+ */
+static void sde_hw_sspp_setup_rects(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *cfg,
+		struct sde_hw_pixel_ext *pe_ext,
+		void *scale_cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+	u32 decimation = 0;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx) || !cfg)
+		return;
+
+	c = &ctx->hw;
+
+	/* program pixel extension override */
+	if (pe_ext)
+		sde_hw_sspp_setup_pe_config(ctx, pe_ext);
+
+	/* src and dest rect programming */
+	src_xy = (cfg->src_rect.y << 16) | (cfg->src_rect.x);
+	src_size = (cfg->src_rect.h << 16) | (cfg->src_rect.w);
+	dst_xy = (cfg->dst_rect.y << 16) | (cfg->dst_rect.x);
+	dst_size = (cfg->dst_rect.h << 16) | (cfg->dst_rect.w);
+
+	/* strides are packed pairwise: planes 0/1 and planes 2/3 */
+	ystride0 = (cfg->layout.plane_pitch[0]) |
+		(cfg->layout.plane_pitch[1] << 16);
+	ystride1 = (cfg->layout.plane_pitch[2]) |
+		(cfg->layout.plane_pitch[3] << 16);
+
+	/* program scaler, phase registers, if pipes supporting scaling */
+	if (ctx->cap->features & SDE_SSPP_SCALER) {
+		/* program decimation */
+		decimation = ((1 << cfg->horz_decimation) - 1) << 8;
+		decimation |= ((1 << cfg->vert_decimation) - 1);
+		ctx->ops.setup_scaler(ctx, cfg, pe_ext, scale_cfg);
+	}
+
+	/* rectangle register programming */
+	SDE_REG_WRITE(c, SSPP_SRC_SIZE + idx, src_size);
+	SDE_REG_WRITE(c, SSPP_SRC_XY + idx, src_xy);
+	SDE_REG_WRITE(c, SSPP_OUT_SIZE + idx, dst_size);
+	SDE_REG_WRITE(c, SSPP_OUT_XY + idx, dst_xy);
+
+	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+	SDE_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+	SDE_REG_WRITE(c, SSPP_DECIMATION_CONFIG + idx, decimation);
+}
+
+/**
+ * sde_hw_sspp_setup_sourceaddress - program the per-plane source addresses
+ * @ctx: pipe context
+ * @cfg: pipe configuration carrying layout.plane_addr[]
+ */
+static void sde_hw_sspp_setup_sourceaddress(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *cfg)
+{
+	u32 plane;
+	u32 offset;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &offset))
+		return;
+
+	/* SSPP_SRC0..3_ADDR are consecutive 32-bit registers */
+	for (plane = 0; plane < ARRAY_SIZE(cfg->layout.plane_addr); plane++)
+		SDE_REG_WRITE(&ctx->hw,
+				SSPP_SRC0_ADDR + offset + (plane << 2),
+				cfg->layout.plane_addr[plane]);
+}
+
+/**
+ * sde_hw_sspp_setup_csc - program the pipe color space converter
+ * @ctx: pipe context
+ * @data: CSC matrix/clamp configuration
+ */
+static void sde_hw_sspp_setup_csc(struct sde_hw_pipe *ctx,
+		struct sde_csc_cfg *data)
+{
+	u32 offset;
+	bool is_csc10 = false;
+
+	if (!data || _sspp_subblk_offset(ctx, SDE_SSPP_CSC, &offset))
+		return;
+
+	/* the 10-bit CSC registers start CSC_10BIT_OFFSET further in */
+	if (test_bit(SDE_SSPP_CSC_10BIT, &ctx->cap->features)) {
+		offset += CSC_10BIT_OFFSET;
+		is_csc10 = true;
+	}
+
+	sde_hw_csc_setup(&ctx->hw, offset, data, is_csc10);
+}
+
+/**
+ * sde_hw_sspp_setup_sharpening - program QSEED2 sharpening registers
+ * @ctx: pipe context; must have the QSEED2 scaler feature
+ * @cfg: sharpening strength and threshold values
+ */
+static void sde_hw_sspp_setup_sharpening(struct sde_hw_pipe *ctx,
+		struct sde_hw_sharp_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 idx;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SCALER_QSEED2, &idx) || !cfg ||
+		!test_bit(SDE_SSPP_SCALER_QSEED2, &ctx->cap->features))
+		return;
+
+	c = &ctx->hw;
+
+	/* four consecutive 32-bit registers starting at the sharp base */
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx, cfg->strength);
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x4, cfg->edge_thr);
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0x8, cfg->smooth_thr);
+	SDE_REG_WRITE(c, VIG_0_QSEED2_SHARP + idx + 0xC, cfg->noise_thr);
+}
+
+/**
+ * sde_hw_sspp_setup_solidfill - program the constant fill color
+ * @ctx: pipe context
+ * @color: packed constant color value
+ */
+static void sde_hw_sspp_setup_solidfill(struct sde_hw_pipe *ctx, u32 color)
+{
+	u32 offset;
+
+	if (!_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &offset))
+		SDE_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + offset,
+				color);
+}
+
+/**
+ * sde_hw_sspp_setup_danger_safe_lut - program the danger and safe QoS LUTs
+ * @ctx: pipe context
+ * @cfg: QoS configuration carrying danger_lut and safe_lut
+ */
+static void sde_hw_sspp_setup_danger_safe_lut(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_qos_cfg *cfg)
+{
+	u32 offset;
+
+	if (!_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &offset)) {
+		SDE_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + offset,
+				cfg->danger_lut);
+		SDE_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + offset,
+				cfg->safe_lut);
+	}
+}
+
+/**
+ * sde_hw_sspp_setup_creq_lut - program the CREQ QoS LUT
+ * @ctx: pipe context
+ * @cfg: QoS configuration carrying creq_lut
+ */
+static void sde_hw_sspp_setup_creq_lut(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_qos_cfg *cfg)
+{
+	u32 offset;
+
+	if (!_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &offset))
+		SDE_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + offset,
+				cfg->creq_lut);
+}
+
+/**
+ * sde_hw_sspp_setup_qos_ctrl - program the pipe QoS control register
+ * @ctx: pipe context
+ * @cfg: QoS settings (vblank priorities and danger/safe enable)
+ */
+static void sde_hw_sspp_setup_qos_ctrl(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_qos_cfg *cfg)
+{
+	u32 idx;
+	u32 qos_ctrl = 0;
+
+	if (_sspp_subblk_offset(ctx, SDE_SSPP_SRC, &idx))
+		return;
+
+	/* vblank-based priorities only apply when vblank_en is set */
+	if (cfg->vblank_en) {
+		qos_ctrl |= ((cfg->creq_vblank &
+				SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+		qos_ctrl |= ((cfg->danger_vblank &
+				SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+				SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+		qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+	}
+
+	if (cfg->danger_safe_en)
+		qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+	SDE_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+/**
+ * _setup_layer_ops - populate the pipe ops table from catalog features
+ * @c: pipe context whose ops are being filled in
+ * @features: feature bitmask from the pipe's catalog entry
+ *
+ * Handlers are installed only when the backing hardware block is
+ * present; absent features leave the corresponding op NULL, except
+ * setup_scaler which always gets either the QSEED3 or QSEED2 variant.
+ */
+static void _setup_layer_ops(struct sde_hw_pipe *c,
+		unsigned long features)
+{
+	if (test_bit(SDE_SSPP_SRC, &features)) {
+		c->ops.setup_format = sde_hw_sspp_setup_format;
+		c->ops.setup_rects = sde_hw_sspp_setup_rects;
+		c->ops.setup_sourceaddress = sde_hw_sspp_setup_sourceaddress;
+		c->ops.setup_solidfill = sde_hw_sspp_setup_solidfill;
+	}
+	if (test_bit(SDE_SSPP_QOS, &features)) {
+		c->ops.setup_danger_safe_lut =
+			sde_hw_sspp_setup_danger_safe_lut;
+		c->ops.setup_creq_lut = sde_hw_sspp_setup_creq_lut;
+		c->ops.setup_qos_ctrl = sde_hw_sspp_setup_qos_ctrl;
+	}
+
+	if (test_bit(SDE_SSPP_CSC, &features) ||
+		test_bit(SDE_SSPP_CSC_10BIT, &features))
+		c->ops.setup_csc = sde_hw_sspp_setup_csc;
+
+	if (test_bit(SDE_SSPP_SCALER_QSEED2, &features))
+		c->ops.setup_sharpening = sde_hw_sspp_setup_sharpening;
+
+	if (test_bit(SDE_SSPP_SCALER_QSEED3, &features))
+		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler3;
+	else
+		c->ops.setup_scaler = _sde_hw_sspp_setup_scaler;
+
+	if (test_bit(SDE_SSPP_HSIC, &features)) {
+		/* TODO: add version based assignment here as inline or macro */
+		if (c->cap->sblk->hsic_blk.version ==
+			(SDE_COLOR_PROCESS_VER(0x1, 0x7))) {
+			c->ops.setup_pa_hue = sde_setup_pipe_pa_hue_v1_7;
+			c->ops.setup_pa_sat = sde_setup_pipe_pa_sat_v1_7;
+			c->ops.setup_pa_val = sde_setup_pipe_pa_val_v1_7;
+			c->ops.setup_pa_cont = sde_setup_pipe_pa_cont_v1_7;
+		}
+	}
+
+	if (test_bit(SDE_SSPP_MEMCOLOR, &features)) {
+		if (c->cap->sblk->memcolor_blk.version ==
+			(SDE_COLOR_PROCESS_VER(0x1, 0x7)))
+			c->ops.setup_pa_memcolor =
+				sde_setup_pipe_pa_memcol_v1_7;
+	}
+}
+
+/**
+ * _sspp_offset - find the catalog entry for a pipe and fill in its reg map
+ * @sspp: pipe id to look up
+ * @addr: mapped MDP register base
+ * @catalog: mdss catalog data
+ * @b: block register map populated on success
+ * Return: catalog entry on success, ERR_PTR(-EINVAL) on bad arguments or
+ * when the pipe id is absent from the catalog. (The original returned
+ * -ENOMEM, mislabelling a lookup failure as an allocation failure;
+ * callers only check IS_ERR_OR_NULL, so this stays compatible.)
+ */
+static struct sde_sspp_cfg *_sspp_offset(enum sde_sspp sspp,
+		void __iomem *addr,
+		struct sde_mdss_cfg *catalog,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	if ((sspp < SSPP_MAX) && catalog && addr && b) {
+		for (i = 0; i < catalog->sspp_count; i++) {
+			if (sspp == catalog->sspp[i].id) {
+				b->base_off = addr;
+				b->blk_off = catalog->sspp[i].base;
+				b->length = catalog->sspp[i].len;
+				b->hwversion = catalog->hwversion;
+				b->log_mask = SDE_DBG_MASK_SSPP;
+				return &catalog->sspp[i];
+			}
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * sde_hw_sspp_init - allocate and initialize a pipe (SSPP) context
+ * @idx: pipe id to initialize
+ * @addr: mapped MDP register base
+ * @catalog: mdss catalog data
+ * Return: pipe context on success; ERR_PTR(-ENOMEM) on allocation
+ * failure, ERR_PTR(-EINVAL) when the pipe is not in the catalog
+ */
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *catalog)
+{
+	struct sde_hw_pipe *hw_pipe;
+	struct sde_sspp_cfg *cfg;
+
+	hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+	if (!hw_pipe)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(hw_pipe);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	hw_pipe->idx = idx;
+	hw_pipe->cap = cfg;
+	_setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+	hw_pipe->highest_bank_bit = catalog->mdp[0].highest_bank_bit;
+
+	/* register the pipe (and its scaler, if any) for debug dumps */
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
+			hw_pipe->hw.blk_off,
+			hw_pipe->hw.blk_off + hw_pipe->hw.length,
+			hw_pipe->hw.xin_id);
+
+	if (cfg->sblk->scaler_blk.len)
+		sde_dbg_reg_register_dump_range(SDE_DBG_NAME,
+			cfg->sblk->scaler_blk.name,
+			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base,
+			hw_pipe->hw.blk_off + cfg->sblk->scaler_blk.base +
+			cfg->sblk->scaler_blk.len,
+			hw_pipe->hw.xin_id);
+
+	return hw_pipe;
+}
+
+/**
+ * sde_hw_sspp_destroy - free a pipe context from sde_hw_sspp_init
+ * @ctx: pipe context to free; NULL is a safe no-op for kfree()
+ */
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx)
+{
+	kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_sspp.h b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
new file mode 100644
index 000000000000..ceb48282081d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_sspp.h
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_SSPP_H
+#define _SDE_HW_SSPP_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_formats.h"
+#include "sde_color_processing.h"
+
+struct sde_hw_pipe;
+
+/**
+ * Flags
+ */
+#define SDE_SSPP_SECURE_OVERLAY_SESSION 0x1
+#define SDE_SSPP_FLIP_LR 0x2
+#define SDE_SSPP_FLIP_UD 0x4
+#define SDE_SSPP_SOURCE_ROTATED_90 0x8
+#define SDE_SSPP_ROT_90 0x10
+#define SDE_SSPP_SOLID_FILL 0x20
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define SDE_SSPP_SCALER ((1UL << SDE_SSPP_SCALER_RGB) | \
+ (1UL << SDE_SSPP_SCALER_QSEED2) | \
+ (1UL << SDE_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+ SDE_SSPP_COMP_0,
+ SDE_SSPP_COMP_1_2,
+ SDE_SSPP_COMP_2,
+ SDE_SSPP_COMP_3,
+
+ SDE_SSPP_COMP_MAX
+};
+
+enum {
+ SDE_FRAME_LINEAR,
+ SDE_FRAME_TILE_A4X,
+ SDE_FRAME_TILE_A5X,
+};
+
+enum sde_hw_filter {
+ SDE_SCALE_FILTER_NEAREST = 0,
+ SDE_SCALE_FILTER_BIL,
+ SDE_SCALE_FILTER_PCMN,
+ SDE_SCALE_FILTER_CA,
+ SDE_SCALE_FILTER_MAX
+};
+
+enum sde_hw_filter_alpa {
+ SDE_SCALE_ALPHA_PIXEL_REP,
+ SDE_SCALE_ALPHA_BIL
+};
+
+enum sde_hw_filter_yuv {
+ SDE_SCALE_2D_4X4,
+ SDE_SCALE_2D_CIR,
+ SDE_SCALE_1D_SEP,
+ SDE_SCALE_BIL
+};
+
+struct sde_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct sde_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[SDE_MAX_PLANES];
+ int phase_step_x[SDE_MAX_PLANES];
+ int init_phase_y[SDE_MAX_PLANES];
+ int phase_step_y[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels extension in left, right, top and bottom direction
+ * for all color components. This pixel value for each color component
+ * should be sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[SDE_MAX_PLANES];
+ int num_ext_pxls_right[SDE_MAX_PLANES];
+ int num_ext_pxls_top[SDE_MAX_PLANES];
+ int num_ext_pxls_btm[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be overfetched in left, right, top and
+ * bottom directions from source image for scaling.
+ */
+ int left_ftch[SDE_MAX_PLANES];
+ int right_ftch[SDE_MAX_PLANES];
+ int top_ftch[SDE_MAX_PLANES];
+ int btm_ftch[SDE_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int left_rpt[SDE_MAX_PLANES];
+ int right_rpt[SDE_MAX_PLANES];
+ int top_rpt[SDE_MAX_PLANES];
+ int btm_rpt[SDE_MAX_PLANES];
+
+ uint32_t roi_w[SDE_MAX_PLANES];
+ uint32_t roi_h[SDE_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum sde_hw_filter horz_filter[SDE_MAX_PLANES];
+ enum sde_hw_filter vert_filter[SDE_MAX_PLANES];
+
+};
+
+/**
+ * struct sde_hw_scaler3_de_cfg : QSEEDv3 detail enhancer configuration
+ * @enable:         detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip:           clip shift
+ * @limit:          limit value
+ * @thr_quiet:      quiet threshold
+ * @thr_dieout:     dieout threshold
+ * @thr_low:        low threshold
+ * @thr_high:       high threshold
+ * @prec_shift:     precision shift
+ * @adjust_a:       A-coefficients for mapping curve
+ * @adjust_b:       B-coefficients for mapping curve
+ * @adjust_c:       C-coefficients for mapping curve
+ */
+struct sde_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[SDE_MAX_DE_CURVES];
+ int16_t adjust_b[SDE_MAX_DE_CURVES];
+ int16_t adjust_c[SDE_MAX_DE_CURVES];
+};
+
+/**
+ * struct sde_hw_scaler3_cfg : QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @ init_phase_x: horizontal initial phase
+ * @ phase_step_x: horizontal phase step
+ * @ init_phase_y: vertical initial phase
+ * @ phase_step_y: vertical phase step
+ * @ preload_x: horizontal preload value
+ * @ preload_y: vertical preload value
+ * @ src_width: source width
+ * @ src_height: source height
+ * @ dst_width: destination width
+ * @ dst_height: destination height
+ * @ y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @ uv_filter_cfg: uv plane filter configuration
+ * @ alpha_filter_cfg: alpha filter configuration
+ * @ blend_cfg: blend coefficients configuration
+ * @ lut_flag: scaler LUT update flags
+ * 0x1 swap LUT bank
+ * 0x2 update 2D filter LUT
+ * 0x4 update y circular filter LUT
+ * 0x8 update uv circular filter LUT
+ * 0x10 update y separable filter LUT
+ * 0x20 update uv separable filter LUT
+ * @ dir_lut_idx: 2D filter LUT index
+ * @ y_rgb_cir_lut_idx: y circular filter LUT index
+ * @ uv_cir_lut_idx: uv circular filter LUT index
+ * @ y_rgb_sep_lut_idx: y separable filter LUT index
+ * @ uv_sep_lut_idx: uv separable filter LUT index
+ * @ dir_lut: pointer to 2D LUT
+ * @ cir_lut: pointer to circular filter LUT
+ * @ sep_lut: pointer to separable filter LUT
+ * @ de: detail enhancer configuration
+ */
+struct sde_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[SDE_MAX_PLANES];
+ int32_t phase_step_x[SDE_MAX_PLANES];
+ int32_t init_phase_y[SDE_MAX_PLANES];
+ int32_t phase_step_y[SDE_MAX_PLANES];
+
+ u32 preload_x[SDE_MAX_PLANES];
+ u32 preload_y[SDE_MAX_PLANES];
+ u32 src_width[SDE_MAX_PLANES];
+ u32 src_height[SDE_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct sde_hw_scaler3_de_cfg de;
+};
+
+/**
+ * struct sde_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: src ROI, caller takes into account the different operations
+ * such as decimation, flip etc to program this field
+ * @dest_rect: destination ROI.
+ * @ horz_decimation : horizontal decimation factor( 0, 2, 4, 8, 16)
+ * @ vert_decimation : vertical decimation factor( 0, 2, 4, 8, 16)
+ * 2: Read 1 line/pixel drop 1 line/pixel
+ * 4: Read 1 line/pixel drop 3 lines/pixels
+ * 8: Read 1 line/pixel drop 7 lines/pixels
+ * 16: Read 1 line/pixel drop 15 line/pixels
+ */
+struct sde_hw_pipe_cfg {
+ struct sde_hw_fmt_layout layout;
+ struct sde_rect src_rect;
+ struct sde_rect dst_rect;
+ u8 horz_decimation;
+ u8 vert_decimation;
+};
+
+/**
+ * struct sde_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generate danger level based on fill level
+ * @safe_lut: LUT for generate safe level based on fill level
+ * @creq_lut: LUT for generate creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct sde_hw_pipe_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u32 creq_lut;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/**
+ * struct sde_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_sspp_ops {
+	/**
+	 * setup_format - setup pixel format, cropping rectangle, flip
+	 * @ctx: Pointer to pipe context
+	 * @fmt: Pointer to format description
+	 * @flags: Extra flags for format config
+	 */
+	void (*setup_format)(struct sde_hw_pipe *ctx,
+			const struct sde_format *fmt, u32 flags);
+
+	/**
+	 * setup_rects - setup pipe ROI rectangles
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 * @pe_ext: Pointer to pixel ext settings
+	 * @scale_cfg: Pointer to scaler settings
+	 */
+	void (*setup_rects)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_cfg *cfg,
+			struct sde_hw_pixel_ext *pe_ext,
+			void *scale_cfg);
+
+	/**
+	 * setup_sourceaddress - setup pipe source addresses
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe config structure
+	 */
+	void (*setup_sourceaddress)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_cfg *cfg);
+
+	/**
+	 * setup_csc - setup color space coversion
+	 * @ctx: Pointer to pipe context
+	 * @data: Pointer to config structure
+	 */
+	void (*setup_csc)(struct sde_hw_pipe *ctx, struct sde_csc_cfg *data);
+
+	/**
+	 * setup_solidfill - enable/disable colorfill
+	 * @ctx: Pointer to pipe context
+	 * @color: Fill color value
+	 */
+	void (*setup_solidfill)(struct sde_hw_pipe *ctx, u32 color);
+
+	/**
+	 * setup_sharpening - setup sharpening
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to config structure
+	 */
+	void (*setup_sharpening)(struct sde_hw_pipe *ctx,
+			struct sde_hw_sharp_cfg *cfg);
+
+
+	/**
+	 * setup_pa_hue(): Setup source hue adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to hue data
+	 */
+	void (*setup_pa_hue)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_sat(): Setup source saturation adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to saturation data
+	 */
+	void (*setup_pa_sat)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_val(): Setup source value adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to value data
+	 */
+	void (*setup_pa_val)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_cont(): Setup source contrast adjustment
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer contrast data
+	 */
+	void (*setup_pa_cont)(struct sde_hw_pipe *ctx, void *cfg);
+
+	/**
+	 * setup_pa_memcolor - setup source color processing
+	 * @ctx: Pointer to pipe context
+	 * @type: Memcolor type (Skin, sky or foliage)
+	 * @cfg: Pointer to memory color config data
+	 */
+	void (*setup_pa_memcolor)(struct sde_hw_pipe *ctx,
+			enum sde_memcolor_type type, void *cfg);
+
+	/**
+	 * setup_igc - setup inverse gamma correction
+	 * @ctx: Pointer to pipe context
+	 */
+	void (*setup_igc)(struct sde_hw_pipe *ctx);
+
+	/**
+	 * setup_danger_safe_lut - setup danger safe LUTs
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_danger_safe_lut)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_creq_lut - setup CREQ LUT
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_creq_lut)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_qos_ctrl - setup QoS control
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to pipe QoS configuration
+	 *
+	 */
+	void (*setup_qos_ctrl)(struct sde_hw_pipe *ctx,
+			struct sde_hw_pipe_qos_cfg *cfg);
+
+	/**
+	 * setup_histogram - setup histograms
+	 * @ctx: Pointer to pipe context
+	 * @cfg: Pointer to histogram configuration
+	 */
+	void (*setup_histogram)(struct sde_hw_pipe *ctx,
+			void *cfg);
+
+	/**
+	 * setup_scaler - setup scaler
+	 * @ctx: Pointer to pipe context
+	 * @pipe_cfg: Pointer to pipe configuration
+	 * @pe_cfg: Pointer to pixel extension configuration
+	 * @scaler_cfg: Pointer to scaler configuration
+	 */
+	void (*setup_scaler)(struct sde_hw_pipe *ctx,
+		struct sde_hw_pipe_cfg *pipe_cfg,
+		struct sde_hw_pixel_ext *pe_cfg,
+		void *scaler_cfg);
+};
+
+/**
+ * struct sde_hw_pipe - pipe description
+ * @hw: register block mapping for this pipe (io base, offset, length,
+ *      hw version); see struct sde_hw_blk_reg_map
+ * @idx: pipe index
+ * @cap: pointer to this pipe's catalog entry (features, sub-blocks)
+ * @highest_bank_bit: highest memory bank bit, from catalog mdp[0]
+ * @ops: operations supported by this pipe; unsupported ops are left NULL
+ */
+struct sde_hw_pipe {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* Pipe */
+	enum sde_sspp idx;
+	const struct sde_sspp_cfg *cap;
+	u32 highest_bank_bit;
+
+	/* Ops */
+	struct sde_hw_sspp_ops ops;
+};
+
+/**
+ * sde_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once before accessing every pipe.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ */
+struct sde_hw_pipe *sde_hw_sspp_init(enum sde_sspp idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *catalog);
+
+/**
+ * sde_hw_sspp_destroy(): Destroys SSPP driver context
+ * should be called during Hw pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by sde_hw_sspp_init
+ */
+void sde_hw_sspp_destroy(struct sde_hw_pipe *ctx);
+
+#endif /*_SDE_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.c b/drivers/gpu/drm/msm/sde/sde_hw_top.c
new file mode 100644
index 000000000000..218797e623a2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_top.h"
+#include "sde_dbg.h"
+
+#define SSPP_SPARE 0x28
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num) (0x030 + (num * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num) (0x060 + (num * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+/*
+ * sde_hw_setup_split_pipe - program split-display control registers.
+ * @mdp: mdp top context driver
+ * @cfg: upper and lower pipe configuration
+ *
+ * These registers are not double buffered; the caller must program them
+ * before timing control is enabled. Only DSI interfaces (INTF_1/INTF_2)
+ * have split-display support.
+ */
+static void sde_hw_setup_split_pipe(struct sde_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 upper_pipe = 0;
+	u32 lower_pipe = 0;
+
+	/* validate before touching mdp->hw (was computed pre-check) */
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	/* The SPLIT registers are only for DSI interfaces */
+	if ((cfg->intf != INTF_1) && (cfg->intf != INTF_2))
+		return;
+
+	if (cfg->en) {
+		if (cfg->mode == INTF_MODE_CMD) {
+			lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+			/* interface controlling sw trigger */
+			if (cfg->intf == INTF_2)
+				lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+			else
+				lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+
+			/*
+			 * free run
+			 * NOTE(review): plain '=' discards the CMD/trigger
+			 * bits set above for pp-split; confirm whether '|='
+			 * was intended before changing behavior.
+			 */
+			if (cfg->pp_split_slave != INTF_MAX)
+				lower_pipe = FLD_SMART_PANEL_FREE_RUN;
+
+			upper_pipe = lower_pipe;
+		} else {
+			if (cfg->intf == INTF_2) {
+				lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+			} else {
+				lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+				upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+			}
+		}
+	}
+
+	SDE_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+	SDE_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+/*
+ * sde_hw_setup_pp_split - program ping-pong split (PPB) registers.
+ * @mdp: mdp top context driver
+ * @cfg: split pipe configuration
+ *
+ * When enabled with a valid slave interface, the selected PPB is set up
+ * for horizontal split and the other PPB is cleared; when disabled, both
+ * the selected PPB config and control are written as zero.
+ */
+static void sde_hw_setup_pp_split(struct sde_hw_mdp *mdp,
+		struct split_pipe_cfg *cfg)
+{
+	u32 ppb_config = 0x0;
+	u32 ppb_control = 0x0;
+
+	if (!mdp || !cfg)
+		return;
+
+	if (cfg->en && cfg->pp_split_slave != INTF_MAX) {
+		/* slave intf number is encoded 1-based in bits [20..] */
+		ppb_config |= (cfg->pp_split_slave - INTF_0 + 1) << 20;
+		ppb_config |= BIT(16); /* split enable */
+		ppb_control = BIT(5); /* horz split*/
+	}
+	/* pp_split_index selects which PPB drives the split; clear the other */
+	if (cfg->pp_split_index) {
+		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, ppb_config);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, ppb_control);
+	} else {
+		SDE_REG_WRITE(&mdp->hw, PPB0_CONFIG, ppb_config);
+		SDE_REG_WRITE(&mdp->hw, PPB0_CNTL, ppb_control);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CONFIG, 0x0);
+		SDE_REG_WRITE(&mdp->hw, PPB1_CNTL, 0x0);
+	}
+}
+
+/*
+ * sde_hw_setup_cdm_output - select the CDM data path destination.
+ * @mdp: mdp top context driver
+ * @cfg: cdm output configuration
+ *
+ * Writeback takes precedence over interface output when both are set;
+ * with neither set, the output control is cleared.
+ */
+static void sde_hw_setup_cdm_output(struct sde_hw_mdp *mdp,
+		struct cdm_output_cfg *cfg)
+{
+	struct sde_hw_blk_reg_map *c;
+	u32 out_ctl = 0;
+
+	/* guard added for consistency with the other setters in this file */
+	if (!mdp || !cfg)
+		return;
+
+	c = &mdp->hw;
+
+	if (cfg->wb_en)
+		out_ctl |= BIT(24);
+	else if (cfg->intf_en)
+		out_ctl |= BIT(19);
+
+	SDE_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+/*
+ * sde_hw_setup_clk_force_ctrl - force a clock on or off.
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled (register/bit taken from catalog)
+ * @enable: true to force the clock on
+ *
+ * Returns true when the clock was NOT already forced on before this call,
+ * i.e. whether this invocation is the one that forced it on.
+ */
+static bool sde_hw_setup_clk_force_ctrl(struct sde_hw_mdp *mdp,
+		enum sde_clk_ctrl_type clk_ctrl, bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 reg_off, bit_off;
+	u32 reg_val, new_val;
+	bool clk_forced_on;
+
+	if (clk_ctrl <= SDE_CLK_CTRL_NONE || clk_ctrl >= SDE_CLK_CTRL_MAX)
+		return false;
+
+	reg_off = mdp->cap->clk_ctrls[clk_ctrl].reg_off;
+	bit_off = mdp->cap->clk_ctrls[clk_ctrl].bit_off;
+
+	reg_val = SDE_REG_READ(c, reg_off);
+
+	if (enable)
+		new_val = reg_val | BIT(bit_off);
+	else
+		new_val = reg_val & ~BIT(bit_off);
+
+	SDE_REG_WRITE(c, reg_off, new_val);
+
+	/* based on the value read back BEFORE the write above */
+	clk_forced_on = !(reg_val & BIT(bit_off));
+
+	return clk_forced_on;
+}
+
+
+/*
+ * sde_hw_get_danger_status - decode the DANGER_STATUS register.
+ * @mdp: mdp top context driver
+ * @status: output; filled with the 2-bit danger level per client
+ *
+ * Bit positions follow the DANGER_STATUS register layout; WB_0/1/3
+ * have no danger signal and are reported as 0.
+ */
+static void sde_hw_get_danger_status(struct sde_hw_mdp *mdp,
+		struct sde_danger_safe_status *status)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 value;
+
+	value = SDE_REG_READ(c, DANGER_STATUS);
+	status->mdp = (value >> 0) & 0x3;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+	status->wb[WB_0] = 0;
+	status->wb[WB_1] = 0;
+	status->wb[WB_2] = (value >> 2) & 0x3;
+	status->wb[WB_3] = 0;
+}
+
+/*
+ * sde_hw_get_safe_status - decode the SAFE_STATUS register.
+ * @mdp: mdp top context driver
+ * @status: output; filled with the 1-bit safe flag per client
+ *
+ * Same bit layout as DANGER_STATUS but each field is a single bit;
+ * WB_0/1/3 have no safe signal and are reported as 0.
+ */
+static void sde_hw_get_safe_status(struct sde_hw_mdp *mdp,
+		struct sde_danger_safe_status *status)
+{
+	struct sde_hw_blk_reg_map *c = &mdp->hw;
+	u32 value;
+
+	value = SDE_REG_READ(c, SAFE_STATUS);
+	status->mdp = (value >> 0) & 0x1;
+	status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+	status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+	status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+	status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+	status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+	status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+	status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+	status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+	status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+	status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+	status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+	status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+	status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+	status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+	status->wb[WB_0] = 0;
+	status->wb[WB_1] = 0;
+	status->wb[WB_2] = (value >> 2) & 0x1;
+	status->wb[WB_3] = 0;
+}
+
+/*
+ * _setup_mdp_ops - populate the MDP TOP ops table.
+ * @ops: ops structure to fill
+ * @cap: feature capability bits; currently unused here, all ops are
+ *       assigned unconditionally (kept for future feature gating)
+ */
+static void _setup_mdp_ops(struct sde_hw_mdp_ops *ops,
+		unsigned long cap)
+{
+	ops->setup_split_pipe = sde_hw_setup_split_pipe;
+	ops->setup_pp_split = sde_hw_setup_pp_split;
+	ops->setup_cdm_output = sde_hw_setup_cdm_output;
+	ops->setup_clk_force_ctrl = sde_hw_setup_clk_force_ctrl;
+	ops->get_danger_status = sde_hw_get_danger_status;
+	ops->get_safe_status = sde_hw_get_safe_status;
+}
+
+/*
+ * _top_offset - look up the catalog entry for the MDP TOP block and fill
+ * in its register block mapping.
+ * @mdp:  mdp top block id to look up
+ * @m:    mdss hardware catalog
+ * @addr: mapped MDP io base address
+ * @b:    register map to populate on success
+ *
+ * Returns the matching catalog entry, or ERR_PTR(-EINVAL) when the id
+ * is not found. NOTE(review): unlike _sspp_offset, m/addr/b are not
+ * NULL-checked here; callers must pass valid pointers.
+ */
+static const struct sde_mdp_cfg *_top_offset(enum sde_mdp mdp,
+		const struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->mdp_count; i++) {
+		if (mdp == m->mdp[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->mdp[i].base;
+			b->length = m->mdp[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_TOP;
+			return &m->mdp[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * sde_hw_mdptop_init - allocate and initialize an MDP TOP driver context.
+ * @idx:  mdp top index for which the driver object is required
+ * @addr: mapped register io address of MDP
+ * @m:    pointer to mdss catalog data
+ *
+ * Returns a new context on success; ERR_PTR(-ENOMEM) on allocation
+ * failure or ERR_PTR(-EINVAL) when the block is not in the catalog.
+ * Caller owns the context and must release it with sde_hw_mdp_destroy().
+ */
+struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
+		void __iomem *addr,
+		const struct sde_mdss_cfg *m)
+{
+	struct sde_hw_mdp *mdp;
+	const struct sde_mdp_cfg *cfg;
+
+	mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+	if (!mdp)
+		return ERR_PTR(-ENOMEM);
+
+	/* _top_offset() fills mdp->hw from the catalog entry */
+	cfg = _top_offset(idx, m, addr, &mdp->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(mdp);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	mdp->idx = idx;
+	mdp->cap = cfg;
+	_setup_mdp_ops(&mdp->ops, mdp->cap->features);
+
+	/* register with the debug register-dump facility */
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name,
+			mdp->hw.blk_off, mdp->hw.blk_off + mdp->hw.length,
+			mdp->hw.xin_id);
+	sde_dbg_set_sde_top_offset(mdp->hw.blk_off);
+
+	return mdp;
+}
+
+/*
+ * sde_hw_mdp_destroy - free a context created by sde_hw_mdptop_init().
+ * @mdp: context to free; NULL is a safe no-op (kfree(NULL) is allowed)
+ */
+void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp)
+{
+	kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_top.h b/drivers/gpu/drm/msm/sde/sde_hw_top.h
new file mode 100644
index 000000000000..780d051e7408
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_top.h
@@ -0,0 +1,170 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_TOP_H
+#define _SDE_HW_TOP_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @pp_split_slave: Slave interface for ping pong split, INTF_MAX to disable
+ * @pp_split_index: Ping pong index for ping pong split
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ *                  flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum sde_intf_mode mode;
+ enum sde_intf intf;
+ enum sde_intf pp_split_slave;
+ u32 pp_split_index;
+ bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @wb_en : enable/disable writeback output
+ * @intf_en : enable/disable interface output
+ */
+struct cdm_output_cfg {
+ bool wb_en;
+ bool intf_en;
+};
+
+/**
+ * struct sde_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ * @wb: writeback output status
+ */
+struct sde_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+ u8 wb[WB_MAX];
+};
+
+/**
+ * struct sde_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_pp_split : Programs the pp split control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ */
+struct sde_hw_mdp_ops {
+	/** setup_split_pipe() : Registers are not double buffered, this
+	 * function should be called before timing control enable
+	 * @mdp : mdp top context driver
+	 * @cfg : upper and lower part of pipe configuration
+	 */
+ void (*setup_split_pipe)(struct sde_hw_mdp *mdp,
+ struct split_pipe_cfg *p);
+
+ /** setup_pp_split() : Configure pp split related registers
+ * @mdp : mdp top context driver
+ * @cfg : upper and lower part of pipe configuration
+ */
+ void (*setup_pp_split)(struct sde_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg);
+
+ /**
+ * setup_cdm_output() : Setup selection control of the cdm data path
+ * @mdp : mdp top context driver
+ * @cfg : cdm output configuration
+ */
+ void (*setup_cdm_output)(struct sde_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct sde_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct sde_hw_mdp *mdp,
+ enum sde_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct sde_hw_mdp *mdp,
+ struct sde_danger_safe_status *status);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct sde_hw_mdp *mdp,
+ struct sde_danger_safe_status *status);
+};
+
+struct sde_hw_mdp {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* intf */
+ enum sde_mdp idx;
+ const struct sde_mdp_cfg *cap;
+
+ /* ops */
+ struct sde_hw_mdp_ops ops;
+};
+
+/**
+ * sde_hw_mdptop_init - initializes the mdp top driver for the passed idx
+ * @idx: MDP TOP index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct sde_hw_mdp *sde_hw_mdptop_init(enum sde_mdp idx,
+ void __iomem *addr,
+ const struct sde_mdss_cfg *m);
+
+void sde_hw_mdp_destroy(struct sde_hw_mdp *mdp);
+
+#endif /*_SDE_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.c b/drivers/gpu/drm/msm/sde/sde_hw_util.c
new file mode 100644
index 000000000000..b899f0c2f71c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.c
@@ -0,0 +1,93 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "msm_drv.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+/* using a file static variables for debugfs access */
+static u32 sde_hw_util_log_mask = SDE_DBG_MASK_NONE;
+
+/*
+ * sde_reg_write - write a register within a mapped block.
+ * @c:       register block mapping
+ * @reg_off: offset relative to the block
+ * @val:     value to write
+ * @name:    register name for the debug log (supplied by SDE_REG_WRITE)
+ *
+ * Uses writel_relaxed: no memory barrier is implied; callers needing
+ * ordering against other memory accesses must add their own barrier.
+ */
+void sde_reg_write(struct sde_hw_blk_reg_map *c,
+		u32 reg_off,
+		u32 val,
+		const char *name)
+{
+	/* don't need to mutex protect this */
+	if (c->log_mask & sde_hw_util_log_mask)
+		SDE_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+				name, c->blk_off + reg_off, val);
+	writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+/*
+ * sde_reg_read - read a register within a mapped block.
+ * @c:       register block mapping
+ * @reg_off: offset relative to the block
+ *
+ * NOTE(review): returns int although readl_relaxed yields a 32-bit
+ * register value; the signature is fixed by sde_hw_util.h.
+ */
+int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off)
+{
+	return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+/*
+ * sde_hw_util_get_log_mask_ptr - expose the register-write log mask.
+ *
+ * Returns a pointer to the file-static mask so debugfs can read/write it.
+ */
+u32 *sde_hw_util_get_log_mask_ptr(void)
+{
+	return &sde_hw_util_log_mask;
+}
+
+/*
+ * sde_hw_csc_setup - program a CSC (color space conversion) block.
+ * @c:           register block mapping
+ * @csc_reg_off: offset of the CSC register set within the block
+ * @data:        matrix coefficients, pre/post clamp and bias values
+ * @csc10:       true for 10-bit CSC (clamp values packed with 16-bit
+ *               fields instead of 8-bit)
+ *
+ * Register layout: 5 matrix words, 3 pre-clamp, 3 post-clamp,
+ * 3 pre-bias, 3 post-bias, each 4 bytes apart starting at csc_reg_off.
+ */
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+		u32 csc_reg_off,
+		struct sde_csc_cfg *data, bool csc10)
+{
+	static const u32 matrix_shift = 7;
+	u32 clamp_shift = csc10 ? 16 : 8;
+	u32 val;
+
+	/* matrix coeff - convert S15.16 to S4.9, two coefficients per word */
+	val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off, val);
+	val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x4, val);
+	val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0x8, val);
+	val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+		(((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+	SDE_REG_WRITE(c, csc_reg_off + 0xc, val);
+	val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+	SDE_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+	/* Pre clamp: high limit in the upper field, low limit in the lower */
+	val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x14, val);
+	val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x18, val);
+	val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+	/* Post clamp */
+	val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+	SDE_REG_WRITE(c, csc_reg_off + 0x20, val);
+	val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+	SDE_REG_WRITE(c, csc_reg_off + 0x24, val);
+	val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+	SDE_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+	/* Pre-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+	/* Post-Bias */
+	SDE_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+	SDE_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_util.h b/drivers/gpu/drm/msm/sde/sde_hw_util.h
new file mode 100644
index 000000000000..008b657966b6
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_UTIL_H
+#define _SDE_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "sde_hw_mdss.h"
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length length of register block offset
+ * @xin_id xin id
+ * @hwversion mdss hw version number
+ */
+struct sde_hw_blk_reg_map {
+ void __iomem *base_off;
+ u32 blk_off;
+ u32 length;
+ u32 xin_id;
+ u32 hwversion;
+ u32 log_mask;
+};
+
+u32 *sde_hw_util_get_log_mask_ptr(void);
+
+void sde_reg_write(struct sde_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int sde_reg_read(struct sde_hw_blk_reg_map *c, u32 reg_off);
+
+#define SDE_REG_WRITE(c, off, val) sde_reg_write(c, off, val, #off)
+#define SDE_REG_READ(c, off) sde_reg_read(c, off)
+
+void *sde_hw_util_get_dir(void);
+
+void sde_hw_csc_setup(struct sde_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct sde_csc_cfg *data, bool csc10);
+
+#endif /* _SDE_HW_UTIL_H */
+
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.c b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
new file mode 100644
index 000000000000..048ec47d7c72
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.c
@@ -0,0 +1,169 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_vbif.h"
+#include "sde_dbg.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHTER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+
+/*
+ * sde_hw_set_limit_conf - program the outstanding-transaction limit for
+ * one xin client.  Four 8-bit limit fields are packed into each 32-bit
+ * LIM_CONF register, so register and bit offsets are derived from xin_id.
+ * @rd selects the read (true) or write (false) limit bank.
+ */
+static void sde_hw_set_limit_conf(struct sde_hw_vbif *vbif,
+	u32 xin_id, bool rd, u32 limit)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	/* read-modify-write only this client's 8-bit limit field */
+	reg_val = SDE_REG_READ(c, reg_off);
+	reg_val &= ~(0xFF << bit_off);
+	reg_val |= (limit) << bit_off;
+	SDE_REG_WRITE(c, reg_off, reg_val);
+}
+
+/*
+ * sde_hw_get_limit_conf - read back the 8-bit outstanding-transaction
+ * limit currently programmed for @xin_id (read bank if @rd, else write).
+ * Mirrors the register/bit-field layout used by sde_hw_set_limit_conf.
+ */
+static u32 sde_hw_get_limit_conf(struct sde_hw_vbif *vbif,
+	u32 xin_id, bool rd)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+	u32 reg_off;
+	u32 bit_off;
+	u32 limit;
+
+	if (rd)
+		reg_off = VBIF_IN_RD_LIM_CONF0;
+	else
+		reg_off = VBIF_IN_WR_LIM_CONF0;
+
+	reg_off += (xin_id / 4) * 4;
+	bit_off = (xin_id % 4) * 8;
+	reg_val = SDE_REG_READ(c, reg_off);
+	limit = (reg_val >> bit_off) & 0xFF;
+
+	return limit;
+}
+
+/*
+ * sde_hw_set_halt_ctrl - request (or release) a halt of the given xin
+ * client by toggling its bit in XIN_HALT_CTRL0.
+ */
+static void sde_hw_set_halt_ctrl(struct sde_hw_vbif *vbif,
+	u32 xin_id, bool enable)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+	if (enable)
+		reg_val |= BIT(xin_id);
+	else
+		reg_val &= ~BIT(xin_id);
+
+	SDE_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+/*
+ * sde_hw_get_halt_ctrl - query the halt state of @xin_id.  Note this
+ * intentionally reads XIN_HALT_CTRL1, not CTRL0: CTRL0 carries the halt
+ * request bits and CTRL1 presumably carries the hardware's halt
+ * acknowledgment status (NOTE(review): confirm against the VBIF spec).
+ */
+static bool sde_hw_get_halt_ctrl(struct sde_hw_vbif *vbif,
+	u32 xin_id)
+{
+	struct sde_hw_blk_reg_map *c = &vbif->hw;
+	u32 reg_val;
+
+	reg_val = SDE_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+	return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+/*
+ * _setup_vbif_ops - populate the vbif function table.  All ops are
+ * currently installed unconditionally; @cap is unused here and appears
+ * reserved for future feature-based gating.
+ */
+static void _setup_vbif_ops(struct sde_hw_vbif_ops *ops,
+		unsigned long cap)
+{
+	ops->set_limit_conf = sde_hw_set_limit_conf;
+	ops->get_limit_conf = sde_hw_get_limit_conf;
+	ops->set_halt_ctrl = sde_hw_set_halt_ctrl;
+	ops->get_halt_ctrl = sde_hw_get_halt_ctrl;
+}
+
+/*
+ * _top_offset - look up @vbif in the catalog and fill in the register
+ * block mapping @b for it.  Returns the matching catalog entry, or
+ * ERR_PTR(-EINVAL) if the id is not in the catalog.  Note b->xin_id is
+ * left unset here (zero-initialized by the caller's kzalloc).
+ */
+static const struct sde_vbif_cfg *_top_offset(enum sde_vbif vbif,
+		const struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->vbif_count; i++) {
+		if (vbif == m->vbif[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->vbif[i].base;
+			b->length = m->vbif[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_VBIF;
+			return &m->vbif[i];
+		}
+	}
+
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * sde_hw_vbif_init - allocate and initialize a vbif driver context for
+ * catalog entry @idx.  Returns an ERR_PTR on allocation failure or when
+ * @idx is not present in the catalog; callers must use IS_ERR().  The
+ * returned object is freed with sde_hw_vbif_destroy().
+ */
+struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
+		void __iomem *addr,
+		const struct sde_mdss_cfg *m)
+{
+	struct sde_hw_vbif *c;
+	const struct sde_vbif_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _top_offset(idx, m, addr, &c->hw);
+	if (IS_ERR_OR_NULL(cfg)) {
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/*
+	 * Assign ops
+	 */
+	c->idx = idx;
+	c->cap = cfg;
+	_setup_vbif_ops(&c->ops, c->cap->features);
+
+	/* no need to register sub-range in sde dbg, dump entire vbif io base */
+
+	return c;
+}
+
+/* sde_hw_vbif_destroy - release a context allocated by sde_hw_vbif_init */
+void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif)
+{
+	kfree(vbif);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_vbif.h b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
new file mode 100644
index 000000000000..de7fac0ed8f2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_vbif.h
@@ -0,0 +1,90 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_VBIF_H
+#define _SDE_HW_VBIF_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_vbif;
+
+/**
+ * struct sde_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct sde_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct sde_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct sde_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct sde_hw_vbif *vbif,
+ u32 xin_id);
+};
+
+/**
+ * struct sde_hw_vbif - VBIF driver object
+ * @hw:  register block mapping for this vbif instance
+ * @idx: vbif instance id from the catalog
+ * @cap: catalog entry describing this instance's capabilities
+ * @ops: function table installed by sde_hw_vbif_init()
+ */
+struct sde_hw_vbif {
+	/* base */
+	struct sde_hw_blk_reg_map hw;
+
+	/* vbif */
+	enum sde_vbif idx;
+	const struct sde_vbif_cfg *cap;
+
+	/* ops */
+	struct sde_hw_vbif_ops ops;
+};
+
+/**
+ * sde_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ */
+struct sde_hw_vbif *sde_hw_vbif_init(enum sde_vbif idx,
+ void __iomem *addr,
+ const struct sde_mdss_cfg *m);
+
+void sde_hw_vbif_destroy(struct sde_hw_vbif *vbif);
+
+#endif /*_SDE_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.c b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
new file mode 100644
index 000000000000..320b05f67669
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.c
@@ -0,0 +1,229 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "sde_hw_mdss.h"
+#include "sde_hwio.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_wb.h"
+#include "sde_formats.h"
+#include "sde_dbg.h"
+
+/* writeback block register offsets, relative to the WB block base */
+#define WB_DST_FORMAT                 0x000
+#define WB_DST_OP_MODE                0x004
+#define WB_DST_PACK_PATTERN           0x008
+#define WB_DST0_ADDR                  0x00C
+#define WB_DST1_ADDR                  0x010
+#define WB_DST2_ADDR                  0x014
+#define WB_DST3_ADDR                  0x018
+#define WB_DST_YSTRIDE0               0x01C
+#define WB_DST_YSTRIDE1               0x020
+#define WB_DST_DITHER_BITDEPTH        0x024
+#define WB_DST_MATRIX_ROW0            0x030
+#define WB_DST_MATRIX_ROW1            0x034
+#define WB_DST_MATRIX_ROW2            0x038
+#define WB_DST_MATRIX_ROW3            0x03C
+#define WB_DST_WRITE_CONFIG           0x048
+#define WB_ROTATION_DNSCALER          0x050
+#define WB_ROTATOR_PIPE_DOWNSCALER    0x054
+#define WB_N16_INIT_PHASE_X_C03       0x060
+#define WB_N16_INIT_PHASE_X_C12       0x064
+#define WB_N16_INIT_PHASE_Y_C03       0x068
+#define WB_N16_INIT_PHASE_Y_C12       0x06C
+#define WB_OUT_SIZE                   0x074
+#define WB_ALPHA_X_VALUE              0x078
+#define WB_CSC_BASE                   0x260
+#define WB_DST_ADDR_SW_STATUS         0x2B0
+#define WB_CDP_CTRL                   0x2B4
+#define WB_OUT_IMAGE_SIZE             0x2C0
+#define WB_OUT_XY                     0x2C4
+
+/*
+ * _wb_offset - look up writeback id @wb in the catalog and fill in the
+ * register block mapping @b.  Returns the matching catalog entry, or
+ * ERR_PTR(-EINVAL) when the id is unknown.
+ */
+static struct sde_wb_cfg *_wb_offset(enum sde_wb wb,
+		struct sde_mdss_cfg *m,
+		void __iomem *addr,
+		struct sde_hw_blk_reg_map *b)
+{
+	int i;
+
+	for (i = 0; i < m->wb_count; i++) {
+		if (wb == m->wb[i].id) {
+			b->base_off = addr;
+			b->blk_off = m->wb[i].base;
+			b->length = m->wb[i].len;
+			b->hwversion = m->hwversion;
+			b->log_mask = SDE_DBG_MASK_WB;
+			return &m->wb[i];
+		}
+	}
+	return ERR_PTR(-EINVAL);
+}
+
+/*
+ * sde_hw_wb_setup_outaddress - program the four destination plane base
+ * addresses for the writeback output buffer.
+ */
+static void sde_hw_wb_setup_outaddress(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+
+	SDE_REG_WRITE(c, WB_DST0_ADDR, data->dest.plane_addr[0]);
+	SDE_REG_WRITE(c, WB_DST1_ADDR, data->dest.plane_addr[1]);
+	SDE_REG_WRITE(c, WB_DST2_ADDR, data->dest.plane_addr[2]);
+	SDE_REG_WRITE(c, WB_DST3_ADDR, data->dest.plane_addr[3]);
+}
+
+/*
+ * sde_hw_wb_setup_format - program destination format, pack pattern,
+ * strides, output size, write config, secure flag and CDP settings for a
+ * writeback job.  Register bit positions below encode the hardware's
+ * WB_DST_FORMAT layout; see the field shifts inline.
+ */
+static void sde_hw_wb_setup_format(struct sde_hw_wb *ctx,
+		struct sde_hw_wb_cfg *data)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	const struct sde_format *fmt = data->dest.format;
+	u32 dst_format, pattern, ystride0, ystride1, outsize, chroma_samp;
+	u32 write_config = 0;
+	u32 opmode = 0;
+	u32 dst_addr_sw = 0;
+	u32 cdp_settings = 0x0;
+
+	chroma_samp = fmt->chroma_sample;
+
+	dst_format = (chroma_samp << 23) |
+		(fmt->fetch_planes << 19) |
+		(fmt->bits[C3_ALPHA] << 6) |
+		(fmt->bits[C2_R_Cr] << 4) |
+		(fmt->bits[C1_B_Cb] << 2) |
+		(fmt->bits[C0_G_Y] << 0);
+
+	if (fmt->bits[C3_ALPHA] || fmt->alpha_enable) {
+		dst_format |= BIT(8); /* DSTC3_EN */
+		if (!fmt->alpha_enable ||
+				!(ctx->caps->features & BIT(SDE_WB_PIPE_ALPHA)))
+			dst_format |= BIT(14); /* DST_ALPHA_X */
+	}
+
+	if (SDE_FORMAT_IS_YUV(fmt) &&
+			(ctx->caps->features & BIT(SDE_WB_YUV_CONFIG)))
+		dst_format |= BIT(15);
+
+	if (SDE_FORMAT_IS_DX(fmt))
+		dst_format |= BIT(21);
+
+	pattern = (fmt->element[3] << 24) |
+		(fmt->element[2] << 16) |
+		(fmt->element[1] << 8) |
+		(fmt->element[0] << 0);
+
+	dst_format |= (fmt->unpack_align_msb << 18) |
+		(fmt->unpack_tight << 17) |
+		((fmt->unpack_count - 1) << 12) |
+		((fmt->bpp - 1) << 9);
+
+	ystride0 = data->dest.plane_pitch[0] |
+		(data->dest.plane_pitch[1] << 16);
+	ystride1 = data->dest.plane_pitch[2] |
+		(data->dest.plane_pitch[3] << 16);
+
+	/* prefer the ROI dimensions when a valid ROI is supplied */
+	if (data->roi.h && data->roi.w)
+		outsize = (data->roi.h << 16) | data->roi.w;
+	else
+		outsize = (data->dest.height << 16) | data->dest.width;
+
+	if (SDE_FORMAT_IS_UBWC(fmt)) {
+		opmode |= BIT(0);
+		dst_format |= BIT(31);
+		if (ctx->highest_bank_bit)
+			write_config |= (ctx->highest_bank_bit << 8);
+		if (fmt->base.pixel_format == DRM_FORMAT_RGB565)
+			write_config |= 0x8;
+	}
+
+	if (data->is_secure)
+		dst_addr_sw |= BIT(0);
+
+	SDE_REG_WRITE(c, WB_ALPHA_X_VALUE, 0xFF);
+	SDE_REG_WRITE(c, WB_DST_FORMAT, dst_format);
+	SDE_REG_WRITE(c, WB_DST_OP_MODE, opmode);
+	SDE_REG_WRITE(c, WB_DST_PACK_PATTERN, pattern);
+	SDE_REG_WRITE(c, WB_DST_YSTRIDE0, ystride0);
+	SDE_REG_WRITE(c, WB_DST_YSTRIDE1, ystride1);
+	SDE_REG_WRITE(c, WB_OUT_SIZE, outsize);
+	SDE_REG_WRITE(c, WB_DST_WRITE_CONFIG, write_config);
+	SDE_REG_WRITE(c, WB_DST_ADDR_SW_STATUS, dst_addr_sw);
+
+	/* Enable CDP */
+	cdp_settings = BIT(0);
+
+	if (!SDE_FORMAT_IS_LINEAR(fmt))
+		cdp_settings |= BIT(1);
+
+	/* Enable 64 transactions if line mode*/
+	if (data->intf_mode == INTF_MODE_WB_LINE)
+		cdp_settings |= BIT(3);
+
+	SDE_REG_WRITE(c, WB_CDP_CTRL, cdp_settings);
+}
+
+/*
+ * sde_hw_wb_roi - program the output image size and the ROI crop
+ * rectangle (offset and size) for writeback.
+ */
+static void sde_hw_wb_roi(struct sde_hw_wb *ctx, struct sde_hw_wb_cfg *wb)
+{
+	struct sde_hw_blk_reg_map *c = &ctx->hw;
+	u32 image_size, out_size, out_xy;
+
+	image_size = (wb->dest.height << 16) | wb->dest.width;
+	out_xy = (wb->roi.y << 16) | wb->roi.x;
+	out_size = (wb->roi.h << 16) | wb->roi.w;
+
+	SDE_REG_WRITE(c, WB_OUT_IMAGE_SIZE, image_size);
+	SDE_REG_WRITE(c, WB_OUT_XY, out_xy);
+	SDE_REG_WRITE(c, WB_OUT_SIZE, out_size);
+}
+
+/*
+ * _setup_wb_ops - populate the writeback function table; setup_roi is
+ * only installed when the catalog advertises XY-ROI support.
+ */
+static void _setup_wb_ops(struct sde_hw_wb_ops *ops,
+	unsigned long features)
+{
+	ops->setup_outaddress = sde_hw_wb_setup_outaddress;
+	ops->setup_outformat = sde_hw_wb_setup_format;
+
+	if (test_bit(SDE_WB_XY_ROI_OFFSET, &features))
+		ops->setup_roi = sde_hw_wb_roi;
+}
+
+/*
+ * sde_hw_wb_init - allocate and initialize a writeback driver context
+ * for catalog entry @idx and register its register range with the sde
+ * debug facility.  Returns ERR_PTR(-ENOMEM) or ERR_PTR(-EINVAL) on
+ * failure; release with sde_hw_wb_destroy().
+ */
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+		void __iomem *addr,
+		struct sde_mdss_cfg *m,
+		struct sde_hw_mdp *hw_mdp)
+{
+	struct sde_hw_wb *c;
+	struct sde_wb_cfg *cfg;
+
+	c = kzalloc(sizeof(*c), GFP_KERNEL);
+	if (!c)
+		return ERR_PTR(-ENOMEM);
+
+	cfg = _wb_offset(idx, m, addr, &c->hw);
+	if (IS_ERR(cfg)) {
+		WARN(1, "Unable to find wb idx=%d\n", idx);
+		kfree(c);
+		return ERR_PTR(-EINVAL);
+	}
+
+	/* Assign ops */
+	c->idx = idx;
+	c->caps = cfg;
+	_setup_wb_ops(&c->ops, c->caps->features);
+	c->highest_bank_bit = m->mdp[0].highest_bank_bit;
+	c->hw_mdp = hw_mdp;
+
+	sde_dbg_reg_register_dump_range(SDE_DBG_NAME, cfg->name, c->hw.blk_off,
+			c->hw.blk_off + c->hw.length, c->hw.xin_id);
+
+	return c;
+}
+
+/* sde_hw_wb_destroy - release a context allocated by sde_hw_wb_init */
+void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb)
+{
+	kfree(hw_wb);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_hw_wb.h b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
new file mode 100644
index 000000000000..52a5ee5b06a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hw_wb.h
@@ -0,0 +1,105 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HW_WB_H
+#define _SDE_HW_WB_H
+
+#include "sde_hw_catalog.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_top.h"
+#include "sde_hw_util.h"
+
+struct sde_hw_wb;
+
+/**
+ * struct sde_hw_wb_cfg - writeback job configuration
+ * @dest:      destination buffer layout (format, addresses, pitches)
+ * @intf_mode: writeback interface mode (line/block)
+ * @ts_cfg:    traffic shaper configuration
+ * @roi:       output region of interest rectangle
+ * @is_secure: true when writing to a secure buffer
+ */
+struct sde_hw_wb_cfg {
+	struct sde_hw_fmt_layout dest;
+	enum sde_intf_mode intf_mode;
+	struct traffic_shaper_cfg ts_cfg;
+	struct sde_rect roi;
+	bool is_secure;
+};
+
+/**
+ *
+ * struct sde_hw_wb_ops : Interface to the wb Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct sde_hw_wb_ops {
+ void (*setup_csc_data)(struct sde_hw_wb *ctx,
+ struct sde_csc_cfg *data);
+
+ void (*setup_outaddress)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_outformat)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_rotator)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_dither)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_cdwn)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_trafficshaper)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+
+ void (*setup_roi)(struct sde_hw_wb *ctx,
+ struct sde_hw_wb_cfg *wb);
+};
+
+/**
+ * struct sde_hw_wb : WB driver object
+ * @struct sde_hw_blk_reg_map *hw;
+ * @idx
+ * @wb_hw_caps
+ * @ops
+ * @highest_bank_bit: GPU highest memory bank bit used
+ * @hw_mdp: MDP top level hardware block
+ */
+struct sde_hw_wb {
+ /* base */
+ struct sde_hw_blk_reg_map hw;
+
+ /* wb path */
+ int idx;
+ const struct sde_wb_cfg *caps;
+
+ /* ops */
+ struct sde_hw_wb_ops ops;
+
+ u32 highest_bank_bit;
+
+ struct sde_hw_mdp *hw_mdp;
+};
+
+/**
+ * sde_hw_wb_init(): Initializes and return writeback hw driver object.
+ * @idx: wb_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct sde_hw_wb *sde_hw_wb_init(enum sde_wb idx,
+ void __iomem *addr,
+ struct sde_mdss_cfg *m,
+ struct sde_hw_mdp *hw_mdp);
+
+/**
+ * sde_hw_wb_destroy(): Destroy writeback hw driver object.
+ * @hw_wb: Pointer to writeback hw driver object
+ */
+void sde_hw_wb_destroy(struct sde_hw_wb *hw_wb);
+
+#endif /*_SDE_HW_WB_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_hwio.h b/drivers/gpu/drm/msm/sde/sde_hwio.h
new file mode 100644
index 000000000000..c95bace3a004
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_hwio.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _SDE_HWIO_H
+#define _SDE_HWIO_H
+
+#include "sde_hw_util.h"
+
+/**
+ * MDP TOP block Register and bit fields and defines
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define PPB0_CNTL 0x330
+#define PPB0_CONFIG 0x334
+#define PPB1_CNTL 0x338
+#define PPB1_CONFIG 0x33C
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_SDE_HWIO_H */
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.c b/drivers/gpu/drm/msm/sde/sde_irq.c
new file mode 100644
index 000000000000..7864b9fef87b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_irq.c
@@ -0,0 +1,112 @@
+/* Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "sde_irq.h"
+#include "sde_core_irq.h"
+
+static uint32_t g_sde_irq_status;
+
+/*
+ * sde_irq - top-level MDSS interrupt handler.  Dispatches the MDP source
+ * to the sde core handler and routes every other pending source to the
+ * external driver registered in the MDSS irq domain.  Returns IRQ_NONE
+ * if any source has no mapping or its handler fails.
+ */
+irqreturn_t sde_irq(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	u32 interrupts;
+
+	sde_kms->hw_intr->ops.get_interrupt_sources(sde_kms->hw_intr,
+			&interrupts);
+
+	/* store irq status in case of irq-storm debugging */
+	g_sde_irq_status = interrupts;
+
+	/*
+	 * Taking care of MDP interrupt
+	 */
+	if (interrupts & IRQ_SOURCE_MDP) {
+		interrupts &= ~IRQ_SOURCE_MDP;
+		sde_core_irq(sde_kms);
+	}
+
+	/*
+	 * Routing all other interrupts to external drivers
+	 */
+	while (interrupts) {
+		irq_hw_number_t hwirq = fls(interrupts) - 1;
+		unsigned int mapping;
+		int rc;
+
+		mapping = irq_find_mapping(sde_kms->irq_controller.domain,
+				hwirq);
+		if (mapping == 0) {
+			SDE_EVT32(hwirq, SDE_EVTLOG_ERROR);
+			goto error;
+		}
+
+		rc = generic_handle_irq(mapping);
+		if (rc < 0) {
+			SDE_EVT32(hwirq, mapping, rc, SDE_EVTLOG_ERROR);
+			goto error;
+		}
+
+		/*
+		 * Use an unsigned shift: "1 << hwirq" is undefined
+		 * behavior for a signed int when hwirq is 31.
+		 */
+		interrupts &= ~(1U << hwirq);
+	}
+
+	return IRQ_HANDLED;
+
+error:
+	/* bad situation, inform irq system, it may disable overall MDSS irq */
+	return IRQ_NONE;
+}
+
+/*
+ * sde_irq_preinstall - msm_kms hook run before the MDSS irq handler is
+ * installed; forwards to the sde core irq preinstall.
+ */
+void sde_irq_preinstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+
+	/* guard against a NULL kms, consistent with the other irq hooks */
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	if (!sde_kms->dev || !sde_kms->dev->dev) {
+		pr_err("invalid device handles\n");
+		return;
+	}
+
+	sde_core_irq_preinstall(sde_kms);
+}
+
+/*
+ * sde_irq_postinstall - msm_kms hook run after the MDSS irq handler is
+ * installed; forwards to the sde core irq postinstall.  Returns 0 on
+ * success or a negative error code.
+ */
+int sde_irq_postinstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	int rc;
+
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	rc = sde_core_irq_postinstall(sde_kms);
+
+	return rc;
+}
+
+/*
+ * sde_irq_uninstall - msm_kms hook: tear down the sde core irq handling
+ * and release the MDSS irq domain.
+ */
+void sde_irq_uninstall(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+
+	if (!kms) {
+		SDE_ERROR("invalid parameters\n");
+		return;
+	}
+
+	sde_core_irq_uninstall(sde_kms);
+	sde_core_irq_domain_fini(sde_kms);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_irq.h b/drivers/gpu/drm/msm/sde/sde_irq.h
new file mode 100644
index 000000000000..e10900719f3f
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_IRQ_H__
+#define __SDE_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * sde_irq_controller - define MDSS level interrupt controller context
+ * @enabled_mask: enable status of MDSS level interrupt
+ * @domain: interrupt domain of this controller
+ */
+struct sde_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
+/**
+ * sde_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void sde_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * sde_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int sde_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * sde_irq_uninstall - uninstall MDSS IRQ handler
+ * @drm_dev: pointer to kms context
+ * @return: none
+ */
+void sde_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * sde_irq - MDSS level IRQ handler
+ * @kms: pointer to kms context
+ * @return: interrupt handling status
+ */
+irqreturn_t sde_irq(struct msm_kms *kms);
+
+#endif /* __SDE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.c b/drivers/gpu/drm/msm/sde/sde_kms.c
new file mode 100644
index 000000000000..0aa4729ac30d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms.c
@@ -0,0 +1,1677 @@
+/*
+ * Copyright (c) 2014-2019, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+
+#include "dsi_display.h"
+#include "dsi_drm.h"
+#include "sde_wb.h"
+#include "sde_hdmi.h"
+#include "sde_shd.h"
+
+#include "sde_kms.h"
+#include "sde_core_irq.h"
+#include "sde_formats.h"
+#include "sde_hw_vbif.h"
+#include "sde_vbif.h"
+#include "sde_encoder.h"
+#include "sde_plane.h"
+#include "sde_crtc.h"
+#include "sde_recovery_manager.h"
+
+#define CREATE_TRACE_POINTS
+#include "sde_trace.h"
+
+/**
+ * Controls size of event log buffer. Specified as a power of 2.
+ */
+#define SDE_EVTLOG_SIZE 1024
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/hw_log_mask
+ *
+ * See sde_hw_mdss.h for h/w logging mask definitions (search for SDE_DBG_MASK_)
+ */
+#define SDE_DEBUGFS_DIR "msm_sde"
+#define SDE_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int sde_kms_recovery_callback(int err_code,
+ struct recovery_client_info *client_info);
+
+/* recovery-manager registration data: errors sde_kms can recover from */
+static struct recovery_client_info info = {
+	.name = "sde_kms",
+	.recovery_cb = sde_kms_recovery_callback,
+	.err_supported[0] = {SDE_UNDERRUN, 0, 0},
+	.err_supported[1] = {SDE_VSYNC_MISS, 0, 0},
+	.err_supported[2] = {SDE_SMMU_FAULT, 0, 0},
+	.no_of_err = 3,
+	.handle = NULL,
+	.pdata = NULL,
+};
+
+/**
+ * sdecustom - enable certain driver customizations for sde clients
+ * Enabling this modifies the standard DRM behavior slightly and assumes
+ * that the clients have specific knowledge about the modifications that
+ * are involved, so don't enable this unless you know what you're doing.
+ *
+ * Parts of the driver that are affected by this setting may be located by
+ * searching for invocations of the 'sde_is_custom_client()' function.
+ *
+ * This is disabled by default.
+ */
+static bool sdecustom = true;
+module_param(sdecustom, bool, 0400);
+MODULE_PARM_DESC(sdecustom, "Enable customizations for sde clients");
+
+static int sde_kms_hw_init(struct msm_kms *kms);
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms);
+
+bool sde_is_custom_client(void)
+{
+ return sdecustom;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/*
+ * _sde_danger_signal_status - debugfs seq_file backend that dumps the
+ * MDP/SSPP/WB danger or safe signal counters.  @danger_status selects
+ * which set is queried and printed.  Always returns 0 (seq_file ops
+ * treat the output, not the return, as the payload).
+ */
+static int _sde_danger_signal_status(struct seq_file *s,
+		bool danger_status)
+{
+	struct sde_kms *kms = (struct sde_kms *)s->private;
+	struct msm_drm_private *priv;
+	struct sde_danger_safe_status status;
+	int i;
+
+	if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+		SDE_ERROR("invalid arg(s)\n");
+		return 0;
+	}
+
+	priv = kms->dev->dev_private;
+	memset(&status, 0, sizeof(struct sde_danger_safe_status));
+
+	sde_power_resource_enable(&priv->phandle, kms->core_client, true);
+	if (danger_status) {
+		seq_puts(s, "\nDanger signal status:\n");
+		if (kms->hw_mdp->ops.get_danger_status)
+			kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+					&status);
+	} else {
+		seq_puts(s, "\nSafe signal status:\n");
+		/*
+		 * Query the safe status here: the original code
+		 * copy-pasted get_danger_status into this branch, so the
+		 * "safe_status" debugfs node reported danger counters.
+		 */
+		if (kms->hw_mdp->ops.get_safe_status)
+			kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+					&status);
+	}
+	sde_power_resource_enable(&priv->phandle, kms->core_client, false);
+
+	seq_printf(s, "MDP     :   0x%x\n", status.mdp);
+
+	for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+		seq_printf(s, "SSPP%d   :   0x%x  \t", i - SSPP_VIG0,
+				status.sspp[i]);
+	seq_puts(s, "\n");
+
+	for (i = WB_0; i < WB_MAX; i++)
+		seq_printf(s, "WB%d     :   0x%x  \t", i - WB_0,
+				status.wb[i]);
+	seq_puts(s, "\n");
+
+	return 0;
+}
+
+/*
+ * DEFINE_SDE_DEBUGFS_SEQ_FOPS - expand a single-open seq_file
+ * file_operations table plus its open() wrapper for __prefix_show().
+ */
+#define DEFINE_SDE_DEBUGFS_SEQ_FOPS(__prefix)				\
+static int __prefix ## _open(struct inode *inode, struct file *file)	\
+{									\
+	return single_open(file, __prefix ## _show, inode->i_private);	\
+}									\
+static const struct file_operations __prefix ## _fops = {		\
+	.owner = THIS_MODULE,						\
+	.open = __prefix ## _open,					\
+	.release = single_release,					\
+	.read = seq_read,						\
+	.llseek = seq_lseek,						\
+}
+
+/* seq_file show: dump danger signal status */
+static int sde_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, true);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_danger_stats);
+
+/* seq_file show: dump safe signal status */
+static int sde_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+	return _sde_danger_signal_status(s, false);
+}
+DEFINE_SDE_DEBUGFS_SEQ_FOPS(sde_debugfs_safe_stats);
+
+/* remove the "danger" debugfs directory and its files */
+static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_danger);
+	sde_kms->debugfs_danger = NULL;
+}
+
+/*
+ * sde_debugfs_danger_init - create the "danger" debugfs directory under
+ * @parent with danger_status/safe_status nodes.  Returns 0 on success
+ * or -EINVAL when the directory cannot be created.
+ */
+static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	sde_kms->debugfs_danger = debugfs_create_dir("danger",
+			parent);
+	if (!sde_kms->debugfs_danger) {
+		SDE_ERROR("failed to create danger debugfs\n");
+		return -EINVAL;
+	}
+
+	debugfs_create_file("danger_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_danger_stats_fops);
+	debugfs_create_file("safe_status", 0644, sde_kms->debugfs_danger,
+			sde_kms, &sde_debugfs_safe_stats_fops);
+
+	return 0;
+}
+
+/*
+ * _sde_debugfs_show_regset32 - seq_file backend that hex-dumps the
+ * register window described by the sde_debugfs_regset32 in s->private,
+ * 16 bytes per row, with sde power enabled around the reads.  Returns 0
+ * unconditionally; errors are reported in the output text.
+ */
+static int _sde_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+	struct sde_debugfs_regset32 *regset;
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	void __iomem *base;
+	uint32_t i, addr;
+
+	if (!s || !s->private)
+		return 0;
+
+	regset = s->private;
+
+	sde_kms = regset->sde_kms;
+	if (!sde_kms || !sde_kms->mmio)
+		return 0;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return 0;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return 0;
+
+	base = sde_kms->mmio + regset->offset;
+
+	/* insert padding spaces, if needed */
+	if (regset->offset & 0xF) {
+		seq_printf(s, "[%x]", regset->offset & ~0xF);
+		for (i = 0; i < (regset->offset & 0xF); i += 4)
+			seq_puts(s, "         ");
+	}
+
+	if (sde_power_resource_enable(&priv->phandle,
+				sde_kms->core_client, true)) {
+		seq_puts(s, "failed to enable sde clocks\n");
+		return 0;
+	}
+
+	/* main register output */
+	for (i = 0; i < regset->blk_len; i += 4) {
+		addr = regset->offset + i;
+		if ((addr & 0xF) == 0x0)
+			seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+		seq_printf(s, " %08x", readl_relaxed(base + i));
+	}
+	seq_puts(s, "\n");
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	return 0;
+}
+
+/* single_open wrapper wiring the regset show routine to a debugfs file */
+static int sde_debugfs_open_regset32(struct inode *inode,
+		struct file *file)
+{
+	return single_open(file, _sde_debugfs_show_regset32, inode->i_private);
+}
+
+/* file operations for register-window debugfs nodes */
+static const struct file_operations sde_fops_regset32 = {
+	.open =		sde_debugfs_open_regset32,
+	.read =		seq_read,
+	.llseek =	seq_lseek,
+	.release =	single_release,
+};
+
+/*
+ * sde_debugfs_setup_regset32 - initialize a regset descriptor with the
+ * register window (@offset, @length) and owning kms; no-op when @regset
+ * is NULL.
+ */
+void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
+		uint32_t offset, uint32_t length, struct sde_kms *sde_kms)
+{
+	if (regset) {
+		regset->offset = offset;
+		regset->blk_len = length;
+		regset->sde_kms = sde_kms;
+	}
+}
+
+/*
+ * sde_debugfs_create_regset32 - create a debugfs file that dumps the
+ * register window described by @regset.  Returns the dentry (as void *)
+ * or NULL on invalid arguments.  The offset is rounded down to a 32-bit
+ * register boundary before use.
+ */
+void *sde_debugfs_create_regset32(const char *name, umode_t mode,
+		void *parent, struct sde_debugfs_regset32 *regset)
+{
+	if (!name || !regset || !regset->sde_kms || !regset->blk_len)
+		return NULL;
+
+	/* make sure offset is a multiple of 4 */
+	regset->offset = round_down(regset->offset, 4);
+
+	return debugfs_create_file(name, mode, parent,
+			regset, &sde_fops_regset32);
+}
+
+/* return the sde debugfs root dentry, or 0 when @sde_kms is NULL */
+void *sde_debugfs_get_root(struct sde_kms *sde_kms)
+{
+	return sde_kms ? sde_kms->debugfs_root : 0;
+}
+
+/*
+ * _sde_debugfs_init - create the sde debugfs hierarchy: the hw log mask
+ * node at the root plus a "debug" directory populated with the danger
+ * and vbif sub-trees.  Returns 0 on success, -EINVAL on bad arguments.
+ */
+static int _sde_debugfs_init(struct sde_kms *sde_kms)
+{
+	void *p;
+
+	p = sde_hw_util_get_log_mask_ptr();
+
+	if (!sde_kms || !p)
+		return -EINVAL;
+
+	if (sde_kms->dev && sde_kms->dev->primary)
+		sde_kms->debugfs_root = sde_kms->dev->primary->debugfs_root;
+	else
+		/* use NULL, not 0, for the dentry pointer argument */
+		sde_kms->debugfs_root = debugfs_create_dir(SDE_DEBUGFS_DIR,
+				NULL);
+
+	/* allow debugfs_root to be NULL */
+	debugfs_create_x32(SDE_DEBUGFS_HWMASKNAME,
+			0644, sde_kms->debugfs_root, p);
+
+	/* create common folder for debug information */
+	sde_kms->debugfs_debug = debugfs_create_dir("debug",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_debug)
+		SDE_ERROR("failed to create debugfs debug directory\n");
+
+	sde_debugfs_danger_init(sde_kms, sde_kms->debugfs_debug);
+	sde_debugfs_vbif_init(sde_kms, sde_kms->debugfs_debug);
+
+	return 0;
+}
+
+/*
+ * Tear down the sde debugfs hierarchy in reverse creation order.
+ * debugfs_remove_recursive() accepts NULL, so repeated calls are safe.
+ * Use NULL (not 0) when clearing pointer members.
+ */
+static void _sde_debugfs_destroy(struct sde_kms *sde_kms)
+{
+	/* don't need to NULL check debugfs_root */
+	if (sde_kms) {
+		sde_debugfs_vbif_destroy(sde_kms);
+		sde_debugfs_danger_destroy(sde_kms);
+		debugfs_remove_recursive(sde_kms->debugfs_debug);
+		sde_kms->debugfs_debug = NULL;
+		debugfs_remove_recursive(sde_kms->debugfs_root);
+		sde_kms->debugfs_root = NULL;
+	}
+}
+#else
+/* No-op stub when debugfs is disabled.
+ * NOTE(review): this stub takes (sde_kms, parent) while the debugfs-enabled
+ * path calls sde_debugfs_danger_destroy(sde_kms) with one argument — the
+ * signatures look inconsistent; verify against the enabled-side prototype.
+ */
+static void sde_debugfs_danger_destroy(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+}
+
+/* No-op stub when debugfs is disabled; always reports success */
+static int sde_debugfs_danger_init(struct sde_kms *sde_kms,
+		struct dentry *parent)
+{
+	return 0;
+}
+#endif
+
+/* msm_kms hook: enable vblank irq delivery for @crtc */
+static int sde_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	return sde_crtc_vblank(crtc, true);
+}
+
+/* msm_kms hook: disable vblank irq delivery for @crtc */
+static void sde_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+	sde_crtc_vblank(crtc, false);
+}
+
+/*
+ * Pre-commit hook: power up the core before hw programming, hand off
+ * from the bootloader (LK) splash if still active, then let shared
+ * displays do their pre-commit work. The matching power vote release
+ * happens in sde_kms_complete_commit().
+ */
+static void sde_kms_prepare_commit(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+
+	/* NOTE(review): return value of power enable is ignored here */
+	sde_power_resource_enable(&priv->phandle,
+			sde_kms->core_client, true);
+
+	if (sde_kms->splash_info.handoff &&
+		sde_kms->splash_info.display_splash_enabled)
+		sde_splash_lk_stop_splash(kms, state);
+
+	shd_display_prepare_commit(sde_kms, state);
+}
+
+/* Kick off the hw flush for every active crtc touched by this commit */
+static void sde_kms_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i) {
+		if (crtc->state->active) {
+			SDE_EVT32(DRMID(crtc));
+			sde_crtc_commit_kickoff(crtc);
+		}
+	}
+}
+
+/*
+ * Post-commit hook: signal per-crtc completion (fences, events), finish
+ * shared-display bookkeeping, then drop the power vote taken in
+ * sde_kms_prepare_commit().
+ */
+static void sde_kms_complete_commit(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i;
+
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_complete_commit(crtc, old_crtc_state);
+
+	shd_display_complete_commit(sde_kms, old_state);
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	SDE_EVT32(SDE_EVTLOG_FUNC_EXIT);
+}
+
+/*
+ * Block until the flushed frame for @crtc has taken effect in hw.
+ * Holds a vblank reference across the wait so the irq stays enabled;
+ * skips crtcs that are not enabled/active. -EWOULDBLOCK from the
+ * encoder wait is treated as benign (nothing pending).
+ */
+static void sde_kms_wait_for_commit_done(struct msm_kms *kms,
+		struct drm_crtc *crtc)
+{
+	struct drm_encoder *encoder;
+	struct drm_device *dev;
+	int ret;
+
+	dev = crtc->dev;
+	if (!dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	}
+
+	if (!crtc->state->enable) {
+		SDE_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+		return;
+	}
+
+	if (!crtc->state->active) {
+		SDE_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+		return;
+	}
+
+	ret = drm_crtc_vblank_get(crtc);
+	if (ret)
+		return;
+
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		if (encoder->crtc != crtc)
+			continue;
+		/*
+		 * Wait post-flush if necessary to delay before plane_cleanup
+		 * For example, wait for vsync in case of video mode panels
+		 * This should be a no-op for command mode panels
+		 */
+		SDE_EVT32(DRMID(crtc));
+		ret = sde_encoder_wait_for_commit_done(encoder);
+		if (ret && ret != -EWOULDBLOCK) {
+			SDE_ERROR("wait for commit done returned %d\n", ret);
+			break;
+		}
+	}
+
+	drm_crtc_vblank_put(crtc);
+}
+
+/*
+ * Prepare output fences ahead of the commit. Takes the modeset
+ * connection_mutex under the atomic acquire context, retrying on
+ * ww-mutex deadlock backoff, then primes each crtc's fence state.
+ */
+static void sde_kms_prepare_fence(struct msm_kms *kms,
+		struct drm_atomic_state *old_state)
+{
+	struct drm_crtc *crtc;
+	struct drm_crtc_state *old_crtc_state;
+	int i, rc;
+
+	if (!kms || !old_state || !old_state->dev || !old_state->acquire_ctx) {
+		SDE_ERROR("invalid argument(s)\n");
+		return;
+	}
+
+retry:
+	/* attempt to acquire ww mutex for connection */
+	rc = drm_modeset_lock(&old_state->dev->mode_config.connection_mutex,
+			       old_state->acquire_ctx);
+
+	if (rc == -EDEADLK) {
+		drm_modeset_backoff(old_state->acquire_ctx);
+		goto retry;
+	}
+
+	/* old_state actually contains updated crtc pointers */
+	for_each_crtc_in_state(old_state, crtc, old_crtc_state, i)
+		sde_crtc_prepare_commit(crtc, old_crtc_state);
+}
+
+/**
+ * _sde_kms_get_displays - query for underlying display handles and cache them
+ * @sde_kms: Pointer to sde kms structure
+ * Returns: Zero on success
+ */
+static int _sde_kms_get_displays(struct sde_kms *sde_kms)
+{
+	int rc = -ENOMEM;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return -EINVAL;
+	}
+
+	/* dsi */
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = dsi_display_get_num_of_displays();
+	if (sde_kms->dsi_display_count) {
+		sde_kms->dsi_displays = kcalloc(sde_kms->dsi_display_count,
+					sizeof(void *),
+					GFP_KERNEL);
+		if (!sde_kms->dsi_displays) {
+			SDE_ERROR("failed to allocate dsi displays\n");
+			goto exit_deinit_dsi;
+		}
+		sde_kms->dsi_display_count =
+			dsi_display_get_active_displays(sde_kms->dsi_displays,
+					sde_kms->dsi_display_count);
+	}
+
+	/* wb */
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = sde_wb_get_num_of_displays();
+	if (sde_kms->wb_display_count) {
+		sde_kms->wb_displays = kcalloc(sde_kms->wb_display_count,
+					sizeof(void *),
+					GFP_KERNEL);
+		if (!sde_kms->wb_displays) {
+			SDE_ERROR("failed to allocate wb displays\n");
+			goto exit_deinit_wb;
+		}
+		sde_kms->wb_display_count =
+			wb_display_get_displays(sde_kms->wb_displays,
+					sde_kms->wb_display_count);
+	}
+
+	/* hdmi */
+	sde_kms->hdmi_displays = NULL;
+	sde_kms->hdmi_display_count = sde_hdmi_get_num_of_displays();
+	SDE_DEBUG("hdmi display count=%d", sde_kms->hdmi_display_count);
+	if (sde_kms->hdmi_display_count) {
+		sde_kms->hdmi_displays = kcalloc(sde_kms->hdmi_display_count,
+				  sizeof(void *),
+				  GFP_KERNEL);
+		if (!sde_kms->hdmi_displays) {
+			SDE_ERROR("failed to allocate hdmi displays\n");
+			goto exit_deinit_hdmi;
+		}
+		sde_kms->hdmi_display_count =
+			sde_hdmi_get_displays(sde_kms->hdmi_displays,
+				sde_kms->hdmi_display_count);
+	}
+
+	/* shd */
+	sde_kms->shd_displays = NULL;
+	sde_kms->shd_display_count = shd_display_get_num_of_displays();
+	if (sde_kms->shd_display_count) {
+		sde_kms->shd_displays = kcalloc(sde_kms->shd_display_count,
+				sizeof(void *), GFP_KERNEL);
+		if (!sde_kms->shd_displays)
+			goto exit_deinit_shd;
+		sde_kms->shd_display_count =
+			shd_display_get_displays(sde_kms->shd_displays,
+				sde_kms->shd_display_count);
+	}
+
+	return 0;
+
+	/* unwind in reverse order; kfree(NULL) is a safe no-op */
+exit_deinit_shd:
+	kfree(sde_kms->shd_displays);
+	sde_kms->shd_display_count = 0;
+	sde_kms->shd_displays = NULL;
+exit_deinit_hdmi:
+	/* fix: previously leaked the hdmi array when shd allocation failed */
+	kfree(sde_kms->hdmi_displays);
+	sde_kms->hdmi_display_count = 0;
+	sde_kms->hdmi_displays = NULL;
+
+exit_deinit_wb:
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_display_count = 0;
+	sde_kms->wb_displays = NULL;
+
+exit_deinit_dsi:
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_display_count = 0;
+	sde_kms->dsi_displays = NULL;
+	return rc;
+}
+
+/**
+ * _sde_kms_release_displays - release cache of underlying display handles
+ * @sde_kms: Pointer to sde kms structure
+ *
+ * Frees every display array allocated by _sde_kms_get_displays().
+ */
+static void _sde_kms_release_displays(struct sde_kms *sde_kms)
+{
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde kms\n");
+		return;
+	}
+	/* fix: shd_displays is allocated by _sde_kms_get_displays() but
+	 * was never released here, leaking on every teardown.
+	 */
+	kfree(sde_kms->shd_displays);
+	sde_kms->shd_displays = NULL;
+	sde_kms->shd_display_count = 0;
+
+	kfree(sde_kms->hdmi_displays);
+	sde_kms->hdmi_display_count = 0;
+	sde_kms->hdmi_displays = NULL;
+
+	kfree(sde_kms->wb_displays);
+	sde_kms->wb_displays = NULL;
+	sde_kms->wb_display_count = 0;
+
+	kfree(sde_kms->dsi_displays);
+	sde_kms->dsi_displays = NULL;
+	sde_kms->dsi_display_count = 0;
+}
+
+/**
+ * _sde_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @sde_kms: Pointer to sde kms structure
+ * Returns: Zero on success
+ */
+static int _sde_kms_setup_displays(struct drm_device *dev,
+		struct msm_drm_private *priv,
+		struct sde_kms *sde_kms)
+{
+	static const struct sde_connector_ops dsi_ops = {
+		.post_init =  dsi_conn_post_init,
+		.detect =     dsi_conn_detect,
+		.get_modes =  dsi_connector_get_modes,
+		.mode_valid = dsi_conn_mode_valid,
+		.get_info =   dsi_display_get_info,
+		.set_backlight = dsi_display_set_backlight,
+		.set_topology_ctl = dsi_display_set_top_ctl,
+	};
+	static const struct sde_connector_ops wb_ops = {
+		.post_init =    sde_wb_connector_post_init,
+		.detect =       sde_wb_connector_detect,
+		.get_modes =    sde_wb_connector_get_modes,
+		.set_property = sde_wb_connector_set_property,
+		.get_info =     sde_wb_get_info,
+	};
+	static const struct sde_connector_ops hdmi_ops = {
+		.pre_deinit = sde_hdmi_connector_pre_deinit,
+		.post_init =  sde_hdmi_connector_post_init,
+		.detect =     sde_hdmi_connector_detect,
+		.get_modes =  sde_hdmi_connector_get_modes,
+		.mode_valid = sde_hdmi_mode_valid,
+		.get_info =   sde_hdmi_get_info,
+		.set_property = sde_hdmi_set_property,
+		.get_property = sde_hdmi_get_property,
+		.pre_kickoff = sde_hdmi_pre_kickoff,
+		.mode_needs_full_range = sde_hdmi_mode_needs_full_range,
+		.get_csc_type = sde_hdmi_get_csc_type,
+		.set_topology_ctl = sde_hdmi_set_top_ctl,
+	};
+	static const struct sde_connector_ops shd_ops = {
+		.post_init =  shd_connector_post_init,
+		.detect =     shd_connector_detect,
+		.get_modes =  shd_connector_get_modes,
+		.mode_valid = shd_connector_mode_valid,
+		.get_info =   shd_connector_get_info,
+	};
+	struct msm_display_info info = {0};
+	struct drm_encoder *encoder;
+	void *display, *connector;
+	int i, max_encoders;
+	int rc = 0;
+	int connector_poll;
+
+	if (!dev || !priv || !sde_kms) {
+		SDE_ERROR("invalid argument(s)\n");
+		return -EINVAL;
+	}
+
+	max_encoders = sde_kms->dsi_display_count +
+		sde_kms->wb_display_count +
+		sde_kms->hdmi_display_count +
+		sde_kms->shd_display_count;
+
+	if (max_encoders > ARRAY_SIZE(priv->encoders)) {
+		max_encoders = ARRAY_SIZE(priv->encoders);
+		SDE_ERROR("capping number of displays to %d", max_encoders);
+	}
+
+	/* dsi */
+	for (i = 0; i < sde_kms->dsi_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->dsi_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = dsi_display_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("dsi get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for dsi %d\n", i);
+			continue;
+		}
+
+		rc = sde_splash_setup_display_resource(&sde_kms->splash_info,
+					display, DRM_MODE_CONNECTOR_DSI, false);
+		if (rc) {
+			SDE_ERROR("dsi %d splash resource setup failed %d\n",
+				i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		rc = dsi_display_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("dsi bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					0,
+					display,
+					&dsi_ops,
+					DRM_CONNECTOR_POLL_HPD,
+					DRM_MODE_CONNECTOR_DSI);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("dsi %d connector init failed\n", i);
+			dsi_display_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	/* wb */
+	for (i = 0; i < sde_kms->wb_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->wb_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = sde_wb_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("wb get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for wb %d\n", i);
+			continue;
+		}
+
+		rc = sde_wb_drm_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("wb bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+				encoder,
+				0,
+				display,
+				&wb_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				DRM_MODE_CONNECTOR_VIRTUAL);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("wb %d connector init failed\n", i);
+			sde_wb_drm_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	/* hdmi */
+	for (i = 0; i < sde_kms->hdmi_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->hdmi_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = sde_hdmi_dev_init(display);
+		if (rc) {
+			SDE_ERROR("hdmi dev_init %d failed\n", i);
+			continue;
+		}
+		rc = sde_hdmi_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("hdmi get_info %d failed\n", i);
+			/* fix: undo sde_hdmi_dev_init() on failure, as the
+			 * connector-failure path below already does
+			 */
+			sde_hdmi_dev_deinit(display);
+			continue;
+		}
+		if (info.capabilities & MSM_DISPLAY_CAP_HOT_PLUG)
+			connector_poll = DRM_CONNECTOR_POLL_HPD;
+		else
+			connector_poll = 0;
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("encoder init failed for hdmi %d\n", i);
+			sde_hdmi_dev_deinit(display);
+			continue;
+		}
+
+		rc = sde_splash_setup_display_resource(&sde_kms->splash_info,
+					display, DRM_MODE_CONNECTOR_HDMIA,
+					false);
+		if (rc) {
+			SDE_ERROR("hdmi %d splash resource setup failed %d\n",
+				i, rc);
+			sde_hdmi_dev_deinit(display);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		rc = sde_hdmi_drm_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("hdmi drm %d init failed, %d\n", i, rc);
+			sde_hdmi_dev_deinit(display);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+					encoder,
+					0,
+					display,
+					&hdmi_ops,
+					connector_poll,
+					DRM_MODE_CONNECTOR_HDMIA);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+		} else {
+			SDE_ERROR("hdmi %d connector init failed\n", i);
+			sde_hdmi_dev_deinit(display);
+			sde_hdmi_drm_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+
+	/* shd */
+	for (i = 0; i < sde_kms->shd_display_count &&
+		priv->num_encoders < max_encoders; ++i) {
+		display = sde_kms->shd_displays[i];
+		encoder = NULL;
+
+		memset(&info, 0x0, sizeof(info));
+		rc = shd_connector_get_info(&info, display);
+		if (rc) {
+			SDE_ERROR("shd get_info %d failed\n", i);
+			continue;
+		}
+
+		encoder = sde_encoder_init(dev, &info);
+		if (IS_ERR_OR_NULL(encoder)) {
+			SDE_ERROR("shd encoder init failed %d\n", i);
+			continue;
+		}
+
+		rc = sde_splash_setup_display_resource(&sde_kms->splash_info,
+					display, info.intf_type, true);
+		if (rc) {
+			SDE_ERROR("shared %d splash res setup failed %d\n",
+				i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		rc = shd_drm_bridge_init(display, encoder);
+		if (rc) {
+			SDE_ERROR("shd bridge %d init failed, %d\n", i, rc);
+			sde_encoder_destroy(encoder);
+			continue;
+		}
+
+		connector = sde_connector_init(dev,
+				encoder,
+				NULL,
+				display,
+				&shd_ops,
+				DRM_CONNECTOR_POLL_HPD,
+				info.intf_type);
+		if (connector) {
+			priv->encoders[priv->num_encoders++] = encoder;
+			priv->connectors[priv->num_connectors++] = connector;
+		} else {
+			SDE_ERROR("shd %d connector init failed\n", i);
+			shd_drm_bridge_deinit(display);
+			sde_encoder_destroy(encoder);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Destroy every DRM object created by _sde_kms_drm_obj_init(), in
+ * reverse dependency order (crtcs, planes, connectors, encoders),
+ * then drop the cached display handles.
+ */
+static void _sde_kms_drm_obj_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid sde_kms\n");
+		return;
+	} else if (!sde_kms->dev) {
+		SDE_ERROR("invalid dev\n");
+		return;
+	} else if (!sde_kms->dev->dev_private) {
+		SDE_ERROR("invalid dev_private\n");
+		return;
+	}
+	priv = sde_kms->dev->dev_private;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+	priv->num_crtcs = 0;
+
+	for (i = 0; i < priv->num_planes; i++)
+		priv->planes[i]->funcs->destroy(priv->planes[i]);
+	priv->num_planes = 0;
+
+	for (i = 0; i < priv->num_connectors; i++)
+		priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+	priv->num_connectors = 0;
+
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+	priv->num_encoders = 0;
+
+	_sde_kms_release_displays(sde_kms);
+}
+
+/* Map a display-type string to a fixed crtc index:
+ * "primary" -> 0, "secondary" -> 1, anything else -> 2.
+ */
+static inline int sde_get_crtc_id(const char *display_type)
+{
+	if (strcmp(display_type, "primary") == 0)
+		return 0;
+	if (strcmp(display_type, "secondary") == 0)
+		return 1;
+	return 2;
+}
+
+/*
+ * Create the DRM mode objects: query displays, build encoders and
+ * connectors, create planes (virtual-plane catalog path or sspp path),
+ * then one crtc per primary plane. On failure everything created so
+ * far is torn down via _sde_kms_drm_obj_destroy().
+ */
+static int _sde_kms_drm_obj_init(struct sde_kms *sde_kms)
+{
+	struct drm_device *dev;
+	struct drm_plane *primary_planes[MAX_PLANES], *plane;
+	struct drm_crtc *crtc;
+
+	struct msm_drm_private *priv;
+	struct sde_mdss_cfg *catalog;
+	struct sde_splash_info *sinfo;
+
+	int primary_planes_idx, i, ret;
+	int max_crtc_count, max_plane_count;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+	priv = dev->dev_private;
+	catalog = sde_kms->catalog;
+	sinfo = &sde_kms->splash_info;
+
+	ret = sde_core_irq_domain_add(sde_kms);
+	if (ret)
+		goto fail_irq;
+	/*
+	 * Query for underlying display drivers, and create connectors,
+	 * bridges and encoders for them.
+	 */
+	if (!_sde_kms_get_displays(sde_kms))
+		(void)_sde_kms_setup_displays(dev, priv, sde_kms);
+
+	max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+	/* Create the planes */
+	primary_planes_idx = 0;
+	if (catalog->vp_count) {
+		max_plane_count = min_t(u32, catalog->vp_count, MAX_PLANES);
+
+		for (i = 0; i < max_plane_count; i++) {
+			bool primary = true;
+			int crtc_id =
+				sde_get_crtc_id(catalog->vp[i].display_type);
+
+			if (strcmp(catalog->vp[i].plane_type, "primary"))
+				primary = false;
+
+			plane = sde_plane_init(dev, catalog->vp[i].id,
+					primary, 1UL << crtc_id, true, false);
+			if (IS_ERR(plane)) {
+				SDE_ERROR("sde_plane_init failed\n");
+				ret = PTR_ERR(plane);
+				goto fail;
+			}
+			priv->planes[priv->num_planes++] = plane;
+
+			/* NOTE(review): indexed by crtc_id (0..2), not by
+			 * primary_planes_idx — two vp entries with the same
+			 * display_type would overwrite one slot while idx
+			 * still advances; verify catalog guarantees
+			 * uniqueness of display_type per primary plane.
+			 */
+			if (primary) {
+				primary_planes[crtc_id] = plane;
+				primary_planes_idx++;
+			}
+		}
+	} else {
+		max_plane_count = min_t(u32, catalog->sspp_count, MAX_PLANES);
+
+		for (i = 0; i < max_plane_count; i++) {
+			bool primary = true;
+			bool resv_plane = false;
+
+			if (catalog->sspp[i].features & BIT(SDE_SSPP_CURSOR)
+				|| primary_planes_idx >= max_crtc_count)
+				primary = false;
+
+			if (sde_splash_query_plane_is_reserved(sinfo,
+						catalog->sspp[i].id)) {
+				resv_plane = true;
+				DRM_INFO("pipe%d is reserved\n",
+						catalog->sspp[i].id);
+			}
+
+			plane = sde_plane_init(dev, catalog->sspp[i].id,
+					primary, (1UL << max_crtc_count) - 1,
+					false, resv_plane);
+			if (IS_ERR(plane)) {
+				SDE_ERROR("sde_plane_init failed\n");
+				ret = PTR_ERR(plane);
+				goto fail;
+			}
+			priv->planes[priv->num_planes++] = plane;
+
+			if (primary)
+				primary_planes[primary_planes_idx++] = plane;
+		}
+	}
+
+	max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+	/* Create one CRTC per encoder */
+	for (i = 0; i < max_crtc_count; i++) {
+		crtc = sde_crtc_init(dev, primary_planes[i]);
+		if (IS_ERR(crtc)) {
+			ret = PTR_ERR(crtc);
+			goto fail;
+		}
+		priv->crtcs[priv->num_crtcs++] = crtc;
+	}
+
+	if (sde_is_custom_client()) {
+		/* All CRTCs are compatible with all planes */
+		for (i = 0; i < priv->num_planes; i++)
+			priv->planes[i]->possible_crtcs =
+				(1 << priv->num_crtcs) - 1;
+	}
+
+	/* All CRTCs are compatible with all encoders */
+	for (i = 0; i < priv->num_encoders; i++)
+		priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+	return 0;
+fail:
+	_sde_kms_drm_obj_destroy(sde_kms);
+fail_irq:
+	sde_core_irq_domain_fini(sde_kms);
+	return ret;
+}
+
+/*
+ * Late init hook: permit the drm vblank timer to disable vblank irqs
+ * and run shared-display post-init. Returns -EINVAL on bad kms.
+ */
+static int sde_kms_postinit(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev;
+
+	if (!sde_kms || !sde_kms->dev || !sde_kms->dev->dev) {
+		SDE_ERROR("invalid sde_kms\n");
+		return -EINVAL;
+	}
+
+	dev = sde_kms->dev;
+
+	/*
+	 * Allow vblank interrupt to be disabled by drm vblank timer.
+	 */
+	dev->vblank_disable_allowed = true;
+
+	shd_display_post_init(sde_kms);
+
+	return 0;
+}
+
+/* Pixel clock rounding hook: sde accepts any requested rate as-is */
+static long sde_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+		struct drm_encoder *encoder)
+{
+	return rate;
+}
+
+/*
+ * Release every hw resource acquired by sde_kms_hw_init(), in reverse
+ * acquisition order. Each step NULL-checks its handle and clears it
+ * afterwards, so this is safe to call multiple times and from partial
+ * init failure paths.
+ */
+static void _sde_kms_hw_destroy(struct sde_kms *sde_kms,
+		struct platform_device *pdev)
+{
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	int i;
+
+	if (!sde_kms || !pdev)
+		return;
+
+	dev = sde_kms->dev;
+	if (!dev)
+		return;
+
+	priv = dev->dev_private;
+	if (!priv)
+		return;
+
+	if (sde_kms->hw_intr)
+		sde_hw_intr_destroy(sde_kms->hw_intr);
+	sde_kms->hw_intr = NULL;
+
+	_sde_kms_release_displays(sde_kms);
+
+	/* safe to call these more than once during shutdown */
+	_sde_debugfs_destroy(sde_kms);
+	_sde_kms_mmu_destroy(sde_kms);
+	sde_core_perf_destroy(&sde_kms->perf);
+
+	if (sde_kms->catalog) {
+		for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+			u32 vbif_idx = sde_kms->catalog->vbif[i].id;
+
+			if ((vbif_idx < VBIF_MAX) && sde_kms->hw_vbif[vbif_idx])
+				sde_hw_vbif_destroy(sde_kms->hw_vbif[vbif_idx]);
+		}
+	}
+
+	if (sde_kms->rm_init)
+		sde_rm_destroy(&sde_kms->rm);
+	sde_kms->rm_init = false;
+
+	if (sde_kms->catalog)
+		sde_hw_catalog_deinit(sde_kms->catalog);
+	sde_kms->catalog = NULL;
+
+	if (sde_kms->splash_info.handoff) {
+		if (sde_kms->core_client)
+			sde_splash_destroy(&sde_kms->splash_info,
+				&priv->phandle, sde_kms->core_client);
+	}
+
+	if (sde_kms->core_client)
+		sde_power_client_destroy(&priv->phandle,
+				sde_kms->core_client);
+	sde_kms->core_client = NULL;
+
+	if (sde_kms->vbif[VBIF_NRT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_NRT]);
+	sde_kms->vbif[VBIF_NRT] = NULL;
+
+	if (sde_kms->vbif[VBIF_RT])
+		msm_iounmap(pdev, sde_kms->vbif[VBIF_RT]);
+	sde_kms->vbif[VBIF_RT] = NULL;
+
+	if (sde_kms->mmio)
+		msm_iounmap(pdev, sde_kms->mmio);
+	sde_kms->mmio = NULL;
+}
+
+/*
+ * msm_kms destroy hook: unregister the recovery client (file-scope
+ * 'info' handle, defined elsewhere in this file), tear down hardware
+ * resources and free the kms object.
+ */
+static void sde_kms_destroy(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev) {
+		SDE_ERROR("invalid device\n");
+		return;
+	}
+
+	sde_recovery_client_unregister(info.handle);
+	info.handle = NULL;
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
+	kfree(sde_kms);
+}
+
+/* File-close hook: cancel any page flips still pending for this client */
+static void sde_kms_preclose(struct msm_kms *kms, struct drm_file *file)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+	struct drm_device *dev = sde_kms->dev;
+	struct msm_drm_private *priv = dev->dev_private;
+	unsigned i;
+
+	for (i = 0; i < priv->num_crtcs; i++)
+		sde_crtc_cancel_pending_flip(priv->crtcs[i], file);
+}
+
+/* Report whether the bootloader (LK) splash handoff is still active */
+static bool sde_kms_early_display_status(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+
+	return sde_kms->splash_info.handoff;
+}
+
+/* msm_kms vtable binding the sde implementation into the msm drm core */
+static const struct msm_kms_funcs kms_funcs = {
+	.hw_init         = sde_kms_hw_init,
+	.postinit        = sde_kms_postinit,
+	.irq_preinstall  = sde_irq_preinstall,
+	.irq_postinstall = sde_irq_postinstall,
+	.irq_uninstall   = sde_irq_uninstall,
+	.irq             = sde_irq,
+	.preclose        = sde_kms_preclose,
+	.prepare_fence   = sde_kms_prepare_fence,
+	.prepare_commit  = sde_kms_prepare_commit,
+	.commit          = sde_kms_commit,
+	.complete_commit = sde_kms_complete_commit,
+	.wait_for_crtc_commit_done = sde_kms_wait_for_commit_done,
+	.enable_vblank   = sde_kms_enable_vblank,
+	.disable_vblank  = sde_kms_disable_vblank,
+	.check_modified_format = sde_format_check_modified_format,
+	.get_format      = sde_get_msm_format,
+	.round_pixclk    = sde_kms_round_pixclk,
+	.destroy         = sde_kms_destroy,
+	.early_display_status = sde_kms_early_display_status,
+};
+
+/* the caller api needs to turn on clock before calling it */
+/* Latch the hw revision from register offset 0x0 of the mdp block */
+static inline void _sde_kms_core_hw_rev_init(struct sde_kms *sde_kms)
+{
+	sde_kms->core_rev = readl_relaxed(sde_kms->mmio + 0x0);
+}
+
+/*
+ * Detach and release every address space created by _sde_kms_mmu_init().
+ * Iterates in reverse so teardown mirrors creation order. Always
+ * returns 0.
+ */
+static int _sde_kms_mmu_destroy(struct sde_kms *sde_kms)
+{
+	struct msm_mmu *mmu;
+	int i;
+
+	for (i = ARRAY_SIZE(sde_kms->aspace) - 1; i >= 0; i--) {
+		if (!sde_kms->aspace[i])
+			continue;
+
+		mmu = sde_kms->aspace[i]->mmu;
+
+		mmu->funcs->detach(mmu);
+		msm_gem_address_space_put(sde_kms->aspace[i]);
+
+		sde_kms->aspace[i] = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * IOMMU fault callback: log the faulting iova/flags and raise an
+ * SDE_SMMU_FAULT recovery event. Returning 0 leaves default fault
+ * handling to the iommu layer.
+ */
+static int sde_smmu_fault_handler(struct iommu_domain *iommu,
+		struct device *dev, unsigned long iova, int flags, void *arg)
+{
+
+	dev_info(dev, "%s: iova=0x%08lx, flags=0x%x, iommu=%pK\n", __func__,
+			iova, flags, iommu);
+
+	sde_recovery_set_events(SDE_SMMU_FAULT);
+
+	return 0;
+}
+
+/*
+ * Create and attach one gem address space per smmu domain. When the
+ * bootloader splash is still scanning out (handoff), the unsecure
+ * domain is attached with stage-1 translation bypassed
+ * (DOMAIN_ATTR_EARLY_MAP=1), the splash buffer is mapped, and only
+ * then is translation enabled — otherwise the display would fault on
+ * live fetches. Returns 0 on success; on error all address spaces
+ * created so far are destroyed.
+ */
+static int _sde_kms_mmu_init(struct sde_kms *sde_kms)
+{
+	struct msm_mmu *mmu;
+	int i, ret;
+	int data = 0;
+
+	for (i = 0; i < MSM_SMMU_DOMAIN_MAX; i++) {
+		struct msm_gem_address_space *aspace;
+
+		mmu = msm_smmu_new(sde_kms->dev->dev, i);
+		if (IS_ERR(mmu)) {
+			/* MMU's can be optional depending on platform */
+			ret = PTR_ERR(mmu);
+			DRM_INFO("failed to init iommu id %d: rc: %d\n", i,
+					ret);
+			continue;
+		}
+
+		msm_smmu_register_fault_handler(mmu, sde_smmu_fault_handler);
+
+		/* Attaching smmu means IOMMU HW starts to work immediately.
+		 * However, display HW in LK is still accessing memory
+		 * while the memory map is not done yet.
+		 * So first set DOMAIN_ATTR_EARLY_MAP attribute 1 to bypass
+		 * stage 1 translation in IOMMU HW.
+		 */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+			sde_kms->splash_info.handoff) {
+			ret = mmu->funcs->set_property(mmu,
+					DOMAIN_ATTR_EARLY_MAP,
+					&sde_kms->splash_info.handoff);
+			if (ret) {
+				SDE_ERROR("failed to set map att: %d\n", ret);
+				mmu->funcs->destroy(mmu);
+				goto fail;
+			}
+		}
+
+		aspace = msm_gem_smmu_address_space_create(sde_kms->dev->dev,
+			mmu, "sde");
+		if (IS_ERR(aspace)) {
+			ret = PTR_ERR(aspace);
+			mmu->funcs->destroy(mmu);
+			goto fail;
+		}
+
+		sde_kms->aspace[i] = aspace;
+
+		ret = mmu->funcs->attach(mmu, NULL, 0);
+		if (ret) {
+			SDE_ERROR("failed to attach iommu %d: %d\n", i, ret);
+			msm_gem_address_space_put(aspace);
+			goto fail;
+		}
+
+		/*
+		 * It's safe now to map the physical memory blcok LK accesses.
+		 */
+		if ((i == MSM_SMMU_DOMAIN_UNSECURE) &&
+			sde_kms->splash_info.handoff) {
+			ret = sde_splash_smmu_map(sde_kms->dev, mmu,
+					&sde_kms->splash_info);
+			if (ret) {
+				SDE_ERROR("map rsv mem failed: %d\n", ret);
+				msm_gem_address_space_put(aspace);
+				goto fail;
+			}
+
+			/*
+			 * Enable stage 1 smmu after user has finished early
+			 * mapping of splash memory.
+			 */
+			ret = mmu->funcs->set_property(mmu,
+					DOMAIN_ATTR_EARLY_MAP,
+					&data);
+			if (ret) {
+				SDE_ERROR("failed to set map att(%d): %d\n",
+					data, ret);
+				msm_gem_address_space_put(aspace);
+				goto fail;
+			}
+		}
+	}
+
+	return 0;
+fail:
+	_sde_kms_mmu_destroy(sde_kms);
+
+	return ret;
+}
+
+/*
+ * Look up a platform MEM resource (by @name, or the first one when
+ * @name is NULL) and devm-ioremap it. Returns the mapping or NULL;
+ * on success *out_size (if provided) receives the region length.
+ * A missing resource is only a debug message — some regions are
+ * optional per platform.
+ */
+static void __iomem *_sde_kms_ioremap(struct platform_device *pdev,
+		const char *name, unsigned long *out_size)
+{
+	struct resource *res;
+	unsigned long size;
+	void __iomem *ptr;
+
+	if (out_size)
+		*out_size = 0;
+
+	if (name)
+		res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+	else
+		res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+
+	if (!res) {
+		/* availability depends on platform */
+		SDE_DEBUG("failed to get memory resource: %s\n", name);
+		return NULL;
+	}
+
+	size = resource_size(res);
+
+	ptr = devm_ioremap_nocache(&pdev->dev, res->start, size);
+	if (!ptr) {
+		SDE_ERROR("failed to ioremap: %s\n", name);
+		return NULL;
+	}
+
+	SDE_DEBUG("IO:region %s %pK %08lx\n", name, ptr, size);
+
+	if (out_size)
+		*out_size = size;
+
+	return ptr;
+}
+
+
+/*
+ * Bring up the sde hardware: map register regions, create the power
+ * client, read the hw revision and catalog, initialize the resource
+ * manager, vbif blocks, mmu, debugfs, perf and interrupt layers, then
+ * create the DRM mode objects. On any failure the partially acquired
+ * state is unwound through _sde_kms_hw_destroy().
+ */
+static int sde_kms_hw_init(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms;
+	struct drm_device *dev;
+	struct msm_drm_private *priv;
+	struct sde_splash_info *sinfo;
+	int i, rc = -EINVAL;
+
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		goto end;
+	}
+
+	rc = sde_recovery_client_register(&info);
+	if (rc)
+		pr_err("%s recovery mgr register failed %d\n",
+			__func__, rc);
+
+	sde_kms = to_sde_kms(kms);
+	dev = sde_kms->dev;
+	if (!dev || !dev->platformdev) {
+		SDE_ERROR("invalid device\n");
+		goto end;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("invalid private data\n");
+		goto end;
+	}
+
+	sde_kms->mmio = _sde_kms_ioremap(dev->platformdev, "mdp_phys",
+			&sde_kms->mmio_len);
+	if (!sde_kms->mmio) {
+		SDE_ERROR("mdp register memory map failed\n");
+		goto error;
+	}
+	DRM_INFO("mapped mdp address space @%pK\n", sde_kms->mmio);
+
+	rc = sde_dbg_reg_register_base(SDE_DBG_NAME, sde_kms->mmio,
+			sde_kms->mmio_len);
+	if (rc)
+		SDE_ERROR("dbg base register kms failed: %d\n", rc);
+
+	sde_kms->vbif[VBIF_RT] = _sde_kms_ioremap(dev->platformdev, "vbif_phys",
+			&sde_kms->vbif_len[VBIF_RT]);
+	if (!sde_kms->vbif[VBIF_RT]) {
+		SDE_ERROR("vbif register memory map failed\n");
+		goto error;
+	}
+
+	rc = sde_dbg_reg_register_base("vbif_rt", sde_kms->vbif[VBIF_RT],
+				sde_kms->vbif_len[VBIF_RT]);
+	if (rc)
+		SDE_ERROR("dbg base register vbif_rt failed: %d\n", rc);
+
+	sde_kms->vbif[VBIF_NRT] = _sde_kms_ioremap(dev->platformdev,
+			"vbif_nrt_phys", &sde_kms->vbif_len[VBIF_NRT]);
+	if (!sde_kms->vbif[VBIF_NRT]) {
+		SDE_DEBUG("VBIF NRT is not defined");
+	} else {
+		rc = sde_dbg_reg_register_base("vbif_nrt",
+				sde_kms->vbif[VBIF_NRT],
+				sde_kms->vbif_len[VBIF_NRT]);
+		if (rc)
+			SDE_ERROR("dbg base register vbif_nrt failed: %d\n",
+					rc);
+	}
+
+	sde_kms->core_client = sde_power_client_create(&priv->phandle, "core");
+	if (IS_ERR_OR_NULL(sde_kms->core_client)) {
+		rc = PTR_ERR(sde_kms->core_client);
+		SDE_ERROR("sde power client create failed: %d\n", rc);
+		sde_kms->core_client = NULL;
+		goto error;
+	}
+
+	rc = sde_power_resource_enable(&priv->phandle, sde_kms->core_client,
+		true);
+	if (rc) {
+		SDE_ERROR("resource enable failed: %d\n", rc);
+		goto error;
+	}
+
+	_sde_kms_core_hw_rev_init(sde_kms);
+
+	pr_info("sde hardware revision:0x%x\n", sde_kms->core_rev);
+
+	sde_kms->catalog = sde_hw_catalog_init(dev, sde_kms->core_rev);
+	if (IS_ERR_OR_NULL(sde_kms->catalog)) {
+		rc = PTR_ERR(sde_kms->catalog);
+		SDE_ERROR("catalog init failed: %d\n", rc);
+		sde_kms->catalog = NULL;
+		goto power_error;
+	}
+
+	sde_dbg_init_dbg_buses(sde_kms->core_rev);
+
+	rc = sde_rm_init(&sde_kms->rm, sde_kms->catalog, sde_kms->mmio,
+			sde_kms->dev);
+	if (rc) {
+		SDE_ERROR("rm init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	sde_kms->rm_init = true;
+
+	sde_kms->hw_mdp = sde_rm_get_mdp(&sde_kms->rm);
+	if (IS_ERR_OR_NULL(sde_kms->hw_mdp)) {
+		rc = PTR_ERR(sde_kms->hw_mdp);
+		SDE_ERROR("failed to get hw_mdp: %d\n", rc);
+		sde_kms->hw_mdp = NULL;
+		goto power_error;
+	}
+
+	/*
+	 * Read the DISP_INTF_SEL register to check
+	 * whether early display is enabled in LK.
+	 */
+	rc = sde_splash_get_handoff_status(kms);
+	if (rc) {
+		SDE_ERROR("get early splash status failed: %d\n", rc);
+		goto power_error;
+	}
+
+	/*
+	 * when LK has enabled early display, sde_splash_parse_dt and
+	 * sde_splash_init must be called. The first function is to parse the
+	 * mandatory memory node for splash function, and the second function
+	 * will first do bandwidth voting job, because display hardware is now
+	 * accessing AHB data bus, otherwise device reboot will happen, and then
+	 * to check if the memory is reserved.
+	 */
+	sinfo = &sde_kms->splash_info;
+	if (sinfo->handoff) {
+		rc = sde_splash_parse_memory_dt(dev);
+		if (rc) {
+			SDE_ERROR("parse memory dt failed: %d\n", rc);
+			goto power_error;
+		}
+
+		rc = sde_splash_parse_reserved_plane_dt(dev, sinfo,
+						sde_kms->catalog);
+		if (rc)
+			SDE_ERROR("parse reserved plane dt failed: %d\n", rc);
+
+		sde_splash_init(&priv->phandle, kms);
+	}
+
+	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+		u32 vbif_idx = sde_kms->catalog->vbif[i].id;
+
+		/* fix: store under vbif_idx (the catalog id), matching both
+		 * the error check below and the vbif_idx-based lookup in
+		 * _sde_kms_hw_destroy(); previously the handle was stored
+		 * at index i while index vbif_idx was checked and torn down.
+		 */
+		sde_kms->hw_vbif[vbif_idx] = sde_hw_vbif_init(vbif_idx,
+				sde_kms->vbif[vbif_idx], sde_kms->catalog);
+		if (IS_ERR_OR_NULL(sde_kms->hw_vbif[vbif_idx])) {
+			rc = PTR_ERR(sde_kms->hw_vbif[vbif_idx]);
+			SDE_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+			sde_kms->hw_vbif[vbif_idx] = NULL;
+			goto power_error;
+		}
+	}
+
+	/*
+	 * Now we need to read the HW catalog and initialize resources such as
+	 * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+	 */
+	rc = _sde_kms_mmu_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_kms_mmu_init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	/*
+	 * NOTE: Calling sde_debugfs_init here so that the drm_minor device for
+	 * 'primary' is already created.
+	 */
+	rc = _sde_debugfs_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("sde_debugfs init failed: %d\n", rc);
+		goto power_error;
+	}
+
+	rc = sde_core_perf_init(&sde_kms->perf, dev, sde_kms->catalog,
+			&priv->phandle, priv->pclient, "core_clk_src",
+			sde_kms->debugfs_debug);
+	if (rc) {
+		SDE_ERROR("failed to init perf %d\n", rc);
+		goto perf_err;
+	}
+
+	sde_kms->hw_intr = sde_hw_intr_init(sde_kms->mmio, sde_kms->catalog);
+	if (IS_ERR_OR_NULL(sde_kms->hw_intr)) {
+		rc = PTR_ERR(sde_kms->hw_intr);
+		SDE_ERROR("hw_intr init failed: %d\n", rc);
+		sde_kms->hw_intr = NULL;
+		goto hw_intr_init_err;
+	}
+
+	/*
+	 * _sde_kms_drm_obj_init should create the DRM related objects
+	 * i.e. CRTCs, planes, encoders, connectors and so forth
+	 */
+	rc = _sde_kms_drm_obj_init(sde_kms);
+	if (rc) {
+		SDE_ERROR("modeset init failed: %d\n", rc);
+		goto drm_obj_init_err;
+	}
+
+	dev->mode_config.min_width = 0;
+	dev->mode_config.min_height = 0;
+
+	/*
+	 * max crtc width is equal to the max mixer width * 2 and max height is
+	 * is 4K
+	 */
+	dev->mode_config.max_width = sde_kms->catalog->max_mixer_width * 2;
+	dev->mode_config.max_height = 4096;
+
+	/*
+	 * Support format modifiers for compression etc.
+	 */
+	dev->mode_config.allow_fb_modifiers = true;
+
+	/* keep the core powered while LK still owns the display */
+	if (!sde_kms->splash_info.handoff)
+		sde_power_resource_enable(&priv->phandle,
+			sde_kms->core_client, false);
+
+	return 0;
+
+drm_obj_init_err:
+	sde_core_perf_destroy(&sde_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+error:
+	_sde_kms_hw_destroy(sde_kms, dev->platformdev);
+end:
+	return rc;
+}
+
+/*
+ * Recovery-manager callback: log the reported error class. Underrun is
+ * auto-recovered by hw; vsync-miss and smmu-fault currently only log
+ * the intent to soft reset. Always returns 0.
+ */
+static int sde_kms_recovery_callback(int err_code,
+			struct recovery_client_info *client_info)
+{
+	int rc = 0;
+
+	switch (err_code) {
+	case SDE_UNDERRUN:
+		/* fix: corrected "receovered" typo in the log message */
+		pr_debug("%s [SDE_UNDERRUN] error is auto HW recovered\n",
+			 __func__);
+		break;
+
+	case SDE_VSYNC_MISS:
+		pr_debug("%s [SDE_VSYNC_MISS] trigger soft reset\n", __func__);
+		break;
+
+	case SDE_SMMU_FAULT:
+		pr_debug("%s [SDE_SMMU_FAULT] trigger soft reset\n", __func__);
+		break;
+
+	default:
+		pr_err("%s error %d undefined\n", __func__, err_code);
+		break;
+	}
+
+	return rc;
+}
+
+/*
+ * Allocate and minimally initialize the sde kms object; full hardware
+ * bring-up is deferred to the hw_init hook. Returns the embedded
+ * msm_kms pointer or an ERR_PTR on failure.
+ */
+struct msm_kms *sde_kms_init(struct drm_device *dev)
+{
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+	int rc = 0;
+
+	if (!dev || !dev->dev_private) {
+		SDE_ERROR("drm device node invalid\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	priv = dev->dev_private;
+
+	sde_kms = kzalloc(sizeof(*sde_kms), GFP_KERNEL);
+	if (!sde_kms) {
+		SDE_ERROR("failed to allocate sde kms\n");
+		return ERR_PTR(-ENOMEM);
+	}
+
+	rc = sde_init_recovery_mgr(dev);
+	if (rc) {
+		SDE_ERROR("Failed SDE recovery mgr Init, err = %d\n", rc);
+		kfree(sde_kms);
+		/* NOTE(review): rc is discarded and -EFAULT returned
+		 * instead — confirm callers do not rely on the real code.
+		 */
+		return ERR_PTR(-EFAULT);
+	}
+
+	msm_kms_init(&sde_kms->base, &kms_funcs);
+	sde_kms->dev = dev;
+
+	return &sde_kms->base;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_kms.h b/drivers/gpu/drm/msm/sde/sde_kms.h
new file mode 100644
index 000000000000..d3975e9b53e6
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms.h
@@ -0,0 +1,434 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SDE_KMS_H__
+#define __SDE_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "sde_dbg.h"
+#include "sde_hw_catalog.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_interrupts.h"
+#include "sde_hw_wb.h"
+#include "sde_hw_top.h"
+#include "sde_rm.h"
+#include "sde_power_handle.h"
+#include "sde_irq.h"
+#include "sde_core_perf.h"
+#include "sde_splash.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * SDE_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define SDE_DEBUG(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ drm_ut_debug_printk(__func__, fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * SDE_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define SDE_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ drm_ut_debug_printk(__func__, fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define SDE_ERROR(fmt, ...) pr_err("[sde error]" fmt, ##__VA_ARGS__)
+
+#define POPULATE_RECT(rect, a, b, c, d, Q16_flag) \
+ do { \
+ (rect)->x = (Q16_flag) ? (a) >> 16 : (a); \
+ (rect)->y = (Q16_flag) ? (b) >> 16 : (b); \
+ (rect)->w = (Q16_flag) ? (c) >> 16 : (c); \
+ (rect)->h = (Q16_flag) ? (d) >> 16 : (d); \
+ } while (0)
+
+#define CHECK_LAYER_BOUNDS(offset, size, max_size) \
+ (((size) > (max_size)) || ((offset) > ((max_size) - (size))))
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
+#define SDE_NAME_SIZE 12
+
+/*
+ * struct sde_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct sde_irq_callback {
+ struct list_head list;
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct sde_irq: IRQ structure contains callback registration info
+ * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl: array of IRQ callbacks setting
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts: array of per-IRQ interrupt counters (presumably exported
+ *		through @debugfs_file -- confirm against the irq code)
+ * @cb_lock: callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct sde_irq {
+	u32 total_irqs;
+	struct list_head *irq_cb_tbl;
+	atomic_t *enable_counts;
+	atomic_t *irq_counts;
+	spinlock_t cb_lock;
+	struct dentry *debugfs_file;
+};
+
+/**
+ * struct sde_kms - SDE-specific subclass of the common msm_kms object
+ *
+ * Holds all software state for one SDE display hardware instance:
+ * parsed catalog data, mapped register spaces, interrupt bookkeeping,
+ * resource-manager state and the per-display-type connector lists.
+ * Retrieved from a struct msm_kms pointer via to_sde_kms().
+ */
+struct sde_kms {
+	struct msm_kms base;		/* base kms object, must stay valid */
+	struct drm_device *dev;		/* back-pointer to owning DRM device */
+	int core_rev;			/* SDE hardware core revision */
+	struct sde_mdss_cfg *catalog;	/* parsed HW capability catalog */
+
+	struct msm_gem_address_space *aspace[MSM_SMMU_DOMAIN_MAX];
+	struct sde_power_client *core_client;	/* power handle client */
+
+	/* directory entry for debugfs */
+	void *debugfs_root;
+	struct dentry *debugfs_debug;
+	struct dentry *debugfs_danger;
+	struct dentry *debugfs_vbif;
+
+	/* io/register spaces: */
+	void __iomem *mmio, *vbif[VBIF_MAX];
+	unsigned long mmio_len, vbif_len[VBIF_MAX];
+
+	/* regulators; NOTE(review): which targets require each one is not
+	 * visible here -- confirm against the probe/DT code
+	 */
+	struct regulator *vdd;
+	struct regulator *mmagic;
+	struct regulator *venus;
+
+	struct sde_irq_controller irq_controller;
+
+	struct sde_hw_intr *hw_intr;	/* HW interrupt block accessor */
+	struct sde_irq irq_obj;		/* IRQ callback registration state */
+
+	struct sde_core_perf perf;	/* core perf/bandwidth state */
+
+	struct sde_rm rm;		/* hardware resource manager */
+	bool rm_init;			/* true once rm was initialized */
+
+	struct sde_hw_vbif *hw_vbif[VBIF_MAX];
+	struct sde_hw_mdp *hw_mdp;
+	/* per-display-type opaque display handles and their counts */
+	int dsi_display_count;
+	void **dsi_displays;
+	int wb_display_count;
+	void **wb_displays;
+	bool has_danger_ctrl;
+	void **hdmi_displays;
+	int hdmi_display_count;
+	int shd_display_count;
+	void **shd_displays;
+
+	/* splash handoff structure */
+	struct sde_splash_info splash_info;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_sde_kms(x) container_of(x, struct sde_kms, base)
+
+/**
+ * sde_is_custom_client - whether or not to enable non-standard customizations
+ *
+ * Return: Whether or not the 'sdeclient' module parameter was set on boot up
+ */
+bool sde_is_custom_client(void);
+
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @sde_debugfs_setup_regset32: Initialize data for sde_debugfs_create_regset32
+ * @sde_debugfs_create_regset32: Create 32-bit register dump file
+ * @sde_debugfs_get_root: Get root dentry for SDE_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for sde_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use sde_debugfs_setup_regset32 instead.
+ */
+struct sde_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct sde_kms *sde_kms;
+};
+
+/**
+ * sde_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize sde_debugfs_regset32 structures for use
+ * with sde_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @sde_kms: pointer to sde kms structure
+ */
+void sde_debugfs_setup_regset32(struct sde_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct sde_kms *sde_kms);
+
+/**
+ * sde_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * sde_debugfs_setup_regset32 to initialize it.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ * or debugfs_remove_recursive() (on a parent directory) to remove the
+ * file
+ */
+void *sde_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct sde_debugfs_regset32 *regset);
+
+/**
+ * sde_debugfs_get_root - Return root directory entry for SDE's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @sde_kms: Pointer to SDE's KMS structure
+ *
+ * Return: dentry pointer for SDE's debugfs location
+ */
+void *sde_debugfs_get_root(struct sde_kms *sde_kms);
+
+/**
+ * SDE info management functions
+ * These functions/definitions allow for building up a 'sde_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define SDE_KMS_INFO_MAX_SIZE 4096
+
+/**
+ * struct sde_kms_info - connector information structure container
+ * @data: Array of information character data
+ * @len: Current length of information data
+ * @staged_len: Temporary data buffer length, commit to
+ * len using sde_kms_info_stop
+ * @start: Whether or not a partial data entry was just started
+ */
+struct sde_kms_info {
+ char data[SDE_KMS_INFO_MAX_SIZE];
+ uint32_t len;
+ uint32_t staged_len;
+ bool start;
+};
+
+/**
+ * SDE_KMS_INFO_DATA - Macro for accessing sde_kms_info data bytes
+ * @S: Pointer to sde_kms_info structure
+ * Returns: Pointer to byte data
+ */
+#define SDE_KMS_INFO_DATA(S) ((S) ? ((struct sde_kms_info *)(S))->data : 0)
+
+/**
+ * SDE_KMS_INFO_DATALEN - Macro for accessing sde_kms_info data length
+ * it adds an extra character length to count null.
+ * @S: Pointer to sde_kms_info structure
+ * Returns: Size of available byte data
+ */
+#define SDE_KMS_INFO_DATALEN(S) ((S) ? ((struct sde_kms_info *)(S))->len + 1 \
+ : 0)
+
+/**
+ * sde_kms_info_reset - reset sde_kms_info structure
+ * @info: Pointer to sde_kms_info structure
+ */
+void sde_kms_info_reset(struct sde_kms_info *info);
+
+/**
+ * sde_kms_info_add_keyint - add integer value to 'sde_kms_info'
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ * @value: Signed 32-bit integer value
+ */
+void sde_kms_info_add_keyint(struct sde_kms_info *info,
+ const char *key,
+ int32_t value);
+
+/**
+ * sde_kms_info_update_keystr - update the special string's value.
+ * @info_str: Pointer to source blob str
+ * @key: Pointer to key string
+ * @value: Signed 32-bit integer value
+ */
+void sde_kms_info_update_keystr(char *info_str,
+ const char *key,
+ int32_t value);
+
+/**
+ * sde_kms_info_add_keystr - add string value to 'sde_kms_info'
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ * @value: Pointer to string value
+ */
+void sde_kms_info_add_keystr(struct sde_kms_info *info,
+ const char *key,
+ const char *value);
+
+/**
+ * sde_kms_info_start - begin adding key to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append(val_1)
+ * ...
+ * sde_kms_info_append(val_n)
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @key: Pointer to key string
+ */
+void sde_kms_info_start(struct sde_kms_info *info,
+ const char *key);
+
+/**
+ * sde_kms_info_append - append value string to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append(val_1)
+ * ...
+ * sde_kms_info_append(val_n)
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @str: Pointer to partial value string
+ */
+void sde_kms_info_append(struct sde_kms_info *info,
+ const char *str);
+
+/**
+ * sde_kms_info_append_format - append format code string to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append_format(fourcc, modifier)
+ * ...
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ * @pixel_format: FOURCC format code
+ * @modifier: 64-bit drm format modifier
+ */
+void sde_kms_info_append_format(struct sde_kms_info *info,
+ uint32_t pixel_format,
+ uint64_t modifier);
+
+/**
+ * sde_kms_info_stop - finish adding key to 'sde_kms_info'
+ * Usage:
+ * sde_kms_info_start(key)
+ * sde_kms_info_append(val_1)
+ * ...
+ * sde_kms_info_append(val_n)
+ * sde_kms_info_stop
+ * @info: Pointer to sde_kms_info structure
+ */
+void sde_kms_info_stop(struct sde_kms_info *info);
+
+/**
+ * sde_kms_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: scissor rectangle
+ * @result: result rectangle, all 0's on no intersection found
+ */
+void sde_kms_rect_intersect(const struct sde_rect *r1,
+ const struct sde_rect *r2,
+ struct sde_rect *result);
+
+/**
+ * sde_kms_rect_is_equal - compares two rects
+ * @r1: rect value to compare, may be NULL
+ * @r2: rect value to compare, may be NULL
+ *
+ * Two NULL pointers compare equal; one NULL and one non-NULL do not.
+ *
+ * Return: true if the rects are the same, false otherwise.
+ */
+static inline bool sde_kms_rect_is_equal(const struct sde_rect *r1,
+		const struct sde_rect *r2)
+{
+	/* if either pointer is NULL they are only equal when both are */
+	if (!r1 || !r2)
+		return r1 == r2;
+
+	return r1->x == r2->x && r1->y == r2->y && r1->w == r2->w &&
+			r1->h == r2->h;
+}
+
+/**
+ * sde_kms_rect_is_null - check whether a rectangle has zero area
+ * @r: rectangle to check, may be NULL
+ *
+ * Return: true if @r is NULL or its width or height is 0
+ */
+static inline bool sde_kms_rect_is_null(const struct sde_rect *r)
+{
+	return !r || !r->w || !r->h;
+}
+
+/**
+ * Vblank enable/disable functions
+ */
+int sde_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void sde_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+#endif /* __SDE_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_kms_utils.c b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
new file mode 100644
index 000000000000..90fd3912eb59
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_kms_utils.c
@@ -0,0 +1,235 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "sde-kms_utils:[%s] " fmt, __func__
+
+#include "sde_kms.h"
+
+/**
+ * sde_kms_info_reset - discard all accumulated key/value data
+ * @info: pointer to the info structure, may be NULL
+ */
+void sde_kms_info_reset(struct sde_kms_info *info)
+{
+	if (!info)
+		return;
+
+	info->len = 0;
+	info->staged_len = 0;
+}
+
+/**
+ * sde_kms_info_add_keyint - append one "key=value\n" integer entry
+ * @info: pointer to the info structure
+ * @key: key string
+ * @value: signed 32-bit value, formatted in decimal
+ *
+ * The entry is silently dropped (len unchanged) when it would not fit
+ * in the fixed-size data buffer.
+ */
+void sde_kms_info_add_keyint(struct sde_kms_info *info,
+		const char *key,
+		int32_t value)
+{
+	uint32_t written;
+
+	if (!info || !key)
+		return;
+
+	written = snprintf(info->data + info->len,
+			SDE_KMS_INFO_MAX_SIZE - info->len,
+			"%s=%d\n", key, value);
+
+	/* commit only when snprintf did not truncate */
+	if ((info->len + written) < SDE_KMS_INFO_MAX_SIZE)
+		info->len += written;
+}
+
+/**
+ * sde_kms_info_update_keystr - replace the value of an existing key in-place
+ * @info_str: source info string buffer (assumed to be at least
+ *	SDE_KMS_INFO_MAX_SIZE bytes -- TODO confirm all callers honor this)
+ * @key: key prefix to search for (must include any trailing '=' the
+ *	caller expects, since the new value is written right after it)
+ * @value: new signed 32-bit value, formatted in decimal
+ *
+ * NOTE(review): the '\n'-scan below does not bound-check; a key whose
+ * value is not newline-terminated would read past the string end.
+ * Verify every entry in @info_str is produced by sde_kms_info_stop().
+ */
+void sde_kms_info_update_keystr(char *info_str,
+		const char *key,
+		int32_t value)
+{
+	char *str, *temp, *append_str;
+	uint32_t dst_len = 0, prefix_len = 0;
+	char c;
+	int32_t size = 0;
+
+	if (info_str && key) {
+		str = strnstr(info_str, key, strlen(info_str));
+		if (str) {
+			/* measure the current value text up to its newline */
+			temp = str + strlen(key);
+			c = *temp;
+			while (c != '\n') {
+				dst_len++;
+				c = *(++temp);
+			}
+			/*
+			 * If input key string to update is exactly the last
+			 * string in source string, no need to allocate one
+			 * memory to store the string after string key. Just
+			 * replace the value of the last string.
+			 *
+			 * If it is not, allocate one new memory to save
+			 * the string after string key+"\n". This new allocated
+			 * string will be appended to the whole source string
+			 * after key value is updated.
+			 */
+			size = strlen(str) - strlen(key) - dst_len - 1;
+			if (size > 0) {
+				append_str = kzalloc(size + 1, GFP_KERNEL);
+				if (!append_str) {
+					SDE_ERROR("failed to alloc memory\n");
+					return;
+				}
+				memcpy(append_str,
+					str + strlen(key) + dst_len + 1, size);
+			}
+
+			prefix_len = strlen(info_str) - strlen(str);
+			/* Update string with new value for the string key. */
+			snprintf(info_str + prefix_len,
+				SDE_KMS_INFO_MAX_SIZE - prefix_len,
+				"%s%d\n", key, value);
+
+			/* Append the string saved above. */
+			if (size > 0 && append_str) {
+				size = prefix_len + strlen(key) + dst_len + 1;
+				snprintf(info_str + size,
+					SDE_KMS_INFO_MAX_SIZE - size,
+					"%s", append_str);
+				kfree(append_str);
+			}
+		}
+	}
+}
+
+/**
+ * sde_kms_info_add_keystr - append one "key=value\n" string entry
+ * @info: pointer to the info structure
+ * @key: key string
+ * @value: value string
+ *
+ * The entry is silently dropped (len unchanged) when it would not fit
+ * in the fixed-size data buffer.
+ */
+void sde_kms_info_add_keystr(struct sde_kms_info *info,
+		const char *key,
+		const char *value)
+{
+	uint32_t written;
+
+	if (!info || !key || !value)
+		return;
+
+	written = snprintf(info->data + info->len,
+			SDE_KMS_INFO_MAX_SIZE - info->len,
+			"%s=%s\n", key, value);
+
+	/* commit only when snprintf did not truncate */
+	if ((info->len + written) < SDE_KMS_INFO_MAX_SIZE)
+		info->len += written;
+}
+
+/**
+ * sde_kms_info_start - stage the "key=" prefix of a multi-part entry
+ * @info: pointer to the info structure
+ * @key: key string
+ *
+ * Marks the entry as started; the staged length is only advanced when
+ * the prefix fit, and the entry is committed later by
+ * sde_kms_info_stop().
+ */
+void sde_kms_info_start(struct sde_kms_info *info,
+		const char *key)
+{
+	uint32_t written;
+
+	if (!info || !key)
+		return;
+
+	written = snprintf(info->data + info->len,
+			SDE_KMS_INFO_MAX_SIZE - info->len,
+			"%s=", key);
+
+	info->start = true;
+
+	/* advance the staged position only when snprintf did not truncate */
+	if ((info->len + written) < SDE_KMS_INFO_MAX_SIZE)
+		info->staged_len = info->len + written;
+}
+
+/**
+ * sde_kms_info_append - append a partial value string to a staged entry
+ * @info: pointer to the info structure
+ * @str: partial value string
+ *
+ * Must be used between sde_kms_info_start() and sde_kms_info_stop();
+ * the staged text only becomes part of @info->len when
+ * sde_kms_info_stop() runs. Truncated appends are discarded.
+ */
+void sde_kms_info_append(struct sde_kms_info *info,
+		const char *str)
+{
+	uint32_t len;
+
+	/* reject NULL str too, consistent with sde_kms_info_add_keystr() */
+	if (!info || !str)
+		return;
+
+	len = snprintf(info->data + info->staged_len,
+			SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+			"%s",
+			str);
+
+	/* check if snprintf truncated the string */
+	if ((info->staged_len + len) < SDE_KMS_INFO_MAX_SIZE) {
+		info->staged_len += len;
+		info->start = false;
+	}
+}
+
+/**
+ * sde_kms_info_append_format - append a fourcc (plus optional modifier) token
+ * @info: pointer to the info structure
+ * @pixel_format: FOURCC format code
+ * @modifier: 64-bit drm format modifier, 0 for none
+ *
+ * Tokens after the first in a staged entry are prefixed with a space.
+ * With a non-zero modifier the token is "CCCC/<vendor>/<value>" where
+ * the vendor is the modifier's top byte.
+ */
+void sde_kms_info_append_format(struct sde_kms_info *info,
+		uint32_t pixel_format,
+		uint64_t modifier)
+{
+	char *dst;
+	uint32_t room;
+	uint32_t written;
+
+	if (!info)
+		return;
+
+	dst = info->data + info->staged_len;
+	room = SDE_KMS_INFO_MAX_SIZE - info->staged_len;
+
+	if (modifier)
+		written = snprintf(dst, room,
+				info->start ?
+				"%c%c%c%c/%llX/%llX" : " %c%c%c%c/%llX/%llX",
+				(pixel_format >> 0) & 0xFF,
+				(pixel_format >> 8) & 0xFF,
+				(pixel_format >> 16) & 0xFF,
+				(pixel_format >> 24) & 0xFF,
+				(modifier >> 56) & 0xFF,
+				modifier & ((1ULL << 56) - 1));
+	else
+		written = snprintf(dst, room,
+				info->start ? "%c%c%c%c" : " %c%c%c%c",
+				(pixel_format >> 0) & 0xFF,
+				(pixel_format >> 8) & 0xFF,
+				(pixel_format >> 16) & 0xFF,
+				(pixel_format >> 24) & 0xFF);
+
+	/* commit only when snprintf did not truncate */
+	if ((info->staged_len + written) < SDE_KMS_INFO_MAX_SIZE) {
+		info->staged_len += written;
+		info->start = false;
+	}
+}
+
+/**
+ * sde_kms_info_stop - terminate a staged entry and commit its length
+ * @info: pointer to the info structure
+ */
+void sde_kms_info_stop(struct sde_kms_info *info)
+{
+	uint32_t written;
+
+	if (!info)
+		return;
+
+	/* terminate the staged entry with the final newline delimiter */
+	written = snprintf(info->data + info->staged_len,
+			SDE_KMS_INFO_MAX_SIZE - info->staged_len,
+			"\n");
+
+	/* commit the staged length only when the delimiter fit */
+	if ((info->staged_len + written) < SDE_KMS_INFO_MAX_SIZE)
+		info->len = info->staged_len + written;
+}
+
+/**
+ * sde_kms_rect_intersect - intersect two rectangles
+ * @r1: first rectangle
+ * @r2: scissor rectangle
+ * @result: result rectangle, zeroed when there is no intersection
+ */
+void sde_kms_rect_intersect(const struct sde_rect *r1,
+		const struct sde_rect *r2,
+		struct sde_rect *result)
+{
+	int left, top, right, bottom;
+
+	if (!r1 || !r2 || !result)
+		return;
+
+	left = max(r1->x, r2->x);
+	top = max(r1->y, r2->y);
+	right = min(r1->x + r1->w, r2->x + r2->w);
+	bottom = min(r1->y + r1->h, r2->y + r2->h);
+
+	/* disjoint rectangles produce an all-zero result */
+	if (right < left || bottom < top) {
+		memset(result, 0, sizeof(*result));
+		return;
+	}
+
+	result->x = left;
+	result->y = top;
+	result->w = right - left;
+	result->h = bottom - top;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.c b/drivers/gpu/drm/msm/sde/sde_plane.c
new file mode 100644
index 000000000000..ae66861c69d4
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_plane.c
@@ -0,0 +1,2929 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <uapi/drm/sde_drm.h>
+#include <uapi/drm/msm_drm_pp.h>
+
+#include "msm_prop.h"
+
+#include "sde_kms.h"
+#include "sde_fence.h"
+#include "sde_formats.h"
+#include "sde_hw_sspp.h"
+#include "sde_trace.h"
+#include "sde_crtc.h"
+#include "sde_vbif.h"
+#include "sde_plane.h"
+#include "sde_color_processing.h"
+
+#define SDE_DEBUG_PLANE(pl, fmt, ...) SDE_DEBUG("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define SDE_ERROR_PLANE(pl, fmt, ...) SDE_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define SDE_NAME_SIZE 12
+
+#define SDE_PLANE_COLOR_FILL_FLAG BIT(31)
+
+/* dirty bits for update function */
+#define SDE_PLANE_DIRTY_RECTS 0x1
+#define SDE_PLANE_DIRTY_FORMAT 0x2
+#define SDE_PLANE_DIRTY_SHARPEN 0x4
+#define SDE_PLANE_DIRTY_ALL 0xFFFFFFFF
+
+#define SDE_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define SDE_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+/**
+ * enum sde_plane_qos - Different qos configurations for each pipe
+ *
+ * @SDE_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @SDE_PLANE_QOS_VBLANK_AMORTIZE: Enables Amortization within pipe.
+ * this configuration is mutually exclusive from VBLANK_CTRL.
+ * @SDE_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum sde_plane_qos {
+ SDE_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ SDE_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ SDE_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
+/*
+ * struct sde_phy_plane - physical plane structure
+ * @sde_plane: Points to virtual plane
+ * @phy_plane_list: list of hw pipe(physical plane)
+ * @pipe: SSPP pipe id backing this physical plane
+ * @index: index of physical plane (starts from 0, order from left to right)
+ * @features: capabilities from catalog
+ * @nformats: number of valid entries in @formats
+ * @formats: pixel formats supported by this pipe
+ * @pipe_hw: pipe hardware access object
+ * @pipe_cfg: current pipe source/destination configuration
+ * @sharp_cfg: sharpening configuration
+ * @scaler3_cfg: QSEED3 scaler configuration, when applicable
+ * @pipe_qos_cfg: QoS configuration (danger/safe/creq LUTs, vblank ctrl)
+ * @color_fill: color fill value; bit 31 flags fill mode (see
+ *	SDE_PLANE_COLOR_FILL_FLAG)
+ * @is_rt_pipe: true when the pipe feeds a real-time display path
+ *	(selects RT vs NRT QoS LUTs)
+ * @pixel_ext: pixel extension configuration
+ * @pixel_ext_usr: true when @pixel_ext came from user configuration
+ * @csc_cfg: Decoded user configuration for csc
+ * @csc_usr_ptr: Points to csc_cfg if valid user config available
+ * @csc_ptr: Points to sde_csc_cfg structure to use for current
+ * @pipe_sblk: catalog sub-block capabilities for this pipe
+ */
+struct sde_phy_plane {
+	struct sde_plane *sde_plane;
+	struct list_head phy_plane_list;
+	enum sde_sspp pipe;
+	uint32_t index;
+
+	uint32_t features;
+	uint32_t nformats;
+	uint32_t formats[64];
+
+	struct sde_hw_pipe *pipe_hw;
+	struct sde_hw_pipe_cfg pipe_cfg;
+	struct sde_hw_sharp_cfg sharp_cfg;
+	struct sde_hw_scaler3_cfg *scaler3_cfg;
+	struct sde_hw_pipe_qos_cfg pipe_qos_cfg;
+	uint32_t color_fill;
+	bool is_rt_pipe;
+
+	struct sde_hw_pixel_ext pixel_ext;
+	bool pixel_ext_usr;
+
+	struct sde_csc_cfg csc_cfg;
+	struct sde_csc_cfg *csc_usr_ptr;
+	struct sde_csc_cfg *csc_ptr;
+
+	const struct sde_sspp_sub_blks *pipe_sblk;
+};
+
+/*
+ * struct sde_plane - local sde plane structure
+ * @base: embedded drm_plane object
+ * @aspace: GEM address space used for framebuffer mappings
+ * @lock: plane-level mutex (NOTE(review): exact protected scope is not
+ *	visible in this file section -- confirm before relying on it)
+ * @is_error: set when an input-fence wait fails/times out
+ *	(see sde_plane_wait_input_fence)
+ * @pipe_name: human-readable plane/pipe name
+ * @phy_plane_head: list of backing physical planes (struct sde_phy_plane)
+ * @num_of_phy_planes: number of entries on @phy_plane_head
+ * @property_info: plane property lookup helper state
+ * @property_data: per-property metadata, one per PLANE_PROP_*
+ * @blob_info: blob property carrying plane capability information
+ */
+struct sde_plane {
+	struct drm_plane base;
+
+	struct msm_gem_address_space *aspace;
+	struct mutex lock;
+	bool is_error;
+	char pipe_name[SDE_NAME_SIZE];
+
+	struct list_head phy_plane_head;
+	u32 num_of_phy_planes;
+
+	struct msm_property_info property_info;
+	struct msm_property_data property_data[PLANE_PROP_COUNT];
+	struct drm_property_blob *blob_info;
+
+	/* debugfs related stuff */
+	struct dentry *debugfs_root;
+	struct sde_debugfs_regset32 debugfs_src;
+	struct sde_debugfs_regset32 debugfs_scaler;
+	struct sde_debugfs_regset32 debugfs_csc;
+	bool debugfs_default_scale;
+};
+
+#define to_sde_plane(x) container_of(x, struct sde_plane, base)
+
+/* a plane state is "enabled" when it has both a framebuffer and a crtc */
+static bool sde_plane_enabled(struct drm_plane_state *state)
+{
+	if (!state)
+		return false;
+
+	return state->fb && state->crtc;
+}
+
+/* resolve the sde_kms instance owning @plane, or NULL if unavailable */
+static struct sde_kms *_sde_plane_get_kms(struct drm_plane *plane)
+{
+	struct msm_drm_private *priv = NULL;
+
+	if (plane && plane->dev)
+		priv = plane->dev->dev_private;
+
+	return priv ? to_sde_kms(priv->kms) : NULL;
+}
+
+/**
+ * _sde_plane_calc_fill_level - calculate fill level of the given source format
+ * @pp: Pointer to physical plane
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ *
+ * Derives the pipe fill level from the fixed pixel RAM size and the
+ * per-line fetch cost; NV12 halves the buffer (two planes), other
+ * pseudo-planar formats use it as-is, and all remaining formats
+ * double it. The "+32" padding presumably accounts for fetch
+ * overhead -- TODO confirm against the SDE HW documentation.
+ *
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _sde_plane_calc_fill_level(struct sde_phy_plane *pp,
+		const struct sde_format *fmt, u32 src_width)
+{
+	struct sde_plane *psde;
+	u32 fixed_buff_size;
+	u32 total_fl;
+
+	if (!pp || !fmt) {
+		SDE_ERROR("invalid arguments\n");
+		return 0;
+	}
+
+	psde = pp->sde_plane;
+	fixed_buff_size = pp->pipe_sblk->pixel_ram_size;
+
+	if (fmt->fetch_planes == SDE_PLANE_PSEUDO_PLANAR) {
+		if (fmt->chroma_sample == SDE_CHROMA_420) {
+			/* NV12 */
+			total_fl = (fixed_buff_size / 2) /
+				((src_width + 32) * fmt->bpp);
+		} else {
+			/* non NV12 */
+			total_fl = (fixed_buff_size) /
+				((src_width + 32) * fmt->bpp);
+		}
+	} else {
+		total_fl = (fixed_buff_size * 2) /
+			((src_width + 32) * fmt->bpp);
+	}
+
+	SDE_DEBUG("plane%u: pnum:%d fmt:%x w:%u fl:%u\n",
+			psde->base.base.id, pp->pipe - SSPP_VIG0,
+			fmt->base.pixel_format, src_width, total_fl);
+
+	return total_fl;
+}
+
+/**
+ * _sde_plane_get_qos_lut_linear - get linear LUT mapping
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static inline u32 _sde_plane_get_qos_lut_linear(u32 total_fl)
+{
+	/* fill-level thresholds mapped to their creq LUT values */
+	static const struct {
+		u32 fl_max;
+		u32 lut;
+	} lut_tbl[] = {
+		{4, 0x1B}, {5, 0x5B}, {6, 0x15B}, {7, 0x55B},
+		{8, 0x155B}, {9, 0x555B}, {10, 0x1555B},
+		{11, 0x5555B}, {12, 0x15555B},
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(lut_tbl) / sizeof(lut_tbl[0]); i++)
+		if (total_fl <= lut_tbl[i].fl_max)
+			return lut_tbl[i].lut;
+
+	/* every fill level above 12 shares the deepest LUT */
+	return 0x55555B;
+}
+
+/**
+ * _sde_plane_get_qos_lut_macrotile - get macrotile LUT mapping
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static inline u32 _sde_plane_get_qos_lut_macrotile(u32 total_fl)
+{
+	/* fill-level thresholds mapped to their creq LUT values */
+	static const struct {
+		u32 fl_max;
+		u32 lut;
+	} lut_tbl[] = {
+		{10, 0x1AAFF}, {11, 0x5AAFF}, {12, 0x15AAFF},
+	};
+	unsigned int i;
+
+	for (i = 0; i < sizeof(lut_tbl) / sizeof(lut_tbl[0]); i++)
+		if (total_fl <= lut_tbl[i].fl_max)
+			return lut_tbl[i].lut;
+
+	/* every fill level above 12 shares the deepest LUT */
+	return 0x55AAFF;
+}
+
+/**
+ * _sde_plane_set_qos_lut - set QoS LUT of the given physical plane
+ * @pp: Pointer to physical plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ *
+ * Picks the creq LUT: the fixed NRT value for non-real-time pipes,
+ * otherwise a fill-level based linear or macrotile LUT depending on
+ * the framebuffer format, then programs it through the pipe hw ops.
+ */
+static void _sde_plane_set_qos_lut(struct sde_phy_plane *pp,
+		struct drm_framebuffer *fb)
+{
+	struct sde_plane *psde;
+	const struct sde_format *fmt = NULL;
+	u32 qos_lut;
+	u32 total_fl = 0;
+
+	if (!pp || !fb) {
+		SDE_ERROR("invalid arguments phy_plane %d fb %d\n",
+				pp != NULL, fb != NULL);
+		return;
+	}
+
+	psde = pp->sde_plane;
+
+	if (!pp->pipe_hw || !pp->pipe_sblk) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	} else if (!pp->pipe_hw->ops.setup_creq_lut) {
+		/* hardware revision without a programmable creq LUT */
+		return;
+	}
+
+	if (!pp->is_rt_pipe) {
+		qos_lut = pp->pipe_sblk->creq_lut_nrt;
+	} else {
+		fmt = sde_get_sde_format_ext(
+				fb->pixel_format,
+				fb->modifier,
+				drm_format_num_planes(fb->pixel_format));
+		if (!fmt) {
+			/* fixed typo in error message ("faile") */
+			SDE_ERROR("%s: failed to get fmt\n", __func__);
+			return;
+		}
+
+		total_fl = _sde_plane_calc_fill_level(pp, fmt,
+				pp->pipe_cfg.src_rect.w);
+
+		if (SDE_FORMAT_IS_LINEAR(fmt))
+			qos_lut = _sde_plane_get_qos_lut_linear(total_fl);
+		else
+			qos_lut = _sde_plane_get_qos_lut_macrotile(total_fl);
+	}
+
+	pp->pipe_qos_cfg.creq_lut = qos_lut;
+
+	trace_sde_perf_set_qos_luts(pp->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			pp->is_rt_pipe, total_fl, qos_lut,
+			(fmt) ? SDE_FORMAT_IS_LINEAR(fmt) : 0);
+
+	SDE_DEBUG("plane%u: pnum:%d fmt:%x rt:%d fl:%u lut:0x%x\n",
+			psde->base.base.id,
+			pp->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			pp->is_rt_pipe, total_fl, qos_lut);
+
+	pp->pipe_hw->ops.setup_creq_lut(pp->pipe_hw, &pp->pipe_qos_cfg);
+}
+
+/**
+ * _sde_plane_set_danger_lut - set danger/safe LUT of the given physical plane
+ * @pp: Pointer to physical plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ *
+ * Picks the danger/safe LUT pair: fixed NRT values for non-real-time
+ * pipes, otherwise the linear or tile variants depending on the
+ * framebuffer format, then programs them through the pipe hw ops.
+ */
+static void _sde_plane_set_danger_lut(struct sde_phy_plane *pp,
+		struct drm_framebuffer *fb)
+{
+	struct sde_plane *psde;
+	const struct sde_format *fmt = NULL;
+	u32 danger_lut, safe_lut;
+
+	if (!pp || !fb) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	psde = pp->sde_plane;
+
+	if (!pp->pipe_hw || !pp->pipe_sblk) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	} else if (!pp->pipe_hw->ops.setup_danger_safe_lut) {
+		/* hardware revision without programmable danger/safe LUTs */
+		return;
+	}
+
+	if (!pp->is_rt_pipe) {
+		danger_lut = pp->pipe_sblk->danger_lut_nrt;
+		safe_lut = pp->pipe_sblk->safe_lut_nrt;
+	} else {
+		fmt = sde_get_sde_format_ext(
+				fb->pixel_format,
+				fb->modifier,
+				drm_format_num_planes(fb->pixel_format));
+		if (!fmt) {
+			SDE_ERROR("%s: fail to get fmt\n", __func__);
+			return;
+		}
+
+		if (SDE_FORMAT_IS_LINEAR(fmt)) {
+			danger_lut = pp->pipe_sblk->danger_lut_linear;
+			safe_lut = pp->pipe_sblk->safe_lut_linear;
+		} else {
+			danger_lut = pp->pipe_sblk->danger_lut_tile;
+			safe_lut = pp->pipe_sblk->safe_lut_tile;
+		}
+	}
+
+	pp->pipe_qos_cfg.danger_lut = danger_lut;
+	pp->pipe_qos_cfg.safe_lut = safe_lut;
+
+	trace_sde_perf_set_danger_luts(pp->pipe - SSPP_VIG0,
+			(fmt) ? fmt->base.pixel_format : 0,
+			(fmt) ? fmt->fetch_mode : 0,
+			pp->pipe_qos_cfg.danger_lut,
+			pp->pipe_qos_cfg.safe_lut);
+
+	SDE_DEBUG("plane%u: pnum:%d fmt:%x mode:%d luts[0x%x, 0x%x]\n",
+			psde->base.base.id,
+			pp->pipe - SSPP_VIG0,
+			fmt ? fmt->base.pixel_format : 0,
+			fmt ? fmt->fetch_mode : -1,
+			pp->pipe_qos_cfg.danger_lut,
+			pp->pipe_qos_cfg.safe_lut);
+
+	pp->pipe_hw->ops.setup_danger_safe_lut(pp->pipe_hw,
+			&pp->pipe_qos_cfg);
+}
+
+/**
+ * _sde_plane_set_qos_ctrl - set QoS control of the given physical plane
+ * @pp: Pointer to physical plane
+ * @enable: true to enable QoS control
+ * @flags: QoS control mode (enum sde_plane_qos)
+ *
+ * Updates the cached pipe_qos_cfg according to @flags and programs it
+ * through the pipe hw ops. For non-real-time pipes both vblank and
+ * danger/safe control are forced off regardless of @flags.
+ */
+static void _sde_plane_set_qos_ctrl(struct sde_phy_plane *pp,
+	bool enable, u32 flags)
+{
+	struct sde_plane *psde;
+
+	if (!pp) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+
+	psde = pp->sde_plane;
+
+	if (!pp->pipe_hw || !pp->pipe_sblk) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	} else if (!pp->pipe_hw->ops.setup_qos_ctrl) {
+		/* hardware revision without a QoS control op */
+		return;
+	}
+
+	if (flags & SDE_PLANE_QOS_VBLANK_CTRL) {
+		pp->pipe_qos_cfg.creq_vblank = pp->pipe_sblk->creq_vblank;
+		pp->pipe_qos_cfg.danger_vblank =
+				pp->pipe_sblk->danger_vblank;
+		pp->pipe_qos_cfg.vblank_en = enable;
+	}
+
+	if (flags & SDE_PLANE_QOS_VBLANK_AMORTIZE) {
+		/* this feature overrules previous VBLANK_CTRL */
+		pp->pipe_qos_cfg.vblank_en = false;
+		pp->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+	}
+
+	if (flags & SDE_PLANE_QOS_PANIC_CTRL)
+		pp->pipe_qos_cfg.danger_safe_en = enable;
+
+	if (!pp->is_rt_pipe) {
+		/* non-real-time pipes never use vblank or danger/safe */
+		pp->pipe_qos_cfg.vblank_en = false;
+		pp->pipe_qos_cfg.danger_safe_en = false;
+	}
+
+	SDE_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+		psde->base.base.id,
+		pp->pipe - SSPP_VIG0,
+		pp->pipe_qos_cfg.danger_safe_en,
+		pp->pipe_qos_cfg.vblank_en,
+		pp->pipe_qos_cfg.creq_vblank,
+		pp->pipe_qos_cfg.danger_vblank,
+		pp->is_rt_pipe);
+
+	pp->pipe_hw->ops.setup_qos_ctrl(pp->pipe_hw,
+			&pp->pipe_qos_cfg);
+}
+
+/**
+ * sde_plane_danger_signal_ctrl - toggle the danger/safe signal of a pipe
+ * @pp: Pointer to physical plane
+ * @enable: true to enable the danger/safe (panic) control
+ *
+ * No-op for non-real-time pipes. The display core is kept powered for
+ * the duration of the register update.
+ *
+ * Return: 0 on success, -EINVAL on invalid arguments
+ */
+static int sde_plane_danger_signal_ctrl(struct sde_phy_plane *pp, bool enable)
+{
+	struct sde_plane *psde;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!pp) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+	psde = pp->sde_plane;
+
+	if (!psde->base.dev) {
+		SDE_ERROR("invalid arguments\n");
+		return -EINVAL;
+	}
+
+	priv = psde->base.dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+
+	/* panic/danger signals only apply to real-time pipes */
+	if (!pp->is_rt_pipe)
+		return 0;
+
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, true);
+	_sde_plane_set_qos_ctrl(pp, enable, SDE_PLANE_QOS_PANIC_CTRL);
+	sde_power_resource_enable(&priv->phandle, sde_kms->core_client, false);
+
+	return 0;
+}
+
+/**
+ * _sde_plane_set_ot_limit - set OT limit for the given physical plane
+ * @pp: Pointer to physical plane
+ * @crtc: Pointer to drm crtc the plane is attached to
+ *
+ * Builds a VBIF outstanding-transaction parameter set from the current
+ * pipe configuration and crtc refresh rate and applies it via
+ * sde_vbif_set_ot_limit().
+ */
+static void _sde_plane_set_ot_limit(struct sde_phy_plane *pp,
+		struct drm_crtc *crtc)
+{
+	struct sde_plane *psde;
+	struct sde_vbif_set_ot_params ot_params;
+	struct msm_drm_private *priv;
+	struct sde_kms *sde_kms;
+
+	if (!pp || !crtc) {
+		SDE_ERROR("invalid arguments phy_plane %d crtc %d\n",
+				pp != NULL, crtc != NULL);
+		return;
+	}
+	psde = pp->sde_plane;
+	if (!psde->base.dev) {
+		SDE_ERROR("invalid DRM device\n");
+		return;
+	}
+
+	priv = psde->base.dev->dev_private;
+	if (!priv || !priv->kms) {
+		SDE_ERROR("invalid KMS reference\n");
+		return;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	if (!pp->pipe_hw) {
+		SDE_ERROR("invalid pipe reference\n");
+		return;
+	}
+
+	memset(&ot_params, 0, sizeof(ot_params));
+	ot_params.xin_id = pp->pipe_hw->cap->xin_id;
+	ot_params.num = pp->pipe_hw->idx - SSPP_NONE;
+	ot_params.width = pp->pipe_cfg.src_rect.w;
+	ot_params.height = pp->pipe_cfg.src_rect.h;
+	ot_params.is_wfd = !pp->is_rt_pipe;
+	ot_params.frame_rate = crtc->mode.vrefresh;
+	ot_params.vbif_idx = VBIF_RT;
+	ot_params.clk_ctrl = pp->pipe_hw->cap->clk_ctrl;
+	ot_params.rd = true;	/* this is a read (fetch) client */
+
+	sde_vbif_set_ot_limit(sde_kms, &ot_params);
+}
+
+/*
+ * _sde_plane_set_input_fence - update a state's input fence pointer from the
+ *	INPUT_FENCE property fd; drops the reference on any previous fence
+ *	before taking a reference on the new one.
+ */
+static void _sde_plane_set_input_fence(struct sde_plane *psde,
+		struct sde_plane_state *pstate, uint64_t fd)
+{
+	if (!psde || !pstate) {
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				psde != 0, pstate != 0);
+		return;
+	}
+
+	/* clear previous reference */
+	if (pstate->input_fence)
+		sde_sync_put(pstate->input_fence);
+
+	/* get fence pointer for later */
+	pstate->input_fence = sde_sync_get(fd);
+
+	SDE_DEBUG_PLANE(psde, "0x%llX\n", fd);
+}
+
+/*
+ * sde_plane_wait_input_fence - block until the plane's input fence signals.
+ * @plane: drm plane whose current state carries the fence
+ * @wait_ms: timeout in milliseconds
+ *
+ * A plane with no input fence succeeds immediately. On timeout or wait
+ * error the plane is flagged is_error so the commit path can react.
+ * Returns 0 on success/no-fence, -ETIME on timeout, -EINVAL on bad args,
+ * or the error from sde_sync_wait().
+ */
+int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	uint32_t prefix;
+	void *input_fence;
+	int ret = -EINVAL;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+	} else if (!plane->state) {
+		SDE_ERROR_PLANE(to_sde_plane(plane), "invalid state\n");
+	} else {
+		psde = to_sde_plane(plane);
+		pstate = to_sde_plane_state(plane->state);
+		input_fence = pstate->input_fence;
+
+		if (input_fence) {
+			prefix = sde_sync_get_name_prefix(input_fence);
+			ret = sde_sync_wait(input_fence, wait_ms);
+
+			SDE_EVT32(DRMID(plane), -ret, prefix);
+
+			switch (ret) {
+			case 0:
+				SDE_DEBUG_PLANE(psde, "signaled\n");
+				break;
+			case -ETIME:
+				SDE_ERROR_PLANE(psde, "%ums timeout on %08X\n",
+						wait_ms, prefix);
+				psde->is_error = true;
+				break;
+			default:
+				SDE_ERROR_PLANE(psde, "error %d on %08X\n",
+						ret, prefix);
+				psde->is_error = true;
+				break;
+			}
+		} else {
+			/* no fence attached: nothing to wait on */
+			ret = 0;
+		}
+	}
+	return ret;
+}
+
+/**
+ * _sde_plane_get_aspace: gets the address space based on the
+ *	fb_translation mode property
+ * @psde: Pointer to SDE plane object
+ * @pstate: Pointer to SDE plane state
+ * @aspace: Output; selected address space, or NULL for the
+ *	direct-translation modes which bypass the SMMU
+ * Returns: 0 on success, -EINVAL on bad args or missing address space,
+ *	-EFAULT on an unrecognized translation mode
+ */
+static int _sde_plane_get_aspace(
+		struct sde_plane *psde,
+		struct sde_plane_state *pstate,
+		struct msm_gem_address_space **aspace)
+{
+	struct sde_kms *kms;
+	int mode;
+
+	if (!psde || !pstate || !aspace) {
+		SDE_ERROR("invalid parameters\n");
+		return -EINVAL;
+	}
+
+	kms = _sde_plane_get_kms(&psde->base);
+	if (!kms) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	mode = sde_plane_get_property(pstate,
+			PLANE_PROP_FB_TRANSLATION_MODE);
+
+	switch (mode) {
+	case SDE_DRM_FB_NON_SEC:
+		*aspace = kms->aspace[MSM_SMMU_DOMAIN_UNSECURE];
+		/*
+		 * check the looked-up address space itself; the previous
+		 * check of the output pointer could never fail since it
+		 * was already validated above
+		 */
+		if (!*aspace)
+			return -EINVAL;
+		break;
+	case SDE_DRM_FB_SEC:
+		*aspace = kms->aspace[MSM_SMMU_DOMAIN_SECURE];
+		if (!*aspace)
+			return -EINVAL;
+		break;
+	case SDE_DRM_FB_SEC_DIR_TRANS:
+	case SDE_DRM_FB_NON_SEC_DIR_TRANS:
+		*aspace = NULL;
+		break;
+	default:
+		SDE_ERROR("invalid fb_translation mode:%d\n", mode);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+/*
+ * _sde_plane_set_scanout - resolve the fb's address-space layout and
+ *	program the pipe's source surface addresses.
+ * A -EAGAIN from sde_format_populate_layout() means the addresses are
+ * unchanged, so hardware programming is intentionally skipped.
+ */
+static inline void _sde_plane_set_scanout(struct sde_phy_plane *pp,
+		struct sde_plane_state *pstate,
+		struct sde_hw_pipe_cfg *pipe_cfg,
+		struct drm_framebuffer *fb)
+{
+	struct sde_plane *psde;
+	struct msm_gem_address_space *aspace = NULL;
+	int ret;
+
+	if (!pp || !pstate || !pipe_cfg || !fb) {
+		SDE_ERROR(
+			"invalid arg(s), phy_plane %d state %d cfg %d fb %d\n",
+			pp != 0, pstate != 0, pipe_cfg != 0, fb != 0);
+		return;
+	}
+
+	psde = pp->sde_plane;
+	if (!pp->pipe_hw) {
+		SDE_ERROR_PLANE(psde, "invalid pipe_hw\n");
+		return;
+	}
+
+	ret = _sde_plane_get_aspace(psde, pstate, &aspace);
+	if (ret) {
+		SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", ret);
+		return;
+	}
+
+	ret = sde_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+	if (ret == -EAGAIN)
+		SDE_DEBUG_PLANE(psde, "not updating same src addrs\n");
+	else if (ret)
+		SDE_ERROR_PLANE(psde, "failed to get format layout, %d\n", ret);
+	else if (pp->pipe_hw && pp->pipe_hw->ops.setup_sourceaddress)
+		pp->pipe_hw->ops.setup_sourceaddress(pp->pipe_hw, pipe_cfg);
+}
+
+/*
+ * _sde_plane_setup_scaler3_lut - fetch the three QSEED3 LUT blobs
+ *	(directional, circular, separable) from plane properties into the
+ *	scaler3 config.
+ * Returns 0 when all three LUTs are present, -ENODATA if any is missing,
+ * -EINVAL on bad arguments (including the expected-NULL pstate used on
+ * forced color fill).
+ */
+static int _sde_plane_setup_scaler3_lut(struct sde_phy_plane *pp,
+		struct sde_plane_state *pstate)
+{
+	struct sde_plane *psde;
+	struct sde_hw_scaler3_cfg *cfg;
+	int ret = 0;
+
+	if (!pp || !pp->sde_plane || !pp->scaler3_cfg) {
+		SDE_ERROR("invalid args\n");
+		return -EINVAL;
+	} else if (!pstate) {
+		/* pstate is expected to be null on forced color fill */
+		SDE_DEBUG("null pstate\n");
+		return -EINVAL;
+	}
+
+	psde = pp->sde_plane;
+	cfg = pp->scaler3_cfg;
+
+	cfg->dir_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->dir_len,
+			PLANE_PROP_SCALER_LUT_ED);
+	cfg->cir_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->cir_len,
+			PLANE_PROP_SCALER_LUT_CIR);
+	cfg->sep_lut = msm_property_get_blob(
+			&psde->property_info,
+			pstate->property_blobs, &cfg->sep_len,
+			PLANE_PROP_SCALER_LUT_SEP);
+	if (!cfg->dir_lut || !cfg->cir_lut || !cfg->sep_lut)
+		ret = -ENODATA;
+	return ret;
+}
+
+/*
+ * _sde_plane_setup_scaler3 - compute default QSEED3 scaler configuration.
+ * @pp: physical plane (supplies decimation settings, pixel_ext output)
+ * @src_w/@src_h: pre-decimation source dimensions
+ * @dst_w/@dst_h: destination dimensions
+ * @scale_cfg: output scaler config, fully reset here
+ * @fmt: source format (YUV forces even luma widths)
+ * @chroma_subsmpl_h/@chroma_subsmpl_v: chroma subsampling factors,
+ *	must be non-zero (used as divisors)
+ *
+ * Phase steps are fixed-point (PHASE_STEP_SHIFT). The scaler is left
+ * disabled (scale_cfg zeroed) for unity RGB scaling.
+ */
+static void _sde_plane_setup_scaler3(struct sde_phy_plane *pp,
+		uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+		struct sde_hw_scaler3_cfg *scale_cfg,
+		const struct sde_format *fmt,
+		uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+	uint32_t decimated, i;
+
+	if (!pp || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+			!chroma_subsmpl_v) {
+		SDE_ERROR("psde %pK scale_cfg %pK fmt %pK smp_h %d smp_v %d\n"
+			, pp, scale_cfg, fmt, chroma_subsmpl_h,
+			chroma_subsmpl_v);
+		return;
+	}
+
+	memset(scale_cfg, 0, sizeof(*scale_cfg));
+	memset(&pp->pixel_ext, 0, sizeof(struct sde_hw_pixel_ext));
+
+	/* luma phase steps from the decimated source size */
+	decimated = DECIMATED_DIMENSION(src_w,
+			pp->pipe_cfg.horz_decimation);
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_w);
+	decimated = DECIMATED_DIMENSION(src_h,
+			pp->pipe_cfg.vert_decimation);
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_0] =
+		mult_frac((1 << PHASE_STEP_SHIFT), decimated, dst_h);
+
+
+	/* chroma components scale faster by the subsampling factor */
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_y[SDE_SSPP_COMP_0] / chroma_subsmpl_v;
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_1_2] =
+		scale_cfg->phase_step_x[SDE_SSPP_COMP_0] / chroma_subsmpl_h;
+
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_2] =
+		scale_cfg->phase_step_x[SDE_SSPP_COMP_1_2];
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_2] =
+		scale_cfg->phase_step_y[SDE_SSPP_COMP_1_2];
+
+	/* alpha component follows luma */
+	scale_cfg->phase_step_x[SDE_SSPP_COMP_3] =
+		scale_cfg->phase_step_x[SDE_SSPP_COMP_0];
+	scale_cfg->phase_step_y[SDE_SSPP_COMP_3] =
+		scale_cfg->phase_step_y[SDE_SSPP_COMP_0];
+
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		scale_cfg->src_width[i] = DECIMATED_DIMENSION(src_w,
+				pp->pipe_cfg.horz_decimation);
+		scale_cfg->src_height[i] = DECIMATED_DIMENSION(src_h,
+				pp->pipe_cfg.vert_decimation);
+		if (SDE_FORMAT_IS_YUV(fmt))
+			scale_cfg->src_width[i] &= ~0x1; /* keep width even */
+		if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2) {
+			scale_cfg->src_width[i] /= chroma_subsmpl_h;
+			scale_cfg->src_height[i] /= chroma_subsmpl_v;
+		}
+		scale_cfg->preload_x[i] = SDE_QSEED3_DEFAULT_PRELOAD_H;
+		scale_cfg->preload_y[i] = SDE_QSEED3_DEFAULT_PRELOAD_V;
+		pp->pixel_ext.num_ext_pxls_top[i] =
+			scale_cfg->src_height[i];
+		pp->pixel_ext.num_ext_pxls_left[i] =
+			scale_cfg->src_width[i];
+	}
+	/* unity RGB scaling: leave the scaler disabled */
+	if (!(SDE_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+		&& (src_w == dst_w))
+		return;
+
+	scale_cfg->dst_width = dst_w;
+	scale_cfg->dst_height = dst_h;
+	scale_cfg->y_rgb_filter_cfg = SDE_SCALE_BIL;
+	scale_cfg->uv_filter_cfg = SDE_SCALE_BIL;
+	scale_cfg->alpha_filter_cfg = SDE_SCALE_ALPHA_BIL;
+	scale_cfg->lut_flag = 0;
+	scale_cfg->blend_cfg = 1;
+	scale_cfg->enable = 1;
+}
+
+/**
+ * _sde_plane_setup_scaler2 - determine default scaler phase steps/filter type
+ * @psde: Pointer to SDE plane object
+ * @src: Source size
+ * @dst: Destination size
+ * @phase_steps: Pointer to output array for phase steps
+ * @filter: Pointer to output array for filter type
+ * @fmt: Pointer to format definition
+ * @chroma_subsampling: Subsampling amount for chroma channel
+ *	(NOTE(review): used as a divisor, callers must pass non-zero)
+ *
+ * Returns: 0 on success
+ */
+static int _sde_plane_setup_scaler2(struct sde_plane *psde,
+		uint32_t src, uint32_t dst, uint32_t *phase_steps,
+		enum sde_hw_filter *filter, const struct sde_format *fmt,
+		uint32_t chroma_subsampling)
+{
+	if (!psde || !phase_steps || !filter || !fmt) {
+		SDE_ERROR(
+			"invalid arg(s), plane %d phase %d filter %d fmt %d\n",
+			psde != 0, phase_steps != 0, filter != 0, fmt != 0);
+		return -EINVAL;
+	}
+
+	/* calculate phase steps, leave init phase as zero */
+	phase_steps[SDE_SSPP_COMP_0] =
+		mult_frac(1 << PHASE_STEP_SHIFT, src, dst);
+	phase_steps[SDE_SSPP_COMP_1_2] =
+		phase_steps[SDE_SSPP_COMP_0] / chroma_subsampling;
+	phase_steps[SDE_SSPP_COMP_2] = phase_steps[SDE_SSPP_COMP_1_2];
+	phase_steps[SDE_SSPP_COMP_3] = phase_steps[SDE_SSPP_COMP_0];
+
+	/* calculate scaler config, if necessary */
+	if (SDE_FORMAT_IS_YUV(fmt) || src != dst) {
+		/* bilinear for upscale, PCMN for downscale */
+		filter[SDE_SSPP_COMP_3] =
+			(src <= dst) ? SDE_SCALE_FILTER_BIL :
+			SDE_SCALE_FILTER_PCMN;
+
+		if (SDE_FORMAT_IS_YUV(fmt)) {
+			filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_CA;
+			filter[SDE_SSPP_COMP_1_2] = filter[SDE_SSPP_COMP_3];
+		} else {
+			filter[SDE_SSPP_COMP_0] = filter[SDE_SSPP_COMP_3];
+			filter[SDE_SSPP_COMP_1_2] =
+					SDE_SCALE_FILTER_NEAREST;
+		}
+	} else {
+		/* disable scaler */
+		filter[SDE_SSPP_COMP_0] = SDE_SCALE_FILTER_MAX;
+		filter[SDE_SSPP_COMP_1_2] = SDE_SCALE_FILTER_MAX;
+		filter[SDE_SSPP_COMP_3] = SDE_SCALE_FILTER_MAX;
+	}
+	return 0;
+}
+
+/**
+ * _sde_plane_setup_pixel_ext - determine default pixel extension values
+ * @psde: Pointer to SDE plane object
+ * @src: Source size
+ * @dst: Destination size
+ * @decimated_src: Source size after decimation, if any
+ * @phase_steps: Pointer to output array for phase steps
+ * @out_src: Output array for pixel extension values
+ * @out_edge1: Output array for pixel extension first edge
+ * @out_edge2: Output array for pixel extension second edge
+ * @filter: Pointer to array for filter type
+ * @fmt: Pointer to format definition
+ * @chroma_subsampling: Subsampling amount for chroma channel
+ * @post_compare: Whether to chroma subsampled source size for comparisions
+ *
+ * Edge values are computed in signed fixed point (PHASE_STEP_SHIFT) and
+ * converted to repeat/fetch counts; negative edge values request extra
+ * fetch pixels, positive ones request repeats.
+ */
+static void _sde_plane_setup_pixel_ext(struct sde_plane *psde,
+		uint32_t src, uint32_t dst, uint32_t decimated_src,
+		uint32_t *phase_steps, uint32_t *out_src, int *out_edge1,
+		int *out_edge2, enum sde_hw_filter *filter,
+		const struct sde_format *fmt, uint32_t chroma_subsampling,
+		bool post_compare)
+{
+	int64_t edge1, edge2, caf;
+	uint32_t src_work;
+	int i, tmp;
+
+	if (psde && phase_steps && out_src && out_edge1 &&
+			out_edge2 && filter && fmt) {
+		/* handle CAF for YUV formats */
+		if (SDE_FORMAT_IS_YUV(fmt) && *filter == SDE_SCALE_FILTER_CA)
+			caf = PHASE_STEP_UNIT_SCALE;
+		else
+			caf = 0;
+
+		for (i = 0; i < SDE_MAX_PLANES; i++) {
+			src_work = decimated_src;
+			if (i == SDE_SSPP_COMP_1_2 || i == SDE_SSPP_COMP_2)
+				src_work /= chroma_subsampling;
+			if (post_compare)
+				src = src_work;
+			if (!SDE_FORMAT_IS_YUV(fmt) && (src == dst)) {
+				/* unity */
+				edge1 = 0;
+				edge2 = 0;
+			} else if (dst >= src) {
+				/* upscale */
+				edge1 = (1 << PHASE_RESIDUAL);
+				edge1 -= caf;
+				edge2 = (1 << PHASE_RESIDUAL);
+				edge2 += (dst - 1) * *(phase_steps + i);
+				edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+				edge2 += caf;
+				edge2 = -(edge2);
+			} else {
+				/* downscale */
+				edge1 = 0;
+				edge2 = (dst - 1) * *(phase_steps + i);
+				edge2 -= (src_work - 1) * PHASE_STEP_UNIT_SCALE;
+				edge2 += *(phase_steps + i);
+				edge2 = -(edge2);
+			}
+
+			/* only enable CAF for luma plane */
+			caf = 0;
+
+			/* populate output arrays */
+			*(out_src + i) = src_work;
+
+			/* edge updates taken from __pxl_extn_helper */
+			if (edge1 >= 0) {
+				tmp = (uint32_t)edge1;
+				tmp >>= PHASE_STEP_SHIFT;
+				*(out_edge1 + i) = -tmp;
+			} else {
+				/* round the fetch count up */
+				tmp = (uint32_t)(-edge1);
+				*(out_edge1 + i) =
+					(tmp + PHASE_STEP_UNIT_SCALE - 1) >>
+					PHASE_STEP_SHIFT;
+			}
+			if (edge2 >= 0) {
+				tmp = (uint32_t)edge2;
+				tmp >>= PHASE_STEP_SHIFT;
+				*(out_edge2 + i) = -tmp;
+			} else {
+				tmp = (uint32_t)(-edge2);
+				*(out_edge2 + i) =
+					(tmp + PHASE_STEP_UNIT_SCALE - 1) >>
+					PHASE_STEP_SHIFT;
+			}
+		}
+	}
+}
+
+/*
+ * _sde_plane_setup_csc - select the active CSC matrix for the pipe:
+ * a user-supplied override if present, otherwise a built-in BT.601
+ * limited-range YUV->RGB table (10-bit variant when the pipe supports
+ * SDE_SSPP_CSC_10BIT).
+ */
+static inline void _sde_plane_setup_csc(struct sde_phy_plane *pp)
+{
+	/* 8-bit BT.601 limited-range YUV->RGB */
+	static const struct sde_csc_cfg sde_csc_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+		},
+		/* signed bias */
+		{ 0xfff0, 0xff80, 0xff80,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+		{ 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+	};
+	/* 10-bit BT.601 limited-range YUV->RGB */
+	static const struct sde_csc_cfg sde_csc10_YUV2RGB_601L = {
+		{
+			/* S15.16 format */
+			0x00012A00, 0x00000000, 0x00019880,
+			0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+			0x00012A00, 0x00020480, 0x00000000,
+		},
+		/* signed bias */
+		{ 0xffc0, 0xfe00, 0xfe00,},
+		{ 0x0, 0x0, 0x0,},
+		/* unsigned clamp */
+		{ 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+		{ 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+	};
+
+	struct sde_plane *psde;
+
+	if (!pp) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	/* revert to kernel default if override not available */
+	if (pp->csc_usr_ptr)
+		pp->csc_ptr = pp->csc_usr_ptr;
+	else if (BIT(SDE_SSPP_CSC_10BIT) & pp->features)
+		pp->csc_ptr = (struct sde_csc_cfg *)&sde_csc10_YUV2RGB_601L;
+	else
+		pp->csc_ptr = (struct sde_csc_cfg *)&sde_csc_YUV2RGB_601L;
+
+	SDE_DEBUG_PLANE(psde, "using 0x%X 0x%X 0x%X...\n",
+			pp->csc_ptr->csc_mv[0],
+			pp->csc_ptr->csc_mv[1],
+			pp->csc_ptr->csc_mv[2]);
+}
+
+/*
+ * sde_color_process_plane_setup - push color-processing properties (HSVC
+ * adjustments and skin/sky/foliage memory-color blobs) to the pipe hw.
+ *
+ * NOTE(review): unlike its siblings this helper does not NULL-check
+ * plane/plane->state/pp/pp->pipe_hw; callers must guarantee them valid.
+ */
+static void sde_color_process_plane_setup(struct drm_plane *plane,
+	struct sde_phy_plane *pp)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	uint32_t hue, saturation, value, contrast;
+	struct drm_msm_memcol *memcol = NULL;
+	size_t memcol_sz = 0;
+
+	psde = pp->sde_plane;
+	pstate = to_sde_plane_state(plane->state);
+
+	hue = (uint32_t) sde_plane_get_property(pstate, PLANE_PROP_HUE_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_hue)
+		pp->pipe_hw->ops.setup_pa_hue(pp->pipe_hw, &hue);
+	saturation = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_SATURATION_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_sat)
+		pp->pipe_hw->ops.setup_pa_sat(pp->pipe_hw, &saturation);
+	value = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_VALUE_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_val)
+		pp->pipe_hw->ops.setup_pa_val(pp->pipe_hw, &value);
+	contrast = (uint32_t) sde_plane_get_property(pstate,
+		PLANE_PROP_CONTRAST_ADJUST);
+	if (pp->pipe_hw->ops.setup_pa_cont)
+		pp->pipe_hw->ops.setup_pa_cont(pp->pipe_hw, &contrast);
+
+	if (pp->pipe_hw->ops.setup_pa_memcolor) {
+		/* Skin memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_SKIN_COLOR);
+		pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
+					MEMCOLOR_SKIN, memcol);
+
+		/* Sky memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_SKY_COLOR);
+		pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
+					MEMCOLOR_SKY, memcol);
+
+		/* Foliage memory color setup */
+		memcol = msm_property_get_blob(&psde->property_info,
+					pstate->property_blobs,
+					&memcol_sz,
+					PLANE_PROP_FOLIAGE_COLOR);
+		pp->pipe_hw->ops.setup_pa_memcolor(pp->pipe_hw,
+					MEMCOLOR_FOLIAGE, memcol);
+	}
+}
+
+/*
+ * _sde_plane_setup_scaler - compute scaler + pixel-extension config for a
+ *	physical plane, choosing QSEED3 or QSEED2 defaults based on pipe
+ *	features, unless valid user-provided settings exist
+ *	(pixel_ext_usr, and not overridden by debugfs_default_scale).
+ */
+static void _sde_plane_setup_scaler(struct sde_phy_plane *pp,
+		const struct sde_format *fmt,
+		struct sde_plane_state *pstate)
+{
+	struct sde_hw_pixel_ext *pe;
+	uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+	struct sde_plane *psde;
+
+	if (!pp || !fmt || !pstate || !pp->sde_plane) {
+		SDE_ERROR("invalid arg(s), phy_plane %d fmt %d\n",
+				pp != NULL, fmt != NULL);
+		return;
+	}
+	psde = pp->sde_plane;
+
+	pe = &(pp->pixel_ext);
+
+	pp->pipe_cfg.horz_decimation =
+		sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+	pp->pipe_cfg.vert_decimation =
+		sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
+
+	/* don't chroma subsample if decimating */
+	chroma_subsmpl_h = pp->pipe_cfg.horz_decimation ? 1 :
+		drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+	chroma_subsmpl_v = pp->pipe_cfg.vert_decimation ? 1 :
+		drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+	/* update scaler */
+	if (pp->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		int error;
+
+		/* fall back to defaults if LUTs or user config missing */
+		error = _sde_plane_setup_scaler3_lut(pp, pstate);
+		if (error || !pp->pixel_ext_usr ||
+				psde->debugfs_default_scale) {
+			memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+			/* calculate default config for QSEED3 */
+			_sde_plane_setup_scaler3(pp,
+					pp->pipe_cfg.src_rect.w,
+					pp->pipe_cfg.src_rect.h,
+					pp->pipe_cfg.dst_rect.w,
+					pp->pipe_cfg.dst_rect.h,
+					pp->scaler3_cfg, fmt,
+					chroma_subsmpl_h, chroma_subsmpl_v);
+		}
+	} else if (!pp->pixel_ext_usr || !pstate ||
+			psde->debugfs_default_scale) {
+		uint32_t deci_dim, i;
+
+		/* calculate default configuration for QSEED2 */
+		memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+
+		SDE_DEBUG_PLANE(psde, "default config\n");
+		/* horizontal axis: phase steps then pixel extension */
+		deci_dim = DECIMATED_DIMENSION(pp->pipe_cfg.src_rect.w,
+				pp->pipe_cfg.horz_decimation);
+		_sde_plane_setup_scaler2(psde,
+				deci_dim,
+				pp->pipe_cfg.dst_rect.w,
+				pe->phase_step_x,
+				pe->horz_filter, fmt, chroma_subsmpl_h);
+
+		if (SDE_FORMAT_IS_YUV(fmt))
+			deci_dim &= ~0x1; /* YUV requires even width */
+		_sde_plane_setup_pixel_ext(psde, pp->pipe_cfg.src_rect.w,
+				pp->pipe_cfg.dst_rect.w, deci_dim,
+				pe->phase_step_x,
+				pe->roi_w,
+				pe->num_ext_pxls_left,
+				pe->num_ext_pxls_right, pe->horz_filter, fmt,
+				chroma_subsmpl_h, 0);
+
+		/* vertical axis */
+		deci_dim = DECIMATED_DIMENSION(pp->pipe_cfg.src_rect.h,
+				pp->pipe_cfg.vert_decimation);
+		_sde_plane_setup_scaler2(psde,
+				deci_dim,
+				pp->pipe_cfg.dst_rect.h,
+				pe->phase_step_y,
+				pe->vert_filter, fmt, chroma_subsmpl_v);
+		_sde_plane_setup_pixel_ext(psde, pp->pipe_cfg.src_rect.h,
+				pp->pipe_cfg.dst_rect.h, deci_dim,
+				pe->phase_step_y,
+				pe->roi_h,
+				pe->num_ext_pxls_top,
+				pe->num_ext_pxls_btm, pe->vert_filter, fmt,
+				chroma_subsmpl_v, 1);
+
+		/* positive counts are repeats, negative are over-fetch */
+		for (i = 0; i < SDE_MAX_PLANES; i++) {
+			if (pe->num_ext_pxls_left[i] >= 0)
+				pe->left_rpt[i] = pe->num_ext_pxls_left[i];
+			else
+				pe->left_ftch[i] = pe->num_ext_pxls_left[i];
+
+			if (pe->num_ext_pxls_right[i] >= 0)
+				pe->right_rpt[i] = pe->num_ext_pxls_right[i];
+			else
+				pe->right_ftch[i] = pe->num_ext_pxls_right[i];
+
+			if (pe->num_ext_pxls_top[i] >= 0)
+				pe->top_rpt[i] = pe->num_ext_pxls_top[i];
+			else
+				pe->top_ftch[i] = pe->num_ext_pxls_top[i];
+
+			if (pe->num_ext_pxls_btm[i] >= 0)
+				pe->btm_rpt[i] = pe->num_ext_pxls_btm[i];
+			else
+				pe->btm_ftch[i] = pe->num_ext_pxls_btm[i];
+		}
+	}
+}
+
+/**
+ * _sde_plane_color_fill - enables color fill on plane
+ * @pp: Pointer to SDE physical plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _sde_plane_color_fill(struct sde_phy_plane *pp,
+		uint32_t color, uint32_t alpha)
+{
+	const struct sde_format *fmt;
+
+	if (!pp) {
+		SDE_ERROR("invalid plane\n");
+		return -EINVAL;
+	}
+
+	if (!pp->pipe_hw) {
+		SDE_ERROR_PLANE(pp->sde_plane, "invalid plane h/w pointer\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG_PLANE(pp->sde_plane, "\n");
+
+	/*
+	 * select fill format to match user property expectation,
+	 * h/w only supports RGB variants
+	 */
+	fmt = sde_get_sde_format(DRM_FORMAT_ABGR8888);
+
+	/* update sspp */
+	if (fmt && pp->pipe_hw->ops.setup_solidfill) {
+		pp->pipe_hw->ops.setup_solidfill(pp->pipe_hw,
+				(color & 0xFFFFFF) | ((alpha & 0xFF) << 24));
+
+		/* override scaler/decimation if solid fill */
+		pp->pipe_cfg.src_rect.x = 0;
+		pp->pipe_cfg.src_rect.y = 0;
+		pp->pipe_cfg.src_rect.w = pp->pipe_cfg.dst_rect.w;
+		pp->pipe_cfg.src_rect.h = pp->pipe_cfg.dst_rect.h;
+
+		/* NULL pstate: forces default (non-user) scaler config */
+		_sde_plane_setup_scaler(pp, fmt, NULL);
+
+		if (pp->pipe_hw->ops.setup_format)
+			pp->pipe_hw->ops.setup_format(pp->pipe_hw,
+					fmt, SDE_SSPP_SOLID_FILL);
+
+		if (pp->pipe_hw->ops.setup_rects)
+			pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
+					&pp->pipe_cfg, &pp->pixel_ext,
+					pp->scaler3_cfg);
+	}
+
+	return 0;
+}
+
+/*
+ * _sde_plane_mode_set - program all physical pipes of a plane for the
+ *	given atomic state: scanout addresses, rects/scaler, format/CSC,
+ *	color processing, sharpening, QoS and OT limits.
+ *
+ * Fixes over the previous version:
+ *  - the deinterlace pitch doubling used the list iterator `pp` after
+ *    list_for_each_entry() completed (a stale pointer at the list head);
+ *    it now iterates the phy-plane list explicitly.
+ *  - multi-pipe/src-split offsets were accumulated into the shared
+ *    src/dst rects across loop iterations, compounding the adjustment
+ *    for the 3rd+ pipe and for shared-display ROI offsets; per-pipe
+ *    copies are used instead.
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+static int _sde_plane_mode_set(struct drm_plane *plane,
+				struct drm_plane_state *state)
+{
+	uint32_t nplanes, src_flags = 0x0;
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	struct sde_crtc_state *cstate;
+	const struct sde_format *fmt;
+	struct drm_crtc *crtc;
+	struct drm_framebuffer *fb;
+	struct sde_rect src, dst;
+	bool q16_data = true;
+	int idx;
+	struct sde_phy_plane *pp;
+	uint32_t num_of_phy_planes = 0;
+	int mode = 0;
+	uint32_t crtc_split_width;
+	bool is_across_mixer_boundary = false;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return -EINVAL;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
+		return -EINVAL;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(plane->state);
+
+	crtc = state->crtc;
+	crtc_split_width = get_crtc_split_width(crtc);
+	fb = state->fb;
+	if (!crtc || !fb) {
+		SDE_ERROR_PLANE(psde, "invalid crtc %d or fb %d\n",
+				crtc != 0, fb != 0);
+		return -EINVAL;
+	}
+	fmt = to_sde_format(msm_framebuffer_format(fb));
+	nplanes = fmt->num_planes;
+
+	/* determine what needs to be refreshed */
+	while ((idx = msm_property_pop_dirty(&psde->property_info)) >= 0) {
+		switch (idx) {
+		case PLANE_PROP_SCALER_V1:
+		case PLANE_PROP_SCALER_V2:
+		case PLANE_PROP_H_DECIMATE:
+		case PLANE_PROP_V_DECIMATE:
+		case PLANE_PROP_SRC_CONFIG:
+		case PLANE_PROP_ZPOS:
+			pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+			break;
+		case PLANE_PROP_CSC_V1:
+			pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+			break;
+		case PLANE_PROP_COLOR_FILL:
+			/* potentially need to refresh everything */
+			pstate->dirty = SDE_PLANE_DIRTY_ALL;
+			break;
+		case PLANE_PROP_ROTATION:
+			pstate->dirty |= SDE_PLANE_DIRTY_FORMAT;
+			break;
+		case PLANE_PROP_INFO:
+		case PLANE_PROP_ALPHA:
+		case PLANE_PROP_INPUT_FENCE:
+		case PLANE_PROP_BLEND_OP:
+			/* no special action required */
+			break;
+		default:
+			/* unknown property, refresh everything */
+			pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+			SDE_ERROR("executing full mode set, prp_idx %d\n", idx);
+			break;
+		}
+	}
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (pstate->dirty & SDE_PLANE_DIRTY_RECTS)
+			memset(&(pp->pipe_cfg), 0,
+				sizeof(struct sde_hw_pipe_cfg));
+
+		_sde_plane_set_scanout(pp, pstate, &pp->pipe_cfg, fb);
+
+		pstate->pending = true;
+
+		pp->is_rt_pipe = sde_crtc_is_rt(crtc);
+		_sde_plane_set_qos_ctrl(pp, false, SDE_PLANE_QOS_PANIC_CTRL);
+	}
+
+	/* early out if nothing dirty */
+	if (!pstate->dirty)
+		return 0;
+
+	memset(&src, 0, sizeof(struct sde_rect));
+
+	/* update secure session flag */
+	mode = sde_plane_get_property(pstate,
+			PLANE_PROP_FB_TRANSLATION_MODE);
+	if ((mode == SDE_DRM_FB_SEC) ||
+			(mode == SDE_DRM_FB_SEC_DIR_TRANS))
+		src_flags |= SDE_SSPP_SECURE_OVERLAY_SESSION;
+
+
+	/* update roi config */
+	if (pstate->dirty & SDE_PLANE_DIRTY_RECTS) {
+		POPULATE_RECT(&src, state->src_x, state->src_y,
+			state->src_w, state->src_h, q16_data);
+		POPULATE_RECT(&dst, state->crtc_x, state->crtc_y,
+			state->crtc_w, state->crtc_h, !q16_data);
+
+		SDE_DEBUG_PLANE(psde,
+			"FB[%u] %u,%u,%ux%u->crtc%u %d,%d,%ux%u, %s ubwc %d\n",
+				fb->base.id, src.x, src.y, src.w, src.h,
+				crtc->base.id, dst.x, dst.y, dst.w, dst.h,
+				drm_get_format_name(fmt->base.pixel_format),
+				SDE_FORMAT_IS_UBWC(fmt));
+
+		if (sde_plane_get_property(pstate, PLANE_PROP_SRC_CONFIG) &
+			BIT(SDE_DRM_DEINTERLACE)) {
+			SDE_DEBUG_PLANE(psde, "deinterlace\n");
+			/*
+			 * double the pitch on every physical pipe; the
+			 * stale iterator left behind by the loop above
+			 * must not be dereferenced here
+			 */
+			list_for_each_entry(pp, &psde->phy_plane_head,
+					phy_plane_list)
+				for (idx = 0; idx < SDE_MAX_PLANES; ++idx)
+					pp->pipe_cfg.layout.plane_pitch[idx]
+							<<= 1;
+			src.h /= 2;
+			src.y = DIV_ROUND_UP(src.y, 2);
+			src.y &= ~0x1;
+		}
+
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+			num_of_phy_planes++;
+
+		/*
+		 * Only need to use one physical plane if plane width
+		 * is still within the limitation.
+		 */
+		is_across_mixer_boundary =
+			(plane->state->crtc_x < crtc_split_width) &&
+			(plane->state->crtc_x + plane->state->crtc_w >
+			crtc_split_width);
+		if (crtc_split_width >= (src.x + src.w) &&
+			!is_across_mixer_boundary)
+			num_of_phy_planes = 1;
+
+		if (num_of_phy_planes > 1) {
+			/* Adjust width for multi-pipe */
+			src.w /= num_of_phy_planes;
+			dst.w /= num_of_phy_planes;
+		}
+
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+			/*
+			 * work on per-pipe copies so offsets do not
+			 * accumulate across iterations
+			 */
+			struct sde_rect psrc = src;
+			struct sde_rect pdst = dst;
+
+			/* Adjust offset for multi-pipe */
+			if (num_of_phy_planes > 1) {
+				psrc.x += src.w * pp->index;
+				pdst.x += dst.w * pp->index;
+			}
+
+			/* add extra offset for shared display */
+			if (crtc->state) {
+				cstate = to_sde_crtc_state(crtc->state);
+				if (cstate->is_shared) {
+					pdst.x += cstate->shared_roi.x;
+					pdst.y += cstate->shared_roi.y;
+
+					if (sde_plane_get_property(pstate,
+						PLANE_PROP_SRC_CONFIG) &
+						BIT(SDE_DRM_LINEPADDING)) {
+						psrc.h = cstate->shared_roi.h;
+						pdst.h = cstate->shared_roi.h;
+					}
+				}
+			}
+
+			pp->pipe_cfg.src_rect = psrc;
+			pp->pipe_cfg.dst_rect = pdst;
+
+			/* check for color fill */
+			pp->color_fill = (uint32_t)sde_plane_get_property(
+					pstate, PLANE_PROP_COLOR_FILL);
+			if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG) {
+				/* skip remaining processing on color fill */
+				pstate->dirty = 0x0;
+			} else if (pp->pipe_hw->ops.setup_rects) {
+				_sde_plane_setup_scaler(pp, fmt, pstate);
+
+				pp->pipe_hw->ops.setup_rects(pp->pipe_hw,
+						&pp->pipe_cfg, &pp->pixel_ext,
+						pp->scaler3_cfg);
+			}
+		}
+	}
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (((pstate->dirty & SDE_PLANE_DIRTY_FORMAT) ||
+				(src_flags &
+				SDE_SSPP_SECURE_OVERLAY_SESSION)) &&
+				pp->pipe_hw->ops.setup_format) {
+			SDE_DEBUG_PLANE(psde, "rotation 0x%llX\n",
+			sde_plane_get_property(pstate, PLANE_PROP_ROTATION));
+			if (sde_plane_get_property(pstate, PLANE_PROP_ROTATION)
+				& BIT(DRM_REFLECT_X))
+				src_flags |= SDE_SSPP_FLIP_LR;
+			if (sde_plane_get_property(pstate,
+				PLANE_PROP_ROTATION) & BIT(DRM_REFLECT_Y))
+				src_flags |= SDE_SSPP_FLIP_UD;
+
+			/* update format */
+			pp->pipe_hw->ops.setup_format(pp->pipe_hw,
+				fmt, src_flags);
+
+			/* update csc */
+			if (SDE_FORMAT_IS_YUV(fmt))
+				_sde_plane_setup_csc(pp);
+			else
+				pp->csc_ptr = NULL;
+		}
+
+		sde_color_process_plane_setup(plane, pp);
+
+		/* update sharpening */
+		if ((pstate->dirty & SDE_PLANE_DIRTY_SHARPEN) &&
+			pp->pipe_hw->ops.setup_sharpening) {
+			pp->sharp_cfg.strength = SHARP_STRENGTH_DEFAULT;
+			pp->sharp_cfg.edge_thr = SHARP_EDGE_THR_DEFAULT;
+			pp->sharp_cfg.smooth_thr = SHARP_SMOOTH_THR_DEFAULT;
+			pp->sharp_cfg.noise_thr = SHARP_NOISE_THR_DEFAULT;
+
+			pp->pipe_hw->ops.setup_sharpening(pp->pipe_hw,
+					&pp->sharp_cfg);
+		}
+
+		_sde_plane_set_qos_lut(pp, fb);
+		_sde_plane_set_danger_lut(pp, fb);
+
+		if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+			_sde_plane_set_qos_ctrl(pp, true,
+				SDE_PLANE_QOS_PANIC_CTRL);
+			_sde_plane_set_ot_limit(pp, crtc);
+		}
+	}
+
+	/* clear dirty */
+	pstate->dirty = 0x0;
+
+	return 0;
+}
+
+/*
+ * sde_plane_prepare_fb - resolve the plane's address space from the
+ *	fb_translation mode and pin/prepare the new framebuffer in it.
+ * Returns 0 when there is no fb, otherwise the result of
+ * msm_framebuffer_prepare() or a negative errno.
+ */
+static int sde_plane_prepare_fb(struct drm_plane *plane,
+		const struct drm_plane_state *new_state)
+{
+	struct drm_framebuffer *fb;
+	struct sde_plane *psde = to_sde_plane(plane);
+	struct sde_plane_state *pstate;
+	int rc;
+
+	if (!psde || !new_state)
+		return -EINVAL;
+
+	if (!new_state->fb)
+		return 0;
+
+	fb = new_state->fb;
+	pstate = to_sde_plane_state(new_state);
+	/* cache the aspace for the matching cleanup_fb call */
+	rc = _sde_plane_get_aspace(psde, pstate, &psde->aspace);
+
+	if (rc) {
+		SDE_ERROR_PLANE(psde, "Failed to get aspace %d\n", rc);
+		return rc;
+	}
+
+	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
+	return msm_framebuffer_prepare(fb, psde->aspace);
+}
+
+/*
+ * sde_plane_cleanup_fb - unpin the old framebuffer from the address
+ *	space cached by sde_plane_prepare_fb(). No-op without an fb.
+ */
+static void sde_plane_cleanup_fb(struct drm_plane *plane,
+		const struct drm_plane_state *old_state)
+{
+	struct drm_framebuffer *fb = old_state ? old_state->fb : NULL;
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+
+	if (!fb || !psde)
+		return;
+
+	SDE_DEBUG_PLANE(psde, "FB[%u]\n", fb->base.id);
+	msm_framebuffer_cleanup(fb, psde->aspace);
+}
+
+/*
+ * _sde_plane_atomic_check_mode_changed - compare new vs old plane state
+ *	and raise the appropriate dirty bits (RECTS/FORMAT/ALL) so the
+ *	mode-set path only reprograms what changed.
+ */
+static void _sde_plane_atomic_check_mode_changed(struct sde_plane *psde,
+		struct drm_plane_state *state,
+		struct drm_plane_state *old_state)
+{
+	struct sde_plane_state *pstate = to_sde_plane_state(state);
+
+	/* no need to check it again */
+	if (pstate->dirty == SDE_PLANE_DIRTY_ALL)
+		return;
+
+	if (!sde_plane_enabled(state) || !sde_plane_enabled(old_state)
+			|| psde->is_error) {
+		SDE_DEBUG_PLANE(psde,
+			"enabling/disabling full modeset required\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+	} else if (to_sde_plane_state(old_state)->pending) {
+		SDE_DEBUG_PLANE(psde, "still pending\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_ALL;
+	} else if (state->src_w != old_state->src_w ||
+		   state->src_h != old_state->src_h ||
+		   state->src_x != old_state->src_x ||
+		   state->src_y != old_state->src_y) {
+		SDE_DEBUG_PLANE(psde, "src rect updated\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+	} else if (state->crtc_w != old_state->crtc_w ||
+		   state->crtc_h != old_state->crtc_h ||
+		   state->crtc_x != old_state->crtc_x ||
+		   state->crtc_y != old_state->crtc_y) {
+		SDE_DEBUG_PLANE(psde, "crtc rect updated\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+	}
+
+	if (!state->fb || !old_state->fb) {
+		SDE_DEBUG_PLANE(psde, "can't compare fb handles\n");
+	} else if (state->fb->pixel_format != old_state->fb->pixel_format) {
+		SDE_DEBUG_PLANE(psde, "format change\n");
+		pstate->dirty |= SDE_PLANE_DIRTY_FORMAT | SDE_PLANE_DIRTY_RECTS;
+	} else {
+		/* same format: deep-compare per-plane layout parameters */
+		uint64_t *new_mods = state->fb->modifier;
+		uint64_t *old_mods = old_state->fb->modifier;
+		uint32_t *new_pitches = state->fb->pitches;
+		uint32_t *old_pitches = old_state->fb->pitches;
+		uint32_t *new_offset = state->fb->offsets;
+		uint32_t *old_offset = old_state->fb->offsets;
+		int i;
+
+		for (i = 0; i < ARRAY_SIZE(state->fb->modifier); i++) {
+			if (new_mods[i] != old_mods[i]) {
+				SDE_DEBUG_PLANE(psde,
+					"format modifiers change\"\
+					plane:%d new_mode:%llu old_mode:%llu\n",
+					i, new_mods[i], old_mods[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+					SDE_PLANE_DIRTY_RECTS;
+				break;
+			}
+		}
+		for (i = 0; i < ARRAY_SIZE(state->fb->pitches); i++) {
+			if (new_pitches[i] != old_pitches[i]) {
+				SDE_DEBUG_PLANE(psde,
+					"pitches change plane:%d\"\
+					old_pitches:%u new_pitches:%u\n",
+					i, old_pitches[i], new_pitches[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_RECTS;
+				break;
+			}
+		}
+		for (i = 0; i < ARRAY_SIZE(state->fb->offsets); i++) {
+			if (new_offset[i] != old_offset[i]) {
+				SDE_DEBUG_PLANE(psde,
+					"offset change plane:%d\"\
+					old_offset:%u new_offset:%u\n",
+					i, old_offset[i], new_offset[i]);
+				pstate->dirty |= SDE_PLANE_DIRTY_FORMAT |
+					SDE_PLANE_DIRTY_RECTS;
+				break;
+			}
+		}
+	}
+}
+
+/**
+ * sde_plane_atomic_check - validate a proposed plane state update
+ * @plane: Pointer to drm plane structure
+ * @state: Pointer to the proposed new plane state
+ *
+ * Validates format, source/destination rectangles, decimation and scaling
+ * limits against the capabilities of every physical pipe backing this
+ * plane.  On success, flags which h/w blocks need reprogramming via
+ * _sde_plane_atomic_check_mode_changed().
+ *
+ * Return: 0 on success, -EINVAL or -E2BIG on invalid configuration
+ */
+static int sde_plane_atomic_check(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	int ret = 0;
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	const struct sde_format *fmt;
+	struct sde_rect src, dst;
+	uint32_t deci_w, deci_h, src_deci_w, src_deci_h;
+	uint32_t max_upscale, max_downscale, min_src_size, max_linewidth;
+	bool q16_data = true;
+	struct sde_phy_plane *pp;
+	uint32_t num_of_phy_planes = 0;
+
+	if (!plane || !state) {
+		SDE_ERROR("invalid arg(s), plane %d state %d.\n",
+				plane != NULL, state != NULL);
+		ret = -EINVAL;
+		goto exit;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(state);
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+		num_of_phy_planes++;
+
+	deci_w = sde_plane_get_property(pstate, PLANE_PROP_H_DECIMATE);
+	deci_h = sde_plane_get_property(pstate, PLANE_PROP_V_DECIMATE);
+
+	/* src values are in Q16 fixed point, convert to integer */
+	POPULATE_RECT(&src, state->src_x, state->src_y, state->src_w,
+		state->src_h, q16_data);
+	POPULATE_RECT(&dst, state->crtc_x, state->crtc_y, state->crtc_w,
+		state->crtc_h, !q16_data);
+
+	src_deci_w = DECIMATED_DIMENSION(src.w, deci_w);
+	src_deci_h = DECIMATED_DIMENSION(src.h, deci_h);
+
+	SDE_DEBUG_PLANE(psde, "check %d -> %d\n",
+		sde_plane_enabled(plane->state), sde_plane_enabled(state));
+
+	if (!sde_plane_enabled(state))
+		goto modeset_update;
+
+	fmt = to_sde_format(msm_framebuffer_format(state->fb));
+
+	/* YUV formats fetch chroma in pairs, so need at least 2 pixels */
+	min_src_size = SDE_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (!pp->pipe_sblk) {
+			SDE_ERROR("invalid plane catalog\n");
+			ret = -EINVAL;
+			goto exit;
+		}
+
+		max_upscale = pp->pipe_sblk->maxupscale;
+		max_downscale = pp->pipe_sblk->maxdwnscale;
+		max_linewidth = pp->pipe_sblk->maxlinewidth;
+
+		if (SDE_FORMAT_IS_YUV(fmt) &&
+			(!(pp->features & SDE_SSPP_SCALER) ||
+			 !(pp->features & (BIT(SDE_SSPP_CSC)
+			 | BIT(SDE_SSPP_CSC_10BIT))))) {
+			SDE_ERROR_PLANE(psde,
+				"plane doesn't have scaler/csc for yuv\n");
+			ret = -EINVAL;
+
+		/* check src bounds */
+		} else if (state->fb->width > MAX_IMG_WIDTH ||
+			state->fb->height > MAX_IMG_HEIGHT ||
+			src.w < min_src_size || src.h < min_src_size ||
+			CHECK_LAYER_BOUNDS(src.x, src.w, state->fb->width) ||
+			CHECK_LAYER_BOUNDS(src.y, src.h, state->fb->height)) {
+			SDE_ERROR_PLANE(psde, "invalid source %u, %u, %ux%u\n",
+				src.x, src.y, src.w, src.h);
+			ret = -E2BIG;
+
+		/* valid yuv image */
+		} else if (SDE_FORMAT_IS_YUV(fmt) && ((src.x & 0x1)
+					|| (src.y & 0x1) || (src.w & 0x1)
+					|| (src.h & 0x1))) {
+			SDE_ERROR_PLANE(psde,
+				"invalid yuv source %u, %u, %ux%u\n",
+				src.x, src.y, src.w, src.h);
+			ret = -EINVAL;
+
+		/* min dst support */
+		} else if (dst.w < 0x1 || dst.h < 0x1) {
+			SDE_ERROR_PLANE(psde,
+				"invalid dest rect %u, %u, %ux%u\n",
+				dst.x, dst.y, dst.w, dst.h);
+			ret = -EINVAL;
+
+		/* decimation validation */
+		} else if (deci_w || deci_h) {
+			if ((deci_w > pp->pipe_sblk->maxhdeciexp) ||
+				(deci_h > pp->pipe_sblk->maxvdeciexp)) {
+				SDE_ERROR_PLANE(psde,
+					"too much decimation requested\n");
+				ret = -EINVAL;
+			} else if (fmt->fetch_mode != SDE_FETCH_LINEAR) {
+				SDE_ERROR_PLANE(psde,
+					"decimation requires linear fetch\n");
+				ret = -EINVAL;
+			}
+
+		} else if (!(pp->features & SDE_SSPP_SCALER) &&
+			((src.w != dst.w) || (src.h != dst.h))) {
+			SDE_ERROR_PLANE(psde,
+				"pipe doesn't support scaling %ux%u->%ux%u\n",
+				src.w, src.h, dst.w, dst.h);
+			ret = -EINVAL;
+
+		/* check decimated source width */
+		} else if (src_deci_w > max_linewidth * num_of_phy_planes) {
+			SDE_ERROR_PLANE(psde,
+				"invalid src w:%u, deci w:%u, line w:%u, num_phy_planes:%u\n",
+				src.w, src_deci_w, max_linewidth,
+				num_of_phy_planes);
+			ret = -E2BIG;
+
+		/* check max scaler capability */
+		} else if (((src_deci_w * max_upscale) < dst.w) ||
+			((src_deci_h * max_upscale) < dst.h) ||
+			((dst.w * max_downscale) < src_deci_w) ||
+			((dst.h * max_downscale) < src_deci_h)) {
+			SDE_ERROR_PLANE(psde,
+				"too much scaling requested %ux%u->%ux%u\n",
+				src_deci_w, src_deci_h, dst.w, dst.h);
+			ret = -E2BIG;
+		}
+	}
+
+modeset_update:
+	if (!ret)
+		_sde_plane_atomic_check_mode_changed(psde, state, plane->state);
+exit:
+	return ret;
+}
+
+/**
+ * sde_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_flush(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_phy_plane *pp;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+
+	/*
+	 * These updates have to be done immediately before the plane flush
+	 * timing, and may not be moved to the atomic_update/mode_set functions.
+	 */
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (psde->is_error)
+			/* force white frame with 100% alpha pipe output on error */
+			_sde_plane_color_fill(pp, 0xFFFFFF, 0xFF);
+		else if (pp->color_fill & SDE_PLANE_COLOR_FILL_FLAG)
+			/* force 100% alpha */
+			_sde_plane_color_fill(pp, pp->color_fill, 0xFF);
+		else if (pp->pipe_hw && pp->csc_ptr &&
+			pp->pipe_hw->ops.setup_csc)
+			/* normal path: (re)program CSC right before flush */
+			pp->pipe_hw->ops.setup_csc(pp->pipe_hw, pp->csc_ptr);
+	}
+
+	/* flag h/w flush complete */
+	if (plane->state)
+		to_sde_plane_state(plane->state)->pending = false;
+}
+
+/**
+ * sde_plane_atomic_update - apply the committed plane state to hardware
+ * @plane: Pointer to drm plane structure
+ * @old_state: Previous plane state (unused; new state is plane->state)
+ */
+static void sde_plane_atomic_update(struct drm_plane *plane,
+				struct drm_plane_state *old_state)
+{
+	struct sde_plane *psde;
+	struct drm_plane_state *state;
+	struct sde_plane_state *pstate;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+	state = plane->state;
+	pstate = to_sde_plane_state(state);
+	psde->is_error = false;
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	/* disabled planes only need the pending flag for the next flush */
+	if (!sde_plane_enabled(state)) {
+		pstate->pending = true;
+		return;
+	}
+
+	/* atomic_check should have ensured that this doesn't fail */
+	WARN_ON(_sde_plane_mode_set(plane, state) < 0);
+}
+
+
+/* helper to install properties which are common to planes and crtcs */
+static void _sde_plane_install_properties(struct drm_plane *plane,
+	struct sde_mdss_cfg *catalog, bool plane_reserved)
+{
+	static const struct drm_prop_enum_list e_blend_op[] = {
+		{SDE_DRM_BLEND_OP_NOT_DEFINED,    "not_defined"},
+		{SDE_DRM_BLEND_OP_OPAQUE,         "opaque"},
+		{SDE_DRM_BLEND_OP_PREMULTIPLIED,  "premultiplied"},
+		{SDE_DRM_BLEND_OP_COVERAGE,       "coverage"}
+	};
+	static const struct drm_prop_enum_list e_src_config[] = {
+		{SDE_DRM_DEINTERLACE, "deinterlace"},
+		{SDE_DRM_LINEPADDING, "linepadding"},
+	};
+	static const struct drm_prop_enum_list e_fb_translation_mode[] = {
+		{SDE_DRM_FB_NON_SEC, "non_sec"},
+		{SDE_DRM_FB_SEC, "sec"},
+		{SDE_DRM_FB_NON_SEC_DIR_TRANS, "non_sec_direct_translation"},
+		{SDE_DRM_FB_SEC_DIR_TRANS, "sec_direct_translation"},
+	};
+	const struct sde_format_extended *format_list = NULL;
+	struct sde_kms_info *info;
+	struct sde_plane *psde = to_sde_plane(plane);
+	int zpos_max = 255;
+	int zpos_def = 0;
+	char feature_name[256];
+	struct sde_phy_plane *pp;
+	uint32_t features = 0xFFFFFFFF, nformats = 64;
+	u32 maxlinewidth = 0, maxupscale = 0, maxdwnscale = 0;
+	u32 maxhdeciexp = 0, maxvdeciexp = 0;
+
+	if (!plane || !psde) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (!pp->pipe_hw || !pp->pipe_sblk) {
+			SDE_ERROR("invalid phy_plane, pipe_hw %d\"\
+				pipe_sblk %d\n", pp->pipe_hw != NULL,
+				pp->pipe_sblk != NULL);
+			return;
+		}
+	}
+	if (!catalog) {
+		SDE_ERROR("invalid catalog\n");
+		return;
+	}
+
+	/*
+	 * NOTE(review): this loop (and the ones below) break after the first
+	 * entry, so "pp" refers to the first phy plane for the remainder of
+	 * this function.  If phy_plane_head were empty, pp would be invalid
+	 * when dereferenced further down -- presumably callers guarantee at
+	 * least one phy plane; confirm against sde_plane_init().
+	 */
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		/* Get common features for all pipes */
+		features &= pp->features;
+		if (nformats > pp->nformats) {
+			nformats = pp->nformats;
+			format_list = pp->pipe_sblk->format_list;
+		}
+		if (maxlinewidth < pp->pipe_sblk->maxlinewidth)
+			maxlinewidth = pp->pipe_sblk->maxlinewidth;
+		if (maxupscale < pp->pipe_sblk->maxupscale)
+			maxupscale = pp->pipe_sblk->maxupscale;
+		if (maxdwnscale < pp->pipe_sblk->maxdwnscale)
+			maxdwnscale = pp->pipe_sblk->maxdwnscale;
+		if (maxhdeciexp < pp->pipe_sblk->maxhdeciexp)
+			maxhdeciexp = pp->pipe_sblk->maxhdeciexp;
+		if (maxvdeciexp < pp->pipe_sblk->maxvdeciexp)
+			maxvdeciexp = pp->pipe_sblk->maxvdeciexp;
+		break;
+	}
+
+	if (sde_is_custom_client()) {
+		if (catalog->mixer_count &&
+				catalog->mixer[0].sblk->maxblendstages) {
+			zpos_max = catalog->mixer[0].sblk->maxblendstages - 1;
+			if (zpos_max > SDE_STAGE_MAX - SDE_STAGE_0 - 1)
+				zpos_max = SDE_STAGE_MAX - SDE_STAGE_0 - 1;
+		}
+	} else if (plane->type != DRM_PLANE_TYPE_PRIMARY) {
+		/* reserve zpos == 0 for primary planes */
+		zpos_def = drm_plane_index(plane) + 1;
+	}
+
+	msm_property_install_range(&psde->property_info, "zpos",
+		0x0, 0, zpos_max, zpos_def, PLANE_PROP_ZPOS);
+
+	msm_property_install_range(&psde->property_info, "alpha",
+		0x0, 0, 255, 255, PLANE_PROP_ALPHA);
+
+	/* linux default file descriptor range on each process */
+	msm_property_install_range(&psde->property_info, "input_fence",
+		0x0, 0, INR_OPEN_MAX, 0, PLANE_PROP_INPUT_FENCE);
+
+	/* decimation properties come from the first phy plane only */
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (pp->pipe_sblk->maxhdeciexp) {
+			msm_property_install_range(&psde->property_info,
+					"h_decimate", 0x0, 0,
+					pp->pipe_sblk->maxhdeciexp, 0,
+					PLANE_PROP_H_DECIMATE);
+		}
+
+		if (pp->pipe_sblk->maxvdeciexp) {
+			msm_property_install_range(&psde->property_info,
+					"v_decimate", 0x0, 0,
+					pp->pipe_sblk->maxvdeciexp, 0,
+					PLANE_PROP_V_DECIMATE);
+		}
+		break;
+	}
+
+	/* scaler properties keyed off the feature bits common to all pipes */
+	if (features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"scaler_v2", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V2);
+		msm_property_install_blob(&psde->property_info, "lut_ed", 0,
+			PLANE_PROP_SCALER_LUT_ED);
+		msm_property_install_blob(&psde->property_info, "lut_cir", 0,
+			PLANE_PROP_SCALER_LUT_CIR);
+		msm_property_install_blob(&psde->property_info, "lut_sep", 0,
+			PLANE_PROP_SCALER_LUT_SEP);
+	} else if (features & SDE_SSPP_SCALER) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"scaler_v1", 0x0, 0, ~0, 0, PLANE_PROP_SCALER_V1);
+	}
+
+	if (features & BIT(SDE_SSPP_CSC)) {
+		msm_property_install_volatile_range(&psde->property_info,
+			"csc_v1", 0x0, 0, ~0, 0, PLANE_PROP_CSC_V1);
+	}
+
+	/* HSIC property names carry the h/w block version of the first pipe */
+	if (features & BIT(SDE_SSPP_HSIC)) {
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_HUE_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_HUE_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SATURATION_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_SATURATION_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_VALUE_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_VALUE_ADJUST);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_CONTRAST_V",
+			pp->pipe_sblk->hsic_blk.version >> 16);
+		msm_property_install_range(&psde->property_info,
+			feature_name, 0, 0, 0xFFFFFFFF, 0,
+			PLANE_PROP_CONTRAST_ADJUST);
+	}
+
+	/* standard properties */
+	msm_property_install_rotation(&psde->property_info,
+		BIT(DRM_REFLECT_X) | BIT(DRM_REFLECT_Y), PLANE_PROP_ROTATION);
+
+	msm_property_install_enum(&psde->property_info, "blend_op", 0x0, 0,
+		e_blend_op, ARRAY_SIZE(e_blend_op), PLANE_PROP_BLEND_OP,
+		SDE_DRM_BLEND_OP_PREMULTIPLIED);
+
+	msm_property_install_enum(&psde->property_info, "src_config", 0x0, 1,
+		e_src_config, ARRAY_SIZE(e_src_config), PLANE_PROP_SRC_CONFIG,
+		0);
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		if (pp->pipe_hw->ops.setup_solidfill)
+			msm_property_install_range(&psde->property_info,
+				"color_fill", 0, 0, 0xFFFFFFFF, 0,
+				PLANE_PROP_COLOR_FILL);
+		break;
+	}
+
+	info = kzalloc(sizeof(struct sde_kms_info), GFP_KERNEL);
+	if (!info) {
+		SDE_ERROR("failed to allocate info memory\n");
+		return;
+	}
+
+	/* immutable "capabilities" blob advertises pipe limits to userspace */
+	msm_property_install_blob(&psde->property_info, "capabilities",
+		DRM_MODE_PROP_IMMUTABLE, PLANE_PROP_INFO);
+	sde_kms_info_reset(info);
+
+	if (format_list) {
+		sde_kms_info_start(info, "pixel_formats");
+		while (format_list->fourcc_format) {
+			sde_kms_info_append_format(info,
+					format_list->fourcc_format,
+					format_list->modifier);
+			++format_list;
+		}
+		sde_kms_info_stop(info);
+	}
+
+	sde_kms_info_add_keyint(info, "max_linewidth", maxlinewidth);
+	sde_kms_info_add_keyint(info, "max_upscale", maxupscale);
+	sde_kms_info_add_keyint(info, "max_downscale", maxdwnscale);
+	sde_kms_info_add_keyint(info, "max_horizontal_deci", maxhdeciexp);
+	sde_kms_info_add_keyint(info, "max_vertical_deci", maxvdeciexp);
+
+	/* When early RVC is enabled in bootloader and doesn't exit,
+	 * user app should not touch the pipe which RVC is on.
+	 * So mark the plane_unavailibility to the special pipe's property,
+	 * user can parse this property of this pipe and stop this pipe's
+	 * allocation after parsing.
+	 * plane_reserved is 1, means the pipe is occupied in bootloader.
+	 * plane_reserved is 0, means it's not used in bootloader.
+	 */
+	sde_kms_info_add_keyint(info, "plane_unavailability", plane_reserved);
+	msm_property_set_blob(&psde->property_info, &psde->blob_info,
+		info->data, info->len, PLANE_PROP_INFO);
+
+	kfree(info);
+
+	if (features & BIT(SDE_SSPP_MEMCOLOR)) {
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SKIN_COLOR_V",
+			pp->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_SKIN_COLOR);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_SKY_COLOR_V",
+			pp->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_SKY_COLOR);
+		snprintf(feature_name, sizeof(feature_name), "%s%d",
+			"SDE_SSPP_FOLIAGE_COLOR_V",
+			pp->pipe_sblk->memcolor_blk.version >> 16);
+		msm_property_install_blob(&psde->property_info, feature_name, 0,
+			PLANE_PROP_FOLIAGE_COLOR);
+	}
+
+	msm_property_install_enum(&psde->property_info, "fb_translation_mode",
+			0x0,
+			0, e_fb_translation_mode,
+			ARRAY_SIZE(e_fb_translation_mode),
+			PLANE_PROP_FB_TRANSLATION_MODE, SDE_DRM_FB_NON_SEC);
+}
+
+/**
+ * _sde_plane_set_csc_v1 - cache user-provided CSC config on a phy plane
+ * @pp: Pointer to physical plane
+ * @usr_ptr: Userspace pointer to a struct sde_drm_csc_v1, or NULL to clear
+ *
+ * Copies the CSC matrix/bias/clamp values from userspace into the phy
+ * plane's csc_cfg and points csc_usr_ptr at it.  On NULL input or copy
+ * failure the cached data is simply dropped (csc_usr_ptr stays NULL).
+ */
+static inline void _sde_plane_set_csc_v1(struct sde_phy_plane *pp,
+		void *usr_ptr)
+{
+	struct sde_drm_csc_v1 csc_v1;
+	struct sde_plane *psde;
+	int i;
+
+	if (!pp) {
+		SDE_ERROR("invalid phy_plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	/* invalidate any previously cached config first */
+	pp->csc_usr_ptr = NULL;
+	if (!usr_ptr) {
+		SDE_DEBUG_PLANE(psde, "csc data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&csc_v1, usr_ptr, sizeof(csc_v1))) {
+		SDE_ERROR_PLANE(psde, "failed to copy csc data\n");
+		return;
+	}
+
+	/* populate from user space */
+	/* matrix coefficients arrive in fixed point; keep integer part only */
+	for (i = 0; i < SDE_CSC_MATRIX_COEFF_SIZE; ++i)
+		pp->csc_cfg.csc_mv[i] = csc_v1.ctm_coeff[i] >> 16;
+	for (i = 0; i < SDE_CSC_BIAS_SIZE; ++i) {
+		pp->csc_cfg.csc_pre_bv[i] = csc_v1.pre_bias[i];
+		pp->csc_cfg.csc_post_bv[i] = csc_v1.post_bias[i];
+	}
+	for (i = 0; i < SDE_CSC_CLAMP_SIZE; ++i) {
+		pp->csc_cfg.csc_pre_lv[i] = csc_v1.pre_clamp[i];
+		pp->csc_cfg.csc_post_lv[i] = csc_v1.post_clamp[i];
+	}
+	pp->csc_usr_ptr = &pp->csc_cfg;
+}
+
+/**
+ * _sde_plane_set_scaler_v1 - cache user-provided v1 scaler config
+ * @pp: Pointer to physical plane
+ * @usr: Userspace pointer to a struct sde_drm_scaler_v1, or NULL to clear
+ *
+ * Copies phase/filter settings and pixel-extension data from userspace
+ * into the phy plane's pixel_ext and marks it valid.  On NULL input or
+ * copy failure the cached data is dropped (pixel_ext_usr stays false).
+ */
+static inline void _sde_plane_set_scaler_v1(struct sde_phy_plane *pp,
+		void *usr)
+{
+	struct sde_drm_scaler_v1 scale_v1;
+	struct sde_hw_pixel_ext *pe;
+	struct sde_plane *psde;
+	int i;
+
+	if (!pp) {
+		SDE_ERROR("invalid phy_plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	/* invalidate any previously cached user data first */
+	pp->pixel_ext_usr = false;
+	if (!usr) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&scale_v1, usr, sizeof(scale_v1))) {
+		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+		return;
+	}
+
+	/* populate from user space, one color component plane per pass */
+	pe = &(pp->pixel_ext);
+	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->init_phase_x[i] = scale_v1.init_phase_x[i];
+		pe->phase_step_x[i] = scale_v1.phase_step_x[i];
+		pe->init_phase_y[i] = scale_v1.init_phase_y[i];
+		pe->phase_step_y[i] = scale_v1.phase_step_y[i];
+
+		pe->horz_filter[i] = scale_v1.horz_filter[i];
+		pe->vert_filter[i] = scale_v1.vert_filter[i];
+
+		pe->left_ftch[i] = scale_v1.pe.left_ftch[i];
+		pe->right_ftch[i] = scale_v1.pe.right_ftch[i];
+		pe->left_rpt[i] = scale_v1.pe.left_rpt[i];
+		pe->right_rpt[i] = scale_v1.pe.right_rpt[i];
+		pe->roi_w[i] = scale_v1.pe.num_ext_pxls_lr[i];
+
+		pe->top_ftch[i] = scale_v1.pe.top_ftch[i];
+		pe->btm_ftch[i] = scale_v1.pe.btm_ftch[i];
+		pe->top_rpt[i] = scale_v1.pe.top_rpt[i];
+		pe->btm_rpt[i] = scale_v1.pe.btm_rpt[i];
+		pe->roi_h[i] = scale_v1.pe.num_ext_pxls_tb[i];
+	}
+
+	pp->pixel_ext_usr = true;
+
+	SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
+/**
+ * _sde_plane_set_scaler_v2 - cache user-provided QSEED3 scaler config
+ * @pp: Pointer to physical plane
+ * @pstate: Pointer to sde plane state (currently unused here)
+ * @usr: Userspace pointer to a struct sde_drm_scaler_v2, or NULL to clear
+ *
+ * Copies QSEED3 scaler settings, detail-enhancer parameters and pixel
+ * extension data from userspace into the phy plane.  Disabled or invalid
+ * user data simply leaves pixel_ext_usr false.
+ */
+static inline void _sde_plane_set_scaler_v2(struct sde_phy_plane *pp,
+		struct sde_plane_state *pstate, void *usr)
+{
+	struct sde_drm_scaler_v2 scale_v2;
+	struct sde_hw_pixel_ext *pe;
+	int i;
+	struct sde_hw_scaler3_cfg *cfg;
+	struct sde_plane *psde;
+
+	if (!pp) {
+		SDE_ERROR("invalid phy_plane\n");
+		return;
+	}
+	psde = pp->sde_plane;
+
+	/*
+	 * NOTE(review): cfg is dereferenced below without a NULL check;
+	 * scaler3_cfg is only allocated for QSEED3-capable pipes (see
+	 * _sde_init_phy_plane), and the scaler_v2 property is only installed
+	 * when all pipes have QSEED3 -- presumably that guarantees non-NULL
+	 * here; confirm before reusing this helper elsewhere.
+	 */
+	cfg = pp->scaler3_cfg;
+	pp->pixel_ext_usr = false;
+	if (!usr) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	if (copy_from_user(&scale_v2, usr, sizeof(scale_v2))) {
+		SDE_ERROR_PLANE(psde, "failed to copy scale data\n");
+		return;
+	}
+
+	/* detach/ignore user data if 'disabled' */
+	if (!scale_v2.enable) {
+		SDE_DEBUG_PLANE(psde, "scale data removed\n");
+		return;
+	}
+
+	/* populate from user space */
+	pe = &(pp->pixel_ext);
+	memset(pe, 0, sizeof(struct sde_hw_pixel_ext));
+	cfg->enable = scale_v2.enable;
+	cfg->dir_en = scale_v2.dir_en;
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		cfg->init_phase_x[i] = scale_v2.init_phase_x[i];
+		cfg->phase_step_x[i] = scale_v2.phase_step_x[i];
+		cfg->init_phase_y[i] = scale_v2.init_phase_y[i];
+		cfg->phase_step_y[i] = scale_v2.phase_step_y[i];
+
+		cfg->preload_x[i] = scale_v2.preload_x[i];
+		cfg->preload_y[i] = scale_v2.preload_y[i];
+		cfg->src_width[i] = scale_v2.src_width[i];
+		cfg->src_height[i] = scale_v2.src_height[i];
+	}
+	cfg->dst_width = scale_v2.dst_width;
+	cfg->dst_height = scale_v2.dst_height;
+
+	cfg->y_rgb_filter_cfg = scale_v2.y_rgb_filter_cfg;
+	cfg->uv_filter_cfg = scale_v2.uv_filter_cfg;
+	cfg->alpha_filter_cfg = scale_v2.alpha_filter_cfg;
+	cfg->blend_cfg = scale_v2.blend_cfg;
+
+	cfg->lut_flag = scale_v2.lut_flag;
+	cfg->dir_lut_idx = scale_v2.dir_lut_idx;
+	cfg->y_rgb_cir_lut_idx = scale_v2.y_rgb_cir_lut_idx;
+	cfg->uv_cir_lut_idx = scale_v2.uv_cir_lut_idx;
+	cfg->y_rgb_sep_lut_idx = scale_v2.y_rgb_sep_lut_idx;
+	cfg->uv_sep_lut_idx = scale_v2.uv_sep_lut_idx;
+
+	/* detail enhancer (sharpening) parameters */
+	cfg->de.enable = scale_v2.de.enable;
+	cfg->de.sharpen_level1 = scale_v2.de.sharpen_level1;
+	cfg->de.sharpen_level2 = scale_v2.de.sharpen_level2;
+	cfg->de.clip = scale_v2.de.clip;
+	cfg->de.limit = scale_v2.de.limit;
+	cfg->de.thr_quiet = scale_v2.de.thr_quiet;
+	cfg->de.thr_dieout = scale_v2.de.thr_dieout;
+	cfg->de.thr_low = scale_v2.de.thr_low;
+	cfg->de.thr_high = scale_v2.de.thr_high;
+	cfg->de.prec_shift = scale_v2.de.prec_shift;
+	for (i = 0; i < SDE_MAX_DE_CURVES; i++) {
+		cfg->de.adjust_a[i] = scale_v2.de.adjust_a[i];
+		cfg->de.adjust_b[i] = scale_v2.de.adjust_b[i];
+		cfg->de.adjust_c[i] = scale_v2.de.adjust_c[i];
+	}
+	for (i = 0; i < SDE_MAX_PLANES; i++) {
+		pe->left_ftch[i] = scale_v2.pe.left_ftch[i];
+		pe->right_ftch[i] = scale_v2.pe.right_ftch[i];
+		pe->left_rpt[i] = scale_v2.pe.left_rpt[i];
+		pe->right_rpt[i] = scale_v2.pe.right_rpt[i];
+		pe->roi_w[i] = scale_v2.pe.num_ext_pxls_lr[i];
+
+		pe->top_ftch[i] = scale_v2.pe.top_ftch[i];
+		pe->btm_ftch[i] = scale_v2.pe.btm_ftch[i];
+		pe->top_rpt[i] = scale_v2.pe.top_rpt[i];
+		pe->btm_rpt[i] = scale_v2.pe.btm_rpt[i];
+		pe->roi_h[i] = scale_v2.pe.num_ext_pxls_tb[i];
+	}
+	pp->pixel_ext_usr = true;
+
+	SDE_DEBUG_PLANE(psde, "user property data copied\n");
+}
+
+/**
+ * sde_plane_atomic_set_property - set a plane property on a given state
+ * @plane: Pointer to drm plane structure
+ * @state: Pointer to target plane state
+ * @property: Pointer to the drm property being set
+ * @val: New property value
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int sde_plane_atomic_set_property(struct drm_plane *plane,
+		struct drm_plane_state *state, struct drm_property *property,
+		uint64_t val)
+{
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_plane_state *pstate;
+	struct sde_phy_plane *pp;
+	int idx;
+	int ret;
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return -EINVAL;
+	}
+
+	if (!state) {
+		SDE_ERROR_PLANE(psde, "invalid state\n");
+		return -EINVAL;
+	}
+
+	pstate = to_sde_plane_state(state);
+
+	/* generic helper stores the value; extra handling below per index */
+	ret = msm_property_atomic_set(&psde->property_info,
+			pstate->property_values, pstate->property_blobs,
+			property, val);
+	if (ret)
+		return ret;
+
+	idx = msm_property_index(&psde->property_info, property);
+	switch (idx) {
+	case PLANE_PROP_INPUT_FENCE:
+		_sde_plane_set_input_fence(psde, pstate, val);
+		break;
+	case PLANE_PROP_CSC_V1:
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+			_sde_plane_set_csc_v1(pp, (void *)val);
+		break;
+	case PLANE_PROP_SCALER_V1:
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+			_sde_plane_set_scaler_v1(pp, (void *)val);
+		break;
+	case PLANE_PROP_SCALER_V2:
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list)
+			_sde_plane_set_scaler_v2(pp, pstate, (void *)val);
+		break;
+	default:
+		/* no special handling needed */
+		break;
+	}
+
+	return ret;
+}
+
+/* legacy (non-atomic) entry point: applies against the current state */
+static int sde_plane_set_property(struct drm_plane *plane,
+		struct drm_property *property, uint64_t val)
+{
+	SDE_DEBUG("\n");
+
+	return sde_plane_atomic_set_property(plane,
+			plane->state, property, val);
+}
+
+/**
+ * sde_plane_atomic_get_property - read a plane property from a given state
+ * @plane: Pointer to drm plane structure
+ * @state: Pointer to source plane state
+ * @property: Pointer to the drm property being queried
+ * @val: Output location for the property value
+ *
+ * Return: 0 on success, negative error code on failure
+ */
+static int sde_plane_atomic_get_property(struct drm_plane *plane,
+		const struct drm_plane_state *state,
+		struct drm_property *property, uint64_t *val)
+{
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_plane_state *pstate;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return -EINVAL;
+	}
+
+	if (!state) {
+		SDE_ERROR("invalid state\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	pstate = to_sde_plane_state(state);
+
+	/* generic property helper handles all readback */
+	return msm_property_atomic_get(&psde->property_info,
+			pstate->property_values, pstate->property_blobs,
+			property, val);
+}
+
+/**
+ * sde_plane_destroy - tear down a plane and its physical pipes
+ * @plane: Pointer to drm plane structure (may be NULL)
+ *
+ * Order matters: QoS/panic control is disabled first, then DRM-level
+ * state is released, and only then are the h/w pipe contexts destroyed.
+ */
+static void sde_plane_destroy(struct drm_plane *plane)
+{
+	struct sde_plane *psde = plane ? to_sde_plane(plane) : NULL;
+	struct sde_phy_plane *pp, *n;
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	if (psde) {
+		/* stop panic/QoS signalling on every pipe before teardown */
+		list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+			_sde_plane_set_qos_ctrl(pp,
+				false, SDE_PLANE_QOS_PANIC_CTRL);
+		}
+		debugfs_remove_recursive(psde->debugfs_root);
+
+		if (psde->blob_info)
+			drm_property_unreference_blob(psde->blob_info);
+		msm_property_destroy(&psde->property_info);
+		mutex_destroy(&psde->lock);
+
+		drm_plane_helper_disable(plane);
+
+		/* this will destroy the states as well */
+		drm_plane_cleanup(plane);
+
+		/* release each phy plane's h/w context and free it */
+		list_for_each_entry_safe(pp, n,
+			&psde->phy_plane_head, phy_plane_list) {
+			if (pp->pipe_hw)
+				sde_hw_sspp_destroy(pp->pipe_hw);
+			list_del(&pp->phy_plane_list);
+			kfree(pp);
+		}
+
+		kfree(psde);
+	}
+}
+
+/**
+ * sde_plane_destroy_state - release a plane state and its references
+ * @plane: Pointer to drm plane structure
+ * @state: Pointer to the plane state being destroyed
+ *
+ * Drops the framebuffer and input-fence references held by the state,
+ * then frees the state through the property helper.
+ */
+static void sde_plane_destroy_state(struct drm_plane *plane,
+		struct drm_plane_state *state)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+
+	if (!plane || !state) {
+		/* compare against NULL for consistency with other checks */
+		SDE_ERROR("invalid arg(s), plane %d state %d\n",
+				plane != NULL, state != NULL);
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+	pstate = to_sde_plane_state(state);
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	/* remove ref count for frame buffers */
+	if (state->fb)
+		drm_framebuffer_unreference(state->fb);
+
+	/* remove ref count for fence */
+	if (pstate->input_fence)
+		sde_sync_put(pstate->input_fence);
+
+	/* destroy value helper */
+	msm_property_destroy_state(&psde->property_info, pstate,
+			pstate->property_values, pstate->property_blobs);
+}
+
+/**
+ * sde_plane_duplicate_state - duplicate the current plane state
+ * @plane: Pointer to drm plane structure
+ *
+ * Copies the current state's property values, takes a reference on the
+ * framebuffer, and clears per-commit data (input fence, dirty, pending)
+ * so the duplicate starts from a clean slate.
+ *
+ * Return: Pointer to the new base plane state, or NULL on failure
+ */
+static struct drm_plane_state *
+sde_plane_duplicate_state(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+	struct sde_plane_state *old_state;
+	uint64_t input_fence_default;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return NULL;
+	} else if (!plane->state) {
+		SDE_ERROR("invalid plane state\n");
+		return NULL;
+	}
+
+	old_state = to_sde_plane_state(plane->state);
+	psde = to_sde_plane(plane);
+	pstate = msm_property_alloc_state(&psde->property_info);
+	if (!pstate) {
+		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
+		return NULL;
+	}
+
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	/* duplicate value helper */
+	msm_property_duplicate_state(&psde->property_info, old_state, pstate,
+			pstate->property_values, pstate->property_blobs);
+
+	/* add ref count for frame buffer */
+	if (pstate->base.fb)
+		drm_framebuffer_reference(pstate->base.fb);
+
+	/* clear out any input fence; fences are not carried across commits */
+	pstate->input_fence = NULL;
+	input_fence_default = msm_property_get_default(
+			&psde->property_info, PLANE_PROP_INPUT_FENCE);
+	msm_property_set_property(&psde->property_info, pstate->property_values,
+			PLANE_PROP_INPUT_FENCE, input_fence_default);
+
+	pstate->dirty = 0x0;
+	pstate->pending = false;
+
+	return &pstate->base;
+}
+
+/**
+ * sde_plane_reset - reset the plane to a default software state
+ * @plane: Pointer to drm plane structure
+ *
+ * Destroys any existing state and installs a freshly-allocated state
+ * with all properties set to their defaults.
+ */
+static void sde_plane_reset(struct drm_plane *plane)
+{
+	struct sde_plane *psde;
+	struct sde_plane_state *pstate;
+
+	if (!plane) {
+		SDE_ERROR("invalid plane\n");
+		return;
+	}
+
+	psde = to_sde_plane(plane);
+	SDE_DEBUG_PLANE(psde, "\n");
+
+	/* remove previous state, if present */
+	if (plane->state) {
+		sde_plane_destroy_state(plane, plane->state);
+		plane->state = NULL;
+	}
+
+	pstate = msm_property_alloc_state(&psde->property_info);
+	if (!pstate) {
+		SDE_ERROR_PLANE(psde, "failed to allocate state\n");
+		return;
+	}
+
+	/* reset value helper */
+	msm_property_reset_state(&psde->property_info, pstate,
+			pstate->property_values, pstate->property_blobs);
+
+	pstate->base.plane = plane;
+
+	plane->state = &pstate->base;
+}
+
+/* DRM plane vtable: property handling and state lifecycle */
+static const struct drm_plane_funcs sde_plane_funcs = {
+		.update_plane = drm_atomic_helper_update_plane,
+		.disable_plane = drm_atomic_helper_disable_plane,
+		.destroy = sde_plane_destroy,
+		.set_property = sde_plane_set_property,
+		.atomic_set_property = sde_plane_atomic_set_property,
+		.atomic_get_property = sde_plane_atomic_get_property,
+		.reset = sde_plane_reset,
+		.atomic_duplicate_state = sde_plane_duplicate_state,
+		.atomic_destroy_state = sde_plane_destroy_state,
+};
+
+/* atomic helper callbacks: fb prep/cleanup, validation and h/w update */
+static const struct drm_plane_helper_funcs sde_plane_helper_funcs = {
+		.prepare_fb = sde_plane_prepare_fb,
+		.cleanup_fb = sde_plane_cleanup_fb,
+		.atomic_check = sde_plane_atomic_check,
+		.atomic_update = sde_plane_atomic_update,
+};
+
+/**
+ * sde_plane_pipe - look up the sspp id of a plane's phy pipe by index
+ * @plane: Pointer to drm plane structure
+ * @index: Index of the physical plane within the plane's pipe list
+ *
+ * Return: sspp id at @index; the first pipe's id if @index is out of
+ *         range; SSPP_NONE if the plane has no physical pipes
+ */
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane, uint32_t index)
+{
+	struct sde_plane *sde_plane = to_sde_plane(plane);
+	struct sde_phy_plane *pp;
+	enum sde_sspp default_sspp = SSPP_NONE;
+	int i = 0;
+
+	list_for_each_entry(pp, &sde_plane->phy_plane_head, phy_plane_list) {
+		/* remember the first pipe as the fallback */
+		if (!i)
+			default_sspp = pp->pipe;
+		if (i == index)
+			return pp->pipe;
+		i++;
+	}
+
+	return default_sspp;
+}
+
+/**
+ * _sde_plane_danger_read - debugfs read for the danger-disable flag
+ * @file: debugfs file, private_data is the sde_kms
+ * @buff: userspace buffer
+ * @count: size of userspace buffer
+ * @ppos: file offset
+ *
+ * Reports "1" when danger signalling is disabled, "0" when enabled.
+ */
+static ssize_t _sde_plane_danger_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_kms *kms = file->private_data;
+	struct sde_mdss_cfg *cfg = kms->catalog;
+	int len = 0;
+	char buf[40] = {'\0'};
+
+	if (!cfg)
+		return -ENODEV;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	/* value printed is the inverse of has_danger_ctrl */
+	len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+	if (len < 0 || len >= sizeof(buf))
+		return 0;
+
+	/*
+	 * NOTE(review): this rejects reads with a user buffer smaller than
+	 * sizeof(buf) (40 bytes) even though only "len" bytes are copied --
+	 * presumably intentional to keep the copy simple; confirm.
+	 */
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+		return -EFAULT;
+
+	*ppos += len;	/* increase offset */
+
+	return len;
+}
+
+/**
+ * _sde_plane_set_danger_state - toggle danger signalling on active planes
+ * @kms: Pointer to sde kms
+ * @enable: true to enable danger/panic signalling, false to disable
+ *
+ * Walks every plane of the device; only planes with both an fb and a
+ * state (i.e. active planes) are touched.
+ */
+static void _sde_plane_set_danger_state(struct sde_kms *kms, bool enable)
+{
+	struct drm_plane *plane;
+	struct sde_plane *psde;
+	struct sde_phy_plane *pp;
+
+	drm_for_each_plane(plane, kms->dev) {
+		if (plane->fb && plane->state) {
+			psde = to_sde_plane(plane);
+			list_for_each_entry(pp, &psde->phy_plane_head,
+				phy_plane_list) {
+				sde_plane_danger_signal_ctrl(pp, enable);
+			}
+			/* log image size and src/dst rects for debugging */
+			SDE_DEBUG("plane:%d img:%dx%d ",
+				plane->base.id, plane->fb->width,
+				plane->fb->height);
+			SDE_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+				plane->state->src_x >> 16,
+				plane->state->src_y >> 16,
+				plane->state->src_w >> 16,
+				plane->state->src_h >> 16,
+				plane->state->crtc_x, plane->state->crtc_y,
+				plane->state->crtc_w, plane->state->crtc_h);
+		} else {
+			SDE_DEBUG("Inactive plane:%d\n", plane->base.id);
+		}
+	}
+}
+
+/**
+ * _sde_plane_danger_write - debugfs write for the danger-disable flag
+ * @file: debugfs file, private_data is the sde_kms
+ * @user_buf: userspace buffer holding an integer string
+ * @count: number of bytes in the buffer
+ * @ppos: file offset (unused)
+ *
+ * A non-zero value disables danger/panic signalling on all active
+ * pipes; zero re-enables it.
+ */
+static ssize_t _sde_plane_danger_write(struct file *file,
+		    const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_kms *kms = file->private_data;
+	struct sde_mdss_cfg *cfg = kms->catalog;
+	int disable_panic;
+	char buf[10];
+
+	if (!cfg)
+		return -EFAULT;
+
+	/* reserve one byte for the string terminator */
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = '\0';
+
+	if (kstrtoint(buf, 0, &disable_panic))
+		return -EFAULT;
+
+	if (disable_panic) {
+		/* Disable panic signal for all active pipes */
+		SDE_DEBUG("Disabling danger:\n");
+		_sde_plane_set_danger_state(kms, false);
+		kms->has_danger_ctrl = false;
+		return count;
+	}
+
+	/* Enable panic signal for all active pipes */
+	SDE_DEBUG("Enabling danger:\n");
+	kms->has_danger_ctrl = true;
+	_sde_plane_set_danger_state(kms, true);
+
+	return count;
+}
+
+/* debugfs file ops for the global "disable_danger" control */
+static const struct file_operations sde_plane_danger_enable = {
+	.open = simple_open,
+	.read = _sde_plane_danger_read,
+	.write = _sde_plane_danger_write,
+};
+
+/**
+ * _sde_plane_init_debugfs - create debugfs entries for a plane
+ * @psde: Pointer to sde plane
+ * @kms: Pointer to sde kms
+ *
+ * Creates a per-plane debugfs directory with pipe/feature info and
+ * register dump entries.  Note the loop breaks after the first phy
+ * plane, so register blocks of only the first pipe are exposed.
+ */
+static void _sde_plane_init_debugfs(struct sde_plane *psde,
+		struct sde_kms *kms)
+{
+	const struct sde_sspp_sub_blks *sblk = 0;
+	const struct sde_sspp_cfg *cfg = 0;
+	struct sde_phy_plane *pp;
+
+	if (!psde || !kms) {
+		SDE_ERROR("invalid arg(s), psde %d kms %d\n",
+				psde != NULL, kms != NULL);
+		return;
+	}
+
+	/* create overall sub-directory for the pipe */
+	psde->debugfs_root = debugfs_create_dir(psde->pipe_name,
+			sde_debugfs_get_root(kms));
+	if (!psde->debugfs_root)
+		return;
+
+	list_for_each_entry(pp, &psde->phy_plane_head, phy_plane_list) {
+		debugfs_create_u32("pipe", S_IRUGO | S_IWUSR,
+			psde->debugfs_root, &pp->pipe);
+
+		/* skip pipes without a usable h/w catalog entry */
+		if (!pp->pipe_hw || !pp->pipe_hw->cap ||
+			!pp->pipe_hw->cap->sblk)
+			continue;
+		cfg = pp->pipe_hw->cap;
+		sblk = cfg->sblk;
+
+		/* don't error check these */
+		debugfs_create_x32("features", S_IRUGO | S_IWUSR,
+			psde->debugfs_root, &pp->features);
+
+		/* add register dump support */
+		sde_debugfs_setup_regset32(&psde->debugfs_src,
+				sblk->src_blk.base + cfg->base,
+				sblk->src_blk.len,
+				kms);
+		sde_debugfs_create_regset32("src_blk", S_IRUGO,
+				psde->debugfs_root, &psde->debugfs_src);
+
+		sde_debugfs_setup_regset32(&psde->debugfs_scaler,
+				sblk->scaler_blk.base + cfg->base,
+				sblk->scaler_blk.len,
+				kms);
+		sde_debugfs_create_regset32("scaler_blk", S_IRUGO,
+				psde->debugfs_root,
+				&psde->debugfs_scaler);
+		debugfs_create_bool("default_scaling",
+				0644,
+				psde->debugfs_root,
+				&psde->debugfs_default_scale);
+
+		sde_debugfs_setup_regset32(&psde->debugfs_csc,
+				sblk->csc_blk.base + cfg->base,
+				sblk->csc_blk.len,
+				kms);
+		sde_debugfs_create_regset32("csc_blk", S_IRUGO,
+				psde->debugfs_root, &psde->debugfs_csc);
+
+		debugfs_create_u32("xin_id",
+				S_IRUGO,
+				psde->debugfs_root,
+				(u32 *) &cfg->xin_id);
+		debugfs_create_u32("clk_ctrl",
+				S_IRUGO,
+				psde->debugfs_root,
+				(u32 *) &cfg->clk_ctrl);
+		debugfs_create_x32("creq_vblank",
+				S_IRUGO | S_IWUSR,
+				psde->debugfs_root,
+				(u32 *) &sblk->creq_vblank);
+		debugfs_create_x32("danger_vblank",
+				S_IRUGO | S_IWUSR,
+				psde->debugfs_root,
+				(u32 *) &sblk->danger_vblank);
+
+		debugfs_create_file("disable_danger",
+				S_IRUGO | S_IWUSR,
+				psde->debugfs_root,
+				kms, &sde_plane_danger_enable);
+
+		/* only the first phy plane's registers are exposed */
+		break;
+	}
+}
+
+/**
+ * _sde_init_phy_plane - initialize one physical pipe backing a plane
+ * @sde_kms: Pointer to sde kms
+ * @psde: Pointer to the owning sde plane
+ * @pipe: sspp id of the pipe to initialize
+ * @index: index of this phy plane within the plane
+ * @pp: Pre-allocated phy plane to populate
+ *
+ * Looks up the pipe's h/w context and catalog data, allocates a QSEED3
+ * scaler config when supported, and populates the pipe's format list.
+ *
+ * Return: 0 on success, -EINVAL/-ENOMEM on failure; on failure the
+ *         caller owns and frees @pp
+ */
+static int _sde_init_phy_plane(struct sde_kms *sde_kms,
+	struct sde_plane *psde, uint32_t pipe, uint32_t index,
+	struct sde_phy_plane *pp)
+{
+	int rc = 0;
+
+	pp->pipe_hw = sde_rm_get_hw_by_id(&sde_kms->rm,
+			SDE_HW_BLK_SSPP, pipe);
+	if (!pp->pipe_hw) {
+		SDE_ERROR("Not found resource for id=%d\n", pipe);
+		rc = -EINVAL;
+		goto end;
+	} else if (!pp->pipe_hw->cap || !pp->pipe_hw->cap->sblk) {
+		SDE_ERROR("[%u]SSPP returned invalid cfg\n", pipe);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* cache features mask for later */
+	pp->features = pp->pipe_hw->cap->features;
+	pp->pipe_sblk = pp->pipe_hw->cap->sblk;
+	if (!pp->pipe_sblk) {
+		/* defensive; sblk was already validated above */
+		SDE_ERROR("invalid sblk on pipe %d\n", pipe);
+		rc = -EINVAL;
+		goto end;
+	}
+
+	if (pp->features & BIT(SDE_SSPP_SCALER_QSEED3)) {
+		pp->scaler3_cfg = kzalloc(sizeof(struct sde_hw_scaler3_cfg),
+			GFP_KERNEL);
+		if (!pp->scaler3_cfg) {
+			SDE_ERROR("[%u]failed to allocate scale struct\n",
+				pipe);
+			rc = -ENOMEM;
+			goto end;
+		}
+	}
+
+	/* add plane to DRM framework */
+	pp->nformats = sde_populate_formats(
+				pp->pipe_sblk->format_list,
+				pp->formats,
+				NULL,
+				ARRAY_SIZE(pp->formats));
+
+	if (!pp->nformats) {
+		SDE_ERROR("[%u]no valid formats for plane\n", pipe);
+		kzfree(pp->scaler3_cfg);
+		/* clear to avoid dangling pointer / double free in caller */
+		pp->scaler3_cfg = NULL;
+
+		rc = -EINVAL;
+		goto end;
+	}
+
+	pp->sde_plane = psde;
+	pp->pipe = pipe;
+	pp->index = index;
+
+end:
+	return rc;
+}
+
+/**
+ * sde_plane_update_blob_property - patch a key in the plane info blob
+ * @plane: Pointer to drm plane structure
+ * @key: Key string to update within the capabilities blob
+ * @value: New integer value for the key
+ */
+void sde_plane_update_blob_property(struct drm_plane *plane,
+				const char *key,
+				int32_t value)
+{
+	char *kms_info_str = NULL;
+	struct sde_plane *sde_plane;
+	size_t len;
+
+	/* guard before dereferencing, consistent with other entry points */
+	if (!plane || !key) {
+		SDE_ERROR("invalid arg(s), plane %d key %d\n",
+				plane != NULL, key != NULL);
+		return;
+	}
+
+	sde_plane = to_sde_plane(plane);
+
+	kms_info_str = (char *)msm_property_get_blob(&sde_plane->property_info,
+				&sde_plane->blob_info, &len, 0);
+	if (!kms_info_str) {
+		SDE_ERROR("get plane property_info failed\n");
+		return;
+	}
+
+	sde_kms_info_update_keystr(kms_info_str, key, value);
+}
+
+/* initialize plane */
+/**
+ * sde_plane_init - create and register a drm plane
+ * @dev: Pointer to drm device
+ * @pipe: sspp id (or virtual plane index when @vp_enabled)
+ * @primary_plane: true if this should be a primary plane
+ * @possible_crtcs: bitmask of crtcs this plane may attach to
+ * @vp_enabled: true when the catalog describes virtual planes that
+ *              aggregate multiple physical pipes
+ * @plane_reserved: true when the bootloader still owns this pipe (RVC)
+ *
+ * Return: Pointer to the new drm plane, or ERR_PTR on failure
+ */
+struct drm_plane *sde_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs,
+		bool vp_enabled, bool plane_reserved)
+{
+	struct drm_plane *plane = NULL;
+	struct sde_plane *psde;
+	struct sde_phy_plane *pp, *n;
+	struct msm_drm_private *priv;
+	struct sde_kms *kms;
+	enum drm_plane_type type;
+	int ret = -EINVAL;
+	struct sde_vp_cfg *vp;
+	struct sde_vp_sub_blks *vp_sub;
+	uint32_t features = 0xFFFFFFFF, nformats = 64, formats[64];
+	uint32_t index = 0;
+
+	if (!dev) {
+		SDE_ERROR("[%u]device is NULL\n", pipe);
+		goto exit;
+	}
+
+	priv = dev->dev_private;
+	if (!priv) {
+		SDE_ERROR("[%u]private data is NULL\n", pipe);
+		goto exit;
+	}
+
+	if (!priv->kms) {
+		SDE_ERROR("[%u]invalid KMS reference\n", pipe);
+		goto exit;
+	}
+	kms = to_sde_kms(priv->kms);
+
+	if (!kms->catalog) {
+		SDE_ERROR("[%u]invalid catalog reference\n", pipe);
+		goto exit;
+	}
+
+	/* create and zero local structure */
+	psde = kzalloc(sizeof(*psde), GFP_KERNEL);
+	if (!psde) {
+		SDE_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+		ret = -ENOMEM;
+		goto exit;
+	}
+
+	/* cache local stuff for later */
+	plane = &psde->base;
+
+	INIT_LIST_HEAD(&psde->phy_plane_head);
+
+	/* initialize underlying h/w driver */
+	if (vp_enabled) {
+		/*
+		 * NOTE(review): "pipe" indexes catalog->vp[] here with no
+		 * bounds check, and if vp->sub_blks were empty, "formats"
+		 * would remain uninitialized below -- presumably the catalog
+		 * guarantees both; confirm.
+		 */
+		vp = &(kms->catalog->vp[pipe]);
+		list_for_each_entry(vp_sub, &vp->sub_blks, pipeid_list) {
+			pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+			if (!pp) {
+				SDE_ERROR("out of memory\n");
+				ret = -ENOMEM;
+				goto clean_plane;
+			}
+
+			ret = _sde_init_phy_plane(kms, psde, vp_sub->sspp_id,
+					index, pp);
+			if (ret) {
+				SDE_ERROR("_sde_init_phy_plane error vp=%d\n",
+					pipe);
+				kfree(pp);
+				ret = -EINVAL;
+				goto clean_plane;
+			}
+			/* Get common features for all pipes */
+			features &= pp->features;
+			/* expose the smallest common format list */
+			if (nformats > pp->nformats) {
+				nformats = pp->nformats;
+				memcpy(formats, pp->formats,
+					sizeof(formats));
+			}
+			list_add_tail(&pp->phy_plane_list,
+				&psde->phy_plane_head);
+			index++;
+			psde->num_of_phy_planes++;
+		}
+	} else {
+		/* single physical pipe backs this plane */
+		pp = kzalloc(sizeof(*pp), GFP_KERNEL);
+		if (!pp) {
+			SDE_ERROR("out of memory\n");
+			ret = -ENOMEM;
+			goto clean_plane;
+		}
+
+		ret = _sde_init_phy_plane(kms, psde, pipe, index, pp);
+		if (ret) {
+			SDE_ERROR("_sde_init_phy_plane error id=%d\n",
+				pipe);
+			kfree(pp);
+			ret = -EINVAL;
+			goto clean_plane;
+		}
+		features = pp->features;
+		nformats = pp->nformats;
+		memcpy(formats, pp->formats,
+				sizeof(uint32_t) * 64);
+		list_add_tail(&pp->phy_plane_list,
+				&psde->phy_plane_head);
+		psde->num_of_phy_planes++;
+	}
+
+	if (features & BIT(SDE_SSPP_CURSOR))
+		type = DRM_PLANE_TYPE_CURSOR;
+	else if (primary_plane)
+		type = DRM_PLANE_TYPE_PRIMARY;
+	else
+		type = DRM_PLANE_TYPE_OVERLAY;
+	ret = drm_universal_plane_init(dev, plane, possible_crtcs,
+			&sde_plane_funcs, formats, nformats, type);
+	if (ret)
+		goto clean_plane;
+
+	/* success! finalize initialization */
+	drm_plane_helper_add(plane, &sde_plane_helper_funcs);
+
+	msm_property_init(&psde->property_info, &plane->base, dev,
+			priv->plane_property, psde->property_data,
+			PLANE_PROP_COUNT, PLANE_PROP_BLOBCOUNT,
+			sizeof(struct sde_plane_state));
+
+	_sde_plane_install_properties(plane, kms->catalog, plane_reserved);
+
+	/* save user friendly pipe name for later */
+	snprintf(psde->pipe_name, SDE_NAME_SIZE, "plane%u", plane->base.id);
+
+	mutex_init(&psde->lock);
+
+	_sde_plane_init_debugfs(psde, kms);
+
+	DRM_INFO("%s created for pipe %u\n", psde->pipe_name, pipe);
+	return plane;
+
+clean_plane:
+	/* unwind all phy planes added so far */
+	if (psde) {
+		list_for_each_entry_safe(pp, n,
+			&psde->phy_plane_head, phy_plane_list) {
+			if (pp->pipe_hw)
+				sde_hw_sspp_destroy(pp->pipe_hw);
+
+			kfree(pp->scaler3_cfg);
+			list_del(&pp->phy_plane_list);
+			kfree(pp);
+		}
+		kfree(psde);
+	}
+
+exit:
+	return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_plane.h b/drivers/gpu/drm/msm/sde/sde_plane.h
new file mode 100644
index 000000000000..c1414e7db74e
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_plane.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _SDE_PLANE_H_
+#define _SDE_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "msm_prop.h"
+#include "sde_hw_mdss.h"
+
+/**
+ * struct sde_plane_state: Define sde extension of drm plane state object
+ * @base: base drm plane state object
+ * @property_values: cached plane property values
+ * @property_blobs: blob properties
+ * @input_fence: dereferenced input fence pointer
+ * @stage: assigned by crtc blender
+ * @dirty: bitmask for which pipe h/w config functions need to be updated
+ * @pending: whether the current update is still pending
+ */
+struct sde_plane_state {
+ struct drm_plane_state base;
+ uint64_t property_values[PLANE_PROP_COUNT];
+ struct drm_property_blob *property_blobs[PLANE_PROP_BLOBCOUNT];
+ void *input_fence;
+ enum sde_stage stage;
+ uint32_t dirty;
+ bool pending;
+};
+
+#define to_sde_plane_state(x) \
+ container_of(x, struct sde_plane_state, base)
+
+/**
+ * sde_plane_get_property - Query integer value of plane property
+ * @S: Pointer to plane state
+ * @X: Property index, from enum msm_mdp_plane_property
+ * Returns: Integer value of requested property
+ */
+#define sde_plane_get_property(S, X) \
+ ((S) && ((X) < PLANE_PROP_COUNT) ? ((S)->property_values[(X)]) : 0)
+
+/**
+ * sde_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * @index: Plane index
+ * Returns: sspp identifier of the given plane
+ */
+enum sde_sspp sde_plane_pipe(struct drm_plane *plane, uint32_t index);
+
+/**
+ * sde_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void sde_plane_flush(struct drm_plane *plane);
+
+/**
+ * sde_plane_init - create new sde plane for the given pipe
+ * @dev:         Pointer to DRM device
+ * @pipe:        sde hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @vp_enabled:	Flag indicating if virtual planes enabled
+ * @plane_reserved:	Flag indicating the plane is occupied in bootloader
+ *
+ * Returns: Pointer to the new plane on success, ERR_PTR(-errno) on failure
+ */
+struct drm_plane *sde_plane_init(struct drm_device *dev,
+		uint32_t pipe, bool primary_plane,
+		unsigned long possible_crtcs,
+		bool vp_enabled, bool plane_reserved);
+
+/**
+ * sde_plane_wait_input_fence - wait for input fence object
+ * @plane: Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int sde_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * sde_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int sde_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
+/**
+ * sde_plane_update_blob_property - update plane blob property
+ * @plane: Pointer to DRM plane object
+ * @key: Pointer to key string
+ * @value: Signed 32 bit integer value
+ */
+void sde_plane_update_blob_property(struct drm_plane *plane,
+ const char *key,
+ int32_t value);
+#endif /* _SDE_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_recovery_manager.c b/drivers/gpu/drm/msm/sde/sde_recovery_manager.c
new file mode 100644
index 000000000000..ae42fd309293
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_recovery_manager.c
@@ -0,0 +1,399 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include "sde_recovery_manager.h"
+#include "sde_kms.h"
+
+
+static struct recovery_mgr_info *rec_mgr;
+
+/* sysfs "clients" show handler: print the name of every registered client */
+static ssize_t sde_recovery_mgr_rda_clients_attr(struct device *dev,
+		struct device_attribute *attr, char *buf)
+{
+	struct recovery_client_db *entry;
+	ssize_t len;
+
+	mutex_lock(&rec_mgr->rec_lock);
+
+	len = snprintf(buf, PAGE_SIZE, "Clients:\n");
+
+	/* one client name per line, bounded by the sysfs page */
+	list_for_each_entry(entry, &rec_mgr->client_list, list)
+		len += snprintf(buf + len, PAGE_SIZE - len, "%s\n",
+				entry->client_info.name);
+
+	mutex_unlock(&rec_mgr->rec_lock);
+
+	return len;
+}
+
+/* read-only sysfs node exposing the registered recovery clients */
+static DEVICE_ATTR(clients, S_IRUGO, sde_recovery_mgr_rda_clients_attr, NULL);
+
+static struct attribute *recovery_attrs[] = {
+	&dev_attr_clients.attr,
+	NULL,
+};
+
+/* attribute group installed under the drm device kobject */
+static struct attribute_group recovery_mgr_attr_group = {
+	.attrs = recovery_attrs,
+};
+
+/**
+ * sde_recovery_mgr_notify - send a display recovery uevent to userspace
+ * @err_state: true to report DISPLAY_ERROR_RECOVERED, false to report
+ *             DISPLAY_CRITICAL_ERROR
+ *
+ * Allocation failure is logged and the notification is silently dropped.
+ */
+static void sde_recovery_mgr_notify(bool err_state)
+{
+	char *envp[2];
+	/*
+	 * The event string is bounded by MAX_REC_UEVENT_LEN; allocating
+	 * SZ_4K for it (as before) wasted most of the buffer.
+	 */
+	char *uevent_str = kzalloc(MAX_REC_UEVENT_LEN, GFP_KERNEL);
+
+	if (uevent_str == NULL) {
+		DRM_ERROR("failed to allocate event string\n");
+		return;
+	}
+	if (err_state == true)
+		snprintf(uevent_str, MAX_REC_UEVENT_LEN,
+				"DISPLAY_ERROR_RECOVERED\n");
+	else
+		snprintf(uevent_str, MAX_REC_UEVENT_LEN,
+				"DISPLAY_CRITICAL_ERROR\n");
+
+	DRM_DEBUG("generating uevent [%s]\n", uevent_str);
+
+	envp[0] = uevent_str;
+	envp[1] = NULL;
+
+	/* serialize uevent emission against mode configuration changes */
+	mutex_lock(&rec_mgr->dev->mode_config.mutex);
+	kobject_uevent_env(&rec_mgr->dev->primary->kdev->kobj,
+			KOBJ_CHANGE, envp);
+	mutex_unlock(&rec_mgr->dev->mode_config.mutex);
+	kfree(uevent_str);
+}
+
+/**
+ * sde_recovery_mgr_recover - run the recovery callback for an error code
+ * @err_code: error code previously queued via sde_recovery_set_events()
+ *
+ * Walks the client list for the first client advertising @err_code in its
+ * err_supported table, recursively recovers any pre/post error codes, then
+ * invokes the client's recovery callback.  When the cumulative result is
+ * success, a "recovered" uevent is sent to userspace.
+ *
+ * Locking: rec_lock is dropped around the callback and re-taken before
+ * continuing the walk; concurrent client removal is prevented because
+ * sde_recovery_client_unregister() bails out while recovery_ongoing is set.
+ */
+static void sde_recovery_mgr_recover(int err_code)
+{
+	struct list_head *pos;
+	struct recovery_client_db *c = NULL;
+	int tmp_err, rc, pre, post, i;
+	bool found = false;
+	/*
+	 * NOTE(review): rec_flag is static, so one failed recovery leaves it
+	 * false for every later call and the "recovered" uevent is never sent
+	 * again.  It may be intentional (recursive calls share the flag with
+	 * the outer invocation) -- confirm the stickiness is desired.
+	 */
+	static bool rec_flag = true;
+
+	mutex_lock(&rec_mgr->rec_lock);
+	list_for_each(pos, &rec_mgr->client_list) {
+		c = list_entry(pos, struct recovery_client_db, list);
+
+		/* drop the lock while running (possibly slow) client code */
+		mutex_unlock(&rec_mgr->rec_lock);
+
+		/* does this client advertise the reported error code? */
+		for (i = 0; i < MAX_REC_ERR_SUPPORT; i++) {
+			tmp_err = c->client_info.err_supported[i].
+							reported_err_code;
+			if (tmp_err == err_code) {
+				found = true;
+				break;
+			}
+		}
+
+		if (found == true) {
+
+			/*
+			 * NOTE(review): compares against the character '0'
+			 * (value 48), not integer 0 -- probably meant a plain
+			 * zero check; no defined error code equals 48, so
+			 * behavior is unaffected today.
+			 */
+			pre = c->client_info.err_supported[i].pre_err_code;
+			if (pre && pre != '0')
+				sde_recovery_mgr_recover(pre);
+
+			if (c->client_info.recovery_cb) {
+				rc = c->client_info.recovery_cb(err_code,
+							&c->client_info);
+				if (rc) {
+					pr_err("%s failed to recover error %d\n",
+							__func__, err_code);
+					rec_flag = false;
+				} else {
+					pr_debug("%s Recovery successful[%d]\n",
+							__func__, err_code);
+				}
+			}
+
+			/* same char-literal caveat as the pre_err_code check */
+			post = c->client_info.err_supported[i].post_err_code;
+			if (post && post != '0')
+				sde_recovery_mgr_recover(post);
+
+		}
+		mutex_lock(&rec_mgr->rec_lock);
+
+		/* only the first matching client handles the error */
+		if (found)
+			break;
+	}
+
+	if (rec_flag) {
+		pr_debug("%s successful full recovery\n", __func__);
+		sde_recovery_mgr_notify(true);
+	}
+
+	mutex_unlock(&rec_mgr->rec_lock);
+}
+
+/**
+ * sde_recovery_mgr_event_work - worker draining the pending error list
+ * @work: work item embedded in rec_mgr (queued by sde_recovery_set_events)
+ *
+ * For each queued error: emit a "critical error" uevent, run recovery,
+ * then remove the event from the list.  rec_lock is dropped while the
+ * uevent/recovery runs so clients may queue further events; the
+ * list_for_each_safe walk relies on the current node not being freed
+ * by anyone else in that window.
+ */
+static void sde_recovery_mgr_event_work(struct work_struct *work)
+{
+	struct list_head *pos, *q;
+	struct recovery_event_db *temp_event;
+	int err_code;
+
+	if (!rec_mgr) {
+		pr_err("%s recovery manager is NULL\n", __func__);
+		return;
+	}
+
+	mutex_lock(&rec_mgr->rec_lock);
+
+	list_for_each_safe(pos, q, &rec_mgr->event_list) {
+		temp_event = list_entry(pos, struct recovery_event_db, list);
+
+		err_code = temp_event->err;
+
+		/* blocks client unregistration for the duration of recovery */
+		rec_mgr->recovery_ongoing = true;
+
+		mutex_unlock(&rec_mgr->rec_lock);
+
+		/* notify error */
+		sde_recovery_mgr_notify(false);
+		/* recover error */
+		sde_recovery_mgr_recover(err_code);
+
+		mutex_lock(&rec_mgr->rec_lock);
+
+		list_del(pos);
+		kfree(temp_event);
+	}
+
+	rec_mgr->recovery_ongoing = false;
+	mutex_unlock(&rec_mgr->rec_lock);
+
+}
+
+/**
+ * sde_recovery_set_events - queue an error event for recovery handling
+ * @err: error code reported by a client (e.g. SDE_UNDERRUN)
+ *
+ * Duplicate events already pending in the list are ignored.
+ *
+ * Return: 0 on success or duplicate, -EPERM if the recovery manager is not
+ * initialized, -ENOMEM on allocation failure.
+ */
+int sde_recovery_set_events(int err)
+{
+	int rc = 0;
+	struct recovery_event_db *event;
+	bool found = false;
+
+	/*
+	 * Every other entry point guards against use before
+	 * sde_init_recovery_mgr(); this one dereferenced rec_mgr blindly.
+	 */
+	if (!rec_mgr) {
+		pr_err("%s recovery manager is not initialized\n", __func__);
+		return -EPERM;
+	}
+
+	mutex_lock(&rec_mgr->rec_lock);
+
+	/* check if there is same event in the list */
+	list_for_each_entry(event, &rec_mgr->event_list, list) {
+		if (err == event->err) {
+			found = true;
+			pr_info("%s error %d is already present in list\n",
+				__func__, err);
+			break;
+		}
+	}
+
+	if (!found) {
+		event = kzalloc(sizeof(*event), GFP_KERNEL);
+		if (!event) {
+			pr_err("%s out of memory\n", __func__);
+			rc = -ENOMEM;
+			goto out;
+		}
+		event->err = err;
+
+		list_add_tail(&event->list, &rec_mgr->event_list);
+		queue_work(rec_mgr->event_queue, &rec_mgr->event_work);
+	}
+
+out:
+	mutex_unlock(&rec_mgr->rec_lock);
+	return rc;
+}
+
+/**
+ * sde_recovery_client_register - register a client with the recovery manager
+ * @client: client information; client->handle is set to the internal db
+ *          entry on success (and also when already registered)
+ *
+ * Return: 0 on success or if already registered, -EPERM if the manager is
+ * not initialized, -EINVAL on bad input, -ENOMEM on allocation failure.
+ */
+int sde_recovery_client_register(struct recovery_client_info *client)
+{
+	int rc = 0;
+	struct list_head *pos;
+	struct recovery_client_db *c = NULL;
+	bool found = false;
+
+	if (!rec_mgr) {
+		pr_err("%s recovery manager is not initialized\n", __func__);
+		return -EPERM;
+	}
+
+	/* validate the pointer before strlen(client->name) dereferences it */
+	if (!client) {
+		pr_err("%s client info is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	if (!strlen(client->name)) {
+		pr_err("%s client name is empty\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rec_mgr->rec_lock);
+
+	/* check if there is same client */
+	list_for_each(pos, &rec_mgr->client_list) {
+		c = list_entry(pos, struct recovery_client_db, list);
+		if (!strcmp(c->client_info.name,
+				client->name)) {
+			found = true;
+			break;
+		}
+	}
+
+	if (!found) {
+		c = kzalloc(sizeof(*c), GFP_KERNEL);
+		if (!c) {
+			pr_err("%s out of memory for client", __func__);
+			rc = -ENOMEM;
+			goto out;
+		}
+	} else {
+		/* already registered: hand back the existing handle */
+		pr_err("%s client = %s is already registered\n",
+				__func__, client->name);
+		client->handle = c;
+		goto out;
+	}
+
+	memcpy(&c->client_info, client, sizeof(c->client_info));
+
+	list_add_tail(&c->list, &rec_mgr->client_list);
+	rec_mgr->num_of_clients++;
+
+	client->handle = c;
+
+out:
+	mutex_unlock(&rec_mgr->rec_lock);
+	return rc;
+}
+
+/**
+ * sde_recovery_client_unregister - remove a client from the recovery manager
+ * @handle: opaque handle returned via client->handle at registration
+ *
+ * Also frees any pending events that only this client could have handled.
+ *
+ * Return: 0 on success, -EPERM if the manager is not initialized, -EINVAL
+ * on bad input, -EBUSY while a recovery is in progress, -EFAULT if the
+ * client is not found.
+ */
+int sde_recovery_client_unregister(void *handle)
+{
+	struct list_head *pos, *q, *pos1;
+	struct recovery_client_db *temp_client;
+	struct recovery_event_db *temp;
+	int client_err = 0;
+	bool found = false;
+	bool found_pending = false;
+	int i, rc = 0;
+	struct recovery_client_info *client;
+
+	if (!rec_mgr) {
+		pr_err("%s recovery manager is not initialized\n", __func__);
+		return -EPERM;
+	}
+
+	/*
+	 * Bug fix: the original dereferenced handle in its initializer
+	 * BEFORE this NULL check, making the check useless.
+	 */
+	if (!handle) {
+		pr_err("%s handle is NULL\n", __func__);
+		return -EINVAL;
+	}
+
+	client = &((struct recovery_client_db *)handle)->client_info;
+
+	if (!strlen(client->name)) {
+		pr_err("%s client name is empty\n", __func__);
+		return -EINVAL;
+	}
+
+	mutex_lock(&rec_mgr->rec_lock);
+
+	if (rec_mgr->recovery_ongoing) {
+		pr_err("%s SDE Executing Recovery, Failed! Unregister client %s\n",
+			__func__, client->name);
+		/* report the failure instead of silently returning success */
+		rc = -EBUSY;
+		goto out;
+	}
+
+	/* check if client is present in the list */
+	list_for_each_safe(pos, q, &rec_mgr->client_list) {
+		temp_client = list_entry(pos, struct recovery_client_db, list);
+		if (!strcmp(temp_client->client_info.name, client->name)) {
+			found = true;
+
+			/* free any pending event for this client */
+			list_for_each(pos1, &rec_mgr->event_list) {
+				temp = list_entry(pos1,
+					struct recovery_event_db, list);
+
+				found_pending = false;
+				for (i = 0; i < MAX_REC_ERR_SUPPORT; i++) {
+					client_err = temp_client->
+						client_info.err_supported[i].
+						reported_err_code;
+					if (temp->err == client_err)
+						found_pending = true;
+				}
+
+				if (found_pending) {
+					list_del(pos1);
+					kfree(temp);
+				}
+			}
+
+			list_del(pos);
+			kfree(temp_client);
+			rec_mgr->num_of_clients--;
+			break;
+		}
+	}
+
+	if (!found) {
+		pr_err("%s can't find the client[%s] from db\n",
+			__func__, client->name);
+		rc = -EFAULT;
+	}
+
+out:
+	mutex_unlock(&rec_mgr->rec_lock);
+	return rc;
+}
+
+/**
+ * sde_init_recovery_mgr - allocate and initialize the recovery manager
+ * @dev: drm device handle
+ *
+ * Creates the sysfs attribute group (non-fatal on failure) and the event
+ * workqueue, then publishes the manager via the rec_mgr global.
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int sde_init_recovery_mgr(struct drm_device *dev)
+{
+	struct recovery_mgr_info *rec = NULL;
+	int rc = 0;
+
+	if (!dev || !dev->dev_private) {
+		SDE_ERROR("drm device node invalid\n");
+		return -EINVAL;
+	}
+
+	rec = kzalloc(sizeof(struct recovery_mgr_info), GFP_KERNEL);
+	if (!rec)
+		return -ENOMEM;
+
+	mutex_init(&rec->rec_lock);
+
+	rec->dev = dev;
+	/* sysfs failure is logged but does not abort initialization */
+	rc = sysfs_create_group(&dev->primary->kdev->kobj,
+			&recovery_mgr_attr_group);
+	if (rc) {
+		pr_err("%s sysfs_create_group fails=%d", __func__, rc);
+		rec->sysfs_created = false;
+	} else {
+		rec->sysfs_created = true;
+	}
+
+	INIT_LIST_HEAD(&rec->event_list);
+	INIT_LIST_HEAD(&rec->client_list);
+	INIT_WORK(&rec->event_work, sde_recovery_mgr_event_work);
+	rec->event_queue = create_workqueue("recovery_event");
+
+	/* create_workqueue() returns NULL on failure, never ERR_PTR */
+	if (!rec->event_queue) {
+		pr_err("%s unable to create event workqueue", __func__);
+		rc = -EFAULT;
+		goto err;
+	}
+
+	rec_mgr = rec;
+
+	return rc;
+
+err:
+	/*
+	 * Bug fix: the original error path dereferenced the rec_mgr global,
+	 * which is still NULL here (it is only assigned on success).
+	 */
+	mutex_destroy(&rec->rec_lock);
+	if (rec->sysfs_created)
+		sysfs_remove_group(&dev->primary->kdev->kobj,
+				&recovery_mgr_attr_group);
+	kfree(rec);
+	return rc;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_recovery_manager.h b/drivers/gpu/drm/msm/sde/sde_recovery_manager.h
new file mode 100644
index 000000000000..aeaecbd194f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_recovery_manager.h
@@ -0,0 +1,124 @@
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_RECOVERY_MANAGER_H__
+#define __SDE_RECOVERY_MANAGER_H__
+
+#include <linux/list.h>
+#include <linux/kthread.h>
+#include <linux/platform_device.h>
+#include <linux/kobject.h>
+#include <drm/msm_drm.h>
+#include <linux/slab.h>
+#include <drm/drmP.h>
+
+
+
+/* MSM Recovery Manager related definitions */
+
+#define MAX_REC_NAME_LEN (16)
+#define MAX_REC_UEVENT_LEN (64)
+#define MAX_REC_ERR_SUPPORT (3)
+
+/* MSM Recovery Manager Error Code */
+#define SDE_SMMU_FAULT 111
+#define SDE_UNDERRUN 222
+#define SDE_VSYNC_MISS 333
+/*
+ * instance id of bridge chip is added to make error code
+ * unique to individual bridge chip instance
+ */
+#define DBA_BRIDGE_CRITICAL_ERR 444
+
+/**
+ * struct recovery_mgr_info - Recovery manager information
+ * @dev: drm device.
+ * @rec_lock: mutex lock for synchronized access to recovery mgr data.
+ * @event_list: list of reported events.
+ * @client_list: list of registered clients.
+ * @event_work: work for event handling.
+ * @event_queue: Queue for scheduling the event work.
+ * @num_of_clients: no. of clients registered.
+ * @sysfs_created: set when the sysfs attribute group was created.
+ * @recovery_ongoing: status indicating execution of recovery thread.
+ */
+struct recovery_mgr_info {
+	struct drm_device *dev;
+	struct mutex rec_lock;
+	struct list_head event_list;
+	struct list_head client_list;
+	struct work_struct event_work;
+	struct workqueue_struct *event_queue;
+	int num_of_clients;
+	int sysfs_created;
+	int recovery_ongoing;
+};
+
+/**
+ * struct recovery_error_info - Error information
+ * @reported_err_code: error reported for recovery.
+ * @pre_err_code: list of errors to be recovered before reported_err_code.
+ * @post_err_code: list of errors to be recovered after reported_err_code.
+ */
+struct recovery_error_info {
+	int reported_err_code;
+	int pre_err_code;
+	int post_err_code;
+};
+
+/**
+ * struct recovery_client_info - Client information
+ * @name: name of the client.
+ * @recovery_cb: recovery callback to recover the errors reported.
+ * @err_supported: list of errors that can be detected by client.
+ * @no_of_err: no. of errors supported by the client.
+ * @pdata: opaque client private data pointer.
+ * @handle: Opaque handle passed to client
+ */
+struct recovery_client_info {
+	char name[MAX_REC_NAME_LEN];
+	int (*recovery_cb)(int err_code,
+			struct recovery_client_info *client_info);
+	struct recovery_error_info
+			err_supported[MAX_REC_ERR_SUPPORT];
+	int no_of_err;
+	void *pdata;
+	void *handle;
+};
+
+/**
+ * struct recovery_event_db - event database.
+ * @err: error code that client reports.
+ * @list: list pointer.
+ */
+struct recovery_event_db {
+ int err;
+ struct list_head list;
+};
+
+/**
+ * struct recovery_client_db - client database.
+ * @client_info: information that client registers.
+ * @list: list pointer.
+ */
+struct recovery_client_db {
+ struct recovery_client_info client_info;
+ struct list_head list;
+};
+
+int sde_recovery_set_events(int err);
+int sde_recovery_client_register(struct recovery_client_info *client);
+int sde_recovery_client_unregister(void *handle);
+int sde_init_recovery_mgr(struct drm_device *dev);
+
+
+#endif /* __SDE_RECOVERY_MANAGER_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.c b/drivers/gpu/drm/msm/sde/sde_rm.c
new file mode 100644
index 000000000000..4281d93fb182
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_rm.c
@@ -0,0 +1,1763 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "sde_kms.h"
+#include "sde_hw_lm.h"
+#include "sde_hw_ctl.h"
+#include "sde_hw_cdm.h"
+#include "sde_hw_dspp.h"
+#include "sde_hw_pingpong.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_wb.h"
+#include "sde_encoder.h"
+#include "sde_connector.h"
+#include "sde_hw_sspp.h"
+#include "sde_splash.h"
+#include "dsi_display.h"
+#include "sde_hdmi.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DSPP(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_DSPP))
+#define RM_RQ_PPSPLIT(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_PPSPLIT))
+#define RM_RQ_FORCE_TILING(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_TILING))
+#define RM_RQ_FORCE_MIXER(r) ((r)->top_ctrl & BIT(SDE_RM_TOPCTL_FORCE_MIXER))
+
+/**
+ * struct sde_rm_requirements - Reservation requirements parameter bundle
+ * @top_name: DRM<->HW topology use case user is trying to enable
+ * @top_ctrl: topology control bitmask (see SDE_RM_TOPCTL_* / RM_RQ_* macros,
+ *            e.g. whether the use case requires a DSPP)
+ * @num_lm:   Number of layer mixers needed in the use case
+ * @num_ctl:  Number of control paths needed in the use case
+ * @needs_split_display: whether a split-display configuration is required
+ * @hw_res:   Hardware resources required as reported by the encoders
+ * @disp_id:  Current display ID, lm/ctl may have prefer display
+ */
+struct sde_rm_requirements {
+	enum sde_rm_topology_name top_name;
+	uint64_t top_ctrl;
+	int num_lm;
+	int num_ctl;
+	bool needs_split_display;
+	struct sde_encoder_hw_resources hw_res;
+	uint32_t disp_id;
+};
+
+/**
+ * struct sde_rm_rsvp - Use Case Reservation tagging structure
+ * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ * By using as a tag, rather than lists of pointers to HW blocks used
+ * we can avoid some list management since we don't know how many blocks
+ * of each type a given use case may require.
+ * @list: List head for list of all reservations
+ * @seq: Global RSVP sequence number for debugging, especially for
+ * differentiating differenct allocations for same encoder.
+ * @enc_id: Reservations are tracked by Encoder DRM object ID.
+ * CRTCs may be connected to multiple Encoders.
+ * An encoder or connector id identifies the display path.
+ * @topology DRM<->HW topology use case
+ */
+struct sde_rm_rsvp {
+ struct list_head list;
+ uint32_t seq;
+ uint32_t enc_id;
+ enum sde_rm_topology_name topology;
+};
+
+/**
+ * struct sde_rm_hw_blk - hardware block tracking list member
+ * @list:	List head for list of all hardware blocks tracking items
+ * @rsvp:	Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt:	Temporary pointer used during reservation to the incoming
+ *		request. Will be swapped into rsvp if proposal is accepted
+ * @type:	Type of hardware block this structure tracks
+ * @type_name:	Short human-readable name for the block type (for logging)
+ * @id:	Hardware ID number, within it's own space, ie. LM_X
+ * @catalog:	Pointer to the hardware catalog entry for this block
+ * @hw:	Pointer to the hardware register access object for this block
+ */
+struct sde_rm_hw_blk {
+	struct list_head list;
+	struct sde_rm_rsvp *rsvp;
+	struct sde_rm_rsvp *rsvp_nxt;
+	enum sde_hw_blk_type type;
+	const char *type_name;
+	uint32_t id;
+	void *catalog;
+	void *hw;
+};
+
+/**
+ * sde_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum sde_rm_dbg_rsvp_stage {
+ SDE_RM_STAGE_BEGIN,
+ SDE_RM_STAGE_AFTER_CLEAR,
+ SDE_RM_STAGE_AFTER_RSVPNEXT,
+ SDE_RM_STAGE_FINAL
+};
+
+/* Dump all reservations and reserved blocks to the debug log/event trace */
+static void _sde_rm_print_rsvps(
+		struct sde_rm *rm,
+		enum sde_rm_dbg_rsvp_stage stage)
+{
+	struct sde_rm_rsvp *rsvp;
+	struct sde_rm_hw_blk *blk;
+	enum sde_hw_blk_type type;
+
+	SDE_DEBUG("%d\n", stage);
+
+	list_for_each_entry(rsvp, &rm->rsvps, list) {
+		SDE_DEBUG("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+				rsvp->enc_id, rsvp->topology);
+		SDE_EVT32(stage, rsvp->seq, rsvp->enc_id, rsvp->topology);
+	}
+
+	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+		list_for_each_entry(blk, &rm->hw_blks[type], list) {
+			uint32_t cur_seq, cur_enc, nxt_seq, nxt_enc;
+
+			/* only print blocks with a current or pending rsvp */
+			if (!blk->rsvp && !blk->rsvp_nxt)
+				continue;
+
+			cur_seq = blk->rsvp ? blk->rsvp->seq : 0;
+			cur_enc = blk->rsvp ? blk->rsvp->enc_id : 0;
+			nxt_seq = blk->rsvp_nxt ? blk->rsvp_nxt->seq : 0;
+			nxt_enc = blk->rsvp_nxt ? blk->rsvp_nxt->enc_id : 0;
+
+			SDE_DEBUG("%d rsvp[s%ue%u->s%ue%u] %s %d\n", stage,
+					cur_seq, cur_enc, nxt_seq, nxt_enc,
+					blk->type_name, blk->id);
+
+			SDE_EVT32(stage, cur_seq, cur_enc, nxt_seq, nxt_enc,
+					blk->type, blk->id);
+		}
+	}
+}
+
+/* Return the MDP top singleton owned by the resource manager */
+struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm)
+{
+	return rm->hw_mdp;
+}
+
+/* Reset an iterator to search blocks of @type reserved for encoder @enc_id */
+void sde_rm_init_hw_iter(
+		struct sde_rm_hw_iter *iter,
+		uint32_t enc_id,
+		enum sde_hw_blk_type type)
+{
+	*iter = (struct sde_rm_hw_iter) {
+		.enc_id = enc_id,
+		.type = type,
+	};
+}
+
+/*
+ * Advance iterator @i to the next block of its type that is reserved for
+ * i->enc_id (enc_id == 0 acts as a wildcard and matches every block).
+ * Returns true and sets i->hw/i->blk on a match; false when exhausted.
+ * Caller must hold rm->rm_lock.
+ */
+static bool _sde_rm_get_hw_locked(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+	struct list_head *blk_list;
+
+	if (!rm || !i || i->type >= SDE_HW_BLK_MAX) {
+		SDE_ERROR("invalid rm\n");
+		return false;
+	}
+
+	i->hw = NULL;
+	blk_list = &rm->hw_blks[i->type];
+
+	/* i->blk pointing at the list head means a prior walk finished */
+	if (i->blk && (&i->blk->list == blk_list)) {
+		SDE_ERROR("attempt resume iteration past last\n");
+		return false;
+	}
+
+	/* resume from i->blk if set, otherwise start from the list head */
+	i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+	list_for_each_entry_continue(i->blk, blk_list, list) {
+		struct sde_rm_rsvp *rsvp = i->blk->rsvp;
+
+		if (i->blk->type != i->type) {
+			SDE_ERROR("found incorrect block type %d on %d list\n",
+					i->blk->type, i->type);
+			return false;
+		}
+
+		if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+			i->hw = i->blk->hw;
+			SDE_DEBUG("found type %d %s id %d for enc %d\n",
+					i->type, i->blk->type_name, i->blk->id,
+					i->enc_id);
+			return true;
+		}
+	}
+
+	SDE_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+	return false;
+}
+
+/* Locked wrapper around _sde_rm_get_hw_locked() for external callers */
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *i)
+{
+	bool found;
+
+	mutex_lock(&rm->rm_lock);
+	found = _sde_rm_get_hw_locked(rm, i);
+	mutex_unlock(&rm->rm_lock);
+
+	return found;
+}
+
+/*
+ * Look up the hw object of the block with the given @type and @id.
+ * Returns NULL if not found. Caller must hold rm->rm_lock.
+ */
+static void *_sde_rm_get_hw_by_id_locked(
+		struct sde_rm *rm,
+		enum sde_hw_blk_type type,
+		int id)
+{
+	struct sde_rm_hw_blk *blk;
+
+	if (!rm || type >= SDE_HW_BLK_MAX) {
+		SDE_ERROR("invalid rm\n");
+		return NULL;
+	}
+
+	list_for_each_entry(blk, &rm->hw_blks[type], list) {
+		if (blk->id != id)
+			continue;
+
+		SDE_DEBUG("found type %d %s id %d\n",
+				type, blk->type_name, blk->id);
+		return blk->hw;
+	}
+
+	SDE_DEBUG("no match, type %d id=%d\n", type, id);
+
+	return NULL;
+}
+
+/* Locked wrapper around _sde_rm_get_hw_by_id_locked() for external callers */
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id)
+{
+	void *hw;
+
+	mutex_lock(&rm->rm_lock);
+	hw = _sde_rm_get_hw_by_id_locked(rm, type, id);
+	mutex_unlock(&rm->rm_lock);
+
+	return hw;
+}
+
+/* Dispatch @hw to the destroy routine matching its block @type */
+static void _sde_rm_hw_destroy(enum sde_hw_blk_type type, void *hw)
+{
+	switch (type) {
+	case SDE_HW_BLK_LM:
+		sde_hw_lm_destroy(hw);
+		break;
+	case SDE_HW_BLK_DSPP:
+		sde_hw_dspp_destroy(hw);
+		break;
+	case SDE_HW_BLK_CTL:
+		sde_hw_ctl_destroy(hw);
+		break;
+	case SDE_HW_BLK_CDM:
+		sde_hw_cdm_destroy(hw);
+		break;
+	case SDE_HW_BLK_PINGPONG:
+		sde_hw_pingpong_destroy(hw);
+		break;
+	case SDE_HW_BLK_INTF:
+		sde_hw_intf_destroy(hw);
+		break;
+	case SDE_HW_BLK_WB:
+		sde_hw_wb_destroy(hw);
+		break;
+	case SDE_HW_BLK_SSPP:
+		sde_hw_sspp_destroy(hw);
+		break;
+	case SDE_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+		/* intentional fallthrough to the error path */
+	case SDE_HW_BLK_MAX:
+	default:
+		SDE_ERROR("unsupported block type %d\n", type);
+		break;
+	}
+}
+
+/* Tear down all reservations, tracked hw blocks and the MDP top singleton */
+int sde_rm_destroy(struct sde_rm *rm)
+{
+	struct sde_rm_rsvp *rsvp, *rsvp_tmp;
+	struct sde_rm_hw_blk *blk, *blk_tmp;
+	enum sde_hw_blk_type type;
+
+	if (!rm) {
+		SDE_ERROR("invalid rm\n");
+		return -EINVAL;
+	}
+
+	/* release all outstanding reservations */
+	list_for_each_entry_safe(rsvp, rsvp_tmp, &rm->rsvps, list) {
+		list_del(&rsvp->list);
+		kfree(rsvp);
+	}
+
+	/* destroy every tracked hardware block, one type at a time */
+	for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+		list_for_each_entry_safe(blk, blk_tmp, &rm->hw_blks[type],
+				list) {
+			list_del(&blk->list);
+			_sde_rm_hw_destroy(blk->type, blk->hw);
+			kfree(blk);
+		}
+	}
+
+	/* MDP top is a singleton, handled outside the hw_blks lists */
+	sde_hw_mdp_destroy(rm->hw_mdp);
+	rm->hw_mdp = NULL;
+
+	mutex_destroy(&rm->rm_lock);
+
+	return 0;
+}
+
+/**
+ * _sde_rm_hw_blk_create - init a hw driver object and its tracking item
+ * @rm: resource manager handle
+ * @cat: hardware catalog
+ * @mmio: mapped MDP register space
+ * @type: type of hardware block to create
+ * @id: hardware id of the block, within its own id space
+ * @hw_catalog_info: catalog entry for this block, stashed in the item
+ *
+ * Return: 0 on success; -EINVAL for an unsupported type, -EFAULT if hw
+ * object creation failed, -ENOMEM if the tracking item allocation failed.
+ */
+static int _sde_rm_hw_blk_create(
+		struct sde_rm *rm,
+		struct sde_mdss_cfg *cat,
+		void *mmio,
+		enum sde_hw_blk_type type,
+		uint32_t id,
+		void *hw_catalog_info)
+{
+	struct sde_rm_hw_blk *blk;
+	struct sde_hw_mdp *hw_mdp;
+	const char *name;
+	void *hw;
+
+	hw_mdp = rm->hw_mdp;
+
+	switch (type) {
+	case SDE_HW_BLK_LM:
+		hw = sde_hw_lm_init(id, mmio, cat);
+		name = "lm";
+		break;
+	case SDE_HW_BLK_DSPP:
+		hw = sde_hw_dspp_init(id, mmio, cat);
+		name = "dspp";
+		break;
+	case SDE_HW_BLK_CTL:
+		hw = sde_hw_ctl_init(id, mmio, cat);
+		name = "ctl";
+		break;
+	case SDE_HW_BLK_CDM:
+		hw = sde_hw_cdm_init(id, mmio, cat, hw_mdp);
+		name = "cdm";
+		break;
+	case SDE_HW_BLK_PINGPONG:
+		hw = sde_hw_pingpong_init(id, mmio, cat);
+		name = "pp";
+		break;
+	case SDE_HW_BLK_INTF:
+		hw = sde_hw_intf_init(id, mmio, cat);
+		name = "intf";
+		break;
+	case SDE_HW_BLK_WB:
+		hw = sde_hw_wb_init(id, mmio, cat, hw_mdp);
+		name = "wb";
+		break;
+	case SDE_HW_BLK_SSPP:
+		hw = sde_hw_sspp_init(id, (void __iomem *)mmio, cat);
+		name = "sspp";
+		break;
+	case SDE_HW_BLK_TOP:
+		/* Top is a singleton, not managed in hw_blks list */
+	case SDE_HW_BLK_MAX:
+	default:
+		SDE_ERROR("unsupported block type %d\n", type);
+		return -EINVAL;
+	}
+
+	if (IS_ERR_OR_NULL(hw)) {
+		SDE_ERROR("failed hw object creation: type %d, err %ld\n",
+				type, PTR_ERR(hw));
+		return -EFAULT;
+	}
+
+	/* tracking item owns the hw object from this point on */
+	blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+	if (!blk) {
+		_sde_rm_hw_destroy(type, hw);
+		return -ENOMEM;
+	}
+
+	blk->type_name = name;
+	blk->type = type;
+	blk->id = id;
+	blk->catalog = hw_catalog_info;
+	blk->hw = hw;
+	list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+	return 0;
+}
+
+/**
+ * sde_rm_init - initialize the resource manager from the hardware catalog
+ * @rm: resource manager to initialize (cleared here)
+ * @cat: hardware catalog describing the available blocks
+ * @mmio: mapped MDP register space
+ * @dev: drm device handle
+ *
+ * Creates the MDP top singleton and one tracking item per catalog block
+ * (sspp, lm, dspp, pingpong, intf, wb, ctl, cdm). On any failure all
+ * partially created state is torn down via sde_rm_destroy().
+ *
+ * Return: 0 on success, negative errno on failure.
+ */
+int sde_rm_init(struct sde_rm *rm,
+		struct sde_mdss_cfg *cat,
+		void *mmio,
+		struct drm_device *dev)
+{
+	int rc, i;
+	enum sde_hw_blk_type type;
+
+	if (!rm || !cat || !mmio || !dev) {
+		SDE_ERROR("invalid kms\n");
+		return -EINVAL;
+	}
+
+	/* Clear, setup lists */
+	memset(rm, 0, sizeof(*rm));
+
+	mutex_init(&rm->rm_lock);
+
+	rm->dev = dev;
+
+	INIT_LIST_HEAD(&rm->rsvps);
+	for (type = 0; type < SDE_HW_BLK_MAX; type++)
+		INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+	/* Some of the sub-blocks require an mdptop to be created */
+	rm->hw_mdp = sde_hw_mdptop_init(MDP_TOP, mmio, cat);
+	if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+		/*
+		 * Bug fix: PTR_ERR(NULL) is 0, which previously made this
+		 * function return success when the init returned NULL; map
+		 * a NULL return to a real error code instead.
+		 */
+		rc = rm->hw_mdp ? PTR_ERR(rm->hw_mdp) : -EINVAL;
+		rm->hw_mdp = NULL;
+		SDE_ERROR("failed: mdp hw not available\n");
+		goto fail;
+	}
+
+	for (i = 0; i < cat->sspp_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_SSPP,
+				cat->sspp[i].id, &cat->sspp[i]);
+		if (rc)
+			goto fail;
+	}
+
+	/* Interrogate HW catalog and create tracking items for hw blocks */
+	for (i = 0; i < cat->mixer_count; i++) {
+		struct sde_lm_cfg *lm = &cat->mixer[i];
+
+		if (lm->pingpong == PINGPONG_MAX) {
+			SDE_DEBUG("skip mixer %d without pingpong\n", lm->id);
+			continue;
+		}
+
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_LM,
+				cat->mixer[i].id, &cat->mixer[i]);
+		if (rc) {
+			SDE_ERROR("failed: lm hw not available\n");
+			goto fail;
+		}
+
+		if (!rm->lm_max_width) {
+			rm->lm_max_width = lm->sblk->maxwidth;
+		} else if (rm->lm_max_width != lm->sblk->maxwidth) {
+			/*
+			 * Don't expect to have hw where lm max widths differ.
+			 * If found, take the min.
+			 */
+			SDE_ERROR("unsupported: lm maxwidth differs\n");
+			if (rm->lm_max_width > lm->sblk->maxwidth)
+				rm->lm_max_width = lm->sblk->maxwidth;
+		}
+	}
+
+	for (i = 0; i < cat->dspp_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_DSPP,
+				cat->dspp[i].id, &cat->dspp[i]);
+		if (rc) {
+			SDE_ERROR("failed: dspp hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->pingpong_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_PINGPONG,
+				cat->pingpong[i].id, &cat->pingpong[i]);
+		if (rc) {
+			SDE_ERROR("failed: pp hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->intf_count; i++) {
+		if (cat->intf[i].type == INTF_NONE) {
+			SDE_DEBUG("skip intf %d with type none\n", i);
+			continue;
+		}
+
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_INTF,
+				cat->intf[i].id, &cat->intf[i]);
+		if (rc) {
+			SDE_ERROR("failed: intf hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->wb_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_WB,
+				cat->wb[i].id, &cat->wb[i]);
+		if (rc) {
+			SDE_ERROR("failed: wb hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->ctl_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CTL,
+				cat->ctl[i].id, &cat->ctl[i]);
+		if (rc) {
+			SDE_ERROR("failed: ctl hw not available\n");
+			goto fail;
+		}
+	}
+
+	for (i = 0; i < cat->cdm_count; i++) {
+		rc = _sde_rm_hw_blk_create(rm, cat, mmio, SDE_HW_BLK_CDM,
+				cat->cdm[i].id, &cat->cdm[i]);
+		if (rc) {
+			SDE_ERROR("failed: cdm hw not available\n");
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	sde_rm_destroy(rm);
+
+	return rc;
+}
+
+/**
+ * _sde_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong, and dspp.
+ * @rm: sde resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer, function checks if lm, and all other hardwired
+ * blocks connected to the lm (pp, dspp) are available and appropriate
+ * @dspp: output parameter, dspp block attached to the layer mixer.
+ * NULL if dspp was not available, or not matching requirements.
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ * NULL if dspp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function check if lm is compatible primary_lm
+ * as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _sde_rm_check_lm_and_get_connected_blks(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs,
+ struct sde_rm_hw_blk *lm,
+ struct sde_rm_hw_blk **dspp,
+ struct sde_rm_hw_blk **pp,
+ struct sde_rm_hw_blk *primary_lm)
+{
+ struct sde_lm_cfg *lm_cfg = (struct sde_lm_cfg *)lm->catalog;
+ struct sde_pingpong_cfg *pp_cfg;
+ struct sde_rm_hw_iter iter;
+ unsigned long caps = ((struct sde_lm_cfg *)lm->catalog)->features;
+ unsigned int preferred_disp_id = 0;
+ bool preferred_disp_match = false;
+ /* Outputs default to "not found"; only set when every check passes */
+ *dspp = NULL;
+ *pp = NULL;
+
+ SDE_DEBUG("check lm %d: dspp %d pp %d\n", lm_cfg->id, lm_cfg->dspp,
+ lm_cfg->pingpong);
+
+ /* Check if this layer mixer is a peer of the proposed primary LM */
+ if (primary_lm) {
+ struct sde_lm_cfg *prim_lm_cfg =
+ (struct sde_lm_cfg *)primary_lm->catalog;
+
+ if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ SDE_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ }
+
+ /* bypass rest of the checks if preferred display is found */
+ if (BIT(SDE_DISP_PRIMARY_PREF) & caps)
+ preferred_disp_id = 1;
+ else if (BIT(SDE_DISP_SECONDARY_PREF) & caps)
+ preferred_disp_id = 2;
+ else if (BIT(SDE_DISP_TERTIARY_PREF) & caps)
+ preferred_disp_id = 3;
+
+ if (reqs->disp_id == preferred_disp_id)
+ preferred_disp_match = true;
+
+ /* Matches user requirements? */
+ /* DSPP-capable mixers are kept for displays that requested DSPP,
+ * unless the mixer is hard-wired preferred for this display id
+ */
+ if (!preferred_disp_match &&
+ ((RM_RQ_DSPP(reqs) && lm_cfg->dspp == DSPP_MAX) ||
+ (!RM_RQ_DSPP(reqs) && lm_cfg->dspp != DSPP_MAX))) {
+ SDE_DEBUG("dspp req mismatch lm %d reqdspp %d, lm->dspp %d\n",
+ lm_cfg->id, (bool)(RM_RQ_DSPP(reqs)),
+ lm_cfg->dspp);
+ return false;
+ }
+
+ /* Already reserved? */
+ if (RESERVED_BY_OTHER(lm, rsvp)) {
+ SDE_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ return false;
+ }
+
+ /* Resolve the hard-wired dspp attached to this mixer, if any */
+ if (lm_cfg->dspp != DSPP_MAX) {
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_DSPP);
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->dspp) {
+ *dspp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*dspp) {
+ SDE_DEBUG("lm %d failed to retrieve dspp %d\n", lm->id,
+ lm_cfg->dspp);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*dspp, rsvp)) {
+ SDE_DEBUG("lm %d dspp %d already reserved\n",
+ lm->id, (*dspp)->id);
+ return false;
+ }
+ }
+
+ /* Resolve the hard-wired pingpong attached to this mixer */
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_PINGPONG);
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->pingpong) {
+ *pp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*pp) {
+ SDE_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ SDE_DEBUG("lm %d pp %d already reserved\n", lm->id,
+ (*pp)->id);
+ /* clear the dspp output too so the caller sees no partial hit */
+ *dspp = NULL;
+ return false;
+ }
+
+ pp_cfg = (struct sde_pingpong_cfg *)((*pp)->catalog);
+ if ((reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) &&
+ !(test_bit(SDE_PINGPONG_SPLIT, &pp_cfg->features))) {
+ SDE_DEBUG("pp %d doesn't support ppsplit\n", pp_cfg->id);
+ *dspp = NULL;
+ return false;
+ }
+
+ return true;
+}
+
+/**
+ * _sde_rm_reserve_lms - reserve the layer mixers (and their hard-wired
+ *	dspp/pingpong blocks) needed by the proposed use case
+ * @rm: sde resource manager handle
+ * @rsvp: reservation being built; chosen blocks are tagged via rsvp_nxt
+ * @reqs: use case requirements (num_lm, topology, feature bits)
+ * @prefer_lm_id: if non-zero, only accept this mixer id as the primary LM
+ *	(used during splash handoff to re-use the bootloader's mixer)
+ * @Return: 0 on success, -EINVAL on bad request, -ENAVAIL if no fit found
+ */
+static int _sde_rm_reserve_lms(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs,
+ uint32_t prefer_lm_id)
+
+{
+ struct sde_rm_hw_blk *lm[MAX_BLOCKS];
+ struct sde_rm_hw_blk *dspp[MAX_BLOCKS];
+ struct sde_rm_hw_blk *pp[MAX_BLOCKS];
+ struct sde_rm_hw_iter iter_i, iter_j;
+ int lm_count = 0;
+ int i, rc = 0;
+
+ if (!reqs->num_lm) {
+ SDE_DEBUG("invalid no of lm %d\n", reqs->num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_LM);
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_i)) {
+ /* restart the candidate set for each primary-LM attempt */
+ memset(&lm, 0, sizeof(lm));
+ memset(&dspp, 0, sizeof(dspp));
+ memset(&pp, 0, sizeof(pp));
+
+ lm_count = 0;
+ lm[lm_count] = iter_i.blk;
+
+ /* find the matched lm id */
+ if ((prefer_lm_id > 0) && (iter_i.blk->id != prefer_lm_id))
+ continue;
+
+ if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp, reqs,
+ lm[lm_count], &dspp[lm_count], &pp[lm_count],
+ NULL))
+ continue;
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ sde_rm_init_hw_iter(&iter_j, 0, SDE_HW_BLK_LM);
+
+ while (lm_count != reqs->num_lm &&
+ _sde_rm_get_hw_locked(rm, &iter_j)) {
+ if (iter_i.blk == iter_j.blk)
+ continue;
+
+ if (!_sde_rm_check_lm_and_get_connected_blks(rm, rsvp,
+ reqs, iter_j.blk, &dspp[lm_count],
+ &pp[lm_count], iter_i.blk))
+ continue;
+
+ lm[lm_count] = iter_j.blk;
+
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->num_lm) {
+ SDE_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ /* Tag the whole chosen set; rsvp_nxt becomes rsvp on commit */
+ for (i = 0; i < ARRAY_SIZE(lm); i++) {
+ if (!lm[i])
+ break;
+
+ lm[i]->rsvp_nxt = rsvp;
+ pp[i]->rsvp_nxt = rsvp;
+ if (dspp[i])
+ dspp[i]->rsvp_nxt = rsvp;
+
+ SDE_EVT32(lm[i]->type, rsvp->enc_id, lm[i]->id, pp[i]->id,
+ dspp[i] ? dspp[i]->id : 0);
+ }
+
+ if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT) {
+ /* reserve a free PINGPONG_SLAVE block */
+ rc = -ENAVAIL;
+ sde_rm_init_hw_iter(&iter_i, 0, SDE_HW_BLK_PINGPONG);
+ while (_sde_rm_get_hw_locked(rm, &iter_i)) {
+ struct sde_pingpong_cfg *pp_cfg =
+ (struct sde_pingpong_cfg *)
+ (iter_i.blk->catalog);
+
+ if (!(test_bit(SDE_PINGPONG_SLAVE, &pp_cfg->features)))
+ continue;
+ if (RESERVED_BY_OTHER(iter_i.blk, rsvp))
+ continue;
+
+ iter_i.blk->rsvp_nxt = rsvp;
+ rc = 0;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+/**
+ * _sde_rm_reserve_ctls - reserve reqs->num_ctl control paths
+ * @rm: sde resource manager handle
+ * @rsvp: reservation being built; chosen CTLs are tagged via rsvp_nxt
+ * @reqs: use case requirements (num_ctl, split display, topology)
+ * @prefer_ctl_id: if non-zero, a CTL with this id is accepted directly,
+ *	bypassing the split-display/ppsplit capability checks (splash handoff)
+ * @Return: 0 on success, -ENAVAIL if not enough matching CTLs
+ */
+static int _sde_rm_reserve_ctls(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs,
+ uint32_t prefer_ctl_id)
+{
+ struct sde_rm_hw_blk *ctls[MAX_BLOCKS];
+ struct sde_rm_hw_iter iter;
+ int i = 0;
+
+ memset(&ctls, 0, sizeof(ctls));
+
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CTL);
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
+ unsigned long caps;
+ bool has_split_display, has_ppsplit;
+ bool ctl_found = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ caps = ((struct sde_ctl_cfg *)iter.blk->catalog)->features;
+ has_split_display = BIT(SDE_CTL_SPLIT_DISPLAY) & caps;
+ has_ppsplit = BIT(SDE_CTL_PINGPONG_SPLIT) & caps;
+
+ SDE_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, caps);
+
+ /* early return when finding the matched ctl id */
+ if ((prefer_ctl_id > 0) && (iter.blk->id == prefer_ctl_id))
+ ctl_found = true;
+
+ /* CTLs hard-wired to this display id are always a match */
+ switch (reqs->disp_id) {
+ case 1:
+ if (BIT(SDE_CTL_PRIMARY_PREF) & caps)
+ ctl_found = true;
+ break;
+ case 2:
+ if (BIT(SDE_CTL_SECONDARY_PREF) & caps)
+ ctl_found = true;
+ break;
+ case 3:
+ if (BIT(SDE_CTL_TERTIARY_PREF) & caps)
+ ctl_found = true;
+ break;
+ default:
+ break;
+ }
+
+ if (ctl_found) {
+ ctls[i] = iter.blk;
+ /* preference is only honored once */
+ prefer_ctl_id = 0;
+ if (++i == reqs->num_ctl)
+ break;
+ else
+ continue;
+ }
+
+ /* otherwise the CTL must match the topology's capabilities */
+ if (reqs->needs_split_display != has_split_display)
+ continue;
+
+ if (reqs->top_name == SDE_RM_TOPOLOGY_PPSPLIT && !has_ppsplit)
+ continue;
+
+ ctls[i] = iter.blk;
+ SDE_DEBUG("ctl %d match\n", iter.blk->id);
+
+ if (++i == reqs->num_ctl)
+ break;
+ }
+
+ if (i != reqs->num_ctl)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < reqs->num_ctl; i++) {
+ ctls[i]->rsvp_nxt = rsvp;
+ SDE_EVT32(ctls[i]->type, rsvp->enc_id, ctls[i]->id);
+ }
+
+ return 0;
+}
+
+/**
+ * _sde_rm_reserve_cdm - reserve a CDM block connectable to the given intf/wb
+ * @rm: sde resource manager handle
+ * @rsvp: reservation being built; the chosen CDM is tagged via rsvp_nxt
+ * @id: id of the INTF or WB the CDM must be able to connect to
+ * @type: SDE_HW_BLK_INTF or SDE_HW_BLK_WB, selects which connect mask to test
+ * @Return: 0 on success, -ENAVAIL if no connectable free CDM exists
+ */
+static int _sde_rm_reserve_cdm(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ uint32_t id,
+ enum sde_hw_blk_type type)
+{
+ struct sde_rm_hw_iter iter;
+ struct sde_cdm_cfg *cdm;
+
+ sde_rm_init_hw_iter(&iter, 0, SDE_HW_BLK_CDM);
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
+ bool match = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ cdm = (struct sde_cdm_cfg *)(iter.blk->catalog);
+
+ /* catalog encodes connectable intfs/wbs as bitmasks */
+ if (type == SDE_HW_BLK_INTF && id != INTF_MAX)
+ match = test_bit(id, &cdm->intf_connect);
+ else if (type == SDE_HW_BLK_WB && id != WB_MAX)
+ match = test_bit(id, &cdm->wb_connect);
+
+ SDE_DEBUG("type %d id %d, cdm intfs %lu wbs %lu match %d\n",
+ type, id, cdm->intf_connect, cdm->wb_connect,
+ match);
+
+ if (!match)
+ continue;
+
+ iter.blk->rsvp_nxt = rsvp;
+ SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
+ break;
+ }
+
+ /* iter.hw stays NULL when the iterator was exhausted without a match */
+ if (!iter.hw) {
+ SDE_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+/**
+ * _sde_rm_reserve_intf_or_wb - reserve one fixed-id INTF or WB block
+ * @rm: sde resource manager handle
+ * @rsvp: reservation being built; the block is tagged via rsvp_nxt
+ * @id: exact block id requested by the encoder
+ * @type: SDE_HW_BLK_INTF or SDE_HW_BLK_WB
+ * @needs_cdm: when true, additionally reserve a CDM connectable to this block
+ * @Return: 0 on success, -ENAVAIL if already taken, -EINVAL if id not found
+ */
+static int _sde_rm_reserve_intf_or_wb(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ uint32_t id,
+ enum sde_hw_blk_type type,
+ bool needs_cdm)
+{
+ struct sde_rm_hw_iter iter;
+ int ret = 0;
+
+ /* Find the block entry in the rm, and note the reservation */
+ sde_rm_init_hw_iter(&iter, 0, type);
+ while (_sde_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id != id)
+ continue;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ SDE_ERROR("type %d id %d already reserved\n", type, id);
+ return -ENAVAIL;
+ }
+
+ iter.blk->rsvp_nxt = rsvp;
+ SDE_EVT32(iter.blk->type, rsvp->enc_id, iter.blk->id);
+ break;
+ }
+
+ /* Shouldn't happen since wbs / intfs are fixed at probe */
+ if (!iter.hw) {
+ SDE_ERROR("couldn't find type %d id %d\n", type, id);
+ return -EINVAL;
+ }
+
+ /* Expected only one intf or wb will request cdm */
+ if (needs_cdm)
+ ret = _sde_rm_reserve_cdm(rm, rsvp, id, type);
+
+ return ret;
+}
+
+/**
+ * _sde_rm_reserve_intf_related_hw - reserve every INTF and WB the encoder
+ *	reported in its hw resource table, plus any CDM they require
+ * @rm: sde resource manager handle
+ * @rsvp: reservation being built
+ * @hw_res: encoder hw resources; array slots set to a mode != INTF_MODE_NONE
+ *	indicate the block at (index + first id) is in use
+ * @Return: 0 on success, first error from the per-block reservation otherwise
+ */
+static int _sde_rm_reserve_intf_related_hw(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_encoder_hw_resources *hw_res)
+{
+ int i, ret = 0;
+ u32 id;
+
+ for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+ if (hw_res->intfs[i] == INTF_MODE_NONE)
+ continue;
+ /* array index maps 1:1 onto the hw block id space */
+ id = i + INTF_0;
+ ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
+ SDE_HW_BLK_INTF, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ /* NOTE(review): wbs[] entries are also compared against INTF_MODE_NONE;
+ * presumably wb slots store intf modes too — confirm against encoder code
+ */
+ for (i = 0; i < ARRAY_SIZE(hw_res->wbs); i++) {
+ if (hw_res->wbs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + WB_0;
+ ret = _sde_rm_reserve_intf_or_wb(rm, rsvp, id,
+ SDE_HW_BLK_WB, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+/**
+ * _sde_rm_make_next_rsvp - build the proposed ("next") reservation for an
+ *	encoder: layer mixers first, then CTLs, then INTF/WB/CDM
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder the reservation is made for
+ * @crtc_state: proposed crtc state (currently unused here)
+ * @conn_state: proposed connector state, used to derive the display type
+ * @rsvp: preallocated reservation, filled in and added to rm->rsvps
+ * @reqs: previously populated hw requirements; disp_id is set here
+ * @Return: 0 on success, negative error code on failure
+ */
+static int _sde_rm_make_next_rsvp(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+{
+ int ret;
+ struct sde_connector *sde_conn =
+ to_sde_connector(conn_state->connector);
+ struct dsi_display *dsi;
+ struct sde_hdmi *hdmi;
+ const char *display_type;
+
+ /* Map the DT display type string to the numeric disp_id used when
+ * matching LM/CTL blocks that carry a *_PREF capability bit.
+ */
+ if (sde_conn->connector_type == DRM_MODE_CONNECTOR_DSI) {
+ dsi = (struct dsi_display *)sde_conn->display;
+ display_type = dsi->display_type;
+ } else if (sde_conn->connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ hdmi = (struct sde_hdmi *)sde_conn->display;
+ display_type = hdmi->display_type;
+ } else {
+ /* virtual display does not have display type */
+ display_type = "none";
+ }
+ if (!strcmp("primary", display_type))
+ reqs->disp_id = 1;
+ else if (!strcmp("secondary", display_type))
+ reqs->disp_id = 2;
+ else if (!strcmp("tertiary", display_type))
+ reqs->disp_id = 3;
+ else /* No display type set in dtsi */
+ reqs->disp_id = 0;
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ /*
+ * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+ * Do assignment preferring to give away low-resource mixers first:
+ * - Check mixers without DSPPs
+ * - Only then allow to grab from mixers with DSPP capability
+ */
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, 0);
+ if (ret && !RM_RQ_DSPP(reqs)) {
+ reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, 0);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ * Note: the return value must be captured (was previously discarded,
+ * silently ignoring CTL reservation failures).
+ */
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, 0);
+ if (ret && !reqs->needs_split_display) {
+ reqs->needs_split_display = true;
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, 0);
+ }
+ if (ret) {
+ SDE_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+ ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+
+ return ret;
+}
+
+/**
+ * _sde_rm_make_next_rsvp_for_splash - build the next reservation during
+ *	bootloader splash handoff, preferring the LM/CTL ids the bootloader
+ *	already programmed so the continuous splash is not disturbed
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder the reservation is made for
+ * @crtc_state: proposed crtc state (currently unused here)
+ * @conn_state: proposed connector state (currently unused here)
+ * @rsvp: preallocated reservation, filled in and added to rm->rsvps
+ * @reqs: previously populated hw requirements
+ * @Return: 0 on success, negative error code on failure
+ */
+static int _sde_rm_make_next_rsvp_for_splash(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_rsvp *rsvp,
+ struct sde_rm_requirements *reqs)
+{
+ int ret;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ int i;
+ int intf_id = INTF_0;
+ u32 prefer_lm_id = 0;
+ u32 prefer_ctl_id = 0;
+
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ sinfo = &sde_kms->splash_info;
+
+ /* Get the intf id first, and reserve the same lm and ctl
+ * in bootloader for kernel resource manager
+ */
+ for (i = 0; i < ARRAY_SIZE(reqs->hw_res.intfs); i++) {
+ if (reqs->hw_res.intfs[i] == INTF_MODE_NONE)
+ continue;
+ intf_id = i + INTF_0;
+ break;
+ }
+
+ /* get preferred lm id and ctl id */
+ for (i = 0; i < CTL_MAX - 1; i++) {
+ if (sinfo->res.top[i].intf_sel != intf_id)
+ continue;
+
+ prefer_lm_id = sinfo->res.top[i].lm[0].lm_id;
+ prefer_ctl_id = sinfo->res.top[i].lm[0].ctl_id;
+ break;
+ }
+
+ SDE_DEBUG("intf_id %d, prefer lm_id %d, ctl_id %d\n",
+ intf_id, prefer_lm_id, prefer_ctl_id);
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ /*
+ * Assign LMs and blocks whose usage is tied to them: DSPP & Pingpong.
+ * Do assignment preferring to give away low-resource mixers first:
+ * - Check mixers without DSPPs
+ * - Only then allow to grab from mixers with DSPP capability
+ */
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, prefer_lm_id);
+ if (ret && !RM_RQ_DSPP(reqs)) {
+ reqs->top_ctrl |= BIT(SDE_RM_TOPCTL_DSPP);
+ ret = _sde_rm_reserve_lms(rm, rsvp, reqs, prefer_lm_id);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check mixers without Split Display
+ * - Only then allow to grab from CTLs with split display capability
+ */
+ for (i = 0; i < sinfo->res.ctl_top_cnt; i++)
+ SDE_DEBUG("splash_info ctl_ids[%d] = %d\n",
+ i, sinfo->res.ctl_ids[i]);
+
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, prefer_ctl_id);
+ if (ret && !reqs->needs_split_display) {
+ reqs->needs_split_display = true;
+ /* capture the retry result (previously discarded, so a
+ * successful retry was still reported as a failure)
+ */
+ ret = _sde_rm_reserve_ctls(rm, rsvp, reqs, prefer_ctl_id);
+ }
+
+ if (ret) {
+ SDE_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs, WBs, and blks whose usage is tied to them: CTL & CDM */
+ ret = _sde_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+
+ return ret;
+}
+
+/**
+ * _sde_rm_populate_requirements - translate the proposed mode/connector state
+ *	into hw requirements: topology name, mixer count, CTL count, split
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder whose hw resources are queried
+ * @crtc_state: proposed crtc state, supplies the display mode width
+ * @conn_state: proposed connector state, supplies TOPOLOGY_CONTROL property
+ * @reqs: output requirements, fully reset before being filled
+ * @Return: 0 on success, -EINVAL for unsupported mixer counts
+ */
+static int _sde_rm_populate_requirements(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct sde_rm_requirements *reqs)
+{
+ const struct drm_display_mode *mode = &crtc_state->mode;
+
+ /**
+ * DRM<->HW Topologies
+ *
+ * Name: SINGLEPIPE
+ * Description: 1 LM, 1 PP, 1 INTF
+ * Condition: 1 DRM Encoder w/ 1 Display Tiles (Default)
+ *
+ * Name: DUALPIPE
+ * Description: 2 LM, 2 PP, 2 INTF
+ * Condition: 1 DRM Encoder w/ 2 Display Tiles
+ *
+ * Name: PPSPLIT
+ * Description: 1 LM, 1 PP + 1 Slave PP, 2 INTF
+ * Condition:
+ * 1 DRM Encoder w/ 2 Display Tiles
+ * topology_control & SDE_TOPREQ_PPSPLIT
+ *
+ * Name: DUALPIPEMERGE
+ * Description: 2 LM, 2 PP, 3DMux, 1 INTF
+ * Condition:
+ * 1 DRM Encoder w/ 1 Display Tiles
+ * display_info.max_width >= layer_mixer.max_width
+ *
+ * Name: DUALPIPEMERGE
+ * Description: 2 LM, 2 PP, 3DMux, 1 INTF
+ * Condition:
+ * 1 DRM Encoder w/ 1 Display Tiles
+ * display_info.max_width <= layer_mixer.max_width
+ * topology_control & SDE_TOPREQ_FORCE_TILING
+ */
+
+ memset(reqs, 0, sizeof(*reqs));
+
+ reqs->top_ctrl = sde_connector_get_property(conn_state,
+ CONNECTOR_PROP_TOPOLOGY_CONTROL);
+ SDE_DEBUG("%s reqs->top_ctrl = %llu\n", __func__, reqs->top_ctrl);
+
+ sde_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+ /* Base assumption is LMs = h_tiles, conditions below may override */
+ reqs->num_lm = reqs->hw_res.display_num_of_h_tiles;
+
+ if (reqs->num_lm == 2) {
+ if (RM_RQ_PPSPLIT(reqs)) {
+ /* user requests serving dual display with 1 lm */
+ reqs->top_name = SDE_RM_TOPOLOGY_PPSPLIT;
+ reqs->num_lm = 1;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = true;
+ } else {
+ /* dual display, serve with 2 lms */
+ reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPE;
+ reqs->num_ctl = 2;
+ reqs->needs_split_display = true;
+ }
+
+ } else if (reqs->num_lm == 1) {
+ if (RM_RQ_FORCE_MIXER(reqs)) {
+ /* user request serving wide display with 1 lm */
+ reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = false;
+ } else if (mode->hdisplay > rm->lm_max_width) {
+ /* wide display, must split across 2 lm and merge */
+ reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
+ reqs->num_lm = 2;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = false;
+ } else if (RM_RQ_FORCE_TILING(reqs)) {
+ /* thin display, but user requests 2 lm and merge */
+ reqs->top_name = SDE_RM_TOPOLOGY_DUALPIPEMERGE;
+ reqs->num_lm = 2;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = false;
+ } else {
+ /* thin display, serve with only 1 lm */
+ reqs->top_name = SDE_RM_TOPOLOGY_SINGLEPIPE;
+ reqs->num_ctl = 1;
+ reqs->needs_split_display = false;
+ }
+
+ } else {
+ /* Currently no configurations with # LM > 2 */
+ SDE_ERROR("unsupported # of mixers %d\n", reqs->num_lm);
+ return -EINVAL;
+ }
+
+ SDE_DEBUG("top_ctrl 0x%llX num_h_tiles %d\n", reqs->top_ctrl,
+ reqs->hw_res.display_num_of_h_tiles);
+ SDE_DEBUG("display_max_width %d rm->lm_max_width %d\n",
+ mode->hdisplay, rm->lm_max_width);
+ /* the previous second num_lm/topology_name print was a duplicate of
+ * the line above and has been dropped
+ */
+ SDE_DEBUG("num_lm %d num_ctl %d topology_name %d\n", reqs->num_lm,
+ reqs->num_ctl, reqs->top_name);
+ SDE_EVT32(mode->hdisplay, rm->lm_max_width, reqs->num_lm,
+ reqs->top_ctrl, reqs->top_name, reqs->num_ctl);
+
+ return 0;
+}
+
+/**
+ * _sde_rm_get_rsvp - look up the reservation belonging to an encoder
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder used as the lookup key (by object id)
+ * @Return: matching reservation, or NULL if none exists / bad params
+ */
+static struct sde_rm_rsvp *_sde_rm_get_rsvp(
+ struct sde_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct sde_rm_rsvp *cur;
+
+ if (!rm || !enc) {
+ SDE_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ if (list_empty(&rm->rsvps))
+ return NULL;
+
+ /* reservations are keyed by the DRM encoder object id */
+ list_for_each_entry(cur, &rm->rsvps, list) {
+ if (cur->enc_id == enc->base.id)
+ return cur;
+ }
+
+ return NULL;
+}
+
+/**
+ * _sde_rm_get_connector - find the connector currently driven by an encoder
+ * @enc: DRM encoder to search for
+ * @Return: the connector whose ->encoder matches, or NULL
+ */
+static struct drm_connector *_sde_rm_get_connector(
+ struct drm_encoder *enc)
+{
+ struct drm_connector *cur;
+
+ /* scan the device-global connector list for one bound to enc */
+ list_for_each_entry(cur, &enc->dev->mode_config.connector_list, head) {
+ if (cur->encoder == enc)
+ return cur;
+ }
+
+ return NULL;
+}
+
+/**
+ * _sde_rm_release_rsvp - release resources and release a reservation
+ * @rm: KMS handle
+ * @rsvp: RSVP pointer to release and release resources for
+ * @conn: connector the reservation was made against (currently unused)
+ */
+static void _sde_rm_release_rsvp(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct drm_connector *conn)
+{
+ struct sde_rm_rsvp *rsvp_c, *rsvp_n;
+ struct sde_rm_hw_blk *blk;
+ enum sde_hw_blk_type type;
+
+ if (!rsvp)
+ return;
+
+ SDE_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+ /* unlink from the rm list; safe variant since we delete while walking */
+ list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+ if (rsvp == rsvp_c) {
+ list_del(&rsvp_c->list);
+ break;
+ }
+ }
+
+ /* untag both the committed (rsvp) and proposed (rsvp_nxt) ownership */
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ blk->rsvp = NULL;
+ SDE_DEBUG("rel rsvp %d enc %d %s %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type_name, blk->id);
+ }
+ if (blk->rsvp_nxt == rsvp) {
+ blk->rsvp_nxt = NULL;
+ SDE_DEBUG("rel rsvp_nxt %d enc %d %s %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type_name, blk->id);
+ }
+ }
+ }
+
+ kfree(rsvp);
+}
+
+/**
+ * sde_rm_release - release all hw blocks reserved for the given encoder,
+ *	unless the connector holds the RESERVE_LOCK topology-control bit
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder whose reservation should be dropped
+ */
+void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc)
+{
+ struct sde_rm_rsvp *rsvp;
+ struct drm_connector *conn;
+ uint64_t top_ctrl;
+
+ if (!rm || !enc) {
+ SDE_ERROR("invalid params\n");
+ return;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _sde_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ conn = _sde_rm_get_connector(enc);
+ if (!conn) {
+ SDE_ERROR("failed to get connector for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ top_ctrl = sde_connector_get_property(conn->state,
+ CONNECTOR_PROP_TOPOLOGY_CONTROL);
+
+ if (top_ctrl & BIT(SDE_RM_TOPCTL_RESERVE_LOCK)) {
+ /* userspace asked to keep this reservation across releases */
+ SDE_DEBUG("rsvp[s%de%d] not releasing locked resources\n",
+ rsvp->seq, rsvp->enc_id);
+ } else {
+ SDE_DEBUG("release rsvp[s%de%d]\n", rsvp->seq,
+ rsvp->enc_id);
+ _sde_rm_release_rsvp(rm, rsvp, conn);
+
+ /* reflect the teardown in the read-only topology-name property */
+ (void) msm_property_set_property(
+ sde_connector_get_propinfo(conn),
+ sde_connector_get_property_values(conn->state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ SDE_RM_TOPOLOGY_UNKNOWN);
+ }
+
+end:
+ mutex_unlock(&rm->rm_lock);
+}
+
+/**
+ * _sde_rm_commit_rsvp - promote the proposed reservation to active
+ * @rm: sde resource manager handle
+ * @rsvp: reservation to commit
+ * @conn_state: connector state whose TOPOLOGY_NAME property is updated
+ * @Return: 0 on success; on property-set failure the rsvp is released and
+ *	the error is returned
+ */
+static int _sde_rm_commit_rsvp(
+ struct sde_rm *rm,
+ struct sde_rm_rsvp *rsvp,
+ struct drm_connector_state *conn_state)
+{
+ struct sde_rm_hw_blk *blk;
+ enum sde_hw_blk_type type;
+ int ret;
+
+ /* publish the chosen topology to userspace before swapping blocks */
+ ret = msm_property_set_property(
+ sde_connector_get_propinfo(conn_state->connector),
+ sde_connector_get_property_values(conn_state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ rsvp->topology);
+ if (ret) {
+ SDE_ERROR("failed to set topology name property, ret %d\n",
+ ret);
+ _sde_rm_release_rsvp(rm, rsvp, conn_state->connector);
+ return ret;
+ }
+
+ /* Swap next rsvp to be the active */
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp_nxt) {
+ blk->rsvp = blk->rsvp_nxt;
+ blk->rsvp_nxt = NULL;
+ }
+ }
+ }
+
+ /* ret is necessarily 0 here; the old "if (!ret)" guard was dead code */
+ SDE_DEBUG("rsrv enc %d topology %d\n", rsvp->enc_id,
+ rsvp->topology);
+ SDE_EVT32(rsvp->enc_id, rsvp->topology);
+
+ return 0;
+}
+
+/**
+ * sde_rm_check_property_topctl - validate a TOPOLOGY_CONTROL property value
+ * @val: proposed property bitmask
+ * @Return: 0 if valid, -EINVAL if mutually exclusive bits are both set
+ */
+int sde_rm_check_property_topctl(uint64_t val)
+{
+ const uint64_t incompatible = BIT(SDE_RM_TOPCTL_FORCE_TILING) |
+ BIT(SDE_RM_TOPCTL_PPSPLIT);
+
+ /* force-tiling needs two mixers, ppsplit forbids more than one */
+ if ((val & incompatible) == incompatible) {
+ SDE_ERROR("ppsplit & force_tiling are incompatible\n");
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+/**
+ * sde_rm_reserve - top-level entry: compute requirements and make (or test)
+ *	a hw reservation for the given encoder/crtc/connector triple
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder to reserve for
+ * @crtc_state: proposed crtc state
+ * @conn_state: proposed connector state
+ * @test_only: when true (atomic_check), the reservation is normally undone
+ *	after validation unless userspace set the RESERVE_LOCK bit
+ * @Return: 0 on success or when no modeset is needed, negative error code
+ *	otherwise
+ */
+int sde_rm_reserve(
+ struct sde_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool test_only)
+{
+ struct sde_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct sde_rm_requirements reqs;
+ struct msm_drm_private *priv;
+ struct sde_kms *sde_kms;
+ struct sde_connector *sde_conn;
+ int ret;
+
+ if (!rm || !enc || !crtc_state || !conn_state) {
+ SDE_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if (!enc->dev || !enc->dev->dev_private) {
+ SDE_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = enc->dev->dev_private;
+ if (!priv->kms) {
+ SDE_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ /* shared connectors manage their own resources elsewhere */
+ sde_conn = to_sde_connector(conn_state->connector);
+ if (sde_conn->is_shared)
+ return 0;
+
+ SDE_DEBUG("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+ conn_state->connector->base.id, enc->base.id,
+ crtc_state->crtc->base.id, test_only);
+ SDE_EVT32(enc->base.id, conn_state->connector->base.id);
+
+ mutex_lock(&rm->rm_lock);
+
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_BEGIN);
+
+ ret = _sde_rm_populate_requirements(rm, enc, crtc_state,
+ conn_state, &reqs);
+ if (ret) {
+ SDE_ERROR("failed to populate hw requirements\n");
+ goto end;
+ }
+
+ /*
+ * We only support one active reservation per-hw-block. But to implement
+ * transactional semantics for test-only, and for allowing failure while
+ * modifying your existing reservation, over the course of this
+ * function we can have two reservations:
+ * Current: Existing reservation
+ * Next: Proposed reservation. The proposed reservation may fail, or may
+ * be discarded if in test-only mode.
+ * If reservation is successful, and we're not in test-only, then we
+ * replace the current with the next.
+ */
+ rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ rsvp_cur = _sde_rm_get_rsvp(rm, enc);
+
+ /*
+ * User can request that we clear out any reservation during the
+ * atomic_check phase by using this CLEAR bit
+ */
+ if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+ SDE_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+ rsvp_cur->seq, rsvp_cur->enc_id);
+ _sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ rsvp_cur = NULL;
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_CLEAR);
+ (void) msm_property_set_property(
+ sde_connector_get_propinfo(
+ conn_state->connector),
+ sde_connector_get_property_values(conn_state),
+ CONNECTOR_PROP_TOPOLOGY_NAME,
+ SDE_RM_TOPOLOGY_UNKNOWN);
+ }
+
+ /* Check the proposed reservation, store it in hw's "next" field */
+ if (sde_kms->splash_info.handoff) {
+ SDE_DEBUG("Reserve resource for splash\n");
+ ret = _sde_rm_make_next_rsvp_for_splash
+ (rm, enc, crtc_state, conn_state, rsvp_nxt, &reqs);
+ } else
+ ret = _sde_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
+
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_AFTER_RSVPNEXT);
+
+ if (ret) {
+ SDE_ERROR("failed to reserve hw resources: %d\n", ret);
+ _sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ /*
+ * Normally, if test_only, test the reservation and then undo
+ * However, if the user requests LOCK, then keep the reservation
+ * made during the atomic_check phase.
+ */
+ SDE_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+ _sde_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else {
+ if (test_only && RM_RQ_LOCK(&reqs))
+ SDE_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+ _sde_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+ ret = _sde_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ }
+
+ _sde_rm_print_rsvps(rm, SDE_RM_STAGE_FINAL);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
+
+/**
+ * sde_rm_ext_blk_create_reserve - register an externally-created hw block and
+ *	attach it to the encoder's reservation (creating the rsvp if needed)
+ * @rm: sde resource manager handle
+ * @type: hw block type, must be < SDE_HW_BLK_MAX
+ * @id: block id
+ * @hw: driver hw object; ownership transfers to rm (destroyed on OOM here)
+ * @enc: DRM encoder the block belongs to
+ * @Return: 0 on success, -EINVAL on bad params, -ENOMEM on allocation failure
+ */
+int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ uint32_t id,
+ void *hw,
+ struct drm_encoder *enc)
+{
+ struct sde_rm_hw_blk *blk;
+ struct sde_rm_rsvp *rsvp;
+ int ret = 0;
+
+ if (!rm || !hw || !enc) {
+ SDE_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ if (type >= SDE_HW_BLK_MAX) {
+ SDE_ERROR("invalid HW type\n");
+ return -EINVAL;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+ /* we own hw on failure too: free it rather than leak it */
+ _sde_rm_hw_destroy(type, hw);
+ return -ENOMEM;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _sde_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ rsvp = kzalloc(sizeof(*rsvp), GFP_KERNEL);
+ if (!rsvp) {
+ ret = -ENOMEM;
+ kfree(blk);
+ goto end;
+ }
+
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ SDE_DEBUG("create rsvp %d for enc %d\n",
+ rsvp->seq, rsvp->enc_id);
+ }
+
+ /* external blocks are committed immediately (rsvp, not rsvp_nxt) */
+ blk->type = type;
+ blk->id = id;
+ blk->hw = hw;
+ blk->rsvp = rsvp;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ SDE_DEBUG("create blk %d %d for rsvp %d enc %d\n", blk->type, blk->id,
+ rsvp->seq, rsvp->enc_id);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+ return ret;
+}
+
+/**
+ * sde_rm_ext_blk_destroy - remove and free every externally-registered hw
+ *	block tied to the encoder's reservation, then free the reservation
+ * @rm: sde resource manager handle
+ * @enc: DRM encoder whose external blocks should be destroyed
+ * @Return: 0 on success, -EINVAL on bad params, -ENOENT if no rsvp exists
+ */
+int sde_rm_ext_blk_destroy(struct sde_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct sde_rm_hw_blk *blk = NULL, *p;
+ struct sde_rm_rsvp *rsvp;
+ enum sde_hw_blk_type type;
+ int ret = 0;
+
+ if (!rm || !enc) {
+ SDE_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _sde_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ ret = -ENOENT;
+ SDE_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ /* safe iteration: entries are deleted while walking each type list */
+ for (type = 0; type < SDE_HW_BLK_MAX; type++) {
+ list_for_each_entry_safe(blk, p, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ list_del(&blk->list);
+ SDE_DEBUG("del blk %d %d from rsvp %d enc %d\n",
+ blk->type, blk->id,
+ rsvp->seq, rsvp->enc_id);
+ kfree(blk);
+ }
+ }
+ }
+
+ SDE_DEBUG("del rsvp %d\n", rsvp->seq);
+ list_del(&rsvp->list);
+ kfree(rsvp);
+end:
+ mutex_unlock(&rm->rm_lock);
+ return ret;
+}
+
+/**
+ * _sde_rm_get_ctl_lm_for_splash - read back which layer mixers the bootloader
+ *	programmed on a given CTL and record them in the splash topology
+ * @ctl: hw ctl whose layer registers are read
+ * @max_lm_cnt: number of mixers to probe (catalog mixer count)
+ * @lm_cnt: starting write index into @lm_ids (passed by value; the local
+ *	increment only advances the cursor within this call)
+ * @lm_ids: output array of active mixer ids
+ * @top: splash ctl-top record to fill with per-lm register values
+ * @index: CTL index, used to derive the ctl id (index + CTL_0)
+ * @Return: number of active mixers found on this CTL (top->ctl_lm_cnt),
+ *	0 on invalid parameters
+ */
+static int _sde_rm_get_ctl_lm_for_splash(struct sde_hw_ctl *ctl,
+ int max_lm_cnt, u8 lm_cnt, u8 *lm_ids,
+ struct splash_ctl_top *top, int index)
+{
+ int j;
+ struct splash_lm_hw *lm;
+
+ if (!ctl || !top) {
+ SDE_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ lm = top->lm;
+ for (j = 0; j < max_lm_cnt; j++) {
+ /* NOTE(review): assumes ops.read_ctl_layers_for_splash is
+ * non-NULL here — confirm it is always set for this catalog
+ */
+ lm[top->ctl_lm_cnt].lm_reg_value =
+ ctl->ops.read_ctl_layers_for_splash(ctl, j + LM_0);
+
+ /* a non-zero layer register means the bootloader used this lm */
+ if (lm[top->ctl_lm_cnt].lm_reg_value) {
+ lm[top->ctl_lm_cnt].ctl_id = index + CTL_0;
+ lm_ids[lm_cnt++] = j + LM_0;
+ lm[top->ctl_lm_cnt].lm_id = j + LM_0;
+ top->ctl_lm_cnt++;
+ }
+ }
+
+ return top->ctl_lm_cnt;
+}
+
+/**
+ * _sde_rm_get_ctl_top_for_splash - read the CTL_TOP register programmed by
+ *	the bootloader and decode its interface-select field
+ * @ctl: hw ctl to read from
+ * @top: output splash record; top->value gets the raw register, and
+ *	top->intf_sel gets bits [7:4] (the selected interface)
+ */
+static void _sde_rm_get_ctl_top_for_splash(struct sde_hw_ctl *ctl,
+ struct splash_ctl_top *top)
+{
+ if (!ctl || !top) {
+ SDE_ERROR("invalid ctl or top\n");
+ return;
+ }
+
+ if (!ctl->ops.read_ctl_top_for_splash) {
+ SDE_ERROR("read_ctl_top not initialized\n");
+ return;
+ }
+
+ top->value = ctl->ops.read_ctl_top_for_splash(ctl);
+ /* intf_sel lives in bits [7:4] of CTL_TOP */
+ top->intf_sel = (top->value >> 4) & 0xf;
+}
+
+/**
+ * sde_rm_read_resource_for_splash - walk all CTL blocks and snapshot the
+ *	bootloader's CTL/LM/INTF configuration into the splash info struct,
+ *	so the kernel can later reserve the same blocks during handoff
+ * @rm: sde resource manager handle
+ * @splash_info: opaque pointer to struct sde_splash_info to fill in
+ * @cat: mdss catalog, supplies the mixer count
+ * @Return: 0 on success, -EINVAL on bad params
+ */
+int sde_rm_read_resource_for_splash(struct sde_rm *rm,
+ void *splash_info,
+ struct sde_mdss_cfg *cat)
+{
+ struct sde_rm_hw_iter ctl_iter;
+ int index = 0;
+ struct sde_splash_info *sinfo;
+ struct sde_hw_ctl *ctl;
+
+ if (!rm || !splash_info || !cat)
+ return -EINVAL;
+
+ sinfo = (struct sde_splash_info *)splash_info;
+
+ sde_rm_init_hw_iter(&ctl_iter, 0, SDE_HW_BLK_CTL);
+
+ /* NOTE(review): index is not bounds-checked against the size of
+ * sinfo->res.top[]; assumes the CTL block count never exceeds it —
+ * confirm against the splash_info definition
+ */
+ while (_sde_rm_get_hw_locked(rm, &ctl_iter)) {
+ ctl = (struct sde_hw_ctl *)ctl_iter.hw;
+
+ _sde_rm_get_ctl_top_for_splash(ctl,
+ &sinfo->res.top[index]);
+
+ /* only CTLs with an interface selected were active in splash */
+ if (sinfo->res.top[index].intf_sel) {
+ sinfo->res.lm_cnt +=
+ _sde_rm_get_ctl_lm_for_splash(ctl,
+ cat->mixer_count,
+ sinfo->res.lm_cnt,
+ sinfo->res.lm_ids,
+ &sinfo->res.top[index], index);
+
+ sinfo->res.ctl_ids[sinfo->res.ctl_top_cnt] =
+ index + CTL_0;
+
+ sinfo->res.ctl_top_cnt++;
+ }
+ index++;
+ }
+
+ SDE_DEBUG("%s: ctl_top_cnt=%d, lm_cnt=%d\n", __func__,
+ sinfo->res.ctl_top_cnt, sinfo->res.lm_cnt);
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_rm.h b/drivers/gpu/drm/msm/sde/sde_rm.h
new file mode 100644
index 000000000000..d0a31f97d45b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_rm.h
@@ -0,0 +1,253 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __SDE_RM_H__
+#define __SDE_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "sde_hw_top.h"
+
+/**
+ * enum sde_rm_topology_name - HW resource use case in use by connector
+ * @SDE_RM_TOPOLOGY_UNKNOWN: No topology in use currently
+ * @SDE_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_PPSPLIT: 1 LM, 2 PPs, 2 INTF/WB
+ * @SDE_RM_TOPOLOGY_DUALPIPEMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum sde_rm_topology_name {
+ SDE_RM_TOPOLOGY_UNKNOWN = 0,
+ SDE_RM_TOPOLOGY_SINGLEPIPE,
+ SDE_RM_TOPOLOGY_DUALPIPE,
+ SDE_RM_TOPOLOGY_PPSPLIT,
+ SDE_RM_TOPOLOGY_DUALPIPEMERGE,
+};
+
+/**
+ * enum sde_rm_topology_control - HW resource use case in use by connector
+ * @SDE_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ * test, reserve the resources for this display.
+ * Normal behavior would not impact the reservation
+ * list during the AtomicTest phase.
+ * @SDE_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ * release any reservation held by this display.
+ * Normal behavior would not impact the
+ * reservation list during the AtomicTest phase.
+ * @SDE_RM_TOPCTL_DSPP: Require layer mixers with DSPP capabilities
+ * @SDE_RM_TOPCTL_FORCE_TILING: Require kernel to split across multiple layer
+ * mixers, despite width fitting within capability
+ * of a single layer mixer.
+ * @SDE_RM_TOPCTL_PPSPLIT: Require kernel to use pingpong split pipe
+ * configuration instead of dual pipe.
+ * @SDE_RM_TOPCTL_FORCE_MIXER: Require kernel to force single mixer usage
+ */
+enum sde_rm_topology_control {
+ SDE_RM_TOPCTL_RESERVE_LOCK,
+ SDE_RM_TOPCTL_RESERVE_CLEAR,
+ SDE_RM_TOPCTL_DSPP,
+ SDE_RM_TOPCTL_FORCE_TILING,
+ SDE_RM_TOPCTL_PPSPLIT,
+ SDE_RM_TOPCTL_FORCE_MIXER,
+};
+
+/**
+ * struct sde_rm - SDE dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ * list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct sde_rm {
+ struct drm_device *dev;
+ struct list_head rsvps;
+ struct list_head hw_blks[SDE_HW_BLK_MAX];
+ struct sde_hw_mdp *hw_mdp;
+ uint32_t lm_max_width;
+ uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
+};
+
+/**
+ * struct sde_rm_hw_blk - resource manager internal structure
+ * forward declaration for single iterator definition without void pointer
+ */
+struct sde_rm_hw_blk;
+
+/**
+ * struct sde_rm_hw_iter - iterator for use with sde_rm
+ * @hw: sde_hw object requested, or NULL on failure
+ * @blk: sde_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct sde_rm_hw_iter {
+ void *hw;
+ struct sde_rm_hw_blk *blk;
+ uint32_t enc_id;
+ enum sde_hw_blk_type type;
+};
+
+/**
+ * sde_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: SDE Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_init(struct sde_rm *rm,
+ struct sde_mdss_cfg *cat,
+ void *mmio,
+ struct drm_device *dev);
+
+/**
+ * sde_rm_destroy - Free all memory allocated by sde_rm_init
+ * @rm: SDE Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_destroy(struct sde_rm *rm);
+
+/**
+ * sde_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the use connections and user requirements, specified through related
+ * topology control properties, and reserve hardware blocks to that
+ * display chain.
+ * HW blocks can then be accessed through sde_rm_get_* functions.
+ * HW Reservations should be released via sde_rm_release_hw.
+ * @rm: SDE Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_reserve(struct sde_rm *rm,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ bool test_only);
+
+/**
+ * sde_rm_release - Given the encoder for the display chain, release any
+ *	HW blocks previously reserved for that use case.
+ * @rm: SDE Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void sde_rm_release(struct sde_rm *rm, struct drm_encoder *enc);
+
+/**
+ * sde_rm_get_mdp - Retrieve HW block for MDP TOP.
+ * This is never reserved, and is usable by any display.
+ * @rm: SDE Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct sde_hw_mdp *sde_rm_get_mdp(struct sde_rm *rm);
+
+/**
+ * sde_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ * using sde_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void sde_rm_init_hw_iter(
+ struct sde_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum sde_hw_blk_type type);
+/**
+ * sde_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type for a given encoder.
+ * Initialize an iterator object.
+ * Set hw block type of interest. Set encoder id of interest, 0 for any.
+ * Function returns first hw of type for that encoder.
+ * Subsequent calls will return the next reserved hw of that type in-order.
+ * Iterator HW pointer will be null on failure to find hw.
+ * @rm: SDE Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool sde_rm_get_hw(struct sde_rm *rm, struct sde_rm_hw_iter *iter);
+
+/**
+ * sde_rm_get_hw_by_id - retrieve hw object given hw type and hw id
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type and id.
+ * Function returns the hw resource pointer.
+ * @rm: SDE Resource Manager handle
+ * @type: hw type
+ * @id: hw id
+ * @Return: hw resource pointer on match found, NULL on no match found
+ */
+void *sde_rm_get_hw_by_id(struct sde_rm *rm, enum sde_hw_blk_type type, int id);
+
+/**
+ * sde_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int sde_rm_check_property_topctl(uint64_t val);
+
+/**
+ * sde_rm_read_resource_for_splash - read splash resource used in bootloader
+ * @rm: SDE Resource Manager handle
+ * @sinfo: handle for splash info
+ * @cat: Pointer to hardware catalog
+ */
+int sde_rm_read_resource_for_splash(struct sde_rm *rm,
+ void *sinfo,
+ struct sde_mdss_cfg *cat);
+
+/**
+ * sde_rm_ext_blk_create_reserve - Create external HW blocks
+ * in resource manager and reserve for specific encoder.
+ * @rm: SDE Resource Manager handle
+ * @type: external HW block type
+ * @id: external HW block id
+ * @hw: external HW block
+ * @enc: DRM Encoder handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_ext_blk_create_reserve(struct sde_rm *rm,
+ enum sde_hw_blk_type type,
+ uint32_t id,
+ void *hw,
+ struct drm_encoder *enc);
+
+/**
+ * sde_rm_ext_blk_destroy - Given the encoder for the display chain, release
+ * external HW blocks created for that.
+ * @rm: SDE Resource Manager handle
+ * @enc: DRM Encoder handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int sde_rm_ext_blk_destroy(struct sde_rm *rm,
+ struct drm_encoder *enc);
+
+#endif /* __SDE_RM_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_shd.c b/drivers/gpu/drm/msm/sde/sde_shd.c
new file mode 100644
index 000000000000..2c42676bedcb
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_shd.c
@@ -0,0 +1,1101 @@
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm-shd] %s: " fmt, __func__
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/uaccess.h>
+#include <linux/debugfs.h>
+#include <linux/component.h>
+#include <linux/of_irq.h>
+#include "sde_connector.h"
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_crtc.h>
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "sde_connector.h"
+#include "sde_encoder.h"
+#include "sde_crtc.h"
+#include "sde_shd.h"
+#include "sde_splash.h"
+
+#define SHD_DEBUG(fmt, ...) pr_debug(fmt, ##__VA_ARGS__)
+
+/* global list of base displays, linked via shd_display_base::head */
+static LIST_HEAD(g_base_list);
+
+/*
+ * NOTE(review): shd_dt_match appears unused in this file; dt_match (defined
+ * near the driver struct below) is the table wired into the platform driver.
+ * Confirm and drop one of the two.
+ */
+static const struct of_device_id shd_dt_match[] = {
+	{.compatible = "qcom,shared-display"},
+	{}
+};
+
+/* wrapper tying a no-op drm_bridge to its shared display */
+struct shd_bridge {
+	struct drm_bridge base;
+	struct shd_display *display;
+};
+
+/**
+ * shd_display_get_num_of_displays - count every shared display registered
+ *	under all base displays.
+ * @Return: total number of shared displays
+ */
+int shd_display_get_num_of_displays(void)
+{
+	struct shd_display_base *base;
+	struct shd_display *disp;
+	int count = 0;
+
+	list_for_each_entry(base, &g_base_list, head)
+		list_for_each_entry(disp, &base->disp_list, head)
+			count++;
+
+	return count;
+}
+
+/**
+ * shd_display_get_displays - collect handles of all registered shared
+ *	displays into a caller-provided array.
+ * @displays: output array of display handles
+ * @count: capacity of @displays
+ * @Return: number of entries written to @displays
+ */
+int shd_display_get_displays(void **displays, int count)
+{
+	int display_num = 0;
+	struct shd_display *disp;
+	struct shd_display_base *base;
+
+	list_for_each_entry(base, &g_base_list, head) {
+		list_for_each_entry(disp, &base->disp_list, head) {
+			/* fix: honor the caller's capacity (was ignored) */
+			if (display_num >= count)
+				return display_num;
+			displays[display_num++] = disp;
+		}
+	}
+
+	return display_num;
+}
+
+/*
+ * Replacement detect op installed on the base connector so it is always
+ * reported disconnected and thus hidden from userspace.
+ */
+static enum drm_connector_status shd_display_base_detect(
+		struct drm_connector *connector,
+		bool force,
+		void *disp)
+{
+	return connector_status_disconnected;
+}
+
+/**
+ * shd_display_init_base_connector - find the connector attached to the base
+ *	encoder and hide it by overriding its detect op.
+ * @dev: DRM device
+ * @base: base display whose encoder was already resolved
+ * @Return: 0 on success, -ENOENT when no matching connector exists
+ */
+static int shd_display_init_base_connector(struct drm_device *dev,
+		struct shd_display_base *base)
+{
+	struct drm_encoder *encoder;
+	struct drm_connector *connector;
+	struct sde_connector *sde_conn;
+	int rc = 0;
+
+	list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+		sde_conn = to_sde_connector(connector);
+		encoder = sde_conn->encoder;
+		if (encoder == base->encoder) {
+			base->connector = connector;
+			break;
+		}
+	}
+
+	if (!base->connector) {
+		SDE_ERROR("failed to find connector\n");
+		rc = -ENOENT;
+		goto error;
+	}
+
+	/* set base connector disconnected*/
+	/* keep the original ops so shd_connector_detect can delegate later */
+	sde_conn = to_sde_connector(base->connector);
+	base->ops = sde_conn->ops;
+	sde_conn->ops.detect = shd_display_base_detect;
+
+	SHD_DEBUG("found base connector %d\n", base->connector->base.id);
+
+error:
+	return rc;
+}
+
+/**
+ * shd_display_init_base_encoder - locate the encoder that drives the base
+ *	display's hardware interface and derive the connector type from it.
+ * @dev: DRM device
+ * @base: base display; base->intf_idx selects the interface to match
+ * @Return: 0 on success, -ENOENT when no encoder uses the interface
+ */
+static int shd_display_init_base_encoder(struct drm_device *dev,
+		struct shd_display_base *base)
+{
+	struct drm_encoder *encoder;
+	struct sde_encoder_hw_resources hw_res;
+	struct sde_connector_state conn_state = {};
+	int i, rc = 0;
+
+	/* match the encoder whose hw resources include our interface index */
+	list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+		sde_encoder_get_hw_resources(encoder,
+			&hw_res, &conn_state.base);
+		for (i = 0; i < INTF_MAX; i++) {
+			if (hw_res.intfs[i] != INTF_MODE_NONE &&
+				base->intf_idx == i) {
+				base->encoder = encoder;
+				goto found;
+			}
+		}
+	}
+
+	if (!base->encoder) {
+		pr_err("can't find base encoder for intf %d\n",
+			base->intf_idx);
+		rc = -ENOENT;
+		goto error;
+	}
+
+found:
+	switch (base->encoder->encoder_type) {
+	case DRM_MODE_ENCODER_DSI:
+		base->connector_type = DRM_MODE_CONNECTOR_DSI;
+		break;
+	case DRM_MODE_ENCODER_TMDS:
+		base->connector_type = DRM_MODE_CONNECTOR_HDMIA;
+		break;
+	default:
+		base->connector_type = DRM_MODE_CONNECTOR_Unknown;
+		break;
+	}
+
+	SHD_DEBUG("found base encoder %d, type %d, connect type %d\n",
+		base->encoder->base.id,
+		base->encoder->encoder_type,
+		base->connector_type);
+
+error:
+	return rc;
+}
+
+/**
+ * shd_display_init_base_crtc - claim the last registered CRTC for the base
+ *	display, hide it from userspace and program the fixed base mode into
+ *	its state.
+ * @dev: DRM device
+ * @base: base display to attach the CRTC to
+ * @Return: 0 on success, negative errno from drm_atomic_set_mode_for_crtc
+ */
+static int shd_display_init_base_crtc(struct drm_device *dev,
+		struct shd_display_base *base)
+{
+	struct drm_crtc *crtc;
+	struct drm_display_mode *drm_mode;
+	int rc = 0;
+
+	crtc = list_last_entry(&dev->mode_config.crtc_list,
+		struct drm_crtc, head);
+
+	base->crtc = crtc;
+	base->encoder->crtc = crtc;
+	SHD_DEBUG("found base crtc %d\n", crtc->base.id);
+
+	/* hide crtc from user */
+	list_del_init(&crtc->head);
+
+	/* fixed mode is used */
+	drm_mode = &base->mode;
+
+	/* update crtc drm structure */
+	crtc->state->active = true;
+	rc = drm_atomic_set_mode_for_crtc(crtc->state, drm_mode);
+	if (rc) {
+		SDE_ERROR("Failed: set mode for crtc. rc = %d\n", rc);
+		goto error;
+	}
+	drm_mode_copy(&crtc->state->adjusted_mode, drm_mode);
+	drm_mode_copy(&crtc->mode, drm_mode);
+
+	/* flag everything changed so the first commit fully programs hw */
+	crtc->state->active_changed = true;
+	crtc->state->mode_changed = true;
+	crtc->state->connectors_changed = true;
+
+	if (base->connector) {
+		base->connector->state->crtc = crtc;
+		base->connector->state->best_encoder = base->encoder;
+		base->connector->encoder = base->encoder;
+	}
+
+error:
+	return rc;
+}
+
+/**
+ * shd_display_enable_base - bring up the underlying physical display
+ * @dev: DRM device
+ * @base: base display to enable
+ *
+ * Runs the encoder/crtc/bridge enable sequence by hand, outside the normal
+ * atomic commit path, using the fixed base mode. Caller holds
+ * base->base_mutex (see shd_display_enable).
+ */
+static void shd_display_enable_base(struct drm_device *dev,
+		struct shd_display_base *base)
+{
+	const struct drm_encoder_helper_funcs *enc_funcs;
+	const struct drm_crtc_helper_funcs *crtc_funcs;
+	struct drm_display_mode *adjusted_mode;
+	struct sde_crtc *sde_crtc;
+	struct sde_hw_mixer_cfg lm_cfg;
+	struct sde_hw_mixer *hw_lm;
+	int rc, i;
+
+	SHD_DEBUG("enable base display %d\n", base->intf_idx);
+
+	enc_funcs = base->encoder->helper_private;
+	if (!enc_funcs) {
+		SDE_ERROR("failed to find encoder helper\n");
+		return;
+	}
+
+	crtc_funcs = base->crtc->helper_private;
+	if (!crtc_funcs) {
+		SDE_ERROR("failed to find crtc helper\n");
+		return;
+	}
+
+	if (!base->connector) {
+		SDE_ERROR("failed to find base connector\n");
+		return;
+	}
+
+	adjusted_mode = drm_mode_duplicate(dev, &base->mode);
+	if (!adjusted_mode) {
+		SDE_ERROR("failed to create adjusted mode\n");
+		return;
+	}
+
+	drm_bridge_mode_fixup(base->encoder->bridge,
+			&base->mode,
+			adjusted_mode);
+
+	if (enc_funcs->atomic_check) {
+		rc = enc_funcs->atomic_check(base->encoder,
+				base->crtc->state,
+				base->connector->state);
+		if (rc) {
+			SDE_ERROR("encoder atomic check failed\n");
+			goto state_fail;
+		}
+	}
+
+	if (enc_funcs->mode_fixup) {
+		enc_funcs->mode_fixup(base->encoder,
+			&base->mode,
+			adjusted_mode);
+	}
+
+	if (enc_funcs->mode_set) {
+		enc_funcs->mode_set(base->encoder,
+			&base->mode,
+			adjusted_mode);
+	}
+
+	if (crtc_funcs->atomic_begin) {
+		crtc_funcs->atomic_begin(base->crtc,
+			base->crtc->state);
+	}
+
+	sde_crtc = to_sde_crtc(base->crtc);
+	if (!sde_crtc->num_mixers) {
+		SDE_ERROR("no layer mixer found\n");
+		goto state_fail;
+	}
+
+	/* split the base mode width evenly across the layer mixers */
+	lm_cfg.out_width = base->mode.hdisplay / sde_crtc->num_mixers;
+	lm_cfg.out_height = base->mode.vdisplay;
+	lm_cfg.flags = 0;
+	for (i = 0; i < sde_crtc->num_mixers; i++) {
+		/*
+		 * NOTE(review): right_mixer is assigned the loop index, so any
+		 * non-zero index marks a "right" mixer — confirm this is the
+		 * intended encoding for configurations with more than two LMs.
+		 */
+		lm_cfg.right_mixer = i;
+		hw_lm = sde_crtc->mixers[i].hw_lm;
+		hw_lm->ops.setup_mixer_out(hw_lm, &lm_cfg);
+	}
+
+	drm_bridge_mode_set(base->encoder->bridge,
+			&base->mode,
+			adjusted_mode);
+
+	drm_bridge_pre_enable(base->encoder->bridge);
+
+	if (enc_funcs->enable)
+		enc_funcs->enable(base->encoder);
+
+	sde_encoder_kickoff(base->encoder);
+
+	drm_bridge_enable(base->encoder->bridge);
+
+	base->enabled = true;
+
+state_fail:
+	drm_mode_destroy(dev, adjusted_mode);
+}
+
+/**
+ * shd_display_disable_base - tear down the underlying physical display by
+ *	running the bridge/encoder disable sequence. Caller holds
+ *	base->base_mutex (see shd_display_disable).
+ * @dev: DRM device
+ * @base: base display to disable
+ */
+static void shd_display_disable_base(struct drm_device *dev,
+		struct shd_display_base *base)
+{
+	const struct drm_encoder_helper_funcs *enc_funcs;
+
+	SHD_DEBUG("disable base display %d\n", base->intf_idx);
+
+	enc_funcs = base->encoder->helper_private;
+	if (!enc_funcs) {
+		SDE_ERROR("failed to find encoder helper\n");
+		return;
+	}
+
+	drm_bridge_disable(base->encoder->bridge);
+
+	if (enc_funcs->disable)
+		enc_funcs->disable(base->encoder);
+
+	drm_bridge_post_disable(base->encoder->bridge);
+
+	base->enabled = false;
+}
+
+/**
+ * shd_display_enable - mark a shared display enabled and, if this is the
+ *	first active user, power up the base display.
+ * @display: shared display being enabled
+ */
+static void shd_display_enable(struct shd_display *display)
+{
+	struct drm_device *dev = display->drm_dev;
+	struct shd_display_base *base = display->base;
+
+	SHD_DEBUG("enable %s conn %d\n", display->name,
+		DRMID(display->connector));
+
+	mutex_lock(&base->base_mutex);
+
+	display->enabled = true;
+
+	if (!base->enabled) {
+		shd_display_enable_base(dev, base);
+		/*
+		 * Since base display is enabled, and it's marked to have
+		 * splash on, but it's not available to user. So for early
+		 * splash case, it's needed to update total registered
+		 * connector number to reflect the true case to make handoff
+		 * can finish.
+		 */
+		sde_splash_decrease_connector_cnt(dev, base->connector_type,
+					display->cont_splash_enabled);
+	}
+
+	mutex_unlock(&base->base_mutex);
+}
+
+/**
+ * shd_display_disable - mark a shared display disabled; power down the base
+ *	display only when no other shared display on it remains enabled.
+ * @display: shared display being disabled
+ */
+static void shd_display_disable(struct shd_display *display)
+{
+	struct drm_device *dev = display->drm_dev;
+	struct shd_display_base *base = display->base;
+	struct shd_display *p;
+	bool enabled = false;
+
+	SHD_DEBUG("disable %s conn %d\n", display->name,
+		DRMID(display->connector));
+
+	mutex_lock(&base->base_mutex);
+
+	display->enabled = false;
+
+	if (!base->enabled)
+		goto end;
+
+	/* keep the base alive while any sibling display is still enabled */
+	list_for_each_entry(p, &base->disp_list, head) {
+		if (p->enabled) {
+			enabled = true;
+			break;
+		}
+	}
+
+	if (!enabled)
+		shd_display_disable_base(dev, base);
+
+end:
+	mutex_unlock(&base->base_mutex);
+}
+
+/**
+ * shd_display_prepare_commit - before an atomic commit, enable the base
+ *	display for every shared connector becoming active with a modeset.
+ * @sde_kms: kms handle
+ * @state: atomic state being committed
+ */
+void shd_display_prepare_commit(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	int i;
+
+	if (!sde_kms->shd_display_count)
+		return;
+
+	for_each_connector_in_state(state, connector, old_conn_state, i) {
+		struct sde_connector *sde_conn;
+
+		sde_conn = to_sde_connector(connector);
+		if (!sde_conn->is_shared)
+			continue;
+
+		if (!connector->state->best_encoder)
+			continue;
+
+		/* fix: connector may not be bound to a CRTC in this state */
+		if (!connector->state->crtc)
+			continue;
+
+		if (!connector->state->crtc->state->active ||
+				!drm_atomic_crtc_needs_modeset(
+					connector->state->crtc->state))
+			continue;
+
+		shd_display_enable(sde_conn->display);
+	}
+}
+
+/**
+ * shd_display_complete_commit - after an atomic commit, disable shared
+ *	displays whose CRTC went from active to inactive through a modeset.
+ * @sde_kms: kms handle
+ * @state: atomic state that was committed
+ */
+void shd_display_complete_commit(struct sde_kms *sde_kms,
+		struct drm_atomic_state *state)
+{
+	struct drm_connector *connector;
+	struct drm_connector_state *old_conn_state;
+	int i;
+
+	if (!sde_kms->shd_display_count)
+		return;
+
+	for_each_connector_in_state(state, connector, old_conn_state, i) {
+		struct sde_connector *sde_conn;
+		struct drm_crtc_state *old_crtc_state;
+		unsigned int crtc_idx;
+
+		sde_conn = to_sde_connector(connector);
+		if (!sde_conn->is_shared)
+			continue;
+
+		if (!old_conn_state->crtc)
+			continue;
+
+		/* look up the pre-commit CRTC state from the atomic state */
+		crtc_idx = drm_crtc_index(old_conn_state->crtc);
+		old_crtc_state = state->crtc_states[crtc_idx];
+
+		if (!old_crtc_state->active ||
+		    !drm_atomic_crtc_needs_modeset(old_conn_state->crtc->state))
+			continue;
+
+		/* still active after the commit: nothing to disable */
+		if (old_conn_state->crtc->state->active)
+			continue;
+
+		shd_display_disable(sde_conn->display);
+	}
+}
+
+/**
+ * shd_display_post_init - late init step: attach a CRTC to every base
+ *	display that does not have one yet.
+ * @sde_kms: kms handle holding the shared display list
+ * @Return: 0 on success, first error from shd_display_init_base_crtc
+ */
+int shd_display_post_init(struct sde_kms *sde_kms)
+{
+	struct shd_display *disp;
+	struct shd_display_base *base;
+	int rc = 0, i;
+
+	for (i = 0; i < sde_kms->shd_display_count; i++) {
+		disp = sde_kms->shd_displays[i];
+		base = disp->base;
+
+		/* several displays share one base; init its CRTC only once */
+		if (base->crtc)
+			continue;
+
+		rc = shd_display_init_base_crtc(disp->drm_dev, base);
+		if (rc) {
+			SDE_ERROR("failed initialize base crtc\n");
+			break;
+		}
+	}
+
+	return rc;
+}
+
+/**
+ * shd_connector_get_info - connector op: report display info for a shared
+ *	display, lazily resolving the base encoder/connector on first call.
+ * @info: output display info
+ * @data: shd_display handle
+ * @Return: 0 on success, negative errno on failure
+ */
+int shd_connector_get_info(struct msm_display_info *info, void *data)
+{
+	struct shd_display *display = data;
+	int rc;
+
+	if (!info || !data || !display->base) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	/* first caller resolves the base encoder and connector */
+	if (!display->base->encoder) {
+		rc = shd_display_init_base_encoder(display->drm_dev,
+			display->base);
+		if (rc) {
+			SDE_ERROR("failed to find base encoder\n");
+			return rc;
+		}
+
+		rc = shd_display_init_base_connector(display->drm_dev,
+			display->base);
+		if (rc) {
+			SDE_ERROR("failed to find base connector\n");
+			return rc;
+		}
+	}
+
+	info->intf_type = display->base->connector_type;
+	info->capabilities = MSM_DISPLAY_CAP_VID_MODE |
+		MSM_DISPLAY_CAP_HOT_PLUG;
+	info->is_connected = true;
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = display->base->intf_idx;
+	info->capabilities |= MSM_DISPLAY_CAP_SHARED;
+
+	return 0;
+}
+
+/**
+ * shd_connector_post_init - connector op: mark the new connector as shared
+ *	and publish its blend-stage/ROI properties to userspace.
+ * @connector: newly created DRM connector
+ * @info: kms info blob to append key/value pairs to
+ * @display: shd_display handle
+ * @Return: 0 (always succeeds)
+ */
+int shd_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct shd_display *disp = display;
+	struct sde_connector *conn;
+
+	disp->connector = connector;
+	conn = to_sde_connector(connector);
+	conn->is_shared = true;
+	conn->shared_roi = disp->roi;
+
+	sde_kms_info_add_keyint(info, "max_blendstages",
+				disp->stage_range.size);
+
+	sde_kms_info_add_keystr(info, "display type",
+				disp->display_type);
+
+	/* source and ROI heights differ: expose the padded ROI height */
+	if (disp->src.h != disp->roi.h) {
+		sde_kms_info_add_keyint(info, "padding height",
+				disp->roi.h);
+	}
+
+	return 0;
+}
+
+/**
+ * shd_connector_detect - connector op: delegate detection to the saved
+ *	detect op of the base connector.
+ * @conn: shared DRM connector
+ * @force: force-detect flag passed through to the base op
+ * @display: shd_display handle
+ * @Return: base connector status, or disconnected when unavailable
+ */
+enum drm_connector_status shd_connector_detect(struct drm_connector *conn,
+		bool force,
+		void *display)
+{
+	struct shd_display *disp = display;
+	struct sde_connector *sde_conn;
+	enum drm_connector_status status = connector_status_disconnected;
+
+	if (!conn || !display || !disp->base) {
+		pr_err("invalid params\n");
+		goto end;
+	}
+
+	mutex_lock(&disp->base->base_mutex);
+	/* fix: the saved base op may be NULL; don't call through it */
+	if (disp->base->connector && disp->base->ops.detect) {
+		sde_conn = to_sde_connector(disp->base->connector);
+		status = disp->base->ops.detect(disp->base->connector,
+				force, sde_conn->display);
+	}
+	mutex_unlock(&disp->base->base_mutex);
+
+end:
+	return status;
+}
+
+/**
+ * shd_connector_get_modes - connector op: expose one fixed mode derived
+ *	from the base mode, shrunk to the shared display's source size with
+ *	zero blanking.
+ * @connector: shared DRM connector
+ * @display: shd_display handle
+ * @Return: number of modes added (1 on success, 0 on allocation failure)
+ */
+int shd_connector_get_modes(struct drm_connector *connector,
+		void *display)
+{
+	struct drm_display_mode drm_mode;
+	struct shd_display *disp = display;
+	struct drm_display_mode *m;
+
+	memcpy(&drm_mode, &disp->base->mode, sizeof(drm_mode));
+
+	/* collapse all horizontal blanking: timing comes from the base */
+	drm_mode.hdisplay = disp->src.w;
+	drm_mode.hsync_start = drm_mode.hdisplay;
+	drm_mode.hsync_end = drm_mode.hsync_start;
+	drm_mode.htotal = drm_mode.hsync_end;
+
+	drm_mode.vdisplay = disp->src.h;
+	drm_mode.vsync_start = drm_mode.vdisplay;
+	drm_mode.vsync_end = drm_mode.vsync_start;
+	drm_mode.vtotal = drm_mode.vsync_end;
+
+	m = drm_mode_duplicate(disp->drm_dev, &drm_mode);
+	/* fix: drm_mode_duplicate can fail; don't deref NULL */
+	if (!m)
+		return 0;
+	drm_mode_set_name(m);
+	drm_mode_probed_add(connector, m);
+
+	return 1;
+}
+
+/*
+ * All modes are accepted: the shared connector only ever exposes the single
+ * fixed mode built in shd_connector_get_modes.
+ */
+enum drm_mode_status shd_connector_mode_valid(struct drm_connector *connector,
+		struct drm_display_mode *mode,
+		void *display)
+{
+	return MODE_OK;
+}
+
+/*
+ * No-op bridge callbacks: the shared display defers all real work to the
+ * base display's bridge, but the DRM bridge interface requires these hooks.
+ */
+static int shd_bridge_attach(struct drm_bridge *shd_bridge)
+{
+	return 0;
+}
+
+static void shd_bridge_pre_enable(struct drm_bridge *drm_bridge)
+{
+}
+
+static void shd_bridge_enable(struct drm_bridge *drm_bridge)
+{
+}
+
+static void shd_bridge_disable(struct drm_bridge *drm_bridge)
+{
+}
+
+static void shd_bridge_post_disable(struct drm_bridge *drm_bridge)
+{
+}
+
+
+static void shd_bridge_mode_set(struct drm_bridge *drm_bridge,
+				struct drm_display_mode *mode,
+				struct drm_display_mode *adjusted_mode)
+{
+}
+
+static bool shd_bridge_mode_fixup(struct drm_bridge *drm_bridge,
+				  const struct drm_display_mode *mode,
+				  struct drm_display_mode *adjusted_mode)
+{
+	return true;
+}
+
+static const struct drm_bridge_funcs shd_bridge_ops = {
+	.attach       = shd_bridge_attach,
+	.mode_fixup   = shd_bridge_mode_fixup,
+	.pre_enable   = shd_bridge_pre_enable,
+	.enable       = shd_bridge_enable,
+	.disable      = shd_bridge_disable,
+	.post_disable = shd_bridge_post_disable,
+	.mode_set     = shd_bridge_mode_set,
+};
+
+/**
+ * shd_drm_bridge_init - allocate and attach the no-op bridge for a shared
+ *	display and register it with the drm private bridge list.
+ * @data: shd_display handle
+ * @encoder: DRM encoder to hang the bridge off
+ * @Return: 0 on success, negative errno on failure
+ */
+int shd_drm_bridge_init(void *data, struct drm_encoder *encoder)
+{
+	int rc = 0;
+	struct shd_bridge *bridge;
+	struct drm_device *dev;
+	struct shd_display *display = data;
+	struct msm_drm_private *priv = NULL;
+
+	bridge = kzalloc(sizeof(*bridge), GFP_KERNEL);
+	if (!bridge) {
+		rc = -ENOMEM;
+		goto error;
+	}
+
+	dev = display->drm_dev;
+	bridge->display = display;
+	bridge->base.funcs = &shd_bridge_ops;
+	bridge->base.encoder = encoder;
+
+	priv = dev->dev_private;
+
+	rc = drm_bridge_attach(dev, &bridge->base);
+	if (rc) {
+		SDE_ERROR("failed to attach bridge, rc=%d\n", rc);
+		goto error_free_bridge;
+	}
+
+	encoder->bridge = &bridge->base;
+	/*
+	 * NOTE(review): no bounds check before indexing priv->bridges[] —
+	 * confirm num_bridges cannot exceed the array capacity.
+	 */
+	priv->bridges[priv->num_bridges++] = &bridge->base;
+	display->bridge = &bridge->base;
+
+	return 0;
+
+error_free_bridge:
+	kfree(bridge);
+error:
+	return rc;
+}
+
+/**
+ * shd_drm_bridge_deinit - detach and free the bridge of a shared display.
+ * @data: shd_display handle
+ *
+ * container_of() on a NULL display->bridge yields NULL here because base is
+ * the first member of struct shd_bridge, so the guard below holds.
+ */
+void shd_drm_bridge_deinit(void *data)
+{
+	struct shd_display *display = data;
+	struct shd_bridge *bridge = container_of(display->bridge,
+		struct shd_bridge, base);
+
+	if (bridge && bridge->base.encoder)
+		bridge->base.encoder->bridge = NULL;
+
+	kfree(bridge);
+}
+
+/**
+ * sde_shd_bind - bind shared display device with controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ * Returns: Zero on success
+ */
+static int sde_shd_bind(struct device *dev, struct device *master, void *data)
+{
+	struct shd_display *shd_dev;
+
+	shd_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!shd_dev) {
+		SDE_ERROR("invalid shd device\n");
+		return -EINVAL;
+	}
+
+	/* cache the DRM device owned by the master component */
+	shd_dev->drm_dev = dev_get_drvdata(master);
+
+	return 0;
+}
+
+/**
+ * sde_shd_unbind - unbind shared display from controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ */
+static void sde_shd_unbind(struct device *dev,
+		struct device *master, void *data)
+{
+	struct shd_display *shd_dev;
+
+	shd_dev = platform_get_drvdata(to_platform_device(dev));
+	if (!shd_dev) {
+		SDE_ERROR("invalid shd device\n");
+		return;
+	}
+
+	/* drop the cached DRM device reference */
+	shd_dev->drm_dev = NULL;
+}
+
+/* component ops registered with the master DRM device via component_add() */
+static const struct component_ops sde_shd_comp_ops = {
+	.bind = sde_shd_bind,
+	.unbind = sde_shd_unbind,
+};
+
+/**
+ * sde_shd_parse_display - parse a shared display's device tree configuration
+ * @display: shared display to populate
+ *
+ * Reads the source mode size, destination ROI, optional blend-stage range
+ * and the phandle of the base display from the DT node.
+ * Return: 0 on success, negative errno on parse failure
+ */
+static int sde_shd_parse_display(struct shd_display *display)
+{
+	struct device_node *of_node = display->pdev->dev.of_node;
+	struct device_node *of_src = NULL, *of_roi = NULL;
+	u32 src_w, src_h, dst_x, dst_y, dst_w, dst_h;
+	u32 range[2] = {0, 0};	/* fix: was read uninitialized on parse fail */
+	int rc;
+
+	display->name = of_node->full_name;
+
+	display->display_type = of_get_property(of_node,
+		"qcom,display-type", NULL);
+	if (!display->display_type)
+		display->display_type = "unknown";
+
+	display->base_of = of_parse_phandle(of_node,
+		"qcom,shared-display-base", 0);
+	if (!display->base_of) {
+		pr_err("No base device present\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	of_src = of_get_child_by_name(of_node, "qcom,shared-display-src-mode");
+	if (!of_src) {
+		pr_err("No src mode present\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_src, "qcom,mode-h-active",
+		&src_w);
+	if (rc) {
+		pr_err("Failed to parse h active\n");
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_src, "qcom,mode-v-active",
+		&src_h);
+	if (rc) {
+		pr_err("Failed to parse v active\n");
+		goto error;
+	}
+
+	of_roi = of_get_child_by_name(of_node, "qcom,shared-display-dst-mode");
+	if (!of_roi) {
+		pr_err("No roi mode present\n");
+		rc = -ENODEV;
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_roi, "qcom,mode-x-offset",
+		&dst_x);
+	if (rc) {
+		pr_err("Failed to parse x offset\n");
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_roi, "qcom,mode-y-offset",
+		&dst_y);
+	if (rc) {
+		pr_err("Failed to parse y offset\n");
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_roi, "qcom,mode-width",
+		&dst_w);
+	if (rc) {
+		pr_err("Failed to parse roi width\n");
+		goto error;
+	}
+
+	rc = of_property_read_u32(of_roi, "qcom,mode-height",
+		&dst_h);
+	if (rc) {
+		pr_err("Failed to parse roi height\n");
+		goto error;
+	}
+
+	rc = of_property_read_u32_array(of_node, "qcom,blend-stage-range",
+		range, 2);
+	if (rc) {
+		/* fix: property is optional — keep the zeroed default and
+		 * don't fail the probe (original fell through but still
+		 * returned the error code).
+		 */
+		pr_err("Failed to parse blend stage range\n");
+		rc = 0;
+	}
+
+	display->src.w = src_w;
+	display->src.h = src_h;
+	display->roi.x = dst_x;
+	display->roi.y = dst_y;
+	display->roi.w = dst_w;
+	display->roi.h = dst_h;
+	display->stage_range.start = range[0];
+	display->stage_range.size = range[1];
+
+	SHD_DEBUG("%s src %dx%d dst %d,%d %dx%d range %d-%d\n", display->name,
+		display->src.w, display->src.h,
+		display->roi.x, display->roi.y,
+		display->roi.w, display->roi.h,
+		display->stage_range.start,
+		display->stage_range.size);
+
+error:
+	/* fix: drop DT node references; of_node_put(NULL) is a no-op */
+	of_node_put(of_roi);
+	of_node_put(of_src);
+	return rc;
+}
+
+/**
+ * sde_shd_parse_base - parse the base display's fixed mode and interface
+ *	index from its device tree node into base->mode.
+ * @base: base display to populate
+ * @Return: 0 on success, negative errno on missing node or property
+ */
+static int sde_shd_parse_base(struct shd_display_base *base)
+{
+	struct device_node *of_node = base->of_node;
+	struct device_node *node;
+	struct drm_display_mode *mode = &base->mode;
+	u32 h_front_porch, h_pulse_width, h_back_porch;
+	u32 v_front_porch, v_pulse_width, v_back_porch;
+	bool h_active_high, v_active_high;
+	u32 flags = 0;
+	int rc;
+
+	rc = of_property_read_u32(of_node, "qcom,shared-display-base-intf",
+				&base->intf_idx);
+	if (rc) {
+		pr_err("failed to read base intf, rc=%d\n", rc);
+		goto fail;
+	}
+
+	node = of_get_child_by_name(of_node, "qcom,shared-display-base-mode");
+	if (!node) {
+		pr_err("No base mode present\n");
+		rc = -ENODEV;
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-active",
+					&mode->hdisplay);
+	if (rc) {
+		SDE_ERROR("failed to read h-active, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-front-porch",
+					&h_front_porch);
+	if (rc) {
+		SDE_ERROR("failed to read h-front-porch, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-pulse-width",
+					&h_pulse_width);
+	if (rc) {
+		SDE_ERROR("failed to read h-pulse-width, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-h-back-porch",
+					&h_back_porch);
+	if (rc) {
+		SDE_ERROR("failed to read h-back-porch, rc=%d\n", rc);
+		goto fail;
+	}
+
+	h_active_high = of_property_read_bool(node,
+					"qcom,mode-h-active-high");
+
+	rc = of_property_read_u32(node, "qcom,mode-v-active",
+					&mode->vdisplay);
+	if (rc) {
+		SDE_ERROR("failed to read v-active, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-v-front-porch",
+					&v_front_porch);
+	if (rc) {
+		SDE_ERROR("failed to read v-front-porch, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-v-pulse-width",
+					&v_pulse_width);
+	if (rc) {
+		SDE_ERROR("failed to read v-pulse-width, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-v-back-porch",
+					&v_back_porch);
+	if (rc) {
+		SDE_ERROR("failed to read v-back-porch, rc=%d\n", rc);
+		goto fail;
+	}
+
+	v_active_high = of_property_read_bool(node,
+					"qcom,mode-v-active-high");
+
+	rc = of_property_read_u32(node, "qcom,mode-refresh-rate",
+					&mode->vrefresh);
+	if (rc) {
+		SDE_ERROR("failed to read refresh-rate, rc=%d\n", rc);
+		goto fail;
+	}
+
+	rc = of_property_read_u32(node, "qcom,mode-clock-in-khz",
+					&mode->clock);
+	if (rc) {
+		SDE_ERROR("failed to read clock, rc=%d\n", rc);
+		goto fail;
+	}
+
+	/* derive absolute sync/total timings from the porch values */
+	mode->hsync_start = mode->hdisplay + h_front_porch;
+	mode->hsync_end = mode->hsync_start + h_pulse_width;
+	mode->htotal = mode->hsync_end + h_back_porch;
+	mode->vsync_start = mode->vdisplay + v_front_porch;
+	mode->vsync_end = mode->vsync_start + v_pulse_width;
+	mode->vtotal = mode->vsync_end + v_back_porch;
+	if (h_active_high)
+		flags |= DRM_MODE_FLAG_PHSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NHSYNC;
+	if (v_active_high)
+		flags |= DRM_MODE_FLAG_PVSYNC;
+	else
+		flags |= DRM_MODE_FLAG_NVSYNC;
+	mode->flags = flags;
+
+	SHD_DEBUG("base mode h[%d,%d,%d,%d] v[%d,%d,%d,%d] %d %xH %d\n",
+		mode->hdisplay, mode->hsync_start,
+		mode->hsync_end, mode->htotal, mode->vdisplay,
+		mode->vsync_start, mode->vsync_end, mode->vtotal,
+		mode->vrefresh, mode->flags, mode->clock);
+
+fail:
+	return rc;
+}
+
+/**
+ * sde_shd_probe - load shared display module
+ * @pdev: Pointer to platform device
+ *
+ * Parses the display node, locates (or allocates and parses) its base
+ * display, links the display under the base, and registers the component.
+ * Return: 0 on success, negative errno on failure
+ */
+static int sde_shd_probe(struct platform_device *pdev)
+{
+	struct shd_display *shd_dev;
+	struct shd_display_base *base;
+	int ret;
+
+	shd_dev = devm_kzalloc(&pdev->dev, sizeof(*shd_dev), GFP_KERNEL);
+	if (!shd_dev)
+		return -ENOMEM;
+
+	shd_dev->pdev = pdev;
+
+	ret = sde_shd_parse_display(shd_dev);
+	if (ret) {
+		SDE_ERROR("failed to parse shared display\n");
+		goto error;
+	}
+
+	platform_set_drvdata(pdev, shd_dev);
+
+	/* reuse an already-registered base for the same DT node */
+	list_for_each_entry(base, &g_base_list, head) {
+		if (base->of_node == shd_dev->base_of)
+			goto next;
+	}
+
+	base = devm_kzalloc(&pdev->dev, sizeof(*base), GFP_KERNEL);
+	if (!base) {
+		ret = -ENOMEM;
+		goto error;
+	}
+
+	mutex_init(&base->base_mutex);
+	INIT_LIST_HEAD(&base->disp_list);
+	base->of_node = shd_dev->base_of;
+
+	ret = sde_shd_parse_base(base);
+	if (ret) {
+		SDE_ERROR("failed to parse shared display base\n");
+		goto base_error;
+	}
+
+	list_add_tail(&base->head, &g_base_list);
+
+next:
+	shd_dev->base = base;
+	list_add_tail(&shd_dev->head, &base->disp_list);
+	SHD_DEBUG("add shd to intf %d\n", base->intf_idx);
+
+	ret = component_add(&pdev->dev, &sde_shd_comp_ops);
+	if (ret) {
+		/* fix: pr_err was unreachable (placed after the goto) */
+		pr_err("component add failed\n");
+		goto disp_error;
+	}
+
+	return 0;
+
+disp_error:
+	/*
+	 * fix: unlink before freeing; only free the base when this probe's
+	 * display was its last user (it may be shared with earlier probes).
+	 */
+	list_del_init(&shd_dev->head);
+	if (list_empty(&base->disp_list)) {
+		list_del_init(&base->head);
+		devm_kfree(&pdev->dev, base);
+	}
+	goto error;
+base_error:
+	devm_kfree(&pdev->dev, base);
+error:
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, shd_dev);
+	return ret;
+}
+
+/**
+ * sde_shd_remove - unload shared display module
+ * @pdev: Pointer to platform device
+ *
+ * Unlinks the display from its base and frees the base once its display
+ * list becomes empty.
+ */
+static int sde_shd_remove(struct platform_device *pdev)
+{
+	struct shd_display *shd_dev;
+
+	shd_dev = platform_get_drvdata(pdev);
+	if (!shd_dev)
+		return 0;
+
+	SHD_DEBUG("\n");
+
+	mutex_lock(&shd_dev->base->base_mutex);
+	list_del_init(&shd_dev->head);
+	/* last display gone: retire the base as well */
+	if (list_empty(&shd_dev->base->disp_list)) {
+		list_del_init(&shd_dev->base->head);
+		mutex_unlock(&shd_dev->base->base_mutex);
+		devm_kfree(&pdev->dev, shd_dev->base);
+	} else
+		mutex_unlock(&shd_dev->base->base_mutex);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, shd_dev);
+
+	return 0;
+}
+
+/*
+ * NOTE(review): this table duplicates shd_dt_match defined near the top of
+ * the file — confirm and keep only one.
+ */
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,shared-display"},
+	{}
+};
+
+static struct platform_driver sde_shd_driver = {
+	.probe = sde_shd_probe,
+	.remove = sde_shd_remove,
+	.driver = {
+		.name = "sde_shd",
+		.of_match_table = dt_match,
+		.suppress_bind_attrs = true,
+	},
+};
+
+/* module entry/exit: register the shared display platform driver */
+static int __init sde_shd_register(void)
+{
+	return platform_driver_register(&sde_shd_driver);
+}
+
+static void __exit sde_shd_unregister(void)
+{
+	platform_driver_unregister(&sde_shd_driver);
+}
+
+module_init(sde_shd_register);
+module_exit(sde_shd_unregister);
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/msm/sde/sde_shd.h b/drivers/gpu/drm/msm/sde/sde_shd.h
new file mode 100644
index 000000000000..a3334ccd51d2
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_shd.h
@@ -0,0 +1,231 @@
+/*
+ * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_SHD_H_
+#define _SDE_SHD_H_
+
+#include <linux/types.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "msm_drv.h"
+
+/* Position and size of a shared display's window within the base mode */
+struct shd_mode_info {
+ int x_offset;
+ int y_offset;
+ int width;
+ int height;
+};
+
+/* Contiguous range of blend stages assigned to one shared display */
+struct shd_stage_range {
+ u32 start;
+ u32 size;
+};
+
+/*
+ * struct shd_display_base - physical display shared by several
+ * shd_display instances; owns the real CRTC/encoder/connector and the
+ * list of virtual displays attached to it (protected by base_mutex).
+ */
+struct shd_display_base {
+ struct mutex base_mutex;
+ struct drm_display_mode mode;
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct list_head head;
+ struct list_head disp_list;
+ struct device_node *of_node;
+ struct sde_connector_ops ops;
+
+ int intf_idx;
+ int connector_type;
+ bool enabled;
+};
+
+/*
+ * struct shd_display - one virtual (shared) display: its source/ROI
+ * rectangles within the base display, stage range and DRM objects.
+ */
+struct shd_display {
+ struct drm_device *drm_dev;
+ const char *name;
+ const char *display_type;
+
+ struct shd_display_base *base;
+ struct drm_bridge *bridge;
+ struct drm_connector *connector;
+
+ struct device_node *base_of;
+ struct sde_rect src;
+ struct sde_rect roi;
+ struct shd_stage_range stage_range;
+
+ struct platform_device *pdev;
+ struct completion vsync_comp;
+ struct list_head head;
+
+ bool enabled;
+ bool cont_splash_enabled;
+};
+
+#ifdef CONFIG_DRM_SDE_SHD
+int shd_display_get_num_of_displays(void);
+int shd_display_get_displays(void **displays, int count);
+int shd_display_post_init(struct sde_kms *sde_kms);
+void shd_display_prepare_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state);
+void shd_display_complete_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state);
+
+/**
+ * shd_connector_post_init - callback to perform additional initialization steps
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Zero on success
+ */
+int shd_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * shd_connector_detect - callback to determine if connector is connected
+ * @connector: Pointer to drm connector structure
+ * @force: Force detect setting from drm framework
+ * @display: Pointer to private display handle
+ * Returns: Connector 'is connected' status
+ */
+enum drm_connector_status shd_connector_detect(struct drm_connector *conn,
+ bool force,
+ void *display);
+
+/**
+ * shd_connector_get_modes - callback to add drm modes via drm_mode_probed_add()
+ * @connector: Pointer to drm connector structure
+ * @display: Pointer to private display handle
+ * Returns: Number of modes added
+ */
+int shd_connector_get_modes(struct drm_connector *connector,
+ void *display);
+
+/**
+ * shd_connector_mode_valid - callback to determine if specified mode is valid
+ * @connector: Pointer to drm connector structure
+ * @mode: Pointer to drm mode structure
+ * @display: Pointer to private display handle
+ * Returns: Validity status for specified mode
+ */
+enum drm_mode_status shd_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display);
+
+/**
+ * shd_connector_get_info - retrieve connector display info
+ * @connector: Pointer to drm connector structure
+ * @info: Out parameter. Information of the connected display
+ * @display: Pointer to private display structure
+ * Returns: zero on success
+ */
+int shd_connector_get_info(struct msm_display_info *info, void *display);
+
+/**
+ * shd_display_drm_bridge_init() - initializes DRM bridge object
+ * for shared display
+ * @display: Handle to the display.
+ * @encoder: Pointer to the encoder object which is connected to the
+ * display.
+ * Return: error code.
+ */
+int shd_drm_bridge_init(void *display,
+ struct drm_encoder *encoder);
+
+/**
+ * shd_display_drm_bridge_deinit() - destroys DRM bridge for the display
+ * @display: Handle to the display.
+ * Return: error code.
+ */
+void shd_drm_bridge_deinit(void *display);
+#else
+static inline
+int shd_display_get_num_of_displays(void)
+{
+ return 0;
+}
+
+static inline
+int shd_display_get_displays(void **displays, int count)
+{
+ return 0;
+}
+
+static inline
+int shd_display_post_init(struct sde_kms *sde_kms)
+{
+ return 0;
+}
+
+static inline
+void shd_display_prepare_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state)
+{
+}
+
+static inline
+void shd_display_complete_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state)
+{
+}
+
+static inline
+int shd_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display)
+{
+ return 0;
+}
+
+static inline
+enum drm_connector_status shd_connector_detect(struct drm_connector *conn,
+ bool force,
+ void *display)
+{
+ return connector_status_unknown;
+}
+
+static inline
+int shd_connector_get_modes(struct drm_connector *connector,
+ void *display)
+{
+ return 0;
+}
+
+static inline
+enum drm_mode_status shd_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode,
+ void *display)
+{
+ return MODE_ERROR;
+}
+
+static inline
+int shd_connector_get_info(struct msm_display_info *info, void *display)
+{
+ return -EINVAL;
+}
+
+static inline
+int shd_drm_bridge_init(void *display,
+ struct drm_encoder *encoder)
+{
+ return 0;
+}
+
+static inline
+void shd_drm_bridge_deinit(void *display)
+{
+}
+#endif
+
+#endif /* _SDE_SHD_H_ */
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.c b/drivers/gpu/drm/msm/sde/sde_splash.c
new file mode 100644
index 000000000000..765e3634a936
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_splash.c
@@ -0,0 +1,1204 @@
+/*
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/of_address.h>
+#include <linux/debugfs.h>
+#include <linux/memblock.h>
+#include <soc/qcom/early_domain.h>
+#include <linux/suspend.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "sde_kms.h"
+#include "sde_hw_mdss.h"
+#include "sde_hw_util.h"
+#include "sde_hw_intf.h"
+#include "sde_hw_catalog.h"
+#include "sde_rm.h"
+#include "dsi_display.h"
+#include "sde_hdmi.h"
+#include "sde_crtc.h"
+#include "sde_plane.h"
+#include "sde_shd.h"
+
+#define MDP_SSPP_TOP0_OFF 0x1000
+#define DISP_INTF_SEL 0x004
+#define SPLIT_DISPLAY_EN 0x2F4
+
+/* scratch registers */
+#define SCRATCH_REGISTER_0 0x014
+#define SCRATCH_REGISTER_1 0x018
+#define SCRATCH_REGISTER_2 0x01C
+
+#define SDE_LK_RUNNING_VALUE 0xC001CAFE
+#define SDE_LK_STOP_SPLASH_VALUE 0xDEADDEAD
+#define SDE_LK_EXIT_VALUE 0xDEADBEEF
+#define SDE_LK_INTERMEDIATE_STOP 0xBEEFBEEF
+#define SDE_LK_KERNEL_SPLASH_TALK_LOOP 20
+
+#define INTF_HDMI_SEL (BIT(25) | BIT(24))
+#define INTF_DSI0_SEL BIT(8)
+#define INTF_DSI1_SEL BIT(16)
+
+static DEFINE_MUTEX(sde_splash_lock);
+
+/*
+ * Per-SSPP flush bit plus layer-mixer and mixer-extension masks, used to
+ * strip a reserved pipe's contribution when splash layers are released.
+ * Cursor pipes carry no bits.
+ */
+static struct splash_pipe_caps splash_pipe_cap[MAX_BLOCKS] = {
+ {SSPP_VIG0, BIT(0), 0x7 << 0, BIT(0)},
+ {SSPP_VIG1, BIT(1), 0x7 << 3, BIT(2)},
+ {SSPP_VIG2, BIT(2), 0x7 << 6, BIT(4)},
+ {SSPP_VIG3, BIT(18), 0x7 << 26, BIT(6)},
+ {SSPP_RGB0, BIT(3), 0x7 << 9, BIT(8)},
+ {SSPP_RGB1, BIT(4), 0x7 << 12, BIT(10)},
+ {SSPP_RGB2, BIT(5), 0x7 << 15, BIT(12)},
+ {SSPP_RGB3, BIT(19), 0x7 << 29, BIT(14)},
+ {SSPP_DMA0, BIT(11), 0x7 << 18, BIT(16)},
+ {SSPP_DMA1, BIT(12), 0x7 << 21, BIT(18)},
+ {SSPP_CURSOR0, 0, 0, 0},
+ {SSPP_CURSOR1, 0, 0, 0},
+};
+
+/*
+ * Map an SSPP id to its slot in splash_pipe_cap[].
+ * Returns MAX_BLOCKS when the pipe is not present in the table.
+ */
+static inline uint32_t _sde_splash_get_pipe_arrary_index(enum sde_sspp pipe)
+{
+	uint32_t idx;
+
+	for (idx = 0; idx < MAX_BLOCKS; idx++) {
+		if (splash_pipe_cap[idx].pipe == pipe)
+			return idx;
+	}
+
+	return MAX_BLOCKS;
+}
+
+/*
+ * In order to free reserved memory from bootup, and since we are not
+ * able to call the __init free functions, we need to release this
+ * memory ourselves using free_reserved_page().
+ */
+static void _sde_splash_free_bootup_memory_to_system(phys_addr_t phys,
+ size_t size)
+{
+ unsigned long pfn_start, pfn_end, pfn_idx;
+
+ /* drop the memblock reservation, then hand pages back one by one */
+ memblock_free(phys, size);
+
+ pfn_start = phys >> PAGE_SHIFT;
+ pfn_end = (phys + size) >> PAGE_SHIFT;
+
+ for (pfn_idx = pfn_start; pfn_idx < pfn_end; pfn_idx++)
+ free_reserved_page(pfn_to_page(pfn_idx));
+}
+
+/*
+ * _sde_splash_parse_dt_get_display_node - read the "contiguous-region"
+ * phandles and record each reserved display memory region's physical
+ * address and size, plus an array slot for the backing GEM object.
+ * @dev: DRM device whose of_node carries the property
+ * @sinfo: splash info to populate
+ *
+ * Returns 0 on success, negative errno on failure. All partial
+ * allocations are released on the error path.
+ */
+static int _sde_splash_parse_dt_get_display_node(struct drm_device *dev,
+		struct sde_splash_info *sinfo)
+{
+	unsigned long size = 0;
+	dma_addr_t start;
+	struct device_node *node;
+	int ret = 0, i = 0, len = 0;
+
+	/* get reserved memory for display module */
+	if (of_get_property(dev->dev->of_node, "contiguous-region", &len))
+		sinfo->splash_mem_num = len / sizeof(u32);
+	else
+		sinfo->splash_mem_num = 0;
+
+	/*
+	 * kcalloc() zero-fills (so slots whose phandle is missing stay
+	 * NULL/0 instead of holding stale kmalloc data) and also guards
+	 * the count * size multiplication against overflow.
+	 */
+	sinfo->splash_mem_paddr = kcalloc(sinfo->splash_mem_num,
+				sizeof(phys_addr_t), GFP_KERNEL);
+	if (!sinfo->splash_mem_paddr) {
+		SDE_ERROR("alloc splash_mem_paddr failed\n");
+		return -ENOMEM;
+	}
+
+	sinfo->splash_mem_size = kcalloc(sinfo->splash_mem_num,
+				sizeof(size_t), GFP_KERNEL);
+	if (!sinfo->splash_mem_size) {
+		SDE_ERROR("alloc splash_mem_size failed\n");
+		ret = -ENOMEM;
+		goto free_paddr;
+	}
+
+	sinfo->obj = kcalloc(sinfo->splash_mem_num,
+			sizeof(struct drm_gem_object *), GFP_KERNEL);
+	if (!sinfo->obj) {
+		SDE_ERROR("construct splash gem objects failed\n");
+		ret = -ENOMEM;
+		goto free_size;
+	}
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		struct resource r;
+
+		node = of_parse_phandle(dev->dev->of_node,
+				"contiguous-region", i);
+		if (!node)
+			continue;
+
+		ret = of_address_to_resource(node, 0, &r);
+		if (ret) {
+			/* the original path leaked the node ref and arrays */
+			of_node_put(node);
+			goto free_obj;
+		}
+
+		/*
+		 * resource_size() is end - start + 1; the original
+		 * 'r.end - r.start' undercounted by one byte, which made
+		 * the later page-wise free skip the region's last page.
+		 */
+		size = resource_size(&r);
+		start = (dma_addr_t)r.start;
+
+		sinfo->splash_mem_paddr[i] = start;
+		sinfo->splash_mem_size[i] = size;
+
+		DRM_INFO("blk: %d, addr:%pK, size:%pK\n",
+			i, (void *)sinfo->splash_mem_paddr[i],
+			(void *)sinfo->splash_mem_size[i]);
+
+		of_node_put(node);
+	}
+
+	return 0;
+
+free_obj:
+	kfree(sinfo->obj);
+	sinfo->obj = NULL;
+free_size:
+	kfree(sinfo->splash_mem_size);
+	sinfo->splash_mem_size = NULL;
+free_paddr:
+	kfree(sinfo->splash_mem_paddr);
+	sinfo->splash_mem_paddr = NULL;
+
+	return ret;
+}
+
+/* Ask the early-domain framework whether LK's early display is still up */
+static bool _sde_splash_lk_check(void)
+{
+ return get_early_service_status(EARLY_DISPLAY);
+}
+
+/**
+ * _sde_splash_notify_lk_stop_splash.
+ *
+ * Function to stop early splash in LK.
+ */
+/**
+ * _sde_splash_notify_lk_stop_splash - ask LK to stop early splash.
+ *
+ * After requesting shutdown, poll LK's scratch memory for the
+ * intermediate-stop ack so its concurrent flush is known to be done
+ * before the kernel proceeds.
+ */
+static inline void _sde_splash_notify_lk_stop_splash(void)
+{
+	int i;
+	bool acked = false;
+	int32_t *scratch_pad;
+
+	/* request LK to stop splash */
+	request_early_service_shutdown(EARLY_DISPLAY);
+
+	/*
+	 * Before proceeding, kernel needs to check bootloader's
+	 * intermediate status to ensure LK's concurrent flush is done.
+	 */
+	for (i = 0; i < SDE_LK_KERNEL_SPLASH_TALK_LOOP; i++) {
+		scratch_pad =
+			(int32_t *)get_service_shared_mem_start(EARLY_DISPLAY);
+		if (!scratch_pad)
+			continue;
+
+		if ((*scratch_pad != SDE_LK_INTERMEDIATE_STOP) &&
+				_sde_splash_lk_check()) {
+			DRM_INFO("wait for LK's intermediate ack\n");
+			msleep(20);
+		} else {
+			SDE_DEBUG("received LK intermediate ack\n");
+			acked = true;
+			break;
+		}
+	}
+
+	/*
+	 * Track success explicitly: the original compared the
+	 * post-incremented counter against the loop bound, which never
+	 * matched on exhaustion (counter ended at bound + 1) and
+	 * false-alarmed when the ack arrived on the final iteration.
+	 */
+	if (!acked)
+		SDE_ERROR("Loop talk for LK and Kernel failed\n");
+}
+
+/*
+ * _sde_splash_gem_new - back each reserved splash region with a GEM
+ * object so it can later be mapped through the SMMU.
+ * @dev: DRM device
+ * @sinfo: splash info holding region count, sizes and the object array
+ *
+ * Returns 0 on success, negative errno on failure. On failure every
+ * object allocated so far is released and its slot cleared.
+ */
+static int _sde_splash_gem_new(struct drm_device *dev,
+		struct sde_splash_info *sinfo)
+{
+	int i, ret;
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		sinfo->obj[i] = msm_gem_new(dev,
+				sinfo->splash_mem_size[i], MSM_BO_UNCACHED);
+
+		if (IS_ERR(sinfo->obj[i])) {
+			ret = PTR_ERR(sinfo->obj[i]);
+			SDE_ERROR("failed to allocate gem, ret=%d\n", ret);
+			/* the failing slot holds an ERR_PTR, not an object */
+			sinfo->obj[i] = NULL;
+			goto error;
+		}
+	}
+
+	return 0;
+
+error:
+	/*
+	 * Only release objects allocated before the failure. The original
+	 * loop walked all splash_mem_num slots, freeing pointers that were
+	 * never written and passing the ERR_PTR slot to
+	 * msm_gem_free_object().
+	 */
+	while (--i >= 0) {
+		msm_gem_free_object(sinfo->obj[i]);
+		sinfo->obj[i] = NULL;
+	}
+
+	return ret;
+}
+
+/*
+ * _sde_splash_get_pages - build the page array and sg table describing
+ * the physically contiguous splash region at @phys for @obj.
+ *
+ * Returns 0 on success, -ENOMEM on allocation failure.
+ */
+static int _sde_splash_get_pages(struct drm_gem_object *obj, phys_addr_t phys)
+{
+	struct msm_gem_object *msm_obj = to_msm_bo(obj);
+	struct page **p;
+	dma_addr_t paddr;
+	int npages = obj->size >> PAGE_SHIFT;
+	int i;
+
+	p = drm_malloc_ab(npages, sizeof(struct page *));
+	if (!p)
+		return -ENOMEM;
+
+	/* region is physically contiguous: one page every PAGE_SIZE */
+	paddr = phys;
+	for (i = 0; i < npages; i++) {
+		p[i] = phys_to_page(paddr);
+		paddr += PAGE_SIZE;
+	}
+
+	msm_obj->sgt = drm_prime_pages_to_sg(p, npages);
+	if (IS_ERR(msm_obj->sgt)) {
+		SDE_ERROR("failed to allocate sgt\n");
+		/*
+		 * Don't leak the page array (the original returned without
+		 * freeing it) and don't leave an ERR_PTR in msm_obj->sgt
+		 * where cleanup paths would treat it as a valid table.
+		 */
+		msm_obj->sgt = NULL;
+		drm_free_large(p);
+		return -ENOMEM;
+	}
+
+	msm_obj->pages = p;
+
+	return 0;
+}
+
+/* Release the page array and sg table attached by _sde_splash_get_pages() */
+static void _sde_splash_destroy_gem_object(struct msm_gem_object *msm_obj)
+{
+ if (msm_obj->pages) {
+ sg_free_table(msm_obj->sgt);
+ kfree(msm_obj->sgt);
+ drm_free_large(msm_obj->pages);
+ msm_obj->pages = NULL;
+ }
+}
+
+/*
+ * Free the parsed splash-memory bookkeeping arrays.
+ * NOTE(review): sinfo->obj, allocated alongside these two arrays in
+ * _sde_splash_parse_dt_get_display_node(), is not freed here - looks
+ * like a small leak; confirm its ownership before adding a kfree.
+ */
+static void _sde_splash_destroy_splash_node(struct sde_splash_info *sinfo)
+{
+ kfree(sinfo->splash_mem_paddr);
+ sinfo->splash_mem_paddr = NULL;
+
+ kfree(sinfo->splash_mem_size);
+ sinfo->splash_mem_size = NULL;
+}
+
+/* Handoff finished: clear the continuous-splash flag on every display */
+static void _sde_splash_update_display_splash_status(struct sde_kms *sde_kms)
+{
+	int idx;
+
+	for (idx = 0; idx < sde_kms->dsi_display_count; idx++) {
+		struct dsi_display *dsi =
+			(struct dsi_display *)sde_kms->dsi_displays[idx];
+
+		dsi->cont_splash_enabled = false;
+	}
+
+	for (idx = 0; idx < sde_kms->hdmi_display_count; idx++) {
+		struct sde_hdmi *hdmi =
+			(struct sde_hdmi *)sde_kms->hdmi_displays[idx];
+
+		hdmi->cont_splash_enabled = false;
+	}
+}
+
+/*
+ * Send a KOBJ_CHANGE uevent telling userspace which reserved pipe has
+ * become available again after splash handoff.
+ *
+ * NOTE(review): the loop below overwrites event_string on every
+ * matching pipe, so only the LAST reserved pipe is reported - confirm
+ * whether the event should instead accumulate all pipes. The spelling
+ * "avialable" is part of the uevent ABI by now; do not "fix" it without
+ * auditing userspace consumers.
+ */
+static void _sde_splash_sent_pipe_update_uevent(struct sde_kms *sde_kms)
+{
+ char *event_string;
+ char *envp[2];
+ struct drm_device *dev;
+ struct device *kdev;
+ int i = 0;
+
+ if (!sde_kms || !sde_kms->dev) {
+ DRM_ERROR("invalid input\n");
+ return;
+ }
+
+ dev = sde_kms->dev;
+ kdev = dev->primary->kdev;
+
+ event_string = kzalloc(SZ_4K, GFP_KERNEL);
+ if (!event_string) {
+ SDE_ERROR("failed to allocate event string\n");
+ return;
+ }
+
+ /* 0xFFFFFFFF marks an unused reservation slot */
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ if (sde_kms->splash_info.reserved_pipe_info[i].pipe_id !=
+ 0xFFFFFFFF)
+ snprintf(event_string, SZ_4K, "pipe%d avialable",
+ sde_kms->splash_info.reserved_pipe_info[i].pipe_id);
+ }
+
+ DRM_INFO("generating pipe update event[%s]", event_string);
+
+ envp[0] = event_string;
+ envp[1] = NULL;
+
+ kobject_uevent_env(&kdev->kobj, KOBJ_CHANGE, envp);
+
+ kfree(event_string);
+}
+
+/* Snapshot both splash connector refcounts under the splash lock */
+static void _sde_splash_get_connector_ref_cnt(struct sde_splash_info *sinfo,
+						u32 *hdmi_cnt, u32 *dsi_cnt)
+{
+	mutex_lock(&sde_splash_lock);
+	*dsi_cnt = sinfo->dsi_connector_cnt;
+	*hdmi_cnt = sinfo->hdmi_connector_cnt;
+	mutex_unlock(&sde_splash_lock);
+}
+
+/*
+ * Unmap each splash region from the SMMU, hand its pages back to the
+ * system allocator and tear down the backing GEM bookkeeping.
+ * Returns 0 on success, -EINVAL if a GEM object is missing (in which
+ * case earlier regions have already been released).
+ */
+static int _sde_splash_free_module_resource(struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo)
+{
+ int i = 0;
+ struct msm_gem_object *msm_obj;
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
+
+ if (!msm_obj)
+ return -EINVAL;
+
+ if (mmu->funcs && mmu->funcs->early_splash_unmap)
+ mmu->funcs->early_splash_unmap(mmu,
+ sinfo->splash_mem_paddr[i], msm_obj->sgt);
+
+ _sde_splash_free_bootup_memory_to_system(
+ sinfo->splash_mem_paddr[i],
+ sinfo->splash_mem_size[i]);
+
+ _sde_splash_destroy_gem_object(msm_obj);
+ }
+
+ return 0;
+}
+
+/*
+ * Return true when the atomic state carries at least one plane with a
+ * framebuffer attached, i.e. this commit actually displays something
+ * and is therefore a valid trigger for stopping LK's splash.
+ */
+static bool _sde_splash_validate_commit(struct sde_kms *sde_kms,
+ struct drm_atomic_state *state)
+{
+ int i, nplanes;
+ struct drm_plane *plane;
+ struct drm_device *dev = sde_kms->dev;
+
+ nplanes = dev->mode_config.num_total_plane;
+
+ for (i = 0; i < nplanes; i++) {
+ plane = state->planes[i];
+
+ /*
+ * As plane state has been swapped, we need to check
+ * fb in state->planes, not fb in state->plane_state.
+ */
+ if (plane && plane->fb)
+ return true;
+ }
+
+ return false;
+}
+
+/*
+ * After splash handoff completes, planes reserved by the bootloader
+ * become available again and the full blend-stage range is usable:
+ * refresh the corresponding plane and CRTC blob properties.
+ */
+static void _sde_splash_update_property(struct sde_kms *sde_kms)
+{
+	struct drm_plane *plane;
+	struct drm_crtc *crtc;
+	struct drm_device *dev = sde_kms->dev;
+	struct sde_mdss_cfg *catalog = sde_kms->catalog;
+
+	drm_for_each_plane(plane, dev)
+		sde_plane_update_blob_property(plane,
+				"plane_unavailability=", 0);
+
+	drm_for_each_crtc(crtc, dev)
+		sde_crtc_update_blob_property(crtc, "max_blendstages=",
+				catalog->max_mixer_blendstages);
+}
+
+/*
+ * Release pipes marked for early release: clear their flush/mixer bits
+ * from the splash masks and invalidate their reservation slots so they
+ * are no longer treated as reserved.
+ */
+static void
+_sde_splash_release_early_splash_layer(struct sde_splash_info *splash_info)
+{
+ int i = 0;
+ uint32_t index;
+
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ if (splash_info->reserved_pipe_info[i].early_release) {
+ index = _sde_splash_get_pipe_arrary_index(
+ splash_info->reserved_pipe_info[i].pipe_id);
+ if (index < MAX_BLOCKS) {
+ /*
+ * Clear flush bits, mixer mask and extension
+ * mask of released pipes.
+ */
+ splash_info->flush_bits &=
+ ~splash_pipe_cap[index].flush_bit;
+ splash_info->mixer_mask &=
+ ~splash_pipe_cap[index].mixer_mask;
+ splash_info->mixer_ext_mask &=
+ ~splash_pipe_cap[index].mixer_ext_mask;
+ }
+
+ /* 0xFFFFFFFF marks the slot as unused from now on */
+ splash_info->reserved_pipe_info[i].pipe_id =
+ 0xFFFFFFFF;
+ splash_info->reserved_pipe_info[i].early_release =
+ false;
+ }
+ }
+}
+
+/*
+ * Return whether the given display still has continuous splash enabled.
+ * Shared displays of either connector type carry the flag in
+ * struct shd_display; otherwise it comes from the connector-specific
+ * display structure. Unknown connector types log an error and report
+ * false.
+ */
+static bool _sde_splash_check_splash(int connector_type,
+				void *display,
+				bool connector_is_shared)
+{
+	bool splash_on = false;
+
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (connector_is_shared)
+			splash_on = ((struct shd_display *)display)
+					->cont_splash_enabled;
+		else
+			splash_on = ((struct sde_hdmi *)display)
+					->cont_splash_enabled;
+		break;
+	case DRM_MODE_CONNECTOR_DSI:
+		if (connector_is_shared)
+			splash_on = ((struct shd_display *)display)
+					->cont_splash_enabled;
+		else
+			splash_on = ((struct dsi_display *)display)
+					->cont_splash_enabled;
+		break;
+	default:
+		SDE_ERROR("%s:invalid connector_type %d\n",
+			__func__, connector_type);
+	}
+
+	return splash_on;
+}
+
+/*
+ * sde_splash_init - prepare splash handoff: vote the data bus (splash
+ * was enabled by the bootloader, so the bus must stay up), verify every
+ * splash region is still memblock-reserved, then read the HW resource
+ * state the bootloader configured via the resource manager.
+ * Returns 0 on success, negative errno otherwise.
+ */
+__ref int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms)
+{
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+ int ret = 0;
+ int i = 0;
+
+ if (!phandle || !kms) {
+ SDE_ERROR("invalid phandle/kms\n");
+ return -EINVAL;
+ }
+
+ sde_kms = to_sde_kms(kms);
+ sinfo = &sde_kms->splash_info;
+
+ sinfo->dsi_connector_cnt = 0;
+ sinfo->hdmi_connector_cnt = 0;
+
+ /* Vote data bus after splash is enabled in bootloader */
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, true);
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ if (!memblock_is_reserved(sinfo->splash_mem_paddr[i])) {
+ SDE_ERROR("LK's splash memory is not reserved\n");
+
+ /* withdraw the vote when failed. */
+ sde_power_data_bus_bandwidth_ctrl(phandle,
+ sde_kms->core_client, false);
+
+ return -EINVAL;
+ }
+ }
+
+ ret = sde_rm_read_resource_for_splash(&sde_kms->rm,
+ (void *)sinfo, sde_kms->catalog);
+
+ return ret;
+}
+
+/*
+ * sde_splash_destroy - abort-path teardown: release GEM bookkeeping for
+ * every splash region, withdraw the data-bus vote taken in
+ * sde_splash_init() and free the parsed memory-node arrays.
+ */
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient)
+{
+ struct msm_gem_object *msm_obj;
+ int i = 0;
+
+ if (!sinfo || !phandle || !pclient) {
+ SDE_ERROR("invalid sde_kms/phandle/pclient\n");
+ return;
+ }
+
+ for (i = 0; i < sinfo->splash_mem_num; i++) {
+ msm_obj = to_msm_bo(sinfo->obj[i]);
+
+ if (msm_obj)
+ _sde_splash_destroy_gem_object(msm_obj);
+ }
+
+ sde_power_data_bus_bandwidth_ctrl(phandle, pclient, false);
+
+ _sde_splash_destroy_splash_node(sinfo);
+}
+
+/*
+ * sde_splash_parse_memory_dt - parse reserved display memory from DT.
+ *
+ * Walks the "contiguous-region" phandles to record the physical address
+ * and size of each memory region handed off by the bootloader (LK).
+ * Returns 0 on success, -EINVAL on any failure.
+ */
+int sde_splash_parse_memory_dt(struct drm_device *dev)
+{
+	struct msm_drm_private *priv = dev->dev_private;
+	struct sde_kms *sde_kms;
+	struct sde_splash_info *sinfo;
+
+	if (!priv || !priv->kms) {
+		SDE_ERROR("Invalid kms\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(priv->kms);
+	sinfo = &sde_kms->splash_info;
+
+	if (_sde_splash_parse_dt_get_display_node(dev, sinfo) != 0) {
+		SDE_ERROR("get display node failed\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+/* Translate a DT plane name into its SSPP hardware id; 0 if unknown */
+static inline u32 _sde_splash_parse_sspp_id(struct sde_mdss_cfg *cfg,
+		const char *name)
+{
+	int idx;
+
+	for (idx = 0; idx < cfg->sspp_count; idx++) {
+		if (strcmp(cfg->sspp[idx].name, name) == 0)
+			return cfg->sspp[idx].id;
+	}
+
+	return 0;
+}
+
+/*
+ * sde_splash_parse_reserved_plane_dt - read "qcom,sde-reserved-plane"
+ * child nodes, record each reserved pipe (and its early-release flag)
+ * and accumulate the combined flush/mixer masks for those pipes.
+ * Returns 0 on success, -EINVAL if the parent node is missing or more
+ * than MAX_BLOCKS children exist.
+ */
+int sde_splash_parse_reserved_plane_dt(struct drm_device *dev,
+ struct sde_splash_info *splash_info,
+ struct sde_mdss_cfg *cfg)
+{
+ struct device_node *parent, *node;
+ struct property *prop;
+ const char *cname;
+ int ret = 0, i = 0;
+ uint32_t index;
+
+ if (!splash_info || !cfg)
+ return -EINVAL;
+
+ parent = of_get_child_by_name(dev->dev->of_node,
+ "qcom,sde-reserved-plane");
+ if (!parent)
+ return -EINVAL;
+
+ /* 0xFFFFFFFF marks a reservation slot as unused */
+ for (i = 0; i < MAX_BLOCKS; i++) {
+ splash_info->reserved_pipe_info[i].pipe_id = 0xFFFFFFFF;
+ splash_info->reserved_pipe_info[i].early_release = false;
+ }
+
+ /* Reset flush bits and mixer mask of reserved planes */
+ splash_info->flush_bits = 0;
+ splash_info->mixer_mask = 0;
+ splash_info->mixer_ext_mask = 0;
+
+ i = 0;
+ for_each_child_of_node(parent, node) {
+ if (i >= MAX_BLOCKS) {
+ SDE_ERROR("num of nodes(%d) is bigger than max(%d)\n",
+ i, MAX_BLOCKS);
+ ret = -EINVAL;
+ goto parent_node_err;
+ }
+
+ /* last "qcom,plane-name" string wins if several are listed */
+ of_property_for_each_string(node, "qcom,plane-name",
+ prop, cname)
+ splash_info->reserved_pipe_info[i].pipe_id =
+ _sde_splash_parse_sspp_id(cfg, cname);
+
+ splash_info->reserved_pipe_info[i].early_release =
+ of_property_read_bool(node, "qcom,pipe-early-release");
+
+ index = _sde_splash_get_pipe_arrary_index(
+ splash_info->reserved_pipe_info[i].pipe_id);
+
+ /* index == MAX_BLOCKS means the pipe has no mask entries */
+ if (index < MAX_BLOCKS) {
+ splash_info->flush_bits |=
+ splash_pipe_cap[index].flush_bit;
+ splash_info->mixer_mask |=
+ splash_pipe_cap[index].mixer_mask;
+ splash_info->mixer_ext_mask |=
+ splash_pipe_cap[index].mixer_ext_mask;
+ }
+
+ i++;
+ }
+
+parent_node_err:
+ of_node_put(parent);
+
+ return ret;
+}
+
+/*
+ * Report whether @pipe is still reserved for splash (i.e. listed and
+ * not flagged for early release). Always false once handoff ends.
+ */
+bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo,
+		uint32_t pipe)
+{
+	const struct splash_reserved_pipe_info *rpi;
+	int idx;
+
+	/* nothing is reserved when no splash handoff is in progress */
+	if (!sinfo || !sinfo->handoff)
+		return false;
+
+	for (idx = 0; idx < MAX_BLOCKS; idx++) {
+		rpi = &sinfo->reserved_pipe_info[idx];
+		if (rpi->pipe_id == pipe && !rpi->early_release)
+			return true;
+	}
+
+	return false;
+}
+
+/*
+ * sde_splash_get_handoff_status - probe the MDP interface-select and
+ * split-display registers to learn whether the bootloader left any
+ * display running, and initialize the handoff flags accordingly.
+ * Returns 0 on success, -EINVAL on missing resource-manager state.
+ */
+int sde_splash_get_handoff_status(struct msm_kms *kms)
+{
+ uint32_t intf_sel = 0;
+ uint32_t split_display = 0;
+ uint32_t num_of_display_on = 0;
+ uint32_t i = 0;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+ struct sde_rm *rm;
+ struct sde_hw_blk_reg_map *c;
+ struct sde_splash_info *sinfo;
+ struct sde_mdss_cfg *catalog;
+
+ /* NOTE(review): address of an embedded member - this can't be NULL */
+ sinfo = &sde_kms->splash_info;
+ if (!sinfo) {
+ SDE_ERROR("%s(%d): invalid splash info\n",
+ __func__, __LINE__);
+ return -EINVAL;
+ }
+
+ rm = &sde_kms->rm;
+
+ if (!rm || !rm->hw_mdp) {
+ SDE_ERROR("invalid rm.\n");
+ return -EINVAL;
+ }
+
+ c = &rm->hw_mdp->hw;
+ if (c) {
+ intf_sel = SDE_REG_READ(c, DISP_INTF_SEL);
+ split_display = SDE_REG_READ(c, SPLIT_DISPLAY_EN);
+ }
+
+ catalog = sde_kms->catalog;
+
+ /* one byte of DISP_INTF_SEL per interface; nonzero = driven */
+ if (intf_sel != 0) {
+ for (i = 0; i < catalog->intf_count; i++)
+ if ((intf_sel >> i*8) & 0x000000FF)
+ num_of_display_on++;
+
+ /*
+ * For split display enabled - DSI0, DSI1 interfaces are
+ * considered as single display. So decrement
+ * 'num_of_display_on' by 1
+ */
+ if (split_display) {
+ num_of_display_on--;
+ sinfo->split_is_enabled = true;
+ }
+ }
+
+ if (num_of_display_on) {
+ sinfo->handoff = true;
+ sinfo->display_splash_enabled = true;
+ sinfo->lk_is_exited = false;
+ sinfo->intf_sel_status = intf_sel;
+ } else {
+ sinfo->handoff = false;
+ sinfo->display_splash_enabled = false;
+ sinfo->lk_is_exited = true;
+ }
+
+ return 0;
+}
+
+/*
+ * sde_splash_smmu_map - create GEM objects and sg tables for every
+ * splash region and map them read-only through the SMMU so scanout can
+ * continue while the kernel takes over.
+ *
+ * Returns 0 on success, negative errno on failure.
+ */
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+		struct sde_splash_info *sinfo)
+{
+	struct msm_gem_object *msm_obj;
+	int i = 0, ret = 0;
+
+	if (!mmu || !sinfo)
+		return -EINVAL;
+
+	/* first construct drm_gem_objects for splash memory */
+	if (_sde_splash_gem_new(dev, sinfo))
+		return -ENOMEM;
+
+	/* second, build the sgt tables required for the smmu map call */
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		if (_sde_splash_get_pages(sinfo->obj[i],
+				sinfo->splash_mem_paddr[i]))
+			return -ENOMEM;
+	}
+
+	for (i = 0; i < sinfo->splash_mem_num; i++) {
+		msm_obj = to_msm_bo(sinfo->obj[i]);
+
+		if (mmu->funcs && mmu->funcs->early_splash_map) {
+			ret = mmu->funcs->early_splash_map(mmu,
+				sinfo->splash_mem_paddr[i], msm_obj->sgt,
+				IOMMU_READ | IOMMU_NOEXEC);
+
+			/*
+			 * early_splash_map reports success with a nonzero
+			 * value (see the final return below), so zero here
+			 * means the mapping failed. The original code did
+			 * 'return ret' in this branch - i.e. returned 0,
+			 * reporting success to the caller on failure.
+			 */
+			if (!ret) {
+				SDE_ERROR("Map blk %d @%pK failed.\n",
+					i, (void *)sinfo->splash_mem_paddr[i]);
+				return -ENOMEM;
+			}
+		}
+	}
+
+	/* ret stays 0 only if no early_splash_map op was available */
+	return ret ? 0 : -ENOMEM;
+}
+
+/*
+ * Determine from the saved DISP_INTF_SEL snapshot whether the given
+ * panel's interface was driven by the bootloader.
+ * NOTE(review): DSI matching is hard-coded to the "dsi_adv_7533_1/2"
+ * panel names - confirm this list when new panels are added.
+ */
+static bool _sde_splash_get_panel_intf_status(struct sde_splash_info *sinfo,
+ const char *display_name, int connector_type)
+{
+ bool ret = false;
+ int intf_status = 0;
+
+ if (sinfo && sinfo->handoff) {
+ if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+ if (!strcmp(display_name, "dsi_adv_7533_1")) {
+ if (sinfo->intf_sel_status & INTF_DSI0_SEL)
+ ret = true;
+ } else if (!strcmp(display_name, "dsi_adv_7533_2")) {
+ if (sinfo->intf_sel_status & INTF_DSI1_SEL)
+ ret = true;
+ } else
+ DRM_INFO("wrong display name %s\n",
+ display_name);
+ } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ /* HDMI needs both of its select bits set */
+ intf_status = sinfo->intf_sel_status & INTF_HDMI_SEL;
+ ret = (intf_status == INTF_HDMI_SEL);
+ }
+ }
+
+ return ret;
+}
+
+/*
+ * sde_splash_setup_display_resource - record per-display splash state
+ * (derived from the bootloader's interface-select snapshot) and, for
+ * native DSI displays with splash up, acquire their splash resources.
+ * Returns 0 on success or when no handoff is active, -EINVAL otherwise.
+ */
+int sde_splash_setup_display_resource(struct sde_splash_info *sinfo,
+ void *disp, int connector_type,
+ bool display_is_shared)
+{
+ struct dsi_display *dsi_display;
+ struct sde_hdmi *sde_hdmi;
+ struct shd_display *shd_display;
+ bool splash_is_on;
+
+ if (!sinfo || !disp)
+ return -EINVAL;
+
+ /* early return if splash is not enabled in bootloader */
+ if (!sinfo->handoff)
+ return 0;
+
+ if (connector_type == DRM_MODE_CONNECTOR_DSI) {
+ if (display_is_shared) {
+ shd_display = (struct shd_display *)disp;
+ shd_display->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ shd_display->name, connector_type);
+ splash_is_on = shd_display->cont_splash_enabled;
+ } else {
+ dsi_display = (struct dsi_display *)disp;
+ dsi_display->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ dsi_display->name,
+ connector_type);
+ splash_is_on = dsi_display->cont_splash_enabled;
+
+ /* note: "dsi_dsiplay" typo is in the exported API name */
+ if (dsi_display->cont_splash_enabled) {
+ if (dsi_dsiplay_setup_splash_resource(
+ dsi_display))
+ return -EINVAL;
+ }
+ }
+
+ DRM_INFO("DSI %s splash %s\n",
+ display_is_shared ? "shared" : "normal",
+ splash_is_on ? "enabled" : "disabled");
+ } else if (connector_type == DRM_MODE_CONNECTOR_HDMIA) {
+ /* HDMI status needs no panel name (NULL) */
+ if (display_is_shared) {
+ shd_display = (struct shd_display *)disp;
+ shd_display->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ NULL, connector_type);
+ splash_is_on = shd_display->cont_splash_enabled;
+ } else {
+ sde_hdmi = (struct sde_hdmi *)disp;
+ sde_hdmi->cont_splash_enabled =
+ _sde_splash_get_panel_intf_status(sinfo,
+ NULL, connector_type);
+ splash_is_on = sde_hdmi->cont_splash_enabled;
+ }
+
+ DRM_INFO("HDMI %s splash %s\n",
+ display_is_shared ? "shared" : "normal",
+ splash_is_on ? "enabled" : "disabled");
+ }
+
+ return 0;
+}
+
+/*
+ * Increment the per-type splash connector refcount when the given
+ * display still has early splash enabled; invalid connector types are
+ * logged and ignored.
+ */
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+		int connector_type,
+		void *display,
+		bool connector_is_shared)
+{
+	bool has_splash;
+
+	if (!sinfo || !display)
+		return;
+
+	has_splash = _sde_splash_check_splash(connector_type, display,
+					connector_is_shared);
+
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (has_splash)
+			sinfo->hdmi_connector_cnt++;
+		break;
+	case DRM_MODE_CONNECTOR_DSI:
+		if (has_splash)
+			sinfo->dsi_connector_cnt++;
+		break;
+	default:
+		SDE_ERROR("%s:invalid connector_type %d\n",
+			__func__, connector_type);
+	}
+}
+
+/*
+ * Decrement the per-type splash connector refcount when a connector
+ * with splash goes away during handoff.
+ * NOTE(review): the counters are modified here without sde_splash_lock
+ * while _sde_splash_get_connector_ref_cnt() reads them under it -
+ * confirm all callers run from the same (locked) context.
+ */
+void sde_splash_decrease_connector_cnt(struct drm_device *dev,
+ int connector_type, bool splash_on)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct sde_kms *sde_kms;
+ struct sde_splash_info *sinfo;
+
+ if (!priv || !priv->kms) {
+ SDE_ERROR("Invalid kms\n");
+ return;
+ }
+
+ sde_kms = to_sde_kms(priv->kms);
+ sinfo = &sde_kms->splash_info;
+
+ /* only connectors counted during handoff are decremented */
+ if (!sinfo->handoff || !splash_on)
+ return;
+
+ switch (connector_type) {
+ case DRM_MODE_CONNECTOR_HDMIA:
+ sinfo->hdmi_connector_cnt--;
+ break;
+ case DRM_MODE_CONNECTOR_DSI:
+ sinfo->dsi_connector_cnt--;
+ break;
+ default:
+ SDE_ERROR("%s: invalid connector_type %d\n",
+ __func__, connector_type);
+ }
+}
+
+/* Read handoff state and both splash mixer masks under the splash lock */
+void sde_splash_get_mixer_mask(struct sde_splash_info *sinfo,
+		bool *splash_on, u32 *mixercfg, u32 *mixercfg_ext)
+{
+	mutex_lock(&sde_splash_lock);
+	*mixercfg = sinfo->mixer_mask;
+	*mixercfg_ext = sinfo->mixer_ext_mask;
+	*splash_on = sinfo->handoff;
+	mutex_unlock(&sde_splash_lock);
+}
+
+/*
+ * sde_splash_get_lk_complete_status - report whether LK (bootloader)
+ * has completely exited so the kernel can take full HW ownership.
+ *
+ * True only when handoff started, both the display-splash and
+ * early-display paths are finished, and the early-domain service says
+ * LK is down.
+ */
+bool sde_splash_get_lk_complete_status(struct msm_kms *kms)
+{
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+
+	if (!sde_kms || !sde_kms->hw_intr) {
+		SDE_ERROR("invalid kms\n");
+		return false;
+	}
+
+	/* (dropped the 'intr' local the original assigned but never used) */
+	if (sde_kms->splash_info.handoff &&
+		!sde_kms->splash_info.display_splash_enabled &&
+		!sde_kms->splash_info.early_display_enabled &&
+		!_sde_splash_lk_check()) {
+		SDE_DEBUG("LK totally exits\n");
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * sde_splash_free_resource - release bootloader splash resources once
+ * user commits have happened on every connector with early splash.
+ * @kms: msm kms handle
+ * @phandle: power handle for the data-bus / core votes
+ * @connector_type: DRM connector type committing now
+ * @display: connector-specific display handle
+ * @connector_is_shared: true if @display is a shared (shd) display
+ *
+ * The first commit per distinct display_type decrements the per-type
+ * splash refcount; once both counts hit zero the reserved memory is
+ * unmapped and freed, the bus/core votes are withdrawn, properties are
+ * refreshed and handoff ends.
+ *
+ * Returns 0 on success, negative errno on invalid input.
+ */
+int sde_splash_free_resource(struct msm_kms *kms,
+		struct sde_power_handle *phandle,
+		int connector_type, void *display,
+		bool connector_is_shared)
+{
+	struct sde_kms *sde_kms;
+	struct sde_splash_info *sinfo;
+	struct msm_mmu *mmu;
+	struct dsi_display *dsi_display;
+	struct sde_hdmi *hdmi_display;
+	struct shd_display *shd_display;
+	const char *disp_type;
+	int ret = 0;
+	/* u32 to match _sde_splash_get_connector_ref_cnt() (was int) */
+	u32 hdmi_conn_count = 0;
+	u32 dsi_conn_count = 0;
+	static const char *dsi_old_disp_type = "unknown";
+	static const char *hdmi_old_disp_type = "unknown";
+
+	if (!phandle || !kms || !display) {
+		SDE_ERROR("invalid phandle/kms/display\n");
+		return -EINVAL;
+	}
+
+	sde_kms = to_sde_kms(kms);
+	sinfo = &sde_kms->splash_info;
+
+	/* Get ref count of connectors which have early splash. */
+	_sde_splash_get_connector_ref_cnt(sinfo, &hdmi_conn_count,
+					&dsi_conn_count);
+
+	mutex_lock(&sde_splash_lock);
+	if (!sinfo->handoff) {
+		mutex_unlock(&sde_splash_lock);
+		return 0;
+	}
+
+	/*
+	 * Start to free all LK's resource till user commit happens
+	 * on each display which early splash is enabled on.
+	 */
+	if (hdmi_conn_count == 0 && dsi_conn_count == 0) {
+		mmu = sde_kms->aspace[0]->mmu;
+		if (!mmu) {
+			mutex_unlock(&sde_splash_lock);
+			return -EINVAL;
+		}
+
+		/* free HDMI's, DSI's and early camera's reserved memory */
+		_sde_splash_free_module_resource(mmu, sinfo);
+
+		_sde_splash_destroy_splash_node(sinfo);
+
+		/* withdraw data bus vote */
+		sde_power_data_bus_bandwidth_ctrl(phandle,
+			sde_kms->core_client, false);
+
+		/*
+		 * Turn off MDP core power to keep power on/off operations
+		 * matched, as MDP core power was already enabled when
+		 * early splash started.
+		 */
+		sde_power_resource_enable(phandle,
+			sde_kms->core_client, false);
+
+		/* update crtc and plane properties impacted by splash */
+		_sde_splash_update_property(sde_kms);
+
+		/* send uevent to notify user to recycle resource */
+		_sde_splash_sent_pipe_update_uevent(sde_kms);
+
+		/* set display's splash status to false after handoff */
+		_sde_splash_update_display_splash_status(sde_kms);
+
+		/* Reset flush_bits and mixer masks */
+		sinfo->flush_bits = 0;
+		sinfo->mixer_mask = 0;
+		sinfo->mixer_ext_mask = 0;
+
+		/* Finally mark handoff as complete. */
+		sinfo->handoff = false;
+
+		DRM_INFO("HDMI and DSI resource handoff is completed\n");
+		mutex_unlock(&sde_splash_lock);
+		return 0;
+	}
+
+	/*
+	 * Ensure user commit happens on different connectors
+	 * that have splash.
+	 */
+	switch (connector_type) {
+	case DRM_MODE_CONNECTOR_HDMIA:
+		if (connector_is_shared) {
+			shd_display = (struct shd_display *)display;
+			disp_type = shd_display->display_type;
+		} else {
+			hdmi_display = (struct sde_hdmi *)display;
+			disp_type = hdmi_display->display_type;
+		}
+
+		if (strcmp(disp_type, "unknown") &&
+			strcmp(hdmi_old_disp_type, disp_type)) {
+			if (sinfo->hdmi_connector_cnt >= 1)
+				sinfo->hdmi_connector_cnt--;
+
+			hdmi_old_disp_type = disp_type;
+		}
+		break;
+	case DRM_MODE_CONNECTOR_DSI:
+		if (connector_is_shared) {
+			shd_display = (struct shd_display *)display;
+			disp_type = shd_display->display_type;
+		} else {
+			dsi_display = (struct dsi_display *)display;
+			disp_type = dsi_display->display_type;
+		}
+
+		if (strcmp(disp_type, "unknown") &&
+			strcmp(dsi_old_disp_type, disp_type)) {
+			if (sinfo->dsi_connector_cnt >= 1)
+				sinfo->dsi_connector_cnt--;
+
+			dsi_old_disp_type = disp_type;
+		}
+		break;
+	default:
+		ret = -EINVAL;
+		SDE_ERROR("%s: invalid connector_type %d\n",
+			__func__, connector_type);
+	}
+
+	mutex_unlock(&sde_splash_lock);
+	return ret;
+}
+
+/*
+ * Below function will detach all the pipes of the mixer
+ */
+/*
+ * _sde_splash_clear_mixer_blendstage - detach all pipes from the mixers
+ * of every CRTC in the atomic state, honoring the reserved-pipe masks.
+ *
+ * Returns 0 on success, -EINVAL on inconsistent CRTC/mixer state.
+ */
+static int _sde_splash_clear_mixer_blendstage(struct msm_kms *kms,
+		struct drm_atomic_state *state)
+{
+	struct drm_crtc *crtc;
+	struct sde_crtc *sde_crtc;
+	struct sde_crtc_mixer *mixer;
+	int i, j;
+	struct sde_splash_info *sinfo;
+	struct sde_kms *sde_kms = to_sde_kms(kms);
+
+	/* address of an embedded member - no NULL check needed */
+	sinfo = &sde_kms->splash_info;
+
+	for (i = 0; i < state->dev->mode_config.num_crtc; i++) {
+		crtc = state->crtcs[i];
+		if (!crtc) {
+			SDE_ERROR("CRTC is NULL");
+			continue;
+		}
+		sde_crtc = to_sde_crtc(crtc);
+		if (!sde_crtc) {
+			SDE_ERROR("SDE CRTC is NULL");
+			return -EINVAL;
+		}
+		mixer = sde_crtc->mixers;
+		if (!mixer) {
+			SDE_ERROR("Mixer is NULL");
+			return -EINVAL;
+		}
+		/*
+		 * Walk the mixers with a separate index: the original
+		 * code reused 'i' here, corrupting the outer CRTC loop
+		 * so at most one CRTC was ever processed correctly.
+		 */
+		for (j = 0; j < sde_crtc->num_mixers; j++) {
+			if (mixer[j].hw_ctl->ops.clear_all_blendstages)
+				mixer[j].hw_ctl->ops.clear_all_blendstages(
+					mixer[j].hw_ctl,
+					sinfo->handoff,
+					sinfo->mixer_mask,
+					sinfo->mixer_ext_mask);
+		}
+	}
+	return 0;
+}
+
+/*
+ * Below function will notify LK to stop display splash.
+ */
+/*
+ * sde_splash_lk_stop_splash - on the first real user commit, tell LK to
+ * stop its splash, release early-release pipes and clear the mixer
+ * blend stages. Returns 0 on success or the blendstage-clear error.
+ */
+int sde_splash_lk_stop_splash(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ int error = 0;
+ struct sde_splash_info *sinfo;
+ struct sde_kms *sde_kms = to_sde_kms(kms);
+
+ sinfo = &sde_kms->splash_info;
+
+ /* Monitor LK's status and tell it to exit. */
+ mutex_lock(&sde_splash_lock);
+ if (_sde_splash_validate_commit(sde_kms, state) &&
+ sinfo->display_splash_enabled) {
+ /* release splash RGB layer */
+ _sde_splash_release_early_splash_layer(sinfo);
+
+ if (_sde_splash_lk_check()) {
+ _sde_splash_notify_lk_stop_splash();
+ error = _sde_splash_clear_mixer_blendstage(kms, state);
+ }
+
+ if (get_hibernation_status() == true) {
+ sinfo->display_splash_enabled = false;
+ } else {
+ /* preserve the display_splash_enabled state for
+ * case when system is restoring from hibernation
+ * image and splash is enabled.
+ */
+ sinfo->display_splash_enabled = true;
+ }
+ }
+ mutex_unlock(&sde_splash_lock);
+
+ return error;
+}
diff --git a/drivers/gpu/drm/msm/sde/sde_splash.h b/drivers/gpu/drm/msm/sde/sde_splash.h
new file mode 100644
index 000000000000..345bf819ee1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_splash.h
@@ -0,0 +1,233 @@
+/**
+ * Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#ifndef SDE_SPLASH_H_
+#define SDE_SPLASH_H_
+
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "sde_hw_mdss.h"
+
+/* Type of connector driving the splash/continuous display. */
+enum splash_connector_type {
+	SPLASH_DSI = 0,
+	SPLASH_HDMI,
+};
+
+/* Per-layer-mixer hardware state captured at bootloader handoff. */
+struct splash_lm_hw {
+	u8 lm_id;		/* layer mixer block id */
+	u8 ctl_id;		/* CTL block associated with this mixer */
+	u32 lm_reg_value;	/* raw LM register value — presumably as read at handoff */
+};
+
+/* Snapshot of one CTL_TOP block as programmed by the bootloader. */
+struct splash_ctl_top {
+	u32 value;		/* raw CTL_TOP register value */
+	u8 intf_sel;		/* interface select field */
+	u8 ctl_lm_cnt;		/* number of valid entries in lm[] */
+	struct splash_lm_hw lm[LM_MAX - LM_0];
+};
+
+/* Aggregate of display hardware resources held by the bootloader. */
+struct splash_res_data {
+	struct splash_ctl_top top[CTL_MAX - CTL_0];
+	u8 ctl_ids[CTL_MAX - CTL_0];	/* active CTL block ids */
+	u8 lm_ids[LM_MAX - LM_0];	/* active LM block ids */
+	u8 ctl_top_cnt;			/* count of valid entries in top[]/ctl_ids[] */
+	u8 lm_cnt;			/* count of valid entries in lm_ids[] */
+};
+
+/* A pipe reserved for early RVC/splash, and whether it may be released early. */
+struct splash_reserved_pipe_info {
+	uint32_t pipe_id;	/* reserved SSPP pipe identifier */
+	bool early_release;	/* true if pipe can be handed back before splash ends */
+};
+
+/* Flush and mixer configuration bits associated with one reserved SSPP pipe. */
+struct splash_pipe_caps {
+	enum sde_sspp pipe;	/* pipe identifier */
+	u32 flush_bit;		/* CTL flush bit for this pipe */
+	u32 mixer_mask;		/* layer mixer blend-stage mask */
+	u32 mixer_ext_mask;	/* layer mixer extension blend-stage mask */
+};
+
+struct sde_splash_info {
+ /* handoff flag */
+ bool handoff;
+
+ /* current hw configuration */
+ struct splash_res_data res;
+
+ /* flag of display splash status */
+ bool display_splash_enabled;
+
+ /* to indicate LK is totally exited */
+ bool lk_is_exited;
+
+ /* flag of early display status */
+ bool early_display_enabled;
+
+ /* flag of early RVC status */
+ bool early_camera_enabled;
+
+ /* memory node used for display buffer */
+ uint32_t splash_mem_num;
+
+ /* physical address of memory node for display buffer */
+ phys_addr_t *splash_mem_paddr;
+
+ /* size of memory node */
+ size_t *splash_mem_size;
+
+ /* constructed gem objects for smmu mapping */
+ struct drm_gem_object **obj;
+
+	/* enabled status of displays */
+ uint32_t intf_sel_status;
+
+ /* DSI split enabled flag */
+ bool split_is_enabled;
+
+ /* registered hdmi connector count */
+ uint32_t hdmi_connector_cnt;
+
+ /* registered dsi connector count */
+ uint32_t dsi_connector_cnt;
+
+ /* reserved pipe info both for early RVC and early splash */
+ struct splash_reserved_pipe_info reserved_pipe_info[MAX_BLOCKS];
+
+ /* flush bits of reserved pipes */
+ uint32_t flush_bits;
+
+ /* layer mixer mask of reserved pipes */
+ uint32_t mixer_mask;
+
+ /* layer mixer extension mask of reserved pipes */
+ uint32_t mixer_ext_mask;
+};
+
+/* APIs for early splash handoff functions */
+
+/**
+ * sde_splash_get_handoff_status.
+ *
+ * This function will read DISP_INTF_SEL register to get
+ * the status of early splash.
+ */
+int sde_splash_get_handoff_status(struct msm_kms *kms);
+
+/**
+ * sde_splash_init
+ *
+ * This function will do bandwidth vote and reserved memory
+ */
+int sde_splash_init(struct sde_power_handle *phandle, struct msm_kms *kms);
+
+/**
+ * sde_splash_setup_connector_count
+ *
+ * To count connector numbers for DSI and HDMI respectively.
+ */
+void sde_splash_setup_connector_count(struct sde_splash_info *sinfo,
+ int connector_type, void *display,
+ bool connector_is_shared);
+
+/**
+ * sde_splash_lk_stop_splash.
+ *
+ * Tell LK to stop display splash once one valid user commit arrives.
+ */
+int sde_splash_lk_stop_splash(struct msm_kms *kms,
+ struct drm_atomic_state *state);
+
+/**
+ * sde_splash_free_resource.
+ *
+ * To free all LK's resource, including free reserved memory to system,
+ * withdraw data bus vote, disable MDP core power, send uevent to user
+ * to recycle pipe etc.
+ */
+int sde_splash_free_resource(struct msm_kms *kms,
+ struct sde_power_handle *phandle,
+ int connector_type, void *display,
+ bool connector_is_shared);
+
+/**
+ * sde_splash_parse_memory_dt.
+ *
+ * Parse reserved memory block from DT for early splash.
+ */
+int sde_splash_parse_memory_dt(struct drm_device *dev);
+
+/**
+ * sde_splash_parse_reserved_plane_dt
+ *
+ * Parse reserved plane information from DT for early RVC case.
+ */
+int sde_splash_parse_reserved_plane_dt(struct drm_device *dev,
+ struct sde_splash_info *splash_info,
+ struct sde_mdss_cfg *cfg);
+
+/*
+ * sde_splash_query_plane_is_reserved
+ *
+ * Query plane is reserved in dt.
+ */
+bool sde_splash_query_plane_is_reserved(struct sde_splash_info *sinfo,
+ uint32_t pipe);
+
+/**
+ * sde_splash_smmu_map.
+ *
+ * Map the physical memory LK visited into iommu driver.
+ */
+int sde_splash_smmu_map(struct drm_device *dev, struct msm_mmu *mmu,
+ struct sde_splash_info *sinfo);
+
+/**
+ * sde_splash_destroy
+ *
+ * Destroy the resource in failed case.
+ */
+void sde_splash_destroy(struct sde_splash_info *sinfo,
+ struct sde_power_handle *phandle,
+ struct sde_power_client *pclient);
+
+/**
+ * sde_splash_get_lk_complete_status
+ *
+ * Get LK's status to check if it has been stopped.
+ */
+bool sde_splash_get_lk_complete_status(struct msm_kms *kms);
+
+/**
+ * sde_splash_setup_display_resource
+ *
+ * Setup display resource based on connector type.
+ */
+int sde_splash_setup_display_resource(struct sde_splash_info *sinfo,
+ void *disp, int connector_type,
+ bool display_is_shared);
+
+/**
+ * sde_splash_decrease_connector_cnt()
+ *
+ * Decrease splash connector count when shared display configuration is enabled.
+ */
+void sde_splash_decrease_connector_cnt(struct drm_device *dev,
+ int connector_type,
+ bool splash_on);
+
+/**
+ * sde_splash_get_mixer_mask
+ *
+ * Retrieve mixer mask and extension mask from splash_info structure.
+ */
+void sde_splash_get_mixer_mask(struct sde_splash_info *sinfo,
+ bool *splash_on, u32 *mixercfg, u32 *mixercfg_ext);
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_trace.h b/drivers/gpu/drm/msm/sde/sde_trace.h
new file mode 100644
index 000000000000..d28562eabccb
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_trace.h
@@ -0,0 +1,211 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_SDE_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _SDE_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM sde
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE sde_trace
+
+TRACE_EVENT(sde_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, bool linear),
+ TP_ARGS(pnum, fmt, rt, fl, lut, linear),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u32, lut)
+ __field(bool, linear)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->linear = linear;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%x lin=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->linear)
+);
+
+TRACE_EVENT(sde_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(sde_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+)
+
+TRACE_EVENT(sde_perf_update_bus,
+ TP_PROTO(int client, unsigned long long ab_quota,
+ unsigned long long ib_quota),
+ TP_ARGS(client, ab_quota, ib_quota),
+ TP_STRUCT__entry(
+ __field(int, client)
+ __field(u64, ab_quota)
+ __field(u64, ib_quota)
+ ),
+ TP_fast_assign(
+ __entry->client = client;
+ __entry->ab_quota = ab_quota;
+ __entry->ib_quota = ib_quota;
+ ),
+ TP_printk("Request client:%d ab=%llu ib=%llu",
+ __entry->client,
+ __entry->ab_quota,
+ __entry->ib_quota)
+)
+
+
+TRACE_EVENT(sde_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(sde_encoder_underrun,
+ TP_PROTO(u32 enc_id, u32 underrun_cnt),
+ TP_ARGS(enc_id, underrun_cnt),
+ TP_STRUCT__entry(
+ __field(u32, enc_id)
+ __field(u32, underrun_cnt)
+ ),
+ TP_fast_assign(
+ __entry->enc_id = enc_id;
+ __entry->underrun_cnt = underrun_cnt;
+
+ ),
+ TP_printk("enc:%d underrun_cnt:%d", __entry->enc_id,
+ __entry->underrun_cnt)
+);
+
+TRACE_EVENT(sde_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+)
+
+/*
+ * Systrace-style counter event, rendered as "pid|name|value".
+ * NOTE(review): the @pid argument is ignored; TP_fast_assign records
+ * current->tgid instead — confirm whether that is intentional.
+ */
+TRACE_EVENT(sde_trace_counter,
+	TP_PROTO(int pid, char *name, int value),
+	TP_ARGS(pid, name, value),
+	TP_STRUCT__entry(
+			__field(int, pid)
+			__string(counter_name, name)
+			__field(int, value)
+	),
+	TP_fast_assign(
+			__entry->pid = current->tgid;
+			__assign_str(counter_name, name);
+			__entry->value = value;
+	),
+	TP_printk("%d|%s|%d", __entry->pid,
+			__get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(sde_evtlog,
+ TP_PROTO(const char *tag, u32 tag_id, u64 value1, u64 value2),
+ TP_ARGS(tag, tag_id, value1, value2),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(evtlog_tag, tag)
+ __field(u32, tag_id)
+ __field(u64, value1)
+ __field(u64, value2)
+ ),
+ TP_fast_assign(
+ __entry->pid = current->tgid;
+ __assign_str(evtlog_tag, tag);
+ __entry->tag_id = tag_id;
+ __entry->value1 = value1;
+ __entry->value2 = value2;
+ ),
+ TP_printk("%d|%s:%d|%llu|%llu", __entry->pid, __get_str(evtlog_tag),
+ __entry->tag_id, __entry->value1, __entry->value2)
+)
+
+#define SDE_ATRACE_END(name) trace_sde_mark_write(current->tgid, name, 0)
+#define SDE_ATRACE_BEGIN(name) trace_sde_mark_write(current->tgid, name, 1)
+#define SDE_ATRACE_FUNC() SDE_ATRACE_BEGIN(__func__)
+
+#define SDE_ATRACE_INT(name, value) \
+ trace_sde_trace_counter(current->tgid, name, value)
+
+#endif /* _SDE_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.c b/drivers/gpu/drm/msm/sde/sde_vbif.c
new file mode 100644
index 000000000000..b114840d741c
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.c
@@ -0,0 +1,284 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "sde_vbif.h"
+#include "sde_hw_vbif.h"
+#include "sde_trace.h"
+
+/**
+ * _sde_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif:	Pointer to hardware vbif driver
+ * @xin_id:	Client interface identifier
+ * @return:	0 if success; error code otherwise
+ *
+ * Polls the halt-acknowledge status until it is set or until the
+ * catalog-provided xin_halt_timeout (microseconds) elapses.
+ */
+static int _sde_vbif_wait_for_xin_halt(struct sde_hw_vbif *vbif, u32 xin_id)
+{
+	ktime_t timeout;
+	bool status;
+	int rc;
+
+	if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		return -EINVAL;
+	}
+
+	timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+	for (;;) {
+		status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+		if (status)
+			break;
+		if (ktime_compare_safe(ktime_get(), timeout) > 0) {
+			/* re-sample once after timeout to close the race
+			 * between the last poll and the deadline check
+			 */
+			status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+			break;
+		}
+		usleep_range(501, 1000);
+	}
+
+	if (!status) {
+		rc = -ETIMEDOUT;
+		SDE_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+				vbif->idx - VBIF_0, xin_id);
+	} else {
+		rc = 0;
+		SDE_DEBUG("VBIF %d client %d is halted\n",
+				vbif->idx - VBIF_0, xin_id);
+	}
+
+	return rc;
+}
+
+/**
+ * _sde_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @ot_lim:	Pointer to OT limit to be modified
+ * @params:	Pointer to usecase parameters
+ *
+ * Only applies when the catalog advertises SDE_VBIF_QOS_OTLIM and the
+ * use case is WFD; otherwise *ot_lim is left untouched.
+ */
+static void _sde_vbif_apply_dynamic_ot_limit(struct sde_hw_vbif *vbif,
+		u32 *ot_lim, struct sde_vbif_set_ot_params *params)
+{
+	u64 pps;
+	const struct sde_vbif_dynamic_ot_tbl *tbl;
+	u32 i;
+
+	if (!vbif || !(vbif->cap->features & BIT(SDE_VBIF_QOS_OTLIM)))
+		return;
+
+	/* Dynamic OT setting done only for WFD */
+	if (!params->is_wfd)
+		return;
+
+	/* pixels per second = fps * width * height */
+	pps = params->frame_rate;
+	pps *= params->width;
+	pps *= params->height;
+
+	tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+			&vbif->cap->dynamic_ot_wr_tbl;
+
+	/* pick the first table row whose pps threshold covers this load */
+	for (i = 0; i < tbl->count; i++) {
+		if (pps <= tbl->cfg[i].pps) {
+			*ot_lim = tbl->cfg[i].ot_limit;
+			break;
+		}
+	}
+
+	SDE_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+			vbif->idx - VBIF_0, params->xin_id,
+			params->width, params->height, params->frame_rate,
+			pps, *ot_lim);
+}
+
+/**
+ * _sde_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif:	Pointer to hardware vbif driver
+ * @params:	Pointer to usecase parameters
+ * @return:	OT limit to program, or 0 when nothing needs programming
+ */
+static u32 _sde_vbif_get_ot_limit(struct sde_hw_vbif *vbif,
+	struct sde_vbif_set_ot_params *params)
+{
+	u32 ot_lim = 0;
+	u32 val;
+
+	if (!vbif || !vbif->cap) {
+		SDE_ERROR("invalid arguments vbif %d\n", vbif != 0);
+		/*
+		 * Return type is u32: returning -EINVAL here would be seen
+		 * by the caller (which masks with 0xFF) as a bogus non-zero
+		 * OT limit and get programmed into hardware. Return 0 to
+		 * mean "do not configure OT".
+		 */
+		return 0;
+	}
+
+	if (vbif->cap->default_ot_wr_limit && !params->rd)
+		ot_lim = vbif->cap->default_ot_wr_limit;
+	else if (vbif->cap->default_ot_rd_limit && params->rd)
+		ot_lim = vbif->cap->default_ot_rd_limit;
+
+	/*
+	 * If default ot is not set from dt/catalog,
+	 * then do not configure it.
+	 */
+	if (ot_lim == 0)
+		goto exit;
+
+	/* Modify the limits if the target and the use case requires it */
+	_sde_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	/* vbif already validated above; no need to re-check it here */
+	if (vbif->ops.get_limit_conf) {
+		val = vbif->ops.get_limit_conf(vbif,
+				params->xin_id, params->rd);
+		/* hardware already holds this limit - skip reprogramming */
+		if (val == ot_lim)
+			ot_lim = 0;
+	}
+
+exit:
+	SDE_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+			vbif->idx - VBIF_0, params->xin_id, ot_lim);
+	return ot_lim;
+}
+
+/**
+ * sde_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @sde_kms:	SDE handler
+ * @params:	Pointer to usecase parameters
+ *
+ * Note this function would block waiting for bus halt.
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+		struct sde_vbif_set_ot_params *params)
+{
+	struct sde_hw_vbif *vbif = NULL;
+	struct sde_hw_mdp *mdp;
+	bool forced_on = false;
+	u32 ot_lim;
+	int ret, i;
+
+	if (!sde_kms) {
+		SDE_ERROR("invalid arguments\n");
+		return;
+	}
+	mdp = sde_kms->hw_mdp;
+
+	/* look up the vbif block matching the requested index */
+	for (i = 0; i < ARRAY_SIZE(sde_kms->hw_vbif); i++) {
+		if (sde_kms->hw_vbif[i] &&
+				sde_kms->hw_vbif[i]->idx == params->vbif_idx)
+			vbif = sde_kms->hw_vbif[i];
+	}
+
+	if (!vbif || !mdp) {
+		SDE_DEBUG("invalid arguments vbif %d mdp %d\n",
+				vbif != 0, mdp != 0);
+		return;
+	}
+
+	if (!mdp->ops.setup_clk_force_ctrl ||
+			!vbif->ops.set_limit_conf ||
+			!vbif->ops.set_halt_ctrl)
+		return;
+
+	ot_lim = _sde_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+	/* 0 means nothing to program */
+	if (ot_lim == 0)
+		goto exit;
+
+	trace_sde_perf_set_ot(params->num, params->xin_id, ot_lim,
+		params->vbif_idx);
+
+	/* force the client clock on while reprogramming the limit */
+	forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+	vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+	/* halt the client, wait for the ack, then resume */
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+	ret = _sde_vbif_wait_for_xin_halt(vbif, params->xin_id);
+	if (ret)
+		SDE_EVT32(vbif->idx, params->xin_id);
+
+	vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+	if (forced_on)
+		mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+	return;
+}
+
+#ifdef CONFIG_DEBUG_FS
+/* Remove the "vbif" debugfs tree created by sde_debugfs_vbif_init(). */
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+	debugfs_remove_recursive(sde_kms->debugfs_vbif);
+	sde_kms->debugfs_vbif = NULL;
+}
+
+/**
+ * sde_debugfs_vbif_init - expose per-vbif catalog settings under debugfs
+ * @sde_kms:	SDE handler
+ * @debugfs_root:	debugfs parent directory
+ *			NOTE(review): this parameter is currently unused; the
+ *			"vbif" directory is created under sde_kms->debugfs_root
+ *			instead — confirm which root is intended.
+ * @return:	0 on success, -EINVAL if the directory cannot be created
+ */
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root)
+{
+	char vbif_name[32];
+	struct dentry *debugfs_vbif;
+	int i, j;
+
+	sde_kms->debugfs_vbif = debugfs_create_dir("vbif",
+			sde_kms->debugfs_root);
+	if (!sde_kms->debugfs_vbif) {
+		SDE_ERROR("failed to create vbif debugfs\n");
+		return -EINVAL;
+	}
+
+	/* one sub-directory per vbif block, named after its id */
+	for (i = 0; i < sde_kms->catalog->vbif_count; i++) {
+		struct sde_vbif_cfg *vbif = &sde_kms->catalog->vbif[i];
+
+		snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+		debugfs_vbif = debugfs_create_dir(vbif_name,
+				sde_kms->debugfs_vbif);
+
+		/* NOTE(review): "features" is writable (0644) while the
+		 * remaining nodes are read-only — confirm this is deliberate
+		 */
+		debugfs_create_u32("features", 0644, debugfs_vbif,
+			(u32 *)&vbif->features);
+
+		debugfs_create_u32("xin_halt_timeout", S_IRUGO, debugfs_vbif,
+			(u32 *)&vbif->xin_halt_timeout);
+
+		debugfs_create_u32("default_rd_ot_limit", S_IRUGO, debugfs_vbif,
+			(u32 *)&vbif->default_ot_rd_limit);
+
+		debugfs_create_u32("default_wr_ot_limit", S_IRUGO, debugfs_vbif,
+			(u32 *)&vbif->default_ot_wr_limit);
+
+		/* per-entry read-table thresholds and limits */
+		for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_rd_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_pps", j);
+			debugfs_create_u64(vbif_name, S_IRUGO, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_rd_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, S_IRUGO, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+
+		/* per-entry write-table thresholds and limits */
+		for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+			struct sde_vbif_dynamic_ot_cfg *cfg =
+					&vbif->dynamic_ot_wr_tbl.cfg[j];
+
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_pps", j);
+			debugfs_create_u64(vbif_name, S_IRUGO, debugfs_vbif,
+					(u64 *)&cfg->pps);
+			snprintf(vbif_name, sizeof(vbif_name),
+					"dynamic_ot_wr_%d_ot_limit", j);
+			debugfs_create_u32(vbif_name, S_IRUGO, debugfs_vbif,
+					(u32 *)&cfg->ot_limit);
+		}
+	}
+
+	return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/sde/sde_vbif.h b/drivers/gpu/drm/msm/sde/sde_vbif.h
new file mode 100644
index 000000000000..33f16a867a60
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_vbif.h
@@ -0,0 +1,51 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_VBIF_H__
+#define __SDE_VBIF_H__
+
+#include "sde_kms.h"
+
+/**
+ * struct sde_vbif_set_ot_params - OT limit configuration for one vbif client
+ * @xin_id:	client interface identifier
+ * @num:	block number, used for tracing
+ * @width:	frame width in pixels (used to compute pixels-per-second)
+ * @height:	frame height in pixels
+ * @frame_rate:	frame rate in fps
+ * @rd:	true for a read client, false for a write client
+ * @is_wfd:	true for a WFD use case (enables dynamic OT table lookup)
+ * @vbif_idx:	index of the vbif hardware block to program
+ * @clk_ctrl:	clock control id used to force the client clock on
+ */
+struct sde_vbif_set_ot_params {
+	u32 xin_id;
+	u32 num;
+	u32 width;
+	u32 height;
+	u32 frame_rate;
+	bool rd;
+	bool is_wfd;
+	u32 vbif_idx;
+	u32 clk_ctrl;
+};
+
+/**
+ * sde_vbif_set_ot_limit - set OT limit for vbif client
+ * @sde_kms: SDE handler
+ * @params: Pointer to OT configuration parameters
+ */
+void sde_vbif_set_ot_limit(struct sde_kms *sde_kms,
+ struct sde_vbif_set_ot_params *params);
+
+#ifdef CONFIG_DEBUG_FS
+int sde_debugfs_vbif_init(struct sde_kms *sde_kms, struct dentry *debugfs_root);
+void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms);
+#else
+static inline int sde_debugfs_vbif_init(struct sde_kms *sde_kms,
+ struct dentry *debugfs_root)
+{
+ return 0;
+}
+static inline void sde_debugfs_vbif_destroy(struct sde_kms *sde_kms)
+{
+}
+#endif
+#endif /* __SDE_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.c b/drivers/gpu/drm/msm/sde/sde_wb.c
new file mode 100644
index 000000000000..647cb5891153
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_wb.c
@@ -0,0 +1,745 @@
+/*
+ * Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_kms.h"
+#include "sde_kms.h"
+#include "sde_wb.h"
+#include "sde_formats.h"
+
+/* maximum display mode resolution if not available from catalog */
+#define SDE_WB_MODE_MAX_WIDTH 4096
+#define SDE_WB_MODE_MAX_HEIGHT 4096
+
+/* Serialization lock for sde_wb_list */
+static DEFINE_MUTEX(sde_wb_list_lock);
+
+/* List of all writeback devices installed */
+static LIST_HEAD(sde_wb_list);
+
+/**
+ * sde_wb_is_format_valid - check if given format/modifier is supported
+ * @wb_dev:	Pointer to writeback device
+ * @pixel_format:	Fourcc pixel format
+ * @format_modifier:	Format modifier
+ * Returns: true if valid; false otherwise
+ *
+ * NOTE(review): declared int but used strictly as a boolean; consider bool.
+ */
+static int sde_wb_is_format_valid(struct sde_wb_device *wb_dev,
+		u32 pixel_format, u64 format_modifier)
+{
+	const struct sde_format_extended *fmts = wb_dev->wb_cfg->format_list;
+	int i;
+
+	/* no format list in catalog means nothing is supported */
+	if (!fmts)
+		return false;
+
+	/* format list is terminated by a zero fourcc entry */
+	for (i = 0; fmts[i].fourcc_format; i++)
+		if ((fmts[i].modifier == format_modifier) &&
+				(fmts[i].fourcc_format == pixel_format))
+			return true;
+
+	return false;
+}
+
+/*
+ * sde_wb_connector_detect - report the cached connection status of the
+ * writeback device; "unknown" when no display handle is provided.
+ */
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	enum drm_connector_status rc = connector_status_unknown;
+
+	SDE_DEBUG("\n");
+
+	if (display)
+		rc = ((struct sde_wb_device *)display)->detect_status;
+
+	return rc;
+}
+
+/*
+ * sde_wb_connector_get_modes - populate connector modes for writeback
+ *
+ * Uses the user-supplied mode list when one was set via
+ * sde_wb_connector_set_modes(); otherwise falls back to standard
+ * no-EDID modes bounded by the catalog's maximum line width.
+ * Returns the number of modes added.
+ */
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
+{
+	struct sde_wb_device *wb_dev;
+	int num_modes = 0;
+
+	if (!connector || !display)
+		return 0;
+
+	wb_dev = display;
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	if (wb_dev->count_modes && wb_dev->modes) {
+		struct drm_display_mode *mode;
+		int i, ret;
+
+		for (i = 0; i < wb_dev->count_modes; i++) {
+			mode = drm_mode_create(connector->dev);
+			if (!mode) {
+				SDE_ERROR("failed to create mode\n");
+				break;
+			}
+			ret = drm_mode_convert_umode(mode,
+					&wb_dev->modes[i]);
+			if (ret) {
+				SDE_ERROR("failed to convert mode %d\n", ret);
+				/* free the mode that was never probed,
+				 * otherwise it leaks on conversion failure
+				 */
+				drm_mode_destroy(connector->dev, mode);
+				break;
+			}
+
+			drm_mode_probed_add(connector, mode);
+			num_modes++;
+		}
+	} else {
+		u32 max_width = (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk) ?
+				wb_dev->wb_cfg->sblk->maxlinewidth :
+				SDE_WB_MODE_MAX_WIDTH;
+
+		num_modes = drm_add_modes_noedid(connector, max_width,
+				SDE_WB_MODE_MAX_HEIGHT);
+	}
+	mutex_unlock(&wb_dev->wb_lock);
+	return num_modes;
+}
+
+/*
+ * sde_wb_connector_state_get_output_fb - fetch the output framebuffer from a
+ * virtual (writeback) connector state; NULL for invalid/non-virtual states.
+ */
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
+{
+	if (!state || !state->connector ||
+		(state->connector->connector_type !=
+				DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	SDE_DEBUG("\n");
+
+	return sde_connector_get_out_fb(state);
+}
+
+/*
+ * sde_wb_connector_state_get_output_roi - read the destination rectangle
+ * (DST_X/Y/W/H connector properties) from a virtual connector state.
+ * Returns 0 on success, -EINVAL on invalid/non-virtual state.
+ */
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+		struct sde_rect *roi)
+{
+	if (!state || !roi || !state->connector ||
+		(state->connector->connector_type !=
+			DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	roi->x = sde_connector_get_property(state, CONNECTOR_PROP_DST_X);
+	roi->y = sde_connector_get_property(state, CONNECTOR_PROP_DST_Y);
+	roi->w = sde_connector_get_property(state, CONNECTOR_PROP_DST_W);
+	roi->h = sde_connector_get_property(state, CONNECTOR_PROP_DST_H);
+
+	return 0;
+}
+
+/**
+ * sde_wb_connector_set_modes - set writeback modes and connection status
+ * @wb_dev:	Pointer to write back device
+ * @count_modes:	Count of modes
+ * @modes:	Pointer to writeback mode requested (userspace memory)
+ * @connected:	Connection status requested
+ * Returns: 0 if success; error code otherwise
+ *
+ * Caller must hold wb_dev->wb_lock (see sde_wb_config()).
+ */
+static
+int sde_wb_connector_set_modes(struct sde_wb_device *wb_dev,
+		u32 count_modes, struct drm_mode_modeinfo __user *modes,
+		bool connected)
+{
+	int ret = 0;
+
+	if (!wb_dev || !wb_dev->connector ||
+			(wb_dev->connector->connector_type !=
+			 DRM_MODE_CONNECTOR_VIRTUAL)) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	if (connected) {
+		SDE_DEBUG("connect\n");
+
+		/* drop any previously cached mode list */
+		if (wb_dev->modes) {
+			wb_dev->count_modes = 0;
+
+			kfree(wb_dev->modes);
+			wb_dev->modes = NULL;
+		}
+
+		if (count_modes && modes) {
+			wb_dev->modes = kcalloc(count_modes,
+					sizeof(struct drm_mode_modeinfo),
+					GFP_KERNEL);
+			if (!wb_dev->modes) {
+				SDE_ERROR("invalid params\n");
+				ret = -ENOMEM;
+				goto error;
+			}
+
+			/* copy the user-supplied mode list into the kernel */
+			if (copy_from_user(wb_dev->modes, modes,
+					count_modes *
+					sizeof(struct drm_mode_modeinfo))) {
+				SDE_ERROR("failed to copy modes\n");
+				kfree(wb_dev->modes);
+				wb_dev->modes = NULL;
+				ret = -EFAULT;
+				goto error;
+			}
+
+			wb_dev->count_modes = count_modes;
+		}
+
+		wb_dev->detect_status = connector_status_connected;
+	} else {
+		SDE_DEBUG("disconnect\n");
+
+		/* disconnect discards the cached mode list */
+		if (wb_dev->modes) {
+			wb_dev->count_modes = 0;
+
+			kfree(wb_dev->modes);
+			wb_dev->modes = NULL;
+		}
+
+		wb_dev->detect_status = connector_status_disconnected;
+	}
+
+error:
+	return ret;
+}
+
+/*
+ * sde_wb_connector_set_property - validate connector property updates.
+ * Only CONNECTOR_PROP_OUT_FB is checked here: the output framebuffer's
+ * format/modifier must be in the writeback catalog's format list.
+ * All other properties are accepted unchanged. Returns 0 or -EINVAL.
+ */
+int sde_wb_connector_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t value,
+		void *display)
+{
+	struct sde_wb_device *wb_dev = display;
+	struct drm_framebuffer *out_fb;
+	int rc = 0;
+
+	SDE_DEBUG("\n");
+
+	if (state && (property_index == CONNECTOR_PROP_OUT_FB)) {
+		const struct sde_format *sde_format;
+
+		/* no fb attached yet is not an error */
+		out_fb = sde_connector_get_out_fb(state);
+		if (!out_fb)
+			goto done;
+
+		sde_format = sde_get_sde_format_ext(out_fb->pixel_format,
+				out_fb->modifier,
+				drm_format_num_planes(out_fb->pixel_format));
+		if (!sde_format) {
+			SDE_ERROR("failed to get sde format\n");
+			rc = -EINVAL;
+			goto done;
+		}
+
+		if (!sde_wb_is_format_valid(wb_dev, out_fb->pixel_format,
+				out_fb->modifier[0])) {
+			SDE_ERROR("unsupported writeback format 0x%x/0x%llx\n",
+					out_fb->pixel_format,
+					out_fb->modifier[0]);
+			rc = -EINVAL;
+			goto done;
+		}
+	}
+
+done:
+	return rc;
+}
+
+/*
+ * sde_wb_get_info - fill in the generic display info for a writeback device.
+ * Maximum width comes from the catalog's maxlinewidth when available.
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+int sde_wb_get_info(struct msm_display_info *info, void *display)
+{
+	struct sde_wb_device *wb_dev = display;
+
+	if (!info || !wb_dev) {
+		pr_err("invalid params\n");
+		return -EINVAL;
+	}
+
+	info->intf_type = DRM_MODE_CONNECTOR_VIRTUAL;
+	info->num_of_h_tiles = 1;
+	info->h_tile_instance[0] = sde_wb_get_index(display);
+	info->is_connected = true;
+	info->capabilities = MSM_DISPLAY_CAP_HOT_PLUG | MSM_DISPLAY_CAP_EDID;
+	info->max_width = (wb_dev->wb_cfg && wb_dev->wb_cfg->sblk) ?
+			wb_dev->wb_cfg->sblk->maxlinewidth :
+			SDE_WB_MODE_MAX_WIDTH;
+	info->max_height = SDE_WB_MODE_MAX_HEIGHT;
+	info->compression = MSM_DISPLAY_COMPRESS_NONE;
+	return 0;
+}
+
+/*
+ * sde_wb_connector_post_init - install writeback connector properties and
+ * populate the capability info blob (pixel formats, intf index, features).
+ * Returns 0 on success, -EINVAL on invalid arguments.
+ */
+int sde_wb_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	struct sde_connector *c_conn;
+	struct sde_wb_device *wb_dev = display;
+	const struct sde_format_extended *format_list;
+
+	if (!connector || !info || !display || !wb_dev->wb_cfg) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	c_conn = to_sde_connector(connector);
+	wb_dev->connector = connector;
+	wb_dev->detect_status = connector_status_connected;
+	format_list = wb_dev->wb_cfg->format_list;
+
+	/*
+	 * Add extra connector properties
+	 */
+	msm_property_install_range(&c_conn->property_info, "FB_ID",
+			0x0, 0, ~0, ~0, CONNECTOR_PROP_OUT_FB);
+	msm_property_install_range(&c_conn->property_info, "DST_X",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_X);
+	msm_property_install_range(&c_conn->property_info, "DST_Y",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_Y);
+	msm_property_install_range(&c_conn->property_info, "DST_W",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_W);
+	msm_property_install_range(&c_conn->property_info, "DST_H",
+			0x0, 0, UINT_MAX, 0, CONNECTOR_PROP_DST_H);
+
+	/*
+	 * Populate info buffer
+	 */
+	if (format_list) {
+		sde_kms_info_start(info, "pixel_formats");
+		while (format_list->fourcc_format) {
+			sde_kms_info_append_format(info,
+					format_list->fourcc_format,
+					format_list->modifier);
+			++format_list;
+		}
+		sde_kms_info_stop(info);
+	}
+
+	sde_kms_info_add_keyint(info,
+			"wb_intf_index",
+			wb_dev->wb_idx - WB_0);
+
+	/* NOTE(review): sblk is dereferenced without a NULL check here,
+	 * although other paths in this file guard on wb_cfg->sblk — confirm
+	 * sblk is always populated by the catalog.
+	 */
+	sde_kms_info_add_keyint(info,
+			"maxlinewidth",
+			wb_dev->wb_cfg->sblk->maxlinewidth);
+
+	sde_kms_info_start(info, "features");
+	if (wb_dev->wb_cfg && (wb_dev->wb_cfg->features & SDE_WB_UBWC_1_0))
+		sde_kms_info_append(info, "wb_ubwc");
+	sde_kms_info_stop(info);
+
+	return 0;
+}
+
+/*
+ * sde_wb_get_output_fb - locked wrapper returning the current output
+ * framebuffer of the writeback connector's state, or NULL.
+ */
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
+{
+	struct drm_framebuffer *fb;
+
+	if (!wb_dev || !wb_dev->connector) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	fb = sde_wb_connector_state_get_output_fb(wb_dev->connector->state);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return fb;
+}
+
+/*
+ * sde_wb_get_output_roi - locked wrapper reading the output ROI from the
+ * writeback connector's state. Returns 0 on success or a negative errno.
+ */
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
+{
+	int rc;
+
+	if (!wb_dev || !wb_dev->connector || !roi) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+	rc = sde_wb_connector_state_get_output_roi(
+			wb_dev->connector->state, roi);
+	mutex_unlock(&wb_dev->wb_lock);
+
+	return rc;
+}
+
+/* Return the number of writeback devices registered on sde_wb_list. */
+u32 sde_wb_get_num_of_displays(void)
+{
+	u32 count = 0;
+	struct sde_wb_device *wb_dev;
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(wb_dev, &sde_wb_list, wb_list) {
+		count++;
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	return count;
+}
+
+/*
+ * wb_display_get_displays - copy up to max_display_count writeback device
+ * pointers into display_array. Returns the number of entries filled
+ * (0 on invalid arguments).
+ */
+int wb_display_get_displays(void **display_array, u32 max_display_count)
+{
+	struct sde_wb_device *curr;
+	int i = 0;
+
+	SDE_DEBUG("\n");
+
+	if (!display_array || !max_display_count) {
+		if (!display_array)
+			SDE_ERROR("invalid param\n");
+		return 0;
+	}
+
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(curr, &sde_wb_list, wb_list) {
+		if (i >= max_display_count)
+			break;
+		display_array[i++] = curr;
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	return i;
+}
+
+/*
+ * sde_wb_config - ioctl handler configuring a writeback connector.
+ * Resolves the connector id supplied by userspace to a registered
+ * writeback device, applies the requested mode list / connection state,
+ * then generates a hotplug event. Returns 0 or a negative errno.
+ */
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+		struct drm_file *file_priv)
+{
+	struct sde_drm_wb_cfg *config = data;
+	struct msm_drm_private *priv;
+	struct sde_wb_device *wb_dev = NULL;
+	struct sde_wb_device *curr;
+	struct drm_connector *connector;
+	uint32_t flags;
+	uint32_t connector_id;
+	uint32_t count_modes;
+	uint64_t modes;
+	int rc;
+
+	if (!drm_dev || !data) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	flags = config->flags;
+	connector_id = config->connector_id;
+	count_modes = config->count_modes;
+	modes = config->modes;
+
+	priv = drm_dev->dev_private;
+
+	connector = drm_connector_find(drm_dev, connector_id);
+	if (!connector) {
+		SDE_ERROR("failed to find connector\n");
+		rc = -ENOENT;
+		goto fail;
+	}
+
+	/* map the DRM connector back to its writeback device */
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry(curr, &sde_wb_list, wb_list) {
+		if (curr->connector == connector) {
+			wb_dev = curr;
+			break;
+		}
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	if (!wb_dev) {
+		SDE_ERROR("failed to find wb device\n");
+		rc = -ENOENT;
+		goto fail;
+	}
+
+	mutex_lock(&wb_dev->wb_lock);
+
+	/* modes is a userspace pointer packed into a u64 */
+	rc = sde_wb_connector_set_modes(wb_dev, count_modes,
+		(struct drm_mode_modeinfo __user *) (uintptr_t) modes,
+		(flags & SDE_DRM_WB_CFG_FLAGS_CONNECTED) ? true : false);
+
+	mutex_unlock(&wb_dev->wb_lock);
+	drm_helper_hpd_irq_event(drm_dev);
+fail:
+	return rc;
+}
+
+/**
+ * _sde_wb_dev_init - perform device initialization
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 on success; -EINVAL if wb_dev is NULL
+ */
+static int _sde_wb_dev_init(struct sde_wb_device *wb_dev)
+{
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* no device-level setup is required yet */
+	return 0;
+}
+
+/**
+ * _sde_wb_dev_deinit - perform device de-initialization
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 on success; -EINVAL if wb_dev is NULL
+ */
+static int _sde_wb_dev_deinit(struct sde_wb_device *wb_dev)
+{
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* no device-level teardown is required yet */
+	return 0;
+}
+
+/**
+ * sde_wb_bind - bind writeback device with controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ * Returns: Zero on success
+ */
+static int sde_wb_bind(struct device *dev, struct device *master, void *data)
+{
+	struct sde_wb_device *wb;
+
+	if (!dev || !master) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	wb = platform_get_drvdata(to_platform_device(dev));
+	if (!wb) {
+		SDE_ERROR("invalid wb device\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* latch the DRM device owned by the master component */
+	mutex_lock(&wb->wb_lock);
+	wb->drm_dev = dev_get_drvdata(master);
+	mutex_unlock(&wb->wb_lock);
+
+	return 0;
+}
+
+/**
+ * sde_wb_unbind - unbind writeback from controlling device
+ * @dev: Pointer to base of platform device
+ * @master: Pointer to container of drm device
+ * @data: Pointer to private data
+ */
+static void sde_wb_unbind(struct device *dev,
+		struct device *master, void *data)
+{
+	struct sde_wb_device *wb;
+
+	if (!dev) {
+		SDE_ERROR("invalid params\n");
+		return;
+	}
+
+	wb = platform_get_drvdata(to_platform_device(dev));
+	if (!wb) {
+		SDE_ERROR("invalid wb device\n");
+		return;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* drop the reference to the master's DRM device */
+	mutex_lock(&wb->wb_lock);
+	wb->drm_dev = NULL;
+	mutex_unlock(&wb->wb_lock);
+}
+
+/* component callbacks used to attach to / detach from the master DRM device */
+static const struct component_ops sde_wb_comp_ops = {
+	.bind = sde_wb_bind,
+	.unbind = sde_wb_unbind,
+};
+
+/**
+ * sde_wb_drm_init - perform DRM initialization
+ * @wb_dev: Pointer to writeback device
+ * @encoder: Pointer to associated encoder
+ * Returns: 0 on success; -EINVAL if arguments are invalid
+ */
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
+{
+	int rc = 0;
+
+	if (!wb_dev || !wb_dev->drm_dev || !encoder) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	mutex_lock(&wb_dev->wb_lock);
+
+	/*
+	 * Resolve the hardware writeback block for this instance from the
+	 * SDE catalog when the KMS private data is already available.  An
+	 * out-of-range index leaves wb_idx at the SDE_NONE set in probe.
+	 */
+	if (wb_dev->drm_dev->dev_private) {
+		struct msm_drm_private *priv = wb_dev->drm_dev->dev_private;
+		struct sde_kms *sde_kms = to_sde_kms(priv->kms);
+
+		if (wb_dev->index < sde_kms->catalog->wb_count) {
+			wb_dev->wb_idx = sde_kms->catalog->wb[wb_dev->index].id;
+			wb_dev->wb_cfg = &sde_kms->catalog->wb[wb_dev->index];
+		}
+	}
+
+	/* from here on, track the encoder's DRM device */
+	wb_dev->drm_dev = encoder->dev;
+	wb_dev->encoder = encoder;
+	mutex_unlock(&wb_dev->wb_lock);
+	return rc;
+}
+
+/**
+ * sde_wb_drm_deinit - perform DRM de-initialization
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 on success; -EINVAL if wb_dev is NULL
+ */
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
+{
+	if (!wb_dev) {
+		SDE_ERROR("invalid params\n");
+		return -EINVAL;
+	}
+
+	SDE_DEBUG("\n");
+
+	/* nothing to undo; sde_wb_drm_init only caches pointers */
+	return 0;
+}
+
+/**
+ * sde_wb_probe - load writeback module
+ * @pdev: Pointer to platform device
+ *
+ * Parses the device tree properties, registers the device on the global
+ * writeback list and adds it to the component framework.
+ *
+ * Returns: 0 on success; negative error code otherwise
+ */
+static int sde_wb_probe(struct platform_device *pdev)
+{
+	struct sde_wb_device *wb_dev;
+	int ret;
+
+	wb_dev = devm_kzalloc(&pdev->dev, sizeof(*wb_dev), GFP_KERNEL);
+	if (!wb_dev)
+		return -ENOMEM;
+
+	SDE_DEBUG("\n");
+
+	ret = of_property_read_u32(pdev->dev.of_node, "cell-index",
+			&wb_dev->index);
+	if (ret) {
+		SDE_DEBUG("cell index not set, default to 0\n");
+		wb_dev->index = 0;
+	}
+
+	wb_dev->name = of_get_property(pdev->dev.of_node, "label", NULL);
+	if (!wb_dev->name) {
+		SDE_DEBUG("label not set, default to unknown\n");
+		wb_dev->name = "unknown";
+	}
+
+	wb_dev->wb_idx = SDE_NONE;
+
+	mutex_init(&wb_dev->wb_lock);
+	platform_set_drvdata(pdev, wb_dev);
+
+	mutex_lock(&sde_wb_list_lock);
+	list_add(&wb_dev->wb_list, &sde_wb_list);
+	mutex_unlock(&sde_wb_list_lock);
+
+	/*
+	 * Propagate any failure from here on: the original code could
+	 * return a stale 0 when _sde_wb_dev_init() failed, and it left
+	 * the devm-allocated wb_dev on the global list after devm freed
+	 * it on probe failure (use-after-free via the list).
+	 */
+	ret = _sde_wb_dev_init(wb_dev);
+	if (ret) {
+		pr_err("wb device init failed: %d\n", ret);
+		goto fail_cleanup;
+	}
+
+	ret = component_add(&pdev->dev, &sde_wb_comp_ops);
+	if (ret) {
+		pr_err("component add failed\n");
+		goto fail_cleanup;
+	}
+
+	return 0;
+
+fail_cleanup:
+	mutex_lock(&sde_wb_list_lock);
+	list_del(&wb_dev->wb_list);
+	mutex_unlock(&sde_wb_list_lock);
+	mutex_destroy(&wb_dev->wb_lock);
+	platform_set_drvdata(pdev, NULL);
+	return ret;
+}
+
+/**
+ * sde_wb_remove - unload writeback module
+ * @pdev: Pointer to platform device
+ * Returns: Zero
+ */
+static int sde_wb_remove(struct platform_device *pdev)
+{
+	struct sde_wb_device *wb_dev;
+	struct sde_wb_device *pos, *tmp;
+
+	wb_dev = platform_get_drvdata(pdev);
+	if (!wb_dev)
+		return 0;
+
+	SDE_DEBUG("\n");
+
+	(void)_sde_wb_dev_deinit(wb_dev);
+
+	/* take this device off the global writeback list */
+	mutex_lock(&sde_wb_list_lock);
+	list_for_each_entry_safe(pos, tmp, &sde_wb_list, wb_list) {
+		if (pos == wb_dev) {
+			list_del(&pos->wb_list);
+			break;
+		}
+	}
+	mutex_unlock(&sde_wb_list_lock);
+
+	kfree(wb_dev->modes);
+	mutex_destroy(&wb_dev->wb_lock);
+
+	platform_set_drvdata(pdev, NULL);
+	devm_kfree(&pdev->dev, wb_dev);
+
+	return 0;
+}
+
+/* device tree match table for the writeback display device */
+static const struct of_device_id dt_match[] = {
+	{ .compatible = "qcom,wb-display"},
+	{}
+};
+
+/* platform driver glue for the sde writeback device */
+static struct platform_driver sde_wb_driver = {
+	.probe = sde_wb_probe,
+	.remove = sde_wb_remove,
+	.driver = {
+		.name = "sde_wb",
+		.of_match_table = dt_match,
+	},
+};
+
+/* module entry point: register the writeback platform driver */
+static int __init sde_wb_register(void)
+{
+	return platform_driver_register(&sde_wb_driver);
+}
+
+/* module exit point: unregister the writeback platform driver */
+static void __exit sde_wb_unregister(void)
+{
+	platform_driver_unregister(&sde_wb_driver);
+}
+
+module_init(sde_wb_register);
+module_exit(sde_wb_unregister);
diff --git a/drivers/gpu/drm/msm/sde/sde_wb.h b/drivers/gpu/drm/msm/sde/sde_wb.h
new file mode 100644
index 000000000000..4e335956db55
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde/sde_wb.h
@@ -0,0 +1,321 @@
+/* Copyright (c) 2015-2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_WB_H__
+#define __SDE_WB_H__
+
+#include <linux/platform_device.h>
+
+#include "msm_kms.h"
+#include "sde_kms.h"
+#include "sde_connector.h"
+
+/**
+ * struct sde_wb_device - Writeback device context
+ * @drm_dev: Pointer to controlling DRM device
+ * @index: Index of hardware instance from device tree
+ * @wb_idx: Writeback identifier of enum sde_wb
+ * @wb_cfg: Writeback configuration catalog
+ * @name: Name of writeback device from device tree
+ * @wb_list: List of all writeback devices
+ * @wb_lock: Serialization lock for writeback context structure
+ * @connector: Connector associated with writeback device
+ * @encoder: Encoder associated with writeback device
+ * @detect_status: Connection status of this writeback connector
+ * @count_modes: Length of writeback connector modes array
+ * @modes: Writeback connector modes array
+ */
+struct sde_wb_device {
+	struct drm_device *drm_dev;
+
+	u32 index;
+	u32 wb_idx;
+	struct sde_wb_cfg *wb_cfg;
+	const char *name;
+
+	struct list_head wb_list;
+	struct mutex wb_lock;
+
+	struct drm_connector *connector;
+	struct drm_encoder *encoder;
+
+	enum drm_connector_status detect_status;
+	u32 count_modes;
+	struct drm_mode_modeinfo *modes;
+};
+
+/**
+ * sde_wb_get_index - get device index of the given writeback device
+ * @wb_dev: Pointer to writeback device
+ * Returns: Index of hardware instance, or -1 if wb_dev is NULL
+ */
+static inline
+int sde_wb_get_index(struct sde_wb_device *wb_dev)
+{
+	if (!wb_dev)
+		return -1;
+
+	return wb_dev->index;
+}
+
+#ifdef CONFIG_DRM_SDE_WB
+/**
+ * sde_wb_get_output_fb - get framebuffer in current atomic state
+ * @wb_dev: Pointer to writeback device
+ * Returns: Pointer to framebuffer
+ */
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_get_output_roi - get region-of-interest in current atomic state
+ * @wb_dev: Pointer to writeback device
+ * @roi: Pointer to region of interest
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi);
+
+/**
+ * sde_wb_get_num_of_displays - get total number of writeback devices
+ * Returns: Number of writeback devices
+ */
+u32 sde_wb_get_num_of_displays(void);
+
+/**
+ * wb_display_get_displays - returns pointers for supported display devices
+ * @display_array: Pointer to display array to be filled
+ * @max_display_count: Size of display_array
+ * @Returns: Number of display entries filled
+ */
+int wb_display_get_displays(void **display_array, u32 max_display_count);
+
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active);
+bool sde_wb_is_active(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_drm_init - perform DRM initialization
+ * @wb_dev: Pointer to writeback device
+ * @encoder: Pointer to associated encoder
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder);
+
+/**
+ * sde_wb_drm_deinit - perform DRM de-initialization
+ * @wb_dev: Pointer to writeback device
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev);
+
+/**
+ * sde_wb_config - setup connection status and available drm modes of the
+ * given writeback connector
+ * @drm_dev: Pointer to DRM device
+ * @data: Pointer to writeback configuration
+ * @file_priv: Pointer file private data
+ * Returns: 0 if success; error code otherwise
+ *
+ * This function will initiate hot-plug detection event.
+ */
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * sde_wb_connector_post_init - perform writeback specific initialization
+ * @connector: Pointer to drm connector structure
+ * @info: Pointer to connector info
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_post_init(struct drm_connector *connector,
+ void *info,
+ void *display);
+
+/**
+ * sde_wb_connector_detect - perform writeback connection status detection
+ * @connector: Pointer to connector
+ * @force: Indicate force detection
+ * @display: Pointer to writeback device
+ * Returns: connector status
+ */
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+ bool force,
+ void *display);
+
+/**
+ * sde_wb_connector_get_modes - get display modes of connector
+ * @connector: Pointer to connector
+ * @display: Pointer to writeback device
+ * Returns: Number of modes
+ *
+ * If display modes are not specified in writeback configuration IOCTL, this
+ * function will install default EDID modes up to maximum resolution support.
+ */
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display);
+
+/**
+ * sde_wb_connector_set_property - set atomic connector property
+ * @connector: Pointer to drm connector structure
+ * @state: Pointer to drm connector state structure
+ * @property_index: DRM property index
+ * @value: Incoming property value
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_connector_set_property(struct drm_connector *connector,
+ struct drm_connector_state *state,
+ int property_index,
+ uint64_t value,
+ void *display);
+
+/**
+ * sde_wb_get_info - retrieve writeback 'display' information
+ * @info: Pointer to display info structure
+ * @display: Pointer to private display structure
+ * Returns: Zero on success
+ */
+int sde_wb_get_info(struct msm_display_info *info, void *display);
+
+/**
+ * sde_wb_connector_get_wb - retrieve writeback device of the given connector
+ * @connector: Pointer to drm connector
+ * Returns: Pointer to writeback device on success; NULL otherwise
+ */
+static inline
+struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
+{
+	if (!connector) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	/* only virtual connectors are backed by a writeback device */
+	if (connector->connector_type != DRM_MODE_CONNECTOR_VIRTUAL) {
+		SDE_ERROR("invalid params\n");
+		return NULL;
+	}
+
+	return sde_connector_get_display(connector);
+}
+
+/**
+ * sde_wb_connector_state_get_output_fb - get framebuffer of given state
+ * @state: Pointer to connector state
+ * Returns: Pointer to framebuffer
+ */
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state);
+
+/**
+ * sde_wb_connector_state_get_output_roi - get roi from given atomic state
+ * @state: Pointer to atomic state
+ * @roi: Pointer to region of interest
+ * Returns: 0 if success; error code otherwise
+ */
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+ struct sde_rect *roi);
+
+#else
+/*
+ * Stub implementations used when writeback support (CONFIG_DRM_SDE_WB)
+ * is disabled: callers see benign no-op defaults.
+ */
+static inline
+struct drm_framebuffer *sde_wb_get_output_fb(struct sde_wb_device *wb_dev)
+{
+	return NULL;
+}
+static inline
+int sde_wb_get_output_roi(struct sde_wb_device *wb_dev, struct sde_rect *roi)
+{
+	return 0;
+}
+static inline
+u32 sde_wb_get_num_of_displays(void)
+{
+	return 0;
+}
+static inline
+int wb_display_get_displays(void **display_array, u32 max_display_count)
+{
+	return 0;
+}
+static inline
+void sde_wb_set_active_state(struct sde_wb_device *wb_dev, bool is_active)
+{
+}
+static inline
+bool sde_wb_is_active(struct sde_wb_device *wb_dev)
+{
+	return false;
+}
+static inline
+int sde_wb_drm_init(struct sde_wb_device *wb_dev, struct drm_encoder *encoder)
+{
+	return 0;
+}
+static inline
+int sde_wb_drm_deinit(struct sde_wb_device *wb_dev)
+{
+	return 0;
+}
+static inline
+int sde_wb_config(struct drm_device *drm_dev, void *data,
+	struct drm_file *file_priv)
+{
+	return 0;
+}
+static inline
+int sde_wb_connector_post_init(struct drm_connector *connector,
+		void *info,
+		void *display)
+{
+	return 0;
+}
+static inline
+enum drm_connector_status
+sde_wb_connector_detect(struct drm_connector *connector,
+		bool force,
+		void *display)
+{
+	return connector_status_disconnected;
+}
+static inline
+int sde_wb_connector_get_modes(struct drm_connector *connector, void *display)
+{
+	return -EINVAL;
+}
+static inline
+int sde_wb_connector_set_property(struct drm_connector *connector,
+		struct drm_connector_state *state,
+		int property_index,
+		uint64_t value,
+		void *display)
+{
+	return 0;
+}
+static inline
+int sde_wb_get_info(struct msm_display_info *info, void *display)
+{
+	return 0;
+}
+static inline
+struct sde_wb_device *sde_wb_connector_get_wb(struct drm_connector *connector)
+{
+	return NULL;
+}
+
+static inline
+struct drm_framebuffer *
+sde_wb_connector_state_get_output_fb(struct drm_connector_state *state)
+{
+	return NULL;
+}
+
+static inline
+int sde_wb_connector_state_get_output_roi(struct drm_connector_state *state,
+	struct sde_rect *roi)
+{
+	return 0;
+}
+
+#endif
+#endif /* __SDE_WB_H__ */
+
diff --git a/drivers/gpu/drm/msm/sde_dbg.c b/drivers/gpu/drm/msm/sde_dbg.c
new file mode 100644
index 000000000000..50c0787d794d
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_dbg.c
@@ -0,0 +1,2323 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+
+#include "sde_dbg.h"
+#include "sde/sde_hw_catalog.h"
+
+#define SDE_DBG_BASE_MAX 10
+
+#define DEFAULT_PANIC 1
+#define DEFAULT_REGDUMP SDE_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_SDE SDE_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT SDE_DBG_DUMP_IN_MEM
+#define DEFAULT_BASE_REG_CNT 0x100
+#define GROUP_BYTES 4
+#define ROW_BYTES 16
+#define RANGE_NAME_LEN 40
+#define REG_BASE_NAME_LEN 80
+
+#define DBGBUS_FLAGS_DSPP BIT(0)
+#define DBGBUS_DSPP_STATUS 0x34C
+
+#define DBGBUS_NAME_SDE "sde"
+#define DBGBUS_NAME_VBIF_RT "vbif_rt"
+
+/* offsets from sde top address for the debug buses */
+#define DBGBUS_SSPP0 0x188
+#define DBGBUS_SSPP1 0x298
+#define DBGBUS_DSPP 0x348
+#define DBGBUS_PERIPH 0x418
+
+#define TEST_MASK(id, tp) ((id << 4) | (tp << 1) | BIT(0))
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+/* print debug ranges in groups of 4 u32s */
+#define REG_DUMP_ALIGN 16
+#define DBG_CTRL_STOP_FTRACE BIT(0)
+#define DBG_CTRL_PANIC_UNDERRUN BIT(1)
+#define DBG_CTRL_MAX BIT(2)
+
+/**
+ * struct sde_dbg_reg_offset - tracking for start and end of region
+ * @start: start offset
+ * @end: end offset
+ */
+struct sde_dbg_reg_offset {
+	u32 start;
+	u32 end;
+};
+
+/**
+ * struct sde_dbg_reg_range - register dumping named sub-range
+ * @head: list node (linked into a reg base's sub_range_list)
+ * @reg_dump: address for the mem dump
+ * @range_name: name of this range
+ * @offset: offsets for range to dump
+ * @xin_id: client xin id
+ */
+struct sde_dbg_reg_range {
+	struct list_head head;
+	u32 *reg_dump;
+	char range_name[RANGE_NAME_LEN];
+	struct sde_dbg_reg_offset offset;
+	uint32_t xin_id;
+};
+
+/**
+ * struct sde_dbg_reg_base - register region base.
+ * May have named sub-ranges: when present, only those ranges are dumped;
+ * when absent, dumping covers base -> max_offset.
+ * @reg_base_head: head of this node
+ * @sub_range_list: head to the list with dump ranges
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len: buffer length used for manual register dumping
+ * @reg_dump: address for the mem dump if no ranges used
+ */
+struct sde_dbg_reg_base {
+	struct list_head reg_base_head;
+	struct list_head sub_range_list;
+	char name[REG_BASE_NAME_LEN];
+	void __iomem *base;
+	size_t off;
+	size_t cnt;
+	size_t max_offset;
+	char *buf;
+	size_t buf_len;
+	u32 *reg_dump;
+};
+
+/* one test point selection on the SDE debug bus */
+struct sde_debug_bus_entry {
+	u32 wr_addr;
+	u32 block_id;
+	u32 test_id;
+};
+
+/* one block of test points on the VBIF debug bus */
+struct vbif_debug_bus_entry {
+	u32 disable_bus_addr;
+	u32 block_bus_addr;
+	u32 bit_offset;
+	u32 block_cnt;
+	u32 test_pnt_start;
+	u32 test_pnt_cnt;
+};
+
+/* state common to all debug bus flavors, including captured output */
+struct sde_dbg_debug_bus_common {
+	char *name;
+	u32 enable_mask;
+	bool include_in_deferred_work;
+	u32 flags;
+	u32 entries_size;
+	u32 *dumped_content;
+};
+
+struct sde_dbg_sde_debug_bus {
+	struct sde_dbg_debug_bus_common cmn;
+	struct sde_debug_bus_entry *entries;
+	u32 top_blk_off;
+};
+
+struct sde_dbg_vbif_debug_bus {
+	struct sde_dbg_debug_bus_common cmn;
+	struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct sde_dbg_base - global sde debug base structure
+ * @evtlog: event log instance
+ * @reg_base_list: list of register dumping regions
+ * @root: base debugfs root
+ * @dev: device pointer
+ * @mutex: mutex to serialize access to serialize dumps, debugfs access
+ * @power_ctrl: callback structure for enabling power for reading hw registers
+ * @req_dump_blks: list of blocks requested for dumping
+ * @panic_on_err: whether to kernel panic after triggering dump via debugfs
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @work_panic: panic after dump if internal user passed "panic" special region
+ * @enable_reg_dump: whether to dump registers into memory, kernel log, or both
+ * @dbgbus_sde: debug bus structure for the sde
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ * @dump_all: dump all entries in register dump
+ * @debugfs_ctrl: debug feature control mask (presumably DBG_CTRL_* bits —
+ *	verify against the debugfs handlers)
+ */
+static struct sde_dbg_base {
+	struct sde_dbg_evtlog *evtlog;
+	struct list_head reg_base_list;
+	struct dentry *root;
+	struct device *dev;
+	struct mutex mutex;
+	struct sde_dbg_power_ctrl power_ctrl;
+
+	struct sde_dbg_reg_base *req_dump_blks[SDE_DBG_BASE_MAX];
+
+	u32 panic_on_err;
+	struct work_struct dump_work;
+	bool work_panic;
+	u32 enable_reg_dump;
+
+	struct sde_dbg_sde_debug_bus dbgbus_sde;
+	struct sde_dbg_vbif_debug_bus dbgbus_vbif_rt;
+	bool dump_all;
+	u32 debugfs_ctrl;
+} sde_dbg_base;
+
+/* sde_dbg_base_evtlog - global pointer to main sde event log for macro use */
+struct sde_dbg_evtlog *sde_dbg_base_evtlog;
+
+static struct sde_debug_bus_entry dbg_bus_sde_8998[] = {
+
+ /* Unpack 0 sspp 0*/
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+ { DBGBUS_SSPP0, 85, 2 },
+
+ /* Upack 0 sspp 1*/
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+ { DBGBUS_SSPP1, 85, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* cursor 1 */
+ { DBGBUS_SSPP1, 80, 0 },
+ { DBGBUS_SSPP1, 80, 1 },
+ { DBGBUS_SSPP1, 80, 2 },
+ { DBGBUS_SSPP1, 80, 3 },
+ { DBGBUS_SSPP1, 80, 4 },
+ { DBGBUS_SSPP1, 80, 5 },
+ { DBGBUS_SSPP1, 80, 6 },
+ { DBGBUS_SSPP1, 80, 7 },
+
+ { DBGBUS_SSPP1, 81, 0 },
+ { DBGBUS_SSPP1, 81, 1 },
+ { DBGBUS_SSPP1, 81, 2 },
+ { DBGBUS_SSPP1, 81, 3 },
+ { DBGBUS_SSPP1, 81, 4 },
+ { DBGBUS_SSPP1, 81, 5 },
+ { DBGBUS_SSPP1, 81, 6 },
+ { DBGBUS_SSPP1, 81, 7 },
+
+ { DBGBUS_SSPP1, 82, 0 },
+ { DBGBUS_SSPP1, 82, 1 },
+ { DBGBUS_SSPP1, 82, 2 },
+ { DBGBUS_SSPP1, 82, 3 },
+ { DBGBUS_SSPP1, 82, 4 },
+ { DBGBUS_SSPP1, 82, 5 },
+ { DBGBUS_SSPP1, 82, 6 },
+ { DBGBUS_SSPP1, 82, 7 },
+
+ { DBGBUS_SSPP1, 83, 0 },
+ { DBGBUS_SSPP1, 83, 1 },
+ { DBGBUS_SSPP1, 83, 2 },
+ { DBGBUS_SSPP1, 83, 3 },
+ { DBGBUS_SSPP1, 83, 4 },
+ { DBGBUS_SSPP1, 83, 5 },
+ { DBGBUS_SSPP1, 83, 6 },
+ { DBGBUS_SSPP1, 83, 7 },
+
+ { DBGBUS_SSPP1, 84, 0 },
+ { DBGBUS_SSPP1, 84, 1 },
+ { DBGBUS_SSPP1, 84, 2 },
+ { DBGBUS_SSPP1, 84, 3 },
+ { DBGBUS_SSPP1, 84, 4 },
+ { DBGBUS_SSPP1, 84, 5 },
+ { DBGBUS_SSPP1, 84, 6 },
+ { DBGBUS_SSPP1, 84, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0 },
+ { DBGBUS_DSPP, 33, 0 },
+ { DBGBUS_DSPP, 35, 0 },
+ { DBGBUS_DSPP, 42, 0 },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0 },
+ { DBGBUS_DSPP, 34, 0 },
+ { DBGBUS_DSPP, 36, 0 },
+ { DBGBUS_DSPP, 43, 0 },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0},
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 0},
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7},
+
+ { DBGBUS_DSPP, 64, 0},
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7},
+
+ { DBGBUS_DSPP, 65, 0},
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7},
+
+ { DBGBUS_DSPP, 66, 0},
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7},
+
+ { DBGBUS_DSPP, 67, 0},
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7},
+
+ { DBGBUS_DSPP, 68, 0},
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7},
+
+ { DBGBUS_DSPP, 69, 0},
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7},
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 0},
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7},
+
+ { DBGBUS_DSPP, 71, 0},
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7},
+
+ { DBGBUS_DSPP, 72, 0},
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7},
+
+ { DBGBUS_DSPP, 73, 0},
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7},
+
+ { DBGBUS_DSPP, 74, 0},
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7},
+
+ { DBGBUS_DSPP, 75, 0},
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7},
+
+ { DBGBUS_DSPP, 76, 0},
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7},
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7},
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7},
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7},
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7},
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7},
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7},
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7},
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 9, 0},
+ { DBGBUS_SSPP0, 9, 1},
+ { DBGBUS_SSPP0, 9, 3},
+ { DBGBUS_SSPP0, 29, 0},
+ { DBGBUS_SSPP0, 29, 1},
+ { DBGBUS_SSPP0, 29, 3},
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 9, 0},
+ { DBGBUS_SSPP1, 9, 1},
+ { DBGBUS_SSPP1, 9, 3},
+ { DBGBUS_SSPP1, 29, 0},
+ { DBGBUS_SSPP1, 29, 1},
+ { DBGBUS_SSPP1, 29, 3},
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ { DBGBUS_PERIPH, 60, 0},
+};
+
/*
 * VBIF debug-bus test-point table for MSM8998.
 * Consumed by _sde_dbg_dump_vbif_dbg_bus()/_sde_dbg_dump_vbif_debug_bus_entry.
 * Fields follow struct vbif_debug_bus_entry, which is declared elsewhere —
 * presumably { disable_bus_addr, block_bus_addr, bit_offset, block_cnt,
 * test_pnt_start, test_pnt_cnt }; TODO confirm against the struct definition.
 */
static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
	{0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
	{0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
	{0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
	{0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
	{0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
	{0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
	{0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
};
+
+/**
+ * _sde_dbg_enable_power - use callback to turn power on for hw register access
+ * @enable: whether to turn power on or off
+ */
+static inline void _sde_dbg_enable_power(int enable)
+{
+ if (!sde_dbg_base.power_ctrl.enable_fn)
+ return;
+ sde_dbg_base.power_ctrl.enable_fn(
+ sde_dbg_base.power_ctrl.handle,
+ sde_dbg_base.power_ctrl.client,
+ enable);
+}
+
+/**
+ * _sde_dump_reg - helper function for dumping rotator register set content
+ * @dump_name: register set name
+ * @reg_dump_flag: dumping flag controlling in-log/memory dump location
+ * @base_addr: starting address of io region for calculating offsets to print
+ * @addr: starting address offset for dumping
+ * @len_bytes: range of the register set
+ * @dump_mem: output buffer for memory dump location option
+ * @from_isr: whether being called from isr context
+ */
+static void _sde_dump_reg(const char *dump_name, u32 reg_dump_flag,
+ char __iomem *base_addr, char __iomem *addr, size_t len_bytes,
+ u32 **dump_mem, bool from_isr)
+{
+ u32 in_log, in_mem, len_align, len_padded;
+ u32 *dump_addr = NULL;
+ char __iomem *end_addr;
+ int i;
+
+ if (!len_bytes)
+ return;
+
+ in_log = (reg_dump_flag & SDE_DBG_DUMP_IN_LOG);
+ in_mem = (reg_dump_flag & SDE_DBG_DUMP_IN_MEM);
+
+ pr_debug("%s: reg_dump_flag=%d in_log=%d in_mem=%d\n",
+ dump_name, reg_dump_flag, in_log, in_mem);
+
+ if (!in_log && !in_mem)
+ return;
+
+ if (in_log)
+ dev_info(sde_dbg_base.dev, "%s: start_offset 0x%lx len 0x%zx\n",
+ dump_name, addr - base_addr, len_bytes);
+
+ len_align = (len_bytes + REG_DUMP_ALIGN - 1) / REG_DUMP_ALIGN;
+ len_padded = len_align * REG_DUMP_ALIGN;
+ end_addr = addr + len_bytes;
+
+ if (in_mem) {
+ if (dump_mem && !(*dump_mem)) {
+ phys_addr_t phys = 0;
+ *dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
+ len_padded, &phys, GFP_KERNEL);
+ }
+
+ if (dump_mem && *dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(sde_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x reg_offset=0x%lx\n",
+ dump_name, dump_addr, len_padded,
+ addr - base_addr);
+ } else {
+ in_mem = 0;
+ pr_err("dump_mem: kzalloc fails!\n");
+ }
+ }
+
+ if (!from_isr)
+ _sde_dbg_enable_power(true);
+
+ for (i = 0; i < len_align; i++) {
+ u32 x0, x4, x8, xc;
+
+ x0 = (addr < end_addr) ? readl_relaxed(addr + 0x0) : 0;
+ x4 = (addr + 0x4 < end_addr) ? readl_relaxed(addr + 0x4) : 0;
+ x8 = (addr + 0x8 < end_addr) ? readl_relaxed(addr + 0x8) : 0;
+ xc = (addr + 0xc < end_addr) ? readl_relaxed(addr + 0xc) : 0;
+
+ if (in_log)
+ dev_info(sde_dbg_base.dev,
+ "0x%lx : %08x %08x %08x %08x\n",
+ addr - base_addr, x0, x4, x8, xc);
+
+ if (dump_addr) {
+ dump_addr[i * 4] = x0;
+ dump_addr[i * 4 + 1] = x4;
+ dump_addr[i * 4 + 2] = x8;
+ dump_addr[i * 4 + 3] = xc;
+ }
+
+ addr += REG_DUMP_ALIGN;
+ }
+
+ if (!from_isr)
+ _sde_dbg_enable_power(false);
+}
+
+/**
+ * _sde_dbg_get_dump_range - helper to retrieve dump length for a range node
+ * @range_node: range node to dump
+ * @max_offset: max offset of the register base
+ * @Return: length
+ */
+static u32 _sde_dbg_get_dump_range(struct sde_dbg_reg_offset *range_node,
+ size_t max_offset)
+{
+ u32 length = 0;
+
+ if ((range_node->start > range_node->end) ||
+ (range_node->end > max_offset) || (range_node->start == 0
+ && range_node->end == 0)) {
+ length = max_offset;
+ } else {
+ length = range_node->end - range_node->start;
+ }
+
+ return length;
+}
+
+static int _sde_dump_reg_range_cmp(void *priv, struct list_head *a,
+ struct list_head *b)
+{
+ struct sde_dbg_reg_range *ar, *br;
+
+ if (!a || !b)
+ return 0;
+
+ ar = container_of(a, struct sde_dbg_reg_range, head);
+ br = container_of(b, struct sde_dbg_reg_range, head);
+
+ return ar->offset.start - br->offset.start;
+}
+
+/**
+ * _sde_dump_reg_by_ranges - dump ranges or full range of the register blk base
+ * @dbg: register blk base structure
+ * @reg_dump_flag: dump target, memory, kernel log, or both
+ */
+static void _sde_dump_reg_by_ranges(struct sde_dbg_reg_base *dbg,
+ u32 reg_dump_flag)
+{
+ char __iomem *addr;
+ size_t len;
+ struct sde_dbg_reg_range *range_node;
+
+ if (!dbg || !dbg->base) {
+ pr_err("dbg base is null!\n");
+ return;
+ }
+
+ dev_info(sde_dbg_base.dev, "%s:=========%s DUMP=========\n", __func__,
+ dbg->name);
+
+ /* If there is a list to dump the registers by ranges, use the ranges */
+ if (!list_empty(&dbg->sub_range_list)) {
+ /* sort the list by start address first */
+ list_sort(NULL, &dbg->sub_range_list, _sde_dump_reg_range_cmp);
+ list_for_each_entry(range_node, &dbg->sub_range_list, head) {
+ len = _sde_dbg_get_dump_range(&range_node->offset,
+ dbg->max_offset);
+ addr = dbg->base + range_node->offset.start;
+ pr_debug("%s: range_base=0x%pK start=0x%x end=0x%x\n",
+ range_node->range_name,
+ addr, range_node->offset.start,
+ range_node->offset.end);
+
+ _sde_dump_reg(range_node->range_name, reg_dump_flag,
+ dbg->base, addr, len,
+ &range_node->reg_dump, false);
+ }
+ } else {
+ /* If there is no list to dump ranges, dump all registers */
+ dev_info(sde_dbg_base.dev,
+ "Ranges not found, will dump full registers\n");
+ dev_info(sde_dbg_base.dev, "base:0x%pK len:0x%zx\n", dbg->base,
+ dbg->max_offset);
+ addr = dbg->base;
+ len = dbg->max_offset;
+ _sde_dump_reg(dbg->name, reg_dump_flag, dbg->base, addr, len,
+ &dbg->reg_dump, false);
+ }
+}
+
+/**
+ * _sde_dump_reg_by_blk - dump a named register base region
+ * @blk_name: register blk name
+ */
+static void _sde_dump_reg_by_blk(const char *blk_name)
+{
+ struct sde_dbg_base *dbg_base = &sde_dbg_base;
+ struct sde_dbg_reg_base *blk_base;
+
+ if (!dbg_base)
+ return;
+
+ list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head) {
+ if (strlen(blk_base->name) &&
+ !strcmp(blk_base->name, blk_name)) {
+ _sde_dump_reg_by_ranges(blk_base,
+ dbg_base->enable_reg_dump);
+ break;
+ }
+ }
+}
+
+/**
+ * _sde_dump_reg_all - dump all register regions
+ */
+static void _sde_dump_reg_all(void)
+{
+ struct sde_dbg_base *dbg_base = &sde_dbg_base;
+ struct sde_dbg_reg_base *blk_base;
+
+ if (!dbg_base)
+ return;
+
+ list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head)
+ if (strlen(blk_base->name))
+ _sde_dump_reg_by_blk(blk_base->name);
+}
+
+/**
+ * _sde_dump_get_blk_addr - retrieve register block address by name
+ * @blk_name: register blk name
+ * @Return: register blk base, or NULL
+ */
+static struct sde_dbg_reg_base *_sde_dump_get_blk_addr(const char *blk_name)
+{
+ struct sde_dbg_base *dbg_base = &sde_dbg_base;
+ struct sde_dbg_reg_base *blk_base;
+
+ list_for_each_entry(blk_base, &dbg_base->reg_base_list, reg_base_head)
+ if (strlen(blk_base->name) && !strcmp(blk_base->name, blk_name))
+ return blk_base;
+
+ return NULL;
+}
+
/**
 * _sde_dbg_dump_sde_dbg_bus - dump all test points of an SDE debug bus
 * @bus: sde debug bus descriptor (entries table + common state)
 *
 * For every entry, programs the block/test id into the bus select
 * register, reads back the status word, and emits it to the kernel log
 * and/or a DMA-coherent buffer depending on the bus enable_mask.
 * The dump buffer stores 4 words per entry: wr_addr, block_id, test_id,
 * status.  Power is held on across the whole dump.
 */
static void _sde_dbg_dump_sde_dbg_bus(struct sde_dbg_sde_debug_bus *bus)
{
	bool in_log, in_mem;
	u32 **dump_mem = NULL;
	u32 *dump_addr = NULL;
	u32 status = 0;
	struct sde_debug_bus_entry *head;
	phys_addr_t phys = 0;
	int list_size;
	int i;
	u32 offset;
	void __iomem *mem_base = NULL;
	struct sde_dbg_reg_base *reg_base;

	if (!bus || !bus->cmn.entries_size)
		return;

	/* resolve the mapped register base whose name matches this bus */
	list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
			reg_base_head)
		if (strlen(reg_base->name) &&
			!strcmp(reg_base->name, bus->cmn.name))
			mem_base = reg_base->base + bus->top_blk_off;

	if (!mem_base) {
		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
		return;
	}

	dump_mem = &bus->cmn.dumped_content;

	/* will keep in memory 4 entries of 4 bytes each */
	list_size = (bus->cmn.entries_size * 4 * 4);

	in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
	in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);

	if (!in_log && !in_mem)
		return;

	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
			bus->cmn.name);

	if (in_mem) {
		/* dump buffer is allocated once and reused on later dumps */
		if (!(*dump_mem))
			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
				list_size, &phys, GFP_KERNEL);

		if (*dump_mem) {
			dump_addr = *dump_mem;
			dev_info(sde_dbg_base.dev,
				"%s: start_addr:0x%pK len:0x%x\n",
				__func__, dump_addr, list_size);
		} else {
			/* fall back to log-only output */
			in_mem = false;
			pr_err("dump_mem: allocation fails\n");
		}
	}

	_sde_dbg_enable_power(true);
	for (i = 0; i < bus->cmn.entries_size; i++) {
		head = bus->entries + i;
		/* select the block/test point on the debug bus */
		writel_relaxed(TEST_MASK(head->block_id, head->test_id),
				mem_base + head->wr_addr);
		wmb(); /* make sure test bits were written */

		/* DSPP buses report through a shared status register */
		if (bus->cmn.flags & DBGBUS_FLAGS_DSPP)
			offset = DBGBUS_DSPP_STATUS;
		else
			offset = head->wr_addr + 0x4;

		status = readl_relaxed(mem_base + offset);

		if (in_log)
			dev_info(sde_dbg_base.dev,
					"waddr=0x%x blk=%d tst=%d val=0x%x\n",
					head->wr_addr, head->block_id,
					head->test_id, status);

		if (dump_addr && in_mem) {
			dump_addr[i*4] = head->wr_addr;
			dump_addr[i*4 + 1] = head->block_id;
			dump_addr[i*4 + 2] = head->test_id;
			dump_addr[i*4 + 3] = status;
		}

		/* Disable debug bus once we are done */
		writel_relaxed(0, mem_base + head->wr_addr);

	}
	_sde_dbg_enable_power(false);

	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
			bus->cmn.name);
}
+
/**
 * _sde_dbg_dump_vbif_debug_bus_entry - dump one vbif debug bus entry group
 * @head: table entry describing the block group and its test points
 * @mem_base: mapped vbif register base
 * @dump_addr: output buffer cursor, or NULL for log-only dump
 * @in_log: whether to print each sampled value to the kernel log
 *
 * Selects each block (bit in block_bus_addr) and each test point
 * (block_bus_addr + 4), then samples MMSS_VBIF_TEST_BUS_OUT.  Emits 4
 * words per test point to @dump_addr: bus addr, block index, point
 * index, value.
 *
 * NOTE(review): callers advance dump_addr by block_cnt * test_pnt_cnt * 4,
 * but this loop only writes points in [test_pnt_start, test_pnt_cnt) —
 * confirm the buffer layout is intended to leave the leading slots unused.
 */
static void _sde_dbg_dump_vbif_debug_bus_entry(
		struct vbif_debug_bus_entry *head, void __iomem *mem_base,
		u32 *dump_addr, bool in_log)
{
	int i, j;
	u32 val;

	if (!dump_addr && !in_log)
		return;

	for (i = 0; i < head->block_cnt; i++) {
		/* select the i-th block on the bus */
		writel_relaxed(1 << (i + head->bit_offset),
				mem_base + head->block_bus_addr);
		/* make sure the current bus block is enabled */
		wmb();
		for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
			writel_relaxed(j, mem_base + head->block_bus_addr + 4);
			/* make sure that test point is enabled */
			wmb();
			val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
			if (dump_addr) {
				*dump_addr++ = head->block_bus_addr;
				*dump_addr++ = i;
				*dump_addr++ = j;
				*dump_addr++ = val;
			}
			if (in_log)
				dev_info(sde_dbg_base.dev,
					"testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
					head->block_bus_addr, i, j, val);
		}
	}
}
+
/**
 * _sde_dbg_dump_vbif_dbg_bus - dump all entries of a vbif debug bus
 * @bus: vbif debug bus descriptor (entries table + common state)
 *
 * Resolves the mapped register base by bus name, sizes a dump buffer
 * from the per-entry block/test-point counts, forces the vbif core
 * clock on, and samples every test point via
 * _sde_dbg_dump_vbif_debug_bus_entry().
 */
static void _sde_dbg_dump_vbif_dbg_bus(struct sde_dbg_vbif_debug_bus *bus)
{
	bool in_log, in_mem;
	u32 **dump_mem = NULL;
	u32 *dump_addr = NULL;
	u32 value;
	struct vbif_debug_bus_entry *head;
	phys_addr_t phys = 0;
	int i, list_size = 0;
	void __iomem *mem_base = NULL;
	struct vbif_debug_bus_entry *dbg_bus;
	u32 bus_size;
	struct sde_dbg_reg_base *reg_base;

	if (!bus || !bus->cmn.entries_size)
		return;

	/* resolve register base by name (no top-block offset for vbif) */
	list_for_each_entry(reg_base, &sde_dbg_base.reg_base_list,
			reg_base_head)
		if (strlen(reg_base->name) &&
			!strcmp(reg_base->name, bus->cmn.name))
			mem_base = reg_base->base;

	if (!mem_base) {
		pr_err("unable to find mem_base for %s\n", bus->cmn.name);
		return;
	}

	dbg_bus = bus->entries;
	bus_size = bus->cmn.entries_size;
	list_size = bus->cmn.entries_size;
	dump_mem = &bus->cmn.dumped_content;

	dev_info(sde_dbg_base.dev, "======== start %s dump =========\n",
			bus->cmn.name);

	if (!dump_mem || !dbg_bus || !bus_size || !list_size)
		return;

	/* allocate memory for each test point */
	for (i = 0; i < bus_size; i++) {
		head = dbg_bus + i;
		list_size += (head->block_cnt * head->test_pnt_cnt);
	}

	/* 4 bytes * 4 entries for each test point*/
	list_size *= 16;

	in_log = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_LOG);
	in_mem = (bus->cmn.enable_mask & SDE_DBG_DUMP_IN_MEM);

	/* NOTE(review): returns here without printing the "end dump" banner */
	if (!in_log && !in_mem)
		return;

	if (in_mem) {
		/* dump buffer is allocated once and reused on later dumps */
		if (!(*dump_mem))
			*dump_mem = dma_alloc_coherent(sde_dbg_base.dev,
				list_size, &phys, GFP_KERNEL);

		if (*dump_mem) {
			dump_addr = *dump_mem;
			dev_info(sde_dbg_base.dev,
				"%s: start_addr:0x%pK len:0x%x\n",
				__func__, dump_addr, list_size);
		} else {
			/* fall back to log-only output */
			in_mem = false;
			pr_err("dump_mem: allocation fails\n");
		}
	}

	_sde_dbg_enable_power(true);

	/* force the vbif core clock on for the duration of the dump */
	value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
	writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);

	/* make sure that vbif core is on */
	wmb();

	for (i = 0; i < bus_size; i++) {
		head = dbg_bus + i;

		writel_relaxed(0, mem_base + head->disable_bus_addr);
		writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
		/* make sure that other bus is off */
		wmb();

		_sde_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
				in_log);
		/* advance past the 4-word slots reserved for this entry */
		if (dump_addr)
			dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
	}

	_sde_dbg_enable_power(false);

	dev_info(sde_dbg_base.dev, "======== end %s dump =========\n",
			bus->cmn.name);
}
+
+/**
+ * _sde_dump_array - dump array of register bases
+ * @blk_arr: array of register base pointers
+ * @len: length of blk_arr
+ * @do_panic: whether to trigger a panic after dumping
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_sde: whether to dump the sde debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _sde_dump_array(struct sde_dbg_reg_base *blk_arr[],
+ u32 len, bool do_panic, const char *name, bool dump_dbgbus_sde,
+ bool dump_dbgbus_vbif_rt, bool dump_all)
+{
+ int i;
+
+ mutex_lock(&sde_dbg_base.mutex);
+
+ for (i = 0; i < len; i++) {
+ if (blk_arr[i] != NULL)
+ _sde_dump_reg_by_ranges(blk_arr[i],
+ sde_dbg_base.enable_reg_dump);
+ }
+
+ if (dump_all)
+ sde_evtlog_dump_all(sde_dbg_base.evtlog);
+
+ if (dump_dbgbus_sde)
+ _sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde);
+
+ if (dump_dbgbus_vbif_rt)
+ _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
+
+ if (do_panic && sde_dbg_base.panic_on_err)
+ panic(name);
+
+ mutex_unlock(&sde_dbg_base.mutex);
+}
+
+/**
+ * _sde_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _sde_dump_work(struct work_struct *work)
+{
+ _sde_dump_array(sde_dbg_base.req_dump_blks,
+ ARRAY_SIZE(sde_dbg_base.req_dump_blks),
+ sde_dbg_base.work_panic, "evtlog_workitem",
+ sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work,
+ sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work,
+ sde_dbg_base.dump_all);
+}
+
+void sde_dbg_dump(bool queue_work, const char *name, ...)
+{
+ int i, index = 0;
+ bool do_panic = false;
+ bool dump_dbgbus_sde = false;
+ bool dump_dbgbus_vbif_rt = false;
+ bool dump_all = false;
+ va_list args;
+ char *blk_name = NULL;
+ struct sde_dbg_reg_base *blk_base = NULL;
+ struct sde_dbg_reg_base **blk_arr;
+ u32 blk_len;
+
+ if (!sde_evtlog_is_enabled(sde_dbg_base.evtlog, SDE_EVTLOG_DEFAULT))
+ return;
+
+ if (queue_work && work_pending(&sde_dbg_base.dump_work))
+ return;
+
+ blk_arr = &sde_dbg_base.req_dump_blks[0];
+ blk_len = ARRAY_SIZE(sde_dbg_base.req_dump_blks);
+
+ memset(sde_dbg_base.req_dump_blks, 0,
+ sizeof(sde_dbg_base.req_dump_blks));
+ sde_dbg_base.dump_all = false;
+
+ va_start(args, name);
+ i = 0;
+ while ((blk_name = va_arg(args, char*))) {
+ if (i++ >= SDE_EVTLOG_MAX_DATA) {
+ pr_err("could not parse all dump arguments\n");
+ break;
+ }
+ if (IS_ERR_OR_NULL(blk_name))
+ break;
+
+ blk_base = _sde_dump_get_blk_addr(blk_name);
+ if (blk_base) {
+ if (index < blk_len) {
+ blk_arr[index] = blk_base;
+ index++;
+ } else {
+ pr_err("insufficient space to to dump %s\n",
+ blk_name);
+ }
+ }
+ if (!strcmp(blk_name, "all"))
+ dump_all = true;
+
+ if (!strcmp(blk_name, "dbg_bus"))
+ dump_dbgbus_sde = true;
+
+ if (!strcmp(blk_name, "vbif_dbg_bus"))
+ dump_dbgbus_vbif_rt = true;
+
+ if (!strcmp(blk_name, "panic"))
+ do_panic = true;
+ }
+ va_end(args);
+
+ if (queue_work) {
+ /* schedule work to dump later */
+ sde_dbg_base.work_panic = do_panic;
+ sde_dbg_base.dbgbus_sde.cmn.include_in_deferred_work =
+ dump_dbgbus_sde;
+ sde_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+ dump_dbgbus_vbif_rt;
+ sde_dbg_base.dump_all = dump_all;
+ schedule_work(&sde_dbg_base.dump_work);
+ } else {
+ _sde_dump_array(blk_arr, blk_len, do_panic, name,
+ dump_dbgbus_sde, dump_dbgbus_vbif_rt, dump_all);
+ }
+}
+
+void sde_dbg_ctrl(const char *name, ...)
+{
+ int i = 0;
+ va_list args;
+ char *blk_name = NULL;
+
+
+ /* no debugfs controlled events are enabled, just return */
+ if (!sde_dbg_base.debugfs_ctrl)
+ return;
+
+ va_start(args, name);
+
+ while ((blk_name = va_arg(args, char*))) {
+ if (i++ >= SDE_EVTLOG_MAX_DATA) {
+ pr_err("could not parse all dbg arguments\n");
+ break;
+ }
+
+ if (IS_ERR_OR_NULL(blk_name))
+ break;
+
+ if (!strcmp(blk_name, "stop_ftrace") &&
+ sde_dbg_base.debugfs_ctrl &
+ DBG_CTRL_STOP_FTRACE) {
+ pr_debug("tracing off\n");
+ tracing_off();
+ }
+
+ if (!strcmp(blk_name, "panic_underrun") &&
+ sde_dbg_base.debugfs_ctrl &
+ DBG_CTRL_PANIC_UNDERRUN) {
+ pr_debug("panic underrun\n");
+ panic("underrun");
+ }
+ }
+
+}
+
+/*
+ * sde_dbg_debugfs_open - debugfs open handler for evtlog dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int sde_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+ if (!inode || !file)
+ return -EINVAL;
+
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * sde_evtlog_dump_read - debugfs read handler for evtlog dump
+ * @file: file handler
+ * @buff: user buffer content for debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_evtlog_dump_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ ssize_t len = 0;
+ char evtlog_buf[SDE_EVTLOG_BUF_MAX];
+
+ if (!buff || !ppos)
+ return -EINVAL;
+
+ len = sde_evtlog_dump_to_buffer(sde_dbg_base.evtlog, evtlog_buf,
+ SDE_EVTLOG_BUF_MAX, true);
+ if (len < 0 || len > count) {
+ pr_err("len is more than user buffer size");
+ return 0;
+ }
+
+ if (copy_to_user(buff, evtlog_buf, len))
+ return -EFAULT;
+ *ppos += len;
+
+ return len;
+}
+
+/**
+ * sde_evtlog_dump_write - debugfs write handler for evtlog dump
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_evtlog_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ _sde_dump_reg_all();
+
+ sde_evtlog_dump_all(sde_dbg_base.evtlog);
+
+ _sde_dbg_dump_sde_dbg_bus(&sde_dbg_base.dbgbus_sde);
+ _sde_dbg_dump_vbif_dbg_bus(&sde_dbg_base.dbgbus_vbif_rt);
+
+ if (sde_dbg_base.panic_on_err)
+ panic("sde");
+
+ return count;
+}
+
/* file ops for the "dump" debugfs node: read drains the event log,
 * write triggers a full register/evtlog/debug-bus dump
 */
static const struct file_operations sde_evtlog_fops = {
	.open = sde_dbg_debugfs_open,
	.read = sde_evtlog_dump_read,
	.write = sde_evtlog_dump_write,
};
+
+/**
+ * sde_dbg_ctrl_read - debugfs read handler for debug ctrl read
+ * @file: file handler
+ * @buff: user buffer content for debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_read(struct file *file, char __user *buff,
+ size_t count, loff_t *ppos)
+{
+ ssize_t len = 0;
+ char buf[24] = {'\0'};
+
+ if (!buff || !ppos)
+ return -EINVAL;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "0x%x\n", sde_dbg_base.debugfs_ctrl);
+ pr_debug("%s: ctrl:0x%x len:0x%zx\n",
+ __func__, sde_dbg_base.debugfs_ctrl, len);
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+ pr_err("error copying the buffer! count:0x%zx\n", count);
+ return -EFAULT;
+ }
+
+ *ppos += len; /* increase offset */
+ return len;
+}
+
+/**
+ * sde_dbg_ctrl_write - debugfs read handler for debug ctrl write
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t sde_dbg_ctrl_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ u32 dbg_ctrl = 0;
+ char buf[24];
+
+ if (!file) {
+ pr_err("DbgDbg: %s: error no file --\n", __func__);
+ return -EINVAL;
+ }
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtouint(buf, 0, &dbg_ctrl)) {
+ pr_err("%s: error in the number of bytes\n", __func__);
+ return -EFAULT;
+ }
+
+ pr_debug("dbg_ctrl_read:0x%x\n", dbg_ctrl);
+ sde_dbg_base.debugfs_ctrl = dbg_ctrl;
+
+ return count;
+}
+
/* file ops for the "dbg_ctrl" debugfs node exposing the control mask */
static const struct file_operations sde_dbg_ctrl_fops = {
	.open = sde_dbg_debugfs_open,
	.read = sde_dbg_ctrl_read,
	.write = sde_dbg_ctrl_write,
};
+
+void sde_dbg_init_dbg_buses(u32 hwversion)
+{
+ static struct sde_dbg_base *dbg = &sde_dbg_base;
+ char debug_name[80] = "";
+
+ memset(&dbg->dbgbus_sde, 0, sizeof(dbg->dbgbus_sde));
+ memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+ switch (hwversion) {
+ case SDE_HW_VER_300:
+ case SDE_HW_VER_301:
+ dbg->dbgbus_sde.entries = dbg_bus_sde_8998;
+ dbg->dbgbus_sde.cmn.entries_size = ARRAY_SIZE(dbg_bus_sde_8998);
+ dbg->dbgbus_sde.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ break;
+ default:
+ pr_err("unsupported chipset id %u\n", hwversion);
+ break;
+ }
+
+ if (dbg->dbgbus_sde.entries) {
+ dbg->dbgbus_sde.cmn.name = DBGBUS_NAME_SDE;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_sde.cmn.name);
+ dbg->dbgbus_sde.cmn.enable_mask = DEFAULT_DBGBUS_SDE;
+ debugfs_create_u32(debug_name, 0600, dbg->root,
+ &dbg->dbgbus_sde.cmn.enable_mask);
+ }
+
+ if (dbg->dbgbus_vbif_rt.entries) {
+ dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_vbif_rt.cmn.name);
+ dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+ debugfs_create_u32(debug_name, 0600, dbg->root,
+ &dbg->dbgbus_vbif_rt.cmn.enable_mask);
+ }
+}
+
+int sde_dbg_init(struct dentry *debugfs_root, struct device *dev,
+ struct sde_dbg_power_ctrl *power_ctrl)
+{
+ int i;
+
+ mutex_init(&sde_dbg_base.mutex);
+ INIT_LIST_HEAD(&sde_dbg_base.reg_base_list);
+ sde_dbg_base.dev = dev;
+ sde_dbg_base.power_ctrl = *power_ctrl;
+
+
+ sde_dbg_base.evtlog = sde_evtlog_init();
+ if (IS_ERR_OR_NULL(sde_dbg_base.evtlog))
+ return PTR_ERR(sde_dbg_base.evtlog);
+
+ sde_dbg_base_evtlog = sde_dbg_base.evtlog;
+
+ sde_dbg_base.root = debugfs_create_dir("evt_dbg", debugfs_root);
+ if (IS_ERR_OR_NULL(sde_dbg_base.root)) {
+ pr_err("debugfs_create_dir fail, error %ld\n",
+ PTR_ERR(sde_dbg_base.root));
+ sde_dbg_base.root = NULL;
+ return -ENODEV;
+ }
+
+ INIT_WORK(&sde_dbg_base.dump_work, _sde_dump_work);
+ sde_dbg_base.work_panic = false;
+
+ for (i = 0; i < SDE_EVTLOG_ENTRY; i++)
+ sde_dbg_base.evtlog->logs[i].counter = i;
+
+ debugfs_create_file("dbg_ctrl", 0600, sde_dbg_base.root, NULL,
+ &sde_dbg_ctrl_fops);
+ debugfs_create_file("dump", 0600, sde_dbg_base.root, NULL,
+ &sde_evtlog_fops);
+ debugfs_create_u32("enable", 0600, sde_dbg_base.root,
+ &(sde_dbg_base.evtlog->enable));
+ debugfs_create_u32("panic", 0600, sde_dbg_base.root,
+ &sde_dbg_base.panic_on_err);
+ debugfs_create_u32("reg_dump", 0600, sde_dbg_base.root,
+ &sde_dbg_base.enable_reg_dump);
+
+ sde_dbg_base.panic_on_err = DEFAULT_PANIC;
+ sde_dbg_base.enable_reg_dump = DEFAULT_REGDUMP;
+
+ pr_info("evtlog_status: enable:%d, panic:%d, dump:%d\n",
+ sde_dbg_base.evtlog->enable, sde_dbg_base.panic_on_err,
+ sde_dbg_base.enable_reg_dump);
+
+ return 0;
+}
+
+/**
+ * sde_dbg_destroy - destroy sde debug facilities
+ */
+void sde_dbg_destroy(void)
+{
+ debugfs_remove_recursive(sde_dbg_base.root);
+ sde_dbg_base.root = NULL;
+
+ sde_dbg_base_evtlog = NULL;
+ sde_evtlog_destroy(sde_dbg_base.evtlog);
+ sde_dbg_base.evtlog = NULL;
+ mutex_destroy(&sde_dbg_base.mutex);
+}
+
+/**
+ * sde_dbg_reg_base_release - release allocated reg dump file private data
+ * @inode: debugfs inode
+ * @file: file handle
+ * @Return: 0 on success
+ */
+static int sde_dbg_reg_base_release(struct inode *inode, struct file *file)
+{
+ struct sde_dbg_reg_base *dbg;
+
+ if (!file)
+ return -EINVAL;
+
+ dbg = file->private_data;
+ if (!dbg)
+ return -ENODEV;
+
+ mutex_lock(&sde_dbg_base.mutex);
+ if (dbg && dbg->buf) {
+ kfree(dbg->buf);
+ dbg->buf_len = 0;
+ dbg->buf = NULL;
+ }
+ mutex_unlock(&sde_dbg_base.mutex);
+
+ return 0;
+}
+
+/**
+ * sde_dbg_reg_base_is_valid_range - verify if requested memory range is valid
+ * @off: address offset in bytes
+ * @cnt: memory size in bytes
+ * Return: true if [off, off + cnt] is fully contained in some registered
+ *	sub-range; false otherwise
+ */
+static bool sde_dbg_reg_base_is_valid_range(u32 off, u32 cnt)
+{
+	/* NOTE(review): 'static' on this local pointer is unnecessary; the
+	 * global's address is constant regardless.
+	 */
+	static struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_range *node;
+	struct sde_dbg_reg_base *base;
+
+	pr_debug("check offset=0x%x cnt=0x%x\n", off, cnt);
+
+	/* scan every sub-range of every registered base for containment */
+	list_for_each_entry(base, &dbg_base->reg_base_list, reg_base_head) {
+		list_for_each_entry(node, &base->sub_range_list, head) {
+			pr_debug("%s: start=0x%x end=0x%x\n", node->range_name,
+				node->offset.start, node->offset.end);
+
+			/* NOTE(review): off + cnt can wrap for very large cnt
+			 * — confirm callers bound cnt (offset_write clamps it
+			 * to max_offset - off before calling here).
+			 */
+			if (node->offset.start <= off
+				&& off <= node->offset.end
+				&& off + cnt <= node->offset.end) {
+				pr_debug("valid range requested\n");
+				return true;
+			}
+		}
+	}
+
+	pr_err("invalid range requested\n");
+	return false;
+}
+
+/**
+ * sde_dbg_reg_base_offset_write - set new offset and len to debugfs reg base
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs; expected format is two hex
+ *	numbers: "<offset> <count>"
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ * Return: @count on success, negative errno on invalid input
+ */
+static ssize_t sde_dbg_reg_base_offset_write(struct file *file,
+	const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	u32 off = 0;
+	u32 cnt = DEFAULT_BASE_REG_CNT;
+	char buf[24];
+	ssize_t rc = count;	/* success consumes the whole write */
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	if (sscanf(buf, "%x %x", &off, &cnt) != 2)
+		return -EFAULT;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (off > dbg->max_offset) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* offset must stay u32-aligned for register access */
+	if (off % sizeof(u32)) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	/* clamp the requested span to the registered region */
+	if (cnt > (dbg->max_offset - off))
+		cnt = dbg->max_offset - off;
+
+	if (cnt % sizeof(u32)) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (cnt == 0) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	if (!sde_dbg_reg_base_is_valid_range(off, cnt)) {
+		rc = -EINVAL;
+		goto exit;
+	}
+
+	dbg->off = off;
+	dbg->cnt = cnt;
+
+exit:
+	mutex_unlock(&sde_dbg_base.mutex);
+	pr_debug("offset=%x cnt=%x\n", off, cnt);
+
+	return rc;
+}
+
+/**
+ * sde_dbg_reg_base_offset_read - read current offset and len of register base
+ * @file: file handler
+ * @buff: user buffer, filled with "0x<offset> <count>\n"
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ * Return: bytes copied, 0 at EOF, negative errno on error
+ */
+static ssize_t sde_dbg_reg_base_offset_read(struct file *file,
+			char __user *buff, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	int len = 0;
+	char buf[24] = {'\0'};
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg)
+		return -ENODEV;
+
+	if (!ppos)
+		return -EINVAL;
+
+	if (*ppos)
+		return 0;	/* the end */
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (dbg->off % sizeof(u32)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return -EFAULT;
+	}
+
+	/* NOTE(review): %zx expects size_t — confirm dbg->off/dbg->cnt are
+	 * declared size_t in struct sde_dbg_reg_base (not visible here).
+	 */
+	len = snprintf(buf, sizeof(buf), "0x%08zx %zx\n", dbg->off, dbg->cnt);
+	if (len < 0 || len >= sizeof(buf)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return 0;
+	}
+
+	if ((count < sizeof(buf)) || copy_to_user(buff, buf, len)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return len;
+}
+
+/**
+ * sde_dbg_reg_base_reg_write - write to reg base hw at offset a given value
+ * @file: file handler
+ * @user_buf: user buffer content from debugfs; expected format is two hex
+ *	numbers: "<offset> <value>"
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ * Return: @count on success, negative errno on invalid input
+ */
+static ssize_t sde_dbg_reg_base_reg_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	size_t off;
+	u32 data, cnt;
+	char buf[24];
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg)
+		return -ENODEV;
+
+	if (count >= sizeof(buf))
+		return -EFAULT;
+
+	if (copy_from_user(buf, user_buf, count))
+		return -EFAULT;
+
+	buf[count] = 0;	/* end of string */
+
+	cnt = sscanf(buf, "%zx %x", &off, &data);
+
+	if (cnt < 2)
+		return -EFAULT;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	/* NOTE(review): off is not alignment-checked here, and a 4-byte
+	 * write at off in [max_offset-3, max_offset) would reach past the
+	 * region — confirm whether off + sizeof(u32) <= max_offset should
+	 * be enforced (offset_write does enforce u32 alignment).
+	 */
+	if (off >= dbg->max_offset) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return -EFAULT;
+	}
+
+	/* clocks must be on for register access; see _sde_dbg_enable_power */
+	_sde_dbg_enable_power(true);
+
+	/* relaxed MMIO write: no barrier, debug path does not need ordering */
+	writel_relaxed(data, dbg->base + off);
+
+	_sde_dbg_enable_power(false);
+
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	pr_debug("addr=%zx data=%x\n", off, data);
+
+	return count;
+}
+
+/**
+ * sde_dbg_reg_base_reg_read - read len from reg base hw at current offset
+ * @file: file handler
+ * @user_buf: user buffer to fill with the hex dump
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ *
+ * On first read, snapshots dbg->cnt bytes starting at dbg->off into a
+ * cached hex-dump buffer (dbg->buf); subsequent reads stream from that
+ * cache. The cache is dropped on release.
+ *
+ * Return: bytes copied, 0 at EOF, negative errno on error
+ */
+static ssize_t sde_dbg_reg_base_reg_read(struct file *file,
+			char __user *user_buf, size_t count, loff_t *ppos)
+{
+	struct sde_dbg_reg_base *dbg;
+	size_t len;
+
+	if (!file)
+		return -EINVAL;
+
+	dbg = file->private_data;
+	if (!dbg) {
+		pr_err("invalid handle\n");
+		return -ENODEV;
+	}
+
+	if (!ppos)
+		return -EINVAL;
+
+	mutex_lock(&sde_dbg_base.mutex);
+	if (!dbg->buf) {
+		char *hwbuf;
+		char dump_buf[64];
+		char __iomem *ioptr;
+		int cnt, tot;
+
+		dbg->buf_len = sizeof(dump_buf) *
+			DIV_ROUND_UP(dbg->cnt, ROW_BYTES);
+
+		if (dbg->buf_len % sizeof(u32)) {
+			/* bugfix: this path returned with the mutex held */
+			mutex_unlock(&sde_dbg_base.mutex);
+			return -EINVAL;
+		}
+
+		dbg->buf = kzalloc(dbg->buf_len, GFP_KERNEL);
+		if (!dbg->buf) {
+			mutex_unlock(&sde_dbg_base.mutex);
+			return -ENOMEM;
+		}
+
+		hwbuf = kzalloc(ROW_BYTES, GFP_KERNEL);
+		if (!hwbuf) {
+			/*
+			 * bugfix: clear the stale pointer, otherwise the next
+			 * read would use freed memory (use-after-free)
+			 */
+			kfree(dbg->buf);
+			dbg->buf = NULL;
+			mutex_unlock(&sde_dbg_base.mutex);
+			return -ENOMEM;
+		}
+
+		ioptr = dbg->base + dbg->off;
+		tot = 0;
+		_sde_dbg_enable_power(true);
+
+		/* NOTE(review): the last iteration still reads a full row
+		 * from io space even when cnt < ROW_BYTES — confirm ranges
+		 * are registered row-aligned.
+		 */
+		for (cnt = dbg->cnt; cnt > 0; cnt -= ROW_BYTES) {
+			memcpy_fromio(hwbuf, ioptr, ROW_BYTES);
+			hex_dump_to_buffer(hwbuf,
+					min(cnt, ROW_BYTES),
+					ROW_BYTES, GROUP_BYTES, dump_buf,
+					sizeof(dump_buf), false);
+			len = scnprintf(dbg->buf + tot, dbg->buf_len - tot,
+					"0x%08x: %s\n",
+					((int) (unsigned long) ioptr) -
+					((int) (unsigned long) dbg->base),
+					dump_buf);
+
+			ioptr += ROW_BYTES;
+			tot += len;
+			if (tot >= dbg->buf_len)
+				break;
+		}
+
+		_sde_dbg_enable_power(false);
+
+		dbg->buf_len = tot;
+		kfree(hwbuf);
+	}
+
+	if (*ppos >= dbg->buf_len) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		return 0;	/* done reading */
+	}
+
+	len = min(count, dbg->buf_len - (size_t) *ppos);
+	if (copy_to_user(user_buf, dbg->buf + *ppos, len)) {
+		mutex_unlock(&sde_dbg_base.mutex);
+		pr_err("failed to copy to user\n");
+		return -EFAULT;
+	}
+
+	*ppos += len;	/* increase offset */
+	mutex_unlock(&sde_dbg_base.mutex);
+
+	return len;
+}
+
+/* debugfs "<base>_off" node: query/set the dump window (offset, length) */
+static const struct file_operations sde_off_fops = {
+	.open = sde_dbg_debugfs_open,
+	.release = sde_dbg_reg_base_release,
+	.read = sde_dbg_reg_base_offset_read,
+	.write = sde_dbg_reg_base_offset_write,
+};
+
+/* debugfs "<base>_reg" node: read a hex dump of / poke values into hw regs */
+static const struct file_operations sde_reg_fops = {
+	.open = sde_dbg_debugfs_open,
+	.release = sde_dbg_reg_base_release,
+	.read = sde_dbg_reg_base_reg_read,
+	.write = sde_dbg_reg_base_reg_write,
+};
+
+/**
+ * sde_dbg_reg_register_base - register a hw register address section for
+ *	later dumping; creates "<name>_off" and "<name>_reg" debugfs nodes
+ * @name: name of base region, may be NULL for an unnamed base
+ * @base: base io pointer of region
+ * @max_offset: length of region in bytes
+ * Return: 0 on success, -ENOMEM on allocation failure, -ENODEV on
+ *	debugfs node creation failure
+ */
+int sde_dbg_reg_register_base(const char *name, void __iomem *base,
+		size_t max_offset)
+{
+	struct sde_dbg_base *dbg_base = &sde_dbg_base;
+	struct sde_dbg_reg_base *reg_base;
+	struct dentry *ent_off, *ent_reg;
+	char dn[80] = "";
+	int prefix_len = 0;
+
+	reg_base = kzalloc(sizeof(*reg_base), GFP_KERNEL);
+	if (!reg_base)
+		return -ENOMEM;
+
+	if (name)
+		strlcpy(reg_base->name, name, sizeof(reg_base->name));
+	reg_base->base = base;
+	reg_base->max_offset = max_offset;
+	reg_base->off = 0;
+	reg_base->cnt = DEFAULT_BASE_REG_CNT;
+	reg_base->reg_dump = NULL;
+
+	if (name)
+		prefix_len = snprintf(dn, sizeof(dn), "%s_", name);
+	/*
+	 * bugfix: snprintf returns the would-be length; a long name would
+	 * push prefix_len past sizeof(dn) and underflow the size passed to
+	 * strlcpy below. Clamp to the buffer end.
+	 */
+	if (prefix_len >= (int)sizeof(dn))
+		prefix_len = sizeof(dn) - 1;
+	strlcpy(dn + prefix_len, "off", sizeof(dn) - prefix_len);
+	ent_off = debugfs_create_file(dn, 0600, dbg_base->root, reg_base,
+			&sde_off_fops);
+	if (IS_ERR_OR_NULL(ent_off)) {
+		pr_err("debugfs_create_file: offset fail\n");
+		goto off_fail;
+	}
+
+	strlcpy(dn + prefix_len, "reg", sizeof(dn) - prefix_len);
+	ent_reg = debugfs_create_file(dn, 0600, dbg_base->root, reg_base,
+			&sde_reg_fops);
+	if (IS_ERR_OR_NULL(ent_reg)) {
+		pr_err("debugfs_create_file: reg fail\n");
+		goto reg_fail;
+	}
+
+	/* Initialize list to make sure check for null list will be valid */
+	INIT_LIST_HEAD(&reg_base->sub_range_list);
+
+	pr_debug("%s base: %pK max_offset 0x%zX\n", reg_base->name,
+			reg_base->base, reg_base->max_offset);
+
+	list_add(&reg_base->reg_base_head, &dbg_base->reg_base_list);
+
+	return 0;
+reg_fail:
+	debugfs_remove(ent_off);
+off_fail:
+	kfree(reg_base);
+	return -ENODEV;
+}
+
+/**
+ * sde_dbg_reg_register_dump_range - register a named sub-range within a
+ *	previously registered base for later register dumping
+ * @base_name: name of the base registered via sde_dbg_reg_register_base
+ * @range_name: name of the sub-range, must be non-empty
+ * @offset_start: sub-range start offset from the base pointer
+ * @offset_end: sub-range end offset from the base pointer
+ * @xin_id: xin id associated with the range
+ *
+ * Failures are logged and silently ignored (void return).
+ */
+void sde_dbg_reg_register_dump_range(const char *base_name,
+		const char *range_name, u32 offset_start, u32 offset_end,
+		uint32_t xin_id)
+{
+	struct sde_dbg_reg_base *reg_base;
+	struct sde_dbg_reg_range *range;
+
+	reg_base = _sde_dump_get_blk_addr(base_name);
+	if (!reg_base) {
+		pr_err("error: for range %s unable to locate base %s\n",
+				range_name, base_name);
+		return;
+	}
+
+	if (!range_name || strlen(range_name) == 0) {
+		pr_err("%pS: bad range name, base_name %s, offset_start 0x%X, end 0x%X\n",
+				__builtin_return_address(0), base_name,
+				offset_start, offset_end);
+		return;
+	}
+
+	/* reject inverted ranges and ranges shorter than the dump alignment;
+	 * u32 underflow of (end - start) on inverted input yields a huge
+	 * value, so the second clause is what actually catches start > end
+	 */
+	if (offset_end - offset_start < REG_DUMP_ALIGN ||
+			offset_start > offset_end) {
+		pr_err("%pS: bad range, base_name %s, range_name %s, offset_start 0x%X, end 0x%X\n",
+				__builtin_return_address(0), base_name,
+				range_name, offset_start, offset_end);
+		return;
+	}
+
+	range = kzalloc(sizeof(*range), GFP_KERNEL);
+	if (!range)
+		return;
+
+	strlcpy(range->range_name, range_name, sizeof(range->range_name));
+	range->offset.start = offset_start;
+	range->offset.end = offset_end;
+	range->xin_id = xin_id;
+	list_add_tail(&range->head, &reg_base->sub_range_list);
+
+	pr_debug("base %s, range %s, start 0x%X, end 0x%X\n",
+			base_name, range->range_name,
+			range->offset.start, range->offset.end);
+}
+
+/* record the offset of the top block used for sde debug bus control access */
+void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+	sde_dbg_base.dbgbus_sde.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/sde_dbg.h b/drivers/gpu/drm/msm/sde_dbg.h
new file mode 100644
index 000000000000..ce36cba08039
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_dbg.h
@@ -0,0 +1,341 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef SDE_DBG_H_
+#define SDE_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
+#define SDE_EVTLOG_DATA_LIMITER (0xC0DEBEEF)
+#define SDE_EVTLOG_FUNC_ENTRY 0x1111
+#define SDE_EVTLOG_FUNC_EXIT 0x2222
+#define SDE_EVTLOG_ERROR 0xebad
+
+#define SDE_DBG_DUMP_DATA_LIMITER (NULL)
+
+enum sde_dbg_evtlog_flag {
+ SDE_EVTLOG_DEFAULT = BIT(0),
+ SDE_EVTLOG_IRQ = BIT(1),
+ SDE_EVTLOG_ALL = BIT(7)
+};
+
+enum sde_dbg_dump_flag {
+ SDE_DBG_DUMP_IN_LOG = BIT(0),
+ SDE_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#ifdef CONFIG_DRM_SDE_EVTLOG_DEBUG
+#define SDE_EVTLOG_DEFAULT_ENABLE 1
+#else
+#define SDE_EVTLOG_DEFAULT_ENABLE 0
+#endif
+
+/*
+ * evtlog will print this number of entries when it is called through
+ * sysfs node or panic. This prevents kernel log from evtlog message
+ * flood.
+ */
+#define SDE_EVTLOG_PRINT_ENTRY 256
+
+/*
+ * evtlog keeps this number of entries in memory for debug purpose. This
+ * number must be greater than print entry to prevent out of bound evtlog
+ * entry array access.
+ */
+#define SDE_EVTLOG_ENTRY (SDE_EVTLOG_PRINT_ENTRY * 8)
+#define SDE_EVTLOG_MAX_DATA 15
+#define SDE_EVTLOG_BUF_MAX 512
+#define SDE_EVTLOG_BUF_ALIGN 32
+
+struct sde_dbg_power_ctrl {
+ void *handle;
+ void *client;
+ int (*enable_fn)(void *handle, void *client, bool enable);
+};
+
+struct sde_dbg_evtlog_log {
+ u32 counter;
+ s64 time;
+ const char *name;
+ int line;
+ u32 data[SDE_EVTLOG_MAX_DATA];
+ u32 data_cnt;
+ int pid;
+};
+
+struct sde_dbg_evtlog {
+ struct sde_dbg_evtlog_log logs[SDE_EVTLOG_ENTRY];
+ u32 first;
+ u32 last;
+ u32 last_dump;
+ u32 curr;
+ u32 next;
+ u32 enable;
+ spinlock_t spin_lock;
+};
+
+extern struct sde_dbg_evtlog *sde_dbg_base_evtlog;
+
+/**
+ * SDE_EVT32 - Write a list of 32bit values to the event log, default area
+ * ... - variable arguments
+ */
+#define SDE_EVT32(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
+ __LINE__, SDE_EVTLOG_DEFAULT, ##__VA_ARGS__, \
+ SDE_EVTLOG_DATA_LIMITER)
+
+/**
+ * SDE_EVT32_IRQ - Write a list of 32bit values to the event log, IRQ area
+ * ... - variable arguments
+ */
+#define SDE_EVT32_IRQ(...) sde_evtlog_log(sde_dbg_base_evtlog, __func__, \
+ __LINE__, SDE_EVTLOG_IRQ, ##__VA_ARGS__, \
+ SDE_EVTLOG_DATA_LIMITER)
+
+/**
+ * SDE_DBG_DUMP - trigger dumping of all sde_dbg facilities
+ * @va_args: list of named register dump ranges and regions to dump, as
+ * registered previously through sde_dbg_reg_register_base and
+ * sde_dbg_reg_register_dump_range.
+ * Including the special name "panic" will trigger a panic after
+ * the dumping work has completed.
+ */
+#define SDE_DBG_DUMP(...) sde_dbg_dump(false, __func__, ##__VA_ARGS__, \
+ SDE_DBG_DUMP_DATA_LIMITER)
+
+/**
+ * SDE_DBG_DUMP_WQ - trigger dumping of all sde_dbg facilities, queuing the work
+ * @va_args: list of named register dump ranges and regions to dump, as
+ * registered previously through sde_dbg_reg_register_base and
+ * sde_dbg_reg_register_dump_range.
+ * Including the special name "panic" will trigger a panic after
+ * the dumping work has completed.
+ */
+#define SDE_DBG_DUMP_WQ(...) sde_dbg_dump(true, __func__, ##__VA_ARGS__, \
+ SDE_DBG_DUMP_DATA_LIMITER)
+
+/**
+ * SDE_DBG_CTRL - trigger debug-only driver actions, gated by debugfs so the
+ * driver only executes them when explicitly enabled
+ * @va_args: list of action names to trigger
+ */
+#define SDE_DBG_CTRL(...) sde_dbg_ctrl(__func__, ##__VA_ARGS__, \
+ SDE_DBG_DUMP_DATA_LIMITER)
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * sde_evtlog_init - allocate a new event log object
+ * Returns: evtlog or -ERROR
+ */
+struct sde_dbg_evtlog *sde_evtlog_init(void);
+
+/**
+ * sde_evtlog_destroy - destroy previously allocated event log
+ * @evtlog: pointer to evtlog
+ * Returns: none
+ */
+void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog);
+
+/**
+ * sde_evtlog_log - log an entry into the event log.
+ * log collection may be enabled/disabled entirely via debugfs
+ * log area collection may be filtered by user provided flags via debugfs.
+ * @evtlog: pointer to evtlog
+ * @name: function name of call site
+ * @line: line number of call site
+ * @flag: log area filter flag checked against user's debugfs request
+ * Returns: none
+ */
+void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
+ int flag, ...);
+
+/**
+ * sde_evtlog_dump_all - print all entries in event log to kernel log
+ * @evtlog: pointer to evtlog
+ * Returns: none
+ */
+void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog);
+
+/**
+ * sde_evtlog_is_enabled - check whether log collection is enabled for given
+ * event log and log area flag
+ * @evtlog: pointer to evtlog
+ * @flag: log area filter flag
+ * Returns: none
+ */
+bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag);
+
+/**
+ * sde_evtlog_dump_to_buffer - print content of event log to the given buffer
+ * @evtlog: pointer to evtlog
+ * @evtlog_buf: target buffer to print into
+ * @evtlog_buf_size: size of target buffer
+ * @update_last_entry: whether or not to stop at the most recent entry
+ * Returns: number of bytes written to buffer
+ */
+ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry);
+
+/**
+ * sde_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion: Chipset revision
+ */
+void sde_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * sde_dbg_init - initialize global sde debug facilities: evtlog, regdump
+ * @debugfs_root: debugfs root in which to create sde debug entries
+ * @dev: device handle
+ * @power_ctrl: power control callback structure for enabling clocks
+ * during register dumping
+ * Returns: 0 or -ERROR
+ */
+int sde_dbg_init(struct dentry *debugfs_root, struct device *dev,
+ struct sde_dbg_power_ctrl *power_ctrl);
+
+/**
+ * sde_dbg_destroy - destroy the global sde debug facilities
+ * Returns: none
+ */
+void sde_dbg_destroy(void);
+
+/**
+ * sde_dbg_dump - trigger dumping of all sde_dbg facilities
+ * @queue_work: whether to queue the dumping work to the work_struct
+ * @name: string indicating origin of dump
+ * @va_args: list of named register dump ranges and regions to dump, as
+ * registered previously through sde_dbg_reg_register_base and
+ * sde_dbg_reg_register_dump_range.
+ * Including the special name "panic" will trigger a panic after
+ * the dumping work has completed.
+ * Returns: none
+ */
+void sde_dbg_dump(bool queue_work, const char *name, ...);
+
+/**
+ * sde_dbg_ctrl - trigger specific actions for the driver with debugging
+ * purposes. Those actions need to be enabled by the debugfs entry
+ * so the driver executes those actions in the corresponding calls.
+ * @va_args: list of actions to trigger
+ * Returns: none
+ */
+void sde_dbg_ctrl(const char *name, ...);
+
+/**
+ * sde_dbg_reg_register_base - register a hw register address section for later
+ * dumping. call this before calling sde_dbg_reg_register_dump_range
+ * to be able to specify sub-ranges within the base hw range.
+ * @name: name of base region
+ * @base: base pointer of region
+ * @max_offset: length of region
+ * Returns: 0 or -ERROR
+ */
+int sde_dbg_reg_register_base(const char *name, void __iomem *base,
+ size_t max_offset);
+
+/**
+ * sde_dbg_reg_register_dump_range - register a hw register sub-region for
+ * later register dumping associated with base specified by
+ * sde_dbg_reg_register_base
+ * @base_name: name of base region
+ * @range_name: name of sub-range within base region
+ * @offset_start: sub-range's start offset from base's base pointer
+ * @offset_end: sub-range's end offset from base's base pointer
+ * @xin_id: xin id
+ * Returns: none
+ */
+void sde_dbg_reg_register_dump_range(const char *base_name,
+ const char *range_name, u32 offset_start, u32 offset_end,
+ uint32_t xin_id);
+
+/**
+ * sde_dbg_set_sde_top_offset - set the target specific offset from mdss base
+ * address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void sde_dbg_set_sde_top_offset(u32 blk_off);
+#else
+static inline struct sde_dbg_evtlog *sde_evtlog_init(void)
+{
+ return NULL;
+}
+
+static inline void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
+{
+}
+
+static inline void sde_evtlog_log(struct sde_dbg_evtlog *evtlog,
+ const char *name, int line, int flag, ...)
+{
+}
+
+static inline void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
+{
+}
+
+static inline bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog,
+ u32 flag)
+{
+ return false;
+}
+
+static inline ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
+ char *evtlog_buf, ssize_t evtlog_buf_size,
+ bool update_last_entry)
+{
+ return 0;
+}
+
+/* bugfix: header stub must be static inline to avoid multiple-definition
+ * link errors in every translation unit that includes this header
+ */
+static inline void sde_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int sde_dbg_init(struct dentry *debugfs_root, struct device *dev,
+ struct sde_dbg_power_ctrl *power_ctrl)
+{
+ return 0;
+}
+
+static inline void sde_dbg_destroy(void)
+{
+}
+
+static inline void sde_dbg_dump(bool queue_work, const char *name, ...)
+{
+}
+
+static inline void sde_dbg_ctrl(const char *name, ...)
+{
+}
+
+static inline int sde_dbg_reg_register_base(const char *name,
+ void __iomem *base, size_t max_offset)
+{
+ return 0;
+}
+
+static inline void sde_dbg_reg_register_dump_range(const char *base_name,
+ const char *range_name, u32 offset_start, u32 offset_end,
+ uint32_t xin_id)
+{
+}
+
+/* bugfix: header stub must be static inline to avoid multiple-definition
+ * link errors in every translation unit that includes this header
+ */
+static inline void sde_dbg_set_sde_top_offset(u32 blk_off)
+{
+}
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+
+#endif /* SDE_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/sde_dbg_evtlog.c b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
new file mode 100644
index 000000000000..70ba127ceb08
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_dbg_evtlog.c
@@ -0,0 +1,198 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "sde_dbg:[%s] " fmt, __func__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+
+#include "sde_dbg.h"
+#include "sde_trace.h"
+
+/* check whether log collection is enabled for the given area flag; the
+ * special SDE_EVTLOG_ALL flag reports true if any area is enabled
+ */
+bool sde_evtlog_is_enabled(struct sde_dbg_evtlog *evtlog, u32 flag)
+{
+	if (!evtlog)
+		return false;
+
+	return (flag & evtlog->enable) ||
+		(flag == SDE_EVTLOG_ALL && evtlog->enable);
+}
+
+/* append one entry to the circular event log; varargs are 32-bit values
+ * terminated by the SDE_EVTLOG_DATA_LIMITER sentinel, at most
+ * SDE_EVTLOG_MAX_DATA of which are stored
+ */
+void sde_evtlog_log(struct sde_dbg_evtlog *evtlog, const char *name, int line,
+		int flag, ...)
+{
+	unsigned long flags;
+	int i, val = 0;
+	va_list args;
+	struct sde_dbg_evtlog_log *log;
+
+	if (!evtlog)
+		return;
+
+	if (!sde_evtlog_is_enabled(evtlog, flag))
+		return;
+
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+	log = &evtlog->logs[evtlog->curr];
+	log->time = ktime_to_us(ktime_get());
+	log->name = name;
+	log->line = line;
+	log->data_cnt = 0;
+	log->pid = current->pid;
+
+	va_start(args, flag);
+	for (i = 0; i < SDE_EVTLOG_MAX_DATA; i++) {
+
+		val = va_arg(args, int);
+		/* sentinel marks the end of caller-supplied values */
+		if (val == SDE_EVTLOG_DATA_LIMITER)
+			break;
+
+		log->data[i] = val;
+	}
+	va_end(args);
+	log->data_cnt = i;
+	/* advance circular write cursor; 'last' is a monotonic counter */
+	evtlog->curr = (evtlog->curr + 1) % SDE_EVTLOG_ENTRY;
+	evtlog->last++;
+
+	trace_sde_evtlog(name, line, i > 0 ? log->data[0] : 0,
+			i > 1 ? log->data[1] : 0);
+
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+}
+
+/* always dump the last entries which are not dumped yet */
+/*
+ * Updates evtlog->first/next (dump cursors) relative to last/last_dump
+ * (monotonic write counters), capping the window at
+ * SDE_EVTLOG_PRINT_ENTRY entries to avoid flooding the kernel log.
+ * Returns true when there is at least one entry to print.
+ */
+static bool _sde_evtlog_dump_calc_range(struct sde_dbg_evtlog *evtlog,
+		bool update_last_entry)
+{
+	bool need_dump = true;
+	unsigned long flags;
+
+	if (!evtlog)
+		return false;
+
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+
+	evtlog->first = evtlog->next;
+
+	if (update_last_entry)
+		evtlog->last_dump = evtlog->last;
+
+	if (evtlog->last_dump == evtlog->first) {
+		need_dump = false;
+		goto dump_exit;
+	}
+
+	/* re-normalize cursors when the monotonic counter lapped 'first' */
+	if (evtlog->last_dump < evtlog->first) {
+		evtlog->first %= SDE_EVTLOG_ENTRY;
+		if (evtlog->last_dump < evtlog->first)
+			evtlog->last_dump += SDE_EVTLOG_ENTRY;
+	}
+
+	if ((evtlog->last_dump - evtlog->first) > SDE_EVTLOG_PRINT_ENTRY) {
+		pr_info("evtlog skipping %d entries, last=%d\n",
+			evtlog->last_dump - evtlog->first -
+			SDE_EVTLOG_PRINT_ENTRY,
+			evtlog->last_dump - 1);
+		evtlog->first = evtlog->last_dump - SDE_EVTLOG_PRINT_ENTRY;
+	}
+	evtlog->next = evtlog->first + 1;
+
+dump_exit:
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+
+	return need_dump;
+}
+
+/**
+ * sde_evtlog_dump_to_buffer - format the next undumped entry into the
+ *	given buffer; call repeatedly until it returns 0
+ * @evtlog: pointer to evtlog
+ * @evtlog_buf: target buffer to print into
+ * @evtlog_buf_size: size of target buffer
+ * @update_last_entry: whether to refresh the dump end marker first
+ * Return: number of bytes written to buffer, 0 when nothing left
+ */
+ssize_t sde_evtlog_dump_to_buffer(struct sde_dbg_evtlog *evtlog,
+		char *evtlog_buf, ssize_t evtlog_buf_size,
+		bool update_last_entry)
+{
+	int i;
+	ssize_t off = 0;
+	struct sde_dbg_evtlog_log *log, *prev_log;
+	unsigned long flags;
+
+	if (!evtlog || !evtlog_buf)
+		return 0;
+
+	/* update markers, exit if nothing to print */
+	if (!_sde_evtlog_dump_calc_range(evtlog, update_last_entry))
+		return 0;
+
+	spin_lock_irqsave(&evtlog->spin_lock, flags);
+
+	log = &evtlog->logs[evtlog->first % SDE_EVTLOG_ENTRY];
+
+	prev_log = &evtlog->logs[(evtlog->first - 1) %
+		SDE_EVTLOG_ENTRY];
+
+	/*
+	 * bugfix: use scnprintf throughout — snprintf returns the would-be
+	 * length, so truncation could push 'off' past evtlog_buf_size and
+	 * make the remaining-size argument negative (a huge size_t),
+	 * corrupting memory past the buffer.
+	 */
+	off = scnprintf((evtlog_buf + off), (evtlog_buf_size - off), "%s:%-4d",
+		log->name, log->line);
+
+	/* pad the callsite column so timestamps line up */
+	if (off < SDE_EVTLOG_BUF_ALIGN) {
+		memset((evtlog_buf + off), 0x20, (SDE_EVTLOG_BUF_ALIGN - off));
+		off = SDE_EVTLOG_BUF_ALIGN;
+	}
+
+	off += scnprintf((evtlog_buf + off), (evtlog_buf_size - off),
+		"=>[%-8d:%-11llu:%9llu][%-4d]:", evtlog->first,
+		log->time, (log->time - prev_log->time), log->pid);
+
+	for (i = 0; i < log->data_cnt; i++)
+		off += scnprintf((evtlog_buf + off), (evtlog_buf_size - off),
+			"%x ", log->data[i]);
+
+	off += scnprintf((evtlog_buf + off), (evtlog_buf_size - off), "\n");
+
+	spin_unlock_irqrestore(&evtlog->spin_lock, flags);
+
+	return off;
+}
+
+/* print every undumped evtlog entry to the kernel log, one line at a time;
+ * only the first iteration refreshes the dump end marker so entries logged
+ * while dumping are not chased forever
+ */
+void sde_evtlog_dump_all(struct sde_dbg_evtlog *evtlog)
+{
+	char buf[SDE_EVTLOG_BUF_MAX];
+	bool update_last_entry = true;
+
+	if (!evtlog)
+		return;
+
+	while (sde_evtlog_dump_to_buffer(evtlog, buf, sizeof(buf),
+				update_last_entry)) {
+		/* buf already ends with '\n' from dump_to_buffer */
+		pr_info("%s", buf);
+		update_last_entry = false;
+	}
+}
+
+/* allocate and initialize an event log; returns ERR_PTR(-ENOMEM) on
+ * failure, so callers must check with IS_ERR (not NULL)
+ */
+struct sde_dbg_evtlog *sde_evtlog_init(void)
+{
+	struct sde_dbg_evtlog *evtlog;
+
+	evtlog = kzalloc(sizeof(*evtlog), GFP_KERNEL);
+	if (!evtlog)
+		return ERR_PTR(-ENOMEM);
+
+	spin_lock_init(&evtlog->spin_lock);
+	evtlog->enable = SDE_EVTLOG_DEFAULT_ENABLE;
+
+	return evtlog;
+}
+
+/* free an event log allocated by sde_evtlog_init; NULL-safe */
+void sde_evtlog_destroy(struct sde_dbg_evtlog *evtlog)
+{
+	kfree(evtlog);
+}
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.c b/drivers/gpu/drm/msm/sde_edid_parser.c
new file mode 100644
index 000000000000..cceaf1c27716
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.c
@@ -0,0 +1,633 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drm_edid.h>
+
+#include "sde_kms.h"
+#include "sde_edid_parser.h"
+
+/* TODO: copy from drm_edid.c and mdss_hdmi_edid.c. remove if using ELD */
+#define DBC_START_OFFSET 4
+#define EDID_DTD_LEN 18
+
+enum data_block_types {
+ RESERVED,
+ AUDIO_DATA_BLOCK,
+ VIDEO_DATA_BLOCK,
+ VENDOR_SPECIFIC_DATA_BLOCK,
+ SPEAKER_ALLOCATION_DATA_BLOCK,
+ VESA_DTC_DATA_BLOCK,
+ RESERVED2,
+ USE_EXTENDED_TAG
+};
+
+/* return a pointer to the first EDID extension block with the given tag,
+ * or NULL if the EDID has no matching extension
+ */
+static u8 *sde_find_edid_extension(struct edid *edid, int ext_id)
+{
+	u8 *edid_ext = NULL;
+	int i;
+
+	/* No EDID or EDID extensions */
+	if (edid == NULL || edid->extensions == 0)
+		return NULL;
+
+	/* Find CEA extension */
+	for (i = 0; i < edid->extensions; i++) {
+		/* extension blocks follow the base block in EDID_LENGTH steps */
+		edid_ext = (u8 *)edid + EDID_LENGTH * (i + 1);
+		if (edid_ext[0] == ext_id)
+			break;
+	}
+
+	if (i == edid->extensions)
+		return NULL;
+
+	return edid_ext;
+}
+
+/* locate the CEA-861 extension block, or NULL if absent */
+static u8 *sde_find_cea_extension(struct edid *edid)
+{
+	return sde_find_edid_extension(edid, SDE_CEA_EXT);
+}
+
+/* payload length of a CEA data block: low 5 bits of the header byte */
+static int
+sde_cea_db_payload_len(const u8 *db)
+{
+	return db[0] & 0x1f;
+}
+
+/* tag code of a CEA data block: top 3 bits of the header byte */
+static int
+sde_cea_db_tag(const u8 *db)
+{
+	return db[0] >> 5;
+}
+
+/* CEA extension revision number, byte 1 of the extension block */
+static int
+sde_cea_revision(const u8 *cea)
+{
+	return cea[1];
+}
+
+/* compute [start, end) byte offsets of the CEA data block collection;
+ * returns -ERANGE when byte 2 (DTD offset) is out of spec bounds
+ */
+static int
+sde_cea_db_offsets(const u8 *cea, int *start, int *end)
+{
+	/* Data block offset in CEA extension block */
+	*start = 4;
+	*end = cea[2];
+	/* a DTD offset of 0 means no DTDs: blocks run to the end (127) */
+	if (*end == 0)
+		*end = 127;
+	if (*end < 4 || *end > 127)
+		return -ERANGE;
+	return 0;
+}
+
+#define sde_for_each_cea_db(cea, i, start, end) \
+for ((i) = (start); \
+(i) < (end) && (i) + sde_cea_db_payload_len(&(cea)[(i)]) < (end); \
+(i) += sde_cea_db_payload_len(&(cea)[(i)]) + 1)
+
+/* check whether a CEA data block is the HDMI Forum vendor-specific data
+ * block (24-bit IEEE OUI HDMI_IEEE_OUI_HF, little-endian in bytes 1-3)
+ */
+static bool sde_cea_db_is_hdmi_hf_vsdb(const u8 *db)
+{
+	int hdmi_id;
+
+	if (sde_cea_db_tag(db) != VENDOR_SPECIFIC_DATA_BLOCK)
+		return false;
+
+	/* HF-VSDB payload is at least 7 bytes per HDMI 2.0 */
+	if (sde_cea_db_payload_len(db) < 7)
+		return false;
+
+	hdmi_id = db[1] | (db[2] << 8) | (db[3] << 16);
+
+	return hdmi_id == HDMI_IEEE_OUI_HF;
+}
+
+/* find the first CEA data block carrying the USE_EXTENDED_TAG tag whose
+ * extended tag byte (db[1]) matches blk_id; NULL if absent
+ */
+static u8 *sde_edid_find_extended_tag_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		pr_err("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	/* data block collection exists only from CEA revision 3 onward */
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if ((sde_cea_db_tag(db) == SDE_EXTENDED_TAG) &&
+					(db[1] == blk_id))
+				return db;
+		}
+	}
+	return NULL;
+}
+
+/* find the first CEA data block with the given tag; NULL if absent */
+static u8 *
+sde_edid_find_block(struct edid *edid, int blk_id)
+{
+	u8 *db = NULL;
+	u8 *cea = NULL;
+
+	if (!edid) {
+		pr_err("%s: invalid input\n", __func__);
+		return NULL;
+	}
+
+	cea = sde_find_cea_extension(edid);
+
+	if (cea && sde_cea_revision(cea) >= 3) {
+		int i, start, end;
+
+		/* bugfix: return NULL, not the integer 0, for the pointer
+		 * return type — matches sde_edid_find_extended_tag_block
+		 */
+		if (sde_cea_db_offsets(cea, &start, &end))
+			return NULL;
+
+		sde_for_each_cea_db(cea, i, start, end) {
+			db = &cea[i];
+			if (sde_cea_db_tag(db) == blk_id)
+				return db;
+		}
+	}
+	return NULL;
+}
+
+
+/* legacy-style scan of the CEA data block collection (in_buf points at the
+ * CEA extension block): return the first block of the given tag type found
+ * before the DTD offset, storing its payload length in *len; NULL if absent
+ */
+static const u8 *_sde_edid_find_block(const u8 *in_buf, u32 start_offset,
+	u8 type, u8 *len)
+{
+	/* the start of data block collection, start of Video Data Block */
+	u32 offset = start_offset;
+	u32 dbc_offset = in_buf[2];
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	/*
+	 * * edid buffer 1, byte 2 being 4 means no non-DTD/Data block
+	 *   collection present.
+	 * * edid buffer 1, byte 2 being 0 means no non-DTD/DATA block
+	 *   collection present and no DTD data present.
+	 */
+	if ((dbc_offset == 0) || (dbc_offset == 4)) {
+		SDE_ERROR("EDID: no DTD or non-DTD data present\n");
+		return NULL;
+	}
+
+	while (offset < dbc_offset) {
+		u8 block_len = in_buf[offset] & 0x1F;
+
+		/* only accept blocks fully contained before the DTDs */
+		if ((offset + block_len <= dbc_offset) &&
+				(in_buf[offset] >> 5) == type) {
+			*len = block_len;
+			SDE_EDID_DEBUG("block=%d found @ 0x%x w/ len=%d\n",
+					type, offset, block_len);
+
+			return in_buf + offset;
+		}
+		offset += 1 + block_len;
+	}
+
+	return NULL;
+}
+
+/* decode the 3-letter PNP manufacturer ID from the EDID header: three
+ * 5-bit fields where 1 = 'A', packed big-endian across mfg_id[0..1]
+ */
+static void sde_edid_extract_vendor_id(struct sde_edid_ctrl *edid_ctrl)
+{
+	char *vendor_id;
+	u32 id_codes;
+
+	SDE_EDID_DEBUG("%s +", __func__);
+	if (!edid_ctrl) {
+		SDE_ERROR("%s: invalid input\n", __func__);
+		return;
+	}
+
+	vendor_id = edid_ctrl->vendor_id;
+	id_codes = ((u32)edid_ctrl->edid->mfg_id[0] << 8) +
+		edid_ctrl->edid->mfg_id[1];
+
+	vendor_id[0] = 'A' - 1 + ((id_codes >> 10) & 0x1F);
+	vendor_id[1] = 'A' - 1 + ((id_codes >> 5) & 0x1F);
+	vendor_id[2] = 'A' - 1 + (id_codes & 0x1F);
+	vendor_id[3] = 0;
+	SDE_EDID_DEBUG("vendor id is %s ", vendor_id);
+	SDE_EDID_DEBUG("%s -", __func__);
+}
+
+/* mark every probed mode whose CEA VIC equals video_format as supporting
+ * YUV output; format flags are masked off around drm_match_cea_mode()
+ * because the upstream matcher does not expect the driver-private bits
+ */
+static void sde_edid_set_y420_support(struct drm_connector *connector,
+u32 video_format)
+{
+	u8 cea_mode = 0;
+	struct drm_display_mode *mode;
+	u32 mode_fmt_flags = 0;
+
+	/* Need to add Y420 support flag to the modes */
+	list_for_each_entry(mode, &connector->probed_modes, head) {
+		/* Cache the format flags before clearing */
+		mode_fmt_flags = mode->flags;
+		/* Clear the RGB/YUV format flags before calling upstream API */
+		mode->flags &= ~SDE_DRM_MODE_FLAG_FMT_MASK;
+		cea_mode = drm_match_cea_mode(mode);
+		/* Restore the format flags */
+		mode->flags = mode_fmt_flags;
+		if ((cea_mode != 0) && (cea_mode == video_format)) {
+			SDE_EDID_DEBUG("%s found match for %d ", __func__,
+					video_format);
+			mode->flags |= DRM_MODE_FLAG_SUPPORTS_YUV;
+		}
+	}
+}
+
+/* parse the YCbCr 4:2:0 Capability Map Data Block (CTA-861-F 7.5.11):
+ * each bit of the bitmap maps to an SVD from the Video Data Block; a
+ * 1-byte CMDB (extended tag only) means all SVDs support Y420
+ */
+static void sde_edid_parse_Y420CMDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+	u8 cmdb_len = 0;
+	u8 svd_len = 0;
+	const u8 *svd = NULL;
+	u32 i = 0;
+	u32 video_format = 0;
+	u32 num_cmdb_svd = 0;
+	const u32 mult = 8;	/* bits per bitmap byte */
+
+	if (!edid_ctrl) {
+		DEV_ERR("%s: edid_ctrl is NULL\n", __func__);
+		return;
+	}
+
+	if (!db) {
+		DEV_ERR("%s: invalid input\n", __func__);
+		return;
+	}
+	SDE_EDID_DEBUG("%s +\n", __func__);
+	cmdb_len = db[0] & 0x1f;
+
+	if (cmdb_len < 1)
+		return;
+
+	svd = sde_edid_find_block(edid_ctrl->edid, VIDEO_DATA_BLOCK);
+
+	if (svd) {
+		/*moving to the next byte as vic info begins there*/
+		svd_len = svd[0] & 0x1f;
+		++svd;
+	}
+
+	/* number of SVDs covered: all of them for a 1-byte CMDB, otherwise
+	 * 8 per bitmap byte, capped at the SVD count
+	 */
+	if (cmdb_len == 1)
+		num_cmdb_svd = svd_len;
+	else {
+		num_cmdb_svd = (cmdb_len - 1) * mult;
+		if (num_cmdb_svd > svd_len)
+			num_cmdb_svd = svd_len;
+	}
+
+	for (i = 0; i < num_cmdb_svd; i++) {
+		video_format = *(svd + i) & 0x7F;
+		/*
+		 * If cmdb_len is 1, it means all SVDs support YUV
+		 * Else, we check each byte of the cmdb bitmap bitwise
+		 * and match those bits with the formats populated
+		 * during the parsing of the Video Data Blocks.
+		 * Refer to CTA 861-F section 7.5.11 YCBCR 4:2:0 Capability
+		 * Map Data Block for more details on this.
+		 */
+		if (cmdb_len == 1 || (db[2 + i / mult] & (1 << (i % mult))))
+			sde_edid_set_y420_support(connector, video_format);
+	}
+
+	SDE_EDID_DEBUG("%s -\n", __func__);
+
+}
+
+/*
+ * sde_edid_parse_Y420VDB() - parse the YCbCr 4:2:0 Video Data Block.
+ * @connector: connector whose probed modes receive the Y420 support flag.
+ * @edid_ctrl: EDID parser context (validated only; modes come from DRM).
+ * @db: pointer to the Y420VDB, starting at its tag/length byte.
+ *
+ * Every SVD listed in a Y420VDB supports only YCbCr 4:2:0; flag the
+ * matching probed modes accordingly.
+ */
+static void sde_edid_parse_Y420VDB(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl,
+const u8 *db)
+{
+ u8 len = db[0] & 0x1f;
+ u32 i = 0;
+ u32 video_format = 0;
+
+ if (!edid_ctrl) {
+ DEV_ERR("%s: invalid input\n", __func__);
+ return;
+ }
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+
+ /*
+ * len counts the extended tag byte plus the SVD payload. Guard the
+ * zero-length case explicitly: len is a u8, so the old "i < len - 1"
+ * bound would underflow to a huge value and read far out of bounds.
+ */
+ if (len < 1)
+ return;
+
+ /* Offset to byte 3 */
+ db += 2;
+ for (i = 0; i < len - 1; i++) {
+ video_format = *(db + i) & 0x7F;
+ /*
+ * mode was already added in get_modes()
+ * only need to set the Y420 support flag
+ */
+ sde_edid_set_y420_support(connector, video_format);
+ }
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+/*
+ * sde_edid_set_mode_format() - annotate probed modes with RGB/YUV support.
+ * @connector: connector whose probed modes are annotated.
+ * @edid_ctrl: EDID parser context holding the raw EDID.
+ *
+ * Order matters: Y420VDB modes are flagged first, then every mode without
+ * a YUV flag is marked RGB, then the Y420CMDB may add YUV on top of RGB.
+ * Finally, RGB support is stripped from >340MHz modes when the sink lacks
+ * SCDC (scrambling cannot be configured without it).
+ */
+static void sde_edid_set_mode_format(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+ const u8 *db = NULL;
+ struct drm_display_mode *mode;
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+ /* Set YUV mode support flags for YCbcr420VDB */
+ db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+ Y420_VIDEO_DATA_BLOCK);
+ if (db)
+ sde_edid_parse_Y420VDB(connector, edid_ctrl, db);
+ else
+ SDE_EDID_DEBUG("YCbCr420 VDB is not present\n");
+
+ /* Set RGB supported on all modes where YUV is not set */
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if (!(mode->flags & DRM_MODE_FLAG_SUPPORTS_YUV))
+ mode->flags |= DRM_MODE_FLAG_SUPPORTS_RGB;
+ }
+
+
+ db = sde_edid_find_extended_tag_block(edid_ctrl->edid,
+ Y420_CAPABILITY_MAP_DATA_BLOCK);
+ if (db)
+ sde_edid_parse_Y420CMDB(connector, edid_ctrl, db);
+ else
+ SDE_EDID_DEBUG("YCbCr420 CMDB is not present\n");
+
+ /*
+ * As per HDMI 2.0 spec, a sink supporting any modes
+ * requiring more than 340Mhz clock rate should support
+ * SCDC as well. This is required because we need the SCDC
+ * channel to set the TMDS clock ratio. However in cases
+ * where the TV publishes such a mode in its list of modes
+ * but does not have SCDC support as per HDMI HFVSDB block
+ * remove RGB mode support from the flags. Currently, in
+ * the list of modes not having deep color support only RGB
+ * modes shall requre a clock of 340Mhz and above such as the
+ * 4K@60fps case. All other modes shall be YUV.
+ * Deep color case is handled separately while choosing the
+ * best mode in the _sde_hdmi_choose_best_format API where
+ * we enable deep color only if it satisfies both source and
+ * sink requirements. However, that API assumes that at least
+ * RGB mode is supported on the mode. Hence, it would be better
+ * to remove the format support flags while parsing the EDID
+ * itself if it doesn't satisfy the HDMI spec requirement.
+ */
+
+ list_for_each_entry(mode, &connector->probed_modes, head) {
+ if ((mode->clock > MIN_SCRAMBLER_REQ_RATE) &&
+ !connector->scdc_present) {
+ mode->flags &= ~DRM_MODE_FLAG_SUPPORTS_RGB;
+ }
+ }
+
+ SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+/*
+ * _sde_edid_update_dc_modes() - collect YCbCr 4:2:0 deep color capabilities.
+ * @connector: connector whose display_info.edid_hdmi_dc_modes is updated.
+ * @edid_ctrl: EDID parser context holding the raw EDID.
+ *
+ * Scans the CEA extension for an HDMI Forum VSDB and ORs the advertised
+ * Y420 deep color (30/36/48-bit) bits into the connector's display_info.
+ */
+static void _sde_edid_update_dc_modes(
+struct drm_connector *connector, struct sde_edid_ctrl *edid_ctrl)
+{
+ int i, start, end;
+ u8 *edid_ext, *hdmi;
+ struct drm_display_info *disp_info;
+ u32 hdmi_dc_yuv_modes = 0;
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+
+ if (!connector || !edid_ctrl) {
+ SDE_ERROR("invalid input\n");
+ return;
+ }
+
+ disp_info = &connector->display_info;
+
+ edid_ext = sde_find_cea_extension(edid_ctrl->edid);
+
+ if (!edid_ext) {
+ SDE_ERROR("no cea extension\n");
+ return;
+ }
+
+ if (sde_cea_db_offsets(edid_ext, &start, &end))
+ return;
+
+ sde_for_each_cea_db(edid_ext, i, start, end) {
+ if (sde_cea_db_is_hdmi_hf_vsdb(&edid_ext[i])) {
+
+ hdmi = &edid_ext[i];
+
+ /* byte 7 (Y420 deep color bits) must be present */
+ if (sde_cea_db_payload_len(hdmi) < 7)
+ continue;
+
+ if (hdmi[7] & DRM_EDID_YCBCR420_DC_30) {
+ hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_30;
+ SDE_EDID_DEBUG("Y420 30-bit supported\n");
+ }
+
+ if (hdmi[7] & DRM_EDID_YCBCR420_DC_36) {
+ hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_36;
+ SDE_EDID_DEBUG("Y420 36-bit supported\n");
+ }
+
+ /*
+ * Fix: this branch previously ORed in DC_36 (copy/
+ * paste), so 48-bit support was never reported.
+ */
+ if (hdmi[7] & DRM_EDID_YCBCR420_DC_48) {
+ hdmi_dc_yuv_modes |= DRM_EDID_YCBCR420_DC_48;
+ SDE_EDID_DEBUG("Y420 48-bit supported\n");
+ }
+ }
+ }
+
+ disp_info->edid_hdmi_dc_modes |= hdmi_dc_yuv_modes;
+
+ SDE_EDID_DEBUG("%s -\n", __func__);
+}
+
+/*
+ * _sde_edid_extract_audio_data_blocks() - cache CEA Audio Data Blocks.
+ * @edid_ctrl: EDID parser context; audio_data_block[] and adb_size are
+ * reset and then filled from the CEA extension.
+ *
+ * Concatenates the payloads (without the tag/length byte) of up to
+ * MAX_NUMBER_ADB audio data blocks into edid_ctrl->audio_data_block.
+ */
+static void _sde_edid_extract_audio_data_blocks(
+ struct sde_edid_ctrl *edid_ctrl)
+{
+ u8 len = 0;
+ u8 adb_max = 0;
+ const u8 *adb = NULL;
+ u32 offset = DBC_START_OFFSET;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+ SDE_EDID_DEBUG("%s +", __func__);
+ cea = sde_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ edid_ctrl->adb_size = 0;
+
+ memset(edid_ctrl->audio_data_block, 0,
+ sizeof(edid_ctrl->audio_data_block));
+
+ while (adb_max < MAX_NUMBER_ADB) {
+ len = 0;
+ adb = _sde_edid_find_block(cea, offset, AUDIO_DATA_BLOCK,
+ &len);
+
+ /*
+ * Stop on a missing block or an oversized payload. The
+ * previous do/while used "continue" here, which spun
+ * forever on a non-NULL oversized block because the scan
+ * offset never advanced.
+ */
+ if (!adb || len > MAX_AUDIO_DATA_BLOCK_SIZE)
+ break;
+
+ memcpy(edid_ctrl->audio_data_block + edid_ctrl->adb_size,
+ adb + 1, len);
+ offset = (adb - cea) + 1 + len;
+
+ edid_ctrl->adb_size += len;
+ adb_max++;
+ }
+
+ if (!edid_ctrl->adb_size) {
+ SDE_DEBUG("No/Invalid Audio Data Block\n");
+ return;
+ }
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+/*
+ * _sde_edid_extract_speaker_allocation_data() - cache the CEA SADB.
+ * @edid_ctrl: EDID parser context; spkr_alloc_data_block/sadb_size updated.
+ *
+ * Copies the 3-byte Speaker Allocation Data Block payload and logs the
+ * speaker-presence bits of byte 1.
+ */
+static void _sde_edid_extract_speaker_allocation_data(
+ struct sde_edid_ctrl *edid_ctrl)
+{
+ /*
+ * Initialize len: if _sde_edid_find_block() does not find a block it
+ * may leave its out-parameter untouched, and the (short-circuited)
+ * check below must not depend on an uninitialized value.
+ */
+ u8 len = 0;
+ const u8 *sadb = NULL;
+ u8 *cea = NULL;
+
+ if (!edid_ctrl) {
+ SDE_ERROR("invalid edid_ctrl\n");
+ return;
+ }
+ SDE_EDID_DEBUG("%s +", __func__);
+ cea = sde_find_cea_extension(edid_ctrl->edid);
+ if (!cea) {
+ SDE_DEBUG("CEA extension not found\n");
+ return;
+ }
+
+ sadb = _sde_edid_find_block(cea, DBC_START_OFFSET,
+ SPEAKER_ALLOCATION_DATA_BLOCK, &len);
+ if ((sadb == NULL) || (len != MAX_SPKR_ALLOC_DATA_BLOCK_SIZE)) {
+ SDE_DEBUG("No/Invalid Speaker Allocation Data Block\n");
+ return;
+ }
+
+ memcpy(edid_ctrl->spkr_alloc_data_block, sadb + 1, len);
+ edid_ctrl->sadb_size = len;
+
+ SDE_EDID_DEBUG("speaker alloc data SP byte = %08x %s%s%s%s%s%s%s\n",
+ sadb[1],
+ (sadb[1] & BIT(0)) ? "FL/FR," : "",
+ (sadb[1] & BIT(1)) ? "LFE," : "",
+ (sadb[1] & BIT(2)) ? "FC," : "",
+ (sadb[1] & BIT(3)) ? "RL/RR," : "",
+ (sadb[1] & BIT(4)) ? "RC," : "",
+ (sadb[1] & BIT(5)) ? "FLC/FRC," : "",
+ (sadb[1] & BIT(6)) ? "RLC/RRC," : "");
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+/*
+ * sde_edid_init() - allocate an EDID parser context.
+ *
+ * Return: pointer to a zero-initialized sde_edid_ctrl, or NULL on
+ * allocation failure.
+ */
+struct sde_edid_ctrl *sde_edid_init(void)
+{
+ struct sde_edid_ctrl *edid_ctrl = NULL;
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+ /* kzalloc() zeroes the allocation; the old extra memset was redundant */
+ edid_ctrl = kzalloc(sizeof(*edid_ctrl), GFP_KERNEL);
+ if (!edid_ctrl) {
+ SDE_ERROR("edid_ctrl alloc failed\n");
+ return NULL;
+ }
+ SDE_EDID_DEBUG("%s -\n", __func__);
+ return edid_ctrl;
+}
+
+/*
+ * sde_free_edid() - free the cached EDID blob (keeps the context alive).
+ * @input: address of the sde_edid_ctrl handle returned by sde_edid_init().
+ *
+ * Guards against a NULL handle so callers may invoke it unconditionally.
+ */
+void sde_free_edid(void **input)
+{
+ struct sde_edid_ctrl *edid_ctrl;
+
+ SDE_EDID_DEBUG("%s +", __func__);
+ if (!input || !(*input)) {
+ SDE_ERROR("%s: invalid input\n", __func__);
+ return;
+ }
+ edid_ctrl = (struct sde_edid_ctrl *)(*input);
+ kfree(edid_ctrl->edid);
+ edid_ctrl->edid = NULL;
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+/*
+ * sde_edid_deinit() - free the EDID blob and the parser context itself.
+ * @input: address of the handle returned by sde_edid_init(); cleared on
+ * success so the caller is not left with a dangling pointer.
+ */
+void sde_edid_deinit(void **input)
+{
+ struct sde_edid_ctrl *edid_ctrl;
+
+ SDE_EDID_DEBUG("%s +", __func__);
+ if (!input || !(*input)) {
+ SDE_ERROR("%s: invalid input\n", __func__);
+ return;
+ }
+ edid_ctrl = (struct sde_edid_ctrl *)(*input);
+ sde_free_edid((void *)&edid_ctrl);
+ kfree(edid_ctrl);
+ *input = NULL;
+ SDE_EDID_DEBUG("%s -", __func__);
+}
+
+/*
+ * _sde_edid_update_modes() - push the cached EDID to DRM and add its modes.
+ * @connector: connector to update.
+ * @input: opaque sde_edid_ctrl handle returned by sde_edid_init().
+ *
+ * Return: number of modes added by drm_add_edid_modes(), 0 when no EDID
+ * is cached or the inputs are invalid.
+ */
+int _sde_edid_update_modes(struct drm_connector *connector,
+ void *input)
+{
+ int rc = 0;
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+ struct drm_display_info *disp_info;
+
+ SDE_EDID_DEBUG("%s +", __func__);
+
+ if (!connector || !edid_ctrl) {
+ SDE_ERROR("invalid input\n");
+ return rc;
+ }
+
+ /*
+ * &connector->display_info can never be NULL, so the old
+ * "if (disp_info)" check was dead code and is dropped.
+ */
+ disp_info = &connector->display_info;
+ disp_info->edid_hdmi_dc_modes = 0;
+
+ if (edid_ctrl->edid) {
+ drm_mode_connector_update_edid_property(connector,
+ edid_ctrl->edid);
+
+ rc = drm_add_edid_modes(connector, edid_ctrl->edid);
+ sde_edid_set_mode_format(connector, edid_ctrl);
+ _sde_edid_update_dc_modes(connector, edid_ctrl);
+ SDE_EDID_DEBUG("%s -", __func__);
+ return rc;
+ }
+
+ drm_mode_connector_update_edid_property(connector, NULL);
+ SDE_EDID_DEBUG("%s null edid -", __func__);
+ return rc;
+}
+
+/*
+ * sde_detect_hdmi_monitor() - check whether the cached EDID is from an
+ * HDMI sink.
+ * @input: opaque sde_edid_ctrl handle.
+ *
+ * Thin wrapper over drm_detect_hdmi_monitor(). NOTE(review): @input is
+ * dereferenced without a NULL check — callers must pass a valid handle.
+ *
+ * Return: true if the sink identifies as HDMI, false otherwise.
+ */
+bool sde_detect_hdmi_monitor(void *input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(input);
+
+ return drm_detect_hdmi_monitor(edid_ctrl->edid);
+}
+
+/*
+ * sde_get_edid() - read the sink EDID over DDC and parse the audio blocks.
+ * @connector: connector the EDID belongs to.
+ * @adapter: I2C adapter used for the DDC read.
+ * @input: opaque sde_edid_ctrl handle; its ->edid field is (re)assigned.
+ *
+ * On a successful read, extracts the vendor id, audio data blocks and
+ * speaker allocation data. On failure only logs an error.
+ */
+void sde_get_edid(struct drm_connector *connector,
+ struct i2c_adapter *adapter, void **input)
+{
+ struct sde_edid_ctrl *edid_ctrl = (struct sde_edid_ctrl *)(*input);
+
+ SDE_EDID_DEBUG("%s +\n", __func__);
+ edid_ctrl->edid = drm_get_edid(connector, adapter);
+
+ /* single if/else instead of the old back-to-back tests of ->edid */
+ if (edid_ctrl->edid) {
+ sde_edid_extract_vendor_id(edid_ctrl);
+ _sde_edid_extract_audio_data_blocks(edid_ctrl);
+ _sde_edid_extract_speaker_allocation_data(edid_ctrl);
+ } else {
+ SDE_ERROR("EDID read failed\n");
+ }
+ SDE_EDID_DEBUG("%s -\n", __func__);
+}
diff --git a/drivers/gpu/drm/msm/sde_edid_parser.h b/drivers/gpu/drm/msm/sde_edid_parser.h
new file mode 100644
index 000000000000..a913219aac50
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_edid_parser.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_EDID_PARSER_H_
+#define _SDE_EDID_PARSER_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+
+
+#define MAX_NUMBER_ADB 5
+#define MAX_AUDIO_DATA_BLOCK_SIZE 30
+#define MAX_SPKR_ALLOC_DATA_BLOCK_SIZE 3
+#define EDID_VENDOR_ID_SIZE 4
+
+#define SDE_CEA_EXT 0x02
+#define SDE_EXTENDED_TAG 0x07
+
+#define MIN_SCRAMBLER_REQ_RATE 340000
+
+#define SDE_DRM_MODE_FLAG_FMT_MASK (0x3 << 20)
+
+enum extended_data_block_types {
+ VIDEO_CAPABILITY_DATA_BLOCK = 0x0,
+ VENDOR_SPECIFIC_VIDEO_DATA_BLOCK = 0x01,
+ HDMI_VIDEO_DATA_BLOCK = 0x04,
+ HDR_STATIC_METADATA_DATA_BLOCK = 0x06,
+ Y420_VIDEO_DATA_BLOCK = 0x0E,
+ VIDEO_FORMAT_PREFERENCE_DATA_BLOCK = 0x0D,
+ Y420_CAPABILITY_MAP_DATA_BLOCK = 0x0F,
+ VENDOR_SPECIFIC_AUDIO_DATA_BLOCK = 0x11,
+ INFOFRAME_DATA_BLOCK = 0x20,
+};
+
+#ifdef SDE_EDID_DEBUG_ENABLE
+#define SDE_EDID_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
+#else
+#define SDE_EDID_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args)
+#endif
+
+/*
+ * struct sde_edid_hdr_data - HDR Static Metadata
+ * @eotf: Electro-Optical Transfer Function
+ * @metadata_type_one: Static Metadata Type 1 support
+ * @max_luminance: Desired Content Maximum Luminance
+ * @avg_luminance: Desired Content Frame-average Luminance
+ * @min_luminance: Desired Content Minimum Luminance
+ */
+struct sde_edid_hdr_data {
+ u32 eotf;
+ bool metadata_type_one;
+ u32 max_luminance;
+ u32 avg_luminance;
+ u32 min_luminance;
+};
+
+struct sde_edid_sink_caps {
+ u32 max_pclk_in_hz;
+ bool scdc_present;
+ bool scramble_support; /* scramble support for less than 340Mcsc */
+ bool read_req_support;
+ bool osd_disparity;
+ bool dual_view_support;
+ bool ind_view_support;
+};
+
+struct sde_edid_ctrl {
+ struct edid *edid;
+ u8 pt_scan_info;
+ u8 it_scan_info;
+ u8 ce_scan_info;
+ u8 audio_data_block[MAX_NUMBER_ADB * MAX_AUDIO_DATA_BLOCK_SIZE];
+ int adb_size;
+ u8 spkr_alloc_data_block[MAX_SPKR_ALLOC_DATA_BLOCK_SIZE];
+ int sadb_size;
+ bool hdr_supported;
+ char vendor_id[EDID_VENDOR_ID_SIZE];
+ struct sde_edid_sink_caps sink_caps;
+ struct sde_edid_hdr_data hdr_data;
+};
+
+/**
+ * sde_edid_init() - init edid structure.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ * Return: handle to sde_edid_ctrl for the client.
+ */
+struct sde_edid_ctrl *sde_edid_init(void);
+
+/**
+ * sde_edid_deinit() - deinit edid structure.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_edid_deinit(void **edid_ctrl);
+
+/**
+ * sde_get_edid() - get edid info.
+ * @connector: Handle to the drm_connector.
+ * @adapter: handle to i2c adapter for DDC read
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_get_edid(struct drm_connector *connector,
+struct i2c_adapter *adapter,
+void **edid_ctrl);
+
+/**
+ * sde_free_edid() - free edid structure.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: void.
+ */
+void sde_free_edid(void **edid_ctrl);
+
+/**
+ * sde_detect_hdmi_monitor() - detect HDMI mode.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: true if the sink identifies as an HDMI monitor, false otherwise.
+ */
+bool sde_detect_hdmi_monitor(void *edid_ctrl);
+
+/**
+ * _sde_edid_update_modes() - populate EDID modes.
+ * @edid_ctrl: Handle to the edid_ctrl structure.
+ *
+ * Return: error code.
+ */
+int _sde_edid_update_modes(struct drm_connector *connector,
+ void *edid_ctrl);
+
+#endif /* _SDE_EDID_PARSER_H_ */
+
diff --git a/drivers/gpu/drm/msm/sde_hdcp.h b/drivers/gpu/drm/msm/sde_hdcp.h
new file mode 100644
index 000000000000..90c0d1cef26b
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_hdcp.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __SDE_HDCP_H__
+#define __SDE_HDCP_H__
+
+#include <soc/qcom/scm.h>
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+#include <linux/debugfs.h>
+#include <linux/of_device.h>
+#include <linux/i2c.h>
+#include <drm/drmP.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_edid.h>
+#include "hdmi.h"
+#include "sde_kms.h"
+#include "sde_hdmi_util.h"
+
+#ifdef SDE_HDCP_DEBUG_ENABLE
+#define SDE_HDCP_DEBUG(fmt, args...) SDE_ERROR(fmt, ##args)
+#else
+#define SDE_HDCP_DEBUG(fmt, args...) SDE_DEBUG(fmt, ##args)
+#endif
+
+#define SDE_HDCP_SRM_FAIL 29
+
+enum sde_hdcp_client_id {
+ HDCP_CLIENT_HDMI,
+ HDCP_CLIENT_DP,
+};
+
+enum sde_hdcp_states {
+ HDCP_STATE_INACTIVE,
+ HDCP_STATE_AUTHENTICATING,
+ HDCP_STATE_AUTHENTICATED,
+ HDCP_STATE_AUTH_FAIL,
+ HDCP_STATE_AUTH_FAIL_NOREAUTH,
+ HDCP_STATE_AUTH_ENC_NONE,
+ HDCP_STATE_AUTH_ENC_1X,
+ HDCP_STATE_AUTH_ENC_2P2
+};
+
+struct sde_hdcp_init_data {
+ struct dss_io_data *core_io;
+ struct dss_io_data *qfprom_io;
+ struct dss_io_data *hdcp_io;
+ struct mutex *mutex;
+ struct workqueue_struct *workq;
+ void *cb_data;
+ void (*notify_status)(void *cb_data, enum sde_hdcp_states status);
+ void (*avmute_sink)(void *cb_data);
+ struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+ u8 sink_rx_status;
+ u16 *version;
+ u32 phy_addr;
+ u32 hdmi_tx_ver;
+ bool sec_access;
+ enum sde_hdcp_client_id client_id;
+};
+
+struct sde_hdcp_ops {
+ int (*isr)(void *ptr);
+ int (*cp_irq)(void *ptr);
+ int (*reauthenticate)(void *input);
+ int (*authenticate)(void *hdcp_ctrl);
+ bool (*feature_supported)(void *input);
+ void (*off)(void *hdcp_ctrl);
+};
+
+void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data);
+void sde_hdcp_1x_deinit(void *input);
+struct sde_hdcp_ops *sde_hdcp_1x_start(void *input);
+void *sde_hdmi_hdcp2p2_init(struct sde_hdcp_init_data *init_data);
+void sde_hdmi_hdcp2p2_deinit(void *input);
+const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state);
+struct sde_hdcp_ops *sde_hdmi_hdcp2p2_start(void *input);
+#endif /* __SDE_HDCP_H__ */
diff --git a/drivers/gpu/drm/msm/sde_hdcp_1x.c b/drivers/gpu/drm/msm/sde_hdcp_1x.c
new file mode 100644
index 000000000000..49ce37393e81
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_hdcp_1x.c
@@ -0,0 +1,1910 @@
+/* Copyright (c) 2017-2019, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "%s: " fmt, __func__
+
+#include <linux/io.h>
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/slab.h>
+#include <linux/stat.h>
+#include <linux/iopoll.h>
+#include <linux/hdcp_qseecom.h>
+#include "sde_hdcp.h"
+#include "sde_hdmi_util.h"
+#include "video/msm_hdmi_hdcp_mgr.h"
+
+#define SDE_HDCP_STATE_NAME (sde_hdcp_state_name(hdcp->hdcp_state))
+
+/* HDCP Keys state based on HDMI_HDCP_LINK0_STATUS:KEYS_STATE */
+#define HDCP_KEYS_STATE_NO_KEYS 0
+#define HDCP_KEYS_STATE_NOT_CHECKED 1
+#define HDCP_KEYS_STATE_CHECKING 2
+#define HDCP_KEYS_STATE_VALID 3
+#define HDCP_KEYS_STATE_AKSV_NOT_VALID 4
+#define HDCP_KEYS_STATE_CHKSUM_MISMATCH 5
+#define HDCP_KEYS_STATE_PROD_AKSV 6
+#define HDCP_KEYS_STATE_RESERVED 7
+
+#define TZ_HDCP_CMD_ID 0x00004401
+
+#define HDCP_INT_CLR (isr->auth_success_ack | isr->auth_fail_ack | \
+ isr->auth_fail_info_ack | isr->tx_req_ack | \
+ isr->encryption_ready_ack | \
+ isr->encryption_not_ready_ack | isr->tx_req_done_ack)
+
+#define HDCP_INT_EN (isr->auth_success_mask | isr->auth_fail_mask | \
+ isr->encryption_ready_mask | \
+ isr->encryption_not_ready_mask)
+
+#define HDCP_POLL_SLEEP_US (20 * 1000)
+#define HDCP_POLL_TIMEOUT_US (HDCP_POLL_SLEEP_US * 100)
+
+#define sde_hdcp_1x_state(x) (hdcp->hdcp_state == x)
+
+struct sde_hdcp_sink_addr {
+ char *name;
+ u32 addr;
+ u32 len;
+};
+
+struct sde_hdcp_1x_reg_data {
+ u32 reg_id;
+ struct sde_hdcp_sink_addr *sink;
+};
+
+struct sde_hdcp_skaddr_map {
+ /* addresses to read from sink */
+ struct sde_hdcp_sink_addr bcaps;
+ struct sde_hdcp_sink_addr bksv;
+ struct sde_hdcp_sink_addr r0;
+ struct sde_hdcp_sink_addr bstatus;
+ struct sde_hdcp_sink_addr cp_irq_status;
+ struct sde_hdcp_sink_addr ksv_fifo;
+ struct sde_hdcp_sink_addr v_h0;
+ struct sde_hdcp_sink_addr v_h1;
+ struct sde_hdcp_sink_addr v_h2;
+ struct sde_hdcp_sink_addr v_h3;
+ struct sde_hdcp_sink_addr v_h4;
+
+ /* addresses to write to sink */
+ struct sde_hdcp_sink_addr an;
+ struct sde_hdcp_sink_addr aksv;
+ struct sde_hdcp_sink_addr ainfo;
+};
+
+struct sde_hdcp_int_set {
+ /* interrupt register */
+ u32 int_reg;
+
+ /* interrupt enable/disable masks */
+ u32 auth_success_mask;
+ u32 auth_fail_mask;
+ u32 encryption_ready_mask;
+ u32 encryption_not_ready_mask;
+ u32 tx_req_mask;
+ u32 tx_req_done_mask;
+
+ /* interrupt acknowledgment */
+ u32 auth_success_ack;
+ u32 auth_fail_ack;
+ u32 auth_fail_info_ack;
+ u32 encryption_ready_ack;
+ u32 encryption_not_ready_ack;
+ u32 tx_req_ack;
+ u32 tx_req_done_ack;
+
+ /* interrupt status */
+ u32 auth_success_int;
+ u32 auth_fail_int;
+ u32 encryption_ready;
+ u32 encryption_not_ready;
+ u32 tx_req_int;
+ u32 tx_req_done_int;
+};
+
+struct sde_hdcp_reg_set {
+ u32 status;
+ u32 keys_offset;
+ u32 r0_offset;
+ u32 v_offset;
+ u32 ctrl;
+ u32 aksv_lsb;
+ u32 aksv_msb;
+ u32 entropy_ctrl0;
+ u32 entropy_ctrl1;
+ u32 sec_sha_ctrl;
+ u32 sec_sha_data;
+ u32 sha_status;
+
+ u32 data2_0;
+ u32 data3;
+ u32 data4;
+ u32 data5;
+ u32 data6;
+
+ u32 sec_data0;
+ u32 sec_data1;
+ u32 sec_data7;
+ u32 sec_data8;
+ u32 sec_data9;
+ u32 sec_data10;
+ u32 sec_data11;
+ u32 sec_data12;
+
+ u32 reset;
+ u32 reset_bit;
+
+ u32 repeater;
+};
+
+#define HDCP_REG_SET_CLIENT_HDMI \
+ {HDMI_HDCP_LINK0_STATUS, 28, 24, 20, HDMI_HDCP_CTRL, \
+ HDMI_HDCP_SW_LOWER_AKSV, HDMI_HDCP_SW_UPPER_AKSV, \
+ HDMI_HDCP_ENTROPY_CTRL0, HDMI_HDCP_ENTROPY_CTRL1, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_CTRL, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_SHA_DATA, \
+ HDMI_HDCP_SHA_STATUS, HDMI_HDCP_RCVPORT_DATA2_0, \
+ HDMI_HDCP_RCVPORT_DATA3, HDMI_HDCP_RCVPORT_DATA4, \
+ HDMI_HDCP_RCVPORT_DATA5, HDMI_HDCP_RCVPORT_DATA6, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA0, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA1, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA7, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA8, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA9, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA10, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA11, \
+ HDCP_SEC_TZ_HV_HLOS_HDCP_RCVPORT_DATA12, \
+ HDMI_HDCP_RESET, BIT(0), BIT(6)}
+
+/* To do for DP */
+#define HDCP_REG_SET_CLIENT_DP \
+ {0}
+
+#define HDCP_HDMI_SINK_ADDR_MAP \
+ {{"bcaps", 0x40, 1}, {"bksv", 0x00, 5}, {"r0'", 0x08, 2}, \
+ {"bstatus", 0x41, 2}, {"??", 0x0, 0}, {"ksv-fifo", 0x43, 0}, \
+ {"v_h0", 0x20, 4}, {"v_h1", 0x24, 4}, {"v_h2", 0x28, 4}, \
+ {"v_h3", 0x2c, 4}, {"v_h4", 0x30, 4}, {"an", 0x18, 8}, \
+ {"aksv", 0x10, 5}, {"ainfo", 0x00, 0},}
+
+#define HDCP_DP_SINK_ADDR_MAP \
+ {{"bcaps", 0x68028, 1}, {"bksv", 0x68000, 5}, {"r0'", 0x68005, 2}, \
+ {"binfo", 0x6802A, 2}, {"cp_irq_status", 0x68029, 1}, \
+ {"ksv-fifo", 0x6802C, 0}, {"v_h0", 0x68014, 4}, {"v_h1", 0x68018, 4}, \
+ {"v_h2", 0x6801C, 4}, {"v_h3", 0x68020, 4}, {"v_h4", 0x68024, 4}, \
+ {"an", 0x6800C, 8}, {"aksv", 0x68007, 5}, {"ainfo", 0x6803B, 1} }
+
+#define HDCP_HDMI_INT_SET \
+ {HDMI_HDCP_INT_CTRL, \
+ BIT(2), BIT(6), 0, 0, 0, 0, \
+ BIT(1), BIT(5), BIT(7), 0, 0, 0, 0, \
+ BIT(0), BIT(4), 0, 0, 0, 0}
+
+#define HDCP_DP_INT_SET \
+ {DP_INTR_STATUS2, \
+ BIT(17), BIT(20), BIT(24), BIT(27), 0, 0, \
+ BIT(16), BIT(19), BIT(21), BIT(23), BIT(26), 0, 0, \
+ BIT(15), BIT(18), BIT(22), BIT(25), 0, 0}
+
+struct sde_hdcp_1x {
+ u8 bcaps;
+ u32 tp_msgid;
+ u32 an_0, an_1, aksv_0, aksv_1;
+ bool sink_r0_ready;
+ bool reauth;
+ bool ksv_ready;
+ enum sde_hdcp_states hdcp_state;
+ struct HDCP_V2V1_MSG_TOPOLOGY cached_tp;
+ struct HDCP_V2V1_MSG_TOPOLOGY current_tp;
+ struct delayed_work hdcp_auth_work;
+ struct completion r0_checked;
+ struct completion sink_r0_available;
+ struct sde_hdcp_init_data init_data;
+ struct sde_hdcp_ops *ops;
+ struct sde_hdcp_reg_set reg_set;
+ struct sde_hdcp_int_set int_set;
+ struct sde_hdcp_skaddr_map sink_addr;
+ struct workqueue_struct *workq;
+};
+
+/*
+ * sde_hdcp_state_name() - map an HDCP state to a printable name.
+ * @hdcp_state: state value from enum sde_hdcp_states.
+ *
+ * Return: static string naming the state; "???" for unknown values.
+ * Previously the four enc/no-reauth states were unmapped and logged
+ * as "???"; they are now named for usable diagnostics.
+ */
+const char *sde_hdcp_state_name(enum sde_hdcp_states hdcp_state)
+{
+ switch (hdcp_state) {
+ case HDCP_STATE_INACTIVE: return "HDCP_STATE_INACTIVE";
+ case HDCP_STATE_AUTHENTICATING: return "HDCP_STATE_AUTHENTICATING";
+ case HDCP_STATE_AUTHENTICATED: return "HDCP_STATE_AUTHENTICATED";
+ case HDCP_STATE_AUTH_FAIL: return "HDCP_STATE_AUTH_FAIL";
+ case HDCP_STATE_AUTH_FAIL_NOREAUTH: return "HDCP_STATE_AUTH_FAIL_NOREAUTH";
+ case HDCP_STATE_AUTH_ENC_NONE: return "HDCP_STATE_AUTH_ENC_NONE";
+ case HDCP_STATE_AUTH_ENC_1X: return "HDCP_STATE_AUTH_ENC_1X";
+ case HDCP_STATE_AUTH_ENC_2P2: return "HDCP_STATE_AUTH_ENC_2P2";
+ default: return "???";
+ }
+}
+
+/*
+ * sde_hdcp_1x_count_one() - population count over a byte array.
+ * @array: bytes to scan.
+ * @len: number of bytes in @array.
+ *
+ * Return: total number of bits set across all @len bytes (used to
+ * validate that an AKSV contains exactly 20 ones).
+ */
+static int sde_hdcp_1x_count_one(u8 *array, u8 len)
+{
+ int idx, bit, ones = 0;
+
+ for (idx = 0; idx < len; idx++)
+ for (bit = 0; bit < 8; bit++)
+ ones += (array[idx] >> bit) & 0x1;
+
+ return ones;
+}
+
+/*
+ * reset_hdcp_ddc_failures() - clear latched HDCP DDC failure/NACK state.
+ * @hdcp: HDCP 1.x context (needs init_data.core_io).
+ *
+ * Reads HDMI_HDCP_DDC_STATUS, and if a hardware DDC transfer failure is
+ * latched, disables HDCP DDC, acks the failure, verifies it cleared and
+ * re-enables DDC. If a NACK was latched, resets the HDMI DDC software
+ * status and the DDC controller. The exact register write/delay sequence
+ * follows the hardware programming requirements — do not reorder.
+ */
+static void reset_hdcp_ddc_failures(struct sde_hdcp_1x *hdcp)
+{
+ int hdcp_ddc_ctrl1_reg;
+ int hdcp_ddc_status;
+ int failure;
+ int nack0;
+ struct dss_io_data *io;
+
+ if (!hdcp || !hdcp->init_data.core_io) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ io = hdcp->init_data.core_io;
+
+ /* Check for any DDC transfer failures */
+ hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+ failure = (hdcp_ddc_status >> 16) & BIT(0);
+ nack0 = (hdcp_ddc_status >> 14) & BIT(0);
+ SDE_HDCP_DEBUG("%s: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+ SDE_HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+
+ if (failure) {
+ /*
+ * Indicates that the last HDCP HW DDC transfer failed.
+ * This occurs when a transfer is attempted with HDCP DDC
+ * disabled (HDCP_DDC_DISABLE=1) or the number of retries
+ * matches HDCP_DDC_RETRY_CNT.
+ * Failure occurred, let's clear it.
+ */
+ SDE_HDCP_DEBUG("%s: DDC failure HDCP_DDC_STATUS=0x%08x\n",
+ SDE_HDCP_STATE_NAME, hdcp_ddc_status);
+
+ /* First, Disable DDC */
+ DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, BIT(0));
+
+ /* ACK the Failure to Clear it */
+ hdcp_ddc_ctrl1_reg = DSS_REG_R(io, HDMI_HDCP_DDC_CTRL_1);
+ DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_1,
+ hdcp_ddc_ctrl1_reg | BIT(0));
+
+ /* Check if the FAILURE got Cleared */
+ hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+ hdcp_ddc_status = (hdcp_ddc_status >> 16) & BIT(0);
+ if (hdcp_ddc_status == 0x0)
+ SDE_HDCP_DEBUG("%s: HDCP DDC Failure cleared\n",
+ SDE_HDCP_STATE_NAME);
+ else
+ SDE_ERROR("%s: Unable to clear HDCP DDC Failure",
+ SDE_HDCP_STATE_NAME);
+
+ /* Re-Enable HDCP DDC */
+ DSS_REG_W(io, HDMI_HDCP_DDC_CTRL_0, 0);
+ }
+
+ if (nack0) {
+ SDE_HDCP_DEBUG("%s: Before: HDMI_DDC_SW_STATUS=0x%08x\n",
+ SDE_HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+ /* Reset HDMI DDC software status */
+ DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+ DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(3));
+ msleep(20);
+ DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+ DSS_REG_R(io, HDMI_DDC_CTRL) & ~(BIT(3)));
+
+ /* Reset HDMI DDC Controller */
+ DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+ DSS_REG_R(io, HDMI_DDC_CTRL) | BIT(1));
+ msleep(20);
+ DSS_REG_W_ND(io, HDMI_DDC_CTRL,
+ DSS_REG_R(io, HDMI_DDC_CTRL) & ~BIT(1));
+ SDE_HDCP_DEBUG("%s: After: HDMI_DDC_SW_STATUS=0x%08x\n",
+ SDE_HDCP_STATE_NAME, DSS_REG_R(io, HDMI_DDC_SW_STATUS));
+ }
+
+ hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+
+ failure = (hdcp_ddc_status >> 16) & BIT(0);
+ nack0 = (hdcp_ddc_status >> 14) & BIT(0);
+ SDE_HDCP_DEBUG("%s: On Exit: HDCP_DDC_STATUS=0x%x, FAIL=%d, NACK0=%d\n",
+ SDE_HDCP_STATE_NAME, hdcp_ddc_status, failure, nack0);
+} /* reset_hdcp_ddc_failures */
+
+/*
+ * sde_hdcp_1x_hw_ddc_clean() - wait until the DDC hardware engine is idle.
+ * @hdcp: HDCP 1.x context (needs init_data.core_io with a mapped base).
+ *
+ * Polls HDMI_HDCP_DDC_STATUS and HDMI_DDC_HW_STATUS (20ms apart, up to
+ * 100 tries / ~2s) until no transfer is pending and the engine is idle.
+ * Returns silently on timeout — callers proceed regardless.
+ */
+static void sde_hdcp_1x_hw_ddc_clean(struct sde_hdcp_1x *hdcp)
+{
+ struct dss_io_data *io = NULL;
+ u32 hdcp_ddc_status, ddc_hw_status;
+ u32 ddc_xfer_done, ddc_xfer_req;
+ u32 ddc_hw_req, ddc_hw_not_idle;
+ bool ddc_hw_not_ready, xfer_not_done, hw_not_done;
+ u32 timeout_count;
+
+ if (!hdcp || !hdcp->init_data.core_io) {
+ pr_err("invalid input\n");
+ return;
+ }
+
+ io = hdcp->init_data.core_io;
+ if (!io->base) {
+ /* fix: error message previously read "inititalized" */
+ pr_err("core io not initialized\n");
+ return;
+ }
+
+ /* Wait to be clean on DDC HW engine */
+ timeout_count = 100;
+ do {
+ hdcp_ddc_status = DSS_REG_R(io, HDMI_HDCP_DDC_STATUS);
+ ddc_xfer_req = hdcp_ddc_status & BIT(4);
+ ddc_xfer_done = hdcp_ddc_status & BIT(10);
+
+ ddc_hw_status = DSS_REG_R(io, HDMI_DDC_HW_STATUS);
+ ddc_hw_req = ddc_hw_status & BIT(16);
+ ddc_hw_not_idle = ddc_hw_status & (BIT(0) | BIT(1));
+
+ /* ddc transfer was requested but not completed */
+ xfer_not_done = ddc_xfer_req && !ddc_xfer_done;
+
+ /* ddc status is not idle or a hw request pending */
+ hw_not_done = ddc_hw_not_idle || ddc_hw_req;
+
+ ddc_hw_not_ready = xfer_not_done || hw_not_done;
+
+ SDE_HDCP_DEBUG("%s: timeout count(%d): ddc hw%sready\n",
+ SDE_HDCP_STATE_NAME, timeout_count,
+ ddc_hw_not_ready ? " not " : " ");
+ SDE_HDCP_DEBUG("hdcp_ddc_status[0x%x], ddc_hw_status[0x%x]\n",
+ hdcp_ddc_status, ddc_hw_status);
+ if (ddc_hw_not_ready)
+ msleep(20);
+ } while (ddc_hw_not_ready && --timeout_count);
+} /* hdcp_1x_hw_ddc_clean */
+
+/*
+ * sde_hdcp_1x_load_keys() - program AKSV/An seeds and enable the HDCP engine.
+ * @input: struct sde_hdcp_1x context (void * to match callback signature).
+ *
+ * Loads the transmitter KSV either as SW keys via the TZ helper or from
+ * the QFPROM fuses, validates that the AKSV holds exactly 20 set bits
+ * (HDCP 1.x spec requirement), seeds the An entropy registers and turns
+ * the HDCP engine on. On success, state moves to AUTHENTICATING.
+ *
+ * Return: 0 on success, -EINVAL on invalid input, wrong state or bad AKSV.
+ */
+static int sde_hdcp_1x_load_keys(void *input)
+{
+ int rc = 0;
+ bool use_sw_keys = false;
+ u32 reg_val;
+ u32 ksv_lsb_addr, ksv_msb_addr;
+ u32 aksv_lsb, aksv_msb;
+ u8 aksv[5];
+ struct dss_io_data *io;
+ struct dss_io_data *qfprom_io;
+ struct sde_hdcp_1x *hdcp = input;
+ struct sde_hdcp_reg_set *reg_set;
+
+ if (!hdcp || !hdcp->init_data.core_io ||
+ !hdcp->init_data.qfprom_io) {
+ pr_err("invalid input\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ /* keys may only be (re)loaded when inactive or after an auth failure */
+ if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE) &&
+ !sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
+ pr_err("%s: invalid state. returning\n",
+ SDE_HDCP_STATE_NAME);
+ rc = -EINVAL;
+ goto end;
+ }
+
+ io = hdcp->init_data.core_io;
+ qfprom_io = hdcp->init_data.qfprom_io;
+ reg_set = &hdcp->reg_set;
+
+ /* On compatible hardware, use SW keys */
+ reg_val = DSS_REG_R(qfprom_io, SEC_CTRL_HW_VERSION);
+ if (reg_val >= HDCP_SEL_MIN_SEC_VERSION) {
+ reg_val = DSS_REG_R(qfprom_io,
+ QFPROM_RAW_FEAT_CONFIG_ROW0_MSB +
+ QFPROM_RAW_VERSION_4);
+
+ /* assumes BIT(23) clear means HW keys are not fused — TODO confirm */
+ if (!(reg_val & BIT(23)))
+ use_sw_keys = true;
+ }
+
+ if (use_sw_keys) {
+ if (hdcp1_set_keys(&aksv_msb, &aksv_lsb)) {
+ pr_err("setting hdcp SW keys failed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ } else {
+ /* Fetch aksv from QFPROM, this info should be public. */
+ ksv_lsb_addr = HDCP_KSV_LSB;
+ ksv_msb_addr = HDCP_KSV_MSB;
+
+ if (hdcp->init_data.sec_access) {
+ ksv_lsb_addr += HDCP_KSV_VERSION_4_OFFSET;
+ ksv_msb_addr += HDCP_KSV_VERSION_4_OFFSET;
+ }
+
+ aksv_lsb = DSS_REG_R(qfprom_io, ksv_lsb_addr);
+ aksv_msb = DSS_REG_R(qfprom_io, ksv_msb_addr);
+ }
+
+ SDE_HDCP_DEBUG("%s: AKSV=%02x%08x\n", SDE_HDCP_STATE_NAME,
+ aksv_msb, aksv_lsb);
+
+ /* unpack the 40-bit AKSV into bytes, LSB first */
+ aksv[0] = aksv_lsb & 0xFF;
+ aksv[1] = (aksv_lsb >> 8) & 0xFF;
+ aksv[2] = (aksv_lsb >> 16) & 0xFF;
+ aksv[3] = (aksv_lsb >> 24) & 0xFF;
+ aksv[4] = aksv_msb & 0xFF;
+
+ /* check there are 20 ones in AKSV */
+ if (sde_hdcp_1x_count_one(aksv, 5) != 20) {
+ pr_err("AKSV bit count failed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+
+ DSS_REG_W(io, reg_set->aksv_lsb, aksv_lsb);
+ DSS_REG_W(io, reg_set->aksv_msb, aksv_msb);
+
+ /* Setup seed values for random number An */
+ DSS_REG_W(io, reg_set->entropy_ctrl0, 0xB1FFB0FF);
+ DSS_REG_W(io, reg_set->entropy_ctrl1, 0xF00DFACE);
+
+ /* make sure hw is programmed */
+ wmb();
+
+ /* enable hdcp engine */
+ DSS_REG_W(io, reg_set->ctrl, 0x1);
+
+ hdcp->hdcp_state = HDCP_STATE_AUTHENTICATING;
+end:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_read() - read an HDCP register from the sink over DDC.
+ * @hdcp: HDCP 1.x context.
+ * @sink: sink-side address descriptor (offset, length, name).
+ * @buf: destination buffer, at least sink->len bytes.
+ * @realign: request DDC retry realignment.
+ *
+ * Return: 0 on success or negative error code. The DP path is not
+ * implemented yet and returns 0.
+ */
+static int sde_hdcp_1x_read(struct sde_hdcp_1x *hdcp,
+ struct sde_hdcp_sink_addr *sink,
+ u8 *buf, bool realign)
+{
+ /* was u32: negative error codes must not pass through unsigned */
+ int rc = 0;
+ struct sde_hdmi_tx_ddc_data *ddc_data;
+ struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+ reset_hdcp_ddc_failures(hdcp);
+
+ /*
+ * Check the pointer that can actually be NULL; the old code
+ * tested &ddc_ctrl->ddc_data, which is never NULL.
+ */
+ ddc_ctrl = hdcp->init_data.ddc_ctrl;
+ if (!ddc_ctrl) {
+ SDE_ERROR("invalid ddc ctrl\n");
+ return -EINVAL;
+ }
+ ddc_data = &ddc_ctrl->ddc_data;
+ memset(ddc_data, 0, sizeof(*ddc_data));
+ ddc_data->dev_addr = 0x74;
+ ddc_data->offset = sink->addr;
+ ddc_data->data_buf = buf;
+ ddc_data->data_len = sink->len;
+ ddc_data->request_len = sink->len;
+ ddc_data->retry = 5;
+ ddc_data->what = sink->name;
+ ddc_data->retry_align = realign;
+
+ rc = sde_hdmi_ddc_read((void *)hdcp->init_data.cb_data);
+ if (rc)
+ SDE_ERROR("%s: %s read failed\n",
+ SDE_HDCP_STATE_NAME, sink->name);
+ } else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ /* To-do DP APIs go here */
+ }
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_write() - write an HDCP register to the sink over DDC.
+ * @hdcp: HDCP 1.x context.
+ * @sink: sink-side address descriptor (offset, length, name).
+ * @buf: source buffer holding sink->len bytes.
+ *
+ * Return: 0 on success or negative error code. The DP path is not
+ * implemented yet and returns 0.
+ */
+static int sde_hdcp_1x_write(struct sde_hdcp_1x *hdcp,
+ struct sde_hdcp_sink_addr *sink, u8 *buf)
+{
+ int rc = 0;
+ struct sde_hdmi_tx_ddc_data *ddc_data;
+ struct sde_hdmi_tx_ddc_ctrl *ddc_ctrl;
+
+ if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+ /*
+ * Check the pointer that can actually be NULL; the old code
+ * tested &ddc_ctrl->ddc_data, which is never NULL.
+ */
+ ddc_ctrl = hdcp->init_data.ddc_ctrl;
+ if (!ddc_ctrl) {
+ SDE_ERROR("invalid ddc ctrl\n");
+ return -EINVAL;
+ }
+ ddc_data = &ddc_ctrl->ddc_data;
+ memset(ddc_data, 0, sizeof(*ddc_data));
+
+ ddc_data->dev_addr = 0x74;
+ ddc_data->offset = sink->addr;
+ ddc_data->data_buf = buf;
+ ddc_data->data_len = sink->len;
+ ddc_data->what = sink->name;
+
+ rc = sde_hdmi_ddc_write((void *)hdcp->init_data.cb_data);
+ if (rc)
+ SDE_ERROR("%s: %s write failed\n",
+ SDE_HDCP_STATE_NAME, sink->name);
+ } else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+ /* To-do DP APIs go here */
+ }
+
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_enable_interrupts() - ack pending and unmask HDCP interrupts.
+ * @hdcp: HDCP 1.x context (core_io and int_set must be populated).
+ *
+ * Read-modify-writes the client's interrupt register, setting both the
+ * ack bits (HDCP_INT_CLR) and the enable masks (HDCP_INT_EN) in one go.
+ */
+static void sde_hdcp_1x_enable_interrupts(struct sde_hdcp_1x *hdcp)
+{
+ u32 intr_reg;
+ struct dss_io_data *io;
+ struct sde_hdcp_int_set *isr;
+
+ io = hdcp->init_data.core_io;
+ isr = &hdcp->int_set;
+
+ intr_reg = DSS_REG_R(io, isr->int_reg);
+
+ intr_reg |= HDCP_INT_CLR | HDCP_INT_EN;
+
+ DSS_REG_W(io, isr->int_reg, intr_reg);
+}
+
+/*
+ * sde_hdcp_1x_read_bcaps() - read the sink's BCAPS and classify it.
+ * @hdcp: HDCP 1.x context; hdcp->bcaps and current_tp.ds_type are updated.
+ *
+ * Reads BCAPS over DDC, records whether the downstream device is a
+ * repeater or a receiver, and mirrors BCAPS into the secure HDCP
+ * register block for the hardware.
+ *
+ * Return: 0 on success, -EINVAL on wrong state, or the DDC read error.
+ */
+static int sde_hdcp_1x_read_bcaps(struct sde_hdcp_1x *hdcp)
+{
+ int rc;
+ struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+ struct dss_io_data *hdcp_io = hdcp->init_data.hdcp_io;
+
+ if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+ pr_err("invalid state\n");
+ return -EINVAL;
+ }
+
+ rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
+ &hdcp->bcaps, false);
+ if (IS_ERR_VALUE(rc)) {
+ pr_err("error reading bcaps\n");
+ goto error;
+ }
+
+ SDE_HDCP_DEBUG("bcaps read: 0x%x\n", hdcp->bcaps);
+
+ hdcp->current_tp.ds_type = hdcp->bcaps & reg_set->repeater ?
+ DS_REPEATER : DS_RECEIVER;
+
+ SDE_HDCP_DEBUG("ds: %s\n", hdcp->current_tp.ds_type == DS_REPEATER ?
+ "repeater" : "receiver");
+
+ /* Write BCAPS to the hardware */
+ DSS_REG_W(hdcp_io, reg_set->sec_data12, hdcp->bcaps);
+error:
+ return rc;
+}
+
+/*
+ * sde_hdcp_1x_wait_for_hw_ready() - poll the HDCP hardware until the key
+ * block reports valid keys and the An0/An1 values are ready.
+ * Both polls abort early if authentication is cancelled. Returns 0 on
+ * success, -ETIMEDOUT-style poll error or -EINVAL if the state changed.
+ */
+static int sde_hdcp_1x_wait_for_hw_ready(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u32 link0_status;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct dss_io_data *io = hdcp->init_data.core_io;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/* Wait for HDCP keys to be checked and validated */
+	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+				((link0_status >> reg_set->keys_offset) & 0x7)
+					== HDCP_KEYS_STATE_VALID ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("key not ready\n");
+		goto error;
+	}
+
+	/*
+	 * 1.1_Features turned off by default.
+	 * No need to write AInfo since 1.1_Features is disabled.
+	 */
+	DSS_REG_W(io, reg_set->data4, 0);
+
+	/* Wait for An0 and An1 bit to be ready */
+	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+				(link0_status & (BIT(8) | BIT(9))) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("An not ready\n");
+		goto error;
+	}
+
+	/* As per hardware recommendations, wait before reading An */
+	msleep(20);
+error:
+	/* a cancelled authentication overrides any poll result */
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_send_an_aksv_to_sink() - serialize the cached An (8 bytes,
+ * little-endian from an_0/an_1) and AKSV (5 bytes from aksv_0/aksv_1) and
+ * write both to the sink.
+ * Must be called while in HDCP_STATE_AUTHENTICATING.
+ */
+static int sde_hdcp_1x_send_an_aksv_to_sink(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 an[8], aksv[5];
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	an[0] = hdcp->an_0 & 0xFF;
+	an[1] = (hdcp->an_0 >> 8) & 0xFF;
+	an[2] = (hdcp->an_0 >> 16) & 0xFF;
+	an[3] = (hdcp->an_0 >> 24) & 0xFF;
+	an[4] = hdcp->an_1 & 0xFF;
+	an[5] = (hdcp->an_1 >> 8) & 0xFF;
+	an[6] = (hdcp->an_1 >> 16) & 0xFF;
+	an[7] = (hdcp->an_1 >> 24) & 0xFF;
+
+	SDE_HDCP_DEBUG("an read: 0x%2x%2x%2x%2x%2x%2x%2x%2x\n",
+		an[7], an[6], an[5], an[4], an[3], an[2], an[1], an[0]);
+
+	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.an, an);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error writing an to sink\n");
+		goto error;
+	}
+
+	/* Copy An and AKSV to byte arrays for transmission */
+	aksv[0] =  hdcp->aksv_0 & 0xFF;
+	aksv[1] = (hdcp->aksv_0 >> 8) & 0xFF;
+	aksv[2] = (hdcp->aksv_0 >> 16) & 0xFF;
+	aksv[3] = (hdcp->aksv_0 >> 24) & 0xFF;
+	aksv[4] =  hdcp->aksv_1 & 0xFF;
+
+	SDE_HDCP_DEBUG("aksv read: 0x%2x%2x%2x%2x%2x\n",
+		aksv[4], aksv[3], aksv[2], aksv[1], aksv[0]);
+
+	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.aksv, aksv);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error writing aksv to sink\n");
+		goto error;
+	}
+error:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_read_an_aksv_from_hw() - latch An (data5/data6) and AKSV
+ * (data3/data4) from the HDCP hardware into the context.
+ * For DP clients the An registers are read twice with a 1 us gap —
+ * presumably a hardware latching requirement; TODO confirm against the HPG.
+ */
+static int sde_hdcp_1x_read_an_aksv_from_hw(struct sde_hdcp_1x *hdcp)
+{
+	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		udelay(1);
+		hdcp->an_0 = DSS_REG_R(io, reg_set->data5);
+	}
+
+	hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+	if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		udelay(1);
+		hdcp->an_1 = DSS_REG_R(io, reg_set->data6);
+	}
+
+	/* Read AKSV */
+	hdcp->aksv_0 = DSS_REG_R(io, reg_set->data3);
+	hdcp->aksv_1 = DSS_REG_R(io, reg_set->data4);
+
+	return 0;
+}
+
+/*
+ * sde_hdcp_1x_get_bksv_from_sink() - read the sink's 5-byte BKSV, validate
+ * that it contains exactly twenty 1-bits (HDCP 1.x requirement), and program
+ * it into the secure data0/data1 registers.
+ */
+static int sde_hdcp_1x_get_bksv_from_sink(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 *bksv = hdcp->current_tp.bksv;
+	u32 link0_bksv_0, link0_bksv_1;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct dss_io_data *hdcp_io  = hdcp->init_data.hdcp_io;
+
+	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bksv, bksv, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading bksv from sink\n");
+		goto error;
+	}
+
+	SDE_HDCP_DEBUG("bksv read: 0x%2x%2x%2x%2x%2x\n",
+		bksv[4], bksv[3], bksv[2], bksv[1], bksv[0]);
+
+	/* check there are 20 ones in BKSV */
+	if (sde_hdcp_1x_count_one(bksv, 5) != 20) {
+		pr_err("%s: BKSV doesn't have 20 1's and 20 0's\n",
+			SDE_HDCP_STATE_NAME);
+		rc = -EINVAL;
+		goto error;
+	}
+
+	/* pack bytes 0..3 little-endian into one word, byte 4 separately */
+	link0_bksv_0 = bksv[3];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[2];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[1];
+	link0_bksv_0 = (link0_bksv_0 << 8) | bksv[0];
+	link0_bksv_1 = bksv[4];
+
+	DSS_REG_W(hdcp_io, reg_set->sec_data0, link0_bksv_0);
+	DSS_REG_W(hdcp_io, reg_set->sec_data1, link0_bksv_1);
+error:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_swap_byte_order() - allocate a new buffer holding @num_dev
+ * KSVs from @bksv_in with the byte order of each 5-byte KSV reversed.
+ * Returns the allocated buffer (caller must kfree) or NULL on failure.
+ */
+static u8 *sde_hdcp_1x_swap_byte_order(u8 *bksv_in, int num_dev)
+{
+	u8 *bksv_out;
+	u8 *tmp_out;
+	u8 *tmp_in;
+	int i, j;
+
+	/* Don't exceed max downstream devices */
+	if (num_dev > MAX_DEVICES_SUPPORTED) {
+		pr_err("invalid params\n");
+		return NULL;
+	}
+
+	bksv_out = kzalloc(RECV_ID_SIZE * num_dev, GFP_KERNEL);
+
+	if (!bksv_out)
+		return NULL;
+
+	SDE_HDCP_DEBUG("num_dev = %d\n", num_dev);
+
+	/* Store temporarily for return */
+	tmp_out = bksv_out;
+	tmp_in = bksv_in;
+
+	for (i = 0; i < num_dev; i++) {
+		/* reverse the bytes of one KSV */
+		for (j = 0; j < RECV_ID_SIZE; j++)
+			bksv_out[j] = tmp_in[RECV_ID_SIZE - j - 1];
+
+		/* Each KSV is 5 bytes long */
+		bksv_out += RECV_ID_SIZE;
+		tmp_in += RECV_ID_SIZE;
+	}
+
+	return tmp_out;
+}
+
+/*
+ * sde_hdcp_1x_revoked_rcv_chk() - check the receiver's BKSV against the
+ * System Renewability Message (SRM) revocation list.
+ * Returns 0 if the receiver is not revoked, -ENOMEM on allocation failure,
+ * or the validation error from hdcp1_validate_receiver_ids().
+ */
+static int sde_hdcp_1x_revoked_rcv_chk(struct sde_hdcp_1x *hdcp)
+{
+	int rc = 0;
+	u8 *bksv = hdcp->current_tp.bksv;
+	u8 *bksv_out;
+	struct hdcp_srm_device_id_t *bksv_srm;
+
+	/* SRM library expects big-endian KSVs, so swap byte order first */
+	bksv_out = sde_hdcp_1x_swap_byte_order(bksv, 1);
+
+	if (!bksv_out) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	SDE_HDCP_DEBUG("bksv_out : 0x%2x%2x%2x%2x%2x\n",
+		bksv_out[4], bksv_out[3], bksv_out[2],
+		bksv_out[1], bksv_out[0]);
+
+	bksv_srm = (struct hdcp_srm_device_id_t *)bksv_out;
+	/* Here we are checking only receiver ID
+	 * hence the device count is one
+	 */
+	rc = hdcp1_validate_receiver_ids(bksv_srm, 1);
+
+	kfree(bksv_out);
+
+exit:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_revoked_rpt_chk() - check every KSV in the repeater's KSV
+ * list against the SRM revocation list.
+ * Returns 0 if no downstream device is revoked, -ENOMEM on allocation
+ * failure, or the validation error from hdcp1_validate_receiver_ids().
+ */
+static int sde_hdcp_1x_revoked_rpt_chk(struct sde_hdcp_1x *hdcp)
+{
+	int rc = 0;
+	int i;
+	u8 *bksv = hdcp->current_tp.ksv_list;
+	u8 *bksv_out;
+	struct hdcp_srm_device_id_t *bksv_srm;
+
+	/* dump each downstream KSV for debugging */
+	for (i = 0; i < hdcp->sink_addr.ksv_fifo.len;
+			i += RECV_ID_SIZE) {
+		SDE_HDCP_DEBUG("bksv : 0x%2x%2x%2x%2x%2x\n",
+			       bksv[i + 4],
+			       bksv[i + 3], bksv[i + 2],
+			       bksv[i + 1], bksv[i]);
+	}
+
+	/* SRM library expects big-endian KSVs, so swap byte order first */
+	bksv_out = sde_hdcp_1x_swap_byte_order(bksv,
+					      hdcp->current_tp.dev_count);
+
+	if (!bksv_out) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+
+	bksv_srm = (struct hdcp_srm_device_id_t *)bksv_out;
+	/* Here we are checking repeater ksv list */
+	rc = hdcp1_validate_receiver_ids(bksv_srm,
+					 hdcp->current_tp.dev_count);
+
+	kfree(bksv_out);
+
+exit:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_enable_sink_irq_hpd() - for DP repeaters whose DPCD version
+ * is at least 1.2, ask the sink (via AINFO) to signal events through IRQ_HPD.
+ * No-op for HDMI clients, non-repeaters, and older sinks.
+ */
+static void sde_hdcp_1x_enable_sink_irq_hpd(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 const required_major = 1, required_minor = 2;
+	u8 sink_major = 0, sink_minor = 0;
+	u8 enable_hpd_irq = 0x1;
+	u16 version;
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+		return;
+
+	/* version byte: high nibble = major, low nibble = minor */
+	version = *hdcp->init_data.version;
+	sink_major = (version >> 4) & 0x0f;
+	sink_minor = version & 0x0f;
+
+	if ((sink_minor < required_minor) || (sink_major < required_major) ||
+	      (hdcp->current_tp.ds_type != DS_REPEATER)) {
+		pr_debug("sink irq hpd not enabled\n");
+		return;
+	}
+
+	rc = sde_hdcp_1x_write(hdcp, &hdcp->sink_addr.ainfo, &enable_hpd_irq);
+	if (IS_ERR_VALUE(rc))
+		SDE_HDCP_DEBUG("error writing ainfo to sink\n");
+}
+
+/*
+ * sde_hdcp_1x_verify_r0() - wait for the local R0 computation, read R0'
+ * from the sink, and let the hardware compare them (RO_MATCHES, BIT(12)
+ * of the status register). Retries the read/compare up to three times.
+ * Returns 0 when R0 == R0', a negative error otherwise.
+ */
+static int sde_hdcp_1x_verify_r0(struct sde_hdcp_1x *hdcp)
+{
+	int rc, r0_retry = 3;
+	u8 buf[2];
+	u32 link0_status, timeout_count;
+	u32 const r0_read_delay_us = 1;
+	u32 const r0_read_timeout_us = r0_read_delay_us * 10;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct dss_io_data *io = hdcp->init_data.core_io;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/* Wait for HDCP R0 computation to be completed */
+	rc = readl_poll_timeout(io->base + reg_set->status, link0_status,
+				(link0_status & BIT(reg_set->r0_offset)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("R0 not ready\n");
+		goto error;
+	}
+
+	/*
+	 * HDCP Compliance Test case 1A-01:
+	 * Wait here at least 100ms before reading R0'
+	 */
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		msleep(100);
+	} else {
+		/* DP: wait (up to 500 ms) for the sink to flag R0' ready */
+		if (!hdcp->sink_r0_ready) {
+			reinit_completion(&hdcp->sink_r0_available);
+			timeout_count = wait_for_completion_timeout(
+				&hdcp->sink_r0_available, HZ / 2);
+
+			if (hdcp->reauth) {
+				pr_err("sink R0 not ready\n");
+				rc = -EINVAL;
+				goto error;
+			}
+		}
+	}
+
+	do {
+		memset(buf, 0, sizeof(buf));
+
+		rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.r0,
+			buf, false);
+		if (IS_ERR_VALUE(rc)) {
+			pr_err("error reading R0' from sink\n");
+			goto error;
+		}
+
+		SDE_HDCP_DEBUG("sink R0'read: %2x%2x\n", buf[1], buf[0]);
+
+		/* feed R0' to the hardware comparator (little-endian) */
+		DSS_REG_W(io, reg_set->data2_0, (((u32)buf[1]) << 8) | buf[0]);
+
+		rc = readl_poll_timeout(io->base + reg_set->status,
+				link0_status, (link0_status & BIT(12)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				r0_read_delay_us, r0_read_timeout_us);
+	} while (rc && --r0_retry);
+error:
+	/* a cancelled authentication overrides any poll result */
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_authentication_part1() - run HDCP 1.x first-part
+ * authentication: enable interrupts, read BCAPS, wait for keys/An, exchange
+ * An/AKSV/BKSV with the sink (including an SRM revocation check of BKSV)
+ * and verify R0 == R0'.
+ * Returns 0 on success; -SDE_HDCP_SRM_FAIL specifically when the receiver
+ * is revoked (callers use this to suppress reauthentication).
+ */
+static int sde_hdcp_1x_authentication_part1(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	sde_hdcp_1x_enable_interrupts(hdcp);
+
+	rc = sde_hdcp_1x_read_bcaps(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_wait_for_hw_ready(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_read_an_aksv_from_hw(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_get_bksv_from_sink(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_revoked_rcv_chk(hdcp);
+	if (rc) {
+		rc = -SDE_HDCP_SRM_FAIL;
+		goto error;
+	}
+
+	rc = sde_hdcp_1x_send_an_aksv_to_sink(hdcp);
+	if (rc)
+		goto error;
+
+	sde_hdcp_1x_enable_sink_irq_hpd(hdcp);
+
+	rc = sde_hdcp_1x_verify_r0(hdcp);
+	if (rc)
+		goto error;
+
+	pr_info("SUCCESSFUL\n");
+
+	return 0;
+error:
+	pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
+
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_transfer_v_h() - read the sink's V' hash (V.H0..V.H4, read as
+ * one contiguous burst starting at V.H0) and write the five 32-bit words to
+ * the secure data7..data11 registers.
+ */
+static int sde_hdcp_1x_transfer_v_h(struct sde_hdcp_1x *hdcp)
+{
+	int rc = 0;
+	struct dss_io_data *io = hdcp->init_data.hdcp_io;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	struct sde_hdcp_1x_reg_data reg_data[]  = {
+		{reg_set->sec_data7,  &hdcp->sink_addr.v_h0},
+		{reg_set->sec_data8,  &hdcp->sink_addr.v_h1},
+		{reg_set->sec_data9,  &hdcp->sink_addr.v_h2},
+		{reg_set->sec_data10, &hdcp->sink_addr.v_h3},
+		{reg_set->sec_data11, &hdcp->sink_addr.v_h4},
+	};
+	struct sde_hdcp_sink_addr sink = {"V", reg_data->sink->addr};
+	u32 size = ARRAY_SIZE(reg_data);
+	u8 buf[0xFF] = {0};
+	u32 i = 0, len = 0;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/* total burst length = sum of the individual V.H register lengths */
+	for (i = 0; i < size; i++) {
+		struct sde_hdcp_1x_reg_data *rd = reg_data + i;
+
+		len += rd->sink->len;
+	}
+
+	sink.len = len;
+
+	rc = sde_hdcp_1x_read(hdcp, &sink, buf, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading %s\n", sink.name);
+		goto end;
+	}
+
+
+	for (i = 0; i < size; i++) {
+		struct sde_hdcp_1x_reg_data *rd = reg_data + i;
+		/* NOTE(review): this local shadows the reg_data[] array above */
+		u32 reg_data;
+
+		memcpy(&reg_data, buf + (sizeof(u32) * i), sizeof(u32));
+		DSS_REG_W(io, rd->reg_id, reg_data);
+	}
+end:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_validate_downstream() - read BSTATUS from the sink, reject
+ * topologies that exceed the max device count or cascade depth, cache the
+ * topology in current_tp and program BCAPS|BSTATUS into sec_data12.
+ */
+static int sde_hdcp_1x_validate_downstream(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	u8 buf[2] = {0, 0};
+	u8 device_count, depth;
+	u8 max_cascade_exceeded, max_devs_exceeded;
+	u16 bstatus;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bstatus,
+			buf, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading bstatus\n");
+		goto end;
+	}
+
+	/* assemble the 16-bit BSTATUS from the two bytes (little-endian) */
+	bstatus = buf[1];
+	bstatus = (bstatus << 8) | buf[0];
+
+	device_count = bstatus & 0x7F;
+
+	SDE_HDCP_DEBUG("device count %d\n", device_count);
+
+	/* Cascaded repeater depth */
+	depth = (bstatus >> 8) & 0x7;
+	SDE_HDCP_DEBUG("depth %d\n", depth);
+
+	/*
+	 * HDCP Compliance 1B-05:
+	 * Check if no. of devices connected to repeater
+	 * exceed max_devices_connected from bit 7 of Bstatus.
+	 */
+	max_devs_exceeded = (bstatus & BIT(7)) >> 7;
+	if (max_devs_exceeded == 0x01) {
+		pr_err("no. of devs connected exceed max allowed\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/*
+	 * HDCP Compliance 1B-06:
+	 * Check if no. of cascade connected to repeater
+	 * exceed max_cascade_connected from bit 11 of Bstatus.
+	 */
+	max_cascade_exceeded = (bstatus & BIT(11)) >> 11;
+	if (max_cascade_exceeded == 0x01) {
+		pr_err("no. of cascade connections exceed max allowed\n");
+		rc = -EINVAL;
+		goto end;
+	}
+
+	/* Update topology information */
+	hdcp->current_tp.dev_count = device_count;
+	hdcp->current_tp.max_cascade_exceeded = max_cascade_exceeded;
+	hdcp->current_tp.max_dev_exceeded = max_devs_exceeded;
+	hdcp->current_tp.depth = depth;
+
+	DSS_REG_W(hdcp->init_data.hdcp_io,
+		  reg_set->sec_data12, hdcp->bcaps | (bstatus << 8));
+end:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_read_ksv_fifo() - read the repeater's KSV FIFO
+ * (5 bytes per downstream device) into current_tp.ksv_list, retrying the
+ * read up to 20 times.
+ * NOTE(review): rc is declared u32 but IS_ERR_VALUE()/negative error codes
+ * are used with it — confirm this matches the driver's error conventions.
+ */
+static int sde_hdcp_1x_read_ksv_fifo(struct sde_hdcp_1x *hdcp)
+{
+	u32 ksv_read_retry = 20, ksv_bytes, rc = 0;
+	u8 *ksv_fifo = hdcp->current_tp.ksv_list;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	memset(ksv_fifo, 0, sizeof(hdcp->current_tp.ksv_list));
+
+	/* each KSV is 5 bytes long */
+	ksv_bytes = 5 * hdcp->current_tp.dev_count;
+	hdcp->sink_addr.ksv_fifo.len = ksv_bytes;
+
+	while (ksv_bytes && --ksv_read_retry) {
+		rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.ksv_fifo,
+				ksv_fifo, true);
+		if (IS_ERR_VALUE(rc))
+			pr_err("could not read ksv fifo (%d)\n",
+				ksv_read_retry);
+		else
+			break;
+	}
+
+	if (rc)
+		pr_err("error reading ksv_fifo\n");
+
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_write_ksv_fifo() - feed the downloaded KSV list into the
+ * hardware SHA-1 engine byte by byte, then wait for SHA completion and for
+ * the hardware to report that the computed V matches the sink's V'.
+ * Returns 0 on a V match, a negative error otherwise.
+ */
+static int sde_hdcp_1x_write_ksv_fifo(struct sde_hdcp_1x *hdcp)
+{
+	int i, rc = 0;
+	u8 *ksv_fifo = hdcp->current_tp.ksv_list;
+	u32 ksv_bytes = hdcp->sink_addr.ksv_fifo.len;
+	struct dss_io_data *io = hdcp->init_data.core_io;
+	struct dss_io_data *sec_io = hdcp->init_data.hdcp_io;
+	struct sde_hdcp_reg_set *reg_set = &hdcp->reg_set;
+	u32 sha_status = 0, status;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/* reset SHA Controller */
+	DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x1);
+	DSS_REG_W(sec_io, reg_set->sec_sha_ctrl, 0x0);
+
+	for (i = 0; i < ksv_bytes - 1; i++) {
+		/* Write KSV byte and do not set DONE bit[0] */
+		DSS_REG_W_ND(sec_io, reg_set->sec_sha_data, ksv_fifo[i] << 16);
+
+		/*
+		 * Once 64 bytes have been written, we need to poll for
+		 * HDCP_SHA_BLOCK_DONE before writing any further
+		 */
+		if (i && !((i + 1) % 64)) {
+			rc = readl_poll_timeout(io->base + reg_set->sha_status,
+				sha_status, (sha_status & BIT(0)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+			if (IS_ERR_VALUE(rc)) {
+				pr_err("block not done\n");
+				goto error;
+			}
+		}
+	}
+
+	/* Write last KSV byte with 1 in DONE bit[0] */
+	DSS_REG_W_ND(sec_io, reg_set->sec_sha_data,
+		(ksv_fifo[ksv_bytes - 1] << 16) | 0x1);
+
+	/* Now wait for HDCP_SHA_COMP_DONE */
+	rc = readl_poll_timeout(io->base + reg_set->sha_status, sha_status,
+				(sha_status & BIT(4)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("V computation not done\n");
+		goto error;
+	}
+
+	/* Wait for V_MATCHES */
+	rc = readl_poll_timeout(io->base + reg_set->status, status,
+				(status & BIT(reg_set->v_offset)) ||
+				!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING),
+				HDCP_POLL_SLEEP_US, HDCP_POLL_TIMEOUT_US);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("V mismatch\n");
+		rc = -EINVAL;
+	}
+error:
+	/* a cancelled authentication overrides any poll result */
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+		rc = -EINVAL;
+
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_wait_for_ksv_ready() - wait (bounded by the spec's 5 second
+ * limit) for the repeater to report its KSV list ready: BCAPS BIT(5) for
+ * HDMI, CP_IRQ status BIT(0) for DP. Sets hdcp->ksv_ready on success.
+ */
+static int sde_hdcp_1x_wait_for_ksv_ready(struct sde_hdcp_1x *hdcp)
+{
+	int rc, timeout;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	/*
+	 * Wait until READY bit is set in BCAPS, as per HDCP specifications
+	 * maximum permitted time to check for READY bit is five seconds.
+	 */
+	rc = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.bcaps,
+		&hdcp->bcaps, false);
+	if (IS_ERR_VALUE(rc)) {
+		pr_err("error reading bcaps\n");
+		goto error;
+	}
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		/* 50 polls x 100 ms sleep = 5 second budget */
+		timeout = 50;
+
+		while (!(hdcp->bcaps & BIT(5)) && --timeout) {
+			rc = sde_hdcp_1x_read(hdcp,
+				&hdcp->sink_addr.bcaps,
+				&hdcp->bcaps, false);
+			if (IS_ERR_VALUE(rc) ||
+			   !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+				pr_err("error reading bcaps\n");
+				goto error;
+			}
+			msleep(100);
+		}
+	} else {
+		u8 cp_buf = 0;
+		struct sde_hdcp_sink_addr *sink =
+			&hdcp->sink_addr.cp_irq_status;
+
+		/*
+		 * NOTE(review): for DP, "timeout" is reused as a start
+		 * timestamp in ms and then as a success/failure flag.
+		 */
+		timeout = jiffies_to_msecs(jiffies);
+
+		while (1) {
+			rc = sde_hdcp_1x_read(hdcp, sink, &cp_buf, false);
+			if (rc)
+				goto error;
+
+			if (cp_buf & BIT(0))
+				break;
+
+			/* max timeout of 5 sec as per hdcp 1.x spec */
+			if (abs(timeout - jiffies_to_msecs(jiffies)) > 5000) {
+				timeout = 0;
+				break;
+			}
+
+			if (hdcp->ksv_ready || hdcp->reauth ||
+			    !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+				break;
+
+			/* re-read after a minimum delay */
+			msleep(20);
+		}
+	}
+
+	if (!timeout || hdcp->reauth ||
+	    !sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("DS KSV not ready\n");
+		rc = -EINVAL;
+	} else {
+		hdcp->ksv_ready = true;
+	}
+error:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_authentication_part2() - run HDCP 1.x second-part (repeater)
+ * authentication: validate the downstream topology, read and SRM-check the
+ * KSV list, then transfer V' and run the SHA/V comparison, retrying the
+ * V exchange up to three times. On success, moves the state machine to
+ * HDCP_STATE_AUTHENTICATED.
+ */
+static int sde_hdcp_1x_authentication_part2(struct sde_hdcp_1x *hdcp)
+{
+	int rc;
+	int v_retry = 3;
+
+	rc = sde_hdcp_1x_validate_downstream(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_read_ksv_fifo(hdcp);
+	if (rc)
+		goto error;
+
+	rc = sde_hdcp_1x_revoked_rpt_chk(hdcp);
+	if (rc) {
+		rc = -SDE_HDCP_SRM_FAIL;
+		goto error;
+	}
+
+	do {
+		/*
+		 * Do not proceed further if no device connected
+		 * If no downstream devices are attached to the repeater
+		 * then part II fails.
+		 */
+
+		if (!hdcp->current_tp.dev_count) {
+			rc = -EINVAL;
+			goto error;
+		}
+
+		rc = sde_hdcp_1x_transfer_v_h(hdcp);
+		if (rc)
+			goto error;
+
+		rc = sde_hdcp_1x_write_ksv_fifo(hdcp);
+	} while (--v_retry && rc);
+error:
+	if (rc) {
+		pr_err("%s: FAILED\n", SDE_HDCP_STATE_NAME);
+	} else {
+		hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
+
+		pr_info("SUCCESSFUL\n");
+	}
+
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_cache_topology() - snapshot current_tp into cached_tp and
+ * hand the cached copy to the HDCP library for repeater topology caching.
+ */
+static void sde_hdcp_1x_cache_topology(struct sde_hdcp_1x *hdcp)
+{
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	memcpy((void *)&hdcp->cached_tp,
+		(void *) &hdcp->current_tp,
+		sizeof(hdcp->cached_tp));
+	hdcp1_cache_repeater_topology((void *)&hdcp->cached_tp);
+}
+
+/* Thin wrapper: forward the topology-changed event to the HDCP library. */
+static void sde_hdcp_1x_notify_topology(void)
+{
+	hdcp1_notify_topology();
+}
+
+/*
+ * sde_hdcp_1x_update_auth_status() - react to the current HDCP state:
+ * AV-mute the sink on failure, cache/notify topology on success, and
+ * report the state to the registered client callback (unless inactive).
+ */
+static void sde_hdcp_1x_update_auth_status(struct sde_hdcp_1x *hdcp)
+{
+	if (sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL))
+		hdcp->init_data.avmute_sink(hdcp->init_data.cb_data);
+
+	if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
+		sde_hdcp_1x_cache_topology(hdcp);
+		sde_hdcp_1x_notify_topology();
+	}
+
+	if (hdcp->init_data.notify_status &&
+	    !sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		hdcp->init_data.notify_status(
+			hdcp->init_data.cb_data,
+			hdcp->hdcp_state);
+	}
+}
+
+/*
+ * sde_hdcp_1x_auth_work() - delayed-work handler that drives the full
+ * HDCP 1.x authentication: enables software DDC (HDMI), pre-arms
+ * encryption, then runs part 1 and — for repeaters — KSV-ready wait plus
+ * part 2. Any failure moves the state to AUTH_FAIL (or
+ * AUTH_FAIL_NOREAUTH on an SRM revocation) and notifies the client.
+ */
+static void sde_hdcp_1x_auth_work(struct work_struct *work)
+{
+	int rc;
+	struct delayed_work *dw = to_delayed_work(work);
+	struct sde_hdcp_1x *hdcp = container_of(dw,
+		struct sde_hdcp_1x, hdcp_auth_work);
+	struct dss_io_data *io;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+		pr_err("invalid state\n");
+		return;
+	}
+
+	/* reset per-attempt flags */
+	hdcp->sink_r0_ready = false;
+	hdcp->reauth = false;
+	hdcp->ksv_ready = false;
+
+	io = hdcp->init_data.core_io;
+	/* Enabling Software DDC for HDMI and REF timer for DP */
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+		DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+				HDMI_DDC_ARBITRATION) & ~(BIT(4)));
+	else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		/* To do for DP */
+	}
+
+	/*
+	 * program hw to enable encryption as soon as
+	 * authentication is successful.
+	 */
+	hdcp1_set_enc(true);
+
+	rc = sde_hdcp_1x_authentication_part1(hdcp);
+	if (rc)
+		goto end;
+
+	if (hdcp->current_tp.ds_type == DS_REPEATER) {
+		rc = sde_hdcp_1x_wait_for_ksv_ready(hdcp);
+		if (rc)
+			goto end;
+	} else {
+		/* plain receiver: part 1 success completes authentication */
+		hdcp->hdcp_state = HDCP_STATE_AUTHENTICATED;
+		goto end;
+	}
+
+	hdcp->ksv_ready = false;
+
+	rc = sde_hdcp_1x_authentication_part2(hdcp);
+	if (rc)
+		goto end;
+
+
+end:
+	if (rc && !sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+		/* SRM revocation must not trigger reauthentication */
+		if (rc == -SDE_HDCP_SRM_FAIL)
+			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL_NOREAUTH;
+	}
+
+	/*
+	 * Disabling software DDC before going into part3 to make sure
+	 * there is no Arbitration between software and hardware for DDC
+	 */
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI)
+		DSS_REG_W_ND(io, HDMI_DDC_ARBITRATION, DSS_REG_R(io,
+				HDMI_DDC_ARBITRATION) | (BIT(4)));
+
+	sde_hdcp_1x_update_auth_status(hdcp);
+}
+
+/*
+ * sde_hdcp_1x_authenticate() - public entry point: load the HDCP keys and,
+ * on success, schedule the authentication work after a 500 ms delay;
+ * on key-load failure, mark AUTH_FAIL and notify immediately.
+ * Only legal from HDCP_STATE_INACTIVE.
+ */
+static int sde_hdcp_1x_authenticate(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	/* make sure no earlier attempt is still running */
+	flush_delayed_work(&hdcp->hdcp_auth_work);
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	if (!sde_hdcp_1x_load_keys(input)) {
+
+		queue_delayed_work(hdcp->workq,
+			&hdcp->hdcp_auth_work, HZ/2);
+	} else {
+		hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+		sde_hdcp_1x_update_auth_status(hdcp);
+	}
+
+	return 0;
+} /* hdcp_1x_authenticate */
+
+/*
+ * sde_hdcp_1x_reauthenticate() - recover from HDCP_STATE_AUTH_FAIL:
+ * reset the HDMI core (on newer hardware), clean the DDC engine, disable
+ * HDCP interrupts and the HDCP block, then restart authentication from
+ * the inactive state.
+ * NOTE(review): ret is declared u32 but the function returns int — it is
+ * only ever 0 here, so harmless, yet int would be the conventional type.
+ */
+static int sde_hdcp_1x_reauthenticate(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	struct dss_io_data *io;
+	struct sde_hdcp_reg_set *reg_set;
+	struct sde_hdcp_int_set *isr;
+	u32 hdmi_hw_version;
+	u32 ret = 0, reg;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return -EINVAL;
+	}
+
+	io = hdcp->init_data.core_io;
+	reg_set = &hdcp->reg_set;
+	isr = &hdcp->int_set;
+
+	if (!sde_hdcp_1x_state(HDCP_STATE_AUTH_FAIL)) {
+		pr_err("invalid state\n");
+		return -EINVAL;
+	}
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		hdmi_hw_version = DSS_REG_R(io, HDMI_VERSION);
+		if (hdmi_hw_version >= 0x30030000) {
+			/* pulse the HDMI controller soft reset */
+			DSS_REG_W(io, HDMI_CTRL_SW_RESET, BIT(1));
+			DSS_REG_W(io, HDMI_CTRL_SW_RESET, 0);
+		}
+
+		/* Wait to be clean on DDC HW engine */
+		sde_hdcp_1x_hw_ddc_clean(hdcp);
+	}
+
+	/* Disable HDCP interrupts */
+	DSS_REG_W(io, isr->int_reg, DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
+
+	/* assert the HDCP reset bit around disabling the block */
+	reg = DSS_REG_R(io, reg_set->reset);
+	DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, reg_set->ctrl, 0);
+
+	DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
+
+	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+	sde_hdcp_1x_authenticate(hdcp);
+
+	return ret;
+} /* hdcp_1x_reauthenticate */
+
+/*
+ * sde_hdcp_1x_off() - tear down the HDCP session: disable interrupts and
+ * mark the state inactive (under the client mutex so in-flight work sees
+ * it), unblock any waiters, cancel pending auth work, then disable
+ * encryption and the HDCP block.
+ */
+static void sde_hdcp_1x_off(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	struct dss_io_data *io;
+	struct sde_hdcp_reg_set *reg_set;
+	struct sde_hdcp_int_set *isr;
+	int rc = 0;
+	u32 reg;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	io = hdcp->init_data.core_io;
+	reg_set = &hdcp->reg_set;
+	isr = &hdcp->int_set;
+
+	if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		pr_err("invalid state\n");
+		return;
+	}
+
+	/*
+	 * Disable HDCP interrupts.
+	 * Also, need to set the state to inactive here so that any ongoing
+	 * reauth works will know that the HDCP session has been turned off.
+	 */
+	mutex_lock(hdcp->init_data.mutex);
+	DSS_REG_W(io, isr->int_reg,
+		DSS_REG_R(io, isr->int_reg) & ~HDCP_INT_EN);
+	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+	mutex_unlock(hdcp->init_data.mutex);
+
+	/* complete any wait pending */
+	complete_all(&hdcp->sink_r0_available);
+	complete_all(&hdcp->r0_checked);
+	/*
+	 * Cancel any pending auth/reauth attempts.
+	 * If one is ongoing, this will wait for it to finish.
+	 * No more reauthentication attempts will be scheduled since we
+	 * set the current state to inactive.
+	 */
+	rc = cancel_delayed_work_sync(&hdcp->hdcp_auth_work);
+	if (rc)
+		SDE_HDCP_DEBUG("%s: Deleted hdcp auth work\n",
+			SDE_HDCP_STATE_NAME);
+
+	hdcp1_set_enc(false);
+
+	/* assert the HDCP reset bit around disabling the block */
+	reg = DSS_REG_R(io, reg_set->reset);
+	DSS_REG_W(io, reg_set->reset, reg | reg_set->reset_bit);
+
+	/* Disable encryption and disable the HDCP block */
+	DSS_REG_W(io, reg_set->ctrl, 0);
+
+	DSS_REG_W(io, reg_set->reset, reg & ~reg_set->reset_bit);
+
+	hdcp->sink_r0_ready = false;
+
+	SDE_HDCP_DEBUG("%s: HDCP: Off\n", SDE_HDCP_STATE_NAME);
+} /* hdcp_1x_off */
+
+/*
+ * sde_hdcp_1x_isr() - HDCP interrupt handler: acknowledge each asserted
+ * interrupt (auth success/fail, DDC transfer request/done, encryption
+ * ready/not-ready) and drive the corresponding state change or completion.
+ * All interrupts are cleared-and-ignored while HDCP is inactive.
+ */
+static int sde_hdcp_1x_isr(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	int rc = 0;
+	struct dss_io_data *io;
+	u32 hdcp_int_val;
+	struct sde_hdcp_reg_set *reg_set;
+	struct sde_hdcp_int_set *isr;
+
+	if (!hdcp || !hdcp->init_data.core_io) {
+		pr_err("invalid input\n");
+		rc = -EINVAL;
+		goto error;
+	}
+
+	io = hdcp->init_data.core_io;
+	reg_set = &hdcp->reg_set;
+	isr = &hdcp->int_set;
+
+	hdcp_int_val = DSS_REG_R(io, isr->int_reg);
+
+	/* Ignore HDCP interrupts if HDCP is disabled */
+	if (sde_hdcp_1x_state(HDCP_STATE_INACTIVE)) {
+		DSS_REG_W(io, isr->int_reg, hdcp_int_val | HDCP_INT_CLR);
+		return 0;
+	}
+
+	if (hdcp_int_val & isr->auth_success_int) {
+		/* AUTH_SUCCESS_INT */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->auth_success_ack));
+		SDE_HDCP_DEBUG("%s: AUTH SUCCESS\n", SDE_HDCP_STATE_NAME);
+
+		/* wake anyone waiting on the R0 check */
+		if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING))
+			complete_all(&hdcp->r0_checked);
+	}
+
+	if (hdcp_int_val & isr->auth_fail_int) {
+		/* AUTH_FAIL_INT */
+		u32 link_status = DSS_REG_R(io, reg_set->status);
+
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->auth_fail_ack));
+
+		SDE_HDCP_DEBUG("%s: AUTH FAIL, LINK0_STATUS=0x%08x\n",
+			SDE_HDCP_STATE_NAME, link_status);
+
+		/* Clear AUTH_FAIL_INFO as well */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->auth_fail_info_ack));
+
+		if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATED)) {
+			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+			sde_hdcp_1x_update_auth_status(hdcp);
+		} else if (sde_hdcp_1x_state(HDCP_STATE_AUTHENTICATING)) {
+			complete_all(&hdcp->r0_checked);
+		}
+
+	}
+
+	if (hdcp_int_val & isr->tx_req_int) {
+		/* DDC_XFER_REQ_INT */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->tx_req_ack));
+		SDE_HDCP_DEBUG("%s: DDC_XFER_REQ_INT received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & isr->tx_req_done_int) {
+		/* DDC_XFER_DONE_INT */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->tx_req_done_ack));
+		SDE_HDCP_DEBUG("%s: DDC_XFER_DONE received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & isr->encryption_ready) {
+		/* Encryption enabled */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->encryption_ready_ack));
+		SDE_HDCP_DEBUG("%s: encryption ready received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+	if (hdcp_int_val & isr->encryption_not_ready) {
+		/* Encryption not ready */
+		DSS_REG_W(io, isr->int_reg,
+			(hdcp_int_val | isr->encryption_not_ready_ack));
+		SDE_HDCP_DEBUG("%s: encryption not ready received\n",
+			SDE_HDCP_STATE_NAME);
+	}
+
+error:
+	return rc;
+}
+
+/*
+ * sde_hdcp_1x_deinit() - destroy the auth workqueue, unregister the client
+ * from the HDCP library, and free the context allocated by
+ * sde_hdcp_1x_init().
+ */
+void sde_hdcp_1x_deinit(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	if (hdcp->workq)
+		destroy_workqueue(hdcp->workq);
+
+	hdcp1_client_unregister();
+	kfree(hdcp);
+} /* hdcp_1x_deinit */
+
+/*
+ * sde_hdcp_1x_update_client_reg_set() - install the per-client register
+ * map, sink address map, and interrupt set. Only HDMI tables exist today;
+ * the DP branch is a placeholder.
+ */
+static void sde_hdcp_1x_update_client_reg_set(struct sde_hdcp_1x *hdcp)
+{
+
+	if (hdcp->init_data.client_id == HDCP_CLIENT_HDMI) {
+		struct sde_hdcp_reg_set reg_set = HDCP_REG_SET_CLIENT_HDMI;
+		struct sde_hdcp_skaddr_map sink_addr = HDCP_HDMI_SINK_ADDR_MAP;
+		struct sde_hdcp_int_set isr = HDCP_HDMI_INT_SET;
+
+		hdcp->reg_set = reg_set;
+		hdcp->sink_addr = sink_addr;
+		hdcp->int_set = isr;
+	} else if (hdcp->init_data.client_id == HDCP_CLIENT_DP) {
+		/* TO DO for DP
+		 * Will be filled later
+		 */
+	}
+}
+
+/*
+ * sde_hdcp_1x_is_cp_irq_raised() - read the DP sink's IRQ vector at DPCD
+ * 0x201 and report whether the CP_IRQ bit (BIT(2)) is set.
+ * Returns false on a failed read (buf stays 0).
+ */
+static bool sde_hdcp_1x_is_cp_irq_raised(struct sde_hdcp_1x *hdcp)
+{
+	int ret;
+	u8 buf = 0;
+	struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
+
+	ret = sde_hdcp_1x_read(hdcp, &sink, &buf, false);
+	if (IS_ERR_VALUE(ret))
+		pr_err("error reading irq_vector\n");
+
+	return buf & BIT(2) ? true : false;
+}
+
+/*
+ * sde_hdcp_1x_clear_cp_irq() - acknowledge CP_IRQ by writing BIT(2) back
+ * to the sink's IRQ vector at DPCD 0x201 (write-1-to-clear).
+ */
+static void sde_hdcp_1x_clear_cp_irq(struct sde_hdcp_1x *hdcp)
+{
+	int ret;
+	u8 buf = BIT(2);
+	struct sde_hdcp_sink_addr sink = {"irq", 0x201, 1};
+
+	ret = sde_hdcp_1x_write(hdcp, &sink, &buf);
+	if (IS_ERR_VALUE(ret))
+		pr_err("error clearing irq_vector\n");
+}
+
+/*
+ * sde_hdcp_1x_cp_irq() - handle a DP CP_IRQ: read Bstatus and dispatch on
+ * link-integrity failure / reauth request (BIT(2)/BIT(3)), R0' available
+ * (BIT(1)) and KSV-list ready (BIT(0)), then clear CP_IRQ on the sink.
+ * Returns 0 when handled, -EINVAL when the IRQ was not for us.
+ */
+static int sde_hdcp_1x_cp_irq(void *input)
+{
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	u8 buf = 0;
+	int ret;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		goto irq_not_handled;
+	}
+
+	if (!sde_hdcp_1x_is_cp_irq_raised(hdcp)) {
+		SDE_HDCP_DEBUG("cp_irq not raised\n");
+		goto irq_not_handled;
+	}
+
+	ret = sde_hdcp_1x_read(hdcp, &hdcp->sink_addr.cp_irq_status,
+			&buf, false);
+	if (IS_ERR_VALUE(ret)) {
+		pr_err("error reading cp_irq_status\n");
+		goto irq_not_handled;
+	}
+
+	if ((buf & BIT(2)) || (buf & BIT(3))) {
+		pr_err("%s\n",
+			buf & BIT(2) ? "LINK_INTEGRITY_FAILURE" :
+				"REAUTHENTICATION_REQUEST");
+
+		hdcp->reauth = true;
+
+		if (!sde_hdcp_1x_state(HDCP_STATE_INACTIVE))
+			hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL;
+
+		/* unblock verify_r0() so it can observe the failure */
+		complete_all(&hdcp->sink_r0_available);
+		sde_hdcp_1x_update_auth_status(hdcp);
+	} else if (buf & BIT(1)) {
+		SDE_HDCP_DEBUG("R0' AVAILABLE\n");
+		hdcp->sink_r0_ready = true;
+		complete_all(&hdcp->sink_r0_available);
+	} else if ((buf & BIT(0))) {
+		SDE_HDCP_DEBUG("KSVs READY\n");
+
+		hdcp->ksv_ready = true;
+	} else {
+		SDE_HDCP_DEBUG("spurious interrupt\n");
+	}
+
+	sde_hdcp_1x_clear_cp_irq(hdcp);
+	return 0;
+
+irq_not_handled:
+	return -EINVAL;
+}
+
+/*
+ * sde_hdcp_1x_srm_cb() - callback invoked by the HDCP library when a new
+ * SRM arrives: re-check the current receiver (and, for repeaters, the
+ * cached KSV list) against the updated revocation list. A revoked device
+ * fails authentication permanently (AUTH_FAIL_NOREAUTH).
+ */
+static void sde_hdcp_1x_srm_cb(void *input)
+{
+
+	struct sde_hdcp_1x *hdcp = (struct sde_hdcp_1x *)input;
+	int rc = 0;
+
+	if (!hdcp) {
+		pr_err("invalid input\n");
+		return;
+	}
+
+	rc = sde_hdcp_1x_revoked_rcv_chk(hdcp);
+
+	if (rc) {
+		pr_err("receiver failed SRM check\n");
+		goto fail_noreauth;
+	}
+
+	/* If its not a repeater we are done */
+	if (hdcp->current_tp.ds_type != DS_REPEATER)
+		return;
+
+
+	/* Check the repeater KSV against SRM */
+	rc = sde_hdcp_1x_revoked_rpt_chk(hdcp);
+	if (rc) {
+		pr_err("repeater failed SRM check\n");
+		goto fail_noreauth;
+	}
+
+	return;
+
+ fail_noreauth:
+	/* No reauth in case of SRM failure */
+	hdcp->hdcp_state = HDCP_STATE_AUTH_FAIL_NOREAUTH;
+	sde_hdcp_1x_update_auth_status(hdcp);
+}
+
+/*
+ * sde_hdcp_1x_init() - allocate and initialize an HDCP 1.x context:
+ * validate init_data, create the per-client workqueue, install the client
+ * register/sink/interrupt tables, set up the auth work and completions,
+ * and register the SRM callback with the HDCP library.
+ * Returns an opaque context pointer for the other sde_hdcp_1x_* entry
+ * points, or NULL on failure. Pair with sde_hdcp_1x_deinit().
+ */
+void *sde_hdcp_1x_init(struct sde_hdcp_init_data *init_data)
+{
+	struct sde_hdcp_1x *hdcp = NULL;
+	char name[20];
+	/* vtable exposed to the caller via sde_hdcp_1x_start() */
+	static struct sde_hdcp_ops ops = {
+		.isr = sde_hdcp_1x_isr,
+		.cp_irq = sde_hdcp_1x_cp_irq,
+		.reauthenticate = sde_hdcp_1x_reauthenticate,
+		.authenticate = sde_hdcp_1x_authenticate,
+		.off = sde_hdcp_1x_off
+	};
+
+	static struct hdcp_client_ops client_ops = {
+		.srm_cb = sde_hdcp_1x_srm_cb,
+	};
+
+	if (!init_data || !init_data->core_io || !init_data->qfprom_io ||
+		!init_data->mutex || !init_data->notify_status ||
+		!init_data->workq || !init_data->cb_data ||
+		!init_data->avmute_sink) {
+		pr_err("invalid input\n");
+		goto error;
+	}
+
+	if (init_data->sec_access && !init_data->hdcp_io) {
+		pr_err("hdcp_io required\n");
+		goto error;
+	}
+
+	hdcp = kzalloc(sizeof(*hdcp), GFP_KERNEL);
+	if (!hdcp)
+		goto error;
+
+	hdcp->init_data = *init_data;
+	hdcp->ops = &ops;
+
+	snprintf(name, sizeof(name), "hdcp_1x_%d",
+		hdcp->init_data.client_id);
+
+	hdcp->workq = create_workqueue(name);
+	if (!hdcp->workq) {
+		pr_err("Error creating workqueue\n");
+		kfree(hdcp);
+		goto error;
+	}
+
+	sde_hdcp_1x_update_client_reg_set(hdcp);
+
+	INIT_DELAYED_WORK(&hdcp->hdcp_auth_work, sde_hdcp_1x_auth_work);
+
+	hdcp->hdcp_state = HDCP_STATE_INACTIVE;
+	init_completion(&hdcp->r0_checked);
+	init_completion(&hdcp->sink_r0_available);
+
+	/* Register client ctx and the srm_cb with hdcp lib */
+	hdcp1_client_register((void *)hdcp, &client_ops);
+	SDE_HDCP_DEBUG("HDCP module initialized. HDCP_STATE=%s\n",
+		SDE_HDCP_STATE_NAME);
+
+	return (void *)hdcp;
+
+error:
+	return NULL;
+} /* hdcp_1x_init */
+
+/*
+ * sde_hdcp_1x_start() - return the ops vtable stored in the context.
+ * NOTE(review): unlike the other entry points, @input is not NULL-checked
+ * here — callers must pass a valid context from sde_hdcp_1x_init().
+ */
+struct sde_hdcp_ops *sde_hdcp_1x_start(void *input)
+{
+	return ((struct sde_hdcp_1x *)input)->ops;
+}
+
diff --git a/drivers/gpu/drm/msm/sde_io_util.c b/drivers/gpu/drm/msm/sde_io_util.c
new file mode 100644
index 000000000000..70a42254a909
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_io_util.c
@@ -0,0 +1,502 @@
+/* Copyright (c) 2012-2015, 2017, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/err.h>
+#include <linux/io.h>
+#include <linux/regulator/consumer.h>
+#include <linux/delay.h>
+#include <linux/sde_io_util.h>
+
+#define MAX_I2C_CMDS 16
+void dss_reg_w(struct dss_io_data *io, u32 offset, u32 value, u32 debug)
+{
+ u32 in_val;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ writel_relaxed(value, io->base + offset);
+ if (debug) {
+ in_val = readl_relaxed(io->base + offset);
+ DEV_DBG("[%08x] => %08x [%08x]\n",
+ (u32)(unsigned long)(io->base + offset),
+ value, in_val);
+ }
+} /* dss_reg_w */
+EXPORT_SYMBOL(dss_reg_w);
+
+u32 dss_reg_r(struct dss_io_data *io, u32 offset, u32 debug)
+{
+ u32 value;
+
+ if (!io || !io->base) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ if (offset > io->len) {
+ DEV_ERR("%pS->%s: offset out of range\n",
+ __builtin_return_address(0), __func__);
+ return -EINVAL;
+ }
+
+ value = readl_relaxed(io->base + offset);
+ if (debug)
+ DEV_DBG("[%08x] <= %08x\n",
+ (u32)(unsigned long)(io->base + offset), value);
+
+ return value;
+} /* dss_reg_r */
+EXPORT_SYMBOL(dss_reg_r);
+
+void dss_reg_dump(void __iomem *base, u32 length, const char *prefix,
+ u32 debug)
+{
+ if (debug)
+ print_hex_dump(KERN_INFO, prefix, DUMP_PREFIX_OFFSET, 32, 4,
+ (void *)base, length, false);
+} /* dss_reg_dump */
+EXPORT_SYMBOL(dss_reg_dump);
+
+static struct resource *msm_dss_get_res_byname(struct platform_device *pdev,
+ unsigned int type, const char *name)
+{
+ struct resource *res = NULL;
+
+ res = platform_get_resource_byname(pdev, type, name);
+ if (!res)
+ DEV_ERR("%s: '%s' resource not found\n", __func__, name);
+
+ return res;
+} /* msm_dss_get_res_byname */
+EXPORT_SYMBOL(msm_dss_get_res_byname);
+
/**
 * msm_dss_ioremap_byname() - map a named MEM resource of a platform device
 * @pdev:    platform device that owns the resource
 * @io_data: filled in with mapping base and byte length on success
 * @name:    resource name to look up
 *
 * The mapping is NOT device-managed (plain ioremap); the caller owns it
 * and must release it with msm_dss_iounmap().
 *
 * Return: 0 on success, -EINVAL on bad arguments, -ENODEV if the named
 * resource does not exist, -EIO if the remap fails.
 */
int msm_dss_ioremap_byname(struct platform_device *pdev,
	struct dss_io_data *io_data, const char *name)
{
	struct resource *res = NULL;

	if (!pdev || !io_data) {
		DEV_ERR("%pS->%s: invalid input\n",
			__builtin_return_address(0), __func__);
		return -EINVAL;
	}

	res = msm_dss_get_res_byname(pdev, IORESOURCE_MEM, name);
	if (!res) {
		DEV_ERR("%pS->%s: '%s' msm_dss_get_res_byname failed\n",
			__builtin_return_address(0), __func__, name);
		return -ENODEV;
	}

	io_data->len = (u32)resource_size(res);
	io_data->base = ioremap(res->start, io_data->len);
	if (!io_data->base) {
		DEV_ERR("%pS->%s: '%s' ioremap failed\n",
			__builtin_return_address(0), __func__, name);
		return -EIO;
	}

	return 0;
} /* msm_dss_ioremap_byname */
EXPORT_SYMBOL(msm_dss_ioremap_byname);
+
+void msm_dss_iounmap(struct dss_io_data *io_data)
+{
+ if (!io_data) {
+ DEV_ERR("%pS->%s: invalid input\n",
+ __builtin_return_address(0), __func__);
+ return;
+ }
+
+ if (io_data->base) {
+ iounmap(io_data->base);
+ io_data->base = NULL;
+ }
+ io_data->len = 0;
+} /* msm_dss_iounmap */
+EXPORT_SYMBOL(msm_dss_iounmap);
+
/**
 * msm_dss_config_vreg() - acquire/configure or release an array of regulators
 * @dev:      device used for regulator_get()
 * @in_vreg:  regulator descriptors to (un)configure
 * @num_vreg: number of entries in @in_vreg
 * @config:   non-zero acquires in array order and programs the voltage of
 *            LDO-type supplies; zero releases in reverse order
 *
 * A supply is classified LDO when regulator_count_voltages() > 0,
 * otherwise it is a voltage switch (VS) whose voltage cannot be set.
 *
 * Return: 0 on success, negative errno on failure (with all regulators
 * acquired so far released again).
 */
int msm_dss_config_vreg(struct device *dev, struct dss_vreg *in_vreg,
	int num_vreg, int config)
{
	int i = 0, rc = 0;
	struct dss_vreg *curr_vreg = NULL;
	enum dss_vreg_type type;

	if (!in_vreg || !num_vreg)
		return rc;

	if (config) {
		for (i = 0; i < num_vreg; i++) {
			curr_vreg = &in_vreg[i];
			curr_vreg->vreg = regulator_get(dev,
				curr_vreg->vreg_name);
			rc = PTR_RET(curr_vreg->vreg);
			if (rc) {
				DEV_ERR("%pS->%s: %s get failed. rc=%d\n",
					__builtin_return_address(0), __func__,
					curr_vreg->vreg_name, rc);
				curr_vreg->vreg = NULL;
				goto vreg_get_fail;
			}
			type = (regulator_count_voltages(curr_vreg->vreg) > 0)
				? DSS_REG_LDO : DSS_REG_VS;
			if (type == DSS_REG_LDO) {
				rc = regulator_set_voltage(
					curr_vreg->vreg,
					curr_vreg->min_voltage,
					curr_vreg->max_voltage);
				if (rc < 0) {
					DEV_ERR("%pS->%s: %s set vltg fail\n",
						__builtin_return_address(0),
						__func__,
						curr_vreg->vreg_name);
					goto vreg_set_voltage_fail;
				}
			}
		}
	} else {
		for (i = num_vreg-1; i >= 0; i--) {
			curr_vreg = &in_vreg[i];
			if (curr_vreg->vreg) {
				type = (regulator_count_voltages(
					curr_vreg->vreg) > 0)
					? DSS_REG_LDO : DSS_REG_VS;
				if (type == DSS_REG_LDO) {
					regulator_set_voltage(curr_vreg->vreg,
						0, curr_vreg->max_voltage);
				}
				regulator_put(curr_vreg->vreg);
				curr_vreg->vreg = NULL;
			}
		}
	}
	return 0;

/*
 * Error unwinding below is deliberately label-driven and intricate:
 * - vreg_set_voltage_fail: release the regulator we just failed to
 *   program, then fall into vreg_get_fail to unwind the earlier ones.
 * - vreg_get_fail: each pass of the loop re-enters via the goto, which
 *   jumps back ABOVE the for-statement so its init expression (i--)
 *   runs again — net effect: entries i-1 down to 0 are each dropped
 *   (load cleared for LDOs via vreg_unconfig, then regulator_put).
 */
vreg_unconfig:
if (type == DSS_REG_LDO)
	regulator_set_load(curr_vreg->vreg, 0);

vreg_set_voltage_fail:
	regulator_put(curr_vreg->vreg);
	curr_vreg->vreg = NULL;

vreg_get_fail:
	for (i--; i >= 0; i--) {
		curr_vreg = &in_vreg[i];
		type = (regulator_count_voltages(curr_vreg->vreg) > 0)
			? DSS_REG_LDO : DSS_REG_VS;
		goto vreg_unconfig;
	}
	return rc;
} /* msm_dss_config_vreg */
EXPORT_SYMBOL(msm_dss_config_vreg);
+
/**
 * msm_dss_enable_vreg() - enable or disable an array of regulators
 * @in_vreg:  regulators previously acquired via msm_dss_config_vreg()
 * @num_vreg: number of entries in @in_vreg
 * @enable:   non-zero enables in array order, zero disables in reverse
 *
 * pre/post_{on,off}_sleep are per-supply settling delays in ms; the
 * on-path delays are skipped when the regulator is already enabled.
 * On an enable failure every supply enabled so far is rolled back.
 *
 * Return: 0 on success or the first error encountered while enabling.
 */
int msm_dss_enable_vreg(struct dss_vreg *in_vreg, int num_vreg, int enable)
{
	int i = 0, rc = 0;
	bool need_sleep;

	if (enable) {
		for (i = 0; i < num_vreg; i++) {
			rc = PTR_RET(in_vreg[i].vreg);
			if (rc) {
				DEV_ERR("%pS->%s: %s regulator error. rc=%d\n",
					__builtin_return_address(0), __func__,
					in_vreg[i].vreg_name, rc);
				goto vreg_set_opt_mode_fail;
			}
			need_sleep = !regulator_is_enabled(in_vreg[i].vreg);
			if (in_vreg[i].pre_on_sleep && need_sleep)
				usleep_range(in_vreg[i].pre_on_sleep * 1000,
					in_vreg[i].pre_on_sleep * 1000);
			rc = regulator_set_load(in_vreg[i].vreg,
				in_vreg[i].enable_load);
			if (rc < 0) {
				DEV_ERR("%pS->%s: %s set opt m fail\n",
					__builtin_return_address(0), __func__,
					in_vreg[i].vreg_name);
				goto vreg_set_opt_mode_fail;
			}
			rc = regulator_enable(in_vreg[i].vreg);
			if (in_vreg[i].post_on_sleep && need_sleep)
				usleep_range(in_vreg[i].post_on_sleep * 1000,
					in_vreg[i].post_on_sleep * 1000);
			if (rc < 0) {
				DEV_ERR("%pS->%s: %s enable failed\n",
					__builtin_return_address(0), __func__,
					in_vreg[i].vreg_name);
				goto disable_vreg;
			}
		}
	} else {
		for (i = num_vreg-1; i >= 0; i--) {
			if (in_vreg[i].pre_off_sleep)
				usleep_range(in_vreg[i].pre_off_sleep * 1000,
					in_vreg[i].pre_off_sleep * 1000);
			regulator_set_load(in_vreg[i].vreg,
				in_vreg[i].disable_load);
			regulator_disable(in_vreg[i].vreg);
			if (in_vreg[i].post_off_sleep)
				usleep_range(in_vreg[i].post_off_sleep * 1000,
					in_vreg[i].post_off_sleep * 1000);
		}
	}
	return rc;

/* entry i failed regulator_enable(): drop its load vote, then fall
 * through to unwind entries i-1 .. 0, which were fully enabled.
 */
disable_vreg:
	regulator_set_load(in_vreg[i].vreg, in_vreg[i].disable_load);

vreg_set_opt_mode_fail:
	for (i--; i >= 0; i--) {
		if (in_vreg[i].pre_off_sleep)
			usleep_range(in_vreg[i].pre_off_sleep * 1000,
				in_vreg[i].pre_off_sleep * 1000);
		regulator_set_load(in_vreg[i].vreg,
			in_vreg[i].disable_load);
		regulator_disable(in_vreg[i].vreg);
		if (in_vreg[i].post_off_sleep)
			usleep_range(in_vreg[i].post_off_sleep * 1000,
				in_vreg[i].post_off_sleep * 1000);
	}

	return rc;
} /* msm_dss_enable_vreg */
EXPORT_SYMBOL(msm_dss_enable_vreg);
+
/**
 * msm_dss_enable_gpio() - request and drive, or free, an array of gpios
 * @in_gpio:  gpio descriptors (number, name, value to drive)
 * @num_gpio: number of entries in @in_gpio
 * @enable:   non-zero requests each gpio and drives its value; zero
 *            frees them in reverse order
 *
 * On a request failure, every gpio requested so far is freed again.
 *
 * NOTE(review): gpio_request() does not configure pin direction, and
 * gpio_direction_output() is never called here — presumably the pins
 * are already outputs via pinctrl/bootloader. Confirm before relying
 * on gpio_set_value().
 *
 * Return: 0 on success or the gpio_request() error.
 */
int msm_dss_enable_gpio(struct dss_gpio *in_gpio, int num_gpio, int enable)
{
	int i = 0, rc = 0;

	if (enable) {
		for (i = 0; i < num_gpio; i++) {
			DEV_DBG("%pS->%s: %s enable\n",
				__builtin_return_address(0), __func__,
				in_gpio[i].gpio_name);

			rc = gpio_request(in_gpio[i].gpio,
				in_gpio[i].gpio_name);
			if (rc < 0) {
				DEV_ERR("%pS->%s: %s enable failed\n",
					__builtin_return_address(0), __func__,
					in_gpio[i].gpio_name);
				goto disable_gpio;
			}
			gpio_set_value(in_gpio[i].gpio, in_gpio[i].value);
		}
	} else {
		for (i = num_gpio-1; i >= 0; i--) {
			DEV_DBG("%pS->%s: %s disable\n",
				__builtin_return_address(0), __func__,
				in_gpio[i].gpio_name);
			if (in_gpio[i].gpio)
				gpio_free(in_gpio[i].gpio);
		}
	}
	return rc;

disable_gpio:
	/* unwind the gpios requested before the failing one */
	for (i--; i >= 0; i--)
		if (in_gpio[i].gpio)
			gpio_free(in_gpio[i].gpio);

	return rc;
} /* msm_dss_enable_gpio */
EXPORT_SYMBOL(msm_dss_enable_gpio);
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+} /* msm_dss_put_clk */
+EXPORT_SYMBOL(msm_dss_put_clk);
+
/**
 * msm_dss_get_clk() - look up every clock in @clk_arry by name
 * @dev:      device used for clk_get()
 * @clk_arry: descriptors whose clk_name fields are already filled in
 * @num_clk:  number of entries in @clk_arry
 *
 * On any lookup failure, all clocks acquired so far are released via
 * msm_dss_put_clk() and the error is returned.
 *
 * Return: 0 on success, negative errno from clk_get() on failure.
 */
int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
{
	int i, rc = 0;

	for (i = 0; i < num_clk; i++) {
		clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
		rc = PTR_RET(clk_arry[i].clk);
		if (rc) {
			DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
				__builtin_return_address(0), __func__,
				clk_arry[i].clk_name, rc);
			goto error;
		}
	}

	return rc;

error:
	/* num_clk (not i) is fine here: put_clk skips NULL handles */
	msm_dss_put_clk(clk_arry, num_clk);

	return rc;
} /* msm_dss_get_clk */
EXPORT_SYMBOL(msm_dss_get_clk);
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+} /* msm_dss_clk_set_rate */
+EXPORT_SYMBOL(msm_dss_clk_set_rate);
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+} /* msm_dss_enable_clk */
+EXPORT_SYMBOL(msm_dss_enable_clk);
+
+
+int sde_i2c_byte_read(struct i2c_client *client, uint8_t slave_addr,
+ uint8_t reg_offset, uint8_t *read_buf)
+{
+ struct i2c_msg msgs[2];
+ int ret = -1;
+
+ pr_debug("%s: reading from slave_addr=[%x] and offset=[%x]\n",
+ __func__, slave_addr, reg_offset);
+
+ msgs[0].addr = slave_addr >> 1;
+ msgs[0].flags = 0;
+ msgs[0].buf = &reg_offset;
+ msgs[0].len = 1;
+
+ msgs[1].addr = slave_addr >> 1;
+ msgs[1].flags = I2C_M_RD;
+ msgs[1].buf = read_buf;
+ msgs[1].len = 1;
+
+ ret = i2c_transfer(client->adapter, msgs, 2);
+ if (ret < 1) {
+ pr_err("%s: I2C READ FAILED=[%d]\n", __func__, ret);
+ return -EACCES;
+ }
+ pr_debug("%s: i2c buf is [%x]\n", __func__, *read_buf);
+ return 0;
+}
+EXPORT_SYMBOL(sde_i2c_byte_read);
+
/**
 * sde_i2c_byte_write() - write one register byte over I2C
 * @client:     i2c client whose adapter is used
 * @slave_addr: 8-bit (datasheet) slave address, shifted down to 7-bit
 * @reg_offset: register to write
 * @value:      pointer to the byte to write
 *
 * Return: -EACCES on failure. On success this returns the POSITIVE
 * i2c_transfer() message count (1), not 0 — unlike sde_i2c_byte_read().
 * Callers must therefore test "< 0", not "!= 0". Kept as-is because
 * existing callers may rely on it.
 */
int sde_i2c_byte_write(struct i2c_client *client, uint8_t slave_addr,
			uint8_t reg_offset, uint8_t *value)
{
	struct i2c_msg msgs[1];
	uint8_t data[2];
	int status = -EACCES;

	pr_debug("%s: writing from slave_addr=[%x] and offset=[%x]\n",
		__func__, slave_addr, reg_offset);

	/* single write message: register offset followed by the value */
	data[0] = reg_offset;
	data[1] = *value;

	msgs[0].addr = slave_addr >> 1;
	msgs[0].flags = 0;
	msgs[0].len = 2;
	msgs[0].buf = data;

	status = i2c_transfer(client->adapter, msgs, 1);
	if (status < 1) {
		pr_err("I2C WRITE FAILED=[%d]\n", status);
		return -EACCES;
	}
	pr_debug("%s: I2C write status=%x\n", __func__, status);
	return status;
}
EXPORT_SYMBOL(sde_i2c_byte_write);
diff --git a/drivers/gpu/drm/msm/sde_power_handle.c b/drivers/gpu/drm/msm/sde_power_handle.c
new file mode 100644
index 000000000000..a26188f9e8e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_power_handle.c
@@ -0,0 +1,925 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/clk.h>
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include <linux/msm-bus.h>
+#include <linux/msm-bus-board.h>
+#include <linux/sde_io_util.h>
+
+#include "sde_power_handle.h"
+#include "sde_trace.h"
+
+struct sde_power_client *sde_power_client_create(
+ struct sde_power_handle *phandle, char *client_name)
+{
+ struct sde_power_client *client;
+ static u32 id;
+
+ if (!client_name || !phandle) {
+ pr_err("client name is null or invalid power data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = kzalloc(sizeof(struct sde_power_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&phandle->phandle_lock);
+ strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+ client->usecase_ndx = VOTE_INDEX_DISABLE;
+ client->id = id;
+ pr_debug("client %s created:%pK id :%d\n", client_name,
+ client, id);
+ id++;
+ list_add(&client->list, &phandle->power_client_clist);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return client;
+}
+
+void sde_power_client_destroy(struct sde_power_handle *phandle,
+ struct sde_power_client *client)
+{
+ if (!client || !phandle) {
+ pr_err("reg bus vote: invalid client handle\n");
+ } else {
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+ client->name, client, client->id);
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(client);
+ }
+}
+
/**
 * sde_power_parse_dt_supply() - parse "qcom,platform-supply-entries"
 * into mp->vreg_config
 * @pdev: platform device whose of_node is parsed
 * @mp:   module power description to fill in
 *
 * The name/min/max-voltage and enable/disable-load properties are
 * mandatory per supply; the four pre/post on/off sleep values are
 * optional and default to 0. Absence of the whole supply node is not
 * an error. On failure the partially-filled vreg_config is freed.
 *
 * Return: 0 on success (also when no supplies are defined), negative
 * errno on a missing mandatory property or allocation failure.
 */
static int sde_power_parse_dt_supply(struct platform_device *pdev,
				struct dss_module_power *mp)
{
	int i = 0, rc = 0;
	u32 tmp = 0;
	struct device_node *of_node = NULL, *supply_root_node = NULL;
	struct device_node *supply_node = NULL;

	if (!pdev || !mp) {
		pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
		return -EINVAL;
	}

	of_node = pdev->dev.of_node;

	mp->num_vreg = 0;
	supply_root_node = of_get_child_by_name(of_node,
					"qcom,platform-supply-entries");
	if (!supply_root_node) {
		pr_debug("no supply entry present\n");
		return rc;
	}

	/* first pass: count the children to size the allocation */
	for_each_child_of_node(supply_root_node, supply_node)
		mp->num_vreg++;

	if (mp->num_vreg == 0) {
		pr_debug("no vreg\n");
		return rc;
	}

	pr_debug("vreg found. count=%d\n", mp->num_vreg);
	mp->vreg_config = devm_kzalloc(&pdev->dev, sizeof(struct dss_vreg) *
					mp->num_vreg, GFP_KERNEL);
	if (!mp->vreg_config) {
		rc = -ENOMEM;
		return rc;
	}

	/* second pass: fill one dss_vreg per child node */
	for_each_child_of_node(supply_root_node, supply_node) {

		const char *st = NULL;

		rc = of_property_read_string(supply_node,
					"qcom,supply-name", &st);
		if (rc) {
			pr_err("error reading name. rc=%d\n", rc);
			goto error;
		}

		strlcpy(mp->vreg_config[i].vreg_name, st,
				sizeof(mp->vreg_config[i].vreg_name));

		rc = of_property_read_u32(supply_node,
					"qcom,supply-min-voltage", &tmp);
		if (rc) {
			pr_err("error reading min volt. rc=%d\n", rc);
			goto error;
		}
		mp->vreg_config[i].min_voltage = tmp;

		rc = of_property_read_u32(supply_node,
					"qcom,supply-max-voltage", &tmp);
		if (rc) {
			pr_err("error reading max volt. rc=%d\n", rc);
			goto error;
		}
		mp->vreg_config[i].max_voltage = tmp;

		rc = of_property_read_u32(supply_node,
					"qcom,supply-enable-load", &tmp);
		if (rc) {
			pr_err("error reading enable load. rc=%d\n", rc);
			goto error;
		}
		mp->vreg_config[i].enable_load = tmp;

		rc = of_property_read_u32(supply_node,
					"qcom,supply-disable-load", &tmp);
		if (rc) {
			pr_err("error reading disable load. rc=%d\n", rc);
			goto error;
		}
		mp->vreg_config[i].disable_load = tmp;

		/* optional sleep properties: default to 0 when absent */
		rc = of_property_read_u32(supply_node,
					"qcom,supply-pre-on-sleep", &tmp);
		if (rc)
			pr_debug("error reading supply pre sleep value. rc=%d\n",
				rc);

		mp->vreg_config[i].pre_on_sleep = (!rc ? tmp : 0);

		rc = of_property_read_u32(supply_node,
					"qcom,supply-pre-off-sleep", &tmp);
		if (rc)
			pr_debug("error reading supply pre sleep value. rc=%d\n",
				rc);

		mp->vreg_config[i].pre_off_sleep = (!rc ? tmp : 0);

		rc = of_property_read_u32(supply_node,
					"qcom,supply-post-on-sleep", &tmp);
		if (rc)
			pr_debug("error reading supply post sleep value. rc=%d\n",
				rc);

		mp->vreg_config[i].post_on_sleep = (!rc ? tmp : 0);

		rc = of_property_read_u32(supply_node,
					"qcom,supply-post-off-sleep", &tmp);
		if (rc)
			pr_debug("error reading supply post sleep value. rc=%d\n",
				rc);

		mp->vreg_config[i].post_off_sleep = (!rc ? tmp : 0);

		pr_debug("%s min=%d, max=%d, enable=%d, disable=%d, preonsleep=%d, postonsleep=%d, preoffsleep=%d, postoffsleep=%d\n",
			mp->vreg_config[i].vreg_name,
			mp->vreg_config[i].min_voltage,
			mp->vreg_config[i].max_voltage,
			mp->vreg_config[i].enable_load,
			mp->vreg_config[i].disable_load,
			mp->vreg_config[i].pre_on_sleep,
			mp->vreg_config[i].post_on_sleep,
			mp->vreg_config[i].pre_off_sleep,
			mp->vreg_config[i].post_off_sleep);
		++i;

		/* clear the residual rc from the optional reads above */
		rc = 0;
	}

	return rc;

error:
	if (mp->vreg_config) {
		devm_kfree(&pdev->dev, mp->vreg_config);
		mp->vreg_config = NULL;
		mp->num_vreg = 0;
	}

	return rc;
}
+
+static int sde_power_parse_dt_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ u32 i = 0, rc = 0;
+ const char *clock_name;
+ u32 clock_rate = 0;
+ u32 clock_max_rate = 0;
+ int num_clk = 0;
+
+ if (!pdev || !mp) {
+ pr_err("invalid input param pdev:%pK mp:%pK\n", pdev, mp);
+ return -EINVAL;
+ }
+
+ mp->num_clk = 0;
+ num_clk = of_property_count_strings(pdev->dev.of_node,
+ "clock-names");
+ if (num_clk <= 0) {
+ pr_debug("clocks are not defined\n");
+ goto clk_err;
+ }
+
+ mp->num_clk = num_clk;
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct dss_clk) * num_clk, GFP_KERNEL);
+ if (!mp->clk_config) {
+ rc = -ENOMEM;
+ mp->num_clk = 0;
+ goto clk_err;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ of_property_read_string_index(pdev->dev.of_node, "clock-names",
+ i, &clock_name);
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ of_property_read_u32_index(pdev->dev.of_node, "clock-rate",
+ i, &clock_rate);
+ mp->clk_config[i].rate = clock_rate;
+
+ if (!clock_rate)
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ else
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+
+ clock_max_rate = 0;
+ of_property_read_u32_index(pdev->dev.of_node, "clock-max-rate",
+ i, &clock_max_rate);
+ mp->clk_config[i].max_rate = clock_max_rate;
+ }
+
+clk_err:
+ return rc;
+}
+
+#ifdef CONFIG_QCOM_BUS_SCALING
+
+#define MAX_AXI_PORT_COUNT 3
+
/**
 * _sde_power_data_bus_set_quota() - program the aggregated AXI data-bus vote
 * @pdbus:        data bus handle (bus client, scale table, port counts)
 * @ab_quota_rt:  total average bandwidth for real-time ports
 * @ab_quota_nrt: total average bandwidth for non-real-time ports
 * @ib_quota_rt:  instantaneous bandwidth for real-time ports
 * @ib_quota_nrt: instantaneous bandwidth for non-real-time ports
 *
 * Distributes the rt/nrt quotas over the AXI ports, rewrites the
 * vectors of a scratch usecase slot in the bus scale table, and votes
 * it — unless the vote is deferred because no one has enabled bus
 * bandwidth yet (bus_ref_cnt == 0).
 *
 * Return: 0 on success (or skipped vote), negative errno otherwise.
 */
static int _sde_power_data_bus_set_quota(
	struct sde_power_data_bus_handle *pdbus,
	u64 ab_quota_rt, u64 ab_quota_nrt,
	u64 ib_quota_rt, u64 ib_quota_nrt)
{
	int new_uc_idx;
	u64 ab_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	u64 ib_quota[MAX_AXI_PORT_COUNT] = {0, 0};
	int rc;

	if (pdbus->data_bus_hdl < 1) {
		pr_err("invalid bus handle %d\n", pdbus->data_bus_hdl);
		return -EINVAL;
	}

	/* all-zero quota maps to usecase 0, the "off" vote */
	if (!ab_quota_rt && !ab_quota_nrt && !ib_quota_rt && !ib_quota_nrt) {
		new_uc_idx = 0;
	} else {
		int i;
		struct msm_bus_vectors *vect = NULL;
		struct msm_bus_scale_pdata *bw_table =
			pdbus->data_bus_scale_table;
		u32 nrt_axi_port_cnt = pdbus->nrt_axi_port_cnt;
		u32 total_axi_port_cnt = pdbus->axi_port_cnt;
		u32 rt_axi_port_cnt = total_axi_port_cnt - nrt_axi_port_cnt;
		int match_cnt = 0;

		if (!bw_table || !total_axi_port_cnt ||
		    total_axi_port_cnt > MAX_AXI_PORT_COUNT) {
			pr_err("invalid input\n");
			return -EINVAL;
		}

		/* ib is per-channel; split it across DRAM channels */
		if (pdbus->bus_channels) {
			ib_quota_rt = div_u64(ib_quota_rt,
						pdbus->bus_channels);
			ib_quota_nrt = div_u64(ib_quota_nrt,
						pdbus->bus_channels);
		}

		if (nrt_axi_port_cnt) {
			/*
			 * Dedicated nrt ports exist: rt quota is spread
			 * over the rt ports, nrt quota over the nrt ones.
			 */
			ab_quota_rt = div_u64(ab_quota_rt, rt_axi_port_cnt);
			ab_quota_nrt = div_u64(ab_quota_nrt, nrt_axi_port_cnt);

			for (i = 0; i < total_axi_port_cnt; i++) {
				if (i < rt_axi_port_cnt) {
					ab_quota[i] = ab_quota_rt;
					ib_quota[i] = ib_quota_rt;
				} else {
					ab_quota[i] = ab_quota_nrt;
					ib_quota[i] = ib_quota_nrt;
				}
			}
		} else {
			/* no nrt ports: every port carries the combined load */
			ab_quota[0] = div_u64(ab_quota_rt + ab_quota_nrt,
					total_axi_port_cnt);
			ib_quota[0] = ib_quota_rt + ib_quota_nrt;

			for (i = 1; i < total_axi_port_cnt; i++) {
				ab_quota[i] = ab_quota[0];
				ib_quota[i] = ib_quota[0];
			}
		}

		/* skip the vote when every port's ab/ib is unchanged */
		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase
				[pdbus->curr_bw_uc_idx].vectors[i];
			if ((ab_quota[i] == vect->ab) &&
				(ib_quota[i] == vect->ib))
				match_cnt++;
		}

		if (match_cnt == total_axi_port_cnt) {
			pr_debug("skip BW vote\n");
			return 0;
		}

		/* cycle through the scratch slots 1..num_usecases-1 */
		new_uc_idx = (pdbus->curr_bw_uc_idx %
			(bw_table->num_usecases - 1)) + 1;

		for (i = 0; i < total_axi_port_cnt; i++) {
			vect = &bw_table->usecase[new_uc_idx].vectors[i];
			vect->ab = ab_quota[i];
			vect->ib = ib_quota[i];

			pr_debug("uc_idx=%d %s path idx=%d ab=%llu ib=%llu\n",
				new_uc_idx, (i < rt_axi_port_cnt) ? "rt" : "nrt"
				, i, vect->ab, vect->ib);
		}
	}
	pdbus->curr_bw_uc_idx = new_uc_idx;
	pdbus->ao_bw_uc_idx = new_uc_idx;

	if ((pdbus->bus_ref_cnt == 0) && pdbus->curr_bw_uc_idx) {
		/* non-zero vote with bandwidth disabled: defer it */
		rc = 0;
	} else { /* vote BW if bus_bw_cnt > 0 or uc_idx is zero */
		SDE_ATRACE_BEGIN("msm_bus_scale_req");
		rc = msm_bus_scale_client_update_request(pdbus->data_bus_hdl,
			new_uc_idx);
		SDE_ATRACE_END("msm_bus_scale_req");
	}
	return rc;
}
+
/**
 * sde_power_data_bus_set_quota() - record one client's bandwidth request
 * and re-vote the aggregate
 * @phandle:    power handle with the client list
 * @pclient:    client whose request changes
 * @bus_client: SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT or _NRT
 * @ab_quota:   average bandwidth requested by @pclient
 * @ib_quota:   instantaneous bandwidth requested by @pclient
 *
 * Aggregation across all clients: ab is summed for both classes; ib is
 * summed for NRT but the MAX is taken for RT.
 *
 * Return: 0 on success, negative errno on bad arguments or vote failure.
 */
int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
		struct sde_power_client *pclient,
		int bus_client, u64 ab_quota, u64 ib_quota)
{
	int rc = 0;
	int i;
	u64 total_ab_rt = 0, total_ib_rt = 0;
	u64 total_ab_nrt = 0, total_ib_nrt = 0;
	struct sde_power_client *client;

	if (!phandle || !pclient ||
			bus_client >= SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX) {
		pr_err("invalid parameters\n");
		return -EINVAL;
	}

	mutex_lock(&phandle->phandle_lock);

	pclient->ab[bus_client] = ab_quota;
	pclient->ib[bus_client] = ib_quota;
	trace_sde_perf_update_bus(bus_client, ab_quota, ib_quota);

	list_for_each_entry(client, &phandle->power_client_clist, list) {
		for (i = 0; i < SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX; i++) {
			if (i == SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT) {
				total_ab_nrt += client->ab[i];
				total_ib_nrt += client->ib[i];
			} else {
				total_ab_rt += client->ab[i];
				total_ib_rt = max(total_ib_rt, client->ib[i]);
			}
		}
	}

	rc = _sde_power_data_bus_set_quota(&phandle->data_bus_handle,
			total_ab_rt, total_ab_nrt,
			total_ib_rt, total_ib_nrt);

	mutex_unlock(&phandle->phandle_lock);

	return rc;
}
+
+static void sde_power_data_bus_unregister(
+ struct sde_power_data_bus_handle *pdbus)
+{
+ if (pdbus->data_bus_hdl) {
+ msm_bus_scale_unregister_client(pdbus->data_bus_hdl);
+ pdbus->data_bus_hdl = 0;
+ }
+}
+
/**
 * sde_power_data_bus_parse() - parse the optional data-bus DT nodes and
 * register the bus-scaling client
 * @pdev:  platform device whose of_node is parsed
 * @pdbus: data bus handle to fill in
 *
 * "qcom,sde-dram-channels" and "qcom,sde-num-nrt-paths" are optional
 * (defaults 1 and 0). When the "qcom,sde-data-bus" child exists, the
 * bus scale table is built and an initial quota is recorded.
 *
 * Return: 0 on success or when the data-bus node is absent, negative
 * errno on parse/registration failure.
 */
static int sde_power_data_bus_parse(struct platform_device *pdev,
	struct sde_power_data_bus_handle *pdbus)
{
	struct device_node *node;
	int rc = 0;
	int paths;

	pdbus->bus_channels = 1;
	rc = of_property_read_u32(pdev->dev.of_node,
		"qcom,sde-dram-channels", &pdbus->bus_channels);
	if (rc) {
		pr_debug("number of channels property not specified\n");
		rc = 0;
	}

	pdbus->nrt_axi_port_cnt = 0;
	rc = of_property_read_u32(pdev->dev.of_node,
			"qcom,sde-num-nrt-paths",
			&pdbus->nrt_axi_port_cnt);
	if (rc) {
		pr_debug("number of axi port property not specified\n");
		rc = 0;
	}

	node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-data-bus");
	if (node) {
		rc = of_property_read_u32(node,
				"qcom,msm-bus,num-paths", &paths);
		if (rc) {
			pr_err("Error. qcom,msm-bus,num-paths not found\n");
			return rc;
		}
		pdbus->axi_port_cnt = paths;

		pdbus->data_bus_scale_table =
				msm_bus_pdata_from_node(pdev, node);
		if (IS_ERR_OR_NULL(pdbus->data_bus_scale_table)) {
			pr_err("reg bus handle parsing failed\n");
			rc = PTR_ERR(pdbus->data_bus_scale_table);
			goto end;
		}
		pdbus->data_bus_hdl = msm_bus_scale_register_client(
				pdbus->data_bus_scale_table);
		if (!pdbus->data_bus_hdl) {
			pr_err("data_bus_client register failed\n");
			rc = -EINVAL;
			goto end;
		}
		pr_debug("register data_bus_hdl=%x\n", pdbus->data_bus_hdl);

		/*
		 * Following call will not result in actual vote rather update
		 * the current index and ab/ib value. When continuous splash
		 * is enabled, actual vote will happen when splash handoff is
		 * done.
		 */
		return _sde_power_data_bus_set_quota(pdbus,
				SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA,
				SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA,
				SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA);
	}

end:
	return rc;
}
+
+static int sde_power_reg_bus_parse(struct platform_device *pdev,
+ struct sde_power_handle *phandle)
+{
+ struct device_node *node;
+ struct msm_bus_scale_pdata *bus_scale_table;
+ int rc = 0;
+
+ node = of_get_child_by_name(pdev->dev.of_node, "qcom,sde-reg-bus");
+ if (node) {
+ bus_scale_table = msm_bus_pdata_from_node(pdev, node);
+ if (IS_ERR_OR_NULL(bus_scale_table)) {
+ pr_err("reg bus handle parsing failed\n");
+ rc = PTR_ERR(bus_scale_table);
+ goto end;
+ }
+ phandle->reg_bus_hdl = msm_bus_scale_register_client(
+ bus_scale_table);
+ if (!phandle->reg_bus_hdl) {
+ pr_err("reg_bus_client register failed\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ pr_debug("register reg_bus_hdl=%x\n", phandle->reg_bus_hdl);
+ }
+
+end:
+ return rc;
+}
+
+static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
+{
+ if (reg_bus_hdl)
+ msm_bus_scale_unregister_client(reg_bus_hdl);
+}
+
+static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
+{
+ int rc = 0;
+
+ if (reg_bus_hdl)
+ rc = msm_bus_scale_client_update_request(reg_bus_hdl,
+ usecase_ndx);
+ if (rc)
+ pr_err("failed to set reg bus vote rc=%d\n", rc);
+
+ return rc;
+}
+#else
/*
 * CONFIG_QCOM_BUS_SCALING disabled: bus-scaling support compiles out and
 * every bus operation below becomes a successful no-op.
 */
static int sde_power_data_bus_parse(struct platform_device *pdev,
	struct sde_power_data_bus_handle *pdbus)
{
	return 0;
}

static void sde_power_data_bus_unregister(
		struct sde_power_data_bus_handle *pdbus)
{
}

int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
		struct sde_power_client *pclient,
		int bus_client, u64 ab_quota, u64 ib_quota)
{
	return 0;
}

static int sde_power_reg_bus_parse(struct platform_device *pdev,
	struct sde_power_handle *phandle)
{
	return 0;
}

static void sde_power_reg_bus_unregister(u32 reg_bus_hdl)
{
}

static int sde_power_reg_bus_update(u32 reg_bus_hdl, u32 usecase_ndx)
{
	return 0;
}
+#endif
+
/**
 * sde_power_data_bus_bandwidth_ctrl() - refcounted on/off gate for the
 * data-bus bandwidth vote
 * @phandle: power handle holding the data bus state
 * @pclient: calling client (used only for validation)
 * @enable:  non-zero takes a reference, zero drops one
 *
 * Only the 0->1 and 1->0 reference transitions touch the bus: the first
 * enable re-votes the last computed usecase index, the last disable
 * votes index 0 (off) unless a continuous-splash handoff is pending.
 */
void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
		struct sde_power_client *pclient, int enable)
{
	struct sde_power_data_bus_handle *pdbus;
	int changed = 0;

	if (!phandle || !pclient) {
		pr_err("invalid power/client handle\n");
		return;
	}

	pdbus = &phandle->data_bus_handle;

	mutex_lock(&phandle->phandle_lock);
	if (enable) {
		if (pdbus->bus_ref_cnt == 0)
			changed++;
		pdbus->bus_ref_cnt++;
	} else {
		if (pdbus->bus_ref_cnt) {
			pdbus->bus_ref_cnt--;
			if (pdbus->bus_ref_cnt == 0)
				changed++;
		} else {
			/* unbalanced disable: ignore rather than underflow */
			pr_debug("Can not be turned off\n");
		}
	}

	pr_debug("%pS: task:%s bw_cnt=%d changed=%d enable=%d\n",
		__builtin_return_address(0), current->group_leader->comm,
		pdbus->bus_ref_cnt, changed, enable);

	if (changed) {
		SDE_ATRACE_INT("data_bus_ctrl", enable);

		if (!enable) {
			if (!pdbus->handoff_pending) {
				msm_bus_scale_client_update_request(
						pdbus->data_bus_hdl, 0);
				pdbus->ao_bw_uc_idx = 0;
			}
		} else {
			msm_bus_scale_client_update_request(
					pdbus->data_bus_hdl,
					pdbus->curr_bw_uc_idx);
		}
	}

	mutex_unlock(&phandle->phandle_lock);
}
+
+int sde_power_resource_init(struct platform_device *pdev,
+ struct sde_power_handle *phandle)
+{
+ int rc = 0;
+ struct dss_module_power *mp;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ rc = -EINVAL;
+ goto end;
+ }
+ mp = &phandle->mp;
+ phandle->dev = &pdev->dev;
+
+ rc = sde_power_parse_dt_clock(pdev, mp);
+ if (rc) {
+ pr_err("device clock parsing failed\n");
+ goto end;
+ }
+
+ rc = sde_power_parse_dt_supply(pdev, mp);
+ if (rc) {
+ pr_err("device vreg supply parsing failed\n");
+ goto parse_vreg_err;
+ }
+
+ rc = msm_dss_config_vreg(&pdev->dev,
+ mp->vreg_config, mp->num_vreg, 1);
+ if (rc) {
+ pr_err("vreg config failed rc=%d\n", rc);
+ goto vreg_err;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, mp->num_clk);
+ if (rc) {
+ pr_err("clock get failed rc=%d\n", rc);
+ goto clk_err;
+ }
+
+ rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ if (rc) {
+ pr_err("clock set rate failed rc=%d\n", rc);
+ goto bus_err;
+ }
+
+ rc = sde_power_reg_bus_parse(pdev, phandle);
+ if (rc) {
+ pr_err("register bus parse failed rc=%d\n", rc);
+ goto bus_err;
+ }
+
+ rc = sde_power_data_bus_parse(pdev, &phandle->data_bus_handle);
+ if (rc) {
+ pr_err("register data bus parse failed rc=%d\n", rc);
+ goto data_bus_err;
+ }
+
+ INIT_LIST_HEAD(&phandle->power_client_clist);
+ mutex_init(&phandle->phandle_lock);
+
+ return rc;
+
+data_bus_err:
+ sde_power_reg_bus_unregister(phandle->reg_bus_hdl);
+bus_err:
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_err:
+ msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);
+vreg_err:
+ devm_kfree(&pdev->dev, mp->vreg_config);
+ mp->num_vreg = 0;
+parse_vreg_err:
+ devm_kfree(&pdev->dev, mp->clk_config);
+ mp->num_clk = 0;
+end:
+ return rc;
+}
+
/**
 * sde_power_resource_deinit() - undo sde_power_resource_init()
 * @pdev:    platform device passed to init
 * @phandle: handle to tear down
 *
 * Releases bus clients, clocks and regulators in reverse order of
 * acquisition and frees the DT-parsed config arrays.
 */
void sde_power_resource_deinit(struct platform_device *pdev,
	struct sde_power_handle *phandle)
{
	struct dss_module_power *mp;

	if (!phandle || !pdev) {
		pr_err("invalid input param\n");
		return;
	}
	mp = &phandle->mp;

	sde_power_data_bus_unregister(&phandle->data_bus_handle);

	sde_power_reg_bus_unregister(phandle->reg_bus_hdl);

	msm_dss_put_clk(mp->clk_config, mp->num_clk);

	msm_dss_config_vreg(&pdev->dev, mp->vreg_config, mp->num_vreg, 0);

	if (mp->clk_config)
		devm_kfree(&pdev->dev, mp->clk_config);

	if (mp->vreg_config)
		devm_kfree(&pdev->dev, mp->vreg_config);

	mp->num_vreg = 0;
	mp->num_clk = 0;
}
+
/**
 * sde_power_resource_enable() - refcounted enable/disable of all power
 * resources on behalf of one client
 * @phandle: power handle
 * @pclient: client taking or dropping a reference
 * @enable:  true takes a reference, false drops one
 *
 * Each client's vote is VOTE_INDEX_LOW while its refcount is non-zero.
 * The handle applies the MAX vote across all clients; hardware is only
 * touched when that aggregate changes. Enable order is regulators ->
 * reg bus -> clocks; disable is the exact reverse.
 *
 * Return: 0 on success (including no-change calls), negative errno on
 * an enable failure (with already-enabled resources rolled back).
 */
int sde_power_resource_enable(struct sde_power_handle *phandle,
	struct sde_power_client *pclient, bool enable)
{
	int rc = 0;
	bool changed = false;
	u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
	struct sde_power_client *client;
	struct dss_module_power *mp;

	if (!phandle || !pclient) {
		pr_err("invalid input argument\n");
		return -EINVAL;
	}

	mp = &phandle->mp;

	mutex_lock(&phandle->phandle_lock);
	if (enable)
		pclient->refcount++;
	else if (pclient->refcount)
		pclient->refcount--;

	if (pclient->refcount)
		pclient->usecase_ndx = VOTE_INDEX_LOW;
	else
		pclient->usecase_ndx = VOTE_INDEX_DISABLE;

	/* aggregate: the highest valid vote across all clients wins */
	list_for_each_entry(client, &phandle->power_client_clist, list) {
		if (client->usecase_ndx < VOTE_INDEX_MAX &&
		    client->usecase_ndx > max_usecase_ndx)
			max_usecase_ndx = client->usecase_ndx;
	}

	/* prev_usecase_ndx is only set (and later used) when changed */
	if (phandle->current_usecase_ndx != max_usecase_ndx) {
		changed = true;
		prev_usecase_ndx = phandle->current_usecase_ndx;
		phandle->current_usecase_ndx = max_usecase_ndx;
	}

	pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
		__builtin_return_address(0), changed, max_usecase_ndx,
		pclient->name, pclient->id, enable, pclient->refcount);

	if (!changed)
		goto end;

	if (enable) {
		rc = msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
		if (rc) {
			pr_err("failed to enable vregs rc=%d\n", rc);
			goto vreg_err;
		}

		rc = sde_power_reg_bus_update(phandle->reg_bus_hdl,
							max_usecase_ndx);
		if (rc) {
			pr_err("failed to set reg bus vote rc=%d\n", rc);
			goto reg_bus_hdl_err;
		}

		rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);
		if (rc) {
			pr_err("clock enable failed rc:%d\n", rc);
			goto clk_err;
		}
	} else {
		msm_dss_enable_clk(mp->clk_config, mp->num_clk, enable);

		sde_power_reg_bus_update(phandle->reg_bus_hdl,
							max_usecase_ndx);

		msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, enable);
	}

end:
	mutex_unlock(&phandle->phandle_lock);
	return rc;

/* enable-path rollback: undo in reverse order and restore the old vote */
clk_err:
	sde_power_reg_bus_update(phandle->reg_bus_hdl, prev_usecase_ndx);
reg_bus_hdl_err:
	msm_dss_enable_vreg(mp->vreg_config, mp->num_vreg, 0);
vreg_err:
	phandle->current_usecase_ndx = prev_usecase_ndx;
	mutex_unlock(&phandle->phandle_lock);
	return rc;
}
+
+int sde_power_clk_set_rate(struct sde_power_handle *phandle, char *clock_name,
+ u64 rate)
+{
+ int i, rc = -EINVAL;
+ struct dss_module_power *mp;
+
+ if (!phandle) {
+ pr_err("invalid input power handle\n");
+ return -EINVAL;
+ }
+ mp = &phandle->mp;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+ if (mp->clk_config[i].max_rate &&
+ (rate > mp->clk_config[i].max_rate))
+ rate = mp->clk_config[i].max_rate;
+
+ mp->clk_config[i].rate = rate;
+ rc = msm_dss_clk_set_rate(mp->clk_config, mp->num_clk);
+ break;
+ }
+ }
+
+ return rc;
+}
+
/**
 * sde_power_clk_get_rate() - read the current rate of a named clock
 * @phandle:    power handle containing the clock list
 * @clock_name: clock to query
 *
 * Return: the rate from clk_get_rate(). On a bad handle or unknown
 * clock name it returns -EINVAL cast to u64 (a huge positive value) —
 * legacy convention kept because existing callers compare against it.
 */
u64 sde_power_clk_get_rate(struct sde_power_handle *phandle, char *clock_name)
{
	int i;
	struct dss_module_power *mp;
	u64 rate = -EINVAL;

	if (!phandle) {
		pr_err("invalid input power handle\n");
		return -EINVAL;
	}
	mp = &phandle->mp;

	for (i = 0; i < mp->num_clk; i++) {
		if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
			rate = clk_get_rate(mp->clk_config[i].clk);
			break;
		}
	}

	return rate;
}
+
+u64 sde_power_clk_get_max_rate(struct sde_power_handle *phandle,
+ char *clock_name)
+{
+ int i;
+ struct dss_module_power *mp;
+ u64 rate = 0;
+
+ if (!phandle) {
+ pr_err("invalid input power handle\n");
+ return 0;
+ }
+ mp = &phandle->mp;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+ rate = mp->clk_config[i].max_rate;
+ break;
+ }
+ }
+
+ return rate;
+}
+
+struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
+ char *clock_name)
+{
+ int i;
+ struct dss_module_power *mp;
+ struct clk *clk = NULL;
+
+ if (!phandle) {
+ pr_err("invalid input power handle\n");
+ return 0;
+ }
+ mp = &phandle->mp;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name)) {
+ clk = mp->clk_config[i].clk;
+ break;
+ }
+ }
+
+ return clk;
+}
diff --git a/drivers/gpu/drm/msm/sde_power_handle.h b/drivers/gpu/drm/msm/sde_power_handle.h
new file mode 100644
index 000000000000..b982d1704312
--- /dev/null
+++ b/drivers/gpu/drm/msm/sde_power_handle.h
@@ -0,0 +1,229 @@
+/* Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _SDE_POWER_HANDLE_H_
+#define _SDE_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128 /* size of sde_power_client::name buffer */
+
+#define SDE_POWER_HANDLE_DATA_BUS_IB_QUOTA 2000000000 /* default ib vote; presumably bytes/sec per msm_bus convention — confirm */
+#define SDE_POWER_HANDLE_DATA_BUS_AB_QUOTA 2000000000 /* default ab vote; same units as the ib quota */
+
+/**
+ * enum mdss_bus_vote_type - register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: number of vote levels; not a valid vote itself
+ */
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MAX,
+};
+
+/**
+ * enum sde_power_handle_data_bus_client - type of axi bus clients
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum sde_power_handle_data_bus_client {
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * struct sde_power_client: stores the power client for sde driver
+ * @name: name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount: current refcount if multiple modules are using the
+ * same client for enable/disable. Power module will
+ * aggregate the refcount and vote accordingly for this
+ * client.
+ * @id: assigned during create. helps for debugging.
+ * @list: list to attach power handle master list
+ * @ab: arbitrated bandwidth for each bus client
+ * @ib: instantaneous bandwidth for each bus client
+ */
+struct sde_power_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ short refcount;
+ u32 id;
+ struct list_head list;
+ u64 ab[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ u64 ib[SDE_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+};
+
+/**
+ * struct sde_power_data_bus_handle: power handle struct for data bus
+ * @data_bus_scale_table: pointer to bus scaling table
+ * @data_bus_hdl: current data bus handle
+ * @axi_port_cnt: number of rt axi ports
+ * @nrt_axi_port_cnt: number of nrt axi ports
+ * @bus_channels: number of memory bus channels
+ * @curr_bw_uc_idx: current use case index of data bus
+ * @ao_bw_uc_idx: active only use case index of data bus
+ * @bus_ref_cnt: reference count of data bus enable request
+ * @handoff_pending: True to indicate if bootloader hand-over is pending
+ */
+struct sde_power_data_bus_handle {
+ struct msm_bus_scale_pdata *data_bus_scale_table;
+ u32 data_bus_hdl;
+ u32 axi_port_cnt;
+ u32 nrt_axi_port_cnt;
+ u32 bus_channels;
+ u32 curr_bw_uc_idx;
+ u32 ao_bw_uc_idx;
+ u32 bus_ref_cnt;
+ int handoff_pending;
+};
+
+/**
+ * struct sde_power_handle: power handle main struct
+ * @mp: module power for clock and regulator
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @reg_bus_hdl: current register bus handle
+ * @data_bus_handle: context structure for data bus control
+ */
+struct sde_power_handle {
+ struct dss_module_power mp;
+ struct list_head power_client_clist;
+ struct mutex phandle_lock;
+ struct device *dev;
+ u32 current_usecase_ndx;
+ u32 reg_bus_hdl;
+ struct sde_power_data_bus_handle data_bus_handle;
+};
+
+/**
+ * sde_power_resource_init() - initializes the sde power handle
+ * @pdev: platform device to search the power resources
+ * @pdata: power handle to store the power resources
+ *
+ * Return: error code.
+ */
+int sde_power_resource_init(struct platform_device *pdev,
+ struct sde_power_handle *pdata);
+
+/**
+ * sde_power_resource_deinit() - release the sde power handle
+ * @pdev: platform device for power resources
+ * @pdata: power handle containing the resources
+ *
+ * Return: none.
+ */
+void sde_power_resource_deinit(struct platform_device *pdev,
+ struct sde_power_handle *pdata);
+
+/**
+ * sde_power_client_create() - create the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: pointer to the newly created power client.
+ */
+struct sde_power_client *sde_power_client_create(struct sde_power_handle *pdata,
+ char *client_name);
+
+/**
+ * sde_power_client_destroy() - destroy the client on power handle
+ * @phandle: power handle containing the resources
+ * @client: power client to be destroyed
+ *
+ * Return: none
+ */
+void sde_power_client_destroy(struct sde_power_handle *phandle,
+ struct sde_power_client *client);
+
+/**
+ * sde_power_resource_enable() - enable/disable the power resources
+ * @pdata: power handle containing the resources
+ * @pclient: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int sde_power_resource_enable(struct sde_power_handle *pdata,
+ struct sde_power_client *pclient, bool enable);
+
+/**
+ * sde_power_clk_set_rate() - set the clock rate
+ * @pdata: power handle containing the resources
+ * @clock_name: clock name which needs rate update.
+ * @rate: Requested rate.
+ *
+ * Return: error code.
+ */
+int sde_power_clk_set_rate(struct sde_power_handle *pdata, char *clock_name,
+ u64 rate);
+
+/**
+ * sde_power_clk_get_rate() - get the clock rate
+ * @pdata: power handle containing the resources
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate, or (u64)-EINVAL if the clock is not found
+ */
+u64 sde_power_clk_get_rate(struct sde_power_handle *pdata, char *clock_name);
+
+/**
+ * sde_power_clk_get_max_rate() - get the maximum clock rate
+ * @pdata: power handle containing the resources
+ * @clock_name: clock name to get the max rate.
+ *
+ * Return: maximum clock rate or 0 if not found.
+ */
+u64 sde_power_clk_get_max_rate(struct sde_power_handle *pdata,
+ char *clock_name);
+
+/**
+ * sde_power_clk_get_clk() - get the clock
+ * @phandle: power handle containing the resources
+ * @clock_name: clock name to get the clk pointer.
+ *
+ * Return: pointer to clock, or NULL if not found
+ */
+struct clk *sde_power_clk_get_clk(struct sde_power_handle *phandle,
+ char *clock_name);
+
+/**
+ * sde_power_data_bus_set_quota() - set data bus quota for power client
+ * @phandle: power handle containing the resources
+ * @pclient: client information to set quota
+ * @bus_client: real-time or non-real-time bus client
+ * @ab_quota: arbitrated bus bandwidth
+ * @ib_quota: instantaneous bus bandwidth
+ *
+ * Return: zero if success, or error code otherwise
+ */
+int sde_power_data_bus_set_quota(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient,
+ int bus_client, u64 ab_quota, u64 ib_quota);
+
+/**
+ * sde_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle: power handle containing the resources
+ * @pclient: client information to bandwidth control
+ * @enable: true to enable bandwidth for data bus
+ *
+ * Return: none
+ */
+void sde_power_data_bus_bandwidth_ctrl(struct sde_power_handle *phandle,
+ struct sde_power_client *pclient, int enable);
+
+#endif /* _SDE_POWER_HANDLE_H_ */